From af349116be1167ce3abbf9a4c217045452a49883 Mon Sep 17 00:00:00 2001 From: Dan Sun Date: Tue, 8 Oct 2019 23:32:53 -0400 Subject: [PATCH 1/6] Update kfserving manifests to 0.2.1 --- kfserving/kfserving-crds/base/crd.yaml | 2 +- kfserving/kfserving-install/base/config-map.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kfserving/kfserving-crds/base/crd.yaml b/kfserving/kfserving-crds/base/crd.yaml index 5b7329b24a..459a74ac2c 100644 --- a/kfserving/kfserving-crds/base/crd.yaml +++ b/kfserving/kfserving-crds/base/crd.yaml @@ -610,4 +610,4 @@ status: kind: "" plural: "" conditions: [] - storedVersions: [] \ No newline at end of file + storedVersions: [] diff --git a/kfserving/kfserving-install/base/config-map.yaml b/kfserving/kfserving-install/base/config-map.yaml index c152a45b41..4f1dbf9725 100644 --- a/kfserving/kfserving-install/base/config-map.yaml +++ b/kfserving/kfserving-install/base/config-map.yaml @@ -99,4 +99,4 @@ data: "memoryLimit": "1Gi", "cpuRequest": "100m", "cpuLimit": "1" - } \ No newline at end of file + } From a2559194a3127fc36b0dde20e5dcbac50d865a02 Mon Sep 17 00:00:00 2001 From: Dan Sun Date: Sat, 16 Nov 2019 13:18:19 -0500 Subject: [PATCH 2/6] knative 0.10 upgrade --- knative/knative-serving-crds/base/crd.yaml | 203 ++++++++--------- .../knative-serving-crds/base/namespace.yaml | 2 +- .../base/apiservice.yaml | 2 +- .../base/cluster-role-binding.yaml | 7 +- .../base/cluster-role.yaml | 88 +++++-- .../base/config-map.yaml | 214 +++++++++++------- .../base/deployment.yaml | 114 ++++++---- .../knative-serving-install/base/gateway.yaml | 14 +- .../knative-serving-install/base/image.yaml | 4 +- .../base/kustomization.yaml | 14 +- .../base/role-binding.yaml | 3 +- .../base/service-account.yaml | 2 +- .../knative-serving-install/base/service.yaml | 10 +- .../base/webhook-configuration.yaml | 36 +++ 14 files changed, 426 insertions(+), 287 deletions(-) create mode 100644 knative/knative-serving-install/base/webhook-configuration.yaml diff --git a/knative/knative-serving-crds/base/crd.yaml b/knative/knative-serving-crds/base/crd.yaml index 29ba272935..7c07240664 100644 --- a/knative/knative-serving-crds/base/crd.yaml +++ b/knative/knative-serving-crds/base/crd.yaml @@ -1,9 +1,10 @@ +--- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: certificates.networking.internal.knative.dev spec: additionalPrinterColumns: @@ -34,31 +35,47 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" - name: clusteringresses.networking.internal.knative.dev + serving.knative.dev/release: "v0.10.0" + name: configurations.serving.knative.dev spec: additionalPrinterColumns: + - JSONPath: .status.latestCreatedRevisionName + name: LatestCreated + type: string + - JSONPath: .status.latestReadyRevisionName + name: LatestReady + type: string - JSONPath: .status.conditions[?(@.type=='Ready')].status name: Ready type: string - JSONPath: .status.conditions[?(@.type=='Ready')].reason name: Reason type: string - group: networking.internal.knative.dev + group: serving.knative.dev names: categories: - - knative-internal - - networking - kind: ClusterIngress - plural: clusteringresses - singular: clusteringress - scope: Cluster + - all + - knative + - serving + kind: Configuration + plural: configurations + shortNames: + - config + - cfg + singular: configuration 
+ scope: Namespaced subresources: status: {} versions: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -89,7 +106,7 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: ingresses.networking.internal.knative.dev spec: additionalPrinterColumns: @@ -123,7 +140,7 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: metrics.autoscaling.internal.knative.dev spec: additionalPrinterColumns: @@ -152,10 +169,16 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: podautoscalers.autoscaling.internal.knative.dev spec: additionalPrinterColumns: + - JSONPath: .status.desiredScale + name: DesiredScale + type: integer + - JSONPath: .status.actualScale + name: ActualScale + type: integer - JSONPath: .status.conditions[?(@.type=='Ready')].status name: Ready type: string @@ -187,92 +210,7 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" - name: serverlessservices.networking.internal.knative.dev -spec: - additionalPrinterColumns: - - JSONPath: .spec.mode - name: Mode - type: string - - JSONPath: .status.serviceName - name: ServiceName - type: string - - JSONPath: .status.privateServiceName - name: PrivateServiceName - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string - group: networking.internal.knative.dev - names: - categories: - - knative-internal - - networking - kind: ServerlessService - plural: serverlessservices - shortNames: - - sks - singular: serverlessservice - scope: Namespaced - subresources: - status: {} - versions: - - name: v1alpha1 - served: true - storage: true - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - labels: - knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" - name: configurations.serving.knative.dev -spec: - additionalPrinterColumns: - - JSONPath: .status.latestCreatedRevisionName - name: LatestCreated - type: string - - JSONPath: .status.latestReadyRevisionName - name: LatestReady - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string - group: serving.knative.dev - names: - categories: - - all - - knative - - serving - kind: Configuration - plural: configurations - shortNames: - - config - - cfg - singular: configuration - scope: Namespaced - subresources: - status: {} - versions: - - name: v1alpha1 - served: true - storage: true - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - labels: - knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: revisions.serving.knative.dev spec: additionalPrinterColumns: @@ -309,14 +247,21 @@ spec: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- 
apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: + duck.knative.dev/addressable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: routes.serving.knative.dev spec: additionalPrinterColumns: @@ -347,14 +292,21 @@ spec: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: + duck.knative.dev/addressable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: services.serving.knative.dev spec: additionalPrinterColumns: @@ -392,5 +344,54 @@ spec: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + labels: + knative.dev/crd-install: "true" + serving.knative.dev/release: "v0.10.0" + name: serverlessservices.networking.internal.knative.dev +spec: + additionalPrinterColumns: + - JSONPath: .spec.mode + name: Mode + type: string + - JSONPath: .status.serviceName + name: ServiceName + type: string + - JSONPath: .status.privateServiceName + name: PrivateServiceName + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string + group: networking.internal.knative.dev + names: + categories: + - knative-internal + - networking + kind: ServerlessService + plural: serverlessservices + shortNames: + - sks + singular: serverlessservice + scope: Namespaced + subresources: + status: {} + versions: + - name: v1alpha1 + served: true + storage: true --- diff --git a/knative/knative-serving-crds/base/namespace.yaml b/knative/knative-serving-crds/base/namespace.yaml index 84a55c0aa8..20eb73635e 100644 --- a/knative/knative-serving-crds/base/namespace.yaml +++ b/knative/knative-serving-crds/base/namespace.yaml @@ -3,6 +3,6 @@ kind: Namespace metadata: labels: istio-injection: enabled - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving diff --git a/knative/knative-serving-install/base/apiservice.yaml b/knative/knative-serving-install/base/apiservice.yaml index 85c60bac51..bb03e92fe8 100644 --- a/knative/knative-serving-install/base/apiservice.yaml +++ b/knative/knative-serving-install/base/apiservice.yaml @@ -3,7 +3,7 @@ kind: APIService metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: v1beta1.custom.metrics.k8s.io spec: group: custom.metrics.k8s.io diff --git a/knative/knative-serving-install/base/cluster-role-binding.yaml b/knative/knative-serving-install/base/cluster-role-binding.yaml index 522a85d118..633cfae0ad 100644 --- a/knative/knative-serving-install/base/cluster-role-binding.yaml +++ b/knative/knative-serving-install/base/cluster-role-binding.yaml @@ -3,7 +3,7 @@ kind: ClusterRoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: custom-metrics:system:auth-delegator roleRef: apiGroup: rbac.authorization.k8s.io @@ -20,7 +20,7 @@ kind: ClusterRoleBinding metadata: 
labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: hpa-controller-custom-metrics roleRef: apiGroup: rbac.authorization.k8s.io @@ -36,7 +36,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving-controller-admin roleRef: apiGroup: rbac.authorization.k8s.io @@ -47,4 +47,3 @@ subjects: name: controller namespace: knative-serving ---- diff --git a/knative/knative-serving-install/base/cluster-role.yaml b/knative/knative-serving-install/base/cluster-role.yaml index 430efdb5d5..d5211ed50a 100644 --- a/knative/knative-serving-install/base/cluster-role.yaml +++ b/knative/knative-serving-install/base/cluster-role.yaml @@ -2,49 +2,43 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.8.0" - name: knative-serving-istio + autoscaling.knative.dev/metric-provider: custom-metrics + serving.knative.dev/release: "v0.10.0" + name: custom-metrics-server-resources rules: - apiGroups: - - networking.istio.io + - custom.metrics.k8s.io resources: - - virtualservices - - gateways + - '*' verbs: - - get - - list - - create - - update - - delete - - patch - - watch + - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" - name: custom-metrics-server-resources + rbac.authorization.k8s.io/aggregate-to-admin: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-namespaced-admin rules: - apiGroups: - - custom.metrics.k8s.io + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev resources: - '*' verbs: - '*' - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" - serving.knative.dev/release: "v0.8.0" - name: knative-serving-namespaced-admin + rbac.authorization.k8s.io/aggregate-to-edit: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-namespaced-edit rules: - apiGroups: - serving.knative.dev @@ -53,7 +47,29 @@ rules: resources: - '*' verbs: + - create + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-namespaced-view +rules: +- apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + resources: - '*' + verbs: + - get + - list + - watch --- aggregationRule: @@ -64,7 +80,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving-admin rules: [] --- @@ -73,7 +89,7 @@ kind: ClusterRole metadata: labels: serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving-core rules: - apiGroups: @@ -118,6 +134,7 @@ rules: - admissionregistration.k8s.io resources: - mutatingwebhookconfigurations + - validatingwebhookconfigurations verbs: - get - list @@ -179,5 +196,26 @@ rules: - delete 
- patch - watch - --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + networking.knative.dev/ingress-provider: istio + serving.knative.dev/controller: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-istio +rules: +- apiGroups: + - networking.istio.io + resources: + - virtualservices + - gateways + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/knative/knative-serving-install/base/config-map.yaml b/knative/knative-serving-install/base/config-map.yaml index 6664b0a968..c8b642fd68 100644 --- a/knative/knative-serving-install/base/config-map.yaml +++ b/knative/knative-serving-install/base/config-map.yaml @@ -10,7 +10,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `kubectl edit` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -31,11 +31,22 @@ data: container-concurrency-target-percentage: "70" # The container concurrency target default is what the Autoscaler will - # try to maintain when the Revision specifies unlimited concurrency. + # try to maintain when concurrency is used as the scaling metric for a + # Revision and the Revision specifies unlimited concurrency. # Even when specifying unlimited concurrency, the autoscaler will # horizontally scale the application based on this target concurrency. + # NOTE: Only one metric can be used for autoscaling a Revision. container-concurrency-target-default: "100" + # The requests per second (RPS) target default is what the Autoscaler will + # try to maintain when RPS is used as the scaling metric for a Revision and + # the Revision specifies unlimited RPS. Even when specifying unlimited RPS, + # the autoscaler will horizontally scale the application based on this + # target RPS. + # Must be greater than 1.0. + # NOTE: Only one metric can be used for autoscaling a Revision. + requests-per-second-target-default: "200" + # The target burst capacity specifies the size of burst in concurrent # requests that the system operator expects the system will receive. # Autoscaler will try to protect the system from queueing by introducing @@ -48,7 +59,7 @@ data: # -1 denotes unlimited target-burst-capacity and activator will always # be in the request path. # Other negative values are invalid. - target-burst-capacity: "0" + target-burst-capacity: "200" # When operating in a stable mode, the autoscaler operates on the # average concurrency over the stable window. @@ -74,8 +85,22 @@ data: # Max scale up rate limits the rate at which the autoscaler will # increase pod count. It is the maximum ratio of desired pods versus # observed pods. + # Cannot less or equal to 1. + # I.e with value of 2.0 the number of pods can at most go N to 2N + # over single Autoscaler period (see tick-interval), but at least N to + # N+1, if Autoscaler needs to scale up. max-scale-up-rate: "1000.0" + # Max scale down rate limits the rate at which the autoscaler will + # decrease pod count. It is the maximum ratio of observed pods versus + # desired pods. + # Cannot less or equal to 1. + # I.e. with value of 2.0 the number of pods can at most go N to N/2 + # over single Autoscaler evaluation period (see tick-interval), but at + # least N to N-1, if Autoscaler needs to scale down. 
+ # Not yet used // TODO(vagababov) remove once other parts are ready. + max-scale-down-rate: "2.0" + # Scale to zero feature flag enable-scale-to-zero: "true" @@ -90,12 +115,13 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-autoscaler namespace: knative-serving --- +--- apiVersion: v1 data: _example: | @@ -108,7 +134,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `kubectl edit` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -151,10 +177,17 @@ data: # enclosing Service or Configuration, so values such as # {{.Name}} are also valid. container-name-template: "user-container" + + # container-concurrency specifies the maximum number + # of requests the Container can handle at once, and requests + # above this threshold are queued. Setting a value of zero + # disables this throttling and lets through as many requests as + # the pod receives. + container-concurrency: "0" kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-defaults namespace: knative-serving @@ -171,7 +204,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `kubectl edit` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -179,11 +212,11 @@ data: # List of repositories for which tag to digest resolving should be skipped registriesSkippingTagResolving: "ko.local,dev.local" - queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:e0654305370cf3bbbd0f56f97789c92cf5215f752b70902eba5d5fc0e88c5aca + queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-deployment namespace: knative-serving @@ -200,7 +233,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `kubectl edit` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -229,7 +262,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-domain namespace: knative-serving @@ -246,7 +279,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `kubectl edit` this config map. 
# # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -268,60 +301,11 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-gc namespace: knative-serving --- -apiVersion: v1 -data: - _example: | - ################################ - # # - # EXAMPLE CONFIGURATION # - # # - ################################ - - # This block is not actually functional configuration, - # but serves to illustrate the available configuration - # options and document them in a way that is accessible - # to users that kubectl edit this config map. - # - # These sample configuration options may be copied out of - # this example block and unindented to be in the data block - # to actually change the configuration. - - # Default Knative Gateway after v0.3. It points to the Istio - # standard istio-ingressgateway, instead of a custom one that we - # used pre-0.3. - gateway.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" - - # A cluster local gateway to allow pods outside of the mesh to access - # Services and Routes not exposing through an ingress. If the users - # do have a service mesh setup, this isn't required and can be removed. - # - # An example use case is when users want to use Istio without any - # sidecar injection (like Knative's istio-lean.yaml). Since every pod - # is outside of the service mesh in that case, a cluster-local service - # will need to be exposed to a cluster-local gateway to be accessible. - local-gateway.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" - - # To use only Istio service mesh and no cluster-local-gateway, replace - # all local-gateway.* entries the following entry. - local-gateway.mesh: "mesh" - - # Feature flag to enable reconciling external Istio Gateways. - # When auto TLS feature is turned on, reconcileExternalGateway will be automatically enforced. - # 1. true: enabling reconciling external gateways. - # 2. false: disabling reconciling external gateways. - reconcileExternalGateway: "false" -kind: ConfigMap -metadata: - labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" - name: config-istio - namespace: knative-serving --- apiVersion: v1 @@ -336,7 +320,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `kubectl edit` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -377,7 +361,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-logging namespace: knative-serving @@ -394,7 +378,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `kubectl edit` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -437,16 +421,19 @@ data: # istio.sidecar.includeOutboundIPRanges: "*" - # clusteringress.class specifies the default cluster ingress class + # clusteringress.class has been deprecated. 
Please use ingress.class instead. + clusteringress.class: "istio.ingress.networking.knative.dev" + + # ingress.class specifies the default ingress class # to use when not dictated by Route annotation. # # If not specified, will use the Istio ingress. # - # Note that changing the ClusterIngress class of an existing Route + # Note that changing the Ingress class of an existing Route # will result in undefined behavior. Therefore it is best to only # update this value during the setup of Knative, to avoid getting # undefined behavior. - clusteringress.class: "istio.ingress.networking.knative.dev" + ingress.class: "istio.ingress.networking.knative.dev" # certificate.class specifies the default Certificate class # to use when not dictated by Route annotation. @@ -471,7 +458,7 @@ data: # of "{{.Name}}-{{.Namespace}}.{{.Domain}}", or removing the Namespace # entirely from the template. When choosing a new value be thoughtful # of the potential for conflicts - for example, when users choose to use - # characters such as - in their service, or namespace, names. + # characters such as `-` in their service, or namespace, names. # {{.Annotations}} can be used for any customization in the go template if needed. # We strongly recommend keeping namespace part of the template to avoid domain name clashes # Example '{{.Name}}-{{.Namespace}}.{{ index .Annotations "sub"}}.{{.Domain}}' @@ -491,16 +478,16 @@ data: autoTLS: "Disabled" # Controls the behavior of the HTTP endpoint for the Knative ingress. - # It requires autoTLS to be enabled. + # It requires autoTLS to be enabled or reconcileExternalGateway in config-istio to be true. # 1. Enabled: The Knative ingress will be able to serve HTTP connection. - # 2. Disabled: The Knative ingress ter will reject HTTP traffic. + # 2. Disabled: The Knative ingress will reject HTTP traffic. # 3. Redirected: The Knative ingress will send a 302 redirect for all # http connections, asking the clients to use HTTPS httpProtocol: "Enabled" kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-network namespace: knative-serving @@ -517,7 +504,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `kubectl edit` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -526,7 +513,7 @@ data: # logging.enable-var-log-collection defaults to false. # The fluentd daemon set will be set up to collect /var/log if # this flag is true. - logging.enable-var-log-collection: false + logging.enable-var-log-collection: "false" # logging.revision-url-template provides a template to use for producing the # logging URL that is injected into the status of each Revision. @@ -535,7 +522,8 @@ data: logging.revision-url-template: | http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.serving-knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase)))) - # If non-empty, this enables queue proxy writing request logs to stdout. + # If non-empty, this enables queue proxy writing user request logs to stdout, excluding probe + # requests. # The value determines the shape of the request logs and it must be a valid go text/template. 
# It is important to keep this as a single line. Multiple lines are parsed as separate entities # by most collection agents and will split the request logs into multiple records. @@ -564,6 +552,10 @@ data: # logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}' + # If true, this enables queue proxy writing request logs for probe requests to stdout. + # It uses the same template for user requests, i.e. logging.request-log-template. + logging.enable-probe-request-log: "false" + # metrics.backend-destination field specifies the system metrics destination. # It supports either prometheus (the default) or stackdriver. # Note: Using stackdriver will incur additional charges @@ -585,10 +577,16 @@ data: # flag to "true" could cause extra Stackdriver charge. # If metrics.backend-destination is not Stackdriver, this is ignored. metrics.allow-stackdriver-custom-metrics: "false" + + # profiling.enable indicates whether it is allowed to retrieve runtime profiling data from + # the pods via an HTTP server in the format expected by the pprof visualization tool. When + # enabled, the Knative Serving pods expose the profiling data on an alternate HTTP port 8008. + # The HTTP context root for profiling is then /debug/pprof/. + profiling.enable: "false" kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-observability namespace: knative-serving @@ -605,18 +603,24 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `kubectl edit` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block # to actually change the configuration. # - # If true we enable adding spans within our applications. - enable: "false" + # This may be "zipkin" or "stackdriver", the default is "none" + backend: "none" # URL to zipkin collector where traces are sent. + # This must be specified when backend is "zipkin" zipkin-endpoint: "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans" + # The GCP project into which stackdriver metrics will be written + # when backend is "stackdriver". If unspecified, the project-id + # is read from GCP metadata when running on GCP. + stackdriver-project-id: "my-project" + # Enable zipkin debug mode. This allows all spans to be sent to the server # bypassing sampling. 
debug: "false" @@ -626,8 +630,56 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-tracing namespace: knative-serving - --- +apiVersion: v1 +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # Default Knative Gateway after v0.3. It points to the Istio + # standard istio-ingressgateway, instead of a custom one that we + # used pre-0.3. + gateway.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" + + # A cluster local gateway to allow pods outside of the mesh to access + # Services and Routes not exposing through an ingress. If the users + # do have a service mesh setup, this isn't required and can be removed. + # + # An example use case is when users want to use Istio without any + # sidecar injection (like Knative's istio-lean.yaml). Since every pod + # is outside of the service mesh in that case, a cluster-local service + # will need to be exposed to a cluster-local gateway to be accessible. + local-gateway.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" + + # To use only Istio service mesh and no cluster-local-gateway, replace + # all local-gateway.* entries the following entry. + local-gateway.mesh: "mesh" + + # Feature flag to enable reconciling external Istio Gateways. + # When auto TLS feature is turned on, reconcileExternalGateway will be automatically enforced. + # 1. true: enabling reconciling external gateways. + # 2. false: disabling reconciling external gateways. 
+ reconcileExternalGateway: "false" +kind: ConfigMap +metadata: + labels: + networking.knative.dev/ingress-provider: istio + serving.knative.dev/release: "v0.10.0" + name: config-istio + namespace: knative-serving diff --git a/knative/knative-serving-install/base/deployment.yaml b/knative/knative-serving-install/base/deployment.yaml index fb451bda2f..893cfbd392 100644 --- a/knative/knative-serving-install/base/deployment.yaml +++ b/knative/knative-serving-install/base/deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: activator namespace: knative-serving spec: @@ -18,17 +18,18 @@ spec: labels: app: activator role: activator - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" spec: containers: - - args: - - -logtostderr=false - - -stderrthreshold=FATAL - env: + - env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: SYSTEM_NAMESPACE valueFrom: fieldRef: @@ -38,8 +39,8 @@ spec: - name: CONFIG_OBSERVABILITY_NAME value: config-observability - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:88d864eb3c47881cf7ac058479d1c735cc3cf4f07a11aad0621cd36dcd9ae3c6 + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 livenessProbe: httpGet: httpHeaders: @@ -50,11 +51,13 @@ spec: name: activator ports: - containerPort: 8012 - name: http1-port + name: http1 - containerPort: 8013 - name: h2c-port + name: h2c - containerPort: 9090 - name: metrics-port + name: metrics + - containerPort: 8008 + name: profiling readinessProbe: httpGet: httpHeaders: @@ -74,12 +77,31 @@ spec: serviceAccountName: controller terminationGracePeriodSeconds: 300 --- +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: activator + namespace: knative-serving +spec: + maxReplicas: 20 + metrics: + - resource: + name: cpu + targetAverageUtilization: 100 + type: Resource + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: activator + +--- apiVersion: apps/v1 kind: Deployment metadata: labels: autoscaling.knative.dev/autoscaler-provider: hpa - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: autoscaler-hpa namespace: knative-serving spec: @@ -93,7 +115,7 @@ spec: sidecar.istio.io/inject: "false" labels: app: autoscaler-hpa - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -107,11 +129,13 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:a7801c3cf4edecfa51b7bd2068f97941f6714f7922cb4806245377c2b336b723 + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 name: autoscaler-hpa ports: - containerPort: 9090 name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 1000m @@ -122,14 +146,12 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - --- - apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: autoscaler 
namespace: knative-serving spec: @@ -145,7 +167,7 @@ spec: traffic.sidecar.istio.io/includeInboundPorts: 8080,9090 labels: app: autoscaler - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" spec: containers: - args: @@ -162,7 +184,7 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:aeaacec4feedee309293ac21da13e71a05a2ad84b1d5fcc01ffecfa6cfbb2870 + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c livenessProbe: httpGet: httpHeaders: @@ -178,6 +200,8 @@ spec: name: metrics - containerPort: 8443 name: custom-metrics + - containerPort: 8008 + name: profiling readinessProbe: httpGet: httpHeaders: @@ -195,27 +219,26 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - ---- +--- apiVersion: apps/v1 kind: Deployment metadata: labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" - name: networking-istio + serving.knative.dev/release: "v0.10.0" + name: controller namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: networking-istio + app: controller template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: networking-istio + app: controller + serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -228,12 +251,14 @@ spec: - name: CONFIG_OBSERVABILITY_NAME value: config-observability - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:057c999bccfe32e9889616b571dc8d389c742ff66f0b5516bad651f05459b7bc - name: networking-istio + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 + name: controller ports: - containerPort: 9090 name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 1000m @@ -244,13 +269,12 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: webhook namespace: knative-serving spec: @@ -267,7 +291,7 @@ spec: labels: app: webhook role: webhook - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -281,11 +305,13 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:c2076674618933df53e90cf9ddd17f5ddbad513b8c95e955e45e37be7ca9e0e8 + image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 name: webhook ports: - containerPort: 9090 - name: metrics-port + name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 200m @@ -296,27 +322,27 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" - name: controller + networking.knative.dev/ingress-provider: istio + serving.knative.dev/release: "v0.10.0" + name: networking-istio namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: controller + app: networking-istio template: 
metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: controller - serving.knative.dev/release: "v0.8.0" + app: networking-istio + serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -330,11 +356,13 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:3b096e55fa907cff53d37dadc5d20c29cea9bb18ed9e921a588fee17beb937df - name: controller + image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff + name: networking-istio ports: - containerPort: 9090 name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 1000m @@ -346,5 +374,3 @@ spec: allowPrivilegeEscalation: false serviceAccountName: controller ---- - diff --git a/knative/knative-serving-install/base/gateway.yaml b/knative/knative-serving-install/base/gateway.yaml index 28360635b7..70eaf999ec 100644 --- a/knative/knative-serving-install/base/gateway.yaml +++ b/knative/knative-serving-install/base/gateway.yaml @@ -3,7 +3,7 @@ kind: Gateway metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-ingress-gateway namespace: knative-serving spec: @@ -16,22 +16,13 @@ spec: name: http number: 80 protocol: HTTP - - hosts: - - '*' - port: - name: https - number: 443 - protocol: HTTPS - tls: - mode: PASSTHROUGH - --- apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: cluster-local-gateway namespace: knative-serving spec: @@ -44,4 +35,3 @@ spec: name: http number: 80 protocol: HTTP - diff --git a/knative/knative-serving-install/base/image.yaml b/knative/knative-serving-install/base/image.yaml index 0ac8cf5856..32342f7d7c 100644 --- a/knative/knative-serving-install/base/image.yaml +++ b/knative/knative-serving-install/base/image.yaml @@ -2,9 +2,9 @@ apiVersion: caching.internal.knative.dev/v1alpha1 kind: Image metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: queue-proxy namespace: knative-serving spec: - image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:e0654305370cf3bbbd0f56f97789c92cf5215f752b70902eba5d5fc0e88c5aca + image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c diff --git a/knative/knative-serving-install/base/kustomization.yaml b/knative/knative-serving-install/base/kustomization.yaml index 4e00a2c963..702859467d 100644 --- a/knative/knative-serving-install/base/kustomization.yaml +++ b/knative/knative-serving-install/base/kustomization.yaml @@ -14,25 +14,25 @@ resources: - service.yaml - apiservice.yaml - image.yaml -- hpa.yaml +- webhook-configuration.yaml commonLabels: kustomize.component: knative images: - name: gcr.io/knative-releases/knative.dev/serving/cmd/activator newName: gcr.io/knative-releases/knative.dev/serving/cmd/activator - digest: sha256:88d864eb3c47881cf7ac058479d1c735cc3cf4f07a11aad0621cd36dcd9ae3c6 + digest: sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa - digest: 
sha256:a7801c3cf4edecfa51b7bd2068f97941f6714f7922cb4806245377c2b336b723 + digest: sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler - digest: sha256:aeaacec4feedee309293ac21da13e71a05a2ad84b1d5fcc01ffecfa6cfbb2870 + digest: sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c - name: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio newName: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio - digest: sha256:057c999bccfe32e9889616b571dc8d389c742ff66f0b5516bad651f05459b7bc + digest: sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff - name: gcr.io/knative-releases/knative.dev/serving/cmd/webhook newName: gcr.io/knative-releases/knative.dev/serving/cmd/webhook - digest: sha256:c2076674618933df53e90cf9ddd17f5ddbad513b8c95e955e45e37be7ca9e0e8 + digest: sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 - name: gcr.io/knative-releases/knative.dev/serving/cmd/controller newName: gcr.io/knative-releases/knative.dev/serving/cmd/controller - digest: sha256:3b096e55fa907cff53d37dadc5d20c29cea9bb18ed9e921a588fee17beb937df + digest: sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 diff --git a/knative/knative-serving-install/base/role-binding.yaml b/knative/knative-serving-install/base/role-binding.yaml index e5a6631cb5..4e6aacba66 100644 --- a/knative/knative-serving-install/base/role-binding.yaml +++ b/knative/knative-serving-install/base/role-binding.yaml @@ -3,7 +3,7 @@ kind: RoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: custom-metrics-auth-reader namespace: kube-system roleRef: @@ -14,4 +14,3 @@ subjects: - kind: ServiceAccount name: controller namespace: knative-serving - diff --git a/knative/knative-serving-install/base/service-account.yaml b/knative/knative-serving-install/base/service-account.yaml index 9713d5b5f6..b7486c324f 100644 --- a/knative/knative-serving-install/base/service-account.yaml +++ b/knative/knative-serving-install/base/service-account.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: controller namespace: knative-serving diff --git a/knative/knative-serving-install/base/service.yaml b/knative/knative-serving-install/base/service.yaml index efaab9d8f0..ce20637f18 100644 --- a/knative/knative-serving-install/base/service.yaml +++ b/knative/knative-serving-install/base/service.yaml @@ -3,7 +3,7 @@ kind: Service metadata: labels: app: activator - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: activator-service namespace: knative-serving spec: @@ -30,7 +30,7 @@ kind: Service metadata: labels: app: controller - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: controller namespace: knative-serving spec: @@ -48,7 +48,7 @@ kind: Service metadata: labels: role: webhook - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: webhook namespace: knative-serving spec: @@ -57,14 +57,13 @@ spec: targetPort: 8443 selector: role: webhook - --- apiVersion: v1 kind: Service metadata: labels: app: autoscaler - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" 
name: autoscaler namespace: knative-serving spec: @@ -84,4 +83,3 @@ spec: selector: app: autoscaler ---- diff --git a/knative/knative-serving-install/base/webhook-configuration.yaml b/knative/knative-serving-install/base/webhook-configuration.yaml new file mode 100644 index 0000000000..515e488864 --- /dev/null +++ b/knative/knative-serving-install/base/webhook-configuration.yaml @@ -0,0 +1,36 @@ +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + labels: + serving.knative.dev/release: "v0.10.0" + name: webhook.serving.knative.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: webhook.serving.knative.dev +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + serving.knative.dev/release: "v0.10.0" + name: config.webhook.serving.knative.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: config.webhook.serving.knative.dev + namespaceSelector: + matchExpressions: + - key: serving.knative.dev/release + operator: Exists + From b71234e2154dd7f65c01695e61f726af6d09c2b3 Mon Sep 17 00:00:00 2001 From: Dan Sun Date: Sun, 8 Dec 2019 12:44:16 -0500 Subject: [PATCH 3/6] Update knative test --- .../base/kustomization.yaml | 2 +- .../knative-knative-serving-crds-base_test.go | 205 +++---- ...-serving-crds-overlays-application_test.go | 205 +++---- ...ative-knative-serving-install-base_test.go | 524 +++++++++++------- ...rving-install-overlays-application_test.go | 524 +++++++++++------- 5 files changed, 851 insertions(+), 609 deletions(-) diff --git a/knative/knative-serving-install/base/kustomization.yaml b/knative/knative-serving-install/base/kustomization.yaml index 702859467d..309ff63857 100644 --- a/knative/knative-serving-install/base/kustomization.yaml +++ b/knative/knative-serving-install/base/kustomization.yaml @@ -14,7 +14,7 @@ resources: - service.yaml - apiservice.yaml - image.yaml -- webhook-configuration.yaml +- webhook-configuration.yaml commonLabels: kustomize.component: knative images: diff --git a/tests/knative-knative-serving-crds-base_test.go b/tests/knative-knative-serving-crds-base_test.go index ea444c1799..d1eef36dd3 100644 --- a/tests/knative-knative-serving-crds-base_test.go +++ b/tests/knative-knative-serving-crds-base_test.go @@ -20,17 +20,18 @@ kind: Namespace metadata: labels: istio-injection: enabled - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving `) th.writeF("/manifests/knative/knative-serving-crds/base/crd.yaml", ` +--- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: certificates.networking.internal.knative.dev spec: additionalPrinterColumns: @@ -61,31 +62,47 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" - name: clusteringresses.networking.internal.knative.dev + serving.knative.dev/release: "v0.10.0" + name: configurations.serving.knative.dev spec: additionalPrinterColumns: + - JSONPath: .status.latestCreatedRevisionName + name: LatestCreated + type: string + - JSONPath: .status.latestReadyRevisionName + name: LatestReady + type: string - JSONPath: 
.status.conditions[?(@.type=='Ready')].status name: Ready type: string - JSONPath: .status.conditions[?(@.type=='Ready')].reason name: Reason type: string - group: networking.internal.knative.dev + group: serving.knative.dev names: categories: - - knative-internal - - networking - kind: ClusterIngress - plural: clusteringresses - singular: clusteringress - scope: Cluster + - all + - knative + - serving + kind: Configuration + plural: configurations + shortNames: + - config + - cfg + singular: configuration + scope: Namespaced subresources: status: {} versions: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -116,7 +133,7 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: ingresses.networking.internal.knative.dev spec: additionalPrinterColumns: @@ -150,7 +167,7 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: metrics.autoscaling.internal.knative.dev spec: additionalPrinterColumns: @@ -179,10 +196,16 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: podautoscalers.autoscaling.internal.knative.dev spec: additionalPrinterColumns: + - JSONPath: .status.desiredScale + name: DesiredScale + type: integer + - JSONPath: .status.actualScale + name: ActualScale + type: integer - JSONPath: .status.conditions[?(@.type=='Ready')].status name: Ready type: string @@ -214,92 +237,7 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" - name: serverlessservices.networking.internal.knative.dev -spec: - additionalPrinterColumns: - - JSONPath: .spec.mode - name: Mode - type: string - - JSONPath: .status.serviceName - name: ServiceName - type: string - - JSONPath: .status.privateServiceName - name: PrivateServiceName - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string - group: networking.internal.knative.dev - names: - categories: - - knative-internal - - networking - kind: ServerlessService - plural: serverlessservices - shortNames: - - sks - singular: serverlessservice - scope: Namespaced - subresources: - status: {} - versions: - - name: v1alpha1 - served: true - storage: true - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - labels: - knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" - name: configurations.serving.knative.dev -spec: - additionalPrinterColumns: - - JSONPath: .status.latestCreatedRevisionName - name: LatestCreated - type: string - - JSONPath: .status.latestReadyRevisionName - name: LatestReady - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string - group: serving.knative.dev - names: - categories: - - all - - knative - - serving - kind: Configuration - plural: configurations - shortNames: - - config - - cfg - singular: configuration - scope: Namespaced - subresources: - status: {} - 
versions: - - name: v1alpha1 - served: true - storage: true - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - labels: - knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: revisions.serving.knative.dev spec: additionalPrinterColumns: @@ -336,14 +274,21 @@ spec: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: + duck.knative.dev/addressable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: routes.serving.knative.dev spec: additionalPrinterColumns: @@ -374,14 +319,21 @@ spec: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: + duck.knative.dev/addressable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: services.serving.knative.dev spec: additionalPrinterColumns: @@ -419,6 +371,55 @@ spec: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + labels: + knative.dev/crd-install: "true" + serving.knative.dev/release: "v0.10.0" + name: serverlessservices.networking.internal.knative.dev +spec: + additionalPrinterColumns: + - JSONPath: .spec.mode + name: Mode + type: string + - JSONPath: .status.serviceName + name: ServiceName + type: string + - JSONPath: .status.privateServiceName + name: PrivateServiceName + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string + group: networking.internal.knative.dev + names: + categories: + - knative-internal + - networking + kind: ServerlessService + plural: serverlessservices + shortNames: + - sks + singular: serverlessservice + scope: Namespaced + subresources: + status: {} + versions: + - name: v1alpha1 + served: true + storage: true --- `) diff --git a/tests/knative-knative-serving-crds-overlays-application_test.go b/tests/knative-knative-serving-crds-overlays-application_test.go index 34fe320853..e0f67a43d5 100644 --- a/tests/knative-knative-serving-crds-overlays-application_test.go +++ b/tests/knative-knative-serving-crds-overlays-application_test.go @@ -68,17 +68,18 @@ kind: Namespace metadata: labels: istio-injection: enabled - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving `) th.writeF("/manifests/knative/knative-serving-crds/base/crd.yaml", ` +--- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: certificates.networking.internal.knative.dev spec: additionalPrinterColumns: @@ -109,31 +110,47 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" - name: clusteringresses.networking.internal.knative.dev + serving.knative.dev/release: "v0.10.0" 
+ name: configurations.serving.knative.dev spec: additionalPrinterColumns: + - JSONPath: .status.latestCreatedRevisionName + name: LatestCreated + type: string + - JSONPath: .status.latestReadyRevisionName + name: LatestReady + type: string - JSONPath: .status.conditions[?(@.type=='Ready')].status name: Ready type: string - JSONPath: .status.conditions[?(@.type=='Ready')].reason name: Reason type: string - group: networking.internal.knative.dev + group: serving.knative.dev names: categories: - - knative-internal - - networking - kind: ClusterIngress - plural: clusteringresses - singular: clusteringress - scope: Cluster + - all + - knative + - serving + kind: Configuration + plural: configurations + shortNames: + - config + - cfg + singular: configuration + scope: Namespaced subresources: status: {} versions: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -164,7 +181,7 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: ingresses.networking.internal.knative.dev spec: additionalPrinterColumns: @@ -198,7 +215,7 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: metrics.autoscaling.internal.knative.dev spec: additionalPrinterColumns: @@ -227,10 +244,16 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: podautoscalers.autoscaling.internal.knative.dev spec: additionalPrinterColumns: + - JSONPath: .status.desiredScale + name: DesiredScale + type: integer + - JSONPath: .status.actualScale + name: ActualScale + type: integer - JSONPath: .status.conditions[?(@.type=='Ready')].status name: Ready type: string @@ -262,92 +285,7 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" - name: serverlessservices.networking.internal.knative.dev -spec: - additionalPrinterColumns: - - JSONPath: .spec.mode - name: Mode - type: string - - JSONPath: .status.serviceName - name: ServiceName - type: string - - JSONPath: .status.privateServiceName - name: PrivateServiceName - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string - group: networking.internal.knative.dev - names: - categories: - - knative-internal - - networking - kind: ServerlessService - plural: serverlessservices - shortNames: - - sks - singular: serverlessservice - scope: Namespaced - subresources: - status: {} - versions: - - name: v1alpha1 - served: true - storage: true - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - labels: - knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" - name: configurations.serving.knative.dev -spec: - additionalPrinterColumns: - - JSONPath: .status.latestCreatedRevisionName - name: LatestCreated - type: string - - JSONPath: .status.latestReadyRevisionName - name: LatestReady - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: 
string - group: serving.knative.dev - names: - categories: - - all - - knative - - serving - kind: Configuration - plural: configurations - shortNames: - - config - - cfg - singular: configuration - scope: Namespaced - subresources: - status: {} - versions: - - name: v1alpha1 - served: true - storage: true - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - labels: - knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: revisions.serving.knative.dev spec: additionalPrinterColumns: @@ -384,14 +322,21 @@ spec: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: + duck.knative.dev/addressable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: routes.serving.knative.dev spec: additionalPrinterColumns: @@ -422,14 +367,21 @@ spec: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: + duck.knative.dev/addressable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: services.serving.knative.dev spec: additionalPrinterColumns: @@ -467,6 +419,55 @@ spec: - name: v1alpha1 served: true storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + labels: + knative.dev/crd-install: "true" + serving.knative.dev/release: "v0.10.0" + name: serverlessservices.networking.internal.knative.dev +spec: + additionalPrinterColumns: + - JSONPath: .spec.mode + name: Mode + type: string + - JSONPath: .status.serviceName + name: ServiceName + type: string + - JSONPath: .status.privateServiceName + name: PrivateServiceName + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string + group: networking.internal.knative.dev + names: + categories: + - knative-internal + - networking + kind: ServerlessService + plural: serverlessservices + shortNames: + - sks + singular: serverlessservice + scope: Namespaced + subresources: + status: {} + versions: + - name: v1alpha1 + served: true + storage: true --- `) diff --git a/tests/knative-knative-serving-install-base_test.go b/tests/knative-knative-serving-install-base_test.go index 1cfe8d1b29..f7f957ee2c 100644 --- a/tests/knative-knative-serving-install-base_test.go +++ b/tests/knative-knative-serving-install-base_test.go @@ -20,7 +20,7 @@ kind: Gateway metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-ingress-gateway namespace: knative-serving spec: @@ -33,22 +33,13 @@ spec: name: http number: 80 protocol: HTTP - - hosts: - - '*' - port: - name: https - number: 443 - protocol: HTTPS - tls: - mode: PASSTHROUGH - --- apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" + 
serving.knative.dev/release: "v0.10.0" name: cluster-local-gateway namespace: knative-serving spec: @@ -61,56 +52,49 @@ spec: name: http number: 80 protocol: HTTP - `) th.writeF("/manifests/knative/knative-serving-install/base/cluster-role.yaml", ` apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.8.0" - name: knative-serving-istio + autoscaling.knative.dev/metric-provider: custom-metrics + serving.knative.dev/release: "v0.10.0" + name: custom-metrics-server-resources rules: - apiGroups: - - networking.istio.io + - custom.metrics.k8s.io resources: - - virtualservices - - gateways + - '*' verbs: - - get - - list - - create - - update - - delete - - patch - - watch + - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" - name: custom-metrics-server-resources + rbac.authorization.k8s.io/aggregate-to-admin: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-namespaced-admin rules: - apiGroups: - - custom.metrics.k8s.io + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev resources: - '*' verbs: - '*' - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" - serving.knative.dev/release: "v0.8.0" - name: knative-serving-namespaced-admin + rbac.authorization.k8s.io/aggregate-to-edit: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-namespaced-edit rules: - apiGroups: - serving.knative.dev @@ -119,7 +103,29 @@ rules: resources: - '*' verbs: + - create + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-namespaced-view +rules: +- apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + resources: - '*' + verbs: + - get + - list + - watch --- aggregationRule: @@ -130,7 +136,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving-admin rules: [] --- @@ -139,7 +145,7 @@ kind: ClusterRole metadata: labels: serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving-core rules: - apiGroups: @@ -184,6 +190,7 @@ rules: - admissionregistration.k8s.io resources: - mutatingwebhookconfigurations + - validatingwebhookconfigurations verbs: - get - list @@ -245,8 +252,29 @@ rules: - delete - patch - watch - --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + networking.knative.dev/ingress-provider: istio + serving.knative.dev/controller: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-istio +rules: +- apiGroups: + - networking.istio.io + resources: + - virtualservices + - gateways + verbs: + - get + - list + - create + - update + - delete + - patch + - watch `) th.writeF("/manifests/knative/knative-serving-install/base/cluster-role-binding.yaml", ` apiVersion: rbac.authorization.k8s.io/v1 @@ -254,7 +282,7 @@ kind: ClusterRoleBinding 
metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: custom-metrics:system:auth-delegator roleRef: apiGroup: rbac.authorization.k8s.io @@ -271,7 +299,7 @@ kind: ClusterRoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: hpa-controller-custom-metrics roleRef: apiGroup: rbac.authorization.k8s.io @@ -287,7 +315,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving-controller-admin roleRef: apiGroup: rbac.authorization.k8s.io @@ -298,7 +326,6 @@ subjects: name: controller namespace: knative-serving ---- `) th.writeF("/manifests/knative/knative-serving-install/base/service-role.yaml", ` apiVersion: rbac.istio.io/v1alpha1 @@ -334,7 +361,7 @@ kind: RoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: custom-metrics-auth-reader namespace: kube-system roleRef: @@ -345,7 +372,6 @@ subjects: - kind: ServiceAccount name: controller namespace: knative-serving - `) th.writeF("/manifests/knative/knative-serving-install/base/config-map.yaml", ` apiVersion: v1 @@ -360,7 +386,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -381,11 +407,22 @@ data: container-concurrency-target-percentage: "70" # The container concurrency target default is what the Autoscaler will - # try to maintain when the Revision specifies unlimited concurrency. + # try to maintain when concurrency is used as the scaling metric for a + # Revision and the Revision specifies unlimited concurrency. # Even when specifying unlimited concurrency, the autoscaler will # horizontally scale the application based on this target concurrency. + # NOTE: Only one metric can be used for autoscaling a Revision. container-concurrency-target-default: "100" + # The requests per second (RPS) target default is what the Autoscaler will + # try to maintain when RPS is used as the scaling metric for a Revision and + # the Revision specifies unlimited RPS. Even when specifying unlimited RPS, + # the autoscaler will horizontally scale the application based on this + # target RPS. + # Must be greater than 1.0. + # NOTE: Only one metric can be used for autoscaling a Revision. + requests-per-second-target-default: "200" + # The target burst capacity specifies the size of burst in concurrent # requests that the system operator expects the system will receive. # Autoscaler will try to protect the system from queueing by introducing @@ -398,7 +435,7 @@ data: # -1 denotes unlimited target-burst-capacity and activator will always # be in the request path. # Other negative values are invalid. - target-burst-capacity: "0" + target-burst-capacity: "200" # When operating in a stable mode, the autoscaler operates on the # average concurrency over the stable window. 
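The config-autoscaler settings documented above are cluster-wide defaults; individual Revisions can override them through annotations on the Revision template. A minimal sketch, not part of these manifests — the service name, image, and target values are placeholders chosen for illustration:

apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: example                # hypothetical name
  namespace: default
spec:
  template:
    metadata:
      annotations:
        # Scale on in-flight requests with a per-revision target of 50,
        # overriding container-concurrency-target-default.
        autoscaling.knative.dev/metric: "concurrency"
        autoscaling.knative.dev/target: "50"
        # Bound the scale range for this Revision.
        autoscaling.knative.dev/minScale: "1"
        autoscaling.knative.dev/maxScale: "10"
    spec:
      containers:
      - image: gcr.io/example/app:latest   # placeholder image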
@@ -424,8 +461,22 @@ data: # Max scale up rate limits the rate at which the autoscaler will # increase pod count. It is the maximum ratio of desired pods versus # observed pods. + # Cannot less or equal to 1. + # I.e with value of 2.0 the number of pods can at most go N to 2N + # over single Autoscaler period (see tick-interval), but at least N to + # N+1, if Autoscaler needs to scale up. max-scale-up-rate: "1000.0" + # Max scale down rate limits the rate at which the autoscaler will + # decrease pod count. It is the maximum ratio of observed pods versus + # desired pods. + # Cannot less or equal to 1. + # I.e. with value of 2.0 the number of pods can at most go N to N/2 + # over single Autoscaler evaluation period (see tick-interval), but at + # least N to N-1, if Autoscaler needs to scale down. + # Not yet used // TODO(vagababov) remove once other parts are ready. + max-scale-down-rate: "2.0" + # Scale to zero feature flag enable-scale-to-zero: "true" @@ -440,12 +491,13 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-autoscaler namespace: knative-serving --- +--- apiVersion: v1 data: _example: | @@ -458,7 +510,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -501,10 +553,17 @@ data: # enclosing Service or Configuration, so values such as # {{.Name}} are also valid. container-name-template: "user-container" + + # container-concurrency specifies the maximum number + # of requests the Container can handle at once, and requests + # above this threshold are queued. Setting a value of zero + # disables this throttling and lets through as many requests as + # the pod receives. + container-concurrency: "0" kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-defaults namespace: knative-serving @@ -521,7 +580,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -529,11 +588,11 @@ data: # List of repositories for which tag to digest resolving should be skipped registriesSkippingTagResolving: "ko.local,dev.local" - queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:e0654305370cf3bbbd0f56f97789c92cf5215f752b70902eba5d5fc0e88c5aca + queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-deployment namespace: knative-serving @@ -550,7 +609,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. 
+ # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -579,7 +638,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-domain namespace: knative-serving @@ -596,7 +655,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -618,60 +677,11 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-gc namespace: knative-serving --- -apiVersion: v1 -data: - _example: | - ################################ - # # - # EXAMPLE CONFIGURATION # - # # - ################################ - - # This block is not actually functional configuration, - # but serves to illustrate the available configuration - # options and document them in a way that is accessible - # to users that kubectl edit this config map. - # - # These sample configuration options may be copied out of - # this example block and unindented to be in the data block - # to actually change the configuration. - - # Default Knative Gateway after v0.3. It points to the Istio - # standard istio-ingressgateway, instead of a custom one that we - # used pre-0.3. - gateway.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" - - # A cluster local gateway to allow pods outside of the mesh to access - # Services and Routes not exposing through an ingress. If the users - # do have a service mesh setup, this isn't required and can be removed. - # - # An example use case is when users want to use Istio without any - # sidecar injection (like Knative's istio-lean.yaml). Since every pod - # is outside of the service mesh in that case, a cluster-local service - # will need to be exposed to a cluster-local gateway to be accessible. - local-gateway.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" - - # To use only Istio service mesh and no cluster-local-gateway, replace - # all local-gateway.* entries the following entry. - local-gateway.mesh: "mesh" - - # Feature flag to enable reconciling external Istio Gateways. - # When auto TLS feature is turned on, reconcileExternalGateway will be automatically enforced. - # 1. true: enabling reconciling external gateways. - # 2. false: disabling reconciling external gateways. - reconcileExternalGateway: "false" -kind: ConfigMap -metadata: - labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" - name: config-istio - namespace: knative-serving --- apiVersion: v1 @@ -686,7 +696,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. 
# # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -727,7 +737,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-logging namespace: knative-serving @@ -744,7 +754,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -787,16 +797,19 @@ data: # istio.sidecar.includeOutboundIPRanges: "*" - # clusteringress.class specifies the default cluster ingress class + # clusteringress.class has been deprecated. Please use ingress.class instead. + clusteringress.class: "istio.ingress.networking.knative.dev" + + # ingress.class specifies the default ingress class # to use when not dictated by Route annotation. # # If not specified, will use the Istio ingress. # - # Note that changing the ClusterIngress class of an existing Route + # Note that changing the Ingress class of an existing Route # will result in undefined behavior. Therefore it is best to only # update this value during the setup of Knative, to avoid getting # undefined behavior. - clusteringress.class: "istio.ingress.networking.knative.dev" + ingress.class: "istio.ingress.networking.knative.dev" # certificate.class specifies the default Certificate class # to use when not dictated by Route annotation. @@ -821,7 +834,7 @@ data: # of "{{.Name}}-{{.Namespace}}.{{.Domain}}", or removing the Namespace # entirely from the template. When choosing a new value be thoughtful # of the potential for conflicts - for example, when users choose to use - # characters such as - in their service, or namespace, names. + # characters such as `+"`"+`-`+"`"+` in their service, or namespace, names. # {{.Annotations}} can be used for any customization in the go template if needed. # We strongly recommend keeping namespace part of the template to avoid domain name clashes # Example '{{.Name}}-{{.Namespace}}.{{ index .Annotations "sub"}}.{{.Domain}}' @@ -841,16 +854,16 @@ data: autoTLS: "Disabled" # Controls the behavior of the HTTP endpoint for the Knative ingress. - # It requires autoTLS to be enabled. + # It requires autoTLS to be enabled or reconcileExternalGateway in config-istio to be true. # 1. Enabled: The Knative ingress will be able to serve HTTP connection. - # 2. Disabled: The Knative ingress ter will reject HTTP traffic. + # 2. Disabled: The Knative ingress will reject HTTP traffic. # 3. Redirected: The Knative ingress will send a 302 redirect for all # http connections, asking the clients to use HTTPS httpProtocol: "Enabled" kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-network namespace: knative-serving @@ -867,7 +880,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. 
# # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -876,7 +889,7 @@ data: # logging.enable-var-log-collection defaults to false. # The fluentd daemon set will be set up to collect /var/log if # this flag is true. - logging.enable-var-log-collection: false + logging.enable-var-log-collection: "false" # logging.revision-url-template provides a template to use for producing the # logging URL that is injected into the status of each Revision. @@ -885,7 +898,8 @@ data: logging.revision-url-template: | http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.serving-knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase)))) - # If non-empty, this enables queue proxy writing request logs to stdout. + # If non-empty, this enables queue proxy writing user request logs to stdout, excluding probe + # requests. # The value determines the shape of the request logs and it must be a valid go text/template. # It is important to keep this as a single line. Multiple lines are parsed as separate entities # by most collection agents and will split the request logs into multiple records. @@ -914,6 +928,10 @@ data: # logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}' + # If true, this enables queue proxy writing request logs for probe requests to stdout. + # It uses the same template for user requests, i.e. logging.request-log-template. + logging.enable-probe-request-log: "false" + # metrics.backend-destination field specifies the system metrics destination. # It supports either prometheus (the default) or stackdriver. # Note: Using stackdriver will incur additional charges @@ -935,10 +953,16 @@ data: # flag to "true" could cause extra Stackdriver charge. # If metrics.backend-destination is not Stackdriver, this is ignored. metrics.allow-stackdriver-custom-metrics: "false" + + # profiling.enable indicates whether it is allowed to retrieve runtime profiling data from + # the pods via an HTTP server in the format expected by the pprof visualization tool. When + # enabled, the Knative Serving pods expose the profiling data on an alternate HTTP port 8008. + # The HTTP context root for profiling is then /debug/pprof/. + profiling.enable: "false" kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-observability namespace: knative-serving @@ -955,18 +979,24 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block # to actually change the configuration. # - # If true we enable adding spans within our applications. 
- enable: "false" + # This may be "zipkin" or "stackdriver", the default is "none" + backend: "none" # URL to zipkin collector where traces are sent. + # This must be specified when backend is "zipkin" zipkin-endpoint: "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans" + # The GCP project into which stackdriver metrics will be written + # when backend is "stackdriver". If unspecified, the project-id + # is read from GCP metadata when running on GCP. + stackdriver-project-id: "my-project" + # Enable zipkin debug mode. This allows all spans to be sent to the server # bypassing sampling. debug: "false" @@ -976,18 +1006,66 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-tracing namespace: knative-serving - --- +apiVersion: v1 +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `+"`"+`kubectl edit`+"`"+` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # Default Knative Gateway after v0.3. It points to the Istio + # standard istio-ingressgateway, instead of a custom one that we + # used pre-0.3. + gateway.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" + + # A cluster local gateway to allow pods outside of the mesh to access + # Services and Routes not exposing through an ingress. If the users + # do have a service mesh setup, this isn't required and can be removed. + # + # An example use case is when users want to use Istio without any + # sidecar injection (like Knative's istio-lean.yaml). Since every pod + # is outside of the service mesh in that case, a cluster-local service + # will need to be exposed to a cluster-local gateway to be accessible. + local-gateway.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" + + # To use only Istio service mesh and no cluster-local-gateway, replace + # all local-gateway.* entries the following entry. + local-gateway.mesh: "mesh" + + # Feature flag to enable reconciling external Istio Gateways. + # When auto TLS feature is turned on, reconcileExternalGateway will be automatically enforced. + # 1. true: enabling reconciling external gateways. + # 2. false: disabling reconciling external gateways. 
+ reconcileExternalGateway: "false" +kind: ConfigMap +metadata: + labels: + networking.knative.dev/ingress-provider: istio + serving.knative.dev/release: "v0.10.0" + name: config-istio + namespace: knative-serving `) th.writeF("/manifests/knative/knative-serving-install/base/deployment.yaml", ` apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: activator namespace: knative-serving spec: @@ -1003,17 +1081,18 @@ spec: labels: app: activator role: activator - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" spec: containers: - - args: - - -logtostderr=false - - -stderrthreshold=FATAL - env: + - env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: SYSTEM_NAMESPACE valueFrom: fieldRef: @@ -1023,8 +1102,8 @@ spec: - name: CONFIG_OBSERVABILITY_NAME value: config-observability - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:88d864eb3c47881cf7ac058479d1c735cc3cf4f07a11aad0621cd36dcd9ae3c6 + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 livenessProbe: httpGet: httpHeaders: @@ -1035,11 +1114,13 @@ spec: name: activator ports: - containerPort: 8012 - name: http1-port + name: http1 - containerPort: 8013 - name: h2c-port + name: h2c - containerPort: 9090 - name: metrics-port + name: metrics + - containerPort: 8008 + name: profiling readinessProbe: httpGet: httpHeaders: @@ -1059,12 +1140,31 @@ spec: serviceAccountName: controller terminationGracePeriodSeconds: 300 --- +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: activator + namespace: knative-serving +spec: + maxReplicas: 20 + metrics: + - resource: + name: cpu + targetAverageUtilization: 100 + type: Resource + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: activator + +--- apiVersion: apps/v1 kind: Deployment metadata: labels: autoscaling.knative.dev/autoscaler-provider: hpa - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: autoscaler-hpa namespace: knative-serving spec: @@ -1078,7 +1178,7 @@ spec: sidecar.istio.io/inject: "false" labels: app: autoscaler-hpa - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -1092,11 +1192,13 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:a7801c3cf4edecfa51b7bd2068f97941f6714f7922cb4806245377c2b336b723 + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 name: autoscaler-hpa ports: - containerPort: 9090 name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 1000m @@ -1107,14 +1209,12 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - --- - apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: autoscaler namespace: knative-serving spec: @@ -1130,7 +1230,7 @@ spec: traffic.sidecar.istio.io/includeInboundPorts: 8080,9090 labels: app: autoscaler - serving.knative.dev/release: "v0.8.0" + 
serving.knative.dev/release: "v0.10.0" spec: containers: - args: @@ -1147,7 +1247,7 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:aeaacec4feedee309293ac21da13e71a05a2ad84b1d5fcc01ffecfa6cfbb2870 + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c livenessProbe: httpGet: httpHeaders: @@ -1163,6 +1263,8 @@ spec: name: metrics - containerPort: 8443 name: custom-metrics + - containerPort: 8008 + name: profiling readinessProbe: httpGet: httpHeaders: @@ -1180,27 +1282,26 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - ---- +--- apiVersion: apps/v1 kind: Deployment metadata: labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" - name: networking-istio + serving.knative.dev/release: "v0.10.0" + name: controller namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: networking-istio + app: controller template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: networking-istio + app: controller + serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -1213,12 +1314,14 @@ spec: - name: CONFIG_OBSERVABILITY_NAME value: config-observability - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:057c999bccfe32e9889616b571dc8d389c742ff66f0b5516bad651f05459b7bc - name: networking-istio + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 + name: controller ports: - containerPort: 9090 name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 1000m @@ -1229,13 +1332,12 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: webhook namespace: knative-serving spec: @@ -1252,7 +1354,7 @@ spec: labels: app: webhook role: webhook - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -1266,11 +1368,13 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:c2076674618933df53e90cf9ddd17f5ddbad513b8c95e955e45e37be7ca9e0e8 + image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 name: webhook ports: - containerPort: 9090 - name: metrics-port + name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 200m @@ -1281,27 +1385,27 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" - name: controller + networking.knative.dev/ingress-provider: istio + serving.knative.dev/release: "v0.10.0" + name: networking-istio namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: controller + app: networking-istio template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: controller - serving.knative.dev/release: "v0.8.0" + app: networking-istio + 
serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -1315,11 +1419,13 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:3b096e55fa907cff53d37dadc5d20c29cea9bb18ed9e921a588fee17beb937df - name: controller + image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff + name: networking-istio ports: - containerPort: 9090 name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 1000m @@ -1331,15 +1437,13 @@ spec: allowPrivilegeEscalation: false serviceAccountName: controller ---- - `) th.writeF("/manifests/knative/knative-serving-install/base/service-account.yaml", ` apiVersion: v1 kind: ServiceAccount metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: controller namespace: knative-serving @@ -1350,7 +1454,7 @@ kind: Service metadata: labels: app: activator - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: activator-service namespace: knative-serving spec: @@ -1377,7 +1481,7 @@ kind: Service metadata: labels: app: controller - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: controller namespace: knative-serving spec: @@ -1395,7 +1499,7 @@ kind: Service metadata: labels: role: webhook - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: webhook namespace: knative-serving spec: @@ -1404,14 +1508,13 @@ spec: targetPort: 8443 selector: role: webhook - --- apiVersion: v1 kind: Service metadata: labels: app: autoscaler - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: autoscaler namespace: knative-serving spec: @@ -1431,7 +1534,6 @@ spec: selector: app: autoscaler ---- `) th.writeF("/manifests/knative/knative-serving-install/base/apiservice.yaml", ` apiVersion: apiregistration.k8s.io/v1beta1 @@ -1439,7 +1541,7 @@ kind: APIService metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: v1beta1.custom.metrics.k8s.io spec: group: custom.metrics.k8s.io @@ -1457,31 +1559,49 @@ apiVersion: caching.internal.knative.dev/v1alpha1 kind: Image metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: queue-proxy namespace: knative-serving spec: - image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:e0654305370cf3bbbd0f56f97789c92cf5215f752b70902eba5d5fc0e88c5aca + image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c `) - th.writeF("/manifests/knative/knative-serving-install/base/hpa.yaml", ` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler + th.writeF("/manifests/knative/knative-serving-install/base/webhook-configuration.yaml", ` +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration metadata: - name: activator - namespace: knative-serving -spec: - maxReplicas: 20 - metrics: - - resource: - name: cpu - targetAverageUtilization: 100 - type: Resource - minReplicas: 1 - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: activator + labels: + serving.knative.dev/release: "v0.10.0" + name: webhook.serving.knative.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + 
service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: webhook.serving.knative.dev +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + serving.knative.dev/release: "v0.10.0" + name: config.webhook.serving.knative.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: config.webhook.serving.knative.dev + namespaceSelector: + matchExpressions: + - key: serving.knative.dev/release + operator: Exists `) th.writeK("/manifests/knative/knative-serving-install/base", ` @@ -1501,28 +1621,28 @@ resources: - service.yaml - apiservice.yaml - image.yaml -- hpa.yaml +- webhook-configuration.yaml commonLabels: kustomize.component: knative images: - name: gcr.io/knative-releases/knative.dev/serving/cmd/activator newName: gcr.io/knative-releases/knative.dev/serving/cmd/activator - digest: sha256:88d864eb3c47881cf7ac058479d1c735cc3cf4f07a11aad0621cd36dcd9ae3c6 + digest: sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa - digest: sha256:a7801c3cf4edecfa51b7bd2068f97941f6714f7922cb4806245377c2b336b723 + digest: sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler - digest: sha256:aeaacec4feedee309293ac21da13e71a05a2ad84b1d5fcc01ffecfa6cfbb2870 + digest: sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c - name: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio newName: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio - digest: sha256:057c999bccfe32e9889616b571dc8d389c742ff66f0b5516bad651f05459b7bc + digest: sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff - name: gcr.io/knative-releases/knative.dev/serving/cmd/webhook newName: gcr.io/knative-releases/knative.dev/serving/cmd/webhook - digest: sha256:c2076674618933df53e90cf9ddd17f5ddbad513b8c95e955e45e37be7ca9e0e8 + digest: sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 - name: gcr.io/knative-releases/knative.dev/serving/cmd/controller newName: gcr.io/knative-releases/knative.dev/serving/cmd/controller - digest: sha256:3b096e55fa907cff53d37dadc5d20c29cea9bb18ed9e921a588fee17beb937df + digest: sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 `) } diff --git a/tests/knative-knative-serving-install-overlays-application_test.go b/tests/knative-knative-serving-install-overlays-application_test.go index e4f3da1ada..d702eec140 100644 --- a/tests/knative-knative-serving-install-overlays-application_test.go +++ b/tests/knative-knative-serving-install-overlays-application_test.go @@ -68,7 +68,7 @@ kind: Gateway metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-ingress-gateway namespace: knative-serving spec: @@ -81,22 +81,13 @@ spec: name: http number: 80 protocol: HTTP - - hosts: - - '*' - port: - name: https - number: 443 - protocol: HTTPS - tls: - mode: PASSTHROUGH - --- apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" + 
serving.knative.dev/release: "v0.10.0" name: cluster-local-gateway namespace: knative-serving spec: @@ -109,56 +100,49 @@ spec: name: http number: 80 protocol: HTTP - `) th.writeF("/manifests/knative/knative-serving-install/base/cluster-role.yaml", ` apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.8.0" - name: knative-serving-istio + autoscaling.knative.dev/metric-provider: custom-metrics + serving.knative.dev/release: "v0.10.0" + name: custom-metrics-server-resources rules: - apiGroups: - - networking.istio.io + - custom.metrics.k8s.io resources: - - virtualservices - - gateways + - '*' verbs: - - get - - list - - create - - update - - delete - - patch - - watch + - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" - name: custom-metrics-server-resources + rbac.authorization.k8s.io/aggregate-to-admin: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-namespaced-admin rules: - apiGroups: - - custom.metrics.k8s.io + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev resources: - '*' verbs: - '*' - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" - serving.knative.dev/release: "v0.8.0" - name: knative-serving-namespaced-admin + rbac.authorization.k8s.io/aggregate-to-edit: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-namespaced-edit rules: - apiGroups: - serving.knative.dev @@ -167,7 +151,29 @@ rules: resources: - '*' verbs: + - create + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-namespaced-view +rules: +- apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + resources: - '*' + verbs: + - get + - list + - watch --- aggregationRule: @@ -178,7 +184,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving-admin rules: [] --- @@ -187,7 +193,7 @@ kind: ClusterRole metadata: labels: serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving-core rules: - apiGroups: @@ -232,6 +238,7 @@ rules: - admissionregistration.k8s.io resources: - mutatingwebhookconfigurations + - validatingwebhookconfigurations verbs: - get - list @@ -293,8 +300,29 @@ rules: - delete - patch - watch - --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + networking.knative.dev/ingress-provider: istio + serving.knative.dev/controller: "true" + serving.knative.dev/release: "v0.10.0" + name: knative-serving-istio +rules: +- apiGroups: + - networking.istio.io + resources: + - virtualservices + - gateways + verbs: + - get + - list + - create + - update + - delete + - patch + - watch `) th.writeF("/manifests/knative/knative-serving-install/base/cluster-role-binding.yaml", ` apiVersion: rbac.authorization.k8s.io/v1 @@ -302,7 +330,7 @@ kind: ClusterRoleBinding 
metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: custom-metrics:system:auth-delegator roleRef: apiGroup: rbac.authorization.k8s.io @@ -319,7 +347,7 @@ kind: ClusterRoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: hpa-controller-custom-metrics roleRef: apiGroup: rbac.authorization.k8s.io @@ -335,7 +363,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: knative-serving-controller-admin roleRef: apiGroup: rbac.authorization.k8s.io @@ -346,7 +374,6 @@ subjects: name: controller namespace: knative-serving ---- `) th.writeF("/manifests/knative/knative-serving-install/base/service-role.yaml", ` apiVersion: rbac.istio.io/v1alpha1 @@ -382,7 +409,7 @@ kind: RoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: custom-metrics-auth-reader namespace: kube-system roleRef: @@ -393,7 +420,6 @@ subjects: - kind: ServiceAccount name: controller namespace: knative-serving - `) th.writeF("/manifests/knative/knative-serving-install/base/config-map.yaml", ` apiVersion: v1 @@ -408,7 +434,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -429,11 +455,22 @@ data: container-concurrency-target-percentage: "70" # The container concurrency target default is what the Autoscaler will - # try to maintain when the Revision specifies unlimited concurrency. + # try to maintain when concurrency is used as the scaling metric for a + # Revision and the Revision specifies unlimited concurrency. # Even when specifying unlimited concurrency, the autoscaler will # horizontally scale the application based on this target concurrency. + # NOTE: Only one metric can be used for autoscaling a Revision. container-concurrency-target-default: "100" + # The requests per second (RPS) target default is what the Autoscaler will + # try to maintain when RPS is used as the scaling metric for a Revision and + # the Revision specifies unlimited RPS. Even when specifying unlimited RPS, + # the autoscaler will horizontally scale the application based on this + # target RPS. + # Must be greater than 1.0. + # NOTE: Only one metric can be used for autoscaling a Revision. + requests-per-second-target-default: "200" + # The target burst capacity specifies the size of burst in concurrent # requests that the system operator expects the system will receive. # Autoscaler will try to protect the system from queueing by introducing @@ -446,7 +483,7 @@ data: # -1 denotes unlimited target-burst-capacity and activator will always # be in the request path. # Other negative values are invalid. - target-burst-capacity: "0" + target-burst-capacity: "200" # When operating in a stable mode, the autoscaler operates on the # average concurrency over the stable window. 
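Operators who need different autoscaler defaults can patch the config-autoscaler ConfigMap from an overlay rather than editing the base manifests. A sketch of such a strategic-merge patch, assuming illustrative values; it would be listed under patchesStrategicMerge in the overlay's kustomization.yaml:

apiVersion: v1
kind: ConfigMap
metadata:
  name: config-autoscaler
  namespace: knative-serving
data:
  # Illustrative overrides of the defaults documented above.
  container-concurrency-target-default: "50"
  target-burst-capacity: "200"
  enable-scale-to-zero: "true"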
@@ -472,8 +509,22 @@ data: # Max scale up rate limits the rate at which the autoscaler will # increase pod count. It is the maximum ratio of desired pods versus # observed pods. + # Cannot less or equal to 1. + # I.e with value of 2.0 the number of pods can at most go N to 2N + # over single Autoscaler period (see tick-interval), but at least N to + # N+1, if Autoscaler needs to scale up. max-scale-up-rate: "1000.0" + # Max scale down rate limits the rate at which the autoscaler will + # decrease pod count. It is the maximum ratio of observed pods versus + # desired pods. + # Cannot less or equal to 1. + # I.e. with value of 2.0 the number of pods can at most go N to N/2 + # over single Autoscaler evaluation period (see tick-interval), but at + # least N to N-1, if Autoscaler needs to scale down. + # Not yet used // TODO(vagababov) remove once other parts are ready. + max-scale-down-rate: "2.0" + # Scale to zero feature flag enable-scale-to-zero: "true" @@ -488,12 +539,13 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-autoscaler namespace: knative-serving --- +--- apiVersion: v1 data: _example: | @@ -506,7 +558,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -549,10 +601,17 @@ data: # enclosing Service or Configuration, so values such as # {{.Name}} are also valid. container-name-template: "user-container" + + # container-concurrency specifies the maximum number + # of requests the Container can handle at once, and requests + # above this threshold are queued. Setting a value of zero + # disables this throttling and lets through as many requests as + # the pod receives. + container-concurrency: "0" kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-defaults namespace: knative-serving @@ -569,7 +628,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -577,11 +636,11 @@ data: # List of repositories for which tag to digest resolving should be skipped registriesSkippingTagResolving: "ko.local,dev.local" - queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:e0654305370cf3bbbd0f56f97789c92cf5215f752b70902eba5d5fc0e88c5aca + queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-deployment namespace: knative-serving @@ -598,7 +657,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. 
+ # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -627,7 +686,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-domain namespace: knative-serving @@ -644,7 +703,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -666,60 +725,11 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-gc namespace: knative-serving --- -apiVersion: v1 -data: - _example: | - ################################ - # # - # EXAMPLE CONFIGURATION # - # # - ################################ - - # This block is not actually functional configuration, - # but serves to illustrate the available configuration - # options and document them in a way that is accessible - # to users that kubectl edit this config map. - # - # These sample configuration options may be copied out of - # this example block and unindented to be in the data block - # to actually change the configuration. - - # Default Knative Gateway after v0.3. It points to the Istio - # standard istio-ingressgateway, instead of a custom one that we - # used pre-0.3. - gateway.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" - - # A cluster local gateway to allow pods outside of the mesh to access - # Services and Routes not exposing through an ingress. If the users - # do have a service mesh setup, this isn't required and can be removed. - # - # An example use case is when users want to use Istio without any - # sidecar injection (like Knative's istio-lean.yaml). Since every pod - # is outside of the service mesh in that case, a cluster-local service - # will need to be exposed to a cluster-local gateway to be accessible. - local-gateway.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" - - # To use only Istio service mesh and no cluster-local-gateway, replace - # all local-gateway.* entries the following entry. - local-gateway.mesh: "mesh" - - # Feature flag to enable reconciling external Istio Gateways. - # When auto TLS feature is turned on, reconcileExternalGateway will be automatically enforced. - # 1. true: enabling reconciling external gateways. - # 2. false: disabling reconciling external gateways. - reconcileExternalGateway: "false" -kind: ConfigMap -metadata: - labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" - name: config-istio - namespace: knative-serving --- apiVersion: v1 @@ -734,7 +744,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. 
# # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -775,7 +785,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-logging namespace: knative-serving @@ -792,7 +802,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -835,16 +845,19 @@ data: # istio.sidecar.includeOutboundIPRanges: "*" - # clusteringress.class specifies the default cluster ingress class + # clusteringress.class has been deprecated. Please use ingress.class instead. + clusteringress.class: "istio.ingress.networking.knative.dev" + + # ingress.class specifies the default ingress class # to use when not dictated by Route annotation. # # If not specified, will use the Istio ingress. # - # Note that changing the ClusterIngress class of an existing Route + # Note that changing the Ingress class of an existing Route # will result in undefined behavior. Therefore it is best to only # update this value during the setup of Knative, to avoid getting # undefined behavior. - clusteringress.class: "istio.ingress.networking.knative.dev" + ingress.class: "istio.ingress.networking.knative.dev" # certificate.class specifies the default Certificate class # to use when not dictated by Route annotation. @@ -869,7 +882,7 @@ data: # of "{{.Name}}-{{.Namespace}}.{{.Domain}}", or removing the Namespace # entirely from the template. When choosing a new value be thoughtful # of the potential for conflicts - for example, when users choose to use - # characters such as - in their service, or namespace, names. + # characters such as `+"`"+`-`+"`"+` in their service, or namespace, names. # {{.Annotations}} can be used for any customization in the go template if needed. # We strongly recommend keeping namespace part of the template to avoid domain name clashes # Example '{{.Name}}-{{.Namespace}}.{{ index .Annotations "sub"}}.{{.Domain}}' @@ -889,16 +902,16 @@ data: autoTLS: "Disabled" # Controls the behavior of the HTTP endpoint for the Knative ingress. - # It requires autoTLS to be enabled. + # It requires autoTLS to be enabled or reconcileExternalGateway in config-istio to be true. # 1. Enabled: The Knative ingress will be able to serve HTTP connection. - # 2. Disabled: The Knative ingress ter will reject HTTP traffic. + # 2. Disabled: The Knative ingress will reject HTTP traffic. # 3. Redirected: The Knative ingress will send a 302 redirect for all # http connections, asking the clients to use HTTPS httpProtocol: "Enabled" kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-network namespace: knative-serving @@ -915,7 +928,7 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. 
# # These sample configuration options may be copied out of # this example block and unindented to be in the data block @@ -924,7 +937,7 @@ data: # logging.enable-var-log-collection defaults to false. # The fluentd daemon set will be set up to collect /var/log if # this flag is true. - logging.enable-var-log-collection: false + logging.enable-var-log-collection: "false" # logging.revision-url-template provides a template to use for producing the # logging URL that is injected into the status of each Revision. @@ -933,7 +946,8 @@ data: logging.revision-url-template: | http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.serving-knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase)))) - # If non-empty, this enables queue proxy writing request logs to stdout. + # If non-empty, this enables queue proxy writing user request logs to stdout, excluding probe + # requests. # The value determines the shape of the request logs and it must be a valid go text/template. # It is important to keep this as a single line. Multiple lines are parsed as separate entities # by most collection agents and will split the request logs into multiple records. @@ -962,6 +976,10 @@ data: # logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}' + # If true, this enables queue proxy writing request logs for probe requests to stdout. + # It uses the same template for user requests, i.e. logging.request-log-template. + logging.enable-probe-request-log: "false" + # metrics.backend-destination field specifies the system metrics destination. # It supports either prometheus (the default) or stackdriver. # Note: Using stackdriver will incur additional charges @@ -983,10 +1001,16 @@ data: # flag to "true" could cause extra Stackdriver charge. # If metrics.backend-destination is not Stackdriver, this is ignored. metrics.allow-stackdriver-custom-metrics: "false" + + # profiling.enable indicates whether it is allowed to retrieve runtime profiling data from + # the pods via an HTTP server in the format expected by the pprof visualization tool. When + # enabled, the Knative Serving pods expose the profiling data on an alternate HTTP port 8008. + # The HTTP context root for profiling is then /debug/pprof/. + profiling.enable: "false" kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-observability namespace: knative-serving @@ -1003,18 +1027,24 @@ data: # This block is not actually functional configuration, # but serves to illustrate the available configuration # options and document them in a way that is accessible - # to users that kubectl edit this config map. + # to users that `+"`"+`kubectl edit`+"`"+` this config map. # # These sample configuration options may be copied out of # this example block and unindented to be in the data block # to actually change the configuration. # - # If true we enable adding spans within our applications. 
- enable: "false" + # This may be "zipkin" or "stackdriver", the default is "none" + backend: "none" # URL to zipkin collector where traces are sent. + # This must be specified when backend is "zipkin" zipkin-endpoint: "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans" + # The GCP project into which stackdriver metrics will be written + # when backend is "stackdriver". If unspecified, the project-id + # is read from GCP metadata when running on GCP. + stackdriver-project-id: "my-project" + # Enable zipkin debug mode. This allows all spans to be sent to the server # bypassing sampling. debug: "false" @@ -1024,18 +1054,66 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: config-tracing namespace: knative-serving - --- +apiVersion: v1 +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `+"`"+`kubectl edit`+"`"+` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # Default Knative Gateway after v0.3. It points to the Istio + # standard istio-ingressgateway, instead of a custom one that we + # used pre-0.3. + gateway.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" + + # A cluster local gateway to allow pods outside of the mesh to access + # Services and Routes not exposing through an ingress. If the users + # do have a service mesh setup, this isn't required and can be removed. + # + # An example use case is when users want to use Istio without any + # sidecar injection (like Knative's istio-lean.yaml). Since every pod + # is outside of the service mesh in that case, a cluster-local service + # will need to be exposed to a cluster-local gateway to be accessible. + local-gateway.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" + + # To use only Istio service mesh and no cluster-local-gateway, replace + # all local-gateway.* entries the following entry. + local-gateway.mesh: "mesh" + + # Feature flag to enable reconciling external Istio Gateways. + # When auto TLS feature is turned on, reconcileExternalGateway will be automatically enforced. + # 1. true: enabling reconciling external gateways. + # 2. false: disabling reconciling external gateways. 
+ reconcileExternalGateway: "false" +kind: ConfigMap +metadata: + labels: + networking.knative.dev/ingress-provider: istio + serving.knative.dev/release: "v0.10.0" + name: config-istio + namespace: knative-serving `) th.writeF("/manifests/knative/knative-serving-install/base/deployment.yaml", ` apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: activator namespace: knative-serving spec: @@ -1051,17 +1129,18 @@ spec: labels: app: activator role: activator - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" spec: containers: - - args: - - -logtostderr=false - - -stderrthreshold=FATAL - env: + - env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: SYSTEM_NAMESPACE valueFrom: fieldRef: @@ -1071,8 +1150,8 @@ spec: - name: CONFIG_OBSERVABILITY_NAME value: config-observability - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:88d864eb3c47881cf7ac058479d1c735cc3cf4f07a11aad0621cd36dcd9ae3c6 + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 livenessProbe: httpGet: httpHeaders: @@ -1083,11 +1162,13 @@ spec: name: activator ports: - containerPort: 8012 - name: http1-port + name: http1 - containerPort: 8013 - name: h2c-port + name: h2c - containerPort: 9090 - name: metrics-port + name: metrics + - containerPort: 8008 + name: profiling readinessProbe: httpGet: httpHeaders: @@ -1107,12 +1188,31 @@ spec: serviceAccountName: controller terminationGracePeriodSeconds: 300 --- +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: activator + namespace: knative-serving +spec: + maxReplicas: 20 + metrics: + - resource: + name: cpu + targetAverageUtilization: 100 + type: Resource + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: activator + +--- apiVersion: apps/v1 kind: Deployment metadata: labels: autoscaling.knative.dev/autoscaler-provider: hpa - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: autoscaler-hpa namespace: knative-serving spec: @@ -1126,7 +1226,7 @@ spec: sidecar.istio.io/inject: "false" labels: app: autoscaler-hpa - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -1140,11 +1240,13 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:a7801c3cf4edecfa51b7bd2068f97941f6714f7922cb4806245377c2b336b723 + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 name: autoscaler-hpa ports: - containerPort: 9090 name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 1000m @@ -1155,14 +1257,12 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - --- - apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: autoscaler namespace: knative-serving spec: @@ -1178,7 +1278,7 @@ spec: traffic.sidecar.istio.io/includeInboundPorts: 8080,9090 labels: app: autoscaler - serving.knative.dev/release: "v0.8.0" + 
serving.knative.dev/release: "v0.10.0" spec: containers: - args: @@ -1195,7 +1295,7 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:aeaacec4feedee309293ac21da13e71a05a2ad84b1d5fcc01ffecfa6cfbb2870 + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c livenessProbe: httpGet: httpHeaders: @@ -1211,6 +1311,8 @@ spec: name: metrics - containerPort: 8443 name: custom-metrics + - containerPort: 8008 + name: profiling readinessProbe: httpGet: httpHeaders: @@ -1228,27 +1330,26 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - ---- +--- apiVersion: apps/v1 kind: Deployment metadata: labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.8.0" - name: networking-istio + serving.knative.dev/release: "v0.10.0" + name: controller namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: networking-istio + app: controller template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: networking-istio + app: controller + serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -1261,12 +1362,14 @@ spec: - name: CONFIG_OBSERVABILITY_NAME value: config-observability - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:057c999bccfe32e9889616b571dc8d389c742ff66f0b5516bad651f05459b7bc - name: networking-istio + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 + name: controller ports: - containerPort: 9090 name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 1000m @@ -1277,13 +1380,12 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: webhook namespace: knative-serving spec: @@ -1300,7 +1402,7 @@ spec: labels: app: webhook role: webhook - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -1314,11 +1416,13 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:c2076674618933df53e90cf9ddd17f5ddbad513b8c95e955e45e37be7ca9e0e8 + image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 name: webhook ports: - containerPort: 9090 - name: metrics-port + name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 200m @@ -1329,27 +1433,27 @@ spec: securityContext: allowPrivilegeEscalation: false serviceAccountName: controller - --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.8.0" - name: controller + networking.knative.dev/ingress-provider: istio + serving.knative.dev/release: "v0.10.0" + name: networking-istio namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: controller + app: networking-istio template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: controller - serving.knative.dev/release: "v0.8.0" + app: networking-istio + 
serving.knative.dev/release: "v0.10.0" spec: containers: - env: @@ -1363,11 +1467,13 @@ spec: value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:3b096e55fa907cff53d37dadc5d20c29cea9bb18ed9e921a588fee17beb937df - name: controller + image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff + name: networking-istio ports: - containerPort: 9090 name: metrics + - containerPort: 8008 + name: profiling resources: limits: cpu: 1000m @@ -1379,15 +1485,13 @@ spec: allowPrivilegeEscalation: false serviceAccountName: controller ---- - `) th.writeF("/manifests/knative/knative-serving-install/base/service-account.yaml", ` apiVersion: v1 kind: ServiceAccount metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: controller namespace: knative-serving @@ -1398,7 +1502,7 @@ kind: Service metadata: labels: app: activator - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: activator-service namespace: knative-serving spec: @@ -1425,7 +1529,7 @@ kind: Service metadata: labels: app: controller - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: controller namespace: knative-serving spec: @@ -1443,7 +1547,7 @@ kind: Service metadata: labels: role: webhook - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: webhook namespace: knative-serving spec: @@ -1452,14 +1556,13 @@ spec: targetPort: 8443 selector: role: webhook - --- apiVersion: v1 kind: Service metadata: labels: app: autoscaler - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: autoscaler namespace: knative-serving spec: @@ -1479,7 +1582,6 @@ spec: selector: app: autoscaler ---- `) th.writeF("/manifests/knative/knative-serving-install/base/apiservice.yaml", ` apiVersion: apiregistration.k8s.io/v1beta1 @@ -1487,7 +1589,7 @@ kind: APIService metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: v1beta1.custom.metrics.k8s.io spec: group: custom.metrics.k8s.io @@ -1505,31 +1607,49 @@ apiVersion: caching.internal.knative.dev/v1alpha1 kind: Image metadata: labels: - serving.knative.dev/release: "v0.8.0" + serving.knative.dev/release: "v0.10.0" name: queue-proxy namespace: knative-serving spec: - image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:e0654305370cf3bbbd0f56f97789c92cf5215f752b70902eba5d5fc0e88c5aca + image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c `) - th.writeF("/manifests/knative/knative-serving-install/base/hpa.yaml", ` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler + th.writeF("/manifests/knative/knative-serving-install/base/webhook-configuration.yaml", ` +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration metadata: - name: activator - namespace: knative-serving -spec: - maxReplicas: 20 - metrics: - - resource: - name: cpu - targetAverageUtilization: 100 - type: Resource - minReplicas: 1 - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: activator + labels: + serving.knative.dev/release: "v0.10.0" + name: webhook.serving.knative.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + 
service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: webhook.serving.knative.dev +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + serving.knative.dev/release: "v0.10.0" + name: config.webhook.serving.knative.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: config.webhook.serving.knative.dev + namespaceSelector: + matchExpressions: + - key: serving.knative.dev/release + operator: Exists `) th.writeK("/manifests/knative/knative-serving-install/base", ` @@ -1549,28 +1669,28 @@ resources: - service.yaml - apiservice.yaml - image.yaml -- hpa.yaml +- webhook-configuration.yaml commonLabels: kustomize.component: knative images: - name: gcr.io/knative-releases/knative.dev/serving/cmd/activator newName: gcr.io/knative-releases/knative.dev/serving/cmd/activator - digest: sha256:88d864eb3c47881cf7ac058479d1c735cc3cf4f07a11aad0621cd36dcd9ae3c6 + digest: sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa - digest: sha256:a7801c3cf4edecfa51b7bd2068f97941f6714f7922cb4806245377c2b336b723 + digest: sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler - digest: sha256:aeaacec4feedee309293ac21da13e71a05a2ad84b1d5fcc01ffecfa6cfbb2870 + digest: sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c - name: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio newName: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio - digest: sha256:057c999bccfe32e9889616b571dc8d389c742ff66f0b5516bad651f05459b7bc + digest: sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff - name: gcr.io/knative-releases/knative.dev/serving/cmd/webhook newName: gcr.io/knative-releases/knative.dev/serving/cmd/webhook - digest: sha256:c2076674618933df53e90cf9ddd17f5ddbad513b8c95e955e45e37be7ca9e0e8 + digest: sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 - name: gcr.io/knative-releases/knative.dev/serving/cmd/controller newName: gcr.io/knative-releases/knative.dev/serving/cmd/controller - digest: sha256:3b096e55fa907cff53d37dadc5d20c29cea9bb18ed9e921a588fee17beb937df + digest: sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 `) } From a90b87e1187ba2de8e79a2db78b569f676708127 Mon Sep 17 00:00:00 2001 From: Dan Sun Date: Sun, 19 Jan 2020 11:26:16 -0500 Subject: [PATCH 4/6] Upgrade to knative 0.11 --- knative/knative-serving-crds/base/crd.yaml | 376 +++++++------- .../knative-serving-crds/base/namespace.yaml | 3 +- .../overlays/application/application.yaml | 4 +- .../overlays/application/kustomization.yaml | 4 +- .../base/apiservice.yaml | 3 +- .../base/cluster-role-binding.yaml | 25 +- .../base/cluster-role.yaml | 368 ++++++++------ .../base/config-map.yaml | 61 ++- .../base/deployment.yaml | 479 +++++++++--------- .../knative-serving-install/base/gateway.yaml | 32 +- knative/knative-serving-install/base/hpa.yaml | 13 +- .../knative-serving-install/base/image.yaml | 6 +- .../base/kustomization.yaml | 12 +- .../base/role-binding.yaml | 9 +- .../base/service-account.yaml | 4 +- .../knative-serving-install/base/service.yaml | 
71 +-- .../base/webhook-configuration.yaml | 69 ++- .../overlays/application/application.yaml | 4 +- .../overlays/application/kustomization.yaml | 4 +- 19 files changed, 802 insertions(+), 745 deletions(-) diff --git a/knative/knative-serving-crds/base/crd.yaml b/knative/knative-serving-crds/base/crd.yaml index 7c07240664..1ce7c5904c 100644 --- a/knative/knative-serving-crds/base/crd.yaml +++ b/knative/knative-serving-crds/base/crd.yaml @@ -4,25 +4,25 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: certificates.networking.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].reason - name: Reason - type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string group: networking.internal.knative.dev names: categories: - - knative-internal - - networking + - knative-internal + - networking kind: Certificate plural: certificates shortNames: - - kcert + - kcert singular: certificate scope: Namespaced subresources: @@ -34,48 +34,49 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: + duck.knative.dev/podspecable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: configurations.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.latestCreatedRevisionName - name: LatestCreated - type: string - - JSONPath: .status.latestReadyRevisionName - name: LatestReady - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.latestCreatedRevisionName + name: LatestCreated + type: string + - JSONPath: .status.latestReadyRevisionName + name: LatestReady + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Configuration plural: configurations shortNames: - - config - - cfg + - config + - cfg singular: configuration scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -88,12 +89,12 @@ spec: group: caching.internal.knative.dev names: categories: - - knative-internal - - caching + - knative-internal + - caching kind: Image plural: images shortNames: - - img + - img singular: image scope: Namespaced subresources: @@ -106,33 +107,33 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: ingresses.networking.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - 
type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: networking.internal.knative.dev names: categories: - - knative-internal - - networking + - knative-internal + - networking kind: Ingress plural: ingresses shortNames: - - ing + - ing singular: ingress scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true + - name: v1alpha1 + served: true + storage: true --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -140,21 +141,21 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: metrics.autoscaling.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: autoscaling.internal.knative.dev names: categories: - - knative-internal - - autoscaling + - knative-internal + - autoscaling kind: Metric plural: metrics singular: metric @@ -169,40 +170,40 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: podautoscalers.autoscaling.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.desiredScale - name: DesiredScale - type: integer - - JSONPath: .status.actualScale - name: ActualScale - type: integer - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.desiredScale + name: DesiredScale + type: integer + - JSONPath: .status.actualScale + name: ActualScale + type: integer + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: autoscaling.internal.knative.dev names: categories: - - knative-internal - - autoscaling + - knative-internal + - autoscaling kind: PodAutoscaler plural: podautoscalers shortNames: - - kpa - - pa + - kpa + - pa singular: podautoscaler scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true + - name: v1alpha1 + served: true + storage: true --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -210,49 +211,49 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: revisions.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .metadata.labels['serving\.knative\.dev/configuration'] - name: Config Name - type: string - - JSONPath: .status.serviceName - name: K8s Service Name - type: string - - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration'] - name: Generation - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - 
type: string + - JSONPath: .metadata.labels['serving\.knative\.dev/configuration'] + name: Config Name + type: string + - JSONPath: .status.serviceName + name: K8s Service Name + type: string + - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration'] + name: Generation + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Revision plural: revisions shortNames: - - rev + - rev singular: revision scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -261,43 +262,43 @@ metadata: labels: duck.knative.dev/addressable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: routes.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.url - name: URL - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.url + name: URL + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Route plural: routes shortNames: - - rt + - rt singular: route scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -305,51 +306,52 @@ kind: CustomResourceDefinition metadata: labels: duck.knative.dev/addressable: "true" + duck.knative.dev/podspecable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: services.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.url - name: URL - type: string - - JSONPath: .status.latestCreatedRevisionName - name: LatestCreated - type: string - - JSONPath: .status.latestReadyRevisionName - name: LatestReady - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.url + name: URL + type: string + - JSONPath: .status.latestCreatedRevisionName + name: LatestCreated + type: string + - JSONPath: .status.latestReadyRevisionName + name: LatestReady + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + 
- all + - knative + - serving kind: Service plural: services shortNames: - - kservice - - ksvc + - kservice + - ksvc singular: service scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -357,41 +359,39 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: serverlessservices.networking.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .spec.mode - name: Mode - type: string - - JSONPath: .status.serviceName - name: ServiceName - type: string - - JSONPath: .status.privateServiceName - name: PrivateServiceName - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .spec.mode + name: Mode + type: string + - JSONPath: .status.serviceName + name: ServiceName + type: string + - JSONPath: .status.privateServiceName + name: PrivateServiceName + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: networking.internal.knative.dev names: categories: - - knative-internal - - networking + - knative-internal + - networking kind: ServerlessService plural: serverlessservices shortNames: - - sks + - sks singular: serverlessservice scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - ---- + - name: v1alpha1 + served: true + storage: true diff --git a/knative/knative-serving-crds/base/namespace.yaml b/knative/knative-serving-crds/base/namespace.yaml index 20eb73635e..04bb5b5225 100644 --- a/knative/knative-serving-crds/base/namespace.yaml +++ b/knative/knative-serving-crds/base/namespace.yaml @@ -3,6 +3,7 @@ kind: Namespace metadata: labels: istio-injection: enabled - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving + diff --git a/knative/knative-serving-crds/overlays/application/application.yaml b/knative/knative-serving-crds/overlays/application/application.yaml index 2d3a1b9d01..855a6ff837 100644 --- a/knative/knative-serving-crds/overlays/application/application.yaml +++ b/knative/knative-serving-crds/overlays/application/application.yaml @@ -6,11 +6,11 @@ spec: selector: matchLabels: app.kubernetes.io/name: knative-serving-crds - app.kubernetes.io/instance: knative-serving-crds-v0.8.0 + app.kubernetes.io/instance: knative-serving-crds-v0.11.1 app.kubernetes.io/managed-by: kfctl app.kubernetes.io/component: knative-serving-crds app.kubernetes.io/part-of: kubeflow - app.kubernetes.io/version: v0.8.0 + app.kubernetes.io/version: v0.11.1 componentKinds: - group: core kind: ConfigMap diff --git a/knative/knative-serving-crds/overlays/application/kustomization.yaml b/knative/knative-serving-crds/overlays/application/kustomization.yaml index cc55882a82..8b00b56800 100644 --- a/knative/knative-serving-crds/overlays/application/kustomization.yaml +++ b/knative/knative-serving-crds/overlays/application/kustomization.yaml @@ -6,8 +6,8 @@ resources: - 
application.yaml commonLabels: app.kubernetes.io/name: knative-serving-crds - app.kubernetes.io/instance: knative-serving-crds-v0.8.0 + app.kubernetes.io/instance: knative-serving-crds-v0.11.1 app.kubernetes.io/managed-by: kfctl app.kubernetes.io/component: knative-serving-crds app.kubernetes.io/part-of: kubeflow - app.kubernetes.io/version: v0.8.0 + app.kubernetes.io/version: v0.11.1 diff --git a/knative/knative-serving-install/base/apiservice.yaml b/knative/knative-serving-install/base/apiservice.yaml index bb03e92fe8..18500e6f94 100644 --- a/knative/knative-serving-install/base/apiservice.yaml +++ b/knative/knative-serving-install/base/apiservice.yaml @@ -3,7 +3,7 @@ kind: APIService metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: v1beta1.custom.metrics.k8s.io spec: group: custom.metrics.k8s.io @@ -15,3 +15,4 @@ spec: version: v1beta1 versionPriority: 100 + diff --git a/knative/knative-serving-install/base/cluster-role-binding.yaml b/knative/knative-serving-install/base/cluster-role-binding.yaml index 633cfae0ad..e69e4dbeeb 100644 --- a/knative/knative-serving-install/base/cluster-role-binding.yaml +++ b/knative/knative-serving-install/base/cluster-role-binding.yaml @@ -1,18 +1,19 @@ +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: custom-metrics:system:auth-delegator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:auth-delegator subjects: -- kind: ServiceAccount - name: controller - namespace: knative-serving + - kind: ServiceAccount + name: controller + namespace: knative-serving --- apiVersion: rbac.authorization.k8s.io/v1 @@ -20,30 +21,30 @@ kind: ClusterRoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: hpa-controller-custom-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: custom-metrics-server-resources subjects: -- kind: ServiceAccount - name: horizontal-pod-autoscaler - namespace: kube-system + - kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-controller-admin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: knative-serving-admin subjects: -- kind: ServiceAccount - name: controller - namespace: knative-serving + - kind: ServiceAccount + name: controller + namespace: knative-serving diff --git a/knative/knative-serving-install/base/cluster-role.yaml b/knative/knative-serving-install/base/cluster-role.yaml index d5211ed50a..81279631ee 100644 --- a/knative/knative-serving-install/base/cluster-role.yaml +++ b/knative/knative-serving-install/base/cluster-role.yaml @@ -1,17 +1,40 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + duck.knative.dev/addressable: "true" + serving.knative.dev/release: "v0.11.1" + name: knative-serving-addressable-resolver +rules: + - apiGroups: + - serving.knative.dev + resources: + - routes + - routes/status + - services + - services/status + verbs: + - get + - list + - watch + +--- + apiVersion: 
rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: custom-metrics-server-resources rules: -- apiGroups: - - custom.metrics.k8s.io - resources: - - '*' - verbs: - - '*' + - apiGroups: + - custom.metrics.k8s.io + resources: + - '*' + verbs: + - '*' --- apiVersion: rbac.authorization.k8s.io/v1 @@ -19,68 +42,70 @@ kind: ClusterRole metadata: labels: rbac.authorization.k8s.io/aggregate-to-admin: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-namespaced-admin rules: -- apiGroups: - - serving.knative.dev - - networking.internal.knative.dev - - autoscaling.internal.knative.dev - - caching.internal.knative.dev - resources: - - '*' - verbs: - - '*' + - apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev + resources: + - '*' + verbs: + - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: rbac.authorization.k8s.io/aggregate-to-edit: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-namespaced-edit rules: -- apiGroups: - - serving.knative.dev - - networking.internal.knative.dev - - autoscaling.internal.knative.dev - resources: - - '*' - verbs: - - create - - update - - patch - - delete + - apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev + resources: + - '*' + verbs: + - create + - update + - patch + - delete --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: rbac.authorization.k8s.io/aggregate-to-view: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-namespaced-view rules: -- apiGroups: - - serving.knative.dev - - networking.internal.knative.dev - - autoscaling.internal.knative.dev - resources: - - '*' - verbs: - - get - - list - - watch + - apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev + resources: + - '*' + verbs: + - get + - list + - watch --- aggregationRule: clusterRoleSelectors: - - matchLabels: - serving.knative.dev/controller: "true" + - matchLabels: + serving.knative.dev/controller: "true" apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-admin rules: [] --- @@ -89,113 +114,132 @@ kind: ClusterRole metadata: labels: serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-core rules: -- apiGroups: - - "" - resources: - - pods - - namespaces - - secrets - - configmaps - - endpoints - - services - - events - - serviceaccounts - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - "" - resources: - - endpoints/restricted - verbs: - - create -- apiGroups: - - apps - resources: - - deployments - - deployments/finalizers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - get - - list - - create - - update 
- - delete - - patch - - watch -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - autoscaling - resources: - - horizontalpodautoscalers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - serving.knative.dev - - autoscaling.internal.knative.dev - - networking.internal.knative.dev - resources: - - '*' - - '*/status' - - '*/finalizers' - verbs: - - get - - list - - create - - update - - delete - - deletecollection - - patch - - watch -- apiGroups: - - caching.internal.knative.dev - resources: - - images - verbs: - - get - - list - - create - - update - - delete - - patch - - watch + - apiGroups: + - "" + resources: + - pods + - namespaces + - secrets + - configmaps + - endpoints + - services + - events + - serviceaccounts + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - "" + resources: + - endpoints/restricted + verbs: + - create + - apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - serving.knative.dev + - autoscaling.internal.knative.dev + - networking.internal.knative.dev + resources: + - '*' + - '*/status' + - '*/finalizers' + verbs: + - get + - list + - create + - update + - delete + - deletecollection + - patch + - watch + - apiGroups: + - caching.internal.knative.dev + resources: + - images + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + duck.knative.dev/podspecable: "true" + serving.knative.dev/release: "v0.11.1" + name: knative-serving-podspecable-binding +rules: + - apiGroups: + - serving.knative.dev + resources: + - configurations + - services + verbs: + - list + - watch + - patch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -203,19 +247,19 @@ metadata: labels: networking.knative.dev/ingress-provider: istio serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-istio rules: -- apiGroups: - - networking.istio.io - resources: - - virtualservices - - gateways - verbs: - - get - - list - - create - - update - - delete - - patch - - watch + - apiGroups: + - networking.istio.io + resources: + - virtualservices + - gateways + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/knative/knative-serving-install/base/config-map.yaml b/knative/knative-serving-install/base/config-map.yaml index c8b642fd68..21dc2913ea 100644 --- a/knative/knative-serving-install/base/config-map.yaml +++ b/knative/knative-serving-install/base/config-map.yaml @@ -1,3 +1,4 @@ +--- apiVersion: v1 data: _example: | @@ -63,6 +64,7 @@ data: # When operating in a stable mode, the autoscaler operates on 
the # average concurrency over the stable window. + # Stable window must be in whole seconds. stable-window: "60s" # When observed average concurrency during the panic window reaches @@ -70,14 +72,10 @@ data: # enters panic mode. When operating in panic mode, the autoscaler # scales on the average concurrency over the panic window which is # panic-window-percentage of the stable-window. + # When computing the panic window it will be rounded to the closest + # whole second. panic-window-percentage: "10.0" - # Absolute panic window duration. - # Deprecated in favor of panic-window-percentage. - # Existing revisions will continue to scale based on panic-window - # but new revisions will default to panic-window-percentage. - panic-window: "6s" - # The percentage of the container concurrency target at which to # enter panic mode when reached within the panic window. panic-threshold-percentage: "200.0" @@ -115,7 +113,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-autoscaler namespace: knative-serving @@ -187,7 +185,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-defaults namespace: knative-serving @@ -212,11 +210,11 @@ data: # List of repositories for which tag to digest resolving should be skipped registriesSkippingTagResolving: "ko.local,dev.local" - queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c + queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:792f6945c7bc73a49a470a5b955c39c8bd174705743abf5fb71aa0f4c04128eb kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-deployment namespace: knative-serving @@ -262,7 +260,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-domain namespace: knative-serving @@ -301,7 +299,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-gc namespace: knative-serving @@ -361,7 +359,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-logging namespace: knative-serving @@ -410,7 +408,7 @@ data: # To determine the IP ranges of your cluster: # IBM Cloud Private: cat cluster/config.yaml | grep service_cluster_ip_range # IBM Cloud Kubernetes Service: "172.30.0.0/16,172.20.0.0/16,10.10.10.0/24" - # Google Container Engine (GKE): gcloud container clusters describe XXXXXXX --zone=XXXXXX | grep -e clusterIpv4Cidr -e servicesIpv4Cidr + # Google Container Engine (GKE): gcloud container clusters describe $CLUSTER_NAME --zone=$CLUSTER_ZONE | grep -e clusterIpv4Cidr -e servicesIpv4Cidr # Azure Kubernetes Service (AKS): "10.0.0.0/16" # Azure Container Service (ACS; deprecated): "10.244.0.0/16,10.240.0.0/16" # Azure Container Service Engine (ACS-Engine; OSS): Configurable, but defaults to "10.0.0.0/16" @@ -469,7 +467,7 @@ data: # when constructing the DNS name for "tags" within the traffic blocks # of Routes and Configuration. This is used in conjunction with the # domainTemplate above to determine the full URL for the tag. 
- tagTemplate: "{{.Name}}-{{.Tag}}" + tagTemplate: "{{.Tag}}-{{.Name}}" # Controls whether TLS certificates are automatically provisioned and # installed in the Knative ingress to terminate external TLS connection. @@ -487,7 +485,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-network namespace: knative-serving @@ -562,8 +560,8 @@ data: metrics.backend-destination: prometheus # metrics.request-metrics-backend-destination specifies the request metrics - # destination. If non-empty, it enables queue proxy to send request metrics. - # Currently supported values: prometheus, stackdriver. + # destination. It enables queue proxy to send request metrics. + # Currently supported values: prometheus (the default), stackdriver. metrics.request-metrics-backend-destination: prometheus # metrics.stackdriver-project-id field specifies the stackdriver project ID. This @@ -586,7 +584,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-observability namespace: knative-serving @@ -630,13 +628,14 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-tracing namespace: knative-serving + --- + apiVersion: v1 data: - _example: | ################################ # # # EXAMPLE CONFIGURATION # @@ -654,8 +653,12 @@ data: # Default Knative Gateway after v0.3. It points to the Istio # standard istio-ingressgateway, instead of a custom one that we - # used pre-0.3. - gateway.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" + # used pre-0.3. The configuration format should be `gateway. + # {{gateway_namespace}}.{{gateway_name}}: "{{ingress_name}}. + # {{ingress_namespace}}.svc.cluster.local"`. The {{gateway_namespace}} + # is optional; when it is omitted, the system will search for + # the gateway in the serving system namespace `knative-serving` + kubeflow-gateway.kubeflow.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" # A cluster local gateway to allow pods outside of the mesh to access # Services and Routes not exposing through an ingress. If the users @@ -665,10 +668,16 @@ data: # sidecar injection (like Knative's istio-lean.yaml). Since every pod # is outside of the service mesh in that case, a cluster-local service # will need to be exposed to a cluster-local gateway to be accessible. - local-gateway.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" + # The configuration format should be `local-gateway.{{local_gateway_namespace}}. + # {{local_gateway_name}}: "{{cluster_local_gateway_name}}. + # {{cluster_local_gateway_namespace}}.svc.cluster.local"`. The + # {{local_gateway_namespace}} is optional; when it is omitted, the system + # will search for the local gateway in the serving system namespace + # `knative-serving` + local-gateway.knative-serving.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" # To use only Istio service mesh and no cluster-local-gateway, replace - # all local-gateway.* entries the following entry. + # all local-gateway.* entries by the following entry. local-gateway.mesh: "mesh" # Feature flag to enable reconciling external Istio Gateways. 
@@ -680,6 +689,6 @@ kind: ConfigMap metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-istio namespace: knative-serving diff --git a/knative/knative-serving-install/base/deployment.yaml b/knative/knative-serving-install/base/deployment.yaml index 893cfbd392..7342b77567 100644 --- a/knative/knative-serving-install/base/deployment.yaml +++ b/knative/knative-serving-install/base/deployment.yaml @@ -1,8 +1,9 @@ +--- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: activator namespace: knative-serving spec: @@ -18,90 +19,71 @@ spec: labels: app: activator role: activator - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/internal/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 - livenessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: activator - path: /healthz - port: 8012 - name: activator - ports: - - containerPort: 8012 - name: http1 - - containerPort: 8013 - name: h2c - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - readinessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: activator - path: /healthz - port: 8012 - resources: - limits: - cpu: 1000m - memory: 600Mi - requests: - cpu: 300m - memory: 60Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:8e606671215cc029683e8cd633ec5de9eabeaa6e9a4392ff289883304be1f418 + livenessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: activator + path: /healthz + port: 8012 + name: activator + ports: + - containerPort: 8012 + name: http1 + - containerPort: 8013 + name: h2c + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + readinessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: activator + path: /healthz + port: 8012 + resources: + limits: + cpu: 1000m + memory: 600Mi + requests: + cpu: 300m + memory: 60Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller terminationGracePeriodSeconds: 300 --- -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: activator - namespace: knative-serving -spec: - maxReplicas: 20 - metrics: - - resource: - name: cpu - targetAverageUtilization: 100 - type: Resource - minReplicas: 1 - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: activator - ---- apiVersion: apps/v1 kind: Deployment 
metadata: labels: autoscaling.knative.dev/autoscaler-provider: hpa - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: autoscaler-hpa namespace: knative-serving spec: @@ -115,43 +97,43 @@ spec: sidecar.istio.io/inject: "false" labels: app: autoscaler-hpa - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 - name: autoscaler-hpa - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 100m - memory: 100Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:5e0fadf574e66fb1c893806b5c5e5f19139cc476ebf1dff9860789fe4ac5f545 + name: autoscaler-hpa + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: autoscaler namespace: knative-serving spec: @@ -167,114 +149,115 @@ spec: traffic.sidecar.istio.io/includeInboundPorts: 8080,9090 labels: app: autoscaler - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - args: - - --secure-port=8443 - - --cert-dir=/tmp - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c - livenessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: autoscaler - path: /healthz - port: 8080 - name: autoscaler - ports: - - containerPort: 8080 - name: websocket - - containerPort: 9090 - name: metrics - - containerPort: 8443 - name: custom-metrics - - containerPort: 8008 - name: profiling - readinessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: autoscaler - path: /healthz - port: 8080 - resources: - limits: - cpu: 300m - memory: 400Mi - requests: - cpu: 30m - memory: 40Mi - securityContext: - allowPrivilegeEscalation: false + - args: + - --secure-port=8443 + - --cert-dir=/tmp + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: 
gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:ef1f01b5fb3886d4c488a219687aac72d28e72f808691132f658259e4e02bb27 + livenessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: autoscaler + path: /healthz + port: 8080 + name: autoscaler + ports: + - containerPort: 8080 + name: websocket + - containerPort: 9090 + name: metrics + - containerPort: 8443 + name: custom-metrics + - containerPort: 8008 + name: profiling + readinessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: autoscaler + path: /healthz + port: 8080 + resources: + limits: + cpu: 300m + memory: 400Mi + requests: + cpu: 30m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller ---- +--- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" - name: controller + networking.knative.dev/ingress-provider: istio + serving.knative.dev/release: "v0.11.1" + name: networking-istio namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: controller + app: networking-istio template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: controller - serving.knative.dev/release: "v0.10.0" + app: networking-istio + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/internal/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 - name: controller - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 100m - memory: 100Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:727a623ccb17676fae8058cb1691207a9658a8d71bc7603d701e23b1a6037e6c + name: networking-istio + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: webhook namespace: knative-serving spec: @@ -291,86 +274,86 @@ spec: labels: app: webhook role: webhook - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 - name: webhook - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - 
resources: - limits: - cpu: 200m - memory: 200Mi - requests: - cpu: 20m - memory: 20Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:1ef3328282f31704b5802c1136bd117e8598fd9f437df8209ca87366c5ce9fcb + name: webhook + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 200m + memory: 200Mi + requests: + cpu: 20m + memory: 20Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller --- apiVersion: apps/v1 kind: Deployment metadata: labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" - name: networking-istio + serving.knative.dev/release: "v0.11.1" + name: controller namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: networking-istio + app: controller template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: networking-istio - serving.knative.dev/release: "v0.10.0" + app: controller + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff - name: networking-istio - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 100m - memory: 100Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:5ca13e5b3ce5e2819c4567b75c0984650a57272ece44bc1dabf930f9fe1e19a1 + name: controller + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller +--- diff --git a/knative/knative-serving-install/base/gateway.yaml b/knative/knative-serving-install/base/gateway.yaml index 70eaf999ec..67bab98319 100644 --- a/knative/knative-serving-install/base/gateway.yaml +++ b/knative/knative-serving-install/base/gateway.yaml @@ -1,37 +1,19 @@ -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" - name: knative-ingress-gateway - namespace: knative-serving -spec: - selector: - istio: ingressgateway - servers: - - hosts: - - '*' - port: - name: http - number: 80 - protocol: HTTP --- apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: labels: networking.knative.dev/ingress-provider: istio - 
serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: cluster-local-gateway namespace: knative-serving spec: selector: istio: cluster-local-gateway servers: - - hosts: - - '*' - port: - name: http - number: 80 - protocol: HTTP + - hosts: + - '*' + port: + name: http + number: 80 + protocol: HTTP diff --git a/knative/knative-serving-install/base/hpa.yaml b/knative/knative-serving-install/base/hpa.yaml index 2d295cfd7d..4cbde7fa0a 100644 --- a/knative/knative-serving-install/base/hpa.yaml +++ b/knative/knative-serving-install/base/hpa.yaml @@ -1,18 +1,23 @@ +--- apiVersion: autoscaling/v2beta1 kind: HorizontalPodAutoscaler metadata: + labels: + serving.knative.dev/release: "v0.11.1" name: activator namespace: knative-serving spec: maxReplicas: 20 metrics: - - resource: - name: cpu - targetAverageUtilization: 100 - type: Resource + - resource: + name: cpu + targetAverageUtilization: 100 + type: Resource minReplicas: 1 scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: activator +--- + diff --git a/knative/knative-serving-install/base/image.yaml b/knative/knative-serving-install/base/image.yaml index 32342f7d7c..21e40846e8 100644 --- a/knative/knative-serving-install/base/image.yaml +++ b/knative/knative-serving-install/base/image.yaml @@ -1,10 +1,12 @@ +--- apiVersion: caching.internal.knative.dev/v1alpha1 kind: Image metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: queue-proxy namespace: knative-serving spec: - image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c + image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:792f6945c7bc73a49a470a5b955c39c8bd174705743abf5fb71aa0f4c04128eb + diff --git a/knative/knative-serving-install/base/kustomization.yaml b/knative/knative-serving-install/base/kustomization.yaml index 309ff63857..992674b6b8 100644 --- a/knative/knative-serving-install/base/kustomization.yaml +++ b/knative/knative-serving-install/base/kustomization.yaml @@ -20,19 +20,19 @@ commonLabels: images: - name: gcr.io/knative-releases/knative.dev/serving/cmd/activator newName: gcr.io/knative-releases/knative.dev/serving/cmd/activator - digest: sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 + digest: sha256:8e606671215cc029683e8cd633ec5de9eabeaa6e9a4392ff289883304be1f418 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa - digest: sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 + digest: sha256:5e0fadf574e66fb1c893806b5c5e5f19139cc476ebf1dff9860789fe4ac5f545 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler - digest: sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c + digest: sha256:ef1f01b5fb3886d4c488a219687aac72d28e72f808691132f658259e4e02bb27 - name: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio newName: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio - digest: sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff + digest: sha256:727a623ccb17676fae8058cb1691207a9658a8d71bc7603d701e23b1a6037e6c - name: gcr.io/knative-releases/knative.dev/serving/cmd/webhook newName: gcr.io/knative-releases/knative.dev/serving/cmd/webhook - digest: sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 
+ digest: sha256:1ef3328282f31704b5802c1136bd117e8598fd9f437df8209ca87366c5ce9fcb - name: gcr.io/knative-releases/knative.dev/serving/cmd/controller newName: gcr.io/knative-releases/knative.dev/serving/cmd/controller - digest: sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 + digest: sha256:5ca13e5b3ce5e2819c4567b75c0984650a57272ece44bc1dabf930f9fe1e19a1 diff --git a/knative/knative-serving-install/base/role-binding.yaml b/knative/knative-serving-install/base/role-binding.yaml index 4e6aacba66..ce3a111147 100644 --- a/knative/knative-serving-install/base/role-binding.yaml +++ b/knative/knative-serving-install/base/role-binding.yaml @@ -1,9 +1,10 @@ +--- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: custom-metrics-auth-reader namespace: kube-system roleRef: @@ -11,6 +12,6 @@ roleRef: kind: Role name: extension-apiserver-authentication-reader subjects: -- kind: ServiceAccount - name: controller - namespace: knative-serving + - kind: ServiceAccount + name: controller + namespace: knative-serving diff --git a/knative/knative-serving-install/base/service-account.yaml b/knative/knative-serving-install/base/service-account.yaml index b7486c324f..9517a13f45 100644 --- a/knative/knative-serving-install/base/service-account.yaml +++ b/knative/knative-serving-install/base/service-account.yaml @@ -1,8 +1,10 @@ +--- apiVersion: v1 kind: ServiceAccount metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: controller namespace: knative-serving + diff --git a/knative/knative-serving-install/base/service.yaml b/knative/knative-serving-install/base/service.yaml index ce20637f18..f96f1db808 100644 --- a/knative/knative-serving-install/base/service.yaml +++ b/knative/knative-serving-install/base/service.yaml @@ -1,25 +1,26 @@ +--- apiVersion: v1 kind: Service metadata: labels: app: activator - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: activator-service namespace: knative-serving spec: ports: - - name: http - port: 80 - protocol: TCP - targetPort: 8012 - - name: http2 - port: 81 - protocol: TCP - targetPort: 8013 - - name: metrics - port: 9090 - protocol: TCP - targetPort: 9090 + - name: http + port: 80 + protocol: TCP + targetPort: 8012 + - name: http2 + port: 81 + protocol: TCP + targetPort: 8013 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 selector: app: activator type: ClusterIP @@ -30,15 +31,15 @@ kind: Service metadata: labels: app: controller - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: controller namespace: knative-serving spec: ports: - - name: metrics - port: 9090 - protocol: TCP - targetPort: 9090 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 selector: app: controller @@ -48,13 +49,14 @@ kind: Service metadata: labels: role: webhook - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: webhook namespace: knative-serving spec: ports: - - port: 443 - targetPort: 8443 + - name: https-webhook + port: 443 + targetPort: 8443 selector: role: webhook --- @@ -63,23 +65,22 @@ kind: Service metadata: labels: app: autoscaler - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: autoscaler namespace: knative-serving spec: ports: - - name: http - port: 8080 - 
protocol: TCP - targetPort: 8080 - - name: metrics - port: 9090 - protocol: TCP - targetPort: 9090 - - name: custom-metrics - port: 443 - protocol: TCP - targetPort: 8443 + - name: http + port: 8080 + protocol: TCP + targetPort: 8080 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + - name: https-custom-metrics + port: 443 + protocol: TCP + targetPort: 8443 selector: app: autoscaler - diff --git a/knative/knative-serving-install/base/webhook-configuration.yaml b/knative/knative-serving-install/base/webhook-configuration.yaml index 515e488864..fb77b16079 100644 --- a/knative/knative-serving-install/base/webhook-configuration.yaml +++ b/knative/knative-serving-install/base/webhook-configuration.yaml @@ -1,36 +1,61 @@ +--- apiVersion: admissionregistration.k8s.io/v1beta1 kind: MutatingWebhookConfiguration metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: webhook.serving.knative.dev webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook - namespace: knative-serving - failurePolicy: Fail - name: webhook.serving.knative.dev + - admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: webhook.serving.knative.dev --- apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration metadata: labels: - serving.knative.dev/release: "v0.10.0" - name: config.webhook.serving.knative.dev + serving.knative.dev/release: "v0.11.1" + name: validation.webhook.serving.knative.dev webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook - namespace: knative-serving - failurePolicy: Fail + - admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: validation.webhook.serving.knative.dev +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + serving.knative.dev/release: "v0.11.1" name: config.webhook.serving.knative.dev - namespaceSelector: - matchExpressions: - - key: serving.knative.dev/release - operator: Exists +webhooks: + - admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: config.webhook.serving.knative.dev + namespaceSelector: + matchExpressions: + - key: serving.knative.dev/release + operator: Exists +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + serving.knative.dev/release: "v0.11.1" + name: webhook-certs + namespace: knative-serving diff --git a/knative/knative-serving-install/overlays/application/application.yaml b/knative/knative-serving-install/overlays/application/application.yaml index b8890c6a39..1c7ff245cf 100644 --- a/knative/knative-serving-install/overlays/application/application.yaml +++ b/knative/knative-serving-install/overlays/application/application.yaml @@ -6,11 +6,11 @@ spec: selector: matchLabels: app.kubernetes.io/name: knative-serving-install - app.kubernetes.io/instance: knative-serving-install-v0.8.0 + app.kubernetes.io/instance: knative-serving-install-v0.11.1 app.kubernetes.io/managed-by: kfctl app.kubernetes.io/component: knative-serving-install app.kubernetes.io/part-of: kubeflow - app.kubernetes.io/version: v0.8.0 + app.kubernetes.io/version: v0.11.1 componentKinds: - group: core kind: ConfigMap diff --git a/knative/knative-serving-install/overlays/application/kustomization.yaml 
b/knative/knative-serving-install/overlays/application/kustomization.yaml index eaf725b4dd..29ae6e9b59 100644 --- a/knative/knative-serving-install/overlays/application/kustomization.yaml +++ b/knative/knative-serving-install/overlays/application/kustomization.yaml @@ -6,8 +6,8 @@ resources: - application.yaml commonLabels: app.kubernetes.io/name: knative-serving-install - app.kubernetes.io/instance: knative-serving-install-v0.8.0 + app.kubernetes.io/instance: knative-serving-install-v0.11.1 app.kubernetes.io/managed-by: kfctl app.kubernetes.io/component: knative-serving-install app.kubernetes.io/part-of: kubeflow - app.kubernetes.io/version: v0.8.0 + app.kubernetes.io/version: v0.11.1 From 2dfa59f47773a8262db457e1f525a4df87408755 Mon Sep 17 00:00:00 2001 From: Dan Sun Date: Sat, 25 Jan 2020 13:38:06 -0500 Subject: [PATCH 5/6] Update knative 0.11.1 tests --- .../base/config-map.yaml | 2 +- .../base/kustomization.yaml | 1 + .../knative-knative-serving-crds-base_test.go | 379 +++--- ...-serving-crds-overlays-application_test.go | 387 +++--- ...ative-knative-serving-install-base_test.go | 1165 ++++++++-------- ...rving-install-overlays-application_test.go | 1173 +++++++++-------- 6 files changed, 1632 insertions(+), 1475 deletions(-) diff --git a/knative/knative-serving-install/base/config-map.yaml b/knative/knative-serving-install/base/config-map.yaml index 21dc2913ea..a493e66bdf 100644 --- a/knative/knative-serving-install/base/config-map.yaml +++ b/knative/knative-serving-install/base/config-map.yaml @@ -658,7 +658,7 @@ data: # {{ingress_namespace}}.svc.cluster.local"`. The {{gateway_namespace}} # is optional; when it is omitted, the system will search for # the gateway in the serving system namespace `knative-serving` - kubeflow-gateway.kubeflow.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" + gateway.kubeflow.kubeflow-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" # A cluster local gateway to allow pods outside of the mesh to access # Services and Routes not exposing through an ingress. 
If the users diff --git a/knative/knative-serving-install/base/kustomization.yaml b/knative/knative-serving-install/base/kustomization.yaml index 992674b6b8..9134cafbdf 100644 --- a/knative/knative-serving-install/base/kustomization.yaml +++ b/knative/knative-serving-install/base/kustomization.yaml @@ -14,6 +14,7 @@ resources: - service.yaml - apiservice.yaml - image.yaml +- hpa.yaml - webhook-configuration.yaml commonLabels: kustomize.component: knative diff --git a/tests/knative-knative-serving-crds-base_test.go b/tests/knative-knative-serving-crds-base_test.go index d1eef36dd3..038199dc25 100644 --- a/tests/knative-knative-serving-crds-base_test.go +++ b/tests/knative-knative-serving-crds-base_test.go @@ -20,9 +20,10 @@ kind: Namespace metadata: labels: istio-injection: enabled - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving + `) th.writeF("/manifests/knative/knative-serving-crds/base/crd.yaml", ` --- @@ -31,25 +32,25 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: certificates.networking.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].reason - name: Reason - type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string group: networking.internal.knative.dev names: categories: - - knative-internal - - networking + - knative-internal + - networking kind: Certificate plural: certificates shortNames: - - kcert + - kcert singular: certificate scope: Namespaced subresources: @@ -61,48 +62,49 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: + duck.knative.dev/podspecable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: configurations.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.latestCreatedRevisionName - name: LatestCreated - type: string - - JSONPath: .status.latestReadyRevisionName - name: LatestReady - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.latestCreatedRevisionName + name: LatestCreated + type: string + - JSONPath: .status.latestReadyRevisionName + name: LatestReady + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Configuration plural: configurations shortNames: - - config - - cfg + - config + - cfg singular: configuration scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -115,12 +117,12 @@ spec: group: 
caching.internal.knative.dev names: categories: - - knative-internal - - caching + - knative-internal + - caching kind: Image plural: images shortNames: - - img + - img singular: image scope: Namespaced subresources: @@ -133,33 +135,33 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: ingresses.networking.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: networking.internal.knative.dev names: categories: - - knative-internal - - networking + - knative-internal + - networking kind: Ingress plural: ingresses shortNames: - - ing + - ing singular: ingress scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true + - name: v1alpha1 + served: true + storage: true --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -167,21 +169,21 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: metrics.autoscaling.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: autoscaling.internal.knative.dev names: categories: - - knative-internal - - autoscaling + - knative-internal + - autoscaling kind: Metric plural: metrics singular: metric @@ -196,40 +198,40 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: podautoscalers.autoscaling.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.desiredScale - name: DesiredScale - type: integer - - JSONPath: .status.actualScale - name: ActualScale - type: integer - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.desiredScale + name: DesiredScale + type: integer + - JSONPath: .status.actualScale + name: ActualScale + type: integer + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: autoscaling.internal.knative.dev names: categories: - - knative-internal - - autoscaling + - knative-internal + - autoscaling kind: PodAutoscaler plural: podautoscalers shortNames: - - kpa - - pa + - kpa + - pa singular: podautoscaler scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true + - name: v1alpha1 + served: true + storage: true --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -237,49 +239,49 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + 
serving.knative.dev/release: "v0.11.1" name: revisions.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .metadata.labels['serving\.knative\.dev/configuration'] - name: Config Name - type: string - - JSONPath: .status.serviceName - name: K8s Service Name - type: string - - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration'] - name: Generation - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .metadata.labels['serving\.knative\.dev/configuration'] + name: Config Name + type: string + - JSONPath: .status.serviceName + name: K8s Service Name + type: string + - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration'] + name: Generation + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Revision plural: revisions shortNames: - - rev + - rev singular: revision scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -288,43 +290,43 @@ metadata: labels: duck.knative.dev/addressable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: routes.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.url - name: URL - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.url + name: URL + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Route plural: routes shortNames: - - rt + - rt singular: route scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -332,51 +334,52 @@ kind: CustomResourceDefinition metadata: labels: duck.knative.dev/addressable: "true" + duck.knative.dev/podspecable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: services.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.url - name: URL - type: string - - JSONPath: .status.latestCreatedRevisionName - name: LatestCreated - type: string - - JSONPath: .status.latestReadyRevisionName - name: LatestReady - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - 
- JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.url + name: URL + type: string + - JSONPath: .status.latestCreatedRevisionName + name: LatestCreated + type: string + - JSONPath: .status.latestReadyRevisionName + name: LatestReady + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Service plural: services shortNames: - - kservice - - ksvc + - kservice + - ksvc singular: service scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -384,44 +387,42 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: serverlessservices.networking.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .spec.mode - name: Mode - type: string - - JSONPath: .status.serviceName - name: ServiceName - type: string - - JSONPath: .status.privateServiceName - name: PrivateServiceName - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .spec.mode + name: Mode + type: string + - JSONPath: .status.serviceName + name: ServiceName + type: string + - JSONPath: .status.privateServiceName + name: PrivateServiceName + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: networking.internal.knative.dev names: categories: - - knative-internal - - networking + - knative-internal + - networking kind: ServerlessService plural: serverlessservices shortNames: - - sks + - sks singular: serverlessservice scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - ---- + - name: v1alpha1 + served: true + storage: true `) th.writeK("/manifests/knative/knative-serving-crds/base", ` apiVersion: kustomize.config.k8s.io/v1beta1 diff --git a/tests/knative-knative-serving-crds-overlays-application_test.go b/tests/knative-knative-serving-crds-overlays-application_test.go index e0f67a43d5..6d4f1a9c85 100644 --- a/tests/knative-knative-serving-crds-overlays-application_test.go +++ b/tests/knative-knative-serving-crds-overlays-application_test.go @@ -23,11 +23,11 @@ spec: selector: matchLabels: app.kubernetes.io/name: knative-serving-crds - app.kubernetes.io/instance: knative-serving-crds-v0.8.0 + app.kubernetes.io/instance: knative-serving-crds-v0.11.1 app.kubernetes.io/managed-by: kfctl app.kubernetes.io/component: knative-serving-crds app.kubernetes.io/part-of: kubeflow - app.kubernetes.io/version: v0.8.0 + app.kubernetes.io/version: v0.11.1 componentKinds: - group: core kind: ConfigMap @@ -56,11 +56,11 @@ resources: - application.yaml commonLabels: app.kubernetes.io/name: knative-serving-crds - app.kubernetes.io/instance: 
knative-serving-crds-v0.8.0 + app.kubernetes.io/instance: knative-serving-crds-v0.11.1 app.kubernetes.io/managed-by: kfctl app.kubernetes.io/component: knative-serving-crds app.kubernetes.io/part-of: kubeflow - app.kubernetes.io/version: v0.8.0 + app.kubernetes.io/version: v0.11.1 `) th.writeF("/manifests/knative/knative-serving-crds/base/namespace.yaml", ` apiVersion: v1 @@ -68,9 +68,10 @@ kind: Namespace metadata: labels: istio-injection: enabled - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving + `) th.writeF("/manifests/knative/knative-serving-crds/base/crd.yaml", ` --- @@ -79,25 +80,25 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: certificates.networking.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].reason - name: Reason - type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string group: networking.internal.knative.dev names: categories: - - knative-internal - - networking + - knative-internal + - networking kind: Certificate plural: certificates shortNames: - - kcert + - kcert singular: certificate scope: Namespaced subresources: @@ -109,48 +110,49 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: labels: + duck.knative.dev/podspecable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: configurations.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.latestCreatedRevisionName - name: LatestCreated - type: string - - JSONPath: .status.latestReadyRevisionName - name: LatestReady - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.latestCreatedRevisionName + name: LatestCreated + type: string + - JSONPath: .status.latestReadyRevisionName + name: LatestReady + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Configuration plural: configurations shortNames: - - config - - cfg + - config + - cfg singular: configuration scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -163,12 +165,12 @@ spec: group: caching.internal.knative.dev names: categories: - - knative-internal - - caching + - knative-internal + - caching kind: Image plural: images shortNames: - - img + - img singular: image scope: Namespaced subresources: @@ -181,33 +183,33 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + 
serving.knative.dev/release: "v0.11.1" name: ingresses.networking.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: networking.internal.knative.dev names: categories: - - knative-internal - - networking + - knative-internal + - networking kind: Ingress plural: ingresses shortNames: - - ing + - ing singular: ingress scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true + - name: v1alpha1 + served: true + storage: true --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -215,21 +217,21 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: metrics.autoscaling.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: autoscaling.internal.knative.dev names: categories: - - knative-internal - - autoscaling + - knative-internal + - autoscaling kind: Metric plural: metrics singular: metric @@ -244,40 +246,40 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: podautoscalers.autoscaling.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.desiredScale - name: DesiredScale - type: integer - - JSONPath: .status.actualScale - name: ActualScale - type: integer - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.desiredScale + name: DesiredScale + type: integer + - JSONPath: .status.actualScale + name: ActualScale + type: integer + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: autoscaling.internal.knative.dev names: categories: - - knative-internal - - autoscaling + - knative-internal + - autoscaling kind: PodAutoscaler plural: podautoscalers shortNames: - - kpa - - pa + - kpa + - pa singular: podautoscaler scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true + - name: v1alpha1 + served: true + storage: true --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -285,49 +287,49 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: revisions.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .metadata.labels['serving\.knative\.dev/configuration'] - name: Config Name - type: string - - JSONPath: .status.serviceName - name: K8s Service Name - type: string - - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration'] - 
name: Generation - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .metadata.labels['serving\.knative\.dev/configuration'] + name: Config Name + type: string + - JSONPath: .status.serviceName + name: K8s Service Name + type: string + - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration'] + name: Generation + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Revision plural: revisions shortNames: - - rev + - rev singular: revision scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -336,43 +338,43 @@ metadata: labels: duck.knative.dev/addressable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: routes.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.url - name: URL - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.url + name: URL + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Route plural: routes shortNames: - - rt + - rt singular: route scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -380,51 +382,52 @@ kind: CustomResourceDefinition metadata: labels: duck.knative.dev/addressable: "true" + duck.knative.dev/podspecable: "true" knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: services.serving.knative.dev spec: additionalPrinterColumns: - - JSONPath: .status.url - name: URL - type: string - - JSONPath: .status.latestCreatedRevisionName - name: LatestCreated - type: string - - JSONPath: .status.latestReadyRevisionName - name: LatestReady - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .status.url + name: URL + type: string + - JSONPath: .status.latestCreatedRevisionName + name: LatestCreated + type: string + - JSONPath: .status.latestReadyRevisionName + name: LatestReady + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + 
name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: serving.knative.dev names: categories: - - all - - knative - - serving + - all + - knative + - serving kind: Service plural: services shortNames: - - kservice - - ksvc + - kservice + - ksvc singular: service scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - - name: v1beta1 - served: true - storage: false - - name: v1 - served: true - storage: false + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -432,44 +435,42 @@ kind: CustomResourceDefinition metadata: labels: knative.dev/crd-install: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: serverlessservices.networking.internal.knative.dev spec: additionalPrinterColumns: - - JSONPath: .spec.mode - name: Mode - type: string - - JSONPath: .status.serviceName - name: ServiceName - type: string - - JSONPath: .status.privateServiceName - name: PrivateServiceName - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=='Ready')].reason - name: Reason - type: string + - JSONPath: .spec.mode + name: Mode + type: string + - JSONPath: .status.serviceName + name: ServiceName + type: string + - JSONPath: .status.privateServiceName + name: PrivateServiceName + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=='Ready')].reason + name: Reason + type: string group: networking.internal.knative.dev names: categories: - - knative-internal - - networking + - knative-internal + - networking kind: ServerlessService plural: serverlessservices shortNames: - - sks + - sks singular: serverlessservice scope: Namespaced subresources: status: {} versions: - - name: v1alpha1 - served: true - storage: true - ---- + - name: v1alpha1 + served: true + storage: true `) th.writeK("/manifests/knative/knative-serving-crds/base", ` apiVersion: kustomize.config.k8s.io/v1beta1 diff --git a/tests/knative-knative-serving-install-base_test.go b/tests/knative-knative-serving-install-base_test.go index f7f957ee2c..2d2e10319f 100644 --- a/tests/knative-knative-serving-install-base_test.go +++ b/tests/knative-knative-serving-install-base_test.go @@ -15,59 +15,64 @@ import ( func writeKnativeServingInstallBase(th *KustTestHarness) { th.writeF("/manifests/knative/knative-serving-install/base/gateway.yaml", ` -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" - name: knative-ingress-gateway - namespace: knative-serving -spec: - selector: - istio: ingressgateway - servers: - - hosts: - - '*' - port: - name: http - number: 80 - protocol: HTTP --- apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: cluster-local-gateway namespace: knative-serving spec: selector: istio: cluster-local-gateway servers: - - hosts: - - '*' - port: - name: http - number: 80 - protocol: HTTP + - hosts: + - '*' + port: + name: http + number: 80 + protocol: HTTP `) 
th.writeF("/manifests/knative/knative-serving-install/base/cluster-role.yaml", ` +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + duck.knative.dev/addressable: "true" + serving.knative.dev/release: "v0.11.1" + name: knative-serving-addressable-resolver +rules: + - apiGroups: + - serving.knative.dev + resources: + - routes + - routes/status + - services + - services/status + verbs: + - get + - list + - watch + +--- + apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: custom-metrics-server-resources rules: -- apiGroups: - - custom.metrics.k8s.io - resources: - - '*' - verbs: - - '*' + - apiGroups: + - custom.metrics.k8s.io + resources: + - '*' + verbs: + - '*' --- apiVersion: rbac.authorization.k8s.io/v1 @@ -75,68 +80,70 @@ kind: ClusterRole metadata: labels: rbac.authorization.k8s.io/aggregate-to-admin: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-namespaced-admin rules: -- apiGroups: - - serving.knative.dev - - networking.internal.knative.dev - - autoscaling.internal.knative.dev - - caching.internal.knative.dev - resources: - - '*' - verbs: - - '*' + - apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev + resources: + - '*' + verbs: + - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: rbac.authorization.k8s.io/aggregate-to-edit: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-namespaced-edit rules: -- apiGroups: - - serving.knative.dev - - networking.internal.knative.dev - - autoscaling.internal.knative.dev - resources: - - '*' - verbs: - - create - - update - - patch - - delete + - apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev + resources: + - '*' + verbs: + - create + - update + - patch + - delete --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: rbac.authorization.k8s.io/aggregate-to-view: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-namespaced-view rules: -- apiGroups: - - serving.knative.dev - - networking.internal.knative.dev - - autoscaling.internal.knative.dev - resources: - - '*' - verbs: - - get - - list - - watch + - apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev + resources: + - '*' + verbs: + - get + - list + - watch --- aggregationRule: clusterRoleSelectors: - - matchLabels: - serving.knative.dev/controller: "true" + - matchLabels: + serving.knative.dev/controller: "true" apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-admin rules: [] --- @@ -145,113 +152,132 @@ kind: ClusterRole metadata: labels: serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-core rules: -- apiGroups: - - "" - resources: - - pods - - namespaces - - secrets - - configmaps - - endpoints - - services - - events - - serviceaccounts - verbs: - - get - - 
list - - create - - update - - delete - - patch - - watch -- apiGroups: - - "" - resources: - - endpoints/restricted - verbs: - - create -- apiGroups: - - apps - resources: - - deployments - - deployments/finalizers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - autoscaling - resources: - - horizontalpodautoscalers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - serving.knative.dev - - autoscaling.internal.knative.dev - - networking.internal.knative.dev - resources: - - '*' - - '*/status' - - '*/finalizers' - verbs: - - get - - list - - create - - update - - delete - - deletecollection - - patch - - watch -- apiGroups: - - caching.internal.knative.dev - resources: - - images - verbs: - - get - - list - - create - - update - - delete - - patch - - watch + - apiGroups: + - "" + resources: + - pods + - namespaces + - secrets + - configmaps + - endpoints + - services + - events + - serviceaccounts + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - "" + resources: + - endpoints/restricted + verbs: + - create + - apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - serving.knative.dev + - autoscaling.internal.knative.dev + - networking.internal.knative.dev + resources: + - '*' + - '*/status' + - '*/finalizers' + verbs: + - get + - list + - create + - update + - delete + - deletecollection + - patch + - watch + - apiGroups: + - caching.internal.knative.dev + resources: + - images + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + duck.knative.dev/podspecable: "true" + serving.knative.dev/release: "v0.11.1" + name: knative-serving-podspecable-binding +rules: + - apiGroups: + - serving.knative.dev + resources: + - configurations + - services + verbs: + - list + - watch + - patch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -259,39 +285,40 @@ metadata: labels: networking.knative.dev/ingress-provider: istio serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-istio rules: -- apiGroups: - - networking.istio.io - resources: - - virtualservices - - gateways - verbs: - - get - - list - - create - - update - - delete - - patch - - watch + - apiGroups: + - networking.istio.io + resources: + - virtualservices + - gateways + verbs: + - get + - 
list + - create + - update + - delete + - patch + - watch `) th.writeF("/manifests/knative/knative-serving-install/base/cluster-role-binding.yaml", ` +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: custom-metrics:system:auth-delegator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:auth-delegator subjects: -- kind: ServiceAccount - name: controller - namespace: knative-serving + - kind: ServiceAccount + name: controller + namespace: knative-serving --- apiVersion: rbac.authorization.k8s.io/v1 @@ -299,32 +326,32 @@ kind: ClusterRoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: hpa-controller-custom-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: custom-metrics-server-resources subjects: -- kind: ServiceAccount - name: horizontal-pod-autoscaler - namespace: kube-system + - kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-controller-admin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: knative-serving-admin subjects: -- kind: ServiceAccount - name: controller - namespace: knative-serving + - kind: ServiceAccount + name: controller + namespace: knative-serving `) th.writeF("/manifests/knative/knative-serving-install/base/service-role.yaml", ` @@ -356,12 +383,13 @@ spec: - user: '*' `) th.writeF("/manifests/knative/knative-serving-install/base/role-binding.yaml", ` +--- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: custom-metrics-auth-reader namespace: kube-system roleRef: @@ -369,11 +397,12 @@ roleRef: kind: Role name: extension-apiserver-authentication-reader subjects: -- kind: ServiceAccount - name: controller - namespace: knative-serving + - kind: ServiceAccount + name: controller + namespace: knative-serving `) th.writeF("/manifests/knative/knative-serving-install/base/config-map.yaml", ` +--- apiVersion: v1 data: _example: | @@ -439,6 +468,7 @@ data: # When operating in a stable mode, the autoscaler operates on the # average concurrency over the stable window. + # Stable window must be in whole seconds. stable-window: "60s" # When observed average concurrency during the panic window reaches @@ -446,14 +476,10 @@ data: # enters panic mode. When operating in panic mode, the autoscaler # scales on the average concurrency over the panic window which is # panic-window-percentage of the stable-window. + # When computing the panic window it will be rounded to the closest + # whole second. panic-window-percentage: "10.0" - # Absolute panic window duration. - # Deprecated in favor of panic-window-percentage. - # Existing revisions will continue to scale based on panic-window - # but new revisions will default to panic-window-percentage. - panic-window: "6s" - # The percentage of the container concurrency target at which to # enter panic mode when reached within the panic window. 
panic-threshold-percentage: "200.0" @@ -491,7 +517,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-autoscaler namespace: knative-serving @@ -563,7 +589,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-defaults namespace: knative-serving @@ -588,11 +614,11 @@ data: # List of repositories for which tag to digest resolving should be skipped registriesSkippingTagResolving: "ko.local,dev.local" - queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c + queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:792f6945c7bc73a49a470a5b955c39c8bd174705743abf5fb71aa0f4c04128eb kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-deployment namespace: knative-serving @@ -638,7 +664,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-domain namespace: knative-serving @@ -677,7 +703,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-gc namespace: knative-serving @@ -737,7 +763,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-logging namespace: knative-serving @@ -786,7 +812,7 @@ data: # To determine the IP ranges of your cluster: # IBM Cloud Private: cat cluster/config.yaml | grep service_cluster_ip_range # IBM Cloud Kubernetes Service: "172.30.0.0/16,172.20.0.0/16,10.10.10.0/24" - # Google Container Engine (GKE): gcloud container clusters describe XXXXXXX --zone=XXXXXX | grep -e clusterIpv4Cidr -e servicesIpv4Cidr + # Google Container Engine (GKE): gcloud container clusters describe $CLUSTER_NAME --zone=$CLUSTER_ZONE | grep -e clusterIpv4Cidr -e servicesIpv4Cidr # Azure Kubernetes Service (AKS): "10.0.0.0/16" # Azure Container Service (ACS; deprecated): "10.244.0.0/16,10.240.0.0/16" # Azure Container Service Engine (ACS-Engine; OSS): Configurable, but defaults to "10.0.0.0/16" @@ -845,7 +871,7 @@ data: # when constructing the DNS name for "tags" within the traffic blocks # of Routes and Configuration. This is used in conjunction with the # domainTemplate above to determine the full URL for the tag. - tagTemplate: "{{.Name}}-{{.Tag}}" + tagTemplate: "{{.Tag}}-{{.Name}}" # Controls whether TLS certificates are automatically provisioned and # installed in the Knative ingress to terminate external TLS connection. @@ -863,7 +889,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-network namespace: knative-serving @@ -938,8 +964,8 @@ data: metrics.backend-destination: prometheus # metrics.request-metrics-backend-destination specifies the request metrics - # destination. If non-empty, it enables queue proxy to send request metrics. - # Currently supported values: prometheus, stackdriver. + # destination. It enables queue proxy to send request metrics. + # Currently supported values: prometheus (the default), stackdriver. metrics.request-metrics-backend-destination: prometheus # metrics.stackdriver-project-id field specifies the stackdriver project ID. 
This @@ -962,7 +988,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-observability namespace: knative-serving @@ -1006,13 +1032,14 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-tracing namespace: knative-serving + --- + apiVersion: v1 data: - _example: | ################################ # # # EXAMPLE CONFIGURATION # @@ -1030,8 +1057,12 @@ data: # Default Knative Gateway after v0.3. It points to the Istio # standard istio-ingressgateway, instead of a custom one that we - # used pre-0.3. - gateway.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" + # used pre-0.3. The configuration format should be `+"`"+`gateway. + # {{gateway_namespace}}.{{gateway_name}}: "{{ingress_name}}. + # {{ingress_namespace}}.svc.cluster.local"`+"`"+`. The {{gateway_namespace}} + # is optional; when it is omitted, the system will search for + # the gateway in the serving system namespace `+"`"+`knative-serving`+"`"+` + gateway.kubeflow.kubeflow-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" # A cluster local gateway to allow pods outside of the mesh to access # Services and Routes not exposing through an ingress. If the users @@ -1041,10 +1072,16 @@ data: # sidecar injection (like Knative's istio-lean.yaml). Since every pod # is outside of the service mesh in that case, a cluster-local service # will need to be exposed to a cluster-local gateway to be accessible. - local-gateway.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" + # The configuration format should be `+"`"+`local-gateway.{{local_gateway_namespace}}. + # {{local_gateway_name}}: "{{cluster_local_gateway_name}}. + # {{cluster_local_gateway_namespace}}.svc.cluster.local"`+"`"+`. The + # {{local_gateway_namespace}} is optional; when it is omitted, the system + # will search for the local gateway in the serving system namespace + # `+"`"+`knative-serving`+"`"+` + local-gateway.knative-serving.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" # To use only Istio service mesh and no cluster-local-gateway, replace - # all local-gateway.* entries the following entry. + # all local-gateway.* entries by the following entry. local-gateway.mesh: "mesh" # Feature flag to enable reconciling external Istio Gateways. 
@@ -1056,16 +1093,17 @@ kind: ConfigMap metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-istio namespace: knative-serving `) th.writeF("/manifests/knative/knative-serving-install/base/deployment.yaml", ` +--- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: activator namespace: knative-serving spec: @@ -1081,90 +1119,71 @@ spec: labels: app: activator role: activator - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/internal/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 - livenessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: activator - path: /healthz - port: 8012 - name: activator - ports: - - containerPort: 8012 - name: http1 - - containerPort: 8013 - name: h2c - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - readinessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: activator - path: /healthz - port: 8012 - resources: - limits: - cpu: 1000m - memory: 600Mi - requests: - cpu: 300m - memory: 60Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:8e606671215cc029683e8cd633ec5de9eabeaa6e9a4392ff289883304be1f418 + livenessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: activator + path: /healthz + port: 8012 + name: activator + ports: + - containerPort: 8012 + name: http1 + - containerPort: 8013 + name: h2c + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + readinessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: activator + path: /healthz + port: 8012 + resources: + limits: + cpu: 1000m + memory: 600Mi + requests: + cpu: 300m + memory: 60Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller terminationGracePeriodSeconds: 300 --- -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: activator - namespace: knative-serving -spec: - maxReplicas: 20 - metrics: - - resource: - name: cpu - targetAverageUtilization: 100 - type: Resource - minReplicas: 1 - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: activator - ---- apiVersion: apps/v1 kind: Deployment metadata: labels: autoscaling.knative.dev/autoscaler-provider: hpa - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: autoscaler-hpa namespace: knative-serving spec: 
@@ -1178,43 +1197,43 @@ spec: sidecar.istio.io/inject: "false" labels: app: autoscaler-hpa - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 - name: autoscaler-hpa - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 100m - memory: 100Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:5e0fadf574e66fb1c893806b5c5e5f19139cc476ebf1dff9860789fe4ac5f545 + name: autoscaler-hpa + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: autoscaler namespace: knative-serving spec: @@ -1230,114 +1249,115 @@ spec: traffic.sidecar.istio.io/includeInboundPorts: 8080,9090 labels: app: autoscaler - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - args: - - --secure-port=8443 - - --cert-dir=/tmp - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c - livenessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: autoscaler - path: /healthz - port: 8080 - name: autoscaler - ports: - - containerPort: 8080 - name: websocket - - containerPort: 9090 - name: metrics - - containerPort: 8443 - name: custom-metrics - - containerPort: 8008 - name: profiling - readinessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: autoscaler - path: /healthz - port: 8080 - resources: - limits: - cpu: 300m - memory: 400Mi - requests: - cpu: 30m - memory: 40Mi - securityContext: - allowPrivilegeEscalation: false + - args: + - --secure-port=8443 + - --cert-dir=/tmp + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:ef1f01b5fb3886d4c488a219687aac72d28e72f808691132f658259e4e02bb27 + livenessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: autoscaler + 
path: /healthz + port: 8080 + name: autoscaler + ports: + - containerPort: 8080 + name: websocket + - containerPort: 9090 + name: metrics + - containerPort: 8443 + name: custom-metrics + - containerPort: 8008 + name: profiling + readinessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: autoscaler + path: /healthz + port: 8080 + resources: + limits: + cpu: 300m + memory: 400Mi + requests: + cpu: 30m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller ---- +--- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" - name: controller + networking.knative.dev/ingress-provider: istio + serving.knative.dev/release: "v0.11.1" + name: networking-istio namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: controller + app: networking-istio template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: controller - serving.knative.dev/release: "v0.10.0" + app: networking-istio + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/internal/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 - name: controller - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 100m - memory: 100Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:727a623ccb17676fae8058cb1691207a9658a8d71bc7603d701e23b1a6037e6c + name: networking-istio + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: webhook namespace: knative-serving spec: @@ -1354,123 +1374,126 @@ spec: labels: app: webhook role: webhook - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 - name: webhook - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 200m - memory: 200Mi - requests: - cpu: 20m - memory: 20Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: 
metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:1ef3328282f31704b5802c1136bd117e8598fd9f437df8209ca87366c5ce9fcb + name: webhook + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 200m + memory: 200Mi + requests: + cpu: 20m + memory: 20Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller --- apiVersion: apps/v1 kind: Deployment metadata: labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" - name: networking-istio + serving.knative.dev/release: "v0.11.1" + name: controller namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: networking-istio + app: controller template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: networking-istio - serving.knative.dev/release: "v0.10.0" + app: controller + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff - name: networking-istio - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 100m - memory: 100Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:5ca13e5b3ce5e2819c4567b75c0984650a57272ece44bc1dabf930f9fe1e19a1 + name: controller + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller +--- `) th.writeF("/manifests/knative/knative-serving-install/base/service-account.yaml", ` +--- apiVersion: v1 kind: ServiceAccount metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: controller namespace: knative-serving + `) th.writeF("/manifests/knative/knative-serving-install/base/service.yaml", ` +--- apiVersion: v1 kind: Service metadata: labels: app: activator - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: activator-service namespace: knative-serving spec: ports: - - name: http - port: 80 - protocol: TCP - targetPort: 8012 - - name: http2 - port: 81 - protocol: TCP - targetPort: 8013 - - name: metrics - port: 9090 - protocol: TCP - targetPort: 9090 + - name: http + port: 80 + protocol: TCP + targetPort: 8012 + - name: http2 + port: 81 + protocol: TCP + targetPort: 8013 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 selector: app: activator type: ClusterIP @@ -1481,15 
+1504,15 @@ kind: Service metadata: labels: app: controller - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: controller namespace: knative-serving spec: ports: - - name: metrics - port: 9090 - protocol: TCP - targetPort: 9090 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 selector: app: controller @@ -1499,13 +1522,14 @@ kind: Service metadata: labels: role: webhook - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: webhook namespace: knative-serving spec: ports: - - port: 443 - targetPort: 8443 + - name: https-webhook + port: 443 + targetPort: 8443 selector: role: webhook --- @@ -1514,26 +1538,25 @@ kind: Service metadata: labels: app: autoscaler - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: autoscaler namespace: knative-serving spec: ports: - - name: http - port: 8080 - protocol: TCP - targetPort: 8080 - - name: metrics - port: 9090 - protocol: TCP - targetPort: 9090 - - name: custom-metrics - port: 443 - protocol: TCP - targetPort: 8443 + - name: http + port: 8080 + protocol: TCP + targetPort: 8080 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + - name: https-custom-metrics + port: 443 + protocol: TCP + targetPort: 8443 selector: app: autoscaler - `) th.writeF("/manifests/knative/knative-serving-install/base/apiservice.yaml", ` apiVersion: apiregistration.k8s.io/v1beta1 @@ -1541,7 +1564,7 @@ kind: APIService metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: v1beta1.custom.metrics.k8s.io spec: group: custom.metrics.k8s.io @@ -1553,55 +1576,108 @@ spec: version: v1beta1 versionPriority: 100 + `) th.writeF("/manifests/knative/knative-serving-install/base/image.yaml", ` +--- apiVersion: caching.internal.knative.dev/v1alpha1 kind: Image metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: queue-proxy namespace: knative-serving spec: - image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c + image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:792f6945c7bc73a49a470a5b955c39c8bd174705743abf5fb71aa0f4c04128eb + + +`) + th.writeF("/manifests/knative/knative-serving-install/base/hpa.yaml", ` +--- +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + serving.knative.dev/release: "v0.11.1" + name: activator + namespace: knative-serving +spec: + maxReplicas: 20 + metrics: + - resource: + name: cpu + targetAverageUtilization: 100 + type: Resource + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: activator +--- + `) th.writeF("/manifests/knative/knative-serving-install/base/webhook-configuration.yaml", ` +--- apiVersion: admissionregistration.k8s.io/v1beta1 kind: MutatingWebhookConfiguration metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: webhook.serving.knative.dev webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook - namespace: knative-serving - failurePolicy: Fail - name: webhook.serving.knative.dev + - admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: webhook.serving.knative.dev --- apiVersion: admissionregistration.k8s.io/v1beta1 
kind: ValidatingWebhookConfiguration metadata: labels: - serving.knative.dev/release: "v0.10.0" - name: config.webhook.serving.knative.dev + serving.knative.dev/release: "v0.11.1" + name: validation.webhook.serving.knative.dev webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook - namespace: knative-serving - failurePolicy: Fail + - admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: validation.webhook.serving.knative.dev +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + serving.knative.dev/release: "v0.11.1" name: config.webhook.serving.knative.dev - namespaceSelector: - matchExpressions: - - key: serving.knative.dev/release - operator: Exists +webhooks: + - admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: config.webhook.serving.knative.dev + namespaceSelector: + matchExpressions: + - key: serving.knative.dev/release + operator: Exists +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + serving.knative.dev/release: "v0.11.1" + name: webhook-certs + namespace: knative-serving `) th.writeK("/manifests/knative/knative-serving-install/base", ` @@ -1621,28 +1697,29 @@ resources: - service.yaml - apiservice.yaml - image.yaml +- hpa.yaml - webhook-configuration.yaml commonLabels: kustomize.component: knative images: - name: gcr.io/knative-releases/knative.dev/serving/cmd/activator newName: gcr.io/knative-releases/knative.dev/serving/cmd/activator - digest: sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 + digest: sha256:8e606671215cc029683e8cd633ec5de9eabeaa6e9a4392ff289883304be1f418 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa - digest: sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 + digest: sha256:5e0fadf574e66fb1c893806b5c5e5f19139cc476ebf1dff9860789fe4ac5f545 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler - digest: sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c + digest: sha256:ef1f01b5fb3886d4c488a219687aac72d28e72f808691132f658259e4e02bb27 - name: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio newName: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio - digest: sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff + digest: sha256:727a623ccb17676fae8058cb1691207a9658a8d71bc7603d701e23b1a6037e6c - name: gcr.io/knative-releases/knative.dev/serving/cmd/webhook newName: gcr.io/knative-releases/knative.dev/serving/cmd/webhook - digest: sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 + digest: sha256:1ef3328282f31704b5802c1136bd117e8598fd9f437df8209ca87366c5ce9fcb - name: gcr.io/knative-releases/knative.dev/serving/cmd/controller newName: gcr.io/knative-releases/knative.dev/serving/cmd/controller - digest: sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 + digest: sha256:5ca13e5b3ce5e2819c4567b75c0984650a57272ece44bc1dabf930f9fe1e19a1 `) } diff --git a/tests/knative-knative-serving-install-overlays-application_test.go b/tests/knative-knative-serving-install-overlays-application_test.go index d702eec140..717eda154f 100644 --- 
a/tests/knative-knative-serving-install-overlays-application_test.go +++ b/tests/knative-knative-serving-install-overlays-application_test.go @@ -23,11 +23,11 @@ spec: selector: matchLabels: app.kubernetes.io/name: knative-serving-install - app.kubernetes.io/instance: knative-serving-install-v0.8.0 + app.kubernetes.io/instance: knative-serving-install-v0.11.1 app.kubernetes.io/managed-by: kfctl app.kubernetes.io/component: knative-serving-install app.kubernetes.io/part-of: kubeflow - app.kubernetes.io/version: v0.8.0 + app.kubernetes.io/version: v0.11.1 componentKinds: - group: core kind: ConfigMap @@ -56,66 +56,71 @@ resources: - application.yaml commonLabels: app.kubernetes.io/name: knative-serving-install - app.kubernetes.io/instance: knative-serving-install-v0.8.0 + app.kubernetes.io/instance: knative-serving-install-v0.11.1 app.kubernetes.io/managed-by: kfctl app.kubernetes.io/component: knative-serving-install app.kubernetes.io/part-of: kubeflow - app.kubernetes.io/version: v0.8.0 + app.kubernetes.io/version: v0.11.1 `) th.writeF("/manifests/knative/knative-serving-install/base/gateway.yaml", ` -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" - name: knative-ingress-gateway - namespace: knative-serving -spec: - selector: - istio: ingressgateway - servers: - - hosts: - - '*' - port: - name: http - number: 80 - protocol: HTTP --- apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: cluster-local-gateway namespace: knative-serving spec: selector: istio: cluster-local-gateway servers: - - hosts: - - '*' - port: - name: http - number: 80 - protocol: HTTP + - hosts: + - '*' + port: + name: http + number: 80 + protocol: HTTP `) th.writeF("/manifests/knative/knative-serving-install/base/cluster-role.yaml", ` +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + duck.knative.dev/addressable: "true" + serving.knative.dev/release: "v0.11.1" + name: knative-serving-addressable-resolver +rules: + - apiGroups: + - serving.knative.dev + resources: + - routes + - routes/status + - services + - services/status + verbs: + - get + - list + - watch + +--- + apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: custom-metrics-server-resources rules: -- apiGroups: - - custom.metrics.k8s.io - resources: - - '*' - verbs: - - '*' + - apiGroups: + - custom.metrics.k8s.io + resources: + - '*' + verbs: + - '*' --- apiVersion: rbac.authorization.k8s.io/v1 @@ -123,68 +128,70 @@ kind: ClusterRole metadata: labels: rbac.authorization.k8s.io/aggregate-to-admin: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-namespaced-admin rules: -- apiGroups: - - serving.knative.dev - - networking.internal.knative.dev - - autoscaling.internal.knative.dev - - caching.internal.knative.dev - resources: - - '*' - verbs: - - '*' + - apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev + resources: + - '*' + verbs: + - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: 
rbac.authorization.k8s.io/aggregate-to-edit: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-namespaced-edit rules: -- apiGroups: - - serving.knative.dev - - networking.internal.knative.dev - - autoscaling.internal.knative.dev - resources: - - '*' - verbs: - - create - - update - - patch - - delete + - apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev + resources: + - '*' + verbs: + - create + - update + - patch + - delete --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: rbac.authorization.k8s.io/aggregate-to-view: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-namespaced-view rules: -- apiGroups: - - serving.knative.dev - - networking.internal.knative.dev - - autoscaling.internal.knative.dev - resources: - - '*' - verbs: - - get - - list - - watch + - apiGroups: + - serving.knative.dev + - networking.internal.knative.dev + - autoscaling.internal.knative.dev + - caching.internal.knative.dev + resources: + - '*' + verbs: + - get + - list + - watch --- aggregationRule: clusterRoleSelectors: - - matchLabels: - serving.knative.dev/controller: "true" + - matchLabels: + serving.knative.dev/controller: "true" apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-admin rules: [] --- @@ -193,113 +200,132 @@ kind: ClusterRole metadata: labels: serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-core rules: -- apiGroups: - - "" - resources: - - pods - - namespaces - - secrets - - configmaps - - endpoints - - services - - events - - serviceaccounts - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - "" - resources: - - endpoints/restricted - verbs: - - create -- apiGroups: - - apps - resources: - - deployments - - deployments/finalizers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - autoscaling - resources: - - horizontalpodautoscalers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - serving.knative.dev - - autoscaling.internal.knative.dev - - networking.internal.knative.dev - resources: - - '*' - - '*/status' - - '*/finalizers' - verbs: - - get - - list - - create - - update - - delete - - deletecollection - - patch - - watch -- apiGroups: - - caching.internal.knative.dev - resources: - - images - verbs: - - get - - list - - create - - update - - delete - - patch - - watch + - apiGroups: + - "" + resources: + - pods + - namespaces + - secrets + - configmaps + - endpoints + - services + - events + - serviceaccounts + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - "" + resources: + - endpoints/restricted + verbs: + - create + - apiGroups: + - apps + resources: + - deployments + - 
deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + - apiGroups: + - serving.knative.dev + - autoscaling.internal.knative.dev + - networking.internal.knative.dev + resources: + - '*' + - '*/status' + - '*/finalizers' + verbs: + - get + - list + - create + - update + - delete + - deletecollection + - patch + - watch + - apiGroups: + - caching.internal.knative.dev + resources: + - images + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + duck.knative.dev/podspecable: "true" + serving.knative.dev/release: "v0.11.1" + name: knative-serving-podspecable-binding +rules: + - apiGroups: + - serving.knative.dev + resources: + - configurations + - services + verbs: + - list + - watch + - patch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -307,39 +333,40 @@ metadata: labels: networking.knative.dev/ingress-provider: istio serving.knative.dev/controller: "true" - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-istio rules: -- apiGroups: - - networking.istio.io - resources: - - virtualservices - - gateways - verbs: - - get - - list - - create - - update - - delete - - patch - - watch + - apiGroups: + - networking.istio.io + resources: + - virtualservices + - gateways + verbs: + - get + - list + - create + - update + - delete + - patch + - watch `) th.writeF("/manifests/knative/knative-serving-install/base/cluster-role-binding.yaml", ` +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: custom-metrics:system:auth-delegator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:auth-delegator subjects: -- kind: ServiceAccount - name: controller - namespace: knative-serving + - kind: ServiceAccount + name: controller + namespace: knative-serving --- apiVersion: rbac.authorization.k8s.io/v1 @@ -347,32 +374,32 @@ kind: ClusterRoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: hpa-controller-custom-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: custom-metrics-server-resources subjects: -- kind: ServiceAccount - name: horizontal-pod-autoscaler - namespace: kube-system + - kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: knative-serving-controller-admin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: knative-serving-admin subjects: -- kind: ServiceAccount - name: controller - 
namespace: knative-serving + - kind: ServiceAccount + name: controller + namespace: knative-serving `) th.writeF("/manifests/knative/knative-serving-install/base/service-role.yaml", ` @@ -404,12 +431,13 @@ spec: - user: '*' `) th.writeF("/manifests/knative/knative-serving-install/base/role-binding.yaml", ` +--- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: custom-metrics-auth-reader namespace: kube-system roleRef: @@ -417,11 +445,12 @@ roleRef: kind: Role name: extension-apiserver-authentication-reader subjects: -- kind: ServiceAccount - name: controller - namespace: knative-serving + - kind: ServiceAccount + name: controller + namespace: knative-serving `) th.writeF("/manifests/knative/knative-serving-install/base/config-map.yaml", ` +--- apiVersion: v1 data: _example: | @@ -487,6 +516,7 @@ data: # When operating in a stable mode, the autoscaler operates on the # average concurrency over the stable window. + # Stable window must be in whole seconds. stable-window: "60s" # When observed average concurrency during the panic window reaches @@ -494,14 +524,10 @@ data: # enters panic mode. When operating in panic mode, the autoscaler # scales on the average concurrency over the panic window which is # panic-window-percentage of the stable-window. + # When computing the panic window it will be rounded to the closest + # whole second. panic-window-percentage: "10.0" - # Absolute panic window duration. - # Deprecated in favor of panic-window-percentage. - # Existing revisions will continue to scale based on panic-window - # but new revisions will default to panic-window-percentage. - panic-window: "6s" - # The percentage of the container concurrency target at which to # enter panic mode when reached within the panic window. 
panic-threshold-percentage: "200.0" @@ -539,7 +565,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-autoscaler namespace: knative-serving @@ -611,7 +637,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-defaults namespace: knative-serving @@ -636,11 +662,11 @@ data: # List of repositories for which tag to digest resolving should be skipped registriesSkippingTagResolving: "ko.local,dev.local" - queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c + queueSidecarImage: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:792f6945c7bc73a49a470a5b955c39c8bd174705743abf5fb71aa0f4c04128eb kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-deployment namespace: knative-serving @@ -686,7 +712,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-domain namespace: knative-serving @@ -725,7 +751,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-gc namespace: knative-serving @@ -785,7 +811,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-logging namespace: knative-serving @@ -834,7 +860,7 @@ data: # To determine the IP ranges of your cluster: # IBM Cloud Private: cat cluster/config.yaml | grep service_cluster_ip_range # IBM Cloud Kubernetes Service: "172.30.0.0/16,172.20.0.0/16,10.10.10.0/24" - # Google Container Engine (GKE): gcloud container clusters describe XXXXXXX --zone=XXXXXX | grep -e clusterIpv4Cidr -e servicesIpv4Cidr + # Google Container Engine (GKE): gcloud container clusters describe $CLUSTER_NAME --zone=$CLUSTER_ZONE | grep -e clusterIpv4Cidr -e servicesIpv4Cidr # Azure Kubernetes Service (AKS): "10.0.0.0/16" # Azure Container Service (ACS; deprecated): "10.244.0.0/16,10.240.0.0/16" # Azure Container Service Engine (ACS-Engine; OSS): Configurable, but defaults to "10.0.0.0/16" @@ -893,7 +919,7 @@ data: # when constructing the DNS name for "tags" within the traffic blocks # of Routes and Configuration. This is used in conjunction with the # domainTemplate above to determine the full URL for the tag. - tagTemplate: "{{.Name}}-{{.Tag}}" + tagTemplate: "{{.Tag}}-{{.Name}}" # Controls whether TLS certificates are automatically provisioned and # installed in the Knative ingress to terminate external TLS connection. @@ -911,7 +937,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-network namespace: knative-serving @@ -986,8 +1012,8 @@ data: metrics.backend-destination: prometheus # metrics.request-metrics-backend-destination specifies the request metrics - # destination. If non-empty, it enables queue proxy to send request metrics. - # Currently supported values: prometheus, stackdriver. + # destination. It enables queue proxy to send request metrics. + # Currently supported values: prometheus (the default), stackdriver. metrics.request-metrics-backend-destination: prometheus # metrics.stackdriver-project-id field specifies the stackdriver project ID. 
This @@ -1010,7 +1036,7 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-observability namespace: knative-serving @@ -1054,13 +1080,14 @@ data: kind: ConfigMap metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-tracing namespace: knative-serving + --- + apiVersion: v1 data: - _example: | ################################ # # # EXAMPLE CONFIGURATION # @@ -1078,8 +1105,12 @@ data: # Default Knative Gateway after v0.3. It points to the Istio # standard istio-ingressgateway, instead of a custom one that we - # used pre-0.3. - gateway.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" + # used pre-0.3. The configuration format should be `+"`"+`gateway. + # {{gateway_namespace}}.{{gateway_name}}: "{{ingress_name}}. + # {{ingress_namespace}}.svc.cluster.local"`+"`"+`. The {{gateway_namespace}} + # is optional; when it is omitted, the system will search for + # the gateway in the serving system namespace `+"`"+`knative-serving`+"`"+` + gateway.kubeflow.kubeflow-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" # A cluster local gateway to allow pods outside of the mesh to access # Services and Routes not exposing through an ingress. If the users @@ -1089,10 +1120,16 @@ data: # sidecar injection (like Knative's istio-lean.yaml). Since every pod # is outside of the service mesh in that case, a cluster-local service # will need to be exposed to a cluster-local gateway to be accessible. - local-gateway.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" + # The configuration format should be `+"`"+`local-gateway.{{local_gateway_namespace}}. + # {{local_gateway_name}}: "{{cluster_local_gateway_name}}. + # {{cluster_local_gateway_namespace}}.svc.cluster.local"`+"`"+`. The + # {{local_gateway_namespace}} is optional; when it is omitted, the system + # will search for the local gateway in the serving system namespace + # `+"`"+`knative-serving`+"`"+` + local-gateway.knative-serving.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" # To use only Istio service mesh and no cluster-local-gateway, replace - # all local-gateway.* entries the following entry. + # all local-gateway.* entries by the following entry. local-gateway.mesh: "mesh" # Feature flag to enable reconciling external Istio Gateways. 
@@ -1104,16 +1141,17 @@ kind: ConfigMap metadata: labels: networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: config-istio namespace: knative-serving `) th.writeF("/manifests/knative/knative-serving-install/base/deployment.yaml", ` +--- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: activator namespace: knative-serving spec: @@ -1129,90 +1167,71 @@ spec: labels: app: activator role: activator - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/internal/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 - livenessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: activator - path: /healthz - port: 8012 - name: activator - ports: - - containerPort: 8012 - name: http1 - - containerPort: 8013 - name: h2c - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - readinessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: activator - path: /healthz - port: 8012 - resources: - limits: - cpu: 1000m - memory: 600Mi - requests: - cpu: 300m - memory: 60Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/activator@sha256:8e606671215cc029683e8cd633ec5de9eabeaa6e9a4392ff289883304be1f418 + livenessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: activator + path: /healthz + port: 8012 + name: activator + ports: + - containerPort: 8012 + name: http1 + - containerPort: 8013 + name: h2c + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + readinessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: activator + path: /healthz + port: 8012 + resources: + limits: + cpu: 1000m + memory: 600Mi + requests: + cpu: 300m + memory: 60Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller terminationGracePeriodSeconds: 300 --- -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: activator - namespace: knative-serving -spec: - maxReplicas: 20 - metrics: - - resource: - name: cpu - targetAverageUtilization: 100 - type: Resource - minReplicas: 1 - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: activator - ---- apiVersion: apps/v1 kind: Deployment metadata: labels: autoscaling.knative.dev/autoscaler-provider: hpa - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: autoscaler-hpa namespace: knative-serving spec: 
@@ -1226,43 +1245,43 @@ spec: sidecar.istio.io/inject: "false" labels: app: autoscaler-hpa - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 - name: autoscaler-hpa - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 100m - memory: 100Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:5e0fadf574e66fb1c893806b5c5e5f19139cc476ebf1dff9860789fe4ac5f545 + name: autoscaler-hpa + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: autoscaler namespace: knative-serving spec: @@ -1278,114 +1297,115 @@ spec: traffic.sidecar.istio.io/includeInboundPorts: 8080,9090 labels: app: autoscaler - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - args: - - --secure-port=8443 - - --cert-dir=/tmp - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c - livenessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: autoscaler - path: /healthz - port: 8080 - name: autoscaler - ports: - - containerPort: 8080 - name: websocket - - containerPort: 9090 - name: metrics - - containerPort: 8443 - name: custom-metrics - - containerPort: 8008 - name: profiling - readinessProbe: - httpGet: - httpHeaders: - - name: k-kubelet-probe - value: autoscaler - path: /healthz - port: 8080 - resources: - limits: - cpu: 300m - memory: 400Mi - requests: - cpu: 30m - memory: 40Mi - securityContext: - allowPrivilegeEscalation: false + - args: + - --secure-port=8443 + - --cert-dir=/tmp + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:ef1f01b5fb3886d4c488a219687aac72d28e72f808691132f658259e4e02bb27 + livenessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: autoscaler + 
path: /healthz + port: 8080 + name: autoscaler + ports: + - containerPort: 8080 + name: websocket + - containerPort: 9090 + name: metrics + - containerPort: 8443 + name: custom-metrics + - containerPort: 8008 + name: profiling + readinessProbe: + httpGet: + httpHeaders: + - name: k-kubelet-probe + value: autoscaler + path: /healthz + port: 8080 + resources: + limits: + cpu: 300m + memory: 400Mi + requests: + cpu: 30m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller ---- +--- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" - name: controller + networking.knative.dev/ingress-provider: istio + serving.knative.dev/release: "v0.11.1" + name: networking-istio namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: controller + app: networking-istio template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: controller - serving.knative.dev/release: "v0.10.0" + app: networking-istio + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/internal/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 - name: controller - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 100m - memory: 100Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:727a623ccb17676fae8058cb1691207a9658a8d71bc7603d701e23b1a6037e6c + name: networking-istio + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller --- apiVersion: apps/v1 kind: Deployment metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: webhook namespace: knative-serving spec: @@ -1402,123 +1422,126 @@ spec: labels: app: webhook role: webhook - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 - name: webhook - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 200m - memory: 200Mi - requests: - cpu: 20m - memory: 20Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: 
metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/webhook@sha256:1ef3328282f31704b5802c1136bd117e8598fd9f437df8209ca87366c5ce9fcb + name: webhook + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 200m + memory: 200Mi + requests: + cpu: 20m + memory: 20Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller --- apiVersion: apps/v1 kind: Deployment metadata: labels: - networking.knative.dev/ingress-provider: istio - serving.knative.dev/release: "v0.10.0" - name: networking-istio + serving.knative.dev/release: "v0.11.1" + name: controller namespace: knative-serving spec: replicas: 1 selector: matchLabels: - app: networking-istio + app: controller template: metadata: annotations: sidecar.istio.io/inject: "false" labels: - app: networking-istio - serving.knative.dev/release: "v0.10.0" + app: controller + serving.knative.dev/release: "v0.11.1" spec: containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: knative.dev/serving - image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio@sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff - name: networking-istio - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 100m - memory: 100Mi - securityContext: - allowPrivilegeEscalation: false + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + image: gcr.io/knative-releases/knative.dev/serving/cmd/controller@sha256:5ca13e5b3ce5e2819c4567b75c0984650a57272ece44bc1dabf930f9fe1e19a1 + name: controller + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false serviceAccountName: controller +--- `) th.writeF("/manifests/knative/knative-serving-install/base/service-account.yaml", ` +--- apiVersion: v1 kind: ServiceAccount metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: controller namespace: knative-serving + `) th.writeF("/manifests/knative/knative-serving-install/base/service.yaml", ` +--- apiVersion: v1 kind: Service metadata: labels: app: activator - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: activator-service namespace: knative-serving spec: ports: - - name: http - port: 80 - protocol: TCP - targetPort: 8012 - - name: http2 - port: 81 - protocol: TCP - targetPort: 8013 - - name: metrics - port: 9090 - protocol: TCP - targetPort: 9090 + - name: http + port: 80 + protocol: TCP + targetPort: 8012 + - name: http2 + port: 81 + protocol: TCP + targetPort: 8013 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 selector: app: activator type: ClusterIP @@ -1529,15 
+1552,15 @@ kind: Service metadata: labels: app: controller - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: controller namespace: knative-serving spec: ports: - - name: metrics - port: 9090 - protocol: TCP - targetPort: 9090 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 selector: app: controller @@ -1547,13 +1570,14 @@ kind: Service metadata: labels: role: webhook - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: webhook namespace: knative-serving spec: ports: - - port: 443 - targetPort: 8443 + - name: https-webhook + port: 443 + targetPort: 8443 selector: role: webhook --- @@ -1562,26 +1586,25 @@ kind: Service metadata: labels: app: autoscaler - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: autoscaler namespace: knative-serving spec: ports: - - name: http - port: 8080 - protocol: TCP - targetPort: 8080 - - name: metrics - port: 9090 - protocol: TCP - targetPort: 9090 - - name: custom-metrics - port: 443 - protocol: TCP - targetPort: 8443 + - name: http + port: 8080 + protocol: TCP + targetPort: 8080 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + - name: https-custom-metrics + port: 443 + protocol: TCP + targetPort: 8443 selector: app: autoscaler - `) th.writeF("/manifests/knative/knative-serving-install/base/apiservice.yaml", ` apiVersion: apiregistration.k8s.io/v1beta1 @@ -1589,7 +1612,7 @@ kind: APIService metadata: labels: autoscaling.knative.dev/metric-provider: custom-metrics - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: v1beta1.custom.metrics.k8s.io spec: group: custom.metrics.k8s.io @@ -1601,55 +1624,108 @@ spec: version: v1beta1 versionPriority: 100 + `) th.writeF("/manifests/knative/knative-serving-install/base/image.yaml", ` +--- apiVersion: caching.internal.knative.dev/v1alpha1 kind: Image metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: queue-proxy namespace: knative-serving spec: - image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:5ff357b66622c98f24c56bba0a866be5e097306b83c5e6c41c28b6e87ec64c7c + image: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:792f6945c7bc73a49a470a5b955c39c8bd174705743abf5fb71aa0f4c04128eb + + +`) + th.writeF("/manifests/knative/knative-serving-install/base/hpa.yaml", ` +--- +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + serving.knative.dev/release: "v0.11.1" + name: activator + namespace: knative-serving +spec: + maxReplicas: 20 + metrics: + - resource: + name: cpu + targetAverageUtilization: 100 + type: Resource + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: activator +--- + `) th.writeF("/manifests/knative/knative-serving-install/base/webhook-configuration.yaml", ` +--- apiVersion: admissionregistration.k8s.io/v1beta1 kind: MutatingWebhookConfiguration metadata: labels: - serving.knative.dev/release: "v0.10.0" + serving.knative.dev/release: "v0.11.1" name: webhook.serving.knative.dev webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook - namespace: knative-serving - failurePolicy: Fail - name: webhook.serving.knative.dev + - admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: webhook.serving.knative.dev --- apiVersion: admissionregistration.k8s.io/v1beta1 
kind: ValidatingWebhookConfiguration metadata: labels: - serving.knative.dev/release: "v0.10.0" - name: config.webhook.serving.knative.dev + serving.knative.dev/release: "v0.11.1" + name: validation.webhook.serving.knative.dev webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook - namespace: knative-serving - failurePolicy: Fail + - admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: validation.webhook.serving.knative.dev +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + serving.knative.dev/release: "v0.11.1" name: config.webhook.serving.knative.dev - namespaceSelector: - matchExpressions: - - key: serving.knative.dev/release - operator: Exists +webhooks: + - admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + name: config.webhook.serving.knative.dev + namespaceSelector: + matchExpressions: + - key: serving.knative.dev/release + operator: Exists +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + serving.knative.dev/release: "v0.11.1" + name: webhook-certs + namespace: knative-serving `) th.writeK("/manifests/knative/knative-serving-install/base", ` @@ -1669,28 +1745,29 @@ resources: - service.yaml - apiservice.yaml - image.yaml +- hpa.yaml - webhook-configuration.yaml commonLabels: kustomize.component: knative images: - name: gcr.io/knative-releases/knative.dev/serving/cmd/activator newName: gcr.io/knative-releases/knative.dev/serving/cmd/activator - digest: sha256:0c52e0a85612bbedebf6d0de2b1951a4f762a05691f86e78079a5089d4848652 + digest: sha256:8e606671215cc029683e8cd633ec5de9eabeaa6e9a4392ff289883304be1f418 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa - digest: sha256:f5514430997ed3799e0f708d657fef935e7eef2774f073a46ffb06311c8b5e76 + digest: sha256:5e0fadf574e66fb1c893806b5c5e5f19139cc476ebf1dff9860789fe4ac5f545 - name: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler newName: gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler - digest: sha256:9b716bec384c166782f30756e0981ab11178e1a6b7a4fa6965cc6225abf8567c + digest: sha256:ef1f01b5fb3886d4c488a219687aac72d28e72f808691132f658259e4e02bb27 - name: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio newName: gcr.io/knative-releases/knative.dev/serving/cmd/networking/istio - digest: sha256:4bc49ca99adf8e4f5c498bdd1287cdf643e4b721e69b2c4a022fe98db46486ff + digest: sha256:727a623ccb17676fae8058cb1691207a9658a8d71bc7603d701e23b1a6037e6c - name: gcr.io/knative-releases/knative.dev/serving/cmd/webhook newName: gcr.io/knative-releases/knative.dev/serving/cmd/webhook - digest: sha256:f59e8d9782f17b1af3060152d99b70ae08f40aa69b799180d24964e527ebb818 + digest: sha256:1ef3328282f31704b5802c1136bd117e8598fd9f437df8209ca87366c5ce9fcb - name: gcr.io/knative-releases/knative.dev/serving/cmd/controller newName: gcr.io/knative-releases/knative.dev/serving/cmd/controller - digest: sha256:a168c9fa095c88b3e0bcbbaa6d4501a8a02ab740b360938879ae9df55964a758 + digest: sha256:5ca13e5b3ce5e2819c4567b75c0984650a57272ece44bc1dabf930f9fe1e19a1 `) } From f04fa9c962538db42d00e8fad16e18b30b02f194 Mon Sep 17 00:00:00 2001 From: Dan Sun Date: Sat, 25 Jan 2020 16:38:25 -0500 Subject: [PATCH 6/6] Point to kubeflow gateway for kfserving --- 
kfserving/kfserving-install/base/config-map.yaml | 2 +- tests/kfserving-kfserving-install-base_test.go | 2 +- tests/kfserving-kfserving-install-overlays-application_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kfserving/kfserving-install/base/config-map.yaml b/kfserving/kfserving-install/base/config-map.yaml index 4f1dbf9725..8ccd809ce6 100644 --- a/kfserving/kfserving-install/base/config-map.yaml +++ b/kfserving/kfserving-install/base/config-map.yaml @@ -89,7 +89,7 @@ data: } ingress: |- { - "ingressGateway" : "knative-ingress-gateway.knative-serving", + "ingressGateway" : "kubeflow-gateway.kubeflow", "ingressService" : "istio-ingressgateway.istio-system.svc.cluster.local" } logger: |- diff --git a/tests/kfserving-kfserving-install-base_test.go b/tests/kfserving-kfserving-install-base_test.go index 53bda4b105..f12ae28101 100644 --- a/tests/kfserving-kfserving-install-base_test.go +++ b/tests/kfserving-kfserving-install-base_test.go @@ -344,7 +344,7 @@ data: } ingress: |- { - "ingressGateway" : "knative-ingress-gateway.knative-serving", + "ingressGateway" : "kubeflow-gateway.kubeflow", "ingressService" : "istio-ingressgateway.istio-system.svc.cluster.local" } logger: |- diff --git a/tests/kfserving-kfserving-install-overlays-application_test.go b/tests/kfserving-kfserving-install-overlays-application_test.go index 4d7a735a54..a4f7bd24c8 100644 --- a/tests/kfserving-kfserving-install-overlays-application_test.go +++ b/tests/kfserving-kfserving-install-overlays-application_test.go @@ -401,7 +401,7 @@ data: } ingress: |- { - "ingressGateway" : "knative-ingress-gateway.knative-serving", + "ingressGateway" : "kubeflow-gateway.kubeflow", "ingressService" : "istio-ingressgateway.istio-system.svc.cluster.local" } logger: |-