diff --git a/foundry/ingress-nginx.values.yaml b/foundry/ingress-nginx.values.yaml
index 278adc1..a9a9dd1 100644
--- a/foundry/ingress-nginx.values.yaml
+++ b/foundry/ingress-nginx.values.yaml
@@ -16,76 +16,66 @@ commonLabels: {}
 controller:
   name: controller
   image:
-    registry: k8s.gcr.io
+    ## Keep false as default for now!
+    chroot: false
+    registry: registry.k8s.io
     image: ingress-nginx/controller
     ## for backwards compatibility consider setting the full image url via the repository value below
     ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
     ## repository:
-    tag: "v1.1.2"
-    digest: sha256:28b11ce69e57843de44e3db6413e98d09de0f6688e33d4bd384002a44f78405c
+    tag: "v1.7.0"
+    digest: sha256:7612338342a1e7b8090bef78f2a04fffcadd548ccaabe8a47bf7758ff549a5f7
+    digestChroot: sha256:e84ef3b44c8efeefd8b0aa08770a886bfea1f04c53b61b4ba9a7204e9f1a7edc
     pullPolicy: IfNotPresent
     # www-data -> uid 101
     runAsUser: 101
     allowPrivilegeEscalation: true
-
   # -- Use an existing PSP instead of creating one
   existingPsp: ""
-
   # -- Configures the controller container name
   containerName: controller
-
   # -- Configures the ports that the nginx-controller listens on
   containerPort:
     http: 80
     https: 443
-
   # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
   config:
     hsts: "false"
-
   # -- Annotations to be added to the controller config configuration configmap.
   configAnnotations: {}
-
   # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers
   proxySetHeaders: {}
-
   # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
   addHeaders: {}
-
   # -- Optionally customize the pod dnsConfig.
   dnsConfig: {}
-
   # -- Optionally customize the pod hostname.
   hostname: {}
-
   # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
   # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
   # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
-  dnsPolicy: ClusterFirstWithHostNet
-
+  dnsPolicy: ClusterFirst
   # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
   # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply
   reportNodeInternalIp: false
-
   # -- Process Ingress objects without ingressClass annotation/ingressClassName field
   # Overrides value for --watch-ingress-without-class flag of the controller binary
   # Defaults to false
   watchIngressWithoutClass: false
-
   # -- Process IngressClass per name (additionally as per spec.controller).
   ingressClassByName: false
-
+  # -- This configuration enables Topology Aware Routing feature, used together with service annotation service.kubernetes.io/topology-aware-hints="auto"
+  # Defaults to false
+  enableTopologyAwareRouting: false
   # -- This configuration defines if Ingress Controller should allow users to set
   # their own *-snippet annotations, otherwise this is forbidden / dropped
   # when users add those annotations.
   # Global snippets in ConfigMap are still respected
   allowSnippetAnnotations: true
-
   # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
   # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
   # is merged
-  hostNetwork: true
-
+  hostNetwork: false
   ## Use host ports 80 and 443
   ## Disabled by default
   hostPort:
@@ -96,10 +86,8 @@ controller:
       http: 80
       # -- 'hostPort' https port
       https: 443
-
-  # -- Election ID to use for status update
-  electionID: ingress-controller-leader
-
+  # -- Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader'
+  electionID: ""
   ## This section refers to the creation of the IngressClass resource
   ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
   ingressClassResource:
@@ -111,23 +99,19 @@ controller:
     default: true
     # -- Controller-value of the controller that is processing this ingressClass
     controllerValue: "k8s.io/ingress-nginx"
-
     # -- Parameters is a link to a custom resource containing additional
     # configuration for the controller. This is optional if the controller
     # does not require extra parameters.
     parameters: {}
-
   # -- For backwards compatibility with ingress.class annotation, use ingressClass.
   # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation
   ingressClass: nginx
-
   # -- Labels to add to the pod container metadata
   podLabels: {}
   #  key: value
   # -- Security Context policies for controller pods
   podSecurityContext: {}
-
   # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls
   sysctls: {}
   # sysctls:
@@ -143,7 +127,6 @@ controller:
     # -- Allows overriding of the publish service to bind to
     # Must be <namespace>/<service_name>
     pathOverride: ""
-
   # Limit the scope of the controller to a specific namespace
   scope:
     # -- Enable 'scope' or not
@@ -153,26 +136,21 @@ controller:
     # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels
     # only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces.
     namespaceSelector: ""
-
   # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE)
   configMapNamespace: ""
-
   tcp:
     # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE)
     configMapNamespace: ""
     # -- Annotations to be added to the tcp config configmap
     annotations: {}
-
   udp:
     # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE)
     configMapNamespace: ""
     # -- Annotations to be added to the udp config configmap
     annotations: {}
-
   # -- Maxmind license key to download GeoLite2 Databases.
   ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
   maxmindLicenseKey: ""
-
   # -- Additional command line arguments to pass to nginx-ingress-controller
   # E.g. to specify the default SSL certificate you can use
   extraArgs: {}
@@ -190,7 +168,6 @@ controller:
 
   # -- Use a `DaemonSet` or `Deployment`
   kind: Deployment
-
   # -- Annotations to be added to the controller Deployment or DaemonSet
   ##
   annotations: {}
@@ -202,7 +179,6 @@ controller:
 
   #  keel.sh/policy: patch
   #  keel.sh/trigger: poll
-
   # -- The update strategy to apply to the Deployment or DaemonSet
   ##
   updateStrategy:
@@ -213,8 +189,6 @@ controller:
   # -- `minReadySeconds` to avoid killing pods before we are ready
   ##
   minReadySeconds: 0
-
-
   # -- Node tolerations for server scheduling to nodes with taints
   ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
   ##
@@ -228,68 +202,66 @@ controller:
   ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
   ##
   affinity: {}
-  # # An example of preferred pod anti-affinity, weight is in the range 1-100
-  # podAntiAffinity:
-  #   preferredDuringSchedulingIgnoredDuringExecution:
-  #   - weight: 100
-  #     podAffinityTerm:
-  #       labelSelector:
-  #         matchExpressions:
-  #         - key: app.kubernetes.io/name
-  #           operator: In
-  #           values:
-  #           - ingress-nginx
-  #         - key: app.kubernetes.io/instance
-  #           operator: In
-  #           values:
-  #           - ingress-nginx
-  #         - key: app.kubernetes.io/component
-  #           operator: In
-  #           values:
-  #           - controller
-  #       topologyKey: kubernetes.io/hostname
-
-  # # An example of required pod anti-affinity
-  # podAntiAffinity:
-  #   requiredDuringSchedulingIgnoredDuringExecution:
-  #   - labelSelector:
-  #       matchExpressions:
-  #       - key: app.kubernetes.io/name
-  #         operator: In
-  #         values:
-  #         - ingress-nginx
-  #       - key: app.kubernetes.io/instance
-  #         operator: In
-  #         values:
-  #         - ingress-nginx
-  #       - key: app.kubernetes.io/component
-  #         operator: In
-  #         values:
-  #         - controller
-  #     topologyKey: "kubernetes.io/hostname"
+    # # An example of preferred pod anti-affinity, weight is in the range 1-100
+    # podAntiAffinity:
+    #   preferredDuringSchedulingIgnoredDuringExecution:
+    #   - weight: 100
+    #     podAffinityTerm:
+    #       labelSelector:
+    #         matchExpressions:
+    #         - key: app.kubernetes.io/name
+    #           operator: In
+    #           values:
+    #           - ingress-nginx
+    #         - key: app.kubernetes.io/instance
+    #           operator: In
+    #           values:
+    #           - ingress-nginx
+    #         - key: app.kubernetes.io/component
+    #           operator: In
+    #           values:
+    #           - controller
+    #       topologyKey: kubernetes.io/hostname
+
+    # # An example of required pod anti-affinity
+    # podAntiAffinity:
+    #   requiredDuringSchedulingIgnoredDuringExecution:
+    #   - labelSelector:
+    #       matchExpressions:
+    #       - key: app.kubernetes.io/name
+    #         operator: In
+    #         values:
+    #         - ingress-nginx
+    #       - key: app.kubernetes.io/instance
+    #         operator: In
+    #         values:
+    #         - ingress-nginx
+    #       - key: app.kubernetes.io/component
+    #         operator: In
+    #         values:
+    #         - controller
+    #     topologyKey: "kubernetes.io/hostname"
   # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
   ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
   ##
   topologySpreadConstraints: []
-  # - maxSkew: 1
-  #   topologyKey: failure-domain.beta.kubernetes.io/zone
-  #   whenUnsatisfiable: DoNotSchedule
-  #   labelSelector:
-  #     matchLabels:
-  #       app.kubernetes.io/instance: ingress-nginx-internal
+    # - maxSkew: 1
+    #   topologyKey: topology.kubernetes.io/zone
+    #   whenUnsatisfiable: DoNotSchedule
+    #   labelSelector:
+    #     matchLabels:
+    #       app.kubernetes.io/instance: ingress-nginx-internal
   # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready
   ## wait up to five minutes for the drain of connections
   ##
   terminationGracePeriodSeconds: 300
-
   # -- Node labels for controller pod assignment
   ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
   ##
   nodeSelector:
     kubernetes.io/os: linux
-
   ## Liveness and readiness probe values
   ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
   ##
@@ -326,58 +298,55 @@ controller:
     timeoutSeconds: 1
     successThreshold: 1
     failureThreshold: 3
-
-
   # -- Path of the health check endpoint. All requests received on the port defined by
   # the healthz-port parameter are forwarded internally to this path.
   healthCheckPath: "/healthz"
-
   # -- Address to bind the health check endpoint.
   # It is better to set this option to the internal node address
   # if the ingress nginx controller is running in the `hostNetwork: true` mode.
   healthCheckHost: ""
-
   # -- Annotations to be added to controller pods
   ##
   podAnnotations: {}
-
   replicaCount: 1
-
+  # -- Define either 'minAvailable' or 'maxUnavailable', never both.
   minAvailable: 1
+  # -- Define either 'minAvailable' or 'maxUnavailable', never both.
+  # maxUnavailable: 1
   ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes
   ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
   ## Ideally, there should be no limits.
   ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
   resources:
-  ##  limits:
-  ##    cpu: 100m
-  ##    memory: 90Mi
+    ##  limits:
+    ##    cpu: 100m
+    ##    memory: 90Mi
     requests:
       cpu: 100m
      memory: 90Mi
-
   # Mutually exclusive with keda autoscaling
   autoscaling:
+    apiVersion: autoscaling/v2
     enabled: false
+    annotations: {}
     minReplicas: 1
     maxReplicas: 11
     targetCPUUtilizationPercentage: 50
     targetMemoryUtilizationPercentage: 50
     behavior: {}
-    # scaleDown:
-    #   stabilizationWindowSeconds: 300
-    #   policies:
-    #   - type: Pods
-    #     value: 1
-    #     periodSeconds: 180
-    # scaleUp:
-    #   stabilizationWindowSeconds: 300
-    #   policies:
-    #   - type: Pods
-    #     value: 2
-    #     periodSeconds: 60
-
+      # scaleDown:
+      #   stabilizationWindowSeconds: 300
+      #   policies:
+      #   - type: Pods
+      #     value: 1
+      #     periodSeconds: 180
+      # scaleUp:
+      #   stabilizationWindowSeconds: 300
+      #   policies:
+      #   - type: Pods
+      #     value: 2
+      #     periodSeconds: 60
   autoscalingTemplate: []
   # Custom or additional autoscaling metrics
   # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
@@ -407,40 +376,37 @@ controller:
     #  annotations:
     #    key: value
     triggers: []
-    # - type: prometheus
-    #   metadata:
-    #     serverAddress: http://<prometheus-host>:9090
-    #     metricName: http_requests_total
-    #     threshold: '100'
-    #     query: sum(rate(http_requests_total{deployment="my-deployment"}[2m]))
+      # - type: prometheus
+      #   metadata:
+      #     serverAddress: http://<prometheus-host>:9090
+      #     metricName: http_requests_total
+      #     threshold: '100'
+      #     query: sum(rate(http_requests_total{deployment="my-deployment"}[2m]))
     behavior: {}
-    # scaleDown:
-    #   stabilizationWindowSeconds: 300
-    #   policies:
-    #   - type: Pods
-    #     value: 1
-    #     periodSeconds: 180
-    # scaleUp:
-    #   stabilizationWindowSeconds: 300
-    #   policies:
-    #   - type: Pods
-    #     value: 2
-    #     periodSeconds: 60
+      # scaleDown:
+      #   stabilizationWindowSeconds: 300
+      #   policies:
+      #   - type: Pods
+      #     value: 1
+      #     periodSeconds: 180
+      # scaleUp:
+      #   stabilizationWindowSeconds: 300
+      #   policies:
+      #   - type: Pods
+      #     value: 2
+      #     periodSeconds: 60
   # -- Enable mimalloc as a drop-in replacement for malloc.
   ## ref: https://github.com/microsoft/mimalloc
   ##
   enableMimalloc: true
-
   ## Override NGINX template
   customTemplate:
     configMapName: ""
     configMapKey: ""
-
   service:
-    enabled: false
-
+    enabled: true
     # -- If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were
     # using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
     # It allows choosing the protocol for each backend specified in the Kubernetes service.
@@ -448,7 +414,6 @@ controller:
     # Will be ignored for Kubernetes versions older than 1.20
     ##
     appProtocol: true
-
     annotations: {}
     labels: {}
     # clusterIP: ""
@@ -457,13 +422,11 @@ controller:
     ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
     ##
     externalIPs: []
-
-    # loadBalancerIP: ""
+    # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
+    loadBalancerIP: ""
     loadBalancerSourceRanges: []
-
     enableHttp: true
     enableHttps: true
-
     ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it.
     ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
     # externalTrafficPolicy: ""
@@ -482,23 +445,18 @@ controller:
     # The ipFamilies and clusterIPs fields depend on the value of this field.
     ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
     ipFamilyPolicy: "SingleStack"
-
     # -- List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically
     # based on cluster configuration and the ipFamilyPolicy field.
     ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
     ipFamilies:
       - IPv4
-
     ports:
       http: 80
       https: 443
-
     targetPorts:
       http: http
       https: https
-
     type: LoadBalancer
-
     ## type: NodePort
     ## nodePorts:
     ##   http: 32080
@@ -510,26 +468,24 @@ controller:
       https: ""
       tcp: {}
       udp: {}
-
     external:
       enabled: true
-
     internal:
       # -- Enables an additional internal load balancer (besides the external one).
       enabled: false
       # -- Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
       annotations: {}
-
       # loadBalancerIP: ""
       # -- Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0.
       loadBalancerSourceRanges: []
-
       ## Set external traffic policy to: "Local" to preserve source IP on
       ## providers supporting it
       ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
       # externalTrafficPolicy: ""
-
+  # shareProcessNamespace enables process namespace sharing within the pod.
+  # This can be used for example to signal log rotation using `kill -USR1` from a sidecar.
+  shareProcessNamespace: false
   # -- Additional containers to be added to the controller pod.
   # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
   extraContainers: []
@@ -570,15 +526,22 @@ controller:
   #    image: busybox
   #    command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
 
+  # -- Modules, which are mounted into the core nginx image. See values.yaml for a sample to add opentelemetry module
   extraModules: []
-  ## Modules, which are mounted into the core nginx image
-  # - name: opentelemetry
-  #   image: busybox
+  # - name: mytestmodule
+  #   image: registry.k8s.io/ingress-nginx/mytestmodule
+  #   containerSecurityContext:
+  #     allowPrivilegeEscalation: false
   #
   # The image must contain a `/usr/local/bin/init_module.sh` executable, which
   # will be executed as initContainers, to move its config files within the
   # mounted volume.
 
+  opentelemetry:
+    enabled: false
+    image: registry.k8s.io/ingress-nginx/opentelemetry:v20230312-helm-chart-4.5.2-28-g66a760794@sha256:40f766ac4a9832f36f217bb0e98d44c8d38faeccbfe861fbc1a76af7e9ab257f
+    containerSecurityContext:
+      allowPrivilegeEscalation: false
   admissionWebhooks:
     annotations: {}
     # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem".
@@ -587,6 +550,15 @@ controller:
     ## These annotations will be added to the ValidatingWebhookConfiguration and
     ## the Jobs Spec of the admission webhooks.
     enabled: true
+    # -- Additional environment variables to set
+    extraEnvs: []
+    # extraEnvs:
+    #   - name: FOO
+    #     valueFrom:
+    #       secretKeyRef:
+    #         key: FOO
+    #         name: secret-resource
+    # -- Admission Webhook failure policy to use
     failurePolicy: Fail
     # timeoutSeconds: 10
     port: 8443
@@ -596,10 +568,9 @@ controller:
     objectSelector: {}
     # -- Labels to be added to admission webhooks
     labels: {}
-
     # -- Use an existing PSP instead of creating one
     existingPsp: ""
-
+    networkPolicyEnabled: false
     service:
       annotations: {}
       # clusterIP: ""
@@ -608,29 +579,30 @@ controller:
       loadBalancerSourceRanges: []
       servicePort: 443
       type: ClusterIP
-
     createSecretJob:
+      securityContext:
+        allowPrivilegeEscalation: false
       resources: {}
-      # limits:
-      #   cpu: 10m
-      #   memory: 20Mi
-      # requests:
-      #   cpu: 10m
-      #   memory: 20Mi
-
+        # limits:
+        #   cpu: 10m
+        #   memory: 20Mi
+        # requests:
+        #   cpu: 10m
+        #   memory: 20Mi
     patchWebhookJob:
+      securityContext:
+        allowPrivilegeEscalation: false
       resources: {}
-
     patch:
       enabled: true
       image:
-        registry: k8s.gcr.io
+        registry: registry.k8s.io
         image: ingress-nginx/kube-webhook-certgen
         ## for backwards compatibility consider setting the full image url via the repository value below
         ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
         ## repository:
-        tag: v1.1.1
-        digest: sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
+        tag: v20230312-helm-chart-4.5.2-28-g66a760794
+        digest: sha256:01d181618f270f2a96c04006f33b2699ad3ccb02da48d0f89b22abce084b292f
         pullPolicy: IfNotPresent
       # -- Provide a priority class name to the webhook patching job
       ##
@@ -641,33 +613,46 @@ controller:
       tolerations: []
       # -- Labels to be added to patch job resources
       labels: {}
-
-      runAsUser: 2000
-      fsGroup: 2000
-
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 2000
+        fsGroup: 2000
+    # Use certmanager to generate webhook certs
+    certManager:
+      enabled: false
+      # self-signed root certificate
+      rootCert:
+        # default to be 5y
+        duration: ""
+      admissionCert:
+        # default to be 1y
+        duration: ""
+      # issuerRef:
+      #   name: "issuer"
+      #   kind: "ClusterIssuer"
   metrics:
     port: 10254
+    portName: metrics
     # if this port is changed, change healthz-port: in extraArgs: accordingly
     enabled: false
-
     service:
      annotations: {}
       # prometheus.io/scrape: "true"
       # prometheus.io/port: "10254"
-
+      # -- Labels to be added to the metrics service resource
+      labels: {}
       # clusterIP: ""
       # -- List of IP addresses at which the stats-exporter service is available
       ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
       ##
       externalIPs: []
-
       # loadBalancerIP: ""
       loadBalancerSourceRanges: []
       servicePort: 10254
       type: ClusterIP
       # externalTrafficPolicy: ""
       # nodePort: ""
-
     serviceMonitor:
       enabled: false
       additionalLabels: {}
@@ -684,46 +669,44 @@ controller:
      targetLabels: []
       relabelings: []
       metricRelabelings: []
-
     prometheusRule:
       enabled: false
       additionalLabels: {}
       # namespace: ""
       rules: []
-      # # These are just examples rules, please adapt them to your needs
-      # - alert: NGINXConfigFailed
-      #   expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
-      #   for: 1s
-      #   labels:
-      #     severity: critical
-      #   annotations:
-      #     description: bad ingress config - nginx config test failed
-      #     summary: uninstall the latest ingress changes to allow config reloads to resume
-      # - alert: NGINXCertificateExpiry
-      #   expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
-      #   for: 1s
-      #   labels:
-      #     severity: critical
-      #   annotations:
-      #     description: ssl certificate(s) will expire in less then a week
-      #     summary: renew expiring certificates to avoid downtime
-      # - alert: NGINXTooMany500s
-      #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
-      #   for: 1m
-      #   labels:
-      #     severity: warning
-      #   annotations:
-      #     description: Too many 5XXs
-      #     summary: More than 5% of all requests returned 5XX, this requires your attention
-      # - alert: NGINXTooMany400s
-      #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
-      #   for: 1m
-      #   labels:
-      #     severity: warning
-      #   annotations:
-      #     description: Too many 4XXs
-      #     summary: More than 5% of all requests returned 4XX, this requires your attention
-
+        # # These are just examples rules, please adapt them to your needs
+        # - alert: NGINXConfigFailed
+        #   expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
+        #   for: 1s
+        #   labels:
+        #     severity: critical
+        #   annotations:
+        #     description: bad ingress config - nginx config test failed
+        #     summary: uninstall the latest ingress changes to allow config reloads to resume
+        # - alert: NGINXCertificateExpiry
+        #   expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
+        #   for: 1s
+        #   labels:
+        #     severity: critical
+        #   annotations:
+        #     description: ssl certificate(s) will expire in less then a week
+        #     summary: renew expiring certificates to avoid downtime
+        # - alert: NGINXTooMany500s
+        #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+        #   for: 1m
+        #   labels:
+        #     severity: warning
+        #   annotations:
+        #     description: Too many 5XXs
+        #     summary: More than 5% of all requests returned 5XX, this requires your attention
+        # - alert: NGINXTooMany400s
+        #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+        #   for: 1m
+        #   labels:
+        #     severity: warning
+        #   annotations:
+        #     description: Too many 4XXs
+        #     summary: More than 5% of all requests returned 4XX, this requires your attention
   # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
   # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
   # to 300, allowing the draining of connections up to five minutes.
@@ -736,22 +719,18 @@ controller:
       exec:
         command:
           - /wait-shutdown
-
   priorityClassName: ""
-
   # -- Rollback limit
   ##
   revisionHistoryLimit: 10
-
 ## Default 404 backend
 ##
 defaultBackend:
   ##
   enabled: false
-
   name: defaultbackend
   image:
-    registry: k8s.gcr.io
+    registry: registry.k8s.io
     image: defaultbackend-amd64
     ## for backwards compatibility consider setting the full image url via the repository value below
     ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
@@ -763,21 +742,16 @@ defaultBackend:
     runAsNonRoot: true
     readOnlyRootFilesystem: true
     allowPrivilegeEscalation: false
-
   # -- Use an existing PSP instead of creating one
   existingPsp: ""
-
   extraArgs: {}
-
   serviceAccount:
     create: true
     name: ""
     automountServiceAccountToken: true
   # -- Additional environment variables to set for defaultBackend pods
   extraEnvs: []
-
   port: 8080
-
   ## Readiness and liveness probes for default backend
   ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
   ##
@@ -793,7 +767,16 @@ defaultBackend:
     periodSeconds: 5
     successThreshold: 1
     timeoutSeconds: 5
+  # -- The update strategy to apply to the Deployment or DaemonSet
+  ##
+  updateStrategy: {}
+  #  rollingUpdate:
+  #    maxUnavailable: 1
+  #  type: RollingUpdate
+  # -- `minReadySeconds` to avoid killing pods before we are ready
+  ##
+  minReadySeconds: 0
   # -- Node tolerations for server scheduling to nodes with taints
   ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
   ##
@@ -804,19 +787,16 @@ defaultBackend:
   #    effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
 
   affinity: {}
-
   # -- Security Context policies for controller pods
   # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
   # notes on enabling and using sysctls
   ##
   podSecurityContext: {}
-
   # -- Security Context policies for controller main container.
   # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
   # notes on enabling and using sysctls
   ##
   containerSecurityContext: {}
-
   # -- Labels to add to the pod container metadata
   podLabels: {}
   #  key: value
@@ -826,15 +806,11 @@ defaultBackend:
   ##
   nodeSelector:
     kubernetes.io/os: linux
-
   # -- Annotations to be added to default backend pods
   ##
   podAnnotations: {}
-
   replicaCount: 1
-
   minAvailable: 1
-
   resources: {}
   # limits:
   #   cpu: 10m
@@ -854,69 +830,64 @@ defaultBackend:
   #   emptyDir: {}
 
   autoscaling:
+    apiVersion: autoscaling/v2
     annotations: {}
     enabled: false
     minReplicas: 1
     maxReplicas: 2
     targetCPUUtilizationPercentage: 50
     targetMemoryUtilizationPercentage: 50
-
   service:
     annotations: {}
-
     # clusterIP: ""
     # -- List of IP addresses at which the default backend service is available
     ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
     ##
     externalIPs: []
-
     # loadBalancerIP: ""
     loadBalancerSourceRanges: []
     servicePort: 80
     type: ClusterIP
-
   priorityClassName: ""
   # -- Labels to be added to the default backend resources
   labels: {}
-
 ## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
 rbac:
   create: true
   scope: false
-
 ## If true, create & use Pod Security Policy resources
 ## https://kubernetes.io/docs/concepts/policy/pod-security-policy/
 podSecurityPolicy:
   enabled: false
-
 serviceAccount:
   create: true
   name: ""
   automountServiceAccountToken: true
   # -- Annotations for the controller service account
   annotations: {}
-
 # -- Optional array of imagePullSecrets containing private registry credentials
 ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
 imagePullSecrets: []
 # - name: secretName
 
-# -- TCP service key:value pairs
+# -- TCP service key-value pairs
 ## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md
 ##
 tcp:
-  2049: "common/nfs-server-provisioner:2049"
-  #5432: "common/postgresql:5432"
+  2049: "foundry/nfs-server-provisioner:2049"
+  #5432: "foundry/postgresql:5432"
 
-# -- UDP service key:value pairs
+# -- UDP service key-value pairs
 ## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md
 ##
 udp: {}
 #  53: "kube-system/kube-dns:53"
 
+# -- Prefix for TCP and UDP ports names in ingress controller service
+## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration
+portNamePrefix: ""
 # -- (string) A base64-encoded Diffie-Hellman parameter.
 # This can be generated with: `openssl dhparam 4096 2> /dev/null | base64`
 ## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param
-dhParam:
-
+dhParam: ""
diff --git a/setup-appliance b/setup-appliance
index 5d82c0a..a4280db 100644
--- a/setup-appliance
+++ b/setup-appliance
@@ -71,7 +71,7 @@ fi
 # Install k3s
 mkdir -p /etc/rancher/k3s
 echo "nameserver 10.0.1.1" >> /etc/rancher/k3s/resolv.conf
-curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="v1.25.6+k3s1" INSTALL_K3S_EXEC="--disable traefik --disable servicelb --resolv-conf /etc/rancher/k3s/resolv.conf" sh -
+curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="v1.25.6+k3s1" INSTALL_K3S_EXEC="--disable traefik --resolv-conf /etc/rancher/k3s/resolv.conf" sh -
 sudo -u $SSH_USERNAME mkdir ~/.kube
 cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
 sed -i 's/default/foundry/g' ~/.kube/config
diff --git a/variables.pkr.hcl b/variables.pkr.hcl
index de8bf69..3e2267e 100644
--- a/variables.pkr.hcl
+++ b/variables.pkr.hcl
@@ -54,8 +54,8 @@ locals {
   ]
   cpus             = 2
   disk_size        = 40000
-  iso_url          = "https://releases.ubuntu.com/jammy/ubuntu-22.04.1-live-server-amd64.iso"
-  iso_checksum     = "sha256:10f19c5b2b8d6db711582e0e27f5116296c34fe4b313ba45f9b201a5007056cb"
+  iso_url          = "https://releases.ubuntu.com/22.04.2/ubuntu-22.04.2-live-server-amd64.iso"
+  iso_checksum     = "sha256:5e38b55d57d94ff029719342357325ed3bda38fa80054f9330dc789cd2d43931"
   memory           = 4096
   shutdown_command = "echo '${var.ssh_password}'|sudo -S shutdown -P now"
   video_memory     = 32
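
Rollout sketch (appended note, not part of the patch): one plausible way to apply and verify this change set. The Helm release name "ingress-nginx", repo alias "ingress-nginx", and the "foundry" namespace are illustrative assumptions; only the values file path and the dhparam command come from the diff above.

    # Optional: generate the base64-encoded Diffie-Hellman parameter for dhParam
    # (command quoted from the values file comment)
    openssl dhparam 4096 2> /dev/null | base64

    # Apply the updated values; controller v1.7.0 corresponds to chart 4.6.x
    helm repo update
    helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \
      --namespace foundry \
      --values foundry/ingress-nginx.values.yaml

    # With hostNetwork off, service.enabled=true, and k3s servicelb re-enabled,
    # the controller Service should now get a LoadBalancer address from k3s ServiceLB
    kubectl --namespace foundry get service

    # Optional: cross-check the new Ubuntu 22.04.2 ISO checksum used by Packer
    curl -fsSL https://releases.ubuntu.com/22.04.2/SHA256SUMS | grep live-server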