diff --git a/foundry/dnsmasq.conf b/foundry/dnsmasq.conf index 2d6735d..6e7e92f 100644 --- a/foundry/dnsmasq.conf +++ b/foundry/dnsmasq.conf @@ -1,2 +1,3 @@ bind-dynamic +listen-address=10.0.1.1 interface-name=foundry.local,eth0 diff --git a/foundry/gameboard.values.yaml b/foundry/gameboard.values.yaml index 337eedc..24ba86b 100644 --- a/foundry/gameboard.values.yaml +++ b/foundry/gameboard.values.yaml @@ -43,14 +43,20 @@ gameboard-api: ingress: enabled: true - annotations: {} + annotations: + nginx.ingress.kubernetes.io/proxy-body-size: 10m + hosts: - host: foundry.local paths: - - /gameboard/api - - /gameboard/hub - - /gameboard/img - - /gameboard/doc + - path: /gameboard/api + pathType: ImplementationSpecific + - path: /gameboard/hub + pathType: ImplementationSpecific + - path: /gameboard/img + pathType: ImplementationSpecific + - path: /gameboard/doc + pathType: ImplementationSpecific tls: - secretName: appliance-cert hosts: @@ -172,7 +178,9 @@ gameboard-ui: annotations: {} hosts: - host: foundry.local - paths: ["/gameboard"] + paths: + - path: /gameboard + pathType: ImplementationSpecific tls: - secretName: appliance-cert hosts: diff --git a/foundry/identity.values.yaml b/foundry/identity.values.yaml index 741779b..44ead7a 100644 --- a/foundry/identity.values.yaml +++ b/foundry/identity.values.yaml @@ -11,7 +11,9 @@ identity-api: annotations: {} hosts: - host: foundry.local - paths: [ "/identity" ] + paths: + - path: /identity + pathType: ImplementationSpecific tls: - secretName: appliance-cert hosts: @@ -139,7 +141,9 @@ identity-ui: annotations: {} hosts: - host: foundry.local - paths: [ "/identity/ui" ] + paths: + - path: /identity/ui + pathType: ImplementationSpecific tls: - secretName: appliance-cert hosts: diff --git a/foundry/postgresql.values.yaml b/foundry/postgresql.values.yaml index 9cb2f63..773f219 100644 --- a/foundry/postgresql.values.yaml +++ b/foundry/postgresql.values.yaml @@ -1,511 +1,832 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## @section Global parameters +## Please, note that this will override the parameters, including dependencies, configured to use the global value ## global: - postgresql: {} -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass - -## Bitnami PostgreSQL image version -## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ -## -image: - registry: docker.io - repository: bitnami/postgresql - tag: 11.9.0-debian-10-r73 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## @param global.imageRegistry Global Docker image registry ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + imageRegistry: "" + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## e.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName ## - # pullSecrets: - # - myRegistryKeySecretName - - ## Set to true if you would like to see extra information on logs - ## It turns BASH and NAMI debugging in minideb - ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + imagePullSecrets: [] + ## @param global.storageClass Global StorageClass for Persistent Volume(s) ## - debug: false + storageClass: "" + postgresql: + ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) + ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) + ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) + ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) + ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`) + ## + auth: + postgresPassword: foundry + username: "" + password: "" + database: "" + existingSecret: "" + ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) + ## + service: + ports: + postgresql: "" -## String to partially override postgresql.fullname template (will maintain the release name) +## @section Common parameters ## -# nameOverride: -## String to fully override postgresql.fullname template +## @param kubeVersion Override Kubernetes version ## -# fullnameOverride: - +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) ## -## Init containers parameters: -## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template ## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: buster - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - ## Init container Security Context - ## Note: the chown of the data folder is done to securityContext.runAsUser - ## and not the below volumePermissions.securityContext.runAsUser - ## When runAsUser is set to special value "auto", init container will try to chwon the - ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` - ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). - ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with - ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false - ## - securityContext: - runAsUser: 0 - -## Use an alternate scheduler, e.g. "stork". 
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain ## -# schedulerName: - -## Pod Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) ## -securityContext: - enabled: true - fsGroup: 1001 - -## Container Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources ## -containerSecurityContext: - enabled: true - runAsUser: 1001 - -## Pod Service Account -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources ## -serviceAccount: +commonAnnotations: {} +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## enabled: false - ## Name of an already existing service account. Setting this value disables the automatic service account creation. - # name: + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity -## Pod Security Policy -## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## @section PostgreSQL common parameters ## -psp: - create: false -## Creates role for ServiceAccount -## Required for PSP +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## @param image.registry PostgreSQL image registry +## @param image.repository PostgreSQL image repository +## @param image.tag PostgreSQL image tag (immutable tags are recommended) +## @param image.pullPolicy PostgreSQL image pull policy +## @param image.pullSecrets Specify image pull secrets +## @param image.debug Specify if debug values should be set ## -rbac: - create: false - -replication: - enabled: false - user: repl_user - password: repl_password - slaveReplicas: 1 - ## Set synchronous commit mode: on, off, remote_apply, remote_write and local - ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL +image: + registry: docker.io + repository: bitnami/postgresql + tag: 14.1.0-debian-10-r80 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## - synchronousCommit: 'off' - ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication - ## NOTE: It cannot be > slaveReplicas + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName ## - numSynchronousReplicas: 0 - ## Replication Cluster application name. 
Useful for defining multiple replication policies + pullSecrets: [] + ## Set to true if you would like to see extra information on logs ## - applicationName: my_application - -## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) -# postgresqlPostgresPassword: - -## PostgreSQL user (has superuser privileges if username is `postgres`) -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run -## -postgresqlUsername: postgres - -## PostgreSQL password + debug: false +## Authentication parameters ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run -## -postgresqlPassword: foundry - -## PostgreSQL password using existing secret -## existingSecret: secret -## - -## Mount PostgreSQL secret as a file instead of passing environment variable -# usePasswordFile: false - -## Create a database ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run ## -# postgresqlDatabase: - -## PostgreSQL data dir -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -postgresqlDataDir: /bitnami/postgresql/data - -## An array to add extra environment variables -## For example: -## extraEnv: -## - name: FOO -## value: "bar" -## -# extraEnv: -extraEnv: [] - -## Name of a ConfigMap containing extra env vars -## -# extraEnvVarsCM: - -## Specify extra initdb args -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -# postgresqlInitdbArgs: - -## Specify a custom location for the PostgreSQL transaction log -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -# postgresqlInitdbWalDir: - -## PostgreSQL configuration -## Specify runtime configuration parameters as a dict, using camelCase, e.g. -## {"sharedBuffers": "500MB"} -## Alternatively, you can put your postgresql.conf under the files/ directory -## ref: https://www.postgresql.org/docs/current/static/runtime-config.html -## -# postgresqlConfiguration: - -## PostgreSQL extended configuration -## As above, but _appended_ to the main configuration -## Alternatively, you can put your *.conf under the files/conf.d/ directory -## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +auth: + ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. 
Otherwise, remote access will be blocked for this user + ## + enablePostgresUser: true + ## @param auth.postgresPassword Password for the "postgres" admin user + ## + postgresPassword: "" + ## @param auth.username Name for a custom user to create + ## + username: "" + ## @param auth.password Password for the custom user to create + ## + password: "" + ## @param auth.database Name for a custom database to create + ## + database: "" + ## @param auth.replicationUsername Name of the replication user + ## + replicationUsername: repl_user + ## @param auth.replicationPassword Password for the replication user + ## + replicationPassword: "" + ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials + ## `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret + ## The secret must contain the keys `postgres-password` (which is the password for "postgres" admin user), + ## `password` (which is the password for the custom user to create when `auth.username` is set), + ## and `replication-password` (which is the password for replication user). + ## The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and + ## picked from this secret in this case. + ## The value is evaluated as a template. + ## + existingSecret: "" + ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable + ## + usePasswordFiles: false +## @param architecture PostgreSQL architecture (`standalone` or `replication`) ## -# postgresqlExtendedConf: - -## Configure current cluster's master server to be the standby server in other cluster. -## This will allow cross cluster replication and provide cross cluster high availability. -## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. +architecture: standalone +## Replication configuration +## Ignored if `architecture` is `standalone` ## -masterAsStandBy: - enabled: false - # masterHost: - # masterPort: - -## PostgreSQL client authentication configuration -## Specify content for pg_hba.conf -## Default: do not create pg_hba.conf -## Alternatively, you can put your pg_hba.conf under the files/ directory -# pgHbaConfiguration: |- -# local all all trust -# host all all localhost trust -# host mydatabase mysuser 192.168.0.0/24 md5 - -## ConfigMap with PostgreSQL configuration -## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration -# configurationConfigMap: - -## ConfigMap with PostgreSQL extended configuration -# extendedConfConfigMap: - -## initdb scripts -## Specify dictionary of scripts to be run at first boot -## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +replication: + ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` + ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. + ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + ## + synchronousCommit: "off" + numSynchronousReplicas: 0 + ## @param replication.applicationName Cluster application name. 
Useful for advanced replication settings + ## + applicationName: my_application +## @param containerPorts.postgresql PostgreSQL container port ## -# initdbScripts: -# my_init_script.sh: | -# #!/bin/sh -# echo "Do something." - -## ConfigMap with scripts to be run at first boot -## NOTE: This will override initdbScripts -# initdbScriptsConfigMap: - -## Secret with scripts to be run at first boot (in case it contains sensitive information) -## NOTE: This can work along initdbScripts or initdbScriptsConfigMap -# initdbScriptsSecret: - -## Specify the PostgreSQL username and password to execute the initdb scripts -# initdbUser: -# initdbPassword: - +containerPorts: + postgresql: 5432 ## Audit settings ## https://github.com/bitnami/bitnami-docker-postgresql#auditing +## @param audit.logHostname Log client hostnames +## @param audit.logConnections Add client log-in operations to the log file +## @param audit.logDisconnections Add client log-outs operations to the log file +## @param audit.pgAuditLog Add operations to log using the pgAudit extension +## @param audit.pgAuditLogCatalog Log catalog using pgAudit +## @param audit.clientMinMessages Message log level to share with the user +## @param audit.logLinePrefix Template for log line prefix (default if not set) +## @param audit.logTimezone Timezone for the log timestamps ## audit: - ## Log client hostnames - ## logHostname: false - ## Log connections to the server - ## logConnections: false - ## Log disconnections - ## logDisconnections: false - ## Operation to audit using pgAudit (default if not set) - ## pgAuditLog: "" - ## Log catalog using pgAudit - ## pgAuditLogCatalog: "off" - ## Log level for clients - ## clientMinMessages: error - ## Template for log line prefix (default if not set) - ## logLinePrefix: "" - ## Log timezone - ## logTimezone: "" - -## Shared preload libraries -## -postgresqlSharedPreloadLibraries: "pgaudit" - -## Maximum total connections -## -postgresqlMaxConnections: - -## Maximum connections for the postgres user -## -postgresqlPostgresConnectionLimit: - -## Maximum connections for the created user -## -postgresqlDbUserConnectionLimit: - -## TCP keepalives interval -## -postgresqlTcpKeepalivesInterval: - -## TCP keepalives idle +## LDAP configuration +## @param ldap.enabled Enable LDAP support +## @param ldap.url LDAP URL beginning in the form `ldap[s]://host[:port]/basedn` +## @param ldap.server IP address or name of the LDAP server. 
+## @param ldap.port Port number on the LDAP server to connect to +## @param ldap.prefix String to prepend to the user name when forming the DN to bind +## @param ldap.suffix String to append to the user name when forming the DN to bind +## @param ldap.baseDN Root DN to begin the search for the user in +## @param ldap.bindDN DN of user to bind to LDAP +## @param ldap.bind_password Password for the user to bind to LDAP +## @param ldap.search_attr Attribute to match against the user name in the search +## @param ldap.search_filter The search filter to use when doing search+bind authentication +## @param ldap.scheme Set to `ldaps` to use LDAPS +## @param ldap.tls Set to `1` to use TLS encryption ## -postgresqlTcpKeepalivesIdle: - -## TCP keepalives count +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: "" + search_attr: "" + search_filter: "" + scheme: "" + tls: "" +## @param postgresqlDataDir PostgreSQL data dir folder ## -postgresqlTcpKeepalivesCount: - -## Statement timeout +postgresqlDataDir: /bitnami/postgresql/data +## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) ## -postgresqlStatementTimeout: - -## Remove pg_hba.conf lines with the following comma-separated patterns -## (cannot be used with custom pg_hba.conf) +postgresqlSharedPreloadLibraries: "pgaudit" +## Start PostgreSQL pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` +## ref: https://github.com/docker-library/postgres/issues/416 +## ref: https://github.com/containerd/containerd/issues/3654 ## -postgresqlPghbaRemoveFilters: - -## Optional duration in seconds the pod needs to terminate gracefully. 
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "" +## TLS configuration ## -# terminationGracePeriodSeconds: 30 +tls: + ## @param tls.enabled Enable TLS traffic support + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's + ## + preferServerCiphers: true + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + ## @param tls.crlFilename File containing a Certificate Revocation List + ## + crlFilename: "" -## LDAP configuration +## @section PostgreSQL Primary parameters ## -ldap: - enabled: false - url: '' - server: '' - port: '' - prefix: '' - suffix: '' - baseDN: '' - bindDN: '' - bind_password: - search_attr: '' - search_filter: '' - scheme: '' - tls: {} +primary: + ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap + ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html + ## + configuration: "" + ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration + ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + ## e.g:# + ## pgHbaConfiguration: |- + ## local all all trust + ## host all all localhost trust + ## host mydatabase mysuser 192.168.0.0/24 md5 + ## + pgHbaConfiguration: "" + ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration + ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored + ## + existingConfigmap: "" + ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration + ## NOTE: `primary.extendedConfiguration` will be ignored + ## + existingExtendedConfigmap: "" + ## Initdb configuration + ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#specifying-initdb-arguments + ## + initdb: + ## @param primary.initdb.args PostgreSQL initdb extra arguments + ## + args: "" + ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log + ## + postgresqlWalDir: "" + ## @param primary.initdb.scripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## e.g: + ## scripts: + ## my_init_script.sh: | + ## #!/bin/sh 
+ ## echo "Do something." + ## + scripts: {} + ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot + ## NOTE: This will override `primary.initdb.scripts` + ## + scriptsConfigMap: "" + ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` + ## + scriptsSecret: "" + ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts + ## + user: "" + ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts + ## + password: "" + ## Configure current cluster's primary server to be the standby server in other cluster. + ## This will allow cross cluster replication and provide cross cluster high availability. + ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. + ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not + ## @param primary.standby.primaryHost The Host of replication primary in the other cluster + ## @param primary.standby.primaryPort The Port of replication primary in the other cluster + ## + standby: + enabled: false + primaryHost: "" + primaryPort: "" + ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsSecret: "" + ## @param primary.command Override default container command (useful when using custom images) + ## + command: [] + ## @param primary.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + 
readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers + ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers + ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enable container security context + ## @param primary.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {} + ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: {} + ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) + ## + priorityClassName: "" + ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.annotations Annotations for PostgreSQL primary service + ## + annotations: {} + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## PostgreSQL Primary persistence configuration + ## + persistence: + ## @param primary.persistence.enabled Enable 
PostgreSQL Primary data persistence using PVC + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 1Gi + ## @param primary.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.persistence.dataSource Custom PVC data source + ## + dataSource: {} -## PostgreSQL service configuration +## @section PostgreSQL read only replica parameters ## -service: - ## PosgresSQL service type +readReplicas: + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## 
@param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe ## - type: ClusterIP - # clusterIP: None - port: 5432 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers + ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe + ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe ## - # nodePort: - - ## Provide any additional annotations which may be required. Evaluated as a template. + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one ## - annotations: {} - ## Set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + customLivenessProbe: {} + ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one ## - # loadBalancerIP: - ## Load Balancer sources. Evaluated as a template. - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + customReadinessProbe: {} + ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one ## - # loadBalancerSourceRanges: - # - 10.10.10.0/24 - -## Start master and slave(s) pod(s) without limitations on shm memory. -## By default docker and containerd (and possibly other container runtimes) -## limit `/dev/shm` to `64M` (see e.g. the -## [docker issue](https://github.com/docker-library/postgres/issues/416) and the -## [containerd issue](https://github.com/containerd/containerd/issues/3654), -## which could be not enough if PostgreSQL uses parallel workers heavily. -## -shmVolume: - ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove - ## this limitation. + customStartupProbe: {} + ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup ## - enabled: true - ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
- ## This option is ingored if `volumePermissions.enabled` is `false` + lifecycleHooks: {} + ## PostgreSQL read only resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers ## - chmod: - enabled: true - -## PostgreSQL data Persistent Volume Storage Class -## If defined, storageClassName: -## If set to "-", storageClassName: "", which disables dynamic provisioning -## If undefined (the default) or set to null, no storageClassName spec is -## set, choosing the default provisioner. (gp2 on AWS, standard on -## GKE, AWS & OpenStack) -## -persistence: - enabled: true - ## A manually managed Persistent Volume and Claim - ## If defined, PVC must be created manually before volume will be bound - ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.podSecurityContext.enabled Enable security context + ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod ## - # existingClaim: - - ## The path the volume will be mounted at, useful when using different - ## PostgreSQL images. + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.containerSecurityContext.enabled Enable container security context + ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container ## - mountPath: /bitnami/postgresql - - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. 
+ containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ ## - subPath: '' - - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 1Gi - annotations: {} - -## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies -## -updateStrategy: - type: RollingUpdate - -## -## PostgreSQL Master parameters -## -master: - ## Node, affinity, tolerations, and priorityclass settings for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + hostAliases: [] + ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) ## - nodeSelector: {} - affinity: {} - tolerations: [] labels: {} + ## @param readReplicas.annotations Annotations for PostgreSQL read only pods + ## annotations: {} + ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) + ## podLabels: {} + ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) + ## podAnnotations: {} - priorityClassName: '' - ## Extra init containers - ## Example + ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity ## - ## extraInitContainers: - ## - name: do-something - ## image: busybox - ## command: ['do', 'something'] + podAffinityPreset: "" + ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity ## - extraInitContainers: [] - - ## Additional PostgreSQL Master Volume mounts + podAntiAffinityPreset: soft + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity ## - extraVolumeMounts: [] - ## Additional PostgreSQL Master Volumes + nodeAffinityPreset: + ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set ## - extraVolumes: [] - ## Add sidecars to the pod + affinity: {} + ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ ## - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 + nodeSelector: {} + ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## - sidecars: [] - - ## Override the service configuration for master + tolerations: [] + ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods ## - service: {} - # type: - # nodePort: - # clusterIP: - -## -## PostgreSQL Slave parameters -## -slave: - ## Node, affinity, tolerations, and priorityclass settings for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + topologySpreadConstraints: {} + ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) ## - nodeSelector: {} - affinity: {} - tolerations: [] - labels: {} - annotations: {} - podLabels: {} - podAnnotations: {} - priorityClassName: '' - ## Extra init containers - ## Example + priorityClassName: "" + ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## - ## extraInitContainers: - ## - name: do-something - ## image: busybox - ## command: ['do', 'something'] + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods ## - extraInitContainers: [] - - ## Additional PostgreSQL Slave Volume mounts + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) ## extraVolumeMounts: [] - ## Additional PostgreSQL Slave Volumes + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) ## extraVolumes: [] - ## Add sidecars to the pod - ## + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) ## For example: ## sidecars: ## - name: your-image-name @@ -516,199 +837,349 @@ slave: ## containerPort: 1234 ## sidecars: [] - - ## Override the service configuration for slave + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example ## - service: {} - # type: - # nodePort: - # clusterIP: - - ## Whether to enable PostgreSQL slave replicas data Persistent + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## 
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ## PostgreSQL read only persistence configuration ## persistence: + ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC + ## enabled: true + ## @param readReplicas.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param readReplicas.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param readReplicas.persistence.dataSource Custom PVC data source + ## + dataSource: {} - # Override the resource configuration for slave - resources: {} - # requests: - # memory: 256Mi - # cpu: 250m - -## Configure resource requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: {} -# requests: -# memory: 128Mi -# cpu: 100m -# limits: -# memory: 256Mi -# cpu: 250m +## @section NetworkPolicy parameters -## Add annotations to all the deployed resources +## Add networkpolicies ## -commonAnnotations: {} - networkPolicy: - ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## @param networkPolicy.enabled Enable network policies ## enabled: false - - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to the port PostgreSQL is listening - ## on. When true, PostgreSQL will accept connections from any source - ## (with the correct destination port). + ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus) + ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. + ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. ## - allowExternal: true - - ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the DB. 
- ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + metrics: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: monitoring + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: monitoring + ## + podSelector: {} + ## Ingress Rules ## - ## Example: - ## explicitNamespacesSelector: - ## matchLabels: - ## role: frontend - ## matchExpressions: - ## - {key: role, operator: In, values: [frontend]} + ingressRules: + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed namespace(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed pod(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL primary node. + ## + primaryAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## customRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed namespace(s). + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed pod(s). + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL read-only nodes. + ## + readReplicasAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## CustomRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). + ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule ## - explicitNamespacesSelector: {} + egressRules: + # Deny connections to external. This is not compatible with an external database. 
+ denyConnectionsToExternal: false + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + +## @section Volume Permissions parameters -## Configure extra options for liveness and readiness probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node ## -livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - -readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r327 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 -## Custom Liveness probe -## -customLivenessProbe: {} +## @section Other Parameters -## Custom Rediness probe +## Service account for PostgreSQL to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ ## -customReadinessProbe: {} - +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Creates role for ServiceAccount +## @param rbac.create Create Role and RoleBinding (required for PSP to work) ## -## TLS configuration +rbac: + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later ## -tls: - # Enable TLS traffic - enabled: false - # - # Whether to use the server's TLS cipher preferences rather than the client's. - preferServerCiphers: true - # - # Name of the Secret that contains the certificates - certificatesSecret: '' - # - # Certificate filename - certFilename: '' - # - # Certificate Key filename - certKeyFilename: '' - # - # CA Certificate filename - # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate - # ref: https://www.postgresql.org/docs/9.6/auth-methods.html - certCAFilename: - # - # File containing a Certificate Revocation List - crlFilename: +psp: + create: false + +## @section Metrics Parameters -## Configure metrics exporter -## metrics: + ## @param metrics.enabled Start a prometheus exporter + ## enabled: false - # resources: {} - service: - type: ClusterIP - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '9187' - loadBalancerIP: - serviceMonitor: - enabled: false - additionalLabels: {} - # namespace: monitoring - # interval: 30s - # scrapeTimeout: 10s - ## Custom PrometheusRule to be defined - ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry + ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository + ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy + ## @param metrics.image.pullSecrets Specify image pull secrets ## - prometheusRule: - enabled: false - additionalLabels: {} - namespace: '' - ## These are just examples rules, please adapt them to your needs. - ## Make sure to constraint the rules to the current postgresql service. - ## rules: - ## - alert: HugeReplicationLag - ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 - ## for: 1m - ## labels: - ## severity: critical - ## annotations: - ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). - ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
- ## - rules: [] - image: registry: docker.io repository: bitnami/postgres-exporter - tag: 0.8.0-debian-10-r262 + tag: 0.10.1-debian-10-r14 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName ## - # pullSecrets: - # - myRegistryKeySecretName - ## Define additional custom metrics + pullSecrets: [] + ## @param metrics.customMetrics Define additional custom metrics ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file - # customMetrics: - # pg_database: - # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" - # metrics: - # - name: - # usage: "LABEL" - # description: "Name of the database" - # - size_bytes: - # usage: "GAUGE" - # description: "Size of the database in bytes" - # - ## An array to add extra env vars to configure postgres-exporter + ## customMetrics: + ## pg_database: + ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + customMetrics: {} + ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables ## For example: - # extraEnvVars: - # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS - # value: "true" - extraEnvVars: {} - - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## extraEnvVars: + ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + ## value: "true" ## - securityContext: - enabled: false + extraEnvVars: [] + ## PostgreSQL Prometheus exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true runAsUser: 1001 - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## Configure extra options for liveness and readiness probes + runAsNonRoot: true + ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers + ## @param 
metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe ## livenessProbe: enabled: true @@ -717,7 +1188,13 @@ metrics: timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 - + ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## readinessProbe: enabled: true initialDelaySeconds: 5 @@ -725,8 +1202,122 @@ metrics: timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 - -## Array with extra yaml to deploy with the chart. Evaluated as a template -## -extraDeploy: [] - + ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port + ## + containerPorts: + metrics: 9187 + ## PostgreSQL Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container + ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container + ## + resources: + limits: {} + requests: {} + ## Service configuration + ## + service: + ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port + ## + ports: + metrics: 9187 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param 
metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
+    ##
+    rules: []
diff --git a/foundry/setup-foundry b/foundry/setup-foundry
index e43ba84..827d6b6 100755
--- a/foundry/setup-foundry
+++ b/foundry/setup-foundry
@@ -93,9 +93,9 @@ helm install --wait -f gameboard.values.yaml gameboard sei/gameboard
 kubectl apply -f console-ingress.yaml
 
 # Add administrator user to Gameboard
-timeout 5m bash -c 'until kubectl exec postgresql-postgresql-0 -- env PGPASSWORD=foundry psql -lqt -U postgres | cut -d \| -f 1 | grep -qw gameboard; do sleep 5; done' || false
+timeout 5m bash -c 'until kubectl exec postgresql-0 -- env PGPASSWORD=foundry psql -lqt -U postgres | cut -d \| -f 1 | grep -qw gameboard; do sleep 5; done' || false
 sleep 5
-kubectl exec postgresql-postgresql-0 -- psql 'postgresql://postgres:foundry@localhost/gameboard' -c "INSERT INTO \"Users\" (\"Id\",\"Name\",\"ApprovedName\",\"Role\") VALUES ('dee684c5-2eaf-401a-915b-d3d4320fe5d5', 'Administrator', 'Administrator', 63);"
+kubectl exec postgresql-0 -- psql 'postgresql://postgres:foundry@localhost/gameboard' -c "INSERT INTO \"Users\" (\"Id\",\"Name\",\"ApprovedName\",\"Role\") VALUES ('dee684c5-2eaf-401a-915b-d3d4320fe5d5', 'Administrator', 'Administrator', 63);"
 
 # Create git repo to track changes
 git init
diff --git a/foundry/staticweb.values.yaml b/foundry/staticweb.values.yaml
index 2159e75..a4b312d 100644
--- a/foundry/staticweb.values.yaml
+++ b/foundry/staticweb.values.yaml
@@ -56,7 +56,9 @@ ingress:
   annotations: {}
   hosts:
     - host: foundry.local
-      paths: [ "/" ]
+      paths:
+        - path: /
+          pathType: ImplementationSpecific
   tls:
     - secretName: appliance-cert
       hosts:
diff --git a/foundry/topomojo.values.yaml b/foundry/topomojo.values.yaml
index 990c5a0..818bc39 100644
--- a/foundry/topomojo.values.yaml
+++ b/foundry/topomojo.values.yaml
@@ -48,9 +48,12 @@ topomojo-api:
     hosts:
       - host: foundry.local
         paths:
-          - "/topomojo/api"
-          - "/topomojo/hub"
-          - "/topomojo/docs"
+          - path: /topomojo/api
+            pathType: ImplementationSpecific
+          - path: /topomojo/hub
+            pathType: ImplementationSpecific
+          - path: /topomojo/docs
+            pathType: ImplementationSpecific
     tls:
       - secretName: appliance-cert
         hosts:
@@ -201,7 +204,9 @@ topomojo-ui:
       # kubernetes.io/tls-acme: "true"
     hosts:
       - host: foundry.local
-        paths: ["/topomojo"]
+        paths:
+          - path: /topomojo
+            pathType: ImplementationSpecific
     tls:
       - secretName: appliance-cert
         hosts:
@@ -271,4 +276,3 @@ topomojo-ui:
       "useLocalStorage": false
     }
   }
-
diff --git a/install/stage1 b/install/stage1
index 9ae82cc..4f0849b 100644
--- a/install/stage1
+++ b/install/stage1
@@ -25,14 +25,28 @@ apt-get install -y dnsmasq avahi-daemon jq nfs-common sshpass
 mv ~/foundry/dnsmasq.conf /etc/dnsmasq.d/foundry.conf
 chown root:root /etc/dnsmasq.d/foundry.conf
 systemctl restart dnsmasq
+sed -i -r 's/(use-ipv6=).*/\1no/' /etc/avahi/avahi-daemon.conf
 
 # Install MicroK8s
-snap install microk8s --classic --channel=1.20/stable
+snap install microk8s --classic --channel=1.23/stable
 microk8s status --wait-ready
-microk8s enable dns storage ingress host-access metrics-server
+microk8s enable dns storage ingress metrics-server
 usermod -a -G microk8s $SSH_USERNAME
 chown -f -R $SSH_USERNAME:$SSH_USERNAME ~/.kube
 sed -i '/^DNS\.5.*/a DNS.6 = foundry.local' /var/snap/microk8s/current/certs/csr.conf.template
+cat << EOF > /etc/netplan/01-host-access.yaml
+# Add loopback address for pods to talk to host
+network:
+  version: 2
+  ethernets:
+    lo:
+      match:
+        name: lo
+      addresses:
+      - 10.0.1.1/32:
+          label: lo:host-access
+EOF
+netplan apply
 
 # Install kubectl and Helm clients
 snap install kubectl --classic
@@ -55,7 +69,7 @@ ln -s /home/foundry/foundry/foundry-banner /etc/update-motd.d/05-foundry-banner
 sed -i "s/{version}/$APPLIANCE_VERSION/" ~/foundry/web/index.html
 echo -e "Foundry Appliance $APPLIANCE_VERSION \\\n \l \n" > /etc/issue
 
-# Disable IPv6 and reboot for changes to take effect
-sed -i -r 's/(GRUB_CMDLINE_LINUX_DEFAULT=).*/\1"quiet net.ifnames=0 biosdevname=0 ipv6.disable=1"/' /etc/default/grub
+# Enable quiet boot and reboot for changes to take effect
+sed -i -r 's/(GRUB_CMDLINE_LINUX_DEFAULT=).*/\1"quiet net.ifnames=0 biosdevname=0"/' /etc/default/grub
 update-grub
 reboot
diff --git a/variables.pkr.hcl b/variables.pkr.hcl
index de8d78f..cd415d5 100644
--- a/variables.pkr.hcl
+++ b/variables.pkr.hcl
@@ -47,7 +47,7 @@ variable "vsphere_network" {
 locals {
   boot_command = [
     " ",
-    "net.ifnames=0 biosdevname=0 ipv6.disable=1 autoinstall ds=nocloud-net;s=http://{{ .HTTPIP }}:{{ .HTTPPort }}/",
+    "net.ifnames=0 biosdevname=0 autoinstall ds=nocloud-net;s=http://{{ .HTTPIP }}:{{ .HTTPPort }}/",
     "",
     ""
   ]
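# Optional post-install sanity check (hedged sketch, not part of the patch itself):
# stage1 adds the 10.0.1.1 loopback alias with netplan and dnsmasq now listens on it
# (listen-address=10.0.1.1), so both the address and the DNS answer can be verified
# from the appliance. The dig call assumes the dnsutils package is installed.
ip addr show dev lo | grep 10.0.1.1
dig +short foundry.local @10.0.1.1

# The updated Bitnami chart names the database pod postgresql-0 (formerly
# postgresql-postgresql-0); the Gameboard administrator seeded by setup-foundry
# can be confirmed with the same exec pattern the script uses:
kubectl exec postgresql-0 -- env PGPASSWORD=foundry \
  psql -U postgres -d gameboard -c 'SELECT "Id","Name","Role" FROM "Users";'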