image:
  registry: ghcr.io
  repository: zalando/postgres-operator
  tag: v1.13.0
  pullPolicy: "IfNotPresent"

# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
#   - name: myRegistryKeySecretName

podAnnotations: {}
podLabels: {}

configTarget: "OperatorConfigurationCRD"
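# A sketch of the alternative target, assuming the chart's ConfigMap-based
# configuration should be used instead of the CRD:
# configTarget: "ConfigMap"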

# JSON logging format
enableJsonLogging: false

# general configuration parameters
configGeneral:
  # the deployment should create/update the CRDs
  enable_crd_registration: true
  # specify categories under which crds should be listed
  crd_categories:
    - "all"
  # update only the statefulsets without immediately doing the rolling update
  enable_lazy_spilo_upgrade: false
  # set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
  enable_pgversion_env_var: true
  # start any new database pod without limitations on shm memory
  enable_shm_volume: true
  # enables backwards compatible path between Spilo 12 and Spilo 13+ images
  enable_spilo_wal_path_compat: false
  # operator will sync only clusters where name starts with teamId prefix
  enable_team_id_clustername_prefix: false
  # etcd connection string for Patroni. Empty uses K8s-native DCS.
  etcd_host: ""
  # Spilo docker image
  docker_image: ghcr.io/zalando/spilo-16:3.3-p1

  # key name for annotation to ignore globally configured instance limits
  # ignore_instance_limits_annotation_key: ""

  # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
  # kubernetes_use_configmaps: false

  # min number of instances in Postgres cluster. -1 = no limit
  min_instances: -1
  # max number of instances in Postgres cluster. -1 = no limit
  max_instances: -1
  # period between consecutive repair requests
  repair_period: 5m
  # period between consecutive sync requests
  resync_period: 30m
  # can prevent certain cases of memory overcommitment
  # set_memory_request_to_limit: false

  # map of sidecar names to docker images
  # sidecar_docker_images:
  #   example: "exampleimage:exampletag"

  # number of routines the operator spawns to process requests concurrently
  workers: 8

# parameters describing Postgres users
configUsers:
  # roles to be granted to database owners
  # additional_owner_roles:
  #   - cron_admin

  # enable password rotation for app users that are not database owners
  enable_password_rotation: false
  # rotation interval for updating credentials in K8s secrets of app users
  password_rotation_interval: 90
  # retention interval to keep rotation users
  password_rotation_user_retention: 180
  # postgres username used for replication between instances
  replication_username: standby
  # postgres superuser name to be created by initdb
  super_username: postgres

configMajorVersionUpgrade:
  # "off": no upgrade, "manual": manifest triggers action, "full": minimal version violation triggers too
  major_version_upgrade_mode: "manual"
  # upgrades will only be carried out for clusters of listed teams when mode is "off"
  # major_version_upgrade_team_allow_list:
  #   - acid

  # minimal Postgres major version that will not automatically be upgraded
  minimal_major_version: "12"
  # target Postgres major version when upgrading clusters automatically
  target_major_version: "16"

configKubernetes:
  # list of additional capabilities for postgres container
  # additional_pod_capabilities:
  #   - "SYS_NICE"

  # default DNS domain of K8s cluster where operator is running
  cluster_domain: cluster.local
  # additional labels assigned to the cluster objects
  cluster_labels:
    application: spilo
  # label assigned to Kubernetes objects created by the operator
  cluster_name_label: cluster-name
  # additional annotations to add to every database pod
  # custom_pod_annotations:
  #   keya: valuea
  #   keyb: valueb

  # key name for annotation that compares manifest value with current date
  # delete_annotation_date_key: "delete-date"

  # key name for annotation that compares manifest value with cluster name
  # delete_annotation_name_key: "delete-clustername"

  # list of annotations propagated from cluster manifest to statefulset and deployment
  # downscaler_annotations:
  #   - deployment-time
  #   - downscaler/*

  # allow user secrets in other namespaces than the Postgres cluster
  enable_cross_namespace_secret: false
  # use finalizers to ensure all managed resources are deleted prior to the postgresql CR
  # this avoids stale resources in case the operator misses a delete event or is not running
  # during deletion
  enable_finalizers: false
  # enables initContainers to run actions before Spilo is started
  enable_init_containers: true
  # toggles if child resources should have an owner reference to the postgresql CR
  enable_owner_references: false
  # toggles if operator should delete PVCs on cluster deletion
  enable_persistent_volume_claim_deletion: true
  # toggles pod anti affinity on the Postgres pods
  enable_pod_antiaffinity: false
  # toggles PDB to set MinAvailable to 0 or 1
  enable_pod_disruption_budget: true
  # toggles readiness probe for database pods
  enable_readiness_probe: false
  # toggles if operator should delete secrets on cluster deletion
  enable_secrets_deletion: true
  # enables sidecar containers to run alongside Spilo in the same pod
  enable_sidecars: true

  # annotations to be ignored when comparing statefulsets, services etc.
  # ignored_annotations:
  #   - k8s.v1.cni.cncf.io/network-status

  # namespaced name of the secret containing infrastructure roles names and passwords
  # infrastructure_roles_secret_name: postgresql-infrastructure-roles

  # list of annotation keys that can be inherited from the cluster manifest
  # inherited_annotations:
  #   - owned-by

  # list of label keys that can be inherited from the cluster manifest
  # inherited_labels:
  #   - application
  #   - environment

  # timeout for successful migration of master pods from unschedulable node
  # master_pod_move_timeout: 20m

  # set of labels that a running and active node should possess to be considered ready
  # node_readiness_label:
  #   status: ready

  # defines how nodeAffinity from manifest should be merged with node_readiness_label
  # node_readiness_label_merge: "OR"

  # namespaced name of the secret containing the OAuth2 token to pass to the teams API
  # oauth_token_secret_name: postgresql-operator

  # toggle if `spilo-role=master` selector should be added to the PDB (Pod Disruption Budget)
  pdb_master_label_selector: true
  # defines the template for PDB names
  pdb_name_format: "postgres-{cluster}-pdb"
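  # Worked example for the template above: a cluster named "acid-minimal-cluster"
  # (illustrative name) would get a PDB called "postgres-acid-minimal-cluster-pdb".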
  # specify the PVC retention policy when scaling down and/or deleting
  persistent_volume_claim_retention_policy:
    when_deleted: "retain"
    when_scaled: "retain"
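  # A sketch of the other retention behavior, assuming "retain" and "delete" are the
  # two accepted values; "delete" removes the PVCs in the respective case:
  # persistent_volume_claim_retention_policy:
  #   when_deleted: "delete"
  #   when_scaled: "retain"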
  # switches pod anti affinity type to `preferredDuringSchedulingIgnoredDuringExecution`
  pod_antiaffinity_preferred_during_scheduling: false
  # override topology key for pod anti affinity
  pod_antiaffinity_topology_key: "kubernetes.io/hostname"
  # namespaced name of the ConfigMap with environment variables to populate on every pod
  # pod_environment_configmap: "default/my-custom-config"
  # name of the Secret (in cluster namespace) with environment variables to populate on every pod
  # pod_environment_secret: "my-custom-secret"

  # specify the pod management policy of stateful sets of Postgres clusters
  pod_management_policy: "ordered_ready"
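  # A sketch of the alternative policy, assuming "parallel" is also accepted;
  # it starts/terminates all pods of the statefulset at once:
  # pod_management_policy: "parallel"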
  # label assigned to the Postgres pods (and services/endpoints)
  pod_role_label: spilo-role
  # service account definition as JSON/YAML string to be used by postgres cluster pods
  # pod_service_account_definition: ""

  # role binding definition as JSON/YAML string to be used by pod service account
  # pod_service_account_role_binding_definition: ""

  # Postgres pods are terminated forcefully after this timeout
  pod_terminate_grace_period: 5m
  # template for database user secrets generated by the operator,
  # here username contains the namespace in the format namespace.username
  # if the user is in a different namespace than the cluster and cross-namespace secrets
  # are enabled via the `enable_cross_namespace_secret` flag in the configuration.
  secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
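  # Worked example for the default template: user "foo" in cluster "acid-minimal-cluster"
  # (illustrative names) would get a secret named
  # "foo.acid-minimal-cluster.credentials.postgresql.acid.zalan.do",
  # assuming {tprkind} resolves to "postgresql" and {tprgroup} to "acid.zalan.do".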
  # sharing unix socket of PostgreSQL (`pg_socket`) with the sidecars
  share_pgsocket_with_sidecars: false
  # set user and group for the spilo container (required to run Spilo as non-root process)
  # spilo_runasuser: 101
  # spilo_runasgroup: 103

  # group ID with write-access to volumes (required to run Spilo as non-root process)
  # spilo_fsgroup: 103

  # whether the Spilo container should run in privileged mode
  spilo_privileged: false
  # whether the Spilo container should run with additional permissions other than parent.
  # required by cron which needs setuid
  spilo_allow_privilege_escalation: true
  # storage resize strategy, available options are: ebs, pvc, off or mixed
  storage_resize_mode: pvc
  # pod toleration assigned to instances of every Postgres cluster
  # toleration:
  #   key: db-only
  #   operator: Exists
  #   effect: NoSchedule

  # operator watches for postgres objects in the given namespace
  watched_namespace: "*"  # listen to all namespaces
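  # To watch a single namespace instead, set its name (illustrative value):
  # watched_namespace: "postgres"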

# configure resource requests for the Postgres pods
configPostgresPodResources:
  # CPU limits for the postgres containers
  default_cpu_limit: "1"
  # CPU request value for the postgres containers
  default_cpu_request: 100m
  # memory limits for the postgres containers
  default_memory_limit: 500Mi
  # memory request value for the postgres containers
  default_memory_request: 100Mi
  # optional upper boundary for CPU request
  # max_cpu_request: "1"

  # optional upper boundary for memory request
  # max_memory_request: 4Gi

  # hard CPU minimum required to properly run a Postgres cluster
  min_cpu_limit: 250m
  # hard memory minimum required to properly run a Postgres cluster
  min_memory_limit: 250Mi

# timeouts related to some operator actions
configTimeouts:
  # interval between consecutive attempts of operator calling the Patroni API
  patroni_api_check_interval: 1s
  # timeout when waiting for successful response from Patroni API
  patroni_api_check_timeout: 5s
  # timeout when waiting for the Postgres pods to be deleted
  pod_deletion_wait_timeout: 10m
  # timeout when waiting for pod role and cluster labels
  pod_label_wait_timeout: 10m
  # interval between consecutive attempts waiting for postgresql CRD to be created
  ready_wait_interval: 3s
  # timeout for the complete postgres CRD creation
  ready_wait_timeout: 30s
  # interval to wait between consecutive attempts to check for some K8s resources
  resource_check_interval: 3s
  # timeout when waiting for the presence of a certain K8s resource (e.g. Sts, PDB)
  resource_check_timeout: 10m

# configure behavior of load balancers
configLoadBalancer:
  # DNS zone for cluster DNS name when load balancer is configured for cluster
  db_hosted_zone: db.example.com
  # annotations to apply to service when load balancing is enabled
  # custom_service_annotations:
  #   keyx: valuez
  #   keya: valuea

  # toggles service type load balancer pointing to the master pod of the cluster
  enable_master_load_balancer: false
  # toggles service type load balancer pointing to the master pooler pod of the cluster
  enable_master_pooler_load_balancer: false
  # toggles service type load balancer pointing to the replica pod of the cluster
  enable_replica_load_balancer: false
  # toggles service type load balancer pointing to the replica pooler pod of the cluster
  enable_replica_pooler_load_balancer: false
  # define external traffic policy for the load balancer
  external_traffic_policy: "Cluster"
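  # "Local" is the other standard Kubernetes policy; it preserves the client source IP
  # but only routes to nodes that run an endpoint of the service, e.g.:
  # external_traffic_policy: "Local"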
  # defines the DNS name string template for the master load balancer cluster
  master_dns_name_format: "{cluster}.{namespace}.{hostedzone}"
  # deprecated DNS template for master load balancer using team name
  master_legacy_dns_name_format: "{cluster}.{team}.{hostedzone}"
  # defines the DNS name string template for the replica load balancer cluster
  replica_dns_name_format: "{cluster}-repl.{namespace}.{hostedzone}"
  # deprecated DNS template for replica load balancer using team name
  replica_legacy_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
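  # Worked example with illustrative names: cluster "acid-minimal-cluster" in namespace
  # "default" under the hosted zone above resolves to
  # "acid-minimal-cluster.default.db.example.com" for the master and
  # "acid-minimal-cluster-repl.default.db.example.com" for the replica.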

# options to aid debugging of the operator itself
configDebug:
  # toggles verbose debug logs from the operator
  debug_logging: true
  # toggles operator functionality that requires access to the postgres database
  enable_database_access: true

# parameters affecting logging and REST API listener
configLoggingRestApi:
  # REST API listener listens to this port
  api_port: 8080
  # number of entries in the cluster history ring buffer
  cluster_history_entries: 1000
  # number of lines in the ring buffer used to store cluster logs
  ring_log_lines: 100

# configure interaction with non-Kubernetes objects from AWS or GCP
configAwsOrGcp:
  # Additional Secret (aws or gcp credentials) to mount in the pod
  # additional_secret_mount: "some-secret-name"

  # Path to mount the above Secret in the filesystem of the container(s)
  # additional_secret_mount_path: "/some/dir"

  # AWS region used to store EBS volumes
  aws_region: eu-central-1

  # enable automatic migration on AWS from gp2 to gp3 volumes
  enable_ebs_gp3_migration: false
  # defines maximum volume size in GB until which auto migration happens
  # enable_ebs_gp3_migration_max_size: 1000

  # GCP credentials that will be used by the operator / pods
  # gcp_credentials: ""

  # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
  # kube_iam_role: ""

  # S3 bucket to use for shipping postgres daily logs
  # log_s3_bucket: ""

  # S3 bucket to use for shipping WAL segments with WAL-E
  # wal_s3_bucket: ""

  # GCS bucket to use for shipping WAL segments with WAL-E
  # wal_gs_bucket: ""

  # Azure Storage Account to use for shipping WAL segments with WAL-G
  # wal_az_storage_account: ""

# configure K8s cron job managed by the operator
configLogicalBackup:
  # Azure Storage Account specs to store backup results
  # logical_backup_azure_storage_account_name: ""
  # logical_backup_azure_storage_container: ""
  # logical_backup_azure_storage_account_key: ""

  # resources for logical backup pod, if empty configPostgresPodResources will be used
  # logical_backup_cpu_limit: ""
  # logical_backup_cpu_request: ""
  # logical_backup_memory_limit: ""
  # logical_backup_memory_request: ""

  # image for pods of the logical backup job (example runs pg_dumpall)
  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
  # path of google cloud service account json file
  # logical_backup_google_application_credentials: ""

  # prefix for the backup job name
  logical_backup_job_prefix: "logical-backup-"
  # storage provider - either "s3", "gcs" or "az"
  logical_backup_provider: "s3"
  # S3 Access Key ID
  logical_backup_s3_access_key_id: ""
  # S3 bucket to store backup results
  logical_backup_s3_bucket: "my-bucket-url"
  # S3 bucket prefix to use
  logical_backup_s3_bucket_prefix: "spilo"
  # S3 region of bucket
  logical_backup_s3_region: ""
  # S3 endpoint URL when not using AWS
  logical_backup_s3_endpoint: ""
  # S3 Secret Access Key
  logical_backup_s3_secret_access_key: ""
  # S3 server side encryption
  logical_backup_s3_sse: "AES256"
  # S3 retention time for stored backups, for example "2 week" or "7 days"
  logical_backup_s3_retention_time: ""
  # backup schedule in the cron format
  logical_backup_schedule: "30 00 * * *"
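  # The default above runs daily at 00:30; a weekly Sunday run (illustrative alternative) would be:
  # logical_backup_schedule: "30 00 * * 0"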
  # secret to be used as reference for env variables in cronjob
  logical_backup_cronjob_environment_secret: ""

# automate creation of human users with teams API service
configTeamsApi:
  # team_admin_role will have the rights to grant roles coming from PG manifests
  enable_admin_role_for_users: true
  # operator watches for PostgresTeam CRs to assign additional teams and members to clusters
  enable_postgres_team_crd: false
  # toggle to create additional superuser teams from PostgresTeam CRs
  enable_postgres_team_crd_superusers: false
  # toggle to automatically rename roles of former team members and deny LOGIN
  enable_team_member_deprecation: false
  # toggle to grant superuser to team members created from the Teams API
  enable_team_superuser: false
  # toggles usage of the Teams API by the operator
  enable_teams_api: false
  # should contain a URL to use for authentication (username and token)
  # pam_configuration: https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees

  # operator will add all team member roles to this group and add a pg_hba line
  pam_role_name: zalandos
  # List of teams whose members need the superuser role in each Postgres cluster
  postgres_superuser_teams:
    - postgres_superusers
  # List of roles that cannot be overwritten by an application, team or infrastructure role
  protected_role_names:
    - admin
    - cron_admin
  # Suffix to add if members are removed from TeamsAPI or PostgresTeam CRD
  role_deletion_suffix: "_deleted"
  # role name to grant to team members created from the Teams API
  team_admin_role: admin
  # postgres config parameters to apply to each team member role
  team_api_role_configuration:
    log_statement: all
  # URL of the Teams API service
  # teams_api_url: http://fake-teams-api.default.svc.cluster.local

# configure connection pooler deployment created by the operator
configConnectionPooler:
  # db schema to install lookup function into
  connection_pooler_schema: "pooler"
  # db user for pooler to use
  connection_pooler_user: "pooler"
  # docker image
  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
  # max db connections the pooler should hold
  connection_pooler_max_db_connections: 60
  # default pooling mode
  connection_pooler_mode: "transaction"
  # number of pooler instances
  connection_pooler_number_of_instances: 2
  # default resources
  connection_pooler_default_cpu_request: 500m
  connection_pooler_default_memory_request: 100Mi
  connection_pooler_default_cpu_limit: "1"
  connection_pooler_default_memory_limit: 100Mi

configPatroni:
  # enable Patroni DCS failsafe_mode feature
  enable_patroni_failsafe_mode: false

# Zalando's internal CDC stream feature
enableStreams: false

rbac:
  # Specifies whether RBAC resources should be created
  create: true
  # Specifies whether ClusterRoles that are aggregated into the K8s default roles should be created. (https://kubernetes.io/docs/reference/access-authn-authz/rbac/#default-roles-and-role-bindings)
  createAggregateClusterRoles: false

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

podServiceAccount:
  # The name of the ServiceAccount to be used by postgres cluster pods
  # If not set a name is generated using the fullname template and "-pod" suffix
  name: "postgres-pod"

# priority class for operator pod
priorityClassName: ""

# priority class for database pods
podPriorityClassName:
  # If create is false with no name set, no podPriorityClassName is specified.
  # Hence, the pod priorityClass is the one with globalDefault set.
  # If there is no PriorityClass with globalDefault set, the priority of Pods with no priorityClassName is zero.
  create: true
  # If not set a name is generated using the fullname template and "-pod" suffix
  name: ""
  priority: 1000000

resources:
  limits:
    cpu: 500m
    memory: 500Mi
  requests:
    cpu: 100m
    memory: 250Mi

securityContext:
  runAsUser: 1000
  runAsNonRoot: true
  readOnlyRootFilesystem: true
  allowPrivilegeEscalation: false

# Allows configuring the operator Deployment's readiness probe
readinessProbe:
  initialDelaySeconds: 5
  periodSeconds: 10

# configure extra environment variables
# Extra environment variables are written in Kubernetes format and added "as is" to the pod's env variables
# https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
# https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
extraEnvs:
  []
  # Example of setting the maximum amount of memory / CPU that can be used by the Go process (to match resources.limits)
  # - name: MY_VAR
  #   value: my-value
  # - name: GOMAXPROCS
  #   valueFrom:
  #     resourceFieldRef:
  #       resource: limits.cpu
  # - name: GOMEMLIMIT
  #   valueFrom:
  #     resourceFieldRef:
  #       resource: limits.memory

# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}

# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}

# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []

controllerID:
  # Specifies whether a controller ID should be defined for the operator
  # Note, all postgres manifests must then contain the following annotation to be found by this operator
  # "acid.zalan.do/controller": <controller-ID-of-the-operator>
  create: false
  # The name of the controller ID to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
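
# A sketch of the matching annotation in a postgres manifest, assuming the controller ID
# is set to "my-operator" (illustrative value, as are the manifest names):
# apiVersion: acid.zalan.do/v1
# kind: postgresql
# metadata:
#   name: acid-minimal-cluster
#   annotations:
#     "acid.zalan.do/controller": "my-operator"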