From 9fe2c2adf24aa26e02b134aed3fa4c2de470bd11 Mon Sep 17 00:00:00 2001 From: ace Date: Sun, 17 Jan 2021 04:09:41 +0300 Subject: [PATCH] GHP publish --- README.md | 7 + adguard-home/.helmignore | 24 + adguard-home/Chart.yaml | 15 + adguard-home/README.md | 64 ++ adguard-home/templates/NOTES.txt | 15 + adguard-home/templates/_helpers.tpl | 32 + adguard-home/templates/config-pvc.yaml | 29 + adguard-home/templates/configmap.yaml | 14 + adguard-home/templates/deployment.yaml | 201 +++++ adguard-home/templates/ingress.yaml | 38 + adguard-home/templates/service-dhcp.yaml | 45 + .../templates/service-dns-over-https.yaml | 37 + .../templates/service-dns-over-tls.yaml | 37 + adguard-home/templates/service-tcp.yaml | 37 + adguard-home/templates/service-udp.yaml | 37 + adguard-home/templates/service.yaml | 35 + adguard-home/templates/servicemonitor.yaml | 23 + adguard-home/templates/work-pvc.yaml | 29 + adguard-home/values.yaml | 376 ++++++++ bitwarden/.helmignore | 21 + bitwarden/Chart.yaml | 14 + bitwarden/templates/NOTES.txt | 19 + bitwarden/templates/_helpers.tpl | 32 + bitwarden/templates/deployment.yaml | 81 ++ bitwarden/templates/ingress.yaml | 38 + .../templates/persistent-volume-claim.yaml | 24 + bitwarden/templates/service.yaml | 22 + bitwarden/values.yaml | 86 ++ chartmuseum/.helmignore | 23 + chartmuseum/Chart.yaml | 12 + chartmuseum/README.md | 749 ++++++++++++++++ chartmuseum/ci/ingress-values.yaml | 9 + chartmuseum/templates/NOTES.txt | 30 + chartmuseum/templates/_helpers.tpl | 142 +++ chartmuseum/templates/deployment.yaml | 220 +++++ chartmuseum/templates/ingress.yaml | 54 ++ chartmuseum/templates/pv.yaml | 21 + chartmuseum/templates/pvc.yaml | 27 + chartmuseum/templates/secret.yaml | 22 + chartmuseum/templates/service.yaml | 45 + chartmuseum/templates/serviceaccount.yaml | 17 + chartmuseum/templates/servicemonitor.yaml | 34 + chartmuseum/values.yaml | 306 +++++++ docker-registry/.helmignore | 21 + docker-registry/Chart.yaml | 10 + docker-registry/README.md | 95 ++ docker-registry/templates/NOTES.txt | 19 + docker-registry/templates/_helpers.tpl | 24 + docker-registry/templates/configmap.yaml | 12 + docker-registry/templates/deployment.yaml | 221 +++++ docker-registry/templates/ingress.yaml | 36 + .../templates/poddisruptionbudget.yaml | 17 + docker-registry/templates/pvc.yaml | 26 + docker-registry/templates/secret.yaml | 37 + docker-registry/templates/service.yaml | 35 + docker-registry/values.yaml | 147 +++ dovecot/.helmignore | 23 + dovecot/Chart.yaml | 21 + dovecot/templates/NOTES.txt | 15 + dovecot/templates/_helpers.tpl | 63 ++ dovecot/templates/configmap.yaml | 21 + dovecot/templates/deployment.yaml | 106 +++ .../templates/persistent-volume-claim.yaml | 24 + dovecot/templates/service.yaml | 42 + dovecot/templates/serviceaccount.yaml | 12 + dovecot/values.yaml | 212 +++++ gitea/Chart.yaml | 23 + gitea/LICENSE | 21 + gitea/README.md | 188 ++++ gitea/charts/mariadb/.helmignore | 21 + gitea/charts/mariadb/Chart.lock | 6 + gitea/charts/mariadb/Chart.yaml | 30 + gitea/charts/mariadb/README.md | 465 ++++++++++ .../charts/mariadb/charts/common/.helmignore | 22 + gitea/charts/mariadb/charts/common/Chart.yaml | 23 + gitea/charts/mariadb/charts/common/README.md | 309 +++++++ .../charts/common/templates/_affinities.tpl | 94 ++ .../charts/common/templates/_capabilities.tpl | 33 + .../charts/common/templates/_errors.tpl | 20 + .../charts/common/templates/_images.tpl | 43 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + 
.../charts/common/templates/_secrets.tpl | 57 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 45 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 ++ .../common/templates/validations/_mariadb.tpl | 103 +++ .../common/templates/validations/_mongodb.tpl | 108 +++ .../templates/validations/_postgresql.tpl | 131 +++ .../common/templates/validations/_redis.tpl | 72 ++ .../templates/validations/_validations.tpl | 44 + .../charts/mariadb/charts/common/values.yaml | 3 + ...lues-production-with-rbac-and-metrics.yaml | 33 + gitea/charts/mariadb/templates/NOTES.txt | 50 ++ gitea/charts/mariadb/templates/_helpers.tpl | 150 ++++ .../mariadb/templates/primary/configmap.yaml | 18 + .../primary/initialization-configmap.yaml | 11 + .../charts/mariadb/templates/primary/pdb.yaml | 25 + .../templates/primary/statefulset.yaml | 327 +++++++ .../charts/mariadb/templates/primary/svc.yaml | 49 + gitea/charts/mariadb/templates/role.yaml | 21 + .../charts/mariadb/templates/rolebinding.yaml | 21 + .../templates/secondary/configmap.yaml | 18 + .../mariadb/templates/secondary/pdb.yaml | 25 + .../templates/secondary/statefulset.yaml | 300 +++++++ .../mariadb/templates/secondary/svc.yaml | 51 ++ gitea/charts/mariadb/templates/secrets.yaml | 39 + .../mariadb/templates/serviceaccount.yaml | 18 + .../mariadb/templates/servicemonitor.yaml | 41 + gitea/charts/mariadb/values-production.yaml | 841 ++++++++++++++++++ gitea/charts/mariadb/values.schema.json | 176 ++++ gitea/charts/mariadb/values.yaml | 838 +++++++++++++++++ gitea/templates/NOTES.txt | 45 + gitea/templates/_helpers.tpl | 31 + gitea/templates/deployment.yaml | 55 ++ gitea/templates/gitea/_container.tpl | 60 ++ gitea/templates/gitea/gitea-config.yaml | 728 +++++++++++++++ gitea/templates/gitea/gitea-pvc.yaml | 59 ++ gitea/templates/gitea/gitea-svc.yaml | 25 + gitea/templates/gitea/post-install-job.yaml | 68 ++ gitea/templates/ingress.yaml | 42 + gitea/templates/init/_container.tpl | 42 + gitea/templates/memcached/_container.tpl | 35 + gitea/values.yaml | 260 ++++++ nextcloud/.helmignore | 21 + nextcloud/Chart.lock | 12 + nextcloud/Chart.yaml | 37 + nextcloud/README.md | 269 ++++++ nextcloud/charts/mariadb/.helmignore | 21 + nextcloud/charts/mariadb/Chart.yaml | 24 + nextcloud/charts/mariadb/README.md | 343 +++++++ .../ci/values-production-with-rbac.yaml | 31 + .../docker-entrypoint-initdb.d/README.md | 3 + nextcloud/charts/mariadb/templates/NOTES.txt | 49 + .../charts/mariadb/templates/_helpers.tpl | 288 ++++++ .../templates/initialization-configmap.yaml | 27 + .../mariadb/templates/master-configmap.yaml | 16 + .../charts/mariadb/templates/master-pdb.yaml | 25 + .../mariadb/templates/master-statefulset.yaml | 347 ++++++++ .../charts/mariadb/templates/master-svc.yaml | 47 + nextcloud/charts/mariadb/templates/role.yaml | 19 + .../charts/mariadb/templates/rolebinding.yaml | 19 + .../charts/mariadb/templates/secrets.yaml | 44 + .../mariadb/templates/serviceaccount.yaml | 13 + .../mariadb/templates/servicemonitor.yaml | 35 + .../mariadb/templates/slave-configmap.yaml | 16 + .../charts/mariadb/templates/slave-pdb.yaml | 27 + .../mariadb/templates/slave-statefulset.yaml | 317 +++++++ .../charts/mariadb/templates/slave-svc.yaml | 49 + .../charts/mariadb/values-production.yaml | 614 +++++++++++++ nextcloud/charts/mariadb/values.schema.json | 169 ++++ nextcloud/charts/mariadb/values.yaml | 602 +++++++++++++ 
nextcloud/charts/postgresql/.helmignore | 21 + nextcloud/charts/postgresql/Chart.yaml | 25 + nextcloud/charts/postgresql/README.md | 717 +++++++++++++++ .../postgresql/charts/common/.helmignore | 22 + .../postgresql/charts/common/Chart.yaml | 22 + .../charts/postgresql/charts/common/README.md | 286 ++++++ .../charts/common/templates/_affinities.tpl | 94 ++ .../charts/common/templates/_capabilities.tpl | 33 + .../charts/common/templates/_errors.tpl | 20 + .../charts/common/templates/_images.tpl | 43 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 49 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 45 + .../charts/common/templates/_validations.tpl | 278 ++++++ .../charts/common/templates/_warnings.tpl | 14 + .../postgresql/charts/common/values.yaml | 3 + .../postgresql/ci/commonAnnotations.yaml | 3 + .../charts/postgresql/ci/default-values.yaml | 1 + .../ci/shmvolume-disabled-values.yaml | 2 + nextcloud/charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + nextcloud/charts/postgresql/requirements.lock | 6 + nextcloud/charts/postgresql/requirements.yaml | 4 + .../charts/postgresql/templates/NOTES.txt | 59 ++ .../charts/postgresql/templates/_helpers.tpl | 488 ++++++++++ .../postgresql/templates/configmap.yaml | 26 + .../templates/extended-config-configmap.yaml | 21 + .../postgresql/templates/extra-list.yaml | 4 + .../templates/initialization-configmap.yaml | 24 + .../templates/metrics-configmap.yaml | 13 + .../postgresql/templates/metrics-svc.yaml | 25 + .../postgresql/templates/networkpolicy.yaml | 38 + .../templates/podsecuritypolicy.yaml | 37 + .../postgresql/templates/prometheusrule.yaml | 23 + .../charts/postgresql/templates/role.yaml | 19 + .../postgresql/templates/rolebinding.yaml | 19 + .../charts/postgresql/templates/secrets.yaml | 21 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 403 +++++++++ .../postgresql/templates/statefulset.yaml | 580 ++++++++++++ .../postgresql/templates/svc-headless.yaml | 27 + .../charts/postgresql/templates/svc-read.yaml | 42 + .../charts/postgresql/templates/svc.yaml | 40 + .../charts/postgresql/values-production.yaml | 711 +++++++++++++++ .../charts/postgresql/values.schema.json | 103 +++ nextcloud/charts/postgresql/values.yaml | 728 +++++++++++++++ nextcloud/charts/redis/.helmignore | 21 + nextcloud/charts/redis/Chart.yaml | 23 + nextcloud/charts/redis/README.md | 667 ++++++++++++++ .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/production-sentinel-values.yaml | 682 ++++++++++++++ .../redis/img/redis-cluster-topology.png | Bin 0 -> 11448 bytes nextcloud/charts/redis/img/redis-topology.png | Bin 0 -> 9709 bytes nextcloud/charts/redis/templates/NOTES.txt | 136 +++ nextcloud/charts/redis/templates/_helpers.tpl | 421 +++++++++ .../redis/templates/configmap-scripts.yaml | 349 ++++++++ .../charts/redis/templates/configmap.yaml | 53 ++ .../charts/redis/templates/headless-svc.yaml | 25 + .../redis/templates/health-configmap.yaml | 201 +++++ .../redis/templates/metrics-prometheus.yaml | 33 + .../charts/redis/templates/metrics-svc.yaml | 31 + .../charts/redis/templates/networkpolicy.yaml | 74 ++ nextcloud/charts/redis/templates/pdb.yaml | 21 + .../redis/templates/prometheusrule.yaml | 25 + 
nextcloud/charts/redis/templates/psp.yaml | 43 + .../templates/redis-master-statefulset.yaml | 346 +++++++ .../redis/templates/redis-master-svc.yaml | 40 + .../templates/redis-node-statefulset.yaml | 450 ++++++++++ .../charts/redis/templates/redis-role.yaml | 22 + .../redis/templates/redis-rolebinding.yaml | 19 + .../redis/templates/redis-serviceaccount.yaml | 12 + .../templates/redis-slave-statefulset.yaml | 354 ++++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + nextcloud/charts/redis/templates/secret.yaml | 15 + nextcloud/charts/redis/values-production.yaml | 784 ++++++++++++++++ nextcloud/charts/redis/values.schema.json | 168 ++++ nextcloud/charts/redis/values.yaml | 784 ++++++++++++++++ nextcloud/templates/NOTES.txt | 94 ++ nextcloud/templates/_helpers.tpl | 49 + nextcloud/templates/config.yaml | 127 +++ nextcloud/templates/cronjob.yaml | 75 ++ nextcloud/templates/db-secret.yaml | 22 + nextcloud/templates/deployment.yaml | 360 ++++++++ nextcloud/templates/hpa.yaml | 20 + nextcloud/templates/ingress.yaml | 31 + nextcloud/templates/metrics-deployment.yaml | 55 ++ nextcloud/templates/metrics-service.yaml | 31 + nextcloud/templates/nextcloud-pvc.yaml | 31 + nextcloud/templates/nginx-config.yaml | 185 ++++ nextcloud/templates/php-config.yaml | 16 + nextcloud/templates/secrets.yaml | 21 + nextcloud/templates/service.yaml | 25 + nextcloud/values.yaml | 399 +++++++++ nfs-client-provisioner/.helmignore | 21 + nfs-client-provisioner/Chart.yaml | 13 + nfs-client-provisioner/README.md | 82 ++ nfs-client-provisioner/ci/test-values.yaml | 5 + nfs-client-provisioner/templates/_helpers.tpl | 62 ++ .../templates/clusterrole.yaml | 30 + .../templates/clusterrolebinding.yaml | 19 + .../templates/deployment.yaml | 77 ++ .../templates/persistentvolume.yaml | 25 + .../templates/persistentvolumeclaim.yaml | 17 + .../templates/podsecuritypolicy.yaml | 31 + nfs-client-provisioner/templates/role.yaml | 21 + .../templates/rolebinding.yaml | 19 + .../templates/serviceaccount.yaml | 11 + .../templates/storageclass.yaml | 26 + nfs-client-provisioner/values.yaml | 78 ++ opendkim/.helmignore | 23 + opendkim/Chart.yaml | 21 + opendkim/templates/NOTES.txt | 15 + opendkim/templates/_helpers.tpl | 63 ++ opendkim/templates/configmap.yaml | 10 + opendkim/templates/deployment.yaml | 91 ++ .../templates/persistent-volume-claim.yaml | 24 + opendkim/templates/service.yaml | 15 + opendkim/templates/serviceaccount.yaml | 12 + opendkim/values.yaml | 96 ++ opendmarc/.helmignore | 23 + opendmarc/Chart.yaml | 21 + opendmarc/templates/NOTES.txt | 15 + opendmarc/templates/_helpers.tpl | 63 ++ opendmarc/templates/configmap.yaml | 11 + opendmarc/templates/deployment.yaml | 66 ++ .../templates/persistent-volume-claim.yaml | 24 + opendmarc/templates/service.yaml | 15 + opendmarc/templates/serviceaccount.yaml | 12 + opendmarc/values.yaml | 69 ++ openfaas/Chart.yaml | 21 + openfaas/OWNERS | 8 + openfaas/README.md | 459 ++++++++++ openfaas/templates/NOTES.txt | 9 + openfaas/templates/_helpers.tpl | 20 + openfaas/templates/alertmanager-cfg.yaml | 47 + openfaas/templates/alertmanager-dep.yaml | 108 +++ openfaas/templates/alertmanager-svc.yaml | 22 + openfaas/templates/basic-auth-plugin-dep.yaml | 106 +++ openfaas/templates/basic-auth-plugin-svc.yaml | 25 + openfaas/templates/controller-rbac.yaml | 227 +++++ openfaas/templates/faas-idler-dep.yaml | 75 ++ openfaas/templates/function-crd.yaml | 106 +++ openfaas/templates/gateway-dep.yaml | 260 ++++++ 
openfaas/templates/gateway-external-svc.yaml | 29 + openfaas/templates/gateway-svc.yaml | 20 + openfaas/templates/ingress-operator-crd.yaml | 94 ++ openfaas/templates/ingress-operator-dep.yaml | 41 + openfaas/templates/ingress-operator-rbac.yaml | 65 ++ openfaas/templates/ingress.yaml | 31 + openfaas/templates/istio-mtls.yaml | 58 ++ openfaas/templates/nats-dep.yaml | 82 ++ openfaas/templates/nats-svc.yaml | 32 + openfaas/templates/oauth2-plugin-dep.yaml | 139 +++ openfaas/templates/oauth2-plugin-svc.yaml | 26 + openfaas/templates/operator-rbac.yaml | 124 +++ openfaas/templates/profile-crd.yaml | 830 +++++++++++++++++ openfaas/templates/prometheus-cfg.yaml | 82 ++ openfaas/templates/prometheus-dep.yaml | 108 +++ openfaas/templates/prometheus-rbac.yaml | 162 ++++ openfaas/templates/prometheus-svc.yaml | 22 + openfaas/templates/psp.yaml | 69 ++ openfaas/templates/queueworker-dep.yaml | 87 ++ openfaas/templates/secret.yaml | 19 + openfaas/values-arm64.yaml | 48 + openfaas/values-armhf.yaml | 48 + openfaas/values.yaml | 203 +++++ openldap/.helmignore | 21 + openldap/Chart.yaml | 14 + openldap/README.md | 110 +++ openldap/templates/NOTES.txt | 20 + openldap/templates/_helpers.tpl | 40 + openldap/templates/configmap-customldif.yaml | 23 + openldap/templates/configmap-env.yaml | 20 + openldap/templates/deployment.yaml | 177 ++++ openldap/templates/pvc.yaml | 27 + openldap/templates/secret.yaml | 18 + openldap/templates/service.yaml | 44 + .../templates/tests/openldap-test-runner.yaml | 50 ++ openldap/templates/tests/openldap-tests.yaml | 22 + openldap/values.yaml | 120 +++ peertube/.helmignore | 23 + peertube/Chart.yaml | 15 + peertube/charts/postgresql-10.2.1.tgz | Bin 0 -> 52736 bytes peertube/charts/redis-12.3.2.tgz | Bin 0 -> 66001 bytes peertube/templates/NOTES.txt | 22 + peertube/templates/_helpers.tpl | 78 ++ peertube/templates/configmap.yaml | 14 + peertube/templates/deployment.yaml | 113 +++ peertube/templates/hpa.yaml | 28 + peertube/templates/ingress.yaml | 41 + peertube/templates/persistentvolumeclaim.yaml | 28 + peertube/templates/service.yaml | 15 + peertube/templates/serviceaccount.yaml | 12 + peertube/templates/tests/test-connection.yaml | 15 + peertube/values.yaml | 566 ++++++++++++ playmaker/.helmignore | 23 + playmaker/Chart.yaml | 23 + playmaker/templates/NOTES.txt | 21 + playmaker/templates/_helpers.tpl | 62 ++ playmaker/templates/deployment.yaml | 94 ++ playmaker/templates/hpa.yaml | 28 + playmaker/templates/ingress.yaml | 41 + .../templates/persistentvolumeclaim.yaml | 28 + playmaker/templates/secret.yaml | 13 + playmaker/templates/service.yaml | 15 + playmaker/templates/serviceaccount.yaml | 12 + .../templates/tests/test-connection.yaml | 15 + playmaker/values.yaml | 118 +++ postfix/.helmignore | 23 + postfix/Chart.yaml | 21 + postfix/templates/NOTES.txt | 15 + postfix/templates/_helpers.tpl | 63 ++ postfix/templates/configmap.yaml | 11 + postfix/templates/deployment.yaml | 91 ++ .../templates/persistent-volume-claim.yaml | 24 + postfix/templates/service.yaml | 26 + postfix/templates/serviceaccount.yaml | 12 + postfix/values.yaml | 338 +++++++ postgres-operator-ui/.helmignore | 22 + postgres-operator-ui/Chart.yaml | 19 + postgres-operator-ui/index.yaml | 52 ++ postgres-operator-ui/templates/NOTES.txt | 3 + postgres-operator-ui/templates/_helpers.tpl | 39 + .../templates/clusterrole.yaml | 53 ++ .../templates/clusterrolebinding.yaml | 19 + .../templates/deployment.yaml | 73 ++ postgres-operator-ui/templates/ingress.yaml | 44 + postgres-operator-ui/templates/service.yaml | 
23 + .../templates/serviceaccount.yaml | 11 + postgres-operator-ui/values.yaml | 63 ++ postgres-operator/.helmignore | 21 + postgres-operator/Chart.yaml | 18 + .../crds/operatorconfigurations.yaml | 589 ++++++++++++ postgres-operator/crds/postgresqls.yaml | 608 +++++++++++++ postgres-operator/crds/postgresteams.yaml | 70 ++ postgres-operator/index.yaml | 48 + postgres-operator/templates/NOTES.txt | 3 + postgres-operator/templates/_helpers.tpl | 53 ++ .../templates/clusterrole-postgres-pod.yaml | 53 ++ postgres-operator/templates/clusterrole.yaml | 218 +++++ .../templates/clusterrolebinding.yaml | 19 + postgres-operator/templates/configmap.yaml | 27 + postgres-operator/templates/crds.yaml | 6 + postgres-operator/templates/deployment.yaml | 69 ++ .../templates/operatorconfiguration.yaml | 40 + .../postgres-pod-priority-class.yaml | 15 + postgres-operator/templates/service.yaml | 18 + .../templates/serviceaccount.yaml | 11 + postgres-operator/values-crd.yaml | 361 ++++++++ postgres-operator/values.yaml | 356 ++++++++ pypiserver/.helmignore | 2 + pypiserver/CHANGELOG.md | 69 ++ pypiserver/Chart.yaml | 14 + pypiserver/README.md | 85 ++ pypiserver/templates/NOTES.txt | 19 + pypiserver/templates/_helpers.tpl | 16 + pypiserver/templates/deployment.yaml | 107 +++ pypiserver/templates/ingress.yaml | 39 + pypiserver/templates/pvc.yaml | 26 + pypiserver/templates/secret.yaml | 15 + pypiserver/templates/service.yaml | 49 + pypiserver/values.yaml | 97 ++ roundcube/.helmignore | 21 + roundcube/Chart.yaml | 6 + roundcube/LICENSE | 202 +++++ roundcube/README.md | 1 + roundcube/templates/NOTES.txt | 19 + roundcube/templates/_helpers.tpl | 32 + roundcube/templates/config.yaml | 16 + roundcube/templates/deployment.yaml | 82 ++ roundcube/templates/ingress.yaml | 39 + .../templates/persistentvolumeclaim.yaml | 28 + roundcube/templates/service.yaml | 19 + roundcube/values.yaml | 76 ++ rspamd/.helmignore | 23 + rspamd/Chart.yaml | 21 + rspamd/templates/NOTES.txt | 15 + rspamd/templates/_helpers.tpl | 63 ++ rspamd/templates/configmap.yaml | 1 + rspamd/templates/deployment.yaml | 55 ++ rspamd/templates/persistent-volume-claim.yaml | 24 + rspamd/templates/service.yaml | 15 + rspamd/templates/serviceaccount.yaml | 12 + rspamd/values.yaml | 60 ++ wikijs/.helmignore | 23 + wikijs/Chart.lock | 6 + wikijs/Chart.yaml | 42 + wikijs/README.md | 143 +++ wikijs/charts/postgresql-6.5.0.tgz | Bin 0 -> 23426 bytes wikijs/templates/NOTES.txt | 21 + wikijs/templates/_helpers.tpl | 108 +++ wikijs/templates/deployment.yaml | 84 ++ wikijs/templates/ingress.yaml | 41 + wikijs/templates/service.yaml | 23 + wikijs/templates/serviceaccount.yaml | 12 + wikijs/templates/tests/test-connection.yaml | 15 + wikijs/values.yaml | 118 +++ 457 files changed, 40068 insertions(+) create mode 100644 README.md create mode 100644 adguard-home/.helmignore create mode 100644 adguard-home/Chart.yaml create mode 100644 adguard-home/README.md create mode 100644 adguard-home/templates/NOTES.txt create mode 100644 adguard-home/templates/_helpers.tpl create mode 100644 adguard-home/templates/config-pvc.yaml create mode 100644 adguard-home/templates/configmap.yaml create mode 100644 adguard-home/templates/deployment.yaml create mode 100644 adguard-home/templates/ingress.yaml create mode 100644 adguard-home/templates/service-dhcp.yaml create mode 100644 adguard-home/templates/service-dns-over-https.yaml create mode 100644 adguard-home/templates/service-dns-over-tls.yaml create mode 100644 adguard-home/templates/service-tcp.yaml create mode 100644 
adguard-home/templates/service-udp.yaml create mode 100644 adguard-home/templates/service.yaml create mode 100644 adguard-home/templates/servicemonitor.yaml create mode 100644 adguard-home/templates/work-pvc.yaml create mode 100644 adguard-home/values.yaml create mode 100644 bitwarden/.helmignore create mode 100644 bitwarden/Chart.yaml create mode 100644 bitwarden/templates/NOTES.txt create mode 100644 bitwarden/templates/_helpers.tpl create mode 100644 bitwarden/templates/deployment.yaml create mode 100644 bitwarden/templates/ingress.yaml create mode 100644 bitwarden/templates/persistent-volume-claim.yaml create mode 100644 bitwarden/templates/service.yaml create mode 100644 bitwarden/values.yaml create mode 100755 chartmuseum/.helmignore create mode 100755 chartmuseum/Chart.yaml create mode 100755 chartmuseum/README.md create mode 100755 chartmuseum/ci/ingress-values.yaml create mode 100755 chartmuseum/templates/NOTES.txt create mode 100755 chartmuseum/templates/_helpers.tpl create mode 100755 chartmuseum/templates/deployment.yaml create mode 100755 chartmuseum/templates/ingress.yaml create mode 100755 chartmuseum/templates/pv.yaml create mode 100755 chartmuseum/templates/pvc.yaml create mode 100755 chartmuseum/templates/secret.yaml create mode 100755 chartmuseum/templates/service.yaml create mode 100755 chartmuseum/templates/serviceaccount.yaml create mode 100755 chartmuseum/templates/servicemonitor.yaml create mode 100755 chartmuseum/values.yaml create mode 100644 docker-registry/.helmignore create mode 100644 docker-registry/Chart.yaml create mode 100644 docker-registry/README.md create mode 100644 docker-registry/templates/NOTES.txt create mode 100644 docker-registry/templates/_helpers.tpl create mode 100644 docker-registry/templates/configmap.yaml create mode 100644 docker-registry/templates/deployment.yaml create mode 100644 docker-registry/templates/ingress.yaml create mode 100644 docker-registry/templates/poddisruptionbudget.yaml create mode 100644 docker-registry/templates/pvc.yaml create mode 100644 docker-registry/templates/secret.yaml create mode 100644 docker-registry/templates/service.yaml create mode 100644 docker-registry/values.yaml create mode 100644 dovecot/.helmignore create mode 100644 dovecot/Chart.yaml create mode 100644 dovecot/templates/NOTES.txt create mode 100644 dovecot/templates/_helpers.tpl create mode 100644 dovecot/templates/configmap.yaml create mode 100644 dovecot/templates/deployment.yaml create mode 100644 dovecot/templates/persistent-volume-claim.yaml create mode 100644 dovecot/templates/service.yaml create mode 100644 dovecot/templates/serviceaccount.yaml create mode 100644 dovecot/values.yaml create mode 100644 gitea/Chart.yaml create mode 100644 gitea/LICENSE create mode 100644 gitea/README.md create mode 100644 gitea/charts/mariadb/.helmignore create mode 100644 gitea/charts/mariadb/Chart.lock create mode 100644 gitea/charts/mariadb/Chart.yaml create mode 100644 gitea/charts/mariadb/README.md create mode 100644 gitea/charts/mariadb/charts/common/.helmignore create mode 100644 gitea/charts/mariadb/charts/common/Chart.yaml create mode 100644 gitea/charts/mariadb/charts/common/README.md create mode 100644 gitea/charts/mariadb/charts/common/templates/_affinities.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/_capabilities.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/_errors.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/_images.tpl create mode 100644 
gitea/charts/mariadb/charts/common/templates/_labels.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/_names.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/_secrets.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/_storage.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/_tplvalues.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/_utils.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/_warnings.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/validations/_cassandra.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/validations/_mariadb.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/validations/_mongodb.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/validations/_postgresql.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/validations/_redis.tpl create mode 100644 gitea/charts/mariadb/charts/common/templates/validations/_validations.tpl create mode 100644 gitea/charts/mariadb/charts/common/values.yaml create mode 100644 gitea/charts/mariadb/ci/values-production-with-rbac-and-metrics.yaml create mode 100644 gitea/charts/mariadb/templates/NOTES.txt create mode 100644 gitea/charts/mariadb/templates/_helpers.tpl create mode 100644 gitea/charts/mariadb/templates/primary/configmap.yaml create mode 100644 gitea/charts/mariadb/templates/primary/initialization-configmap.yaml create mode 100644 gitea/charts/mariadb/templates/primary/pdb.yaml create mode 100644 gitea/charts/mariadb/templates/primary/statefulset.yaml create mode 100644 gitea/charts/mariadb/templates/primary/svc.yaml create mode 100644 gitea/charts/mariadb/templates/role.yaml create mode 100644 gitea/charts/mariadb/templates/rolebinding.yaml create mode 100644 gitea/charts/mariadb/templates/secondary/configmap.yaml create mode 100644 gitea/charts/mariadb/templates/secondary/pdb.yaml create mode 100644 gitea/charts/mariadb/templates/secondary/statefulset.yaml create mode 100644 gitea/charts/mariadb/templates/secondary/svc.yaml create mode 100644 gitea/charts/mariadb/templates/secrets.yaml create mode 100644 gitea/charts/mariadb/templates/serviceaccount.yaml create mode 100644 gitea/charts/mariadb/templates/servicemonitor.yaml create mode 100644 gitea/charts/mariadb/values-production.yaml create mode 100644 gitea/charts/mariadb/values.schema.json create mode 100644 gitea/charts/mariadb/values.yaml create mode 100644 gitea/templates/NOTES.txt create mode 100644 gitea/templates/_helpers.tpl create mode 100644 gitea/templates/deployment.yaml create mode 100644 gitea/templates/gitea/_container.tpl create mode 100644 gitea/templates/gitea/gitea-config.yaml create mode 100644 gitea/templates/gitea/gitea-pvc.yaml create mode 100644 gitea/templates/gitea/gitea-svc.yaml create mode 100644 gitea/templates/gitea/post-install-job.yaml create mode 100644 gitea/templates/ingress.yaml create mode 100644 gitea/templates/init/_container.tpl create mode 100644 gitea/templates/memcached/_container.tpl create mode 100644 gitea/values.yaml create mode 100644 nextcloud/.helmignore create mode 100644 nextcloud/Chart.lock create mode 100644 nextcloud/Chart.yaml create mode 100644 nextcloud/README.md create mode 100644 nextcloud/charts/mariadb/.helmignore create mode 100644 nextcloud/charts/mariadb/Chart.yaml create mode 100644 nextcloud/charts/mariadb/README.md create mode 100644 nextcloud/charts/mariadb/ci/values-production-with-rbac.yaml create mode 
100644 nextcloud/charts/mariadb/files/docker-entrypoint-initdb.d/README.md create mode 100644 nextcloud/charts/mariadb/templates/NOTES.txt create mode 100644 nextcloud/charts/mariadb/templates/_helpers.tpl create mode 100644 nextcloud/charts/mariadb/templates/initialization-configmap.yaml create mode 100644 nextcloud/charts/mariadb/templates/master-configmap.yaml create mode 100644 nextcloud/charts/mariadb/templates/master-pdb.yaml create mode 100644 nextcloud/charts/mariadb/templates/master-statefulset.yaml create mode 100644 nextcloud/charts/mariadb/templates/master-svc.yaml create mode 100644 nextcloud/charts/mariadb/templates/role.yaml create mode 100644 nextcloud/charts/mariadb/templates/rolebinding.yaml create mode 100644 nextcloud/charts/mariadb/templates/secrets.yaml create mode 100644 nextcloud/charts/mariadb/templates/serviceaccount.yaml create mode 100644 nextcloud/charts/mariadb/templates/servicemonitor.yaml create mode 100644 nextcloud/charts/mariadb/templates/slave-configmap.yaml create mode 100644 nextcloud/charts/mariadb/templates/slave-pdb.yaml create mode 100644 nextcloud/charts/mariadb/templates/slave-statefulset.yaml create mode 100644 nextcloud/charts/mariadb/templates/slave-svc.yaml create mode 100644 nextcloud/charts/mariadb/values-production.yaml create mode 100644 nextcloud/charts/mariadb/values.schema.json create mode 100644 nextcloud/charts/mariadb/values.yaml create mode 100644 nextcloud/charts/postgresql/.helmignore create mode 100644 nextcloud/charts/postgresql/Chart.yaml create mode 100644 nextcloud/charts/postgresql/README.md create mode 100644 nextcloud/charts/postgresql/charts/common/.helmignore create mode 100644 nextcloud/charts/postgresql/charts/common/Chart.yaml create mode 100644 nextcloud/charts/postgresql/charts/common/README.md create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_affinities.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_capabilities.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_errors.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_images.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_labels.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_names.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_secrets.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_storage.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_tplvalues.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_utils.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_validations.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/templates/_warnings.tpl create mode 100644 nextcloud/charts/postgresql/charts/common/values.yaml create mode 100644 nextcloud/charts/postgresql/ci/commonAnnotations.yaml create mode 100644 nextcloud/charts/postgresql/ci/default-values.yaml create mode 100644 nextcloud/charts/postgresql/ci/shmvolume-disabled-values.yaml create mode 100644 nextcloud/charts/postgresql/files/README.md create mode 100644 nextcloud/charts/postgresql/files/conf.d/README.md create mode 100644 nextcloud/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100644 nextcloud/charts/postgresql/requirements.lock create mode 100644 nextcloud/charts/postgresql/requirements.yaml create mode 100644 nextcloud/charts/postgresql/templates/NOTES.txt create mode 100644 
nextcloud/charts/postgresql/templates/_helpers.tpl create mode 100644 nextcloud/charts/postgresql/templates/configmap.yaml create mode 100644 nextcloud/charts/postgresql/templates/extended-config-configmap.yaml create mode 100644 nextcloud/charts/postgresql/templates/extra-list.yaml create mode 100644 nextcloud/charts/postgresql/templates/initialization-configmap.yaml create mode 100644 nextcloud/charts/postgresql/templates/metrics-configmap.yaml create mode 100644 nextcloud/charts/postgresql/templates/metrics-svc.yaml create mode 100644 nextcloud/charts/postgresql/templates/networkpolicy.yaml create mode 100644 nextcloud/charts/postgresql/templates/podsecuritypolicy.yaml create mode 100644 nextcloud/charts/postgresql/templates/prometheusrule.yaml create mode 100644 nextcloud/charts/postgresql/templates/role.yaml create mode 100644 nextcloud/charts/postgresql/templates/rolebinding.yaml create mode 100644 nextcloud/charts/postgresql/templates/secrets.yaml create mode 100644 nextcloud/charts/postgresql/templates/serviceaccount.yaml create mode 100644 nextcloud/charts/postgresql/templates/servicemonitor.yaml create mode 100644 nextcloud/charts/postgresql/templates/statefulset-slaves.yaml create mode 100644 nextcloud/charts/postgresql/templates/statefulset.yaml create mode 100644 nextcloud/charts/postgresql/templates/svc-headless.yaml create mode 100644 nextcloud/charts/postgresql/templates/svc-read.yaml create mode 100644 nextcloud/charts/postgresql/templates/svc.yaml create mode 100644 nextcloud/charts/postgresql/values-production.yaml create mode 100644 nextcloud/charts/postgresql/values.schema.json create mode 100644 nextcloud/charts/postgresql/values.yaml create mode 100644 nextcloud/charts/redis/.helmignore create mode 100644 nextcloud/charts/redis/Chart.yaml create mode 100644 nextcloud/charts/redis/README.md create mode 100644 nextcloud/charts/redis/ci/extra-flags-values.yaml create mode 100644 nextcloud/charts/redis/ci/production-sentinel-values.yaml create mode 100644 nextcloud/charts/redis/img/redis-cluster-topology.png create mode 100644 nextcloud/charts/redis/img/redis-topology.png create mode 100644 nextcloud/charts/redis/templates/NOTES.txt create mode 100644 nextcloud/charts/redis/templates/_helpers.tpl create mode 100644 nextcloud/charts/redis/templates/configmap-scripts.yaml create mode 100644 nextcloud/charts/redis/templates/configmap.yaml create mode 100644 nextcloud/charts/redis/templates/headless-svc.yaml create mode 100644 nextcloud/charts/redis/templates/health-configmap.yaml create mode 100644 nextcloud/charts/redis/templates/metrics-prometheus.yaml create mode 100644 nextcloud/charts/redis/templates/metrics-svc.yaml create mode 100644 nextcloud/charts/redis/templates/networkpolicy.yaml create mode 100644 nextcloud/charts/redis/templates/pdb.yaml create mode 100644 nextcloud/charts/redis/templates/prometheusrule.yaml create mode 100644 nextcloud/charts/redis/templates/psp.yaml create mode 100644 nextcloud/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 nextcloud/charts/redis/templates/redis-master-svc.yaml create mode 100644 nextcloud/charts/redis/templates/redis-node-statefulset.yaml create mode 100644 nextcloud/charts/redis/templates/redis-role.yaml create mode 100644 nextcloud/charts/redis/templates/redis-rolebinding.yaml create mode 100644 nextcloud/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 nextcloud/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 
nextcloud/charts/redis/templates/redis-slave-svc.yaml create mode 100644 nextcloud/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 nextcloud/charts/redis/templates/secret.yaml create mode 100644 nextcloud/charts/redis/values-production.yaml create mode 100644 nextcloud/charts/redis/values.schema.json create mode 100644 nextcloud/charts/redis/values.yaml create mode 100644 nextcloud/templates/NOTES.txt create mode 100644 nextcloud/templates/_helpers.tpl create mode 100644 nextcloud/templates/config.yaml create mode 100644 nextcloud/templates/cronjob.yaml create mode 100644 nextcloud/templates/db-secret.yaml create mode 100644 nextcloud/templates/deployment.yaml create mode 100644 nextcloud/templates/hpa.yaml create mode 100644 nextcloud/templates/ingress.yaml create mode 100644 nextcloud/templates/metrics-deployment.yaml create mode 100644 nextcloud/templates/metrics-service.yaml create mode 100644 nextcloud/templates/nextcloud-pvc.yaml create mode 100644 nextcloud/templates/nginx-config.yaml create mode 100644 nextcloud/templates/php-config.yaml create mode 100644 nextcloud/templates/secrets.yaml create mode 100644 nextcloud/templates/service.yaml create mode 100644 nextcloud/values.yaml create mode 100644 nfs-client-provisioner/.helmignore create mode 100644 nfs-client-provisioner/Chart.yaml create mode 100644 nfs-client-provisioner/README.md create mode 100644 nfs-client-provisioner/ci/test-values.yaml create mode 100644 nfs-client-provisioner/templates/_helpers.tpl create mode 100644 nfs-client-provisioner/templates/clusterrole.yaml create mode 100644 nfs-client-provisioner/templates/clusterrolebinding.yaml create mode 100644 nfs-client-provisioner/templates/deployment.yaml create mode 100644 nfs-client-provisioner/templates/persistentvolume.yaml create mode 100644 nfs-client-provisioner/templates/persistentvolumeclaim.yaml create mode 100644 nfs-client-provisioner/templates/podsecuritypolicy.yaml create mode 100644 nfs-client-provisioner/templates/role.yaml create mode 100644 nfs-client-provisioner/templates/rolebinding.yaml create mode 100644 nfs-client-provisioner/templates/serviceaccount.yaml create mode 100644 nfs-client-provisioner/templates/storageclass.yaml create mode 100644 nfs-client-provisioner/values.yaml create mode 100644 opendkim/.helmignore create mode 100644 opendkim/Chart.yaml create mode 100644 opendkim/templates/NOTES.txt create mode 100644 opendkim/templates/_helpers.tpl create mode 100644 opendkim/templates/configmap.yaml create mode 100644 opendkim/templates/deployment.yaml create mode 100644 opendkim/templates/persistent-volume-claim.yaml create mode 100644 opendkim/templates/service.yaml create mode 100644 opendkim/templates/serviceaccount.yaml create mode 100644 opendkim/values.yaml create mode 100644 opendmarc/.helmignore create mode 100644 opendmarc/Chart.yaml create mode 100644 opendmarc/templates/NOTES.txt create mode 100644 opendmarc/templates/_helpers.tpl create mode 100644 opendmarc/templates/configmap.yaml create mode 100644 opendmarc/templates/deployment.yaml create mode 100644 opendmarc/templates/persistent-volume-claim.yaml create mode 100644 opendmarc/templates/service.yaml create mode 100644 opendmarc/templates/serviceaccount.yaml create mode 100644 opendmarc/values.yaml create mode 100644 openfaas/Chart.yaml create mode 100644 openfaas/OWNERS create mode 100644 openfaas/README.md create mode 100644 openfaas/templates/NOTES.txt create mode 100644 openfaas/templates/_helpers.tpl create mode 100644 
openfaas/templates/alertmanager-cfg.yaml create mode 100644 openfaas/templates/alertmanager-dep.yaml create mode 100644 openfaas/templates/alertmanager-svc.yaml create mode 100644 openfaas/templates/basic-auth-plugin-dep.yaml create mode 100644 openfaas/templates/basic-auth-plugin-svc.yaml create mode 100644 openfaas/templates/controller-rbac.yaml create mode 100644 openfaas/templates/faas-idler-dep.yaml create mode 100644 openfaas/templates/function-crd.yaml create mode 100644 openfaas/templates/gateway-dep.yaml create mode 100644 openfaas/templates/gateway-external-svc.yaml create mode 100644 openfaas/templates/gateway-svc.yaml create mode 100644 openfaas/templates/ingress-operator-crd.yaml create mode 100644 openfaas/templates/ingress-operator-dep.yaml create mode 100644 openfaas/templates/ingress-operator-rbac.yaml create mode 100644 openfaas/templates/ingress.yaml create mode 100644 openfaas/templates/istio-mtls.yaml create mode 100644 openfaas/templates/nats-dep.yaml create mode 100644 openfaas/templates/nats-svc.yaml create mode 100644 openfaas/templates/oauth2-plugin-dep.yaml create mode 100644 openfaas/templates/oauth2-plugin-svc.yaml create mode 100644 openfaas/templates/operator-rbac.yaml create mode 100644 openfaas/templates/profile-crd.yaml create mode 100644 openfaas/templates/prometheus-cfg.yaml create mode 100644 openfaas/templates/prometheus-dep.yaml create mode 100644 openfaas/templates/prometheus-rbac.yaml create mode 100644 openfaas/templates/prometheus-svc.yaml create mode 100644 openfaas/templates/psp.yaml create mode 100644 openfaas/templates/queueworker-dep.yaml create mode 100644 openfaas/templates/secret.yaml create mode 100644 openfaas/values-arm64.yaml create mode 100644 openfaas/values-armhf.yaml create mode 100644 openfaas/values.yaml create mode 100644 openldap/.helmignore create mode 100644 openldap/Chart.yaml create mode 100644 openldap/README.md create mode 100644 openldap/templates/NOTES.txt create mode 100644 openldap/templates/_helpers.tpl create mode 100644 openldap/templates/configmap-customldif.yaml create mode 100644 openldap/templates/configmap-env.yaml create mode 100644 openldap/templates/deployment.yaml create mode 100644 openldap/templates/pvc.yaml create mode 100644 openldap/templates/secret.yaml create mode 100644 openldap/templates/service.yaml create mode 100644 openldap/templates/tests/openldap-test-runner.yaml create mode 100644 openldap/templates/tests/openldap-tests.yaml create mode 100644 openldap/values.yaml create mode 100644 peertube/.helmignore create mode 100644 peertube/Chart.yaml create mode 100644 peertube/charts/postgresql-10.2.1.tgz create mode 100644 peertube/charts/redis-12.3.2.tgz create mode 100644 peertube/templates/NOTES.txt create mode 100644 peertube/templates/_helpers.tpl create mode 100644 peertube/templates/configmap.yaml create mode 100644 peertube/templates/deployment.yaml create mode 100644 peertube/templates/hpa.yaml create mode 100644 peertube/templates/ingress.yaml create mode 100644 peertube/templates/persistentvolumeclaim.yaml create mode 100644 peertube/templates/service.yaml create mode 100644 peertube/templates/serviceaccount.yaml create mode 100644 peertube/templates/tests/test-connection.yaml create mode 100644 peertube/values.yaml create mode 100644 playmaker/.helmignore create mode 100644 playmaker/Chart.yaml create mode 100644 playmaker/templates/NOTES.txt create mode 100644 playmaker/templates/_helpers.tpl create mode 100644 playmaker/templates/deployment.yaml create mode 100644 
playmaker/templates/hpa.yaml create mode 100644 playmaker/templates/ingress.yaml create mode 100644 playmaker/templates/persistentvolumeclaim.yaml create mode 100644 playmaker/templates/secret.yaml create mode 100644 playmaker/templates/service.yaml create mode 100644 playmaker/templates/serviceaccount.yaml create mode 100644 playmaker/templates/tests/test-connection.yaml create mode 100644 playmaker/values.yaml create mode 100644 postfix/.helmignore create mode 100644 postfix/Chart.yaml create mode 100644 postfix/templates/NOTES.txt create mode 100644 postfix/templates/_helpers.tpl create mode 100644 postfix/templates/configmap.yaml create mode 100644 postfix/templates/deployment.yaml create mode 100644 postfix/templates/persistent-volume-claim.yaml create mode 100644 postfix/templates/service.yaml create mode 100644 postfix/templates/serviceaccount.yaml create mode 100644 postfix/values.yaml create mode 100644 postgres-operator-ui/.helmignore create mode 100644 postgres-operator-ui/Chart.yaml create mode 100644 postgres-operator-ui/index.yaml create mode 100644 postgres-operator-ui/templates/NOTES.txt create mode 100644 postgres-operator-ui/templates/_helpers.tpl create mode 100644 postgres-operator-ui/templates/clusterrole.yaml create mode 100644 postgres-operator-ui/templates/clusterrolebinding.yaml create mode 100644 postgres-operator-ui/templates/deployment.yaml create mode 100644 postgres-operator-ui/templates/ingress.yaml create mode 100644 postgres-operator-ui/templates/service.yaml create mode 100644 postgres-operator-ui/templates/serviceaccount.yaml create mode 100644 postgres-operator-ui/values.yaml create mode 100644 postgres-operator/.helmignore create mode 100644 postgres-operator/Chart.yaml create mode 100644 postgres-operator/crds/operatorconfigurations.yaml create mode 100644 postgres-operator/crds/postgresqls.yaml create mode 100644 postgres-operator/crds/postgresteams.yaml create mode 100644 postgres-operator/index.yaml create mode 100644 postgres-operator/templates/NOTES.txt create mode 100644 postgres-operator/templates/_helpers.tpl create mode 100644 postgres-operator/templates/clusterrole-postgres-pod.yaml create mode 100644 postgres-operator/templates/clusterrole.yaml create mode 100644 postgres-operator/templates/clusterrolebinding.yaml create mode 100644 postgres-operator/templates/configmap.yaml create mode 100644 postgres-operator/templates/crds.yaml create mode 100644 postgres-operator/templates/deployment.yaml create mode 100644 postgres-operator/templates/operatorconfiguration.yaml create mode 100644 postgres-operator/templates/postgres-pod-priority-class.yaml create mode 100644 postgres-operator/templates/service.yaml create mode 100644 postgres-operator/templates/serviceaccount.yaml create mode 100644 postgres-operator/values-crd.yaml create mode 100644 postgres-operator/values.yaml create mode 100644 pypiserver/.helmignore create mode 100644 pypiserver/CHANGELOG.md create mode 100644 pypiserver/Chart.yaml create mode 100644 pypiserver/README.md create mode 100644 pypiserver/templates/NOTES.txt create mode 100644 pypiserver/templates/_helpers.tpl create mode 100644 pypiserver/templates/deployment.yaml create mode 100644 pypiserver/templates/ingress.yaml create mode 100644 pypiserver/templates/pvc.yaml create mode 100644 pypiserver/templates/secret.yaml create mode 100644 pypiserver/templates/service.yaml create mode 100644 pypiserver/values.yaml create mode 100644 roundcube/.helmignore create mode 100644 roundcube/Chart.yaml create mode 100644 
roundcube/LICENSE create mode 100644 roundcube/README.md create mode 100644 roundcube/templates/NOTES.txt create mode 100644 roundcube/templates/_helpers.tpl create mode 100644 roundcube/templates/config.yaml create mode 100644 roundcube/templates/deployment.yaml create mode 100644 roundcube/templates/ingress.yaml create mode 100644 roundcube/templates/persistentvolumeclaim.yaml create mode 100644 roundcube/templates/service.yaml create mode 100644 roundcube/values.yaml create mode 100644 rspamd/.helmignore create mode 100644 rspamd/Chart.yaml create mode 100644 rspamd/templates/NOTES.txt create mode 100644 rspamd/templates/_helpers.tpl create mode 100644 rspamd/templates/configmap.yaml create mode 100644 rspamd/templates/deployment.yaml create mode 100644 rspamd/templates/persistent-volume-claim.yaml create mode 100644 rspamd/templates/service.yaml create mode 100644 rspamd/templates/serviceaccount.yaml create mode 100644 rspamd/values.yaml create mode 100644 wikijs/.helmignore create mode 100644 wikijs/Chart.lock create mode 100644 wikijs/Chart.yaml create mode 100644 wikijs/README.md create mode 100644 wikijs/charts/postgresql-6.5.0.tgz create mode 100644 wikijs/templates/NOTES.txt create mode 100644 wikijs/templates/_helpers.tpl create mode 100644 wikijs/templates/deployment.yaml create mode 100644 wikijs/templates/ingress.yaml create mode 100644 wikijs/templates/service.yaml create mode 100644 wikijs/templates/serviceaccount.yaml create mode 100644 wikijs/templates/tests/test-connection.yaml create mode 100644 wikijs/values.yaml diff --git a/README.md b/README.md new file mode 100644 index 0000000..c6245d2 --- /dev/null +++ b/README.md @@ -0,0 +1,7 @@ +## Helm charts repository +Used for Geek Home Platform deployment + +## Usage + + helm repo add ghp https://charts.geekhome.org + diff --git a/adguard-home/.helmignore b/adguard-home/.helmignore new file mode 100644 index 0000000..e559de0 --- /dev/null +++ b/adguard-home/.helmignore @@ -0,0 +1,24 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +# OWNERS file for Kubernetes +OWNERS diff --git a/adguard-home/Chart.yaml b/adguard-home/Chart.yaml new file mode 100644 index 0000000..fc527fb --- /dev/null +++ b/adguard-home/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +appVersion: v0.102.0 +description: DNS proxy as ad-blocker for local network +home: https://github.com/k8s-at-home/charts/tree/master/charts/adguard-home +icon: https://avatars3.githubusercontent.com/u/8361145?s=200&v=4?sanitize=true +keywords: +- adguard-home +- adguard +- dns +maintainers: +- name: billimek +name: adguard-home +sources: +- https://github.com/AdguardTeam/AdGuardHome +version: 2.2.1 diff --git a/adguard-home/README.md b/adguard-home/README.md new file mode 100644 index 0000000..d3c9dc8 --- /dev/null +++ b/adguard-home/README.md @@ -0,0 +1,64 @@ +# DNS proxy as ad-blocker for local network + +This is an opinionated helm chart for [adguard-home](https://github.com/AdguardTeam/AdGuardHome) + +The default values and container images used in this chart will allow for running in a multi-arch cluster (amd64, arm, arm64) + +## TL;DR + +```shell +helm repo add k8s-at-home https://k8s-at-home.com/charts/ +helm install k8s-at-home/adguard-home +``` + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install --name adguard-home k8s-at-home/adguard-home +``` + +## Uninstalling the Chart + +To uninstall/delete the `adguard-home` deployment: + +```console +helm delete adguard-home --purge +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/adguard-home/values.yaml) file. It has several commented-out suggested values. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install --name adguard-home \ + --set timezone="America/New_York" \ + k8s-at-home/adguard-home +``` + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +helm install --name adguard-home -f values.yaml k8s-at-home/adguard-home +``` + +#### Helm force upgrade + +```sh +helm upgrade --force +``` + +#### Delete the existing `adguard-home` services prior to upgrading + +```sh +kubectl delete svc/adguard-home +``` + +#### Remove the existing adguard-home chart first + +This is the 'easiest' approach, but will incur downtime which can be problematic if you rely on adguard-home for DNS diff --git a/adguard-home/templates/NOTES.txt b/adguard-home/templates/NOTES.txt new file mode 100644 index 0000000..3fe1043 --- /dev/null +++ b/adguard-home/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.serviceUDP.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "adguard-home.fullname" .
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.serviceUDP.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc -w {{ include "adguard-home.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "adguard-home.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.serviceUDP.port }} +{{- else if contains "ClusterIP" .Values.serviceUDP.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "adguard-home.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:3000 +{{- end }} diff --git a/adguard-home/templates/_helpers.tpl b/adguard-home/templates/_helpers.tpl new file mode 100644 index 0000000..a6d7f87 --- /dev/null +++ b/adguard-home/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "adguard-home.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "adguard-home.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "adguard-home.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/adguard-home/templates/config-pvc.yaml b/adguard-home/templates/config-pvc.yaml new file mode 100644 index 0000000..e517b69 --- /dev/null +++ b/adguard-home/templates/config-pvc.yaml @@ -0,0 +1,29 @@ + +{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "adguard-home.fullname" . }}-config + {{- if .Values.persistence.config.skipuninstall }} + annotations: + "helm.sh/resource-policy": keep + {{- end }} + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" .
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + accessModes: + - {{ .Values.persistence.config.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.config.size | quote }} +{{- if .Values.persistence.config.storageClass }} +{{- if (eq "-" .Values.persistence.config.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.config.storageClass }}" +{{- end }} +{{- end }} +{{- end -}} diff --git a/adguard-home/templates/configmap.yaml b/adguard-home/templates/configmap.yaml new file mode 100644 index 0000000..87b97f7 --- /dev/null +++ b/adguard-home/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configAsCode.enabled }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "adguard-home.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: + AdGuardHome.yaml: | + {{- toYaml .Values.configAsCode.config | nindent 4 }} +{{- end }} diff --git a/adguard-home/templates/deployment.yaml b/adguard-home/templates/deployment.yaml new file mode 100644 index 0000000..5e284b9 --- /dev/null +++ b/adguard-home/templates/deployment.yaml @@ -0,0 +1,201 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "adguard-home.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: 1 + strategy: + type: {{ .Values.strategyType }} + revisionHistoryLimit: 3 + selector: + matchLabels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + annotations: + {{- with .Values.podAnnotations }} + {{ toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.configAsCode.enabled }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} + {{- end }} + spec: + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- if or .Values.configAsCode.enabled .Values.securityContext.runAsUser }} + initContainers: + {{- if .Values.configAsCode.enabled }} + - name: "config" + securityContext: + readOnlyRootFilesystem: true + image: "{{ .Values.configAsCode.image.repository }}:{{ .Values.configAsCode.image.tag }}" + imagePullPolicy: {{ .Values.configAsCode.image.pullPolicy }} + command: ["sh", "-c", "cat /configmap/AdGuardHome.yaml > /opt/adguardhome/conf/AdGuardHome.yaml"] + resources: {{- toYaml .Values.configAsCode.resources | nindent 12 }} + volumeMounts: + - name: configmap + mountPath: /configmap + - name: config + mountPath: /opt/adguardhome/conf + readOnly: false + {{- end }} + {{- if .Values.securityContext.runAsUser }} + - name: "volume-permissions" + securityContext: + readOnlyRootFilesystem: true + image: "{{ .Values.volumePermissions.image.repository }}:{{ .Values.volumePermissions.image.tag }}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:", "/opt/adguardhome/work", "/opt/adguardhome/conf"] + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: work + mountPath: /opt/adguardhome/work + readOnly: false + - name: config + mountPath: /opt/adguardhome/conf + readOnly: false + {{- end }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + {{- if .Values.timezone }} + - name: TZ + value: {{ .Values.timezone | quote }} + {{- end }} + volumeMounts: + - name: work + mountPath: /opt/adguardhome/work + readOnly: false + - name: config + mountPath: /opt/adguardhome/conf + readOnly: false + {{- if .Values.tlsSecretName }} + - name: certs + mountPath: /certs + readOnly: false + {{- end }} + ports: + - name: http + {{- if .Values.configAsCode.enabled }} + containerPort: {{ .Values.configAsCode.config.bind_port | default 3000 }} + {{- else }} + containerPort: 3000 + {{- end }} + - name: dns + {{- if .Values.configAsCode.enabled }} + containerPort: {{ .Values.configAsCode.config.dns.port | default 53 }} + {{- else }} + containerPort: 53 + {{- end }} + protocol: TCP + - name: dns-udp + {{- if .Values.configAsCode.enabled }} + containerPort: {{ .Values.configAsCode.config.dns.port | default 53 }} + {{- else }} + containerPort: 53 + {{- end }} + protocol: UDP + {{- if .Values.serviceDHCP.enabled }} + - name: dhcp-server-udp + containerPort: 67 + protocol: UDP + - name: dhcp-client-tcp + containerPort: 68 + protocol: TCP + - name: dhcp-client-udp + containerPort: 68 + protocol: UDP + {{- end }} + {{- if .Values.serviceDNSOverTLS.enabled }} + - name: dns-over-tls + containerPort: 853 + protocol: TCP + {{- end }} + {{- if .Values.serviceDNSOverHTTPS.enabled }} + - name: dns-over-https + containerPort: 443 + protocol: TCP + {{- end }} + {{- if .Values.probes.liveness.enabled }} + livenessProbe: + httpGet: + path: /login.html + port: http + scheme: HTTP + initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }} + failureThreshold: {{ .Values.probes.liveness.failureThreshold }} + periodSeconds: {{ .Values.probes.liveness.periodSeconds }} + {{- end }} + {{- if .Values.probes.readiness.enabled }} + readinessProbe: + httpGet: + path: 
/login.html + port: http + scheme: HTTP + initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }} + failureThreshold: {{ .Values.probes.readiness.failureThreshold }} + periodSeconds: {{ .Values.probes.readiness.periodSeconds }} + {{- end }} + {{- if .Values.probes.startup.enabled }} + startupProbe: + httpGet: + path: /login.html + port: http + scheme: HTTP + initialDelaySeconds: {{ .Values.probes.startup.initialDelaySeconds }} + failureThreshold: {{ .Values.probes.startup.failureThreshold }} + periodSeconds: {{ .Values.probes.startup.periodSeconds }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + {{- if .Values.tlsSecretName }} + - name: certs + secret: + secretName: {{ .Values.tlsSecretName }} + {{- end }} + {{- if .Values.configAsCode.enabled }} + - name: configmap + configMap: + name: {{ include "adguard-home.fullname" . }} + {{- end }} + - name: config + {{- if .Values.persistence.config.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "adguard-home.fullname" . }}-config{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: work + {{- if .Values.persistence.work.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.work.existingClaim }}{{ .Values.persistence.work.existingClaim }}{{- else }}{{ template "adguard-home.fullname" . }}-work{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/adguard-home/templates/ingress.yaml b/adguard-home/templates/ingress.yaml new file mode 100644 index 0000000..67a5519 --- /dev/null +++ b/adguard-home/templates/ingress.yaml @@ -0,0 +1,38 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "adguard-home.fullname" . -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: http + {{- end }} +{{- end }} diff --git a/adguard-home/templates/service-dhcp.yaml b/adguard-home/templates/service-dhcp.yaml new file mode 100644 index 0000000..8479cba --- /dev/null +++ b/adguard-home/templates/service-dhcp.yaml @@ -0,0 +1,45 @@ +{{- if .Values.serviceDHCP.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "adguard-home.fullname" . }}-dhcp + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.serviceDHCP.annotations }} + annotations: +{{ toYaml .Values.serviceDHCP.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.serviceDHCP.type }} + {{- if .Values.serviceDHCP.loadBalancerIP }} + loadBalancerIP: {{ .Values.serviceDHCP.loadBalancerIP }} + {{- end }} + {{- if .Values.serviceDHCP.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ toYaml .Values.serviceDHCP.loadBalancerSourceRanges | indent 4 }} + {{- end -}} + {{- if .Values.serviceDHCP.externalIPs }} + externalIPs: + {{ toYaml .Values.serviceDHCP.externalIPs | indent 4 }} + {{- end }} + externalTrafficPolicy: {{ .Values.serviceDHCP.externalTrafficPolicy }} + ports: + - port: 67 + targetPort: dhcp-server-udp + protocol: UDP + name: dhcp-server-udp + - port: 68 + targetPort: dhcp-client-tcp + protocol: TCP + name: dhcp-client-tcp + - port: 68 + targetPort: dhcp-client-udp + protocol: UDP + name: dhcp-client-udp + selector: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/adguard-home/templates/service-dns-over-https.yaml b/adguard-home/templates/service-dns-over-https.yaml new file mode 100644 index 0000000..20be0ba --- /dev/null +++ b/adguard-home/templates/service-dns-over-https.yaml @@ -0,0 +1,37 @@ +{{- if .Values.serviceDNSOverHTTPS.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "adguard-home.fullname" . }}-dns-over-https + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.serviceDNSOverHTTPS.annotations }} + annotations: +{{ toYaml .Values.serviceDNSOverHTTPS.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.serviceDNSOverHTTPS.type }} + {{- if .Values.serviceDNSOverHTTPS.loadBalancerIP }} + loadBalancerIP: {{ .Values.serviceDNSOverHTTPS.loadBalancerIP }} + {{- end }} + {{- if .Values.serviceDNSOverHTTPS.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ toYaml .Values.serviceDNSOverHTTPS.loadBalancerSourceRanges | indent 4 }} + {{- end -}} + {{- if .Values.serviceDNSOverHTTPS.externalIPs }} + externalIPs: + {{ toYaml .Values.serviceDNSOverHTTPS.externalIPs | indent 4 }} + {{- end }} + externalTrafficPolicy: {{ .Values.serviceDNSOverHTTPS.externalTrafficPolicy }} + ports: + - port: 443 + targetPort: dns-over-https + protocol: TCP + name: dns-over-https + selector: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/adguard-home/templates/service-dns-over-tls.yaml b/adguard-home/templates/service-dns-over-tls.yaml new file mode 100644 index 0000000..dafd63b --- /dev/null +++ b/adguard-home/templates/service-dns-over-tls.yaml @@ -0,0 +1,37 @@ +{{- if .Values.serviceDNSOverTLS.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "adguard-home.fullname" . }}-dns-over-tls + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.serviceDNSOverTLS.annotations }} + annotations: +{{ toYaml .Values.serviceDNSOverTLS.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.serviceDNSOverTLS.type }} + {{- if .Values.serviceDNSOverTLS.loadBalancerIP }} + loadBalancerIP: {{ .Values.serviceDNSOverTLS.loadBalancerIP }} + {{- end }} + {{- if .Values.serviceDNSOverTLS.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ toYaml .Values.serviceDNSOverTLS.loadBalancerSourceRanges | indent 4 }} + {{- end -}} + {{- if .Values.serviceDNSOverTLS.externalIPs }} + externalIPs: + {{ toYaml .Values.serviceDNSOverTLS.externalIPs | indent 4 }} + {{- end }} + externalTrafficPolicy: {{ .Values.serviceDNSOverTLS.externalTrafficPolicy }} + ports: + - port: 853 + targetPort: dns-over-tls + protocol: TCP + name: dns-over-tls + selector: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/adguard-home/templates/service-tcp.yaml b/adguard-home/templates/service-tcp.yaml new file mode 100644 index 0000000..6241405 --- /dev/null +++ b/adguard-home/templates/service-tcp.yaml @@ -0,0 +1,37 @@ +{{- if .Values.serviceTCP.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "adguard-home.fullname" . }}-tcp + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.serviceTCP.annotations }} + annotations: +{{ toYaml .Values.serviceTCP.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.serviceTCP.type }} + {{- if .Values.serviceTCP.loadBalancerIP }} + loadBalancerIP: {{ .Values.serviceTCP.loadBalancerIP }} + {{- end }} + {{- if .Values.serviceTCP.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ toYaml .Values.serviceTCP.loadBalancerSourceRanges | indent 4 }} + {{- end -}} + {{- if .Values.serviceTCP.externalIPs }} + externalIPs: + {{ toYaml .Values.serviceTCP.externalIPs | indent 4 }} + {{- end }} + externalTrafficPolicy: {{ .Values.serviceTCP.externalTrafficPolicy }} + ports: + - port: 53 + targetPort: dns + protocol: TCP + name: dns + selector: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/adguard-home/templates/service-udp.yaml b/adguard-home/templates/service-udp.yaml new file mode 100644 index 0000000..736acf3 --- /dev/null +++ b/adguard-home/templates/service-udp.yaml @@ -0,0 +1,37 @@ +{{- if .Values.serviceUDP.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "adguard-home.fullname" . }}-udp + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.serviceUDP.annotations }} + annotations: +{{ toYaml .Values.serviceUDP.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.serviceUDP.type }} + {{- if .Values.serviceUDP.loadBalancerIP }} + loadBalancerIP: {{ .Values.serviceUDP.loadBalancerIP }} + {{- end }} + {{- if .Values.serviceUDP.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ toYaml .Values.serviceUDP.loadBalancerSourceRanges | indent 4 }} + {{- end -}} + {{- if .Values.serviceUDP.externalIPs }} + externalIPs: + {{ toYaml .Values.serviceUDP.externalIPs | indent 4 }} + {{- end }} + externalTrafficPolicy: {{ .Values.serviceUDP.externalTrafficPolicy }} + ports: + - port: 53 + targetPort: dns-udp + protocol: UDP + name: dns-udp + selector: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/adguard-home/templates/service.yaml b/adguard-home/templates/service.yaml new file mode 100644 index 0000000..6b64150 --- /dev/null +++ b/adguard-home/templates/service.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "adguard-home.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} + {{- end -}} + {{- if .Values.service.externalIPs }} + externalIPs: + {{ toYaml .Values.service.externalIPs | indent 4 }} + {{- end }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} + ports: + - port: 3000 + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/adguard-home/templates/servicemonitor.yaml b/adguard-home/templates/servicemonitor.yaml new file mode 100644 index 0000000..c91fdc5 --- /dev/null +++ b/adguard-home/templates/servicemonitor.yaml @@ -0,0 +1,23 @@ +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "adguard-home.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "adguard-home.chart" . }} + {{- with .Values.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "adguard-home.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + endpoints: + - port: http + interval: 30s + path: / +{{- end }} diff --git a/adguard-home/templates/work-pvc.yaml b/adguard-home/templates/work-pvc.yaml new file mode 100644 index 0000000..87b4b08 --- /dev/null +++ b/adguard-home/templates/work-pvc.yaml @@ -0,0 +1,29 @@ + +{{- if and .Values.persistence.work.enabled (not .Values.persistence.work.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "adguard-home.fullname" . }}-work + {{- if .Values.persistence.work.skipuninstall }} + annotations: + "helm.sh/resource-policy": keep + {{- end }} + labels: + app.kubernetes.io/name: {{ include "adguard-home.name" . }} + helm.sh/chart: {{ include "adguard-home.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + accessModes: + - {{ .Values.persistence.work.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.work.size | quote }} +{{- if .Values.persistence.work.storageClass }} +{{- if (eq "-" .Values.persistence.work.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.work.storageClass }}" +{{- end }} +{{- end }} +{{- end -}} diff --git a/adguard-home/values.yaml b/adguard-home/values.yaml new file mode 100644 index 0000000..afb1ab4 --- /dev/null +++ b/adguard-home/values.yaml @@ -0,0 +1,376 @@ +# upgrade strategy type (e.g. Recreate or RollingUpdate) +strategyType: Recreate + +configAsCode: + enabled: false + resources: {} + # requests: + # memory: 128Mi + # cpu: 100m + image: + repository: busybox + tag: latest + pullPolicy: Always + config: + bind_host: 0.0.0.0 + bind_port: 3000 + users: [] + # - name: admin + # password: $2y$05$mV4GSa5Dymk4Hjg3NCscBuCYSckCGfc2mbS57SNkBkBAfvqfOdFfm + http_proxy: "" + language: "en" + rlimit_nofile: 0 + debug_pprof: false + web_session_ttl: 720 + dns: + bind_host: 0.0.0.0 + port: 53 + statistics_interval: 1 + querylog_enabled: true + querylog_interval: 90 + querylog_size_memory: 1000 + anonymize_client_ip: false + protection_enabled: true + blocking_mode: default + blocking_ipv4: "" + blocking_ipv6: "" + blocked_response_ttl: 10 + parental_block_host: family-block.dns.adguard.com + safebrowsing_block_host: standard-block.dns.adguard.com + ratelimit: 0 + ratelimit_whitelist: [] + refuse_any: true + upstream_dns: + - https://dns10.quad9.net/dns-query + bootstrap_dns: + - 9.9.9.10 + - 149.112.112.10 + - 2620:fe::10 + - 2620:fe::fe:10 + all_servers: false + fastest_addr: false + allowed_clients: [] + # - 10.0.0.1 + # - 10.0.1.1/24 + disallowed_clients: [] + # - 10.0.1.1 + # - 10.0.11.1/24 + blocked_hosts: [] + # - example.org + # - '*.example.org' + # - '||example.org^' + cache_size: 4194304 + cache_ttl_min: 0 + cache_ttl_max: 0 + bogus_nxdomain: [] + aaaa_disabled: false + enable_dnssec: false + edns_client_subnet: false + filtering_enabled: true + filters_update_interval: 24 + parental_enabled: false + safesearch_enabled: false + safebrowsing_enabled: false + safebrowsing_cache_size: 1048576 + safesearch_cache_size: 1048576 + parental_cache_size: 1048576 + cache_time: 30 + rewrites: [] + # - domain: example.org + # answer: 127.0.0.1 + # - domain: '*.example.org' + # answer: 127.0.0.1 + blocked_services: [] + # - facebook + # - origin + # - twitter + # - snapchat + # - skype + # - whatsapp + # - instagram + # - youtube + # - netflix + # - twitch + # - discord + # - amazon + # - ebay + # - cloudflare + # - steam + # - 
epic_games + # - reddit + # - ok + # - vk + # - mail_ru + # - tiktok + tls: + enabled: false + server_name: "" + force_https: false + port_https: 443 + port_dns_over_tls: 853 + allow_unencrypted_doh: false + strict_sni_check: false + certificate_chain: "" + private_key: "" + certificate_path: "" + private_key_path: "" + filters: + - enabled: true + url: https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt + name: AdGuard DNS filter + id: 1 + - enabled: false + url: https://adaway.org/hosts.txt + name: AdAway + id: 2 + - enabled: false + url: https://www.malwaredomainlist.com/hostslist/hosts.txt + name: MalwareDomainList.com Hosts List + id: 4 + whitelist_filters: [] + # - enabled: true + # url: https://easylist-downloads.adblockplus.org/exceptionrules.txt + # name: Allow nonintrusive advertising + # id: 1595760241 + user_rules: [] + # - '||example.org^' + # - '@@||example.org^' + # - 127.0.0.1 example.org + # - '! Here goes a comment' + # - '# Also a comment' + dhcp: + enabled: false + interface_name: "" + gateway_ip: "" + subnet_mask: "" + range_start: "" + range_end: "" + lease_duration: 86400 + icmp_timeout_msec: 1000 + clients: [] + # - name: myuser + # tags: + # - user_admin + # ids: + # - 192.168.91.1 + # use_global_settings: true + # filtering_enabled: false + # parental_enabled: false + # safesearch_enabled: false + # safebrowsing_enabled: false + # use_global_blocked_services: true + # blocked_services: [] + # upstreams: [] + log_file: "" + verbose: false + schema_version: 6 + +tlsSecretName: "" +# name of the secret that contains the tls cert and key. +# this secret will be mounted inside the adguard container /certs path. e.g. works with cert-manager + +image: + repository: adguard/adguardhome + # Image tag is set via charts appVersion. 
If you want to override the tag, specify it here + # tag: vX.Y.Z + pullPolicy: IfNotPresent + +nameOverride: "" +fullnameOverride: "" + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # add: + # - NET_BIND_SERVICE + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +timezone: "UTC" + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + +# Probes configuration +probes: + liveness: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 5 + periodSeconds: 10 + readiness: + enabled: false + initialDelaySeconds: 5 + failureThreshold: 5 + periodSeconds: 10 + startup: + enabled: false + initialDelaySeconds: 5 + failureThreshold: 30 + periodSeconds: 10 + +service: + type: ClusterIP + # externalTrafficPolicy: Local + # externalIPs: [] + # loadBalancerIP: "" + # a fixed LoadBalancer IP + # loadBalancerSourceRanges: [] + annotations: {} + # metallb.universe.tf/address-pool: network-services + # metallb.universe.tf/allow-shared-ip: adguard-home-svc + +serviceTCP: + enabled: false + type: NodePort + # externalTrafficPolicy: Local + # externalIPs: [] + loadBalancerIP: "" + # a fixed LoadBalancer IP + # loadBalancerSourceRanges: [] + annotations: {} + # metallb.universe.tf/address-pool: network-services + # metallb.universe.tf/allow-shared-ip: adguard-home-svc + +serviceUDP: + enabled: true + type: NodePort + # externalTrafficPolicy: Local + # externalIPs: [] + loadBalancerIP: "" + # a fixed LoadBalancer IP + # loadBalancerSourceRanges: [] + annotations: {} + # metallb.universe.tf/address-pool: network-services + # metallb.universe.tf/allow-shared-ip: adguard-home-svc + +serviceDNSOverTLS: + enabled: false + ## Enable if you use AdGuard as a DNS over TLS/HTTPS server + type: NodePort + # externalTrafficPolicy: Local + # externalIPs: [] + loadBalancerIP: "" + # a fixed LoadBalancer IP + # loadBalancerSourceRanges: [] + annotations: {} + # metallb.universe.tf/address-pool: network-services + # metallb.universe.tf/allow-shared-ip: adguard-home-svc + +serviceDNSOverHTTPS: + enabled: false + ## Enable if you use AdGuard as a DNS over TLS/HTTPS server + type: NodePort + # externalTrafficPolicy: Local + # externalIPs: [] + loadBalancerIP: "" + # a fixed LoadBalancer IP + # loadBalancerSourceRanges: [] + annotations: {} + # metallb.universe.tf/address-pool: network-services + # metallb.universe.tf/allow-shared-ip: adguard-home-svc + +serviceDHCP: + enabled: false + ## Enable if you use AdGuard as a DHCP Server + type: NodePort + # externalTrafficPolicy: Local + # externalIPs: [] + loadBalancerIP: "" + # a fixed LoadBalancer IP + annotations: {} + # metallb.universe.tf/address-pool: network-services + # metallb.universe.tf/allow-shared-ip: adguard-home-svc + # external-dns.alpha.kubernetes.io/hostname: dns.example.com + +serviceMonitor: + enabled: false + additionalLabels: {} + +## Pod Annotations +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "api" + +persistence: + config: + enabled: true + ## adguard-home configuration data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## + ## If you want to reuse an existing claim, you can pass the name of the PVC using + ## the existingClaim variable + # existingClaim: your-claim + # subPath: some-subpath + accessMode: ReadWriteOnce + size: 20Mi + ## Do not delete the pvc upon helm uninstall + skipuninstall: false + work: + enabled: true + ## adguard-home work volume configuration + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## + ## If you want to reuse an existing claim, you can pass the name of the PVC using + ## the existingClaim variable + # existingClaim: your-claim + # subPath: some-subpath + accessMode: ReadWriteOnce + size: 10Gi + ## Do not delete the pvc upon helm uninstall + skipuninstall: false + +volumePermissions: + image: + repository: busybox + tag: latest + pullPolicy: Always + resources: {} + # requests: + # memory: 128Mi + # cpu: 100m + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # memory: 500Mi + # requests: + # cpu: 50m + # memory: 275Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/bitwarden/.helmignore b/bitwarden/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/bitwarden/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/bitwarden/Chart.yaml b/bitwarden/Chart.yaml new file mode 100644 index 0000000..1fe869e --- /dev/null +++ b/bitwarden/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Bitwarden Helm chart for Kubernetes +name: bitwarden +version: 0.1.6 +home: https://github.com/dani-garcia/bitwarden_rs +icon: https://raw.githubusercontent.com/bitwarden/brand/master/icons/icon.svg +sources: + - https://github.com/dani-garcia/bitwarden_rs + - https://github.com/cdwv/bitwarden-k8s +maintainers: + - name: CodeWave + email: hello@codewave.eu + url: https://codewave.eu diff --git a/bitwarden/templates/NOTES.txt b/bitwarden/templates/NOTES.txt new file mode 100644 index 0000000..08cafda --- /dev/null +++ b/bitwarden/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "bitwarden.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ include "bitwarden.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "bitwarden.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "bitwarden.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/bitwarden/templates/_helpers.tpl b/bitwarden/templates/_helpers.tpl new file mode 100644 index 0000000..28224a3 --- /dev/null +++ b/bitwarden/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "bitwarden.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "bitwarden.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "bitwarden.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/bitwarden/templates/deployment.yaml b/bitwarden/templates/deployment.yaml new file mode 100644 index 0000000..585590c --- /dev/null +++ b/bitwarden/templates/deployment.yaml @@ -0,0 +1,81 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "bitwarden.fullname" . }} + {{- if .Values.deploymentAnnotations }} + annotations: + {{- range $key, $value := .Values.deploymentAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "bitwarden.name" . }} + helm.sh/chart: {{ include "bitwarden.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "bitwarden.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "bitwarden.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + resources: +{{ toYaml .Values.resources | indent 12 }} + env: + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + volumeMounts: + - name: data + mountPath: /data + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + - name: {{ .Values.image.pullSecrets }} + {{- end }} + volumes: + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "bitwarden.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} diff --git a/bitwarden/templates/ingress.yaml b/bitwarden/templates/ingress.yaml new file mode 100644 index 0000000..d19f9ab --- /dev/null +++ b/bitwarden/templates/ingress.yaml @@ -0,0 +1,38 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "bitwarden.fullname" . -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app.kubernetes.io/name: {{ include "bitwarden.name" . }} + helm.sh/chart: {{ include "bitwarden.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: http + {{- end }} +{{- end }} diff --git a/bitwarden/templates/persistent-volume-claim.yaml b/bitwarden/templates/persistent-volume-claim.yaml new file mode 100644 index 0000000..582276d --- /dev/null +++ b/bitwarden/templates/persistent-volume-claim.yaml @@ -0,0 +1,24 @@ +{{- if .Values.persistence.enabled }} +{{- if not .Values.persistence.existingClaim -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "bitwarden.fullname" . }} + labels: + app: {{ template "bitwarden.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} +{{- if .Values.persistence.storageClass }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- else }} + storageClassName: "" +{{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- end -}} +{{- end }} diff --git a/bitwarden/templates/service.yaml b/bitwarden/templates/service.yaml new file mode 100644 index 0000000..29e228b --- /dev/null +++ b/bitwarden/templates/service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "bitwarden.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "bitwarden.name" . }} + helm.sh/chart: {{ include "bitwarden.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + type: {{ .Values.service.type }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} +{{- end }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "bitwarden.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/bitwarden/values.yaml b/bitwarden/values.yaml new file mode 100644 index 0000000..0c22735 --- /dev/null +++ b/bitwarden/values.yaml @@ -0,0 +1,86 @@ +# Default values for bitwarden. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: bitwardenrs/server + tag: 1.18.0 + pullPolicy: IfNotPresent + # imagePullSecrets for accessing private registries + # pullSecrets: regcred + +env: + SIGNUPS_ALLOWED: true + INVITATIONS_ALLOWED: true + # SERVER_ADMIN_EMAIL + # DOMAIN + # YUBICO_CLIENT_ID + # YUBICO_SECRET_KEY + # DATA_FOLDER + # DATABASE_URL + # ATTACHMENTS_FOLDER + # ICON_CACHE_FOLDER + # ROCKET_LIMITS + # ROCKET_WORKERS + # SMTP_HOST + # SMTP_FROM + # SMTP_PORT + # SMTP_SSL + # SMTP_EXPLICIT_TLS + # SMTP_USERNAME + # SMTP_PASSWORD + # SHOW_PASSWORD_HINT + # WEB_VAULT_ENABLED + +nameOverride: "" +fullnameOverride: "" + +service: + type: ClusterIP + port: 80 + loadBalancerIP: "" + +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + path: / + hosts: + - bitwarden.example + tls: + - secretName: bitwarden-tls + hosts: + - bitwarden.example + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +podAnnotations: {} + +deploymentAnnotations: {} + +## Persist data to a persitent volume +persistence: + enabled: false + accessMode: ReadWriteOnce + size: 800Mi + #storageClass: + #existingClaim: "bitwarden-pvc" diff --git a/chartmuseum/.helmignore b/chartmuseum/.helmignore new file mode 100755 index 0000000..46fd899 --- /dev/null +++ b/chartmuseum/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS diff --git a/chartmuseum/Chart.yaml b/chartmuseum/Chart.yaml new file mode 100755 index 0000000..917a6f6 --- /dev/null +++ b/chartmuseum/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: 0.12.0 +deprecated: true +description: DEPRECATED Host your own Helm Chart Repository +home: https://github.com/helm/chartmuseum +icon: https://raw.githubusercontent.com/helm/chartmuseum/master/logo2.png +keywords: +- chartmuseum +- helm +- charts repo +name: chartmuseum +version: 2.14.2 diff --git a/chartmuseum/README.md b/chartmuseum/README.md new file mode 100755 index 0000000..cb3774a --- /dev/null +++ b/chartmuseum/README.md @@ -0,0 +1,749 @@ +# ⚠️ Repo Archive Notice + +As of Nov 13, 2020, charts in this repo will no longer be updated. +For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/). + +# ChartMuseum Helm Chart + +Deploy your own private ChartMuseum. + +Please also see https://github.com/kubernetes-helm/chartmuseum + +## DEPRECATION NOTICE + +This chart is deprecated and no longer supported. + +## Table of Content + + + + + +- [ChartMuseum Helm Chart](#chartmuseum-helm-chart) + - [Table of Content](#table-of-content) + - [Prerequisites](#prerequisites) + - [Configuration](#configuration) + - [Installation](#installation) + - [Using with Amazon S3](#using-with-amazon-s3) + - [permissions grant with access keys](#permissions-grant-with-access-keys) + - [permissions grant with IAM instance profile](#permissions-grant-with-iam-instance-profile) + - [permissions grant with IAM assumed role](#permissions-grant-with-iam-assumed-role) + - [permissions grant with IAM Roles for Service Accounts](#permissions-grant-with-iam-roles-for-service-accounts) + - [Using with Google Cloud Storage](#using-with-google-cloud-storage) + - [Using with Google Cloud Storage and a Google Service Account](#using-with-google-cloud-storage-and-a-google-service-account) + - [Using with Microsoft Azure Blob Storage](#using-with-microsoft-azure-blob-storage) + - [Using with Alibaba Cloud OSS Storage](#using-with-alibaba-cloud-oss-storage) + - [Using with Openstack Object Storage](#using-with-openstack-object-storage) + - [Using with Oracle Object Storage](#using-with-oracle-object-storage) + - [Using an existing secret](#using-an-existing-secret) + - [Using with local filesystem storage](#using-with-local-filesystem-storage) + - [Setting local storage permissions with initContainers](#setting-local-storage-permissions-with-initcontainers) + - [Example storage class](#example-storage-class) + - [Authentication](#authentication) + - [Basic Authentication](#basic-authentication) + - [Bearer/Token auth](#bearertoken-auth) + - [Ingress](#ingress) + - [Hosts](#hosts) + - [Extra Paths](#extra-paths) + - [Annotations](#annotations) + - [Example Ingress configuration](#example-ingress-configuration) + - [Uninstall](#uninstall) + + + + +## Prerequisites + +* [If enabled] A persistent storage resource and RW access to it +* [If enabled] Kubernetes StorageClass for dynamic provisioning + +## Configuration + +By default this chart will not 
have persistent storage, and the API service +will be *DISABLED*. This protects against unauthorized access to the API +with default configuration values. + +In addition, by default, pod `securityContext.fsGroup` is set to `1000`. This +is the user/group that the ChartMuseum container runs as, and is used to +enable local persitant storage. If your cluster has DenySecurityContext enabled, +you can set `securityContext` to `{}` and still use this chart with one of +the cloud storage options. + +For a more robust solution supply helm install with a custom values.yaml +You are also required to create the StorageClass resource ahead of time: +``` +kubectl create -f /path/to/storage_class.yaml +``` + +The following table lists common configurable parameters of the chart and +their default values. See values.yaml for all available options. + +| Parameter | Description | Default | +| --------------------------------------- | --------------------------------------------------------------------------- | ------------------------------------ | +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `image.repository` | Container image to use | `chartmuseum/chartmuseum` | +| `image.tag` | Container image tag to deploy | `v0.12.0` | +| `persistence.accessMode` | Access mode to use for PVC | `ReadWriteOnce` | +| `persistence.enabled` | Whether to use a PVC for persistent storage | `false` | +| `persistence.path` | PV mount path | `/storage` | +| `persistence.size` | Amount of space to claim for PVC | `8Gi` | +| `persistence.labels` | Additional labels for PVC | `{}` | +| `persistence.storageClass` | Storage Class to use for PVC | `-` | +| `persistence.volumeName` | Volume to use for PVC | `` | +| `persistence.pv.enabled` | Whether to use a PV for persistent storage | `false` | +| `persistence.pv.capacity.storage` | Storage size to use for PV | `8Gi` | +| `persistence.pv.accessMode` | Access mode to use for PV | `ReadWriteOnce` | +| `persistence.pv.nfs.server` | NFS server for PV | `` | +| `persistence.pv.nfs.path` | Storage Path | `` | +| `persistence.pv.pvname` | Custom name for private volume | `` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `replicaCount` | k8s replicas | `1` | +| `resources.limits.cpu` | Container maximum CPU | `100m` | +| `resources.limits.memory` | Container maximum memory | `128Mi` | +| `resources.requests.cpu` | Container requested CPU | `80m` | +| `resources.requests.memory` | Container requested memory | `64Mi` | +| `secret.labels` | Additional labels for secret | `false` | +| `serviceAccount.create` | If true, create the service account | `false` | +| `serviceAccount.name` | Name of the serviceAccount to create or use | `{{ chartmuseum.fullname }}` | +| `serviceAccount.annotations` | Additional Service Account annotations | `{}` | +| `securityContext.enabled` | Enable securityContext | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1000` | +| `securityContext.runAsNonRoot` | Running Pods as non-root | `` | +| `securityContext.supplementalGroups` | Control which group IDs containers add | `` | +| `containerSecurityContext` | Additional Container securityContext (ex. 
allowPrivilegeEscalation) | `{}` | +| `priorityClassName ` | priorityClassName | `""` | +| `nodeSelector` | Map of node labels for pod assignment | `{}` | +| `tolerations` | List of node taints to tolerate | `[]` | +| `affinity` | Map of node/pod affinities | `{}` | +| `schedulerName` | Kubernetes scheduler to use | `default` | +| `env.open.STORAGE` | Storage Backend to use | `local` | +| `env.open.STORAGE_ALIBABA_BUCKET` | Bucket to store charts in for Alibaba | `` | +| `env.open.STORAGE_ALIBABA_PREFIX` | Prefix to store charts under for Alibaba | `` | +| `env.open.STORAGE_ALIBABA_ENDPOINT` | Alternative Alibaba endpoint | `` | +| `env.open.STORAGE_ALIBABA_SSE` | Server side encryption algorithm to use | `` | +| `env.open.STORAGE_AMAZON_BUCKET` | Bucket to store charts in for AWS | `` | +| `env.open.STORAGE_AMAZON_ENDPOINT` | Alternative AWS endpoint | `` | +| `env.open.STORAGE_AMAZON_PREFIX` | Prefix to store charts under for AWS | `` | +| `env.open.STORAGE_AMAZON_REGION` | Region to use for bucket access for AWS | `` | +| `env.open.STORAGE_AMAZON_SSE` | Server side encryption algorithm to use | `` | +| `env.open.STORAGE_GOOGLE_BUCKET` | Bucket to store charts in for GCP | `` | +| `env.open.STORAGE_GOOGLE_PREFIX` | Prefix to store charts under for GCP | `` | +| `env.open.STORAGE_MICROSOFT_CONTAINER` | Container to store charts under for MS | `` | +| `env.open.STORAGE_MICROSOFT_PREFIX` | Prefix to store charts under for MS | `` | +| `env.open.STORAGE_OPENSTACK_CONTAINER` | Container to store charts for openstack | `` | +| `env.open.STORAGE_OPENSTACK_PREFIX` | Prefix to store charts for openstack | `` | +| `env.open.STORAGE_OPENSTACK_REGION` | Region of openstack container | `` | +| `env.open.STORAGE_OPENSTACK_CACERT` | Path to a CA cert bundle for openstack | `` | +| `env.open.STORAGE_ORACLE_COMPARTMENTID` | Compartment ID for Oracle Object Store | `` | +| `env.open.STORAGE_ORACLE_BUCKET` | Bucket to store charts in Oracle Object Store | `` | +| `env.open.STORAGE_ORACLE_PREFIX` | Prefix to store charts for Oracle object Store | `` | +| `env.open.CHART_POST_FORM_FIELD_NAME` | Form field to query for chart file content | `` | +| `env.open.PROV_POST_FORM_FIELD_NAME` | Form field to query for chart provenance | `` | +| `env.open.DEPTH` | levels of nested repos for multitenancy. 
| `0` | +| `env.open.DEBUG` | Show debug messages | `false` | +| `env.open.LOG_JSON` | Output structured logs in JSON | `true` | +| `env.open.DISABLE_STATEFILES` | Disable use of index-cache.yaml | `false` | +| `env.open.DISABLE_METRICS` | Disable Prometheus metrics | `true` | +| `env.open.DISABLE_API` | Disable all routes prefixed with /api | `true` | +| `env.open.ALLOW_OVERWRITE` | Allow chart versions to be re-uploaded | `false` | +| `env.open.CHART_URL` | Absolute url for .tgzs in index.yaml | `` | +| `env.open.AUTH_ANONYMOUS_GET` | Allow anon GET operations when auth is used | `false` | +| `env.open.CONTEXT_PATH` | Set the base context path | `` | +| `env.open.INDEX_LIMIT` | Parallel scan limit for the repo indexer | `` | +| `env.open.CACHE` | Cache store, can be one of: redis | `` | +| `env.open.CACHE_REDIS_ADDR` | Address of Redis service (host:port) | `` | +| `env.open.CACHE_REDIS_DB` | Redis database to be selected after connect | `0` | +| `env.open.BEARER_AUTH` | Enable bearer auth | `false` | +| `env.open.AUTH_REALM` | Realm used for bearer authentication | `` | +| `env.open.AUTH_SERVICE` | Service used for bearer authentication | `` | +| `env.field` | Expose pod information to containers through environment variables | `` | +| `env.existingSecret` | Name of the existing secret use values | `` | +| `env.existingSecret.BASIC_AUTH_USER` | Key name in the secret for the Username | `` | +| `env.existingSecret.BASIC_AUTH_PASS` | Key name in the secret for the Password | `` | +| `env.secret.BASIC_AUTH_USER` | Username for basic HTTP authentication | `` | +| `env.secret.BASIC_AUTH_PASS` | Password for basic HTTP authentication | `` | +| `env.secret.CACHE_REDIS_PASSWORD` | Redis requirepass server configuration | `` | +| `extraArgs` | Pass extra arguments to the chartmuseum binary | `` | +| `gcp.secret.enabled` | Flag for the GCP service account | `false` | +| `gcp.secret.name` | Secret name for the GCP json file | `` | +| `gcp.secret.key` | Secret key for te GCP json file | `credentials.json` | +| `oracle.secret.enabled` | Flag for Oracle OCI account | `false` | +| `oracle.secret.name` | Secret name for OCI config and key | `` | +| `oracle.secret.config` | Secret key that holds the OCI config | `config` | +| `oracle.secret.key_file` | Secret key that holds the OCI private key | `key_file` | +| `bearerAuth.secret.enabled` | Flag for bearer auth public key secret | `` | +| `bearerAuth.secret.publicKey` | The name of the secret with the public key | `` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `service.externalTrafficPolicy` | Source IP preservation (only for Service type NodePort and LoadBalancer) | `Local` | +| `service.loadBalancerSourceRanges` | Restricts access for LoadBalancer (only for Service type LoadBalancer) | `[]` | +| `service.servicename` | Custom name for service | `` | +| `service.labels` | Additional labels for service | `{}` | +| `serviceMonitor.enabled` | Enable the ServiceMontor resource to be deployed | `false` | +| `serviceMonitor.labels` | Labels for the servicemonitor used by the Prometheus Operator | `{}` | +| `serviceMonitor.namespace` | Namespace of the ServiceMonitor resource | `{{ .Release.Namespace }}` | +| `serviceMonitor.metricsPath` | Path to the Chartmuseum metrics path | `/metrics` | +| `serviceMonitor.interval` | Scrape interval, If not set, the Prometheus default scrape interval is used | `nil` | +| `serviceMonitor.timeout` | Scrape request timeout. 
If not set, the Prometheus default timeout is used | `nil` | +| `deployment.labels` | Additional labels for deployment | `{}` | +| `deployment.matchlabes` | Match labels for deployment selector | `{}` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.annotations` | Ingress annotations | `[]` | +| `ingress.labels` | Ingress labels | `[]` | +| `ingress.hosts[0].name` | Hostname for the ingress | `` | +| `ingress.hosts[0].path` | Path within the url structure | `` | +| `ingress.hosts[0].tls ` | Enable TLS on the ingress host | `false` | +| `ingress.hosts[0].tlsSecret` | TLS secret to use (must be manually created) | `` | +| `ingress.hosts[0].serviceName` | The name of the service to route traffic to. | `{{ .Values.service.externalPort }}` | +| `ingress.hosts[0].servicePort` | The port of the service to route traffic to. | `{{ .chartmuseum. }}` | +| `ingress.extraPaths[0].path` | Path within the url structure. | `` | +| `ingress.extraPaths[0].service` | The name of the service to route traffic to. | `` | +| `ingress.extraPaths[0].port` | The port of the service to route traffic to. | `` | + +Specify each parameter using the `--set key=value[,key=value]` argument to +`helm install`. + +## Installation + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +### Using with Amazon S3 +Make sure your environment is properly set up to access `my-s3-bucket`. + +You need at least the following permissions inside your IAM Policy: +```yaml +{ +  "Version": "2012-10-17", +  "Statement": [ +    { +      "Sid": "AllowListObjects", +      "Effect": "Allow", +      "Action": [ +        "s3:ListBucket" +      ], +      "Resource": "arn:aws:s3:::my-s3-bucket" +    }, +    { +      "Sid": "AllowObjectsCRUD", +      "Effect": "Allow", +      "Action": [ +        "s3:DeleteObject", +        "s3:GetObject", +        "s3:PutObject" +      ], +      "Resource": "arn:aws:s3:::my-s3-bucket/*" +    } +  ] +} +``` + +You can grant it to `chartmuseum` in several ways: + +#### permissions grant with access keys + +Grant the permissions to a dedicated user and use its access keys for authentication on AWS. + +Specify `custom.yaml` with such values + +```yaml +env: +  open: +    STORAGE: amazon +    STORAGE_AMAZON_BUCKET: my-s3-bucket +    STORAGE_AMAZON_PREFIX: +    STORAGE_AMAZON_REGION: us-east-1 +  secret: +    AWS_ACCESS_KEY_ID: "********" ## aws access key id value +    AWS_SECRET_ACCESS_KEY: "********" ## aws access key secret value +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +#### permissions grant with IAM instance profile + +You can grant permissions to the k8s node IAM instance profile. +For more information, read this [article](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) + +Specify `custom.yaml` with such values + +```yaml +env: +  open: +    STORAGE: amazon +    STORAGE_AMAZON_BUCKET: my-s3-bucket +    STORAGE_AMAZON_PREFIX: +    STORAGE_AMAZON_REGION: us-east-1 +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +#### permissions grant with IAM assumed role + +To provide access with an assumed role, you need to install [kube2iam](https://github.com/kubernetes/charts/tree/master/stable/kube2iam) +and create a role with the required permissions granted.
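+ +For reference, the role being assumed must also trust the IAM role attached to your worker nodes so that kube2iam can assume it on behalf of the pod. A minimal sketch of such a trust policy follows; the AWS account ID and node role name are placeholders you must replace with your own values: + +```json +{ +  "Version": "2012-10-17", +  "Statement": [ +    { +      "Sid": "AllowNodeInstanceRoleToAssume", +      "Effect": "Allow", +      "Principal": { +        "AWS": "arn:aws:iam::111111111111:role/YOUR-NODE-INSTANCE-ROLE" +      }, +      "Action": "sts:AssumeRole" +    } +  ] +} +```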
+ +Specify `custom.yaml` with such values + +```yaml +env: +  open: +    STORAGE: amazon +    STORAGE_AMAZON_BUCKET: my-s3-bucket +    STORAGE_AMAZON_PREFIX: +    STORAGE_AMAZON_REGION: us-east-1 +replica: +  annotations: +    iam.amazonaws.com/role: "{assumed role name}" +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +#### permissions grant with IAM Roles for Service Accounts + +For Amazon EKS clusters, access can be provided with a service account using [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). + +Specify `custom.yaml` with such values + +```yaml +env: +  open: +    STORAGE: amazon +    STORAGE_AMAZON_BUCKET: my-s3-bucket +    STORAGE_AMAZON_PREFIX: +    STORAGE_AMAZON_REGION: us-east-1 +serviceAccount: +  create: true +  annotations: +    eks.amazonaws.com/role-arn: "arn:aws:iam::{aws account ID}:role/{assumed role name}" +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +### Using with Google Cloud Storage +Make sure your environment is properly set up to access `my-gcs-bucket`. + +Specify `custom.yaml` with such values + +```yaml +env: +  open: +    STORAGE: google +    STORAGE_GOOGLE_BUCKET: my-gcs-bucket +    STORAGE_GOOGLE_PREFIX: +``` + +### Using with Google Cloud Storage and a Google Service Account + +Google service account credentials are stored in a JSON file. There are two approaches here. Ideally you don't want to send your secrets to Tiller. In that case, before installing this chart, you should create a secret with those credentials: + +```shell +kubectl create secret generic chartmuseum-secret --from-file=credentials.json="my-project-45e35d85a593.json" +``` + +Then you can either use a `VALUES` yaml with your values or set those values in the command line: + +```shell +helm install stable/chartmuseum --debug --set gcp.secret.enabled=true,env.open.STORAGE=google,env.open.DISABLE_API=false,env.open.STORAGE_GOOGLE_BUCKET=my-gcp-chartmuseum,gcp.secret.name=chartmuseum-secret +``` + +If you prefer to use a yaml file: + +```yaml +env: +  open: +    STORAGE: google +    STORAGE_GOOGLE_BUCKET: my-gcs-bucket +    STORAGE_GOOGLE_PREFIX: + +gcp: +  secret: +    enabled: true +    name: chartmuseum-secret +    key: credentials.json +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +In case you don't mind adding your secret to Tiller (you shouldn't do it), these are the values: + +```yaml +env: +  open: +    STORAGE: google +    STORAGE_GOOGLE_BUCKET: my-gcs-bucket +    STORAGE_GOOGLE_PREFIX: +  secret: +    GOOGLE_CREDENTIALS_JSON: my-json-file-base64-encoded +gcp: +  secret: +    enabled: true + +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +To set the values directly in the command line, use the following command. Note that we have to base64-encode the JSON file because we cannot pass multi-line text as a value. + +```shell +export JSONKEY=$(cat my-project-77e35d85a593.json | base64) +helm install stable/chartmuseum --debug --set gcp.secret.enabled=true,env.secret.GOOGLE_CREDENTIALS_JSON=${JSONKEY},env.open.STORAGE=google,env.open.DISABLE_API=false,env.open.STORAGE_GOOGLE_BUCKET=my-gcp-chartmuseum +``` + +### Using with Microsoft Azure Blob Storage + +Make sure your environment is properly set up to access `mycontainer`.
+ +To do so, you must set the following env vars: +- `AZURE_STORAGE_ACCOUNT` +- `AZURE_STORAGE_ACCESS_KEY` + +Specify `custom.yaml` with such values + +```yaml +env: + open: + STORAGE: microsoft + STORAGE_MICROSOFT_CONTAINER: mycontainer + # prefix to store charts for microsoft storage backend + STORAGE_MICROSOFT_PREFIX: + secret: + AZURE_STORAGE_ACCOUNT: "********" ## azure storage account + AZURE_STORAGE_ACCESS_KEY: "********" ## azure storage account access key +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +### Using with Alibaba Cloud OSS Storage + +Make sure your environment is properly setup to access `my-oss-bucket`. + +To do so, you must set the following env vars: +- `ALIBABA_CLOUD_ACCESS_KEY_ID` +- `ALIBABA_CLOUD_ACCESS_KEY_SECRET` + +Specify `custom.yaml` with such values + +```yaml +env: + open: + STORAGE: alibaba + STORAGE_ALIBABA_BUCKET: my-oss-bucket + STORAGE_ALIBABA_PREFIX: + STORAGE_ALIBABA_ENDPOINT: oss-cn-beijing.aliyuncs.com + secret: + ALIBABA_CLOUD_ACCESS_KEY_ID: "********" ## alibaba OSS access key id + ALIBABA_CLOUD_ACCESS_KEY_SECRET: "********" ## alibaba OSS access key secret +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` + +### Using with Openstack Object Storage + +Make sure your environment is properly setup to access `mycontainer`. + +To do so, you must set the following env vars (depending on your openstack version): +- `OS_AUTH_URL` +- either `OS_PROJECT_NAME` or `OS_TENANT_NAME` or `OS_PROJECT_ID` or `OS_TENANT_ID` +- either `OS_DOMAIN_NAME` or `OS_DOMAIN_ID` +- either `OS_USERNAME` or `OS_USERID` +- `OS_PASSWORD` + +Specify `custom.yaml` with such values + +```yaml +env: + open: + STORAGE: openstack + STORAGE_OPENSTACK_CONTAINER: mycontainer + STORAGE_OPENSTACK_PREFIX: + STORAGE_OPENSTACK_REGION: YOURREGION + secret: + OS_AUTH_URL: https://myauth.url.com/v2.0/ + OS_TENANT_ID: yourtenantid + OS_USERNAME: yourusername + OS_PASSWORD: yourpassword +``` + +Run command to install + +```shell +helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum +``` +### Using with Oracle Object Storage + +Oracle (OCI) configuration and private key need to be added to a secret and are mounted at /home/chartmuseum/.oci. Your OCI config needs to be under [DEFAULT] and your `key_file` needs to be /home/chartmuseum/.oci/oci.key. 
+
+```shell
+kubectl create secret generic chartmuseum-secret --from-file=config=".oci/config" --from-file=key_file=".oci/oci.key"
+```
+
+Then you can either use a values YAML file or set those values on the command line:
+
+```shell
+helm install stable/chartmuseum --debug --set env.open.STORAGE=oracle,env.open.STORAGE_ORACLE_COMPARTMENTID=ocid1.compartment.oc1..abc123,env.open.STORAGE_ORACLE_BUCKET=myocibucket,env.open.STORAGE_ORACLE_PREFIX=chartmuseum,oracle.secret.enabled=true,oracle.secret.name=chartmuseum-secret
+```
+
+If you prefer to use a yaml file:
+
+```yaml
+env:
+  open:
+    STORAGE: oracle
+    STORAGE_ORACLE_COMPARTMENTID: ocid1.compartment.oc1..abc123
+    STORAGE_ORACLE_BUCKET: myocibucket
+    STORAGE_ORACLE_PREFIX: chartmuseum
+
+oracle:
+  secret:
+    enabled: true
+    name: chartmuseum-secret
+    config: config
+    key_file: key_file
+
+```
+
+Run command to install
+
+```shell
+helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
+```
+
+### Using an existing secret
+
+It is possible to pre-create a secret in Kubernetes and have this chart use it.
+
+For example, suppose you are using the AWS configuration shown above.
+
+You could create a secret like this:
+
+```shell
+ kubectl create secret generic chartmuseum-secret --from-literal="aws-access-key=myaccesskey" --from-literal="aws-secret-access-key=mysecretaccesskey" --from-literal="basic-auth-user=curator" --from-literal="basic-auth-pass=mypassword"
+```
+
+Specify `custom.yaml` with such values
+
+```yaml
+env:
+  open:
+    STORAGE: amazon
+    STORAGE_AMAZON_BUCKET: my-s3-bucket
+    STORAGE_AMAZON_PREFIX:
+    STORAGE_AMAZON_REGION: us-east-1
+  existingSecret: chartmuseum-secret
+  existingSecretMappings:
+    AWS_ACCESS_KEY_ID: aws-access-key
+    AWS_SECRET_ACCESS_KEY: aws-secret-access-key
+    BASIC_AUTH_USER: basic-auth-user
+    BASIC_AUTH_PASS: basic-auth-pass
+```
+
+Run command to install
+
+```shell
+helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
+```
+
+### Using with local filesystem storage
+By default chartmuseum uses local filesystem storage.
+But on pod recreation it will lose all charts; to prevent that, enable persistent storage.
+
+```yaml
+env:
+  open:
+    STORAGE: local
+persistence:
+  enabled: true
+  accessMode: ReadWriteOnce
+  size: 8Gi
+  ## A manually managed Persistent Volume and Claim
+  ## Requires persistence.enabled: true
+  ## If defined, PVC must be created manually before volume will be bound
+  # existingClaim:
+
+  ## Chartmuseum data Persistent Volume Storage Class
+  ## If defined, storageClassName:
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+```
+
+Run command to install
+
+```shell
+helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
+```
+
+### Setting local storage permissions with initContainers
+
+Some clusters do not allow using securityContext to set permissions for persistent volumes. Instead, an initContainer can be created to run `chown` on the mounted volume. To enable it, set `securityContext.enabled` to `false`.
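+
+For example, a minimal sketch of this setup from the command line (a values file works equally well) is:
+
+```shell
+helm install --name my-chartmuseum stable/chartmuseum \
+  --set persistence.enabled=true \
+  --set securityContext.enabled=false
+```
+
+With `securityContext.enabled=false` and persistence enabled, the deployment adds the `volume-permissions` initContainer, which runs `chown` on the mounted volume.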
+
+
+#### Example storage class
+
+An example storage-class.yaml is provided here for use with a Ceph cluster.
+
+```yaml
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: storage-volume
+provisioner: kubernetes.io/rbd
+parameters:
+  monitors: "10.11.12.13:4567,10.11.12.14:4567"
+  adminId: admin
+  adminSecretName: thesecret
+  adminSecretNamespace: default
+  pool: chartstore
+  userId: user
+  userSecretName: thesecret
+```
+
+### Authentication
+
+By default this chart does not have any authentication configured, and it allows anyone to fetch or upload charts (assuming the API is enabled). There are two supported methods of authentication.
+
+#### Basic Authentication
+
+This protects all API routes with HTTP basic auth. It can be configured as plain text in the values, which is stored as a secret in the Kubernetes cluster, by setting:
+
+```yaml
+env:
+  secret:
+    BASIC_AUTH_USER: curator
+    BASIC_AUTH_PASS: mypassword
+```
+
+Or by using values from an existing secret in the cluster that can be created using:
+
+```shell
+kubectl create secret generic chartmuseum-secret --from-literal="basic-auth-user=curator" --from-literal="basic-auth-pass=mypassword"
+```
+
+This secret can be used in the values file as follows:
+
+```yaml
+env:
+  existingSecret: chartmuseum-secret
+  existingSecretMappings:
+    BASIC_AUTH_USER: basic-auth-user
+    BASIC_AUTH_PASS: basic-auth-pass
+```
+
+#### Bearer/Token auth
+
+When using this, ChartMuseum is configured with a public key and will accept RS256 JWT tokens signed by the associated private key, passed in the Authorization header. You can use the [chartmuseum/auth](https://github.com/chartmuseum/auth) Go library to generate valid JWT tokens. For more information about how this works, please see [chartmuseum/auth-server-example](https://github.com/chartmuseum/auth-server-example).
+
+To use this, the public key should be stored in a secret. This can be done with:
+
+```shell
+kubectl create secret generic chartmuseum-public-key --from-file=public-key.pem
+```
+
+And Bearer/Token auth can be configured using the following values:
+
+```yaml
+env:
+  open:
+    BEARER_AUTH: true
+    AUTH_REALM:
+    AUTH_SERVICE:
+
+bearerAuth:
+  secret:
+    enabled: true
+    publicKeySecret: chartmuseum-public-key
+```
+
+### Ingress
+
+This chart provides support for ingress resources. If you have an ingress controller installed on your cluster, such as [nginx-ingress](https://hub.kubeapps.com/charts/stable/nginx-ingress) or [traefik](https://hub.kubeapps.com/charts/stable/traefik), you can utilize the ingress controller to expose ChartMuseum.
+
+To enable ingress integration, please set `ingress.enabled` to `true`.
+
+#### Hosts
+
+Most likely you will only want one hostname that maps to this ChartMuseum installation; however, it is possible to have more than one host. To facilitate this, the `ingress.hosts` object is an array. TLS secrets referenced in the ingress host configuration must be manually created in the namespace.
+
+In most cases, you should not specify values for `ingress.hosts[0].serviceName` and `ingress.hosts[0].servicePort`. However, some ingress controllers support advanced scenarios requiring you to specify these values. For example, [setting up an SSL redirect using the AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/tasks/ssl_redirect/).
+
+#### Extra Paths
+
+Specifying extra paths to prepend to every host configuration is especially useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions).
+
+```shell
+helm install --name my-chartmuseum stable/chartmuseum \
+  --set ingress.enabled=true \
+  --set ingress.hosts[0].name=chartmuseum.domain.com \
+  --set ingress.extraPaths[0].service=ssl-redirect \
+  --set ingress.extraPaths[0].port=use-annotation
+```
+
+
+#### Annotations
+
+For annotations, please see [this document for nginx](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md) and [this document for Traefik](https://docs.traefik.io/configuration/backends/kubernetes/#general-annotations). Not all annotations are supported by all ingress controllers, but this document does a good job of indicating which annotations are supported by many popular ingress controllers. Annotations can be set using `ingress.annotations`.
+
+#### Example Ingress configuration
+
+```shell
+helm install --name my-chartmuseum stable/chartmuseum \
+  --set ingress.enabled=true \
+  --set ingress.hosts[0].name=chartmuseum.domain.com \
+  --set ingress.hosts[0].path=/ \
+  --set ingress.hosts[0].tls=true \
+  --set ingress.hosts[0].tlsSecret=chartmuseum.tls-secret
+```
+
+## Uninstall
+
+By default, a deliberate uninstall will result in the persistent volume
+claim being deleted.
+
+```shell
+helm delete my-chartmuseum
+```
+
+To delete the deployment and its history:
+```shell
+helm delete --purge my-chartmuseum
+```
diff --git a/chartmuseum/ci/ingress-values.yaml b/chartmuseum/ci/ingress-values.yaml
new file mode 100755
index 0000000..04e7645
--- /dev/null
+++ b/chartmuseum/ci/ingress-values.yaml
@@ -0,0 +1,9 @@
+ingress:
+  enabled: true
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    kubernetes.io/tls-acme: "true"
+  hosts:
+    - name: chartmuseum.domain1.com
+      path: /
+      tls: false
diff --git a/chartmuseum/templates/NOTES.txt b/chartmuseum/templates/NOTES.txt
new file mode 100755
index 0000000..5efa6be
--- /dev/null
+++ b/chartmuseum/templates/NOTES.txt
@@ -0,0 +1,30 @@
+** Please be patient while the chart is being deployed **
+
+Get the ChartMuseum URL by running:
+
+{{- if contains "NodePort" .Values.service.type }}
+
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "chartmuseum.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT{{ .Values.env.open.CONTEXT_PATH }}/
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+** Please ensure an external IP is associated to the {{ template "chartmuseum.fullname" . }} service before proceeding **
+** Watch the status using: kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "chartmuseum.fullname" . }} **
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "chartmuseum.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.externalPort }}{{ .Values.env.open.CONTEXT_PATH }}/
+
+OR
+
+  export SERVICE_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "chartmuseum.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+  echo http://$SERVICE_HOST:{{ .Values.service.externalPort }}{{ .Values.env.open.CONTEXT_PATH }}/
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "chartmuseum.name" . 
}}" -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:8080{{ .Values.env.open.CONTEXT_PATH }}/ + kubectl port-forward $POD_NAME 8080:8080 --namespace {{ .Release.Namespace }} + +{{- end }} diff --git a/chartmuseum/templates/_helpers.tpl b/chartmuseum/templates/_helpers.tpl new file mode 100755 index 0000000..e5bab6a --- /dev/null +++ b/chartmuseum/templates/_helpers.tpl @@ -0,0 +1,142 @@ +{{- /* +name defines a template for the name of the chartmuseum chart. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.nameOverride: Replaces the computed name with this given name +- .Values.namePrefix: Prefix +- .Values.global.namePrefix: Global prefix +- .Values.nameSuffix: Suffix +- .Values.global.nameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "chartmuseum.name" . -}}"' +*/ -}} +{{- define "chartmuseum.name"}} +{{- $global := default (dict) .Values.global -}} +{{- $base := default .Chart.Name .Values.nameOverride -}} +{{- $gpre := default "" $global.namePrefix -}} +{{- $pre := default "" .Values.namePrefix -}} +{{- $suf := default "" .Values.nameSuffix -}} +{{- $gsuf := default "" $global.nameSuffix -}} +{{- $name := print $gpre $pre $base $suf $gsuf -}} +{{- $name | lower | trunc 54 | trimSuffix "-" -}} +{{- end -}} + +{{- /* +fullname defines a suitably unique name for a resource by combining +the release name and the chartmuseum chart name. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.fullnameOverride: Replaces the computed name with this given name +- .Values.fullnamePrefix: Prefix +- .Values.global.fullnamePrefix: Global prefix +- .Values.fullnameSuffix: Suffix +- .Values.global.fullnameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "chartmuseum.fullname" . -}}"' +*/ -}} +{{- define "chartmuseum.fullname"}} +{{- $global := default (dict) .Values.global -}} +{{- $base := default (printf "%s-%s" .Release.Name .Chart.Name) .Values.fullnameOverride -}} +{{- $gpre := default "" $global.fullnamePrefix -}} +{{- $pre := default "" .Values.fullnamePrefix -}} +{{- $suf := default "" .Values.fullnameSuffix -}} +{{- $gsuf := default "" $global.fullnameSuffix -}} +{{- $name := print $gpre $pre $base $suf $gsuf -}} +{{- $name | lower | trunc 54 | trimSuffix "-" -}} +{{- end -}} + + +{{- /* +chartmuseum.labels.standard prints the standard chartmuseum Helm labels. + +The standard labels are frequently used in metadata. +*/ -}} +{{- define "chartmuseum.labels.standard" -}} +app: {{ template "chartmuseum.name" . }} +chart: {{ template "chartmuseum.chartref" . }} +heritage: {{ .Release.Service | quote }} +release: {{ .Release.Name | quote }} +{{- end -}} + +{{- /* +chartmuseum.chartref prints a chart name and version. + +It does minimal escaping for use in Kubernetes labels. 
+ +Example output: + +chartmuseum-0.4.5 +*/ -}} +{{- define "chartmuseum.chartref" -}} +{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "chartmuseum.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "chartmuseum.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/chartmuseum/templates/deployment.yaml b/chartmuseum/templates/deployment.yaml new file mode 100755 index 0000000..d194aaf --- /dev/null +++ b/chartmuseum/templates/deployment.yaml @@ -0,0 +1,220 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "chartmuseum.fullname" . }} + annotations: +{{ toYaml .Values.deployment.annotations | indent 4 }} + labels: +{{ include "chartmuseum.labels.standard" . | indent 4 }} +{{- if .Values.deployment.labels }} +{{ toYaml .Values.deployment.labels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app: {{ template "chartmuseum.name" . }} + release: {{ .Release.Name | quote }} +{{- if .Values.deployment.labels }} +{{ toYaml .Values.deployment.labels | indent 6 }} +{{- end }} + replicas: {{ .Values.replicaCount }} + strategy: +{{ toYaml .Values.strategy | indent 4 }} + revisionHistoryLimit: 10 +{{- if .Values.deployment.matchlabes }} + selector: + matchLabels: +{{ toYaml .Values.deployment.matchlabels | indent 6 }} +{{- end }} + template: + metadata: + name: {{ include "chartmuseum.fullname" . }} + annotations: +{{ toYaml .Values.replica.annotations | indent 8 }} + labels: + app: {{ template "chartmuseum.name" . 
}} + release: {{ .Release.Name | quote }} +{{- if .Values.deployment.labels }} +{{ toYaml .Values.deployment.labels | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.runAsNonRoot }} + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + {{- end }} + {{- if .Values.securityContext.supplementalGroups }} + supplementalGroups: {{ .Values.securityContext.supplementalGroups }} + {{- end }} + {{- else if .Values.persistence.enabled }} + initContainers: + - name: volume-permissions + image: {{ template "chartmuseum.volumePermissions.image" . }} + imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}" + securityContext: + {{- toYaml .Values.containerSecurityContext | nindent 10 }} + command: ['sh', '-c', 'chown -R {{ .Values.securityContext.fsGroup }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.path }}'] + volumeMounts: + - mountPath: {{ .Values.persistence.path }} + name: storage-volume + {{- end }} +{{- include "chartmuseum.imagePullSecrets" . | indent 6 }} + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + {{- toYaml .Values.containerSecurityContext | nindent 10 }} + env: +{{- range $name, $value := .Values.env.open }} +{{- if not (empty $value) }} + - name: {{ $name | quote }} + value: {{ $value | quote }} +{{- end }} +{{- end }} +{{- range $name, $value := .Values.env.field }} +{{- if not ( empty $value) }} + - name: {{ $name | quote }} + valueFrom: + fieldRef: + fieldPath: {{ $value | quote }} +{{- end }} +{{- end }} +{{- if .Values.gcp.secret.enabled }} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/etc/secrets/google/credentials.json" +{{- end }} +{{- if .Values.env.existingSecret }} +{{- $secret_name := .Values.env.existingSecret }} +{{- range $name, $key := .Values.env.existingSecretMappings }} +{{- if not ( empty $key) }} + - name: {{ $name | quote }} + valueFrom: + secretKeyRef: + name: {{ $secret_name | quote }} + key: {{ $key | quote }} +{{- end }} +{{- end }} +{{- else }} +{{- $secret_name := include "chartmuseum.fullname" . }} +{{- range $name, $value := .Values.env.secret }} +{{- if not ( empty $value) }} + - name: {{ $name | quote }} + valueFrom: + secretKeyRef: + name: {{ $secret_name }} + key: {{ $name | quote }} +{{- end }} +{{- end }} +{{- end }} +{{- if .Values.bearerAuth.secret.enabled }} + - name: AUTH_CERT_PATH + value: /var/keys/public-key.pem +{{ end }} + args: + - --port=8080 +{{- if eq .Values.env.open.STORAGE "local" }} + - --storage-local-rootdir={{ .Values.persistence.path }} +{{- end }} +{{- if .Values.extraArgs }} +{{ toYaml .Values.extraArgs | indent 8 }} +{{- end }} + ports: + - name: http + containerPort: 8080 + livenessProbe: + httpGet: + path: {{ .Values.env.open.CONTEXT_PATH }}/health + port: http +{{ toYaml .Values.probes.liveness | indent 10 }} + readinessProbe: + httpGet: + path: {{ .Values.env.open.CONTEXT_PATH }}/health + port: http +{{ toYaml .Values.probes.readiness | indent 10 }} + volumeMounts: +{{- if eq .Values.env.open.STORAGE "local" }} + - mountPath: {{ .Values.persistence.path }} + name: storage-volume +{{- end }} +{{- if .Values.gcp.secret.enabled }} + - mountPath: /etc/secrets/google + name: {{ include "chartmuseum.fullname" . 
}}-gcp +{{- end }} +{{- if .Values.oracle.secret.enabled }} + - mountPath: /home/chartmuseum/.oci + name: {{ include "chartmuseum.fullname" . }}-oracle +{{- end }} +{{- if .Values.bearerAuth.secret.enabled }} + - name: public-key + mountPath: /var/keys + readOnly: true +{{- end }} + {{- with .Values.resources }} + resources: +{{ toYaml . | indent 10 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.deployment.schedulerName }} + schedulerName: {{ .Values.deployment.schedulerName }} + {{- end -}} + {{- if and .Values.serviceAccount.create .Values.serviceAccount.name }} + serviceAccountName: {{ .Values.serviceAccount.name }} + {{- else if .Values.serviceAccount.create }} + serviceAccountName: {{ include "chartmuseum.fullname" . }} + {{- else if .Values.serviceAccount.name }} + serviceAccountName: {{ .Values.serviceAccount.name }} + {{- end }} + volumes: + - name: storage-volume + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "chartmuseum.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end -}} + {{ if .Values.gcp.secret.enabled }} + - name: {{ include "chartmuseum.fullname" . }}-gcp + secret: + {{ if .Values.env.secret.GOOGLE_CREDENTIALS_JSON }} + secretName: {{ include "chartmuseum.fullname" . }} + items: + - key: GOOGLE_CREDENTIALS_JSON + path: credentials.json + {{ else }} + secretName: {{ .Values.gcp.secret.name }} + items: + - key: {{ .Values.gcp.secret.key }} + path: credentials.json + {{ end }} + {{ end }} + {{ if .Values.oracle.secret.enabled }} + - name: {{ include "chartmuseum.fullname" . }}-oracle + secret: + secretName: {{ .Values.oracle.secret.name }} + items: + - key: {{ .Values.oracle.secret.config }} + path: config + - key: {{ .Values.oracle.secret.key_file }} + path: oci.key + {{ end }} +{{- if .Values.bearerAuth.secret.enabled }} + - name: public-key + secret: + secretName: {{ .Values.bearerAuth.secret.publicKeySecret }} +{{- end }} diff --git a/chartmuseum/templates/ingress.yaml b/chartmuseum/templates/ingress.yaml new file mode 100755 index 0000000..5fa52e2 --- /dev/null +++ b/chartmuseum/templates/ingress.yaml @@ -0,0 +1,54 @@ +{{- if .Values.ingress.enabled }} +{{- $servicePort := .Values.service.externalPort -}} +{{- $serviceName := include "chartmuseum.fullname" . -}} +{{- $ingressExtraPaths := .Values.ingress.extraPaths -}} +--- +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ include "chartmuseum.fullname" . }} + annotations: +{{ toYaml .Values.ingress.annotations | indent 4 }} + labels: +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} +{{ include "chartmuseum.labels.standard" . 
| indent 4 }} +spec: + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .name }} + http: + paths: + {{- range $ingressExtraPaths }} + - path: {{ default "/" .path | quote }} + backend: + {{- if $.Values.service.servicename }} + serviceName: {{ $.Values.service.servicename }} + {{- else }} + serviceName: {{ default $serviceName .service }} + {{- end }} + servicePort: {{ default $servicePort .port }} + {{- end }} + - path: {{ default "/" .path | quote }} + backend: + {{- if $.Values.service.servicename }} + serviceName: {{ $.Values.service.servicename }} + {{- else }} + serviceName: {{ default $serviceName .service }} + {{- end }} + servicePort: {{ default $servicePort .servicePort }} + {{- end }} + tls: + {{- range .Values.ingress.hosts }} + {{- if .tls }} + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/chartmuseum/templates/pv.yaml b/chartmuseum/templates/pv.yaml new file mode 100755 index 0000000..1aaff0f --- /dev/null +++ b/chartmuseum/templates/pv.yaml @@ -0,0 +1,21 @@ +{{- if .Values.persistence.pv.enabled -}} +apiVersion: v1 +kind: PersistentVolume +metadata: +{{- if .Values.persistence.pv.pvname }} + name: {{ .Values.persistence.pv.pvname }} +{{- else }} + name: {{ include "chartmuseum.fullname" . }} +{{- end }} + labels: + app: {{ include "chartmuseum.fullname" . }} + release: {{ .Release.Name | quote }} +spec: + capacity: + storage: {{ .Values.persistence.pv.capacity.storage }} + accessModes: + - {{ .Values.persistence.pv.accessMode | quote }} + nfs: + server: {{ .Values.persistence.pv.nfs.server }} + path: {{ .Values.persistence.pv.nfs.path | quote }} +{{- end }} \ No newline at end of file diff --git a/chartmuseum/templates/pvc.yaml b/chartmuseum/templates/pvc.yaml new file mode 100755 index 0000000..aaedace --- /dev/null +++ b/chartmuseum/templates/pvc.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "chartmuseum.fullname" . }} + labels: + app: {{ include "chartmuseum.fullname" . }} + release: {{ .Release.Name | quote }} +{{- if .Values.persistence.labels }} +{{ toYaml .Values.persistence.labels | indent 4 }} +{{- end }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- else if and .Values.persistence.volumeName (.Values.persistence.pv.enabled) }} + volumeName: "{{ .Values.persistence.volumeName }}" +{{- end }} +{{- end }} diff --git a/chartmuseum/templates/secret.yaml b/chartmuseum/templates/secret.yaml new file mode 100755 index 0000000..d4c837c --- /dev/null +++ b/chartmuseum/templates/secret.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.env.existingSecret -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "chartmuseum.fullname" . }} + labels: +{{- if .Values.secret.labels }} +{{ toYaml .Values.secret.labels | indent 4 }} +{{- end }} +{{ include "chartmuseum.labels.standard" . 
| indent 4 }} +type: Opaque +data: +{{- range $name, $value := .Values.env.secret }} +{{- if not (empty $value) }} +{{- if eq $name "GOOGLE_CREDENTIALS_JSON" }} + {{ $name }}: {{ $value }} + {{- else }} + {{ $name }}: {{ $value | b64enc }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/chartmuseum/templates/service.yaml b/chartmuseum/templates/service.yaml new file mode 100755 index 0000000..cc5a6a8 --- /dev/null +++ b/chartmuseum/templates/service.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.service.servicename }} + name: {{ .Values.service.servicename }} +{{- else }} + name: {{ include "chartmuseum.fullname" . }} +{{- end }} + labels: +{{ include "chartmuseum.labels.standard" . | indent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if (or (eq .Values.service.type "LoadBalancer") (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort)))) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} + {{- end }} + {{- if (and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges) }} + loadBalancerSourceRanges: + {{- with .Values.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 2 }} + {{- end }} + {{- end }} + {{- if eq .Values.service.type "ClusterIP" }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- end }} + ports: + - port: {{ .Values.service.externalPort }} +{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{.Values.service.nodePort}} +{{- else }} + targetPort: http +{{- end }} + protocol: TCP + name: http + selector: + app: {{ template "chartmuseum.name" . }} + release: {{ .Release.Name | quote }} diff --git a/chartmuseum/templates/serviceaccount.yaml b/chartmuseum/templates/serviceaccount.yaml new file mode 100755 index 0000000..2561395 --- /dev/null +++ b/chartmuseum/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create -}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: +{{- if .Values.serviceAccount.name }} + name: {{ .Values.serviceAccount.name }} +{{- else }} + name: {{ include "chartmuseum.fullname" . }} +{{- end }} + labels: +{{ include "chartmuseum.labels.standard" . | indent 4 }} +{{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +{{- end -}} diff --git a/chartmuseum/templates/servicemonitor.yaml b/chartmuseum/templates/servicemonitor.yaml new file mode 100755 index 0000000..03dfb54 --- /dev/null +++ b/chartmuseum/templates/servicemonitor.yaml @@ -0,0 +1,34 @@ +{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.serviceMonitor.enabled ) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: +{{- if .Values.serviceMonitor.labels }} + labels: +{{ toYaml .Values.serviceMonitor.labels | indent 4 }} +{{- end }} + name: {{ template "chartmuseum.fullname" . 
}} + namespace: {{ .Release.Namespace }} +{{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} +{{- end }} +spec: + endpoints: + - targetPort: 8080 +{{- if .Values.serviceMonitor.interval }} + interval: {{ .Values.serviceMonitor.interval }} +{{- end }} +{{- if .Values.serviceMonitor.metricsPath }} + path: {{ .Values.serviceMonitor.metricsPath }} +{{- end }} +{{- if .Values.serviceMonitor.timeout }} + scrapeTimeout: {{ .Values.serviceMonitor.timeout }} +{{- end }} + jobLabel: {{ template "chartmuseum.fullname" . }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "chartmuseum.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/chartmuseum/values.yaml b/chartmuseum/values.yaml new file mode 100755 index 0000000..8bd7912 --- /dev/null +++ b/chartmuseum/values.yaml @@ -0,0 +1,306 @@ +extraArgs: + # - --storage-timestamp-tolerance 1s +replicaCount: 1 +strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 +image: + repository: chartmuseum/chartmuseum + tag: v0.12.0 + pullPolicy: IfNotPresent +secret: + labels: {} +env: + open: + # storage backend, can be one of: local, alibaba, amazon, google, microsoft, oracle + STORAGE: local + # oss bucket to store charts for alibaba storage backend + STORAGE_ALIBABA_BUCKET: + # prefix to store charts for alibaba storage backend + STORAGE_ALIBABA_PREFIX: + # oss endpoint to store charts for alibaba storage backend + STORAGE_ALIBABA_ENDPOINT: + # server side encryption algorithm for alibaba storage backend, can be one + # of: AES256 or KMS + STORAGE_ALIBABA_SSE: + # s3 bucket to store charts for amazon storage backend + STORAGE_AMAZON_BUCKET: + # prefix to store charts for amazon storage backend + STORAGE_AMAZON_PREFIX: + # region of s3 bucket to store charts + STORAGE_AMAZON_REGION: + # alternative s3 endpoint + STORAGE_AMAZON_ENDPOINT: + # server side encryption algorithm + STORAGE_AMAZON_SSE: + # gcs bucket to store charts for google storage backend + STORAGE_GOOGLE_BUCKET: + # prefix to store charts for google storage backend + STORAGE_GOOGLE_PREFIX: + # container to store charts for microsoft storage backend + STORAGE_MICROSOFT_CONTAINER: + # prefix to store charts for microsoft storage backend + STORAGE_MICROSOFT_PREFIX: + # container to store charts for openstack storage backend + STORAGE_OPENSTACK_CONTAINER: + # prefix to store charts for openstack storage backend + STORAGE_OPENSTACK_PREFIX: + # region of openstack container + STORAGE_OPENSTACK_REGION: + # path to a CA cert bundle for your openstack endpoint + STORAGE_OPENSTACK_CACERT: + # compartment id for for oracle storage backend + STORAGE_ORACLE_COMPARTMENTID: + # oci bucket to store charts for oracle storage backend + STORAGE_ORACLE_BUCKET: + # prefix to store charts for oracle storage backend + STORAGE_ORACLE_PREFIX: + # form field which will be queried for the chart file content + CHART_POST_FORM_FIELD_NAME: chart + # form field which will be queried for the provenance file content + PROV_POST_FORM_FIELD_NAME: prov + # levels of nested repos for multitenancy. 
The default depth is 0 (singletenant server) + DEPTH: 0 + # show debug messages + DEBUG: false + # output structured logs as json + LOG_JSON: true + # disable use of index-cache.yaml + DISABLE_STATEFILES: false + # disable Prometheus metrics + DISABLE_METRICS: true + # disable all routes prefixed with /api + DISABLE_API: true + # allow chart versions to be re-uploaded + ALLOW_OVERWRITE: false + # absolute url for .tgzs in index.yaml + CHART_URL: + # allow anonymous GET operations when auth is used + AUTH_ANONYMOUS_GET: false + # sets the base context path + CONTEXT_PATH: + # parallel scan limit for the repo indexer + INDEX_LIMIT: 0 + # cache store, can be one of: redis (leave blank for inmemory cache) + CACHE: + # address of Redis service (host:port) + CACHE_REDIS_ADDR: + # Redis database to be selected after connect + CACHE_REDIS_DB: 0 + # enable bearer auth + BEARER_AUTH: false + # auth realm used for bearer auth + AUTH_REALM: + # auth service used for bearer auth + AUTH_SERVICE: + field: + # POD_IP: status.podIP + secret: + # username for basic http authentication + BASIC_AUTH_USER: + # password for basic http authentication + BASIC_AUTH_PASS: + # GCP service account json file + GOOGLE_CREDENTIALS_JSON: + # Redis requirepass server configuration + CACHE_REDIS_PASSWORD: + # Name of an existing secret to get the secret values ftom + existingSecret: + # Stores Enviromnt Variable to secret key name mappings + existingSecretMappings: + # username for basic http authentication + BASIC_AUTH_USER: + # password for basic http authentication + BASIC_AUTH_PASS: + # GCP service account json file + GOOGLE_CREDENTIALS_JSON: + # Redis requirepass server configuration + CACHE_REDIS_PASSWORD: + +deployment: + # Define scheduler name. Use of 'default' if empty + schedulerName: "" + ## Chartmuseum Deployment annotations + annotations: {} + # name: value + labels: {} + # name: value + matchlabels: {} + # name: value +replica: + ## Chartmuseum Replicas annotations + annotations: {} + ## Read more about kube2iam to provide access to s3 https://github.com/jtblin/kube2iam + # iam.amazonaws.com/role: role-arn +service: + servicename: + type: ClusterIP + externalTrafficPolicy: Local + ## Limits which cidr blocks can connect to service's load balancer + ## Only valid if service.type: LoadBalancer + loadBalancerSourceRanges: [] + # clusterIP: None + externalPort: 8080 + nodePort: + annotations: {} + labels: {} + +serviceMonitor: + enabled: false + # namespace: prometheus + labels: {} + metricsPath: "/metrics" + # timeout: 60 + # interval: 60 + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 80m +# memory: 64Mi + +probes: + liveness: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readiness: + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + +serviceAccount: + create: false + # name: + ## Annotations for the Service Account + annotations: {} + +# UID/GID 1000 is the default user "chartmuseum" used in +# the container image starting in v0.8.0 and above. This +# is required for local persistent storage. 
If your cluster +# does not allow this, try setting securityContext: {} +securityContext: + enabled: true + fsGroup: 1000 + ## Optionally, specify supplementalGroups and/or + ## runAsNonRoot for security purposes + # runAsNonRoot: true + # supplementalGroups: [1000] + +containerSecurityContext: {} + +priorityClassName: "" + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +persistence: + enabled: false + accessMode: ReadWriteOnce + size: 8Gi + labels: {} + path: /storage + # name: value + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## Chartmuseum data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + # volumeName: + pv: + enabled: false + pvname: + capacity: + storage: 8Gi + accessMode: ReadWriteOnce + nfs: + server: + path: + +## Init containers parameters: +## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Ingress for load balancer +ingress: + enabled: false +## Chartmuseum Ingress labels +## +# labels: +# dns: "route53" + +## Chartmuseum Ingress annotations +## +# annotations: +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" + +## Chartmuseum Ingress hostnames +## Must be provided if Ingress is enabled +## +# hosts: +# - name: chartmuseum.domain1.com +# path: / +# tls: false +# - name: chartmuseum.domain2.com +# path: / +# +# ## Set this to true in order to enable TLS on the ingress record +# tls: true +# +# ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS +# ## Secrets must be added manually to the namespace +# tlsSecret: chartmuseum.domain2-tls + +# Adding secrets to tiller is not a great option, so If you want to use an existing +# secret that contains the json file, you can use the following entries +gcp: + secret: + enabled: false + # Name of the secret that contains the encoded json + name: + # Secret key that holds the json value. + key: credentials.json +oracle: + secret: + enabled: false + # Name of the secret that contains the encoded config and key + name: + # Secret key that holds the oci config + config: config + # Secret key that holds the oci private key + key_file: key_file +bearerAuth: + secret: + enabled: false + publicKeySecret: chartmuseum-public-key diff --git a/docker-registry/.helmignore b/docker-registry/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/docker-registry/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/docker-registry/Chart.yaml b/docker-registry/Chart.yaml new file mode 100644 index 0000000..c4e0e5d --- /dev/null +++ b/docker-registry/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +description: DEPRECATED A Helm chart for Docker Registry +name: docker-registry +version: 1.9.6 +appVersion: 2.7.1 +home: https://hub.docker.com/_/registry/ +icon: https://hub.docker.com/public/images/logos/mini-logo.svg +sources: + - https://github.com/docker/distribution-library-image +deprecated: true diff --git a/docker-registry/README.md b/docker-registry/README.md new file mode 100644 index 0000000..7a8de85 --- /dev/null +++ b/docker-registry/README.md @@ -0,0 +1,95 @@ +# ⚠️ Repo Archive Notice + +As of Nov 13, 2020, charts in this repo will no longer be updated. +For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/). + +# Docker Registry Helm Chart + +This directory contains a Kubernetes chart to deploy a private Docker Registry. + +## DEPRECATION NOTICE + +This chart is deprecated and no longer supported. + +## Prerequisites Details + +* PV support on underlying infrastructure (if persistence is required) + +## Chart Details + +This chart will do the following: + +* Implement a Docker registry deployment + +## Installing the Chart + +To install the chart, use the following: + +```console +$ helm install stable/docker-registry +``` + +## Configuration + +The following table lists the configurable parameters of the docker-registry chart and +their default values. 
+ +| Parameter | Description | Default | +|:----------------------------|:-------------------------------------------------------------------------------------------|:----------------| +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `image.repository` | Container image to use | `registry` | +| `image.tag` | Container image tag to deploy | `2.7.1` | +| `imagePullSecrets` | Specify image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `persistence.accessMode` | Access mode to use for PVC | `ReadWriteOnce` | +| `persistence.enabled` | Whether to use a PVC for the Docker storage | `false` | +| `persistence.deleteEnabled` | Enable the deletion of image blobs and manifests by digest | `nil` | +| `persistence.size` | Amount of space to claim for PVC | `10Gi` | +| `persistence.storageClass` | Storage Class to use for PVC | `-` | +| `persistence.existingClaim` | Name of an existing PVC to use for config | `nil` | +| `service.port` | TCP port on which the service is exposed | `5000` | +| `service.type` | service type | `ClusterIP` | +| `service.clusterIP` | if `service.type` is `ClusterIP` and this is non-empty, sets the cluster IP of the service | `nil` | +| `service.nodePort` | if `service.type` is `NodePort` and this is non-empty, sets the node port of the service | `nil` | +| `service.loadBalancerIP | if `service.type` is `LoadBalancer` and this is non-empty, sets the loadBalancerIP of the service | `nil` | +| `service.loadBalancerSourceRanges`| if `service.type` is `LoadBalancer` and this is non-empty, sets the loadBalancerSourceRanges of the service | `nil` | +| `replicaCount` | k8s replicas | `1` | +| `updateStrategy` | update strategy for deployment | `{}` | +| `podAnnotations` | Annotations for pod | `{}` | +| `podLabels` | Labels for pod | `{}` | +| `podDisruptionBudget` | Pod disruption budget | `{}` | +| `resources.limits.cpu` | Container requested CPU | `nil` | +| `resources.limits.memory` | Container requested memory | `nil` | +| `priorityClassName ` | priorityClassName | `""` | +| `storage` | Storage system to use | `filesystem` | +| `tlsSecretName` | Name of secret for TLS certs | `nil` | +| `secrets.htpasswd` | Htpasswd authentication | `nil` | +| `secrets.s3.accessKey` | Access Key for S3 configuration | `nil` | +| `secrets.s3.secretKey` | Secret Key for S3 configuration | `nil` | +| `secrets.swift.username` | Username for Swift configuration | `nil` | +| `secrets.swift.password` | Password for Swift configuration | `nil` | +| `haSharedSecret` | Shared secret for Registry | `nil` | +| `configData` | Configuration hash for docker | `nil` | +| `s3.region` | S3 region | `nil` | +| `s3.regionEndpoint` | S3 region endpoint | `nil` | +| `s3.bucket` | S3 bucket name | `nil` | +| `s3.encrypt` | Store images in encrypted format | `nil` | +| `s3.secure` | Use HTTPS | `nil` | +| `swift.authurl` | Swift authurl | `nil` | +| `swift.container` | Swift container | `nil` | +| `nodeSelector` | node labels for pod assignment | `{}` | +| `affinity` | affinity settings | `{}` | +| `tolerations` | pod tolerations | `[]` | +| `ingress.enabled` | If true, Ingress will be created | `false` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Ingress labels | `{}` | +| `ingress.path` | Ingress service path | `/` | +| `ingress.hosts` | Ingress hostnames | `[]` | +| `ingress.tls` | Ingress TLS configuration (YAML) | `[]` | +| `extraVolumeMounts` | Additional volumeMounts to the registry container | `[]` | +| `extraVolumes` | 
Additional volumes to the pod | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument to +`helm install`. + +To generate htpasswd file, run this docker command: +`docker run --entrypoint htpasswd registry:2 -Bbn user password > ./htpasswd`. diff --git a/docker-registry/templates/NOTES.txt b/docker-registry/templates/NOTES.txt new file mode 100644 index 0000000..4a9152b --- /dev/null +++ b/docker-registry/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "docker-registry.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "docker-registry.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "docker-registry.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.externalPort }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "docker-registry.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl -n {{ .Release.Namespace }} port-forward $POD_NAME 8080:5000 +{{- end }} diff --git a/docker-registry/templates/_helpers.tpl b/docker-registry/templates/_helpers.tpl new file mode 100644 index 0000000..a91077e --- /dev/null +++ b/docker-registry/templates/_helpers.tpl @@ -0,0 +1,24 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "docker-registry.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "docker-registry.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/docker-registry/templates/configmap.yaml b/docker-registry/templates/configmap.yaml new file mode 100644 index 0000000..820bb4f --- /dev/null +++ b/docker-registry/templates/configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "docker-registry.fullname" . }}-config + labels: + app: {{ template "docker-registry.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + config.yml: |- +{{ toYaml .Values.configData | indent 4 }} diff --git a/docker-registry/templates/deployment.yaml b/docker-registry/templates/deployment.yaml new file mode 100644 index 0000000..a146d76 --- /dev/null +++ b/docker-registry/templates/deployment.yaml @@ -0,0 +1,221 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "docker-registry.fullname" . }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "docker-registry.name" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.replicaCount }} +{{- if .Values.updateStrategy }} + strategy: +{{ toYaml .Values.updateStrategy | indent 4 }} +{{- end }} + minReadySeconds: 5 + template: + metadata: + labels: + app: {{ template "docker-registry.name" . }} + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- if $.Values.podAnnotations }} +{{ toYaml $.Values.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" +{{- end }} +{{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/registry + - serve + - /etc/docker/registry/config.yml + ports: + - containerPort: 5000 + livenessProbe: + httpGet: +{{- if .Values.tlsSecretName }} + scheme: HTTPS +{{- end }} + path: / + port: 5000 + readinessProbe: + httpGet: +{{- if .Values.tlsSecretName }} + scheme: HTTPS +{{- end }} + path: / + port: 5000 + resources: +{{ toYaml .Values.resources | indent 12 }} + env: +{{- if .Values.secrets.htpasswd }} + - name: REGISTRY_AUTH + value: "htpasswd" + - name: REGISTRY_AUTH_HTPASSWD_REALM + value: "Registry Realm" + - name: REGISTRY_AUTH_HTPASSWD_PATH + value: "/auth/htpasswd" +{{- end }} + - name: REGISTRY_HTTP_SECRET + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: haSharedSecret +{{- if .Values.tlsSecretName }} + - name: REGISTRY_HTTP_TLS_CERTIFICATE + value: /etc/ssl/docker/tls.crt + - name: REGISTRY_HTTP_TLS_KEY + value: /etc/ssl/docker/tls.key +{{- end }} +{{- if eq .Values.storage "filesystem" }} + - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY + value: "/var/lib/registry" +{{- else if eq .Values.storage "azure" }} + - name: REGISTRY_STORAGE_AZURE_ACCOUNTNAME + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: azureAccountName + - name: REGISTRY_STORAGE_AZURE_ACCOUNTKEY + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: azureAccountKey + - name: REGISTRY_STORAGE_AZURE_CONTAINER + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . 
}}-secret + key: azureContainer +{{- else if eq .Values.storage "s3" }} + {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }} + - name: REGISTRY_STORAGE_S3_ACCESSKEY + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: s3AccessKey + - name: REGISTRY_STORAGE_S3_SECRETKEY + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: s3SecretKey + {{- end }} + - name: REGISTRY_STORAGE_S3_REGION + value: {{ required ".Values.s3.region is required" .Values.s3.region }} + {{- if .Values.s3.regionEndpoint }} + - name: REGISTRY_STORAGE_S3_REGIONENDPOINT + value: {{ .Values.s3.regionEndpoint }} + {{- end }} + - name: REGISTRY_STORAGE_S3_BUCKET + value: {{ required ".Values.s3.bucket is required" .Values.s3.bucket }} + {{- if .Values.s3.encrypt }} + - name: REGISTRY_STORAGE_S3_ENCRYPT + value: {{ .Values.s3.encrypt | quote }} + {{- end }} + {{- if .Values.s3.secure }} + - name: REGISTRY_STORAGE_S3_SECURE + value: {{ .Values.s3.secure | quote }} + {{- end }} +{{- else if eq .Values.storage "swift" }} + - name: REGISTRY_STORAGE_SWIFT_AUTHURL + value: {{ required ".Values.swift.authurl is required" .Values.swift.authurl }} + - name: REGISTRY_STORAGE_SWIFT_USERNAME + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: swiftUsername + - name: REGISTRY_STORAGE_SWIFT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: swiftPassword + - name: REGISTRY_STORAGE_SWIFT_CONTAINER + value: {{ required ".Values.swift.container is required" .Values.swift.container }} +{{- end }} +{{- if .Values.persistence.deleteEnabled }} + - name: REGISTRY_STORAGE_DELETE_ENABLED + value: "true" +{{- end }} + volumeMounts: +{{- if .Values.secrets.htpasswd }} + - name: auth + mountPath: /auth + readOnly: true +{{- end }} +{{- if eq .Values.storage "filesystem" }} + - name: data + mountPath: /var/lib/registry/ +{{- end }} + - name: "{{ template "docker-registry.fullname" . }}-config" + mountPath: "/etc/docker/registry" +{{- if .Values.tlsSecretName }} + - mountPath: /etc/ssl/docker + name: tls-cert + readOnly: true +{{- end }} +{{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} +{{- end }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} +{{- end }} +{{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} +{{- end }} + volumes: +{{- if .Values.secrets.htpasswd }} + - name: auth + secret: + secretName: {{ template "docker-registry.fullname" . }}-secret + items: + - key: htpasswd + path: htpasswd +{{- end }} +{{- if eq .Values.storage "filesystem" }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "docker-registry.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end }} + - name: {{ template "docker-registry.fullname" . }}-config + configMap: + name: {{ template "docker-registry.fullname" . }}-config +{{- if .Values.tlsSecretName }} + - name: tls-cert + secret: + secretName: {{ .Values.tlsSecretName }} +{{- end }} +{{- with .Values.extraVolumes }} + {{- toYaml . 
| nindent 8 }} +{{- end }} diff --git a/docker-registry/templates/ingress.yaml b/docker-registry/templates/ingress.yaml new file mode 100644 index 0000000..58ab5fa --- /dev/null +++ b/docker-registry/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "docker-registry.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $path := .Values.ingress.path -}} +apiVersion: {{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} networking.k8s.io/v1beta1 {{- else }} extensions/v1beta1 {{- end }} +kind: Ingress +metadata: + name: {{ template "docker-registry.fullname" . }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: {{ $path }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/docker-registry/templates/poddisruptionbudget.yaml b/docker-registry/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..38eb384 --- /dev/null +++ b/docker-registry/templates/poddisruptionbudget.yaml @@ -0,0 +1,17 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "docker-registry.fullname" . }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "docker-registry.name" . }} + release: {{ .Release.Name }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/docker-registry/templates/pvc.yaml b/docker-registry/templates/pvc.yaml new file mode 100644 index 0000000..1619617 --- /dev/null +++ b/docker-registry/templates/pvc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.persistence.enabled }} +{{- if not .Values.persistence.existingClaim -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "docker-registry.fullname" . }} + labels: + app: {{ template "docker-registry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/docker-registry/templates/secret.yaml b/docker-registry/templates/secret.yaml new file mode 100644 index 0000000..c22fd30 --- /dev/null +++ b/docker-registry/templates/secret.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "docker-registry.fullname" . }}-secret + labels: + app: {{ template "docker-registry.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- if .Values.secrets.htpasswd }} + htpasswd: {{ .Values.secrets.htpasswd | b64enc }} + {{- end }} + {{- if .Values.secrets.haSharedSecret }} + haSharedSecret: {{ .Values.secrets.haSharedSecret | b64enc | quote }} + {{- else }} + haSharedSecret: {{ randAlphaNum 16 | b64enc | quote }} + {{- end }} + + {{- if eq .Values.storage "azure" }} + {{- if and .Values.secrets.azure.accountName .Values.secrets.azure.accountKey .Values.secrets.azure.container }} + azureAccountName: {{ .Values.secrets.azure.accountName | b64enc | quote }} + azureAccountKey: {{ .Values.secrets.azure.accountKey | b64enc | quote }} + azureContainer: {{ .Values.secrets.azure.container | b64enc | quote }} + {{- end }} + {{- else if eq .Values.storage "s3" }} + {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }} + s3AccessKey: {{ .Values.secrets.s3.accessKey | b64enc | quote }} + s3SecretKey: {{ .Values.secrets.s3.secretKey | b64enc | quote }} + {{- end }} + {{- else if eq .Values.storage "swift" }} + {{- if and .Values.secrets.swift.username .Values.secrets.swift.password }} + swiftUsername: {{ .Values.secrets.swift.username | b64enc | quote }} + swiftPassword: {{ .Values.secrets.swift.password | b64enc | quote }} + {{- end }} + {{- end }} diff --git a/docker-registry/templates/service.yaml b/docker-registry/templates/service.yaml new file mode 100644 index 0000000..70893ab --- /dev/null +++ b/docker-registry/templates/service.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "docker-registry.fullname" . }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} +{{- if (and (eq .Values.service.type "ClusterIP") (not (empty .Values.service.clusterIP))) }} + clusterIP: {{ .Values.service.clusterIP }} +{{- end }} +{{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} +{{- end }} +{{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges))) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} +{{- end }} + ports: + - port: {{ .Values.service.port }} + protocol: TCP + name: {{ .Values.service.name }} + targetPort: 5000 +{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "docker-registry.name" . }} + release: {{ .Release.Name }} diff --git a/docker-registry/values.yaml b/docker-registry/values.yaml new file mode 100644 index 0000000..b977966 --- /dev/null +++ b/docker-registry/values.yaml @@ -0,0 +1,147 @@ +# Default values for docker-registry. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+replicaCount: 1 + +updateStrategy: + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + +podAnnotations: {} +podLabels: {} + +image: + repository: registry + tag: 2.7.1 + pullPolicy: IfNotPresent +# imagePullSecrets: + # - name: docker +service: + name: registry + type: ClusterIP + # clusterIP: + port: 5000 + # nodePort: + # loadBalancerIP: + # loadBalancerSourceRanges: + annotations: {} + # foo.io/bar: "true" +ingress: + enabled: false + path: / + # Used to create an Ingress record. + hosts: + - chart-example.local + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + tls: + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - chart-example.local +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi +persistence: + accessMode: 'ReadWriteOnce' + enabled: false + size: 10Gi + # storageClass: '-' + +# set the type of filesystem to use: filesystem, s3 +storage: filesystem + +# Set this to name of secret for tls certs +# tlsSecretName: registry.docker.example.com +secrets: + haSharedSecret: "" + htpasswd: "" +# Secrets for Azure +# azure: +# accountName: "" +# accountKey: "" +# container: "" +# Secrets for S3 access and secret keys +# s3: +# accessKey: "" +# secretKey: "" +# Secrets for Swift username and password +# swift: +# username: "" +# password: "" + +# Options for s3 storage type: +# s3: +# region: us-east-1 +# regionEndpoint: s3.us-east-1.amazonaws.com +# bucket: my-bucket +# encrypt: false +# secure: true + +# Options for swift storage type: +# swift: +# authurl: http://swift.example.com/ +# container: my-container + +configData: + version: 0.1 + log: + fields: + service: registry + storage: + cache: + blobdescriptor: inmemory + http: + addr: :5000 + headers: + X-Content-Type-Options: [nosniff] + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + +securityContext: + enabled: true + runAsUser: 1000 + fsGroup: 1000 + +priorityClassName: "" + +podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 2 + +nodeSelector: {} + +affinity: {} + +tolerations: [] + +extraVolumeMounts: [] +## Additional volumeMounts to the registry container. +# - mountPath: /secret-data +# name: cloudfront-pem-secret +# readOnly: true + +extraVolumes: [] +## Additional volumes to the pod. +# - name: cloudfront-pem-secret +# secret: +# secretName: cloudfront-credentials +# items: +# - key: cloudfront.pem +# path: cloudfront.pem +# mode: 511 diff --git a/dovecot/.helmignore b/dovecot/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/dovecot/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/dovecot/Chart.yaml b/dovecot/Chart.yaml new file mode 100644 index 0000000..40672f1 --- /dev/null +++ b/dovecot/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: dovecot +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.2 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. +appVersion: 2.3.8 diff --git a/dovecot/templates/NOTES.txt b/dovecot/templates/NOTES.txt new file mode 100644 index 0000000..b12d4b9 --- /dev/null +++ b/dovecot/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "dovecot.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "dovecot.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "dovecot.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "dovecot.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/dovecot/templates/_helpers.tpl b/dovecot/templates/_helpers.tpl new file mode 100644 index 0000000..c96751f --- /dev/null +++ b/dovecot/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "dovecot.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "dovecot.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "dovecot.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "dovecot.labels" -}} +helm.sh/chart: {{ include "dovecot.chart" . }} +{{ include "dovecot.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "dovecot.selectorLabels" -}} +app.kubernetes.io/name: {{ include "dovecot.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "dovecot.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "dovecot.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/dovecot/templates/configmap.yaml b/dovecot/templates/configmap.yaml new file mode 100644 index 0000000..35cfe18 --- /dev/null +++ b/dovecot/templates/configmap.yaml @@ -0,0 +1,21 @@ +{{- range $key, $val := .Values.dovecot.configmaps.dovecot }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: dovecot-{{ $key }} +data: +{{ $key | indent 2 }}: | +{{ $val | indent 4 }} +{{- end }} + +{{- range $key, $val := .Values.dovecot.configmaps.confd }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: dovecot-confd-{{ $key }} +data: +{{ $key | indent 2 }}: | +{{ $val | indent 4 }} +{{- end }} diff --git a/dovecot/templates/deployment.yaml b/dovecot/templates/deployment.yaml new file mode 100644 index 0000000..2429c29 --- /dev/null +++ b/dovecot/templates/deployment.yaml @@ -0,0 +1,106 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "dovecot.fullname" . }} + labels: + {{- include "dovecot.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "dovecot.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "dovecot.selectorLabels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "dovecot.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: dovecot + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.dovecot.image.repository }}:{{ .Values.dovecot.image.tag }}" + imagePullPolicy: {{ .Values.dovecot.image.pullPolicy }} + ports: + - name: lmtp + containerPort: 24 + protocol: TCP + - name: pop3 + containerPort: 1110 + protocol: TCP + - name: imap + containerPort: 1109 + protocol: TCP + - name: sieve + containerPort: 4190 + protocol: TCP + - name: imaps + containerPort: 10993 + protocol: TCP + - name: pop3s + containerPort: 10995 + protocol: TCP + - name: auth + containerPort: 12345 + protocol: TCP + volumeMounts: + - name: data + mountPath: /home/vmail + - mountPath: /tls + name: tls + {{- range $key, $val := .Values.dovecot.configmaps.dovecot }} + - name: dovecot-{{ $key }} + mountPath: "/etc/dovecot/{{ $key }}.conf" + subPath: {{ $key }} + {{- end }} + {{- range $key, $val := .Values.dovecot.configmaps.confd }} + - name: dovecot-confd-{{ $key }} + mountPath: "/etc/dovecot/conf.d/{{ $key }}.conf" + subPath: {{ $key }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: tls + secret: + defaultMode: 420 + secretName: {{ if .Values.tls.existingSecret }}{{ .Values.tls.existingSecret }}{{- end }} + {{- end }} + {{- range $key, $val := .Values.dovecot.configmaps.dovecot }} + - name: dovecot-{{ $key }} + configMap: + name: dovecot-{{ $key }} + {{- end }} + {{- range $key, $val := .Values.dovecot.configmaps.confd }} + - name: dovecot-confd-{{ $key }} + configMap: + name: dovecot-confd-{{ $key }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/dovecot/templates/persistent-volume-claim.yaml b/dovecot/templates/persistent-volume-claim.yaml new file mode 100644 index 0000000..901ec3e --- /dev/null +++ b/dovecot/templates/persistent-volume-claim.yaml @@ -0,0 +1,24 @@ +{{- if .Values.persistence.enabled }} +{{- if not .Values.persistence.existingClaim -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ default "mailboxes" .Values.persistence.volumeName }} + labels: + app: {{ template "dovecot.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} +{{- if .Values.persistence.storageClass }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- else }} + storageClassName: "" +{{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- end -}} +{{- end }} diff --git a/dovecot/templates/service.yaml b/dovecot/templates/service.yaml new file mode 100644 index 0000000..287e600 --- /dev/null +++ b/dovecot/templates/service.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "dovecot.fullname" . }} + labels: + {{- include "dovecot.labels" . 
| nindent 4 }} +spec: +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP | quote }} +{{- end }} + type: {{ .Values.service.type }} + ports: + - port: 24 + targetPort: 24 + protocol: TCP + name: lmtp + - port: 1110 + targetPort: 1110 + protocol: TCP + name: pop3 + - port: 1109 + targetPort: 1109 + protocol: TCP + name: imap + - port: 4190 + targetPort: 4190 + protocol: TCP + name: sieve + - port: 10993 + targetPort: 10993 + protocol: TCP + name: imaps + - port: 10995 + targetPort: 10995 + protocol: TCP + name: pop3s + - port: 12345 + targetPort: 12345 + protocol: TCP + name: auth + selector: + {{- include "dovecot.selectorLabels" . | nindent 4 }} diff --git a/dovecot/templates/serviceaccount.yaml b/dovecot/templates/serviceaccount.yaml new file mode 100644 index 0000000..697fd5b --- /dev/null +++ b/dovecot/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "dovecot.serviceAccountName" . }} + labels: + {{- include "dovecot.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/dovecot/values.yaml b/dovecot/values.yaml new file mode 100644 index 0000000..75f3f34 --- /dev/null +++ b/dovecot/values.yaml @@ -0,0 +1,212 @@ +# Default values for dovecot. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 +persistence: + enabled: true + existingClaim: mailboxes + +tls: + enabled: true + existingSecret: mail.example.com-secret + +dovecot: + image: + repository: registry.geekhome.org/dovecot + tag: 2.3.8 + pullPolicy: Always + configmaps: + dovecot: + dovecot: | + mail_max_userip_connections = 100 + + haproxy_trusted_networks = 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 + haproxy_timeout = 30s + dict { + #quota = mysql:/etc/dovecot/dovecot-dict-sql.conf.ext + #expire = sqlite:/etc/dovecot/dovecot-dict-sql.conf.ext + } + + # Most of the actual configuration gets included below. The filenames are + # first sorted by their ASCII value and parsed in that order. The 00-prefixes + # in filenames are intended to make it easier to understand the ordering. 
+ !include conf.d/*.conf + + # A config file can also tried to be included without giving an error if + # it's not found: + !include_try local.conf + ldap: | + uris = ldaps://openldap.example.com + dn = uid=ldapbind,ou=services,dc=example,dc=com + dnpass = HARD_PASSWORD + auth_bind = yes + auth_bind_userdn = uid=%n,ou=users,dc=example,dc=com + tls = no + ldap_version = 3 + base = ou=users,dc=example,dc=com + deref = never + scope = subtree + user_filter = (&(objectClass=posixAccount)(mail=%u)) + user_attrs = cn=home=/home/vmail/%$ + pass_filter = (&(objectClass=posixAccount)(mail=%u)) + pass_attrs = uid=user,userPassword=password + confd: + auth-ldap: | + passdb { + driver = ldap + + # Path for LDAP configuration file, see example-config/dovecot-ldap.conf.ext + args = /etc/dovecot/ldap.conf + } + userdb { + driver = ldap + args = /etc/dovecot/ldap.conf + + } + 10-auth: | + auth_default_realm = example.com + auth_username_format = %Lu + auth_mechanisms = plain login + 10-mail: | + mail_location = maildir:%h + namespace inbox { + inbox = yes + } + mail_uid = vmail + mail_gid = vmail + first_valid_uid = 1000 + last_valid_uid = 1000 + first_valid_gid = 1000 + last_valid_gid = 1000 + protocol !indexer-worker { + } + mbox_write_locks = fcntl + 10-master: | + service imap-login { + inet_listener imap { + #port = 143 + } + inet_listener imaps { + #port = 993 + #ssl = yes + } + inet_listener imap_haproxy { + port = 1109 + haproxy = yes + } + inet_listener imaps_haproxy { + port = 10993 + ssl = yes + haproxy = yes + } + } + + service pop3-login { + inet_listener pop3 { + #port = 110 + } + inet_listener pop3s { + #port = 995 + #ssl = yes + } + } + + service lmtp { + inet_listener lmtp { + port = 24 + } + unix_listener /var/spool/postfix/private/dovecot-lmtp { + mode = 0600 + group = postfix + user = postfix + } + user = vmail + } + + service imap { + } + + service pop3 { + } + + service auth { + inet_listener { + port = 12345 + } + unix_listener auth-userdb { + mode = 0660 + user = vmail + #group = + } + + # Postfix smtp-auth + unix_listener /var/spool/postfix/private/auth { + mode = 0660 + user = postfix + group = postfix + } + } + + service auth-worker { + } + + service dict { + unix_listener dict { + } + } + 10-ssl: | + ssl = required + ssl_cert = values.yaml +vim values.yaml # Edit to enable persistent storage +helm install gitea k8s-land/gitea -f values.yaml +``` + +### Database Configuration + +By default, we will launch a Mariadb database: + +```yaml +mariadb: + enabled: true +``` + +To use an external database, disable the in-pod database and fill in the "externalDB" values: + +```yaml +mariadb: + enabled: false + +#Connect to an external database + externalDB: + dbUser: "postgres" + dbPassword: "" + dbHost: "db-service-name.namespace.svc.cluster.local" # or some external host + dbPort: "5432" + dbDatabase: "gitea" +``` + +## Persistent Data + +By default, persistent data is not enabled and thus you'll have to enable it from within the `values.yaml`. + +Unless otherwise set to true, data will be deleted when the Pod is restarted. + +To prevent data loss, we will enable persistent data. 
+
+First, enable persistence:
+
+```yaml
+persistence:
+  enabled: true
+```
+
+If you do **NOT** want Helm to replace the data when re-deploying (updating the chart), add the `resource-policy` annotation:
+
+```yaml
+persistence:
+  annotations:
+    "helm.sh/resource-policy": keep
+```
+
+To use a previously created PVC / volume, use the following:
+
+```yaml
+  existingGiteaClaim: gitea-gitea
+```
+
+## Ingress And External Host/Ports
+
+Gitea needs its ports exposed in order to be reachable. The recommended way is an **ingress**; alternatively, you can use a `LoadBalancer` service.
+
+To expose the web application via an ingress (the default):
+
+```yaml
+ingress:
+  enabled: true
+```
+
+When enabled, the chart generates an ingress for the web application using the ingress controller of your choice. If the ingress is enabled, `service.http.externalHost` must be specified. Exposing SSH relies on either a `LoadBalancer` or a `NodePort` service.
+
+## Upgrading
+
+When upgrading, make sure you have the following enabled:
+
+ - Persistence for both MariaDB and Gitea
+ - Using `existingGiteaClaim`
+ - Because this chart uses [bitnami/mariadb](https://github.com/helm/charts/tree/master/stable/mariadb), make sure to hard-code your passwords within `values.yaml`; otherwise you will be unable to upgrade MariaDB
+
+## Configuration
+
+Refer to [values.yaml](values.yaml) for the full run-down on defaults.
+
+The following table lists the configurable parameters of this chart and their default values.
+
+| Parameter | Description | Default |
+|---------------------------------------|--------------------------------------------------------------------------------------------------------------------------------|---------------------------|
+| `images.gitea` | `gitea` image | `gitea/gitea:1.9.3` |
+| `images.memcached` | `memcached` image | `memcached:1.5.19-alpine` |
+| `images.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `images.pullSecrets` | Specify an array of pull secrets | `[]` |
+| `memcached.maxItemMemory` | Max item memory | `64` |
+| `memcached.verbosity` | Verbosity | `v` |
+| `memcached.extendedOptions` | Extended options for memcached | `modern` |
+| `ingress.enabled` | Switch to create ingress for this chart deployment | `true` |
+| `ingress.hostname` | Hostname to be used for the ingress | `gitea.local` |
+| `ingress.certManager` | Whether to use cert-manager (Let's Encrypt, etc.) | `true` |
+| `ingress.annotations` | Annotations used by the ingress | `[]` |
+| `ingress.hosts` | Additional hosts to be used by the ingress | `[]` |
+| `ingress.tls` | TLS secret keys to be used with Gitea | `[]` |
+| `service.http.serviceType` | Type of Kubernetes service used for HTTP, i.e. ClusterIP, NodePort or LoadBalancer | `ClusterIP` |
+| `service.http.port` | HTTP port for web traffic | `3000` |
+| `service.http.NodePort` | Manual NodePort for web traffic | `nil` |
+| `service.http.externalPort` | Port exposed on the internet by a load balancer or firewall that redirects to the ingress or NodePort | `8280` |
+| `service.http.externalHost` | IP or DNS name exposed on the internet by a load balancer or firewall that redirects to the ingress or Node for HTTP traffic | `gitea.local` |
+| `service.ssh.serviceType` | Type of Kubernetes service used for SSH, i.e. ClusterIP, NodePort or LoadBalancer | `ClusterIP` |
+| `service.ssh.port` | SSH port for SSH traffic | `22` |
+| `service.ssh.NodePort` | Manual NodePort for SSH traffic | `nil` |
+| `service.ssh.externalPort` | Port exposed on the internet by a load balancer or firewall that redirects to the ingress or NodePort | `nil` |
+| `service.ssh.externalHost` | IP or DNS name exposed on the internet by a load balancer or firewall that redirects to the ingress or Node for SSH traffic | `gitea.local` |
+| `resources.gitea.requests.memory` | gitea container memory request | `500Mi` |
+| `resources.gitea.requests.cpu` | gitea container CPU request | `1000m` |
+| `resources.gitea.limits.memory` | gitea container memory limit | `2Gi` |
+| `resources.gitea.limits.cpu` | gitea container CPU limit | `1` |
+| `resources.memcached.requests.memory` | memcached container memory request | `64Mi` |
+| `resources.memcached.requests.cpu` | memcached container CPU request | `50m` |
+| `persistence.enabled` | Create PVCs to store gitea data | `false` |
+| `persistence.existingGiteaClaim` | Already existing PVC that should be used for gitea data | `nil` |
+| `persistence.giteaSize` | Size of gitea PVC to create | `10Gi` |
+| `persistence.annotations` | Annotations to set on created PVCs | `nil` |
+| `persistence.storageClass` | StorageClass to use for dynamic provisioning if not 'default' | `nil` |
+| `mariadb.enabled` | Enable or disable MariaDB | `true` |
+| `mariadb.replication.enabled` | Enable or disable replication | `false` |
+| `mariadb.db.name` | Default database name | `gitea` |
+| `mariadb.db.user` | Default database user | `gitea` |
+| `mariadb.persistence.enabled` | Enable or disable persistence | `true` |
+| `mariadb.persistence.accessMode` | Access mode to use | `ReadWriteOnce` |
+| `mariadb.persistence.size` | Size of the database volume | `8Gi` |
+| `externalDB.dbUser` | External DB user | `unset` |
+| `externalDB.dbPassword` | External DB password | `unset` |
+| `externalDB.dbHost` | External DB host | `unset` |
+| `externalDB.dbPort` | External DB port | `unset` |
+| `externalDB.dbDatabase` | External DB database name | `unset` |
+| `config.disableInstaller` | Disable the installer | `false` |
+| `config.offlineMode` | Sets Gitea's Offline Mode. Values are `true` or `false`. | `false` |
+| `config.requireSignin` | Require Gitea users to be signed in to see any pages. Values are `true` or `false`. | `false` |
+| `config.disableRegistration` | Disable Gitea's user registration. Values are `true` or `false`. | `false` |
+| `config.openidSignin` | Allow login with OpenID. Values are `true` or `false`. | `true` |
+| `nodeSelector` | Node selector for pod assignment | `{}` |
+| `affinity` | Affinity settings for pod assignment | `{}` |
+| `tolerations` | Toleration labels for pod assignment | `[]` |
+| `deploymentAnnotations` | Deployment annotations to be used | `{}` |
+| `podAnnotations` | Pod deployment annotations to be used | `{}` |
diff --git a/gitea/charts/mariadb/.helmignore b/gitea/charts/mariadb/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/gitea/charts/mariadb/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/gitea/charts/mariadb/Chart.lock b/gitea/charts/mariadb/Chart.lock new file mode 100644 index 0000000..85f9a8d --- /dev/null +++ b/gitea/charts/mariadb/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.1.2 +digest: sha256:e96477f37f86a4595dce9057f8d04f903f761f340440986129e53cc55f3d63ee +generated: "2020-12-11T12:21:32.262474+01:00" diff --git a/gitea/charts/mariadb/Chart.yaml b/gitea/charts/mariadb/Chart.yaml new file mode 100644 index 0000000..2d8ea6d --- /dev/null +++ b/gitea/charts/mariadb/Chart.yaml @@ -0,0 +1,30 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 10.5.8 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Fast, reliable, scalable, and easy to use open-source relational database + system. MariaDB Server is intended for mission-critical, heavy-load production systems + as well as for embedding into mass-deployed software. Highly available MariaDB cluster. +home: https://github.com/bitnami/charts/tree/master/bitnami/mariadb +icon: https://bitnami.com/assets/stacks/mariadb/img/mariadb-stack-220x234.png +keywords: +- mariadb +- mysql +- database +- sql +- prometheus +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mariadb +sources: +- https://github.com/bitnami/bitnami-docker-mariadb +- https://github.com/prometheus/mysqld_exporter +- https://mariadb.org +version: 9.1.4 diff --git a/gitea/charts/mariadb/README.md b/gitea/charts/mariadb/README.md new file mode 100644 index 0000000..8b527f3 --- /dev/null +++ b/gitea/charts/mariadb/README.md @@ -0,0 +1,465 @@ +# MariaDB + +[MariaDB](https://mariadb.org) is one of the most popular database servers in the world. It’s made by the original developers of MySQL and guaranteed to stay open source. Notable users include Wikipedia, Facebook and Google. + +MariaDB is developed as open source software and as a relational database it provides an SQL interface for accessing data. The latest versions of MariaDB also include GIS and JSON features. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/mariadb +``` + +## Introduction + +This chart bootstraps a [MariaDB](https://github.com/bitnami/bitnami-docker-mariadb) replication cluster deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/mariadb +``` + +The command deploys MariaDB on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the MariaDB chart and their default values. + +| Parameter | Description | Default | +|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | + +### Common parameters + +| Parameter | Description | Default | +|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `nameOverride` | String to partially override mariadb.fullname | `nil` | +| `fullnameOverride` | String to fully override mariadb.fullname | `nil` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `nil` | +| `commonAnnotations` | Annotations to add to all deployed objects | `[]` | +| `schedulerName` | Name of the scheduler (other than default) to dispatch pods | `nil` | +| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template) | `nil` | + +### MariaDB common parameters + +| Parameter | Description | Default | +|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `image.registry` | MariaDB image registry | `docker.io` | +| `image.repository` | MariaDB image name | `bitnami/mariadb` | +| `image.tag` | MariaDB image tag | `{TAG_NAME}` | +| `image.pullPolicy` | MariaDB image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `architecture` | MariaDB architecture (`standalone` or `replication`) | `standalone` | +| `auth.rootPassword` | Password for the `root` user. Ignored if existing secret is provided. | _random 10 character alphanumeric string_ | +| `auth.database` | Name for a custom database to create | `my_database` | +| `auth.username` | Name for a custom user to create | `""` | +| `auth.password` | Password for the new user. Ignored if existing secret is provided | _random 10 character long alphanumeric string_ | +| `auth.replicationUser` | MariaDB replication user | `nil` | +| `auth.replicationPassword` | MariaDB replication user password. 
Ignored if existing secret is provided | _random 10 character long alphanumeric string_ | +| `auth.forcePassword` | Force users to specify required passwords | `false` | +| `auth.usePasswordFiles` | Mount credentials as a files instead of using an environment variable | `false` | +| `auth.customPasswordFiles` | Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication` | `{}` | +| `auth.existingSecret` | Use existing secret for password details (`auth.rootPassword`, `auth.password`, `auth.replicationPassword` will be ignored and picked up from this secret). The secret has to contain the keys `mariadb-root-password`, `mariadb-replication-password` and `mariadb-password` | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `nil` | + +### MariaDB Primary parameters + +| Parameter | Description | Default | +|----------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `primary.command` | Override default container command on MariaDB Primary container(s) (useful when using custom images) | `nil` | +| `primary.args` | Override default container args on MariaDB Primary container(s) (useful when using custom images) | `nil` | +| `primary.configuration` | MariaDB Primary configuration to be injected as ConfigMap | Check `values.yaml` file | +| `primary.existingConfigmap` | Name of existing ConfigMap with MariaDB Primary configuration | `nil` | +| `primary.updateStrategy` | Update strategy type for the MariaDB primary statefulset | `RollingUpdate` | +| `primary.podAnnotations` | Additional pod annotations for MariaDB primary pods | `{}` (evaluated as a template) | +| `primary.podLabels` | Additional pod labels for MariaDB primary pods | `{}` (evaluated as a template) | +| `primary.podAffinityPreset` | MariaDB primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.podAntiAffinityPreset` | MariaDB primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `primary.nodeAffinityPreset.type` | MariaDB primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.nodeAffinityPreset.key` | MariaDB primary node label key to match Ignored if `primary.affinity` is set. | `""` | +| `primary.nodeAffinityPreset.values` | MariaDB primary node label values to match. Ignored if `primary.affinity` is set. 
| `[]` | +| `primary.affinity` | Affinity for MariaDB primary pods assignment | `{}` (evaluated as a template) | +| `primary.nodeSelector` | Node labels for MariaDB primary pods assignment | `{}` (evaluated as a template) | +| `primary.tolerations` | Tolerations for MariaDB primary pods assignment | `[]` (evaluated as a template) | +| `primary.podSecurityContext.enabled` | Enable security context for MariaDB primary pods | `true` | +| `primary.podSecurityContext.fsGroup` | Group ID for the mounted volumes' filesystem | `1001` | +| `primary.containerSecurityContext.enabled` | MariaDB primary container securityContext | `true` | +| `primary.containerSecurityContext.runAsUser` | User ID for the MariaDB primary container | `1001` | +| `primary.livenessProbe` | Liveness probe configuration for MariaDB primary containers | Check `values.yaml` file | +| `primary.readinessProbe` | Readiness probe configuration for MariaDB primary containers | Check `values.yaml` file | +| `primary.customLivenessProbe` | Override default liveness probe for MariaDB primary containers | `nil` | +| `primary.customReadinessProbe` | Override default readiness probe for MariaDB primary containers | `nil` | +| `primary.resources.limits` | The resources limits for MariaDB primary containers | `{}` | +| `primary.resources.requests` | The requested resources for MariaDB primary containers | `{}` | +| `primary.extraEnvVars` | Extra environment variables to be set on MariaDB primary containers | `{}` | +| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for MariaDB primary containers | `nil` | +| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for MariaDB primary containers | `nil` | +| `primary.extraFlags` | MariaDB primary additional command line flags | `nil` | +| `primary.persistence.enabled` | Enable persistence on MariaDB primary replicas using a `PersistentVolumeClaim` | `true` | +| `primary.persistence.existingClaim` | Name of an existing `PersistentVolumeClaim` for MariaDB primary replicas | `nil` | +| `primary.persistence.annotations` | MariaDB primary persistent volume claim annotations | `{}` (evaluated as a template) | +| `primary.persistence.storageClass` | MariaDB primary persistent volume storage Class | `nil` | +| `primary.persistence.accessModes` | MariaDB primary persistent volume access Modes | `[ReadWriteOnce]` | +| `primary.persistence.size` | MariaDB primary persistent volume size | `8Gi` | +| `primary.persistence.selector` | Selector to match an existing Persistent Volume | `{}` (evaluated as a template) | +| `primary.initContainers` | Add additional init containers for the MariaDB Primary pod(s) | `{}` (evaluated as a template) | +| `primary.sidecars` | Add additional sidecar containers for the MariaDB Primary pod(s) | `{}` (evaluated as a template) | +| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MariaDB Primary container(s) | `{}` | +| `primary.extraVolumes` | Optionally specify extra list of additional volumes to the MariaDB Primary pod(s) | `{}` | +| `primary.service.type` | MariaDB Primary K8s service type | `ClusterIP` | +| `primary.service.clusterIP` | MariaDB Primary K8s service clusterIP IP | `nil` | +| `primary.service.port` | MariaDB Primary K8s service port | `3306` | +| `primary.service.nodePort` | MariaDB Primary K8s service node port | `nil` | +| `primary.service.loadBalancerIP` | MariaDB Primary loadBalancerIP if service type is `LoadBalancer` | `nil` | +| 
`primary.service.loadBalancerSourceRanges` | Address that are allowed when MariaDB Primary service is LoadBalancer | `[]` | +| `primary.pdb.create` | Enable/disable a Pod Disruption Budget creation for MariaDB primary pods | `false` | +| `primary.pdb.minAvailable` | Minimum number/percentage of MariaDB primary pods that should remain scheduled | `1` | +| `primary.pdb.maxUnavailable` | Maximum number/percentage of MariaDB primary pods that may be made unavailable | `nil` | + +### MariaDB Secondary parameters + +| Parameter | Description | Default | +|------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `secondary.command` | Override default container command on MariaDB Secondary container(s) (useful when using custom images) | `nil` | +| `secondary.args` | Override default container args on MariaDB Secondary container(s) (useful when using custom images) | `nil` | +| `secondary.configuration` | MariaDB Secondary configuration to be injected as ConfigMap | Check `values.yaml` file | +| `secondary.existingConfigmap` | Name of existing ConfigMap with MariaDB Secondary configuration | `nil` | +| `secondary.replicaCount` | Number of MariaDB secondary replicas | `1` | +| `secondary.updateStrategy` | Update strategy type for the MariaDB secondary statefulset | `RollingUpdate` | +| `secondary.podAnnotations` | Additional pod annotations for MariaDB secondary pods | `{}` (evaluated as a template) | +| `secondary.podLabels` | Additional pod labels for MariaDB secondary pods | `{}` (evaluated as a template) | +| `secondary.podAffinityPreset` | MariaDB secondary pod affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `secondary.podAntiAffinityPreset` | MariaDB secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `secondary.nodeAffinityPreset.type` | MariaDB secondary node affinity preset type. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `secondary.nodeAffinityPreset.key` | MariaDB secondary node label key to match Ignored if `secondary.affinity` is set. | `""` | +| `secondary.nodeAffinityPreset.values` | MariaDB secondary node label values to match. Ignored if `secondary.affinity` is set. 
| `[]` | +| `secondary.affinity` | Affinity for MariaDB secondary pods assignment | `{}` (evaluated as a template) | +| `secondary.nodeSelector` | Node labels for MariaDB secondary pods assignment | `{}` (evaluated as a template) | +| `secondary.tolerations` | Tolerations for MariaDB secondary pods assignment | `[]` (evaluated as a template) | +| `secondary.podSecurityContext.enabled` | Enable security context for MariaDB secondary pods | `true` | +| `secondary.podSecurityContext.fsGroup` | Group ID for the mounted volumes' filesystem | `1001` | +| `secondary.containerSecurityContext.enabled` | MariaDB secondary container securityContext | `true` | +| `secondary.containerSecurityContext.runAsUser` | User ID for the MariaDB secondary container | `1001` | +| `secondary.livenessProbe` | Liveness probe configuration for MariaDB secondary containers | Check `values.yaml` file | +| `secondary.readinessProbe` | Readiness probe configuration for MariaDB secondary containers | Check `values.yaml` file | +| `secondary.customLivenessProbe` | Override default liveness probe for MariaDB secondary containers | `nil` | +| `secondary.customReadinessProbe` | Override default readiness probe for MariaDB secondary containers | `nil` | +| `secondary.resources.limits` | The resources limits for MariaDB secondary containers | `{}` | +| `secondary.resources.requests` | The requested resources for MariaDB secondary containers | `{}` | +| `secondary.extraEnvVars` | Extra environment variables to be set on MariaDB secondary containers | `{}` | +| `secondary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for MariaDB secondary containers | `nil` | +| `secondary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for MariaDB secondary containers | `nil` | +| `secondary.extraFlags` | MariaDB secondary additional command line flags | `nil` | +| `secondary.extraFlags` | MariaDB secondary additional command line flags | `nil` | +| `secondary.persistence.enabled` | Enable persistence on MariaDB secondary replicas using a `PersistentVolumeClaim` | `true` | +| `secondary.persistence.annotations` | MariaDB secondary persistent volume claim annotations | `{}` (evaluated as a template) | +| `secondary.persistence.storageClass` | MariaDB secondary persistent volume storage Class | `nil` | +| `secondary.persistence.accessModes` | MariaDB secondary persistent volume access Modes | `[ReadWriteOnce]` | +| `secondary.persistence.size` | MariaDB secondary persistent volume size | `8Gi` | +| `secondary.persistence.selector` | Selector to match an existing Persistent Volume | `{}` (evaluated as a template) | +| `secondary.initContainers` | Add additional init containers for the MariaDB secondary pod(s) | `{}` (evaluated as a template) | +| `secondary.sidecars` | Add additional sidecar containers for the MariaDB secondary pod(s) | `{}` (evaluated as a template) | +| `secondary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MariaDB secondary container(s) | `{}` | +| `secondary.extraVolumes` | Optionally specify extra list of additional volumes to the MariaDB secondary pod(s) | `{}` | +| `secondary.service.type` | MariaDB secondary K8s service type | `ClusterIP` | +| `secondary.service.clusterIP` | MariaDB secondary K8s service clusterIP IP | `nil` | +| `secondary.service.port` | MariaDB secondary K8s service port | `3306` | +| `secondary.service.nodePort` | MariaDB secondary K8s service node port | `nil` | +| `secondary.service.loadBalancerIP` | MariaDB 
secondary loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `secondary.service.loadBalancerSourceRanges` | Address that are allowed when MariaDB secondary service is LoadBalancer | `[]` | +| `secondary.pdb.create` | Enable/disable a Pod Disruption Budget creation for MariaDB secondary pods | `false` | +| `secondary.pdb.minAvailable` | Minimum number/percentage of MariaDB secondary pods that should remain scheduled | `1` | +| `secondary.pdb.maxUnavailable` | Maximum number/percentage of MariaDB secondary pods that may be made unavailable | `nil` | + +### RBAC parameters + +| Parameter | Description | Default | +|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `serviceAccount.create` | Enable the creation of a ServiceAccount for MariaDB pods | `true` | +| `serviceAccount.name` | Name of the created ServiceAccount | Generated using the `mariadb.fullname` template | +| `serviceAccount.annotations` | Annotations for MariaDB Service Account | `{}` (evaluated as a template) | +| `rbac.create` | Weather to create & use RBAC resources or not | `false` | + +### Volume Permissions parameters + +| Parameter | Description | Default | +|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + +### Metrics parameters + +| Parameter | Description | Default | +|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Exporter image registry | `docker.io` | +| `metrics.image.repository` | Exporter image name | `bitnami/mysqld-exporter` | +| `metrics.image.tag` | Exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` | +| `metrics.extraArgs.primary` | Extra args to be passed to mysqld_exporter on Primary pods | `[]` | +| `metrics.extraArgs.secondary` | Extra args to be passed to mysqld_exporter on Secondary pods | `[]` | +| `metrics.resources.limits` | The resources limits for MariaDB prometheus exporter containers | `{}` | +| `metrics.resources.requests` | The requested resources for MariaDB prometheus 
exporter containers | `{}` | +| `metrics.livenessProbe` | Liveness probe configuration for MariaDB prometheus exporter containers | Check `values.yaml` file | +| `metrics.readinessProbe` | Readiness probe configuration for MariaDB prometheus exporter containers | Check `values.yaml` file | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `nil` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `nil` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels. | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the Installed Prometheus Operator | `{}` | +| `metrics.serviceMonitor.release` | Used to pass Labels release that sometimes should be custom for Prometheus Operator | `nil` | + +The above parameters map to the env variables defined in [bitnami/mariadb](http://github.com/bitnami/bitnami-docker-mariadb). For more information please refer to the [bitnami/mariadb](http://github.com/bitnami/bitnami-docker-mariadb) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.rootPassword=secretpassword,auth.database=app_database \ + bitnami/mariadb +``` + +The above command sets the MariaDB `root` account password to `secretpassword`. Additionally it creates a database named `my_database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/mariadb +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
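+
+As a sketch (assuming you have this chart's `values-production.yaml` available locally, e.g. from a checked-out copy of the chart), you could install with that file directly:
+
+```bash
+# assumes values-production.yaml from this chart is present in the current directory
+$ helm install my-release bitnami/mariadb -f values-production.yaml
+```
+
+The main differences from the default values are summarized below: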
+ +- Force users to specify a password and mount secrets as volumes instead of using environment variables: + +```diff +- auth.forcePassword: false ++ auth.forcePassword: true +- auth.usePasswordFiles: false ++ auth.usePasswordFiles: true +``` + +- Use "replication" architecture: + +```diff +- architecture: standalone ++ architecture: replication +``` + +- Desired number of secondary replicas: + +```diff +- secondary.replicaCount: 1 ++ secondary.replicaCount: 2 +``` + +- Start a side-car prometheus exporter: + +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +### Change MariaDB version + +To modify the MariaDB version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/mariadb/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### Initialize a fresh instance + +The [Bitnami MariaDB](https://github.com/bitnami/bitnami-docker-mariadb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to this option, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the previous option. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +Take into account those scripts are treated differently depending on the extension. While the `.sh` scripts are executed in all the nodes; the `.sql` and `.sql.gz` scripts are only executed in the primary nodes. The reason behind this differentiation is that the `.sh` scripts allow adding conditions to determine what is the node running the script, while these conditions can't be set using `.sql` nor `sql.gz` files. This way it is possible to cover different use cases depending on their needs. + +If using a `.sh` script you want to do a "one-time" action like creating a database, you need to add a condition in your `.sh` script to be executed only in one of the nodes, such as + +```yaml +initdbScripts: + my_init_script.sh: | + #!/bin/sh + if [[ $(hostname) == *primary* ]]; then + echo "Primary node" + mysql -P 3306 -uroot -prandompassword -e "create database new_database"; + else + echo "No primary node" + fi +``` + +### Sidecars and Init Containers + +If you have a need for additional containers to run within the same pod as MariaDB, you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +Similarly, you can add extra init containers using the `initContainers` parameter. + +```yaml +initContainers: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Persistence + +The [Bitnami MariaDB](https://github.com/bitnami/bitnami-docker-mariadb) image stores the MariaDB data and configurations at the `/bitnami/mariadb` path of the container. + +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning, by default. An existing PersistentVolumeClaim can be defined. 
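+
+As an illustration (the `standard` StorageClass and `20Gi` size below are placeholders, not chart defaults), the primary node's persistence can be tuned at install time:
+
+```bash
+# "standard" and 20Gi are placeholders; adjust to whatever your cluster provides
+$ helm install my-release bitnami/mariadb \
+    --set primary.persistence.storageClass=standard \
+    --set primary.persistence.size=20Gi
+```
+
+To reuse a volume you created yourself, set `primary.persistence.existingClaim` instead.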
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+It's necessary to set the `auth.rootPassword` parameter when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, the installation notes display the credentials you must use under the 'Administrator credentials' section. Please note down the password and run the command below to upgrade your chart:
+
+```bash
+$ helm upgrade my-release bitnami/mariadb --set auth.rootPassword=[ROOT_PASSWORD]
+```
+
+| Note: you need to substitute the placeholder _[ROOT_PASSWORD]_ with the value obtained in the installation notes.
+
+### To 9.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the changes required to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Dependency information was moved from *requirements.yaml* to *Chart.yaml*.
+- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+### To 8.0.0
+
+- Several parameters were renamed or removed in favor of new ones in this major version:
+  - The terms *master* and *slave* have been replaced by the terms *primary* and *secondary*.
+    Therefore, parameters prefixed with `master` or `slave` are now prefixed with `primary` or `secondary`, respectively.
+  - `securityContext.*` is deprecated in favor of `primary.podSecurityContext`, `primary.containerSecurityContext`, `secondary.podSecurityContext`, and `secondary.containerSecurityContext`.
+  - Credentials parameters have been reorganized under the `auth` parameter.
+  - The `replication.enabled` parameter is deprecated in favor of the `architecture` parameter, which accepts two values: `standalone` and `replication`.
+- The default MariaDB version was updated from 10.3 to 10.5. According to the official documentation, upgrading from 10.3 should be painless. However, there are some things that have changed which could affect an upgrade:
+  - [Incompatible changes upgrading from MariaDB 10.3 to MariaDB 10.4](https://mariadb.com/kb/en/upgrading-from-mariadb-103-to-mariadb-104/#incompatible-changes-between-103-and-104).
+  - [Incompatible changes upgrading from MariaDB 10.4 to MariaDB 10.5](https://mariadb.com/kb/en/upgrading-from-mariadb-104-to-mariadb-105/#incompatible-changes-between-104-and-105).
+- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels).
+- This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+Consequences:
+
+Backwards compatibility is not guaranteed. To upgrade to `8.0.0`, install a new release of the MariaDB chart and migrate the data from your previous release. There are two alternatives to do so:
+
+- Create a backup of the database, and restore it on the new release using tools such as [mysqldump](https://mariadb.com/kb/en/mysqldump/).
+- Reuse the PVC that held the master data in your previous release. To do so, use the `primary.persistence.existingClaim` parameter. The following example assumes that the release name is `mariadb`:
+
+```bash
+$ helm install mariadb bitnami/mariadb --set auth.rootPassword=[ROOT_PASSWORD] --set primary.persistence.existingClaim=[EXISTING_PVC]
+```
+
+| Note: you need to substitute the placeholder _[EXISTING_PVC]_ with the name of the PVC used on your previous release, and _[ROOT_PASSWORD]_ with the root password used in your previous release.
+
+### To 7.0.0
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In https://github.com/helm/charts/pull/17308 the `apiVersion` of the statefulset resources was updated to `apps/v1` in line with the API deprecations, resulting in a compatibility breakage.
+
+This major version bump signifies this change.
+
+### To 6.0.0
+
+The MariaDB version was updated from 10.1 to 10.3; there are no changes in the chart itself. According to the official documentation, upgrading from 10.1 should be painless.
However, there are some things that have changed which could affect an upgrade: + +- [Incompatible changes upgrading from MariaDB 10.1 to MariaDB 10.2](https://mariadb.com/kb/en/library/upgrading-from-mariadb-101-to-mariadb-102//#incompatible-changes-between-101-and-102) +- [Incompatible changes upgrading from MariaDB 10.2 to MariaDB 10.3](https://mariadb.com/kb/en/library/upgrading-from-mariadb-102-to-mariadb-103/#incompatible-changes-between-102-and-103) + +### To 5.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 5.0.0. The following example assumes that the release name is mariadb: + +```console +$ kubectl delete statefulset opencart-mariadb --cascade=false +``` diff --git a/gitea/charts/mariadb/charts/common/.helmignore b/gitea/charts/mariadb/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/gitea/charts/mariadb/charts/common/Chart.yaml b/gitea/charts/mariadb/charts/common/Chart.yaml new file mode 100644 index 0000000..1bda8e2 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.1.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.1.2 diff --git a/gitea/charts/mariadb/charts/common/README.md b/gitea/charts/mariadb/charts/common/README.md new file mode 100644 index 0000000..a688953 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/README.md @@ -0,0 +1,309 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. 
+ +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. 
| `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +### Labels + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. 
| + +### Storage + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frecuently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. 
See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. 
+ example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possiblity of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). 
[Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/gitea/charts/mariadb/charts/common/templates/_affinities.tpl b/gitea/charts/mariadb/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..1ff26d5 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/_capabilities.tpl b/gitea/charts/mariadb/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..143bef2 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_capabilities.tpl @@ -0,0 +1,33 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/_errors.tpl b/gitea/charts/mariadb/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..d6d3ec6 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_errors.tpl @@ -0,0 +1,20 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: you must provide your current passwords when upgrade the release%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/_images.tpl b/gitea/charts/mariadb/charts/common/templates/_images.tpl new file mode 100644 index 0000000..aafde9f --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/_labels.tpl b/gitea/charts/mariadb/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/_names.tpl b/gitea/charts/mariadb/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/_secrets.tpl b/gitea/charts/mariadb/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..ebfb5d4 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_secrets.tpl @@ -0,0 +1,57 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- $name = .name -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/_storage.tpl b/gitea/charts/mariadb/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/_tplvalues.tpl b/gitea/charts/mariadb/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/_utils.tpl b/gitea/charts/mariadb/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..74774a3 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_utils.tpl @@ -0,0 +1,45 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/_warnings.tpl b/gitea/charts/mariadb/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/validations/_cassandra.tpl b/gitea/charts/mariadb/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..7a274a0 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled cassandra. 
+ +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/validations/_mariadb.tpl b/gitea/charts/mariadb/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..3bf669d --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/validations/_mongodb.tpl b/gitea/charts/mariadb/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..7e0c1cb --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/validations/_postgresql.tpl b/gitea/charts/mariadb/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..f25e0ff --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/validations/_redis.tpl b/gitea/charts/mariadb/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..2ccc04d --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,72 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $existingSecret := include "common.redis.values.existingSecret" . -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . 
-}} + {{- $valueKeyRedisPassword := printf "%s%s" $valueKeyPrefix "password" -}} + {{- $valueKeyRedisUsePassword := printf "%s%s" $valueKeyPrefix "usePassword" -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $usePassword := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUsePassword "context" .context) -}} + {{- if eq $usePassword "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Redis Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.redis.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.redis.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/templates/validations/_validations.tpl b/gitea/charts/mariadb/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..d4cf32c --- /dev/null +++ b/gitea/charts/mariadb/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,44 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. 
+ +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s=$%s' to the command.%s" .valueKey .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/gitea/charts/mariadb/charts/common/values.yaml b/gitea/charts/mariadb/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/gitea/charts/mariadb/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/gitea/charts/mariadb/ci/values-production-with-rbac-and-metrics.yaml b/gitea/charts/mariadb/ci/values-production-with-rbac-and-metrics.yaml new file mode 100644 index 0000000..2f1a8d9 --- /dev/null +++ b/gitea/charts/mariadb/ci/values-production-with-rbac-and-metrics.yaml @@ -0,0 +1,33 @@ +# Test values file for generating all of the yaml and check that +# the rendering is correct +architecture: replication +auth: + usePasswordFiles: true + +primary: + extraEnvVars: + - name: TEST + value: "3" + extraEnvVarsSecret: example-secret + extraEnvVarsCM: example-cm + podDisruptionBudget: + create: true + +secondary: + replicaCount: 2 + extraEnvVars: + - name: TEST + value: "2" + extraEnvVarsSecret: example-secret-2 + extraEnvVarsCM: example-cm-2 + podDisruptionBudget: + create: true + +serviceAccount: + create: true + name: mariadb-service-account +rbac: + create: true + +metrics: + enabled: true diff --git a/gitea/charts/mariadb/templates/NOTES.txt b/gitea/charts/mariadb/templates/NOTES.txt new file mode 100644 index 0000000..efb4193 --- /dev/null +++ b/gitea/charts/mariadb/templates/NOTES.txt @@ -0,0 +1,50 @@ + +Please be patient while the chart is being deployed + +Tip: + + Watch the deployment status using the command: kubectl get pods -w --namespace {{ .Release.Namespace }} -l release={{ .Release.Name }} + +Services: + + echo Primary: {{ include "mariadb.primary.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.primary.service.port }} +{{- if eq .Values.architecture "replication" }} + echo Secondary: {{ include "mariadb.secondary.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.secondary.service.port }} +{{- end }} + +Administrator credentials: + + Username: root + Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.secretName" . }} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) + +To connect to your database: + + 1. Run a pod that you can use as a client: + + kubectl run {{ include "common.names.fullname" . 
}}-client --rm --tty -i --restart='Never' --image {{ template "mariadb.image" . }} --namespace {{ .Release.Namespace }} --command -- bash + + 2. To connect to primary service (read/write): + + mysql -h {{ include "mariadb.primary.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -uroot -p {{ .Values.auth.database }} + +{{- if eq .Values.architecture "replication" }} + + 3. To connect to secondary service (read-only): + + mysql -h {{ include "mariadb.secondary.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -uroot -p {{ .Values.auth.database }} +{{- end }} + +To upgrade this helm chart: + + 1. Obtain the password as described on the 'Administrator credentials' section and set the 'auth.rootPassword' parameter as shown below: + + ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.secretName" . }} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) + helm upgrade {{ .Release.Name }} bitnami/mariadb --set auth.rootPassword=$ROOT_PASSWORD + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- include "mariadb.validateValues" . }} +{{- if not .Values.auth.customPasswordFiles -}} + {{- $passwordValidationErrors := include "common.validations.values.mariadb.passwords" (dict "secret" (include "common.names.fullname" .) "context" $) -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $passwordValidationErrors) "context" $) -}} +{{- end }} diff --git a/gitea/charts/mariadb/templates/_helpers.tpl b/gitea/charts/mariadb/templates/_helpers.tpl new file mode 100644 index 0000000..4a8bf7f --- /dev/null +++ b/gitea/charts/mariadb/templates/_helpers.tpl @@ -0,0 +1,150 @@ +{{/* vim: set filetype=mustache: */}} + +{{- define "mariadb.primary.fullname" -}} +{{- if eq .Values.architecture "replication" }} +{{- printf "%s-%s" (include "common.names.fullname" .) "primary" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- include "common.names.fullname" . -}} +{{- end -}} +{{- end -}} + +{{- define "mariadb.secondary.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "secondary" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper MariaDB image name +*/}} +{{- define "mariadb.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper metrics image name +*/}} +{{- define "mariadb.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mariadb.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mariadb.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{ template "mariadb.initdbScriptsCM" . }} +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "mariadb.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" .Values.initdbScriptsConfigMap -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "mariadb.primary.fullname" .) 
-}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "mariadb.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the configmap with the MariaDB Primary configuration
+*/}}
+{{- define "mariadb.primary.configmapName" -}}
+{{- if .Values.primary.existingConfigmap -}}
+    {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}}
+{{- else -}}
+    {{- printf "%s" (include "mariadb.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for MariaDB Primary
+*/}}
+{{- define "mariadb.primary.createConfigmap" -}}
+{{- if and .Values.primary.configuration (not .Values.primary.existingConfigmap) }}
+    {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the configmap with the MariaDB Secondary configuration
+*/}}
+{{- define "mariadb.secondary.configmapName" -}}
+{{- if .Values.secondary.existingConfigmap -}}
+    {{- printf "%s" (tpl .Values.secondary.existingConfigmap $) -}}
+{{- else -}}
+    {{- printf "%s" (include "mariadb.secondary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for MariaDB Secondary
+*/}}
+{{- define "mariadb.secondary.createConfigmap" -}}
+{{- if and (eq .Values.architecture "replication") .Values.secondary.configuration (not .Values.secondary.existingConfigmap) }}
+    {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the secret with MariaDB credentials
+*/}}
+{{- define "mariadb.secretName" -}}
+    {{- if .Values.auth.existingSecret -}}
+        {{- printf "%s" .Values.auth.existingSecret -}}
+    {{- else -}}
+        {{- printf "%s" (include "common.names.fullname" .) -}}
+    {{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret object should be created for MariaDB
+*/}}
+{{- define "mariadb.createSecret" -}}
+{{- if and (not .Values.auth.existingSecret) (not .Values.auth.customPasswordFiles) }}
+    {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "mariadb.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "mariadb.validateValues.architecture" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of MariaDB - must provide a valid architecture */}}
+{{- define "mariadb.validateValues.architecture" -}}
+{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replication") -}}
+mariadb: architecture
+    Invalid architecture selected. Valid values are "standalone" and
+    "replication". Please set a valid architecture (--set architecture="xxxx")
+{{- end -}}
+{{- end -}}
diff --git a/gitea/charts/mariadb/templates/primary/configmap.yaml b/gitea/charts/mariadb/templates/primary/configmap.yaml
new file mode 100644
index 0000000..8ee5f03
--- /dev/null
+++ b/gitea/charts/mariadb/templates/primary/configmap.yaml
@@ -0,0 +1,18 @@
+{{- if (include "mariadb.primary.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "mariadb.primary.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + my.cnf: |- +{{ .Values.primary.configuration | indent 4 }} +{{- end -}} diff --git a/gitea/charts/mariadb/templates/primary/initialization-configmap.yaml b/gitea/charts/mariadb/templates/primary/initialization-configmap.yaml new file mode 100644 index 0000000..826b597 --- /dev/null +++ b/gitea/charts/mariadb/templates/primary/initialization-configmap.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "mariadb.primary.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary +data: +{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }} +{{ end }} diff --git a/gitea/charts/mariadb/templates/primary/pdb.yaml b/gitea/charts/mariadb/templates/primary/pdb.yaml new file mode 100644 index 0000000..4ca1bf8 --- /dev/null +++ b/gitea/charts/mariadb/templates/primary/pdb.yaml @@ -0,0 +1,25 @@ +{{- if .Values.primary.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "mariadb.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.primary.pdb.minAvailable }} + minAvailable: {{ .Values.primary.pdb.minAvailable }} + {{- end }} + {{- if .Values.primary.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.primary.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: primary +{{- end }} diff --git a/gitea/charts/mariadb/templates/primary/statefulset.yaml b/gitea/charts/mariadb/templates/primary/statefulset.yaml new file mode 100644 index 0000000..61739c7 --- /dev/null +++ b/gitea/charts/mariadb/templates/primary/statefulset.yaml @@ -0,0 +1,327 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "mariadb.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: primary + serviceName: {{ include "mariadb.primary.fullname" . 
}} + updateStrategy: + type: {{ .Values.primary.updateStrategy }} + {{- if (eq "Recreate" .Values.primary.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.primary.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.primary.rollingUpdatePartition }} + {{- end }} + template: + metadata: + annotations: + {{- if (include "mariadb.primary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.primary.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "mariadb.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mariadb.serviceAccountName" . }} + {{- if .Values.primary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.primary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.primary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.primary.initContainers (and .Values.primary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.primary.persistence.enabled) }} + initContainers: + {{- if .Values.primary.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.primary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.primary.persistence.enabled }} + - name: volume-permissions + image: {{ include "mariadb.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /bitnami/mariadb + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mariadb + {{- end }} + {{- end }} + containers: + - name: mariadb + image: {{ include "mariadb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.primary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MARIADB_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mariadb/secrets/mariadb-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MARIADB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . }} + key: mariadb-root-password + {{- end }} + {{- if not (empty .Values.auth.username) }} + - name: MARIADB_USER + value: {{ .Values.auth.username | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MARIADB_PASSWORD_FILE + value: {{ default "/opt/bitnami/mariadb/secrets/mariadb-password" .Values.auth.customPasswordFiles.user }} + {{- else }} + - name: MARIADB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . }} + key: mariadb-password + {{- end }} + {{- end }} + - name: MARIADB_DATABASE + value: {{ .Values.auth.database | quote }} + {{- if eq .Values.architecture "replication" }} + - name: MARIADB_REPLICATION_MODE + value: "master" + - name: MARIADB_REPLICATION_USER + value: {{ .Values.auth.replicationUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MARIADB_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mariadb/secrets/mariadb-replication-password" .Values.auth.customPasswordFiles.replicator }} + {{- else }} + - name: MARIADB_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . 
}} + key: mariadb-replication-password + {{- end }} + {{- end }} + {{- if .Values.primary.extraFlags }} + - name: MARIADB_EXTRA_FLAGS + value: "{{ .Values.primary.extraFlags }}" + {{- end }} + {{- if .Values.primary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.primary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.primary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.primary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.primary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if .Values.primary.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.primary.livenessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MARIADB_ROOT_PASSWORD:-}" + if [[ -f "${MARIADB_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MARIADB_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.primary.readinessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MARIADB_ROOT_PASSWORD:-}" + if [[ -f "${MARIADB_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MARIADB_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.resources }} + resources: {{ toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mariadb + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.primary.configuration .Values.primary.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mariadb/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mariadb-credentials + mountPath: /opt/bitnami/mariadb/secrets/ + {{- end }} + {{- if .Values.primary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mariadb.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.auth.usePasswordFiles }} + - name: MARIADB_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mariadb-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MARIADB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . 
}} + key: mariadb-root-password + {{- end }} + command: + - /bin/bash + - -ec + - | + password_aux="${MARIADB_ROOT_PASSWORD:-}" + if [[ -f "${MARIADB_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MARIADB_ROOT_PASSWORD_FILE") + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.primary }} {{ . }} {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + volumeMounts: + - name: mariadb-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + {{- if .Values.primary.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.primary.configuration .Values.primary.existingConfigmap }} + - name: config + configMap: + name: {{ include "mariadb.primary.configmapName" . }} + {{- end }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "mariadb.initdbScriptsCM" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mariadb-credentials + secret: + secretName: {{ template "mariadb.secretName" . }} + items: + - key: mariadb-root-password + path: mariadb-root-password + - key: mariadb-password + path: mariadb-password + {{- if eq .Values.architecture "replication" }} + - key: mariadb-replication-password + path: mariadb-replication-password + {{- end }} + {{- end }} + {{- if .Values.primary.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.primary.persistence.existingClaim . }} + {{- else if not .Values.primary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if and .Values.primary.persistence.enabled (not .Values.primary.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{ include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: primary + spec: + accessModes: + {{- range .Values.primary.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.primary.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) }} + {{- if .Values.primary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} diff --git a/gitea/charts/mariadb/templates/primary/svc.yaml b/gitea/charts/mariadb/templates/primary/svc.yaml new file mode 100644 index 0000000..9b60dd6 --- /dev/null +++ b/gitea/charts/mariadb/templates/primary/svc.yaml @@ -0,0 +1,49 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mariadb.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.primary.service.type }} + {{- if and (eq .Values.primary.service.type "ClusterIP") .Values.primary.service.clusterIP }} + clusterIP: {{ .Values.primary.service.clusterIP }} + {{- end }} + {{- if and .Values.primary.service.loadBalancerIP (eq .Values.primary.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") .Values.primary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.primary.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.primary.service.port }} + protocol: TCP + targetPort: mysql + {{- if (and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) .Values.primary.service.nodePort) }} + nodePort: {{ .Values.primary.service.nodePort }} + {{- else if eq .Values.primary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + port: 9104 + protocol: TCP + targetPort: metrics + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/gitea/charts/mariadb/templates/role.yaml b/gitea/charts/mariadb/templates/role.yaml new file mode 100644 index 0000000..5dee7ca --- /dev/null +++ b/gitea/charts/mariadb/templates/role.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get +{{- end }} diff --git a/gitea/charts/mariadb/templates/rolebinding.yaml b/gitea/charts/mariadb/templates/rolebinding.yaml new file mode 100644 index 0000000..a94b4b2 --- /dev/null +++ b/gitea/charts/mariadb/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "mariadb.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "common.names.fullname" . -}} +{{- end }} diff --git a/gitea/charts/mariadb/templates/secondary/configmap.yaml b/gitea/charts/mariadb/templates/secondary/configmap.yaml new file mode 100644 index 0000000..e672c05 --- /dev/null +++ b/gitea/charts/mariadb/templates/secondary/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "mariadb.secondary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mariadb.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + my.cnf: |- +{{ .Values.secondary.configuration | indent 4 }} +{{- end -}} diff --git a/gitea/charts/mariadb/templates/secondary/pdb.yaml b/gitea/charts/mariadb/templates/secondary/pdb.yaml new file mode 100644 index 0000000..f5c7e26 --- /dev/null +++ b/gitea/charts/mariadb/templates/secondary/pdb.yaml @@ -0,0 +1,25 @@ +{{- if and (eq .Values.architecture "replication") .Values.secondary.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "mariadb.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.secondary.pdb.minAvailable }} + minAvailable: {{ .Values.secondary.pdb.minAvailable }} + {{- end }} + {{- if .Values.secondary.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.secondary.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/gitea/charts/mariadb/templates/secondary/statefulset.yaml b/gitea/charts/mariadb/templates/secondary/statefulset.yaml new file mode 100644 index 0000000..b32b94e --- /dev/null +++ b/gitea/charts/mariadb/templates/secondary/statefulset.yaml @@ -0,0 +1,300 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "mariadb.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.secondary.replicaCount }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: secondary + serviceName: {{ include "mariadb.secondary.fullname" . }} + updateStrategy: + type: {{ .Values.secondary.updateStrategy }} + {{- if (eq "Recreate" .Values.secondary.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.secondary.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.secondary.rollingUpdatePartition }} + {{- end }} + template: + metadata: + annotations: + {{- if (include "mariadb.secondary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/secondary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.secondary.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "mariadb.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mariadb.serviceAccountName" . 
}} + {{- if .Values.secondary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.secondary.podAffinityPreset "component" "secondary" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.secondary.podAntiAffinityPreset "component" "secondary" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.secondary.nodeAffinityPreset.type "key" .Values.secondary.nodeAffinityPreset.key "values" .Values.secondary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.secondary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.secondary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.secondary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.secondary.initContainers (and .Values.secondary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.secondary.persistence.enabled) }} + initContainers: + {{- if .Values.secondary.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.secondary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.secondary.persistence.enabled }} + - name: volume-permissions + image: {{ include "mariadb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.secondary.containerSecurityContext.runAsUser }}:{{ .Values.secondary.podSecurityContext.fsGroup }} /bitnami/mariadb + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mariadb + {{- end }} + {{- end }} + containers: + - name: mariadb + image: {{ include "mariadb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.secondary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.secondary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.secondary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.secondary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MARIADB_REPLICATION_MODE + value: "slave" + - name: MARIADB_MASTER_HOST + value: {{ include "mariadb.primary.fullname" . 
}} + - name: MARIADB_MASTER_PORT_NUMBER + value: {{ .Values.primary.service.port | quote }} + - name: MARIADB_MASTER_ROOT_USER + value: "root" + {{- if .Values.auth.usePasswordFiles }} + - name: MARIADB_MASTER_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mariadb/secrets/mariadb-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MARIADB_MASTER_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . }} + key: mariadb-root-password + {{- end }} + - name: MARIADB_REPLICATION_USER + value: {{ .Values.auth.replicationUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MARIADB_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mariadb/secrets/mariadb-replication-password" .Values.auth.customPasswordFiles.replicator }} + {{- else }} + - name: MARIADB_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . }} + key: mariadb-replication-password + {{- end }} + {{- if .Values.secondary.extraFlags }} + - name: MARIADB_EXTRA_FLAGS + value: "{{ .Values.secondary.extraFlags }}" + {{- end }} + {{- if .Values.secondary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.secondary.extraEnvVarsCM .Values.secondary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.secondary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.secondary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.secondary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.secondary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if .Values.secondary.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.secondary.livenessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MARIADB_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MARIADB_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MARIADB_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.secondary.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.secondary.readinessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MARIADB_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MARIADB_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MARIADB_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.secondary.resources }} + resources: {{ toYaml .Values.secondary.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mariadb + {{- if or .Values.secondary.configuration .Values.secondary.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mariadb/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mariadb-credentials + mountPath: /opt/bitnami/mariadb/secrets/ + {{- end }} + {{- if .Values.secondary.extraVolumeMounts }} + {{- include 
"common.tplvalues.render" (dict "value" .Values.secondary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mariadb.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.auth.usePasswordFiles }} + - name: MARIADB_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mariadb-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MARIADB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . }} + key: mariadb-root-password + {{- end }} + command: + - /bin/bash + - -ec + - | + password_aux="${MARIADB_ROOT_PASSWORD:-}" + if [[ -f "${MARIADB_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MARIADB_ROOT_PASSWORD_FILE") + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.secondary }} {{ . }} {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + volumeMounts: + - name: mariadb-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + {{- if .Values.secondary.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.secondary.configuration .Values.secondary.existingConfigmap }} + - name: config + configMap: + name: {{ include "mariadb.secondary.configmapName" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mariadb-credentials + secret: + secretName: {{ template "mariadb.secretName" . }} + items: + - key: mariadb-root-password + path: mariadb-root-password + - key: mariadb-replication-password + path: mariadb-replication-password + {{- end }} + {{- if .Values.secondary.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.secondary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{ include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: secondary + spec: + accessModes: + {{- range .Values.secondary.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.secondary.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.secondary.persistence "global" .Values.global) }} + {{- if .Values.secondary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} +{{- end }} diff --git a/gitea/charts/mariadb/templates/secondary/svc.yaml b/gitea/charts/mariadb/templates/secondary/svc.yaml new file mode 100644 index 0000000..a0da812 --- /dev/null +++ b/gitea/charts/mariadb/templates/secondary/svc.yaml @@ -0,0 +1,51 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mariadb.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.secondary.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.secondary.service.type }} + {{- if and (eq .Values.secondary.service.type "ClusterIP") .Values.secondary.service.clusterIP }} + clusterIP: {{ .Values.secondary.service.clusterIP }} + {{- end }} + {{- if and .Values.secondary.service.loadBalancerIP (eq .Values.secondary.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.secondary.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.secondary.service.type "LoadBalancer") .Values.secondary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.secondary.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.secondary.service.port }} + protocol: TCP + targetPort: mysql + {{- if (and (or (eq .Values.secondary.service.type "NodePort") (eq .Values.secondary.service.type "LoadBalancer")) .Values.secondary.service.nodePort) }} + nodePort: {{ .Values.secondary.service.nodePort }} + {{- else if eq .Values.secondary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + port: 9104 + protocol: TCP + targetPort: metrics + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/gitea/charts/mariadb/templates/secrets.yaml b/gitea/charts/mariadb/templates/secrets.yaml new file mode 100644 index 0000000..1d08e2c --- /dev/null +++ b/gitea/charts/mariadb/templates/secrets.yaml @@ -0,0 +1,39 @@ +{{- if eq (include "mariadb.createSecret" .) "true" }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if not (empty .Values.auth.rootPassword) }} + mariadb-root-password: {{ .Values.auth.rootPassword | b64enc | quote }} + {{- else if (not .Values.auth.forcePassword) }} + mariadb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- else }} + mariadb-root-password: {{ required "A MariaDB Root Password is required!" .Values.auth.rootPassword }} + {{- end }} + {{- if and (not (empty .Values.auth.username)) (not (empty .Values.auth.password)) }} + mariadb-password: {{ .Values.auth.password | b64enc | quote }} + {{- else if (not .Values.auth.forcePassword) }} + mariadb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- else }} + mariadb-password: {{ required "A MariaDB Database Password is required!" .Values.auth.password }} + {{- end }} + {{- if eq .Values.architecture "replication" }} + {{- if not (empty .Values.auth.replicationPassword) }} + mariadb-replication-password: {{ .Values.auth.replicationPassword | b64enc | quote }} + {{- else if (not .Values.auth.forcePassword) }} + mariadb-replication-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- else }} + mariadb-replication-password: {{ required "A MariaDB Replication Password is required!" .Values.auth.replicationPassword }} + {{- end }} + {{- end }} +{{- end }} diff --git a/gitea/charts/mariadb/templates/serviceaccount.yaml b/gitea/charts/mariadb/templates/serviceaccount.yaml new file mode 100644 index 0000000..ec83685 --- /dev/null +++ b/gitea/charts/mariadb/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mariadb.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/gitea/charts/mariadb/templates/servicemonitor.yaml b/gitea/charts/mariadb/templates/servicemonitor.yaml new file mode 100644 index 0000000..273e58e --- /dev/null +++ b/gitea/charts/mariadb/templates/servicemonitor.yaml @@ -0,0 +1,41 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/gitea/charts/mariadb/values-production.yaml b/gitea/charts/mariadb/values-production.yaml new file mode 100644 index 0000000..c588b7e --- /dev/null +++ b/gitea/charts/mariadb/values-production.yaml @@ -0,0 +1,841 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami MariaDB image +## ref: https://hub.docker.com/r/bitnami/mariadb/tags/ +## +image: + registry: docker.io + repository: bitnami/mariadb + tag: 10.5.8-debian-10-r21 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## It turns on BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override mariadb.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override mariadb.fullname template +## +# fullnameOverride: + +## Cluster domain +## +clusterDomain: cluster.local + +## Common annotations to add to all MariaDB resources (sub-charts are not considered). Evaluated as a template +## +commonAnnotations: {} + +## Common labels to add to all MariaDB resources (sub-charts are not considered). Evaluated as a template +## +commonLabels: {} + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## MariaDB architecture. 
Allowed values: standalone or replication +## +architecture: replication + +## MariaDB Authentication parameters +## +auth: + ## MariaDB root password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-the-root-password-on-first-run + ## + rootPassword: "" + ## MariaDB custom user and database + ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-on-first-run + ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run + ## + database: my_database + username: "" + password: "" + ## MariaDB replication user and password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster + ## + replicationUser: replicator + replicationPassword: "" + ## Existing secret with MariaDB credentials + ## NOTE: When it's set the previous parameters are ignored. + ## + # existingSecret: name-of-existing-secret + ## Force users to specify required passwords + ## + forcePassword: true + ## Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: true + ## Use custom secret files other than chart provided when usePasswordFiles is set to "true" + ## Example: + ## customPasswordFiles: + ## root: /vault/secrets/mariadb-root + ## user: /vault/secrets/mariadb-user + ## replicator: /vault/secrets/mariadb-replicator + ## + customPasswordFiles: + root: "" + user: "" + replicator: "" + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." +## +initdbScripts: {} + +## Existing ConfigMap with custom init scripts +## +# initdbScriptsConfigMap: + +## Mariadb Primary parameters +## +primary: + ## Command and args for running the container (set to default if not set). Use array form + ## + command: [] + args: [] + + ## Configure MariaDB Primary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mariadb + plugin_dir=/opt/bitnami/mariadb/plugin + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + tmpdir=/opt/bitnami/mariadb/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + log-error=/opt/bitnami/mariadb/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mariadb/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + + ## Name of existing ConfigMap with MariaDB Primary configuration. 
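For reference, a minimal sketch of a pre-created credentials Secret that the auth.existingSecret parameter above could point to instead of the chart-managed secret; the Secret name and password values are illustrative only, while the key names mirror those generated by the chart's secrets.yaml template (mariadb-root-password, mariadb-password and, for replication, mariadb-replication-password):

  # Hypothetical Secret for use with auth.existingSecret; name and values are placeholders.
  apiVersion: v1
  kind: Secret
  metadata:
    name: mariadb-credentials
  type: Opaque
  stringData:
    mariadb-root-password: "replace-with-root-password"
    mariadb-password: "replace-with-user-password"
    mariadb-replication-password: "replace-with-replication-password"  # only used when architecture is replication

It would then be referenced by setting auth.existingSecret to the Secret name (here mariadb-credentials) at install time.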
+ ## NOTE: When it's set the 'configuration' parameter is ignored + ## + # existingConfiguration: + + ## updateStrategy for Mariadb Primary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + + ## Partition update strategy for Mariadb Primary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + # rollingUpdatePartition: + + ## Mariadb Primary pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## Mariadb Primary pod affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAffinityPreset: "" + + ## Mariadb Primary pod anti-affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + + ## Mariadb Primary node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## + nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + type: "" + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## Node label values to match + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + + ## Affinity for MariaDB primary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + + ## Node labels for MariaDB primary pods assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for MariaDB primary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## MariaDB primary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + + ## MariaDB primary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + + ## MariaDB primary container's resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # memory: 256Mi + # cpu: 100m + requests: {} + # memory: 256Mi + # cpu: 100m + + ## MariaDB primary container's liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## MariaDB primary custom liveness probe + ## + customLivenessProbe: {} + + ## MariaDB primary custom rediness probe + ## + customReadinessProbe: {} + + ## MariaDB primary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + + ## An array to add extra environment variables on MariaDB primary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars for MariaDB primary containers: + ## + extraEnvVarsCM: "" + + ## Secret with extra env vars for MariaDB primary containers: + ## + extraEnvVarsSecret: "" + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + ## Name of existing PVC to hold MariaDB Primary data + ## NOTE: When it's set the rest of persistence parameters are ignored + ## + # existingClaim: + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## Persistent Volume Claim annotations + ## + annotations: {} + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: 8Gi + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + + ## Extra volumes to add to the MariaDB Primary pod(s) + ## + extraVolumes: [] + + ## Extra volume mounts to add to the MariaDB Primary container(s) + ## + extraVolumeMounts: [] + + ## Extra init containers to add to the MariaDB Primary pod(s) + ## + initContainers: [] + + ## Extra sidecar containers to add to the MariaDB Primary pod(s) + ## + sidecars: [] + + ## MariaDB Primary Service parameters + ## + service: + ## Service type + ## + type: ClusterIP + ## Service port + ## + port: 3306 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Service clusterIP + ## + # clusterIP: None + clusterIP: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required + ## + annotations: {} + + ## MariaDB primary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + create: false + ## Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + # maxUnavailable: 1 + +## Mariadb Secondary parameters +## +secondary: + ## Number of Mariadb Secondary replicas to deploy + ## + replicaCount: 2 + + ## Command and args for running the container (set to default if not set). Use array form + ## + command: [] + args: [] + + ## Configure MariaDB Secondary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mariadb + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + tmpdir=/opt/bitnami/mariadb/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + log-error=/opt/bitnami/mariadb/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + default-character-set=UTF8 + + [manager] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + + ## Name of existing ConfigMap with MariaDB Secondary configuration. + ## NOTE: When it's set the 'configuration' parameter is ignored + ## + # existingConfiguration: + + ## updateStrategy for Mariadb Secondary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + + ## Partition update strategy for Mariadb Secondary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + # rollingUpdatePartition: + + ## Mariadb Secondary pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## Mariadb Secondary pod affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAffinityPreset: "" + + ## Mariadb Secondary pod anti-affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + + ## Mariadb Secondary node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## + nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + type: "" + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## Node label values to match + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + + ## Affinity for MariaDB secondary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + + ## Node labels for MariaDB secondary pods assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for MariaDB secondary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## MariaDB secondary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + + ## MariaDB secondary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + + ## MariaDB secondary container's resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # memory: 256Mi + # cpu: 100m + requests: {} + # memory: 256Mi + # cpu: 100m + + ## MariaDB secondary container's liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## MariaDB secondary custom liveness probe + ## + customLivenessProbe: {} + + ## MariaDB secondary custom rediness probe + ## + customReadinessProbe: {} + + ## MariaDB secondary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + + ## An array to add extra environment variables on MariaDB secondary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars for MariaDB secondary containers: + ## + extraEnvVarsCM: "" + + ## Secret with extra env vars for MariaDB secondary containers: + ## + extraEnvVarsSecret: "" + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## Persistent Volume Claim annotations + ## + annotations: {} + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: 8Gi + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + + ## Extra volumes to add to the MariaDB Secondary pod(s) + ## + extraVolumes: [] + + ## Extra volume mounts to add to the MariaDB Secondary container(s) + ## + extraVolumeMounts: [] + + ## Extra init containers to add to the MariaDB Secondary pod(s) + ## + initContainers: [] + + ## Extra sidecar containers to add to the MariaDB Secondary pod(s) + ## + sidecars: [] + + ## MariaDB Secondary Service parameters + ## + service: + ## Service type + ## + type: ClusterIP + ## Service port + ## + port: 3306 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Service clusterIP + ## + # clusterIP: None + clusterIP: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required + ## + annotations: {} + + ## MariaDB secondary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + create: false + ## Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + # maxUnavailable: 1 + +## MariaDB pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the mariadb.fullname template + ## + # name: + ## Annotations to add to the service account (evaluated as a template) + ## + annotations: {} + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## Specifies whether RBAC rules should be created + ## + create: false + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## Mysqld Prometheus exporter parameters +## +metrics: + enabled: false + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.12.1-debian-10-r289 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
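Where the storage provisioner does not honour the fsGroup from the pod security context, the optional volume-permissions init container described above can be switched on; a minimal values override sketch using keys from this file (the resource figures are illustrative, not chart defaults):

  # Sketch: enable the init container that chowns the data volume to runAsUser:fsGroup.
  volumePermissions:
    enabled: true
    resources:
      requests:
        cpu: 50m       # illustrative values, not chart defaults
        memory: 64Mi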
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9104" + + ## Extra args to be passed to mysqld_exporter + ## ref: https://github.com/prometheus/mysqld_exporter/ + ## E.g. + ## - --collect.auto_increment.columns + ## - --collect.binlog_size + ## - --collect.engine_innodb_status + ## - --collect.engine_tokudb_status + ## - --collect.global_status + ## - --collect.global_variables + ## - --collect.info_schema.clientstats + ## - --collect.info_schema.innodb_metrics + ## - --collect.info_schema.innodb_tablespaces + ## - --collect.info_schema.innodb_cmp + ## - --collect.info_schema.innodb_cmpmem + ## - --collect.info_schema.processlist + ## - --collect.info_schema.processlist.min_time + ## - --collect.info_schema.query_response_time + ## - --collect.info_schema.tables + ## - --collect.info_schema.tables.databases + ## - --collect.info_schema.tablestats + ## - --collect.info_schema.userstats + ## - --collect.perf_schema.eventsstatements + ## - --collect.perf_schema.eventsstatements.digest_text_limit + ## - --collect.perf_schema.eventsstatements.limit + ## - --collect.perf_schema.eventsstatements.timelimit + ## - --collect.perf_schema.eventswaits + ## - --collect.perf_schema.file_events + ## - --collect.perf_schema.file_instances + ## - --collect.perf_schema.indexiowaits + ## - --collect.perf_schema.tableiowaits + ## - --collect.perf_schema.tablelocks + ## - --collect.perf_schema.replication_group_member_stats + ## - --collect.slave_status + ## - --collect.slave_hosts + ## - --collect.heartbeat + ## - --collect.heartbeat.database + ## - --collect.heartbeat.table + ## + extraArgs: + primary: [] + secondary: [] + + ## Mysqld Prometheus exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # memory: 256Mi + # cpu: 100m + requests: {} + # memory: 256Mi + # cpu: 100m + + ## Mysqld Prometheus exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + ## + enabled: false + ## Specify the namespace in which the serviceMonitor resource will be created + ## + # namespace: "" + ## Specify the interval at which metrics should be scraped + ## + interval: 30s + ## Specify the timeout after which the scrape is ended + ## + # scrapeTimeout: 30s + ## Specify Metric Relabellings to add to the scrape endpoint + ## + # relabellings: + ## Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## Specify the release for ServiceMonitor. Sometimes it should be custom for prometheus operator to work + ## + # release: "" + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} diff --git a/gitea/charts/mariadb/values.schema.json b/gitea/charts/mariadb/values.schema.json new file mode 100644 index 0000000..500c4eb --- /dev/null +++ b/gitea/charts/mariadb/values.schema.json @@ -0,0 +1,176 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "MariaDB architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`" + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "properties": { + "rootPassword": { + "type": "string", + "title": "MariaDB root password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + }, + "database": { + "type": "string", + "title": "MariaDB custom database", + "description": "Name of the custom database to be created during the 1st initialization of MariaDB", + "form": true + }, + "username": { + "type": "string", + "title": "MariaDB custom user", + "description": "Name of the custom user to be created during the 1st initialization of MariaDB. 
This user only has permissions on the MariaDB custom database", + "form": true + }, + "password": { + "type": "string", + "title": "Password for MariaDB custom user", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true, + "hidden": { + "value": false, + "path": "usePassword" + } + }, + "replicationUser": { + "type": "string", + "title": "MariaDB replication user", + "description": "Name of user used to manage replication.", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + }, + "replicationPassword": { + "type": "string", + "title": "Password for MariaDB replication user", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "primary": { + "type": "object", + "title": "Primary replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for primary replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + } + } + }, + "secondary": { + "type": "object", + "title": "Secondary replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for secondary replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/gitea/charts/mariadb/values.yaml b/gitea/charts/mariadb/values.yaml new file mode 100644 index 0000000..1183f72 --- /dev/null +++ b/gitea/charts/mariadb/values.yaml @@ -0,0 +1,838 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image 
parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami MariaDB image +## ref: https://hub.docker.com/r/bitnami/mariadb/tags/ +## +image: + registry: docker.io + repository: bitnami/mariadb + tag: 10.5.8-debian-10-r21 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## It turns on BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override mariadb.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override mariadb.fullname template +## +# fullnameOverride: + +## Cluster domain +## +clusterDomain: cluster.local + +## Common annotations to add to all MariaDB resources (sub-charts are not considered). Evaluated as a template +## +commonAnnotations: {} + +## Common labels to add to all MariaDB resources (sub-charts are not considered). Evaluated as a template +## +commonLabels: {} + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## MariaDB architecture. Allowed values: standalone or replication +## +architecture: standalone + +## MariaDB Authentication parameters +## +auth: + ## MariaDB root password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-the-root-password-on-first-run + ## + rootPassword: "" + ## MariaDB custom user and database + ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-on-first-run + ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run + ## + database: my_database + username: "" + password: "" + ## MariaDB replication user and password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster + ## + replicationUser: replicator + replicationPassword: "" + ## Existing secret with MariaDB credentials + ## NOTE: When it's set the previous parameters are ignored. + ## + # existingSecret: name-of-existing-secret + ## Force users to specify required passwords + ## + forcePassword: false + ## Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + ## Use custom secret files other than chart provided when usePasswordFiles is set to "true" + ## Example: + ## customPasswordFiles: + ## root: /vault/secrets/mariadb-root + ## user: /vault/secrets/mariadb-user + ## replicator: /vault/secrets/mariadb-replicator + ## + customPasswordFiles: {} + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." 
+## +initdbScripts: {} + +## Existing ConfigMap with custom init scripts +## +# initdbScriptsConfigMap: + +## Mariadb Primary parameters +## +primary: + ## Command and args for running the container (set to default if not set). Use array form + ## + command: [] + args: [] + + ## Configure MariaDB Primary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mariadb + plugin_dir=/opt/bitnami/mariadb/plugin + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + tmpdir=/opt/bitnami/mariadb/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + log-error=/opt/bitnami/mariadb/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mariadb/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + + ## Name of existing ConfigMap with MariaDB Primary configuration. + ## NOTE: When it's set the 'configuration' parameter is ignored + ## + # existingConfiguration: + + ## updateStrategy for Mariadb Primary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + + ## Partition update strategy for Mariadb Primary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + # rollingUpdatePartition: + + ## Mariadb Primary pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## Mariadb Primary pod affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAffinityPreset: "" + + ## Mariadb Primary pod anti-affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + + ## Mariadb Primary node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## + nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + type: "" + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## Node label values to match + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + + ## Affinity for MariaDB primary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + + ## Node labels for MariaDB primary pods assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for MariaDB primary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## MariaDB primary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + + ## MariaDB primary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + + ## MariaDB primary container's resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # memory: 256Mi + # cpu: 100m + requests: {} + # memory: 256Mi + # cpu: 100m + + ## MariaDB primary container's liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## MariaDB primary custom liveness probe + ## + customLivenessProbe: {} + + ## MariaDB primary custom readiness probe + ## + customReadinessProbe: {} + + ## MariaDB primary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + + ## An array to add extra environment variables on MariaDB primary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars for MariaDB primary containers: + ## + extraEnvVarsCM: "" + + ## Secret with extra env vars for MariaDB primary containers: + ## + extraEnvVarsSecret: "" + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + ## Name of existing PVC to hold MariaDB Primary data + ## NOTE: When it's set the rest of persistence parameters are ignored + ## + # existingClaim: + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## Persistent Volume Claim annotations + ## + annotations: {} + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: 8Gi + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + + ## Extra volumes to add to the MariaDB Primary pod(s) + ## + extraVolumes: [] + + ## Extra volume mounts to add to the MariaDB Primary container(s) + ## + extraVolumeMounts: [] + + ## Extra init containers to add to the MariaDB Primary pod(s) + ## + initContainers: [] + + ## Extra sidecar containers to add to the MariaDB Primary pod(s) + ## + sidecars: [] + + ## MariaDB Primary Service parameters + ## + service: + ## Service type + ## + type: ClusterIP + ## Service port + ## + port: 3306 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Service clusterIP + ## + # clusterIP: None + clusterIP: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required + ## + annotations: {} + + ## MariaDB primary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + create: false + ## Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + # maxUnavailable: 1 + +## Mariadb Secondary parameters +## +secondary: + ## Number of Mariadb Secondary replicas to deploy + ## + replicaCount: 1 + + ## Command and args for running the container (set to default if not set). Use array form + ## + command: [] + args: [] + + ## Configure MariaDB Secondary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mariadb + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + tmpdir=/opt/bitnami/mariadb/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + log-error=/opt/bitnami/mariadb/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + default-character-set=UTF8 + + [manager] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + + ## Name of existing ConfigMap with MariaDB Secondary configuration. 
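As a worked example of the persistence and replication knobs documented above, a small values override might look like the following; every key is taken from this file, while the storage class name and sizes are placeholders chosen for illustration:

  # Hypothetical override: dedicated storage class, larger volumes, two secondaries.
  architecture: replication
  primary:
    persistence:
      enabled: true
      storageClass: "fast-ssd"   # placeholder class name, environment-specific
      size: 20Gi
  secondary:
    replicaCount: 2
    persistence:
      size: 20Gi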
+ ## NOTE: When it's set the 'configuration' parameter is ignored + ## + # existingConfiguration: + + ## updateStrategy for Mariadb Secondary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + + ## Partition update strategy for Mariadb Secondary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + # rollingUpdatePartition: + + ## Mariadb Secondary pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## Mariadb Secondary pod affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAffinityPreset: "" + + ## Mariadb Secondary pod anti-affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + + ## Mariadb Secondary node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## + nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + type: "" + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## Node label values to match + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + + ## Affinity for MariaDB secondary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + + ## Node labels for MariaDB secondary pods assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for MariaDB secondary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## MariaDB secondary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + + ## MariaDB secondary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + + ## MariaDB secondary container's resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # memory: 256Mi + # cpu: 100m + requests: {} + # memory: 256Mi + # cpu: 100m + + ## MariaDB secondary container's liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## MariaDB secondary custom liveness probe + ## + customLivenessProbe: {} + + ## MariaDB secondary custom readiness probe + ## + customReadinessProbe: {} + + ## MariaDB secondary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + + ## An array to add extra environment variables on MariaDB secondary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars for MariaDB secondary containers: + ## + extraEnvVarsCM: "" + + ## Secret with extra env vars for MariaDB secondary containers: + ## + extraEnvVarsSecret: "" + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## Persistent Volume Claim annotations + ## + annotations: {} + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: 8Gi + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + + ## Extra volumes to add to the MariaDB Secondary pod(s) + ## + extraVolumes: [] + + ## Extra volume mounts to add to the MariaDB Secondary container(s) + ## + extraVolumeMounts: [] + + ## Extra init containers to add to the MariaDB Secondary pod(s) + ## + initContainers: [] + + ## Extra sidecar containers to add to the MariaDB Secondary pod(s) + ## + sidecars: [] + + ## MariaDB Secondary Service parameters + ## + service: + ## Service type + ## + type: ClusterIP + ## Service port + ## + port: 3306 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Service clusterIP + ## + # clusterIP: None + clusterIP: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required + ## + annotations: {} + + ## MariaDB secondary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + create: false + ## Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + # maxUnavailable: 1 + +## MariaDB pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the mariadb.fullname template + ## + # name: + ## Annotations to add to the service account (evaluated as a template) + ## + annotations: {} + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## Specifies whether RBAC rules should be created + ## + create: false + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## Mysqld Prometheus exporter parameters +## +metrics: + enabled: false + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.12.1-debian-10-r289 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9104" + + ## Extra args to be passed to mysqld_exporter + ## ref: https://github.com/prometheus/mysqld_exporter/ + ## E.g. 
+ ## - --collect.auto_increment.columns + ## - --collect.binlog_size + ## - --collect.engine_innodb_status + ## - --collect.engine_tokudb_status + ## - --collect.global_status + ## - --collect.global_variables + ## - --collect.info_schema.clientstats + ## - --collect.info_schema.innodb_metrics + ## - --collect.info_schema.innodb_tablespaces + ## - --collect.info_schema.innodb_cmp + ## - --collect.info_schema.innodb_cmpmem + ## - --collect.info_schema.processlist + ## - --collect.info_schema.processlist.min_time + ## - --collect.info_schema.query_response_time + ## - --collect.info_schema.tables + ## - --collect.info_schema.tables.databases + ## - --collect.info_schema.tablestats + ## - --collect.info_schema.userstats + ## - --collect.perf_schema.eventsstatements + ## - --collect.perf_schema.eventsstatements.digest_text_limit + ## - --collect.perf_schema.eventsstatements.limit + ## - --collect.perf_schema.eventsstatements.timelimit + ## - --collect.perf_schema.eventswaits + ## - --collect.perf_schema.file_events + ## - --collect.perf_schema.file_instances + ## - --collect.perf_schema.indexiowaits + ## - --collect.perf_schema.tableiowaits + ## - --collect.perf_schema.tablelocks + ## - --collect.perf_schema.replication_group_member_stats + ## - --collect.slave_status + ## - --collect.slave_hosts + ## - --collect.heartbeat + ## - --collect.heartbeat.database + ## - --collect.heartbeat.table + ## + extraArgs: + primary: [] + secondary: [] + + ## Mysqld Prometheus exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # memory: 256Mi + # cpu: 100m + requests: {} + # memory: 256Mi + # cpu: 100m + + ## Mysqld Prometheus exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + ## + enabled: false + ## Specify the namespace in which the serviceMonitor resource will be created + ## + # namespace: "" + ## Specify the interval at which metrics should be scraped + ## + interval: 30s + ## Specify the timeout after which the scrape is ended + ## + # scrapeTimeout: 30s + ## Specify Metric Relabellings to add to the scrape endpoint + ## + # relabellings: + ## Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## Specify the release for ServiceMonitor. 
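To tie the exporter and ServiceMonitor settings above together, a values override along these lines could be used; all keys come from this file, while the release label is a placeholder that depends on how the Prometheus Operator is installed in the target cluster:

  # Sketch: enable the mysqld exporter side-car and a ServiceMonitor picked up
  # by an existing Prometheus Operator installation.
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
      interval: 30s
      additionalLabels:
        release: prometheus   # placeholder; must match the operator's ServiceMonitor selector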
Sometimes it should be custom for prometheus operator to work + ## + # release: "" + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} diff --git a/gitea/templates/NOTES.txt b/gitea/templates/NOTES.txt new file mode 100644 index 0000000..780b7aa --- /dev/null +++ b/gitea/templates/NOTES.txt @@ -0,0 +1,45 @@ +1. Connect to your Gitea web URL by running: + +{{- if .Values.ingress.enabled }} + + Ingress is enabled for this chart deployment. Please access the web UI at {{ .Values.ingress.hostname }} + +{{- else if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP/ + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc -w {{ template "fullname" . }}http' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }}http -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP/ +{{- else if contains "ClusterIP" .Values.service.type }} + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:8080/ + kubectl port-forward $POD_NAME 8080:80 +{{- end }} + +2. Connect to your Gitea ssh port: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP/ + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc -w {{ template "fullname" . }}-ssh' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }}-ssh -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP/ +{{- else if contains "ClusterIP" .Values.service.type }} + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:8080/ + kubectl port-forward $POD_NAME 8022:22 +{{- end }} diff --git a/gitea/templates/_helpers.tpl b/gitea/templates/_helpers.tpl new file mode 100644 index 0000000..f662ef5 --- /dev/null +++ b/gitea/templates/_helpers.tpl @@ -0,0 +1,31 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 24 -}} +{{- end -}} + +{{- define "mariadb.fullname" -}} +{{- printf "%s-%s" .Release.Name "mariadb" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "gitea.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/gitea/templates/deployment.yaml b/gitea/templates/deployment.yaml new file mode 100644 index 0000000..6cd6c82 --- /dev/null +++ b/gitea/templates/deployment.yaml @@ -0,0 +1,55 @@ + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "fullname" . }} + labels: + app: {{ template "fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + replicas: 1 + selector: + matchLabels: + app: {{ template "fullname" . }} + template: + metadata: + labels: + app: {{ template "fullname" . }} + spec: + containers: + {{- include "gitea" . | indent 6 }} + {{- include "memcached" . | indent 6 }} + initContainers: + {{- include "init" . | indent 6 }} + volumes: + - name: gitea-data + {{- if .Values.persistence.enabled }} + {{- if .Values.persistence.directGiteaVolumeMount }} +{{ tpl .Values.persistence.directGiteaVolumeMount . | indent 8 }} + {{- else }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingGiteaClaim | default "gitea-data" }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: gitea-lfs + {{- if .Values.lfs.enabled }} + {{- if .Values.lfs.directGiteaVolumeMount }} +{{ tpl .Values.lfs.directGiteaVolumeMount . | indent 8 }} + {{- else }} + persistentVolumeClaim: + claimName: {{ .Values.lfs.existingGiteaClaim | default "gitea-lfs" }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: gitea-config + configMap: + name: {{ template "fullname" . }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} diff --git a/gitea/templates/gitea/_container.tpl b/gitea/templates/gitea/_container.tpl new file mode 100644 index 0000000..7af9e93 --- /dev/null +++ b/gitea/templates/gitea/_container.tpl @@ -0,0 +1,60 @@ +{{/* +Create helm partial for gitea server +*/}} +{{- define "gitea" }} +- name: gitea + image: {{ .Values.images.gitea }} + imagePullPolicy: {{ .Values.images.pullPolicy }} + env: + {{- if .Values.env }} + {{- toYaml .Values.env | nindent 2 }} + {{- end }} + {{- if .Values.externalDB.existingSecret.enabled }} + - name: EXTERNALDB_USER + valueFrom: + secretKeyRef: + name: {{ .Values.externalDB.existingSecret.secretName | default (printf "%s-%s" .Release.Name "db") }} + key: {{ .Values.externalDB.existingSecret.usernameKey | default "db-username" }} + - name: EXTERNALDB_PASS + valueFrom: + secretKeyRef: + name: {{ .Values.externalDB.existingSecret.secretName | default (printf "%s-%s" .Release.Name "db") }} + key: {{ .Values.externalDB.existingSecret.passwordKey | default "db-password" }} + {{- end }} + {{- if .Values.mariadb.enabled }} + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.fullname" . 
}} + key: mariadb-password + {{- end }} + ports: + - name: ssh + containerPort: 22 + - name: http + containerPort: 3000 + livenessProbe: + tcpSocket: + port: http + initialDelaySeconds: 200 + timeoutSeconds: 1 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 10 + readinessProbe: + tcpSocket: + port: http + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + resources: +{{ toYaml .Values.resources.gitea | indent 4 }} + volumeMounts: + - name: gitea-data + mountPath: /data + - name: gitea-lfs + mountPath: /app/gitea/data/lfs + - name: gitea-config + mountPath: /etc/gitea +{{- end }} diff --git a/gitea/templates/gitea/gitea-config.yaml b/gitea/templates/gitea/gitea-config.yaml new file mode 100644 index 0000000..02bf1a9 --- /dev/null +++ b/gitea/templates/gitea/gitea-config.yaml @@ -0,0 +1,728 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "fullname" . }} + labels: + app: {{ template "fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + app.ini: |- + ; This file lists the default values used by Gitea + ; Copy required sections to your own app.ini (default is custom/conf/app.ini) + ; and modify as needed. + + ; see https://docs.gitea.io/en-us/config-cheat-sheet/ for additional documentation. + + ; App name that shows in every page title + APP_NAME = Gitea: Git with a cup of tea + ; Change it if you run locally + RUN_USER = git + ; Either "dev", "prod" or "test", default is "prod" + RUN_MODE = prod + + [repository] + ROOT = + SCRIPT_TYPE = bash + ; Default ANSI charset + ANSI_CHARSET = + ; Force every new repository to be private + FORCE_PRIVATE = false + ; Default privacy setting when creating a new repository, allowed values: last, private, public. Default is last which means the last setting used. + DEFAULT_PRIVATE = last + ; Global limit of repositories per user, applied at creation time. -1 means no limit + MAX_CREATION_LIMIT = -1 + ; Mirror sync queue length, increase if mirror syncing starts hanging + MIRROR_QUEUE_LENGTH = 1000 + ; Patch test queue length, increase if pull request patch testing starts hanging + PULL_REQUEST_QUEUE_LENGTH = 1000 + ; Preferred Licenses to place at the top of the List + ; The name here must match the filename in conf/license or custom/conf/license + PREFERRED_LICENSES = Apache License 2.0,MIT License + ; Disable the ability to interact with repositories using the HTTP protocol + DISABLE_HTTP_GIT = false + ; Force ssh:// clone url instead of scp-style uri when default SSH port is used + USE_COMPAT_SSH_URI = false + + [repository.editor] + ; List of file extensions for which lines should be wrapped in the CodeMirror editor + ; Separate extensions with a comma. To line wrap files without an extension, just put a comma + LINE_WRAP_EXTENSIONS = .txt,.md,.markdown,.mdown,.mkd, + ; Valid file modes that have a preview API associated with them, such as api/v1/markdown + ; Separate the values by commas. The preview tab in edit mode won't be displayed if the file extension doesn't match + PREVIEWABLE_FILE_MODES = markdown + + [repository.local] + ; Path for local repository copy. Defaults to `tmp/local-repo` + LOCAL_COPY_PATH = tmp/local-repo + ; Path for local wiki copy. Defaults to `tmp/local-wiki` + LOCAL_WIKI_PATH = tmp/local-wiki + + [repository.upload] + ; Whether repository file uploads are enabled. Defaults to `true` + ENABLED = true + ; Path for uploads. 
Defaults to `data/tmp/uploads` (tmp gets deleted on gitea restart) + TEMP_PATH = data/tmp/uploads + ; One or more allowed types, e.g. image/jpeg|image/png. Nothing means any file type + ALLOWED_TYPES = + ; Max size of each file in megabytes. Defaults to 3MB + FILE_MAX_SIZE = 3 + ; Max number of files per upload. Defaults to 5 + MAX_FILES = 5 + + [repository.pull-request] + ; List of prefixes used in Pull Request title to mark them as Work In Progress + WORK_IN_PROGRESS_PREFIXES=WIP:,[WIP] + + [ui] + ; Number of repositories that are displayed on one explore page + EXPLORE_PAGING_NUM = 20 + ; Number of issues that are displayed on one page + ISSUE_PAGING_NUM = 10 + ; Number of maximum commits displayed in one activity feed + FEED_MAX_COMMIT_NUM = 5 + ; Number of maximum commits displayed in commit graph. + GRAPH_MAX_COMMIT_NUM = 100 + ; Number of line of codes shown for a code comment + CODE_COMMENT_LINES = 4 + ; Value of `theme-color` meta tag, used by Android >= 5.0 + ; An invalid color like "none" or "disable" will have the default style + ; More info: https://developers.google.com/web/updates/2014/11/Support-for-theme-color-in-Chrome-39-for-Android + THEME_COLOR_META_TAG = `#6cc644` + ; Max size of files to be displayed (default is 8MiB) + MAX_DISPLAY_FILE_SIZE = 8388608 + ; Whether the email of the user should be shown in the Explore Users page + SHOW_USER_EMAIL = true + ; Set the default theme for the Gitea install + DEFAULT_THEME = gitea + + + [ui.admin] + ; Number of users that are displayed on one page + USER_PAGING_NUM = 50 + ; Number of repos that are displayed on one page + REPO_PAGING_NUM = 50 + ; Number of notices that are displayed on one page + NOTICE_PAGING_NUM = 25 + ; Number of organizations that are displayed on one page + ORG_PAGING_NUM = 50 + + [ui.user] + ; Number of repos that are displayed on one page + REPO_PAGING_NUM = 15 + + [ui.meta] + AUTHOR = Gitea - Git with a cup of tea + DESCRIPTION = Gitea (Git with a cup of tea) is a painless self-hosted Git service written in Go + KEYWORDS = go,git,self-hosted,gitea + + [markdown] + ; Enable hard line break extension + ENABLE_HARD_LINE_BREAK = false + ; List of custom URL-Schemes that are allowed as links when rendering Markdown + ; for example git,magnet + CUSTOM_URL_SCHEMES = + ; List of file extensions that should be rendered/edited as Markdown + ; Separate the extensions with a comma. To render files without any extension as markdown, just put a comma + FILE_EXTENSIONS = .md,.markdown,.mdown,.mkd + + [server] + ; The protocol the server listens on. One of 'http', 'https', 'unix' or 'fcgi'. + + ; PROTOCOL hardcoded to http since tls is delegated to ingress + PROTOCOL = http + {{- if .Values.service.http.externalHost }} + DOMAIN = {{ .Values.service.http.externalHost }} + {{- else if .Values.ingress.enabled }} + DOMAIN = {{ .Values.ingress.hostname }} + {{- else }} + DOMAIN = {{ template "fullname" . }}-http.{{ .Release.Namespace }}.svc.cluster.local + {{- end -}} + {{ if .Values.ingress.tls }} + {{- $proto := set . "proto" "https" }} + {{- else -}} + {{- $proto := set . 
"proto" "http" }} + {{- end -}} + {{- if and .Values.service.http.externalHost ( .Values.service.http.externalPort ) }} + ROOT_URL = {{ .proto }}://{{ .Values.service.http.externalHost }}:{{ .Values.service.http.externalPort }}/ + {{- else if .Values.service.http.externalHost }} + ROOT_URL = {{ .proto }}://{{ .Values.service.http.externalHost }}/ + {{- else if .Values.ingress.enabled }} + ROOT_URL = {{ .proto }}://%(DOMAIN)s + {{- else }} + ROOT_URL = %(PROTOCOL)s://%(DOMAIN)s:%(HTTP_PORT)s/ + {{- end -}} + ; The address to listen on. Either a IPv4/IPv6 address or the path to a unix socket. + HTTP_ADDR = 0.0.0.0 + HTTP_PORT = {{ .Values.service.http.port }} + ; If REDIRECT_OTHER_PORT is true, and PROTOCOL is set to https an http server + ; will be started on PORT_TO_REDIRECT and it will redirect plain, non-secure http requests to the main + ; ROOT_URL. Defaults are false for REDIRECT_OTHER_PORT and 80 for + ; PORT_TO_REDIRECT. + REDIRECT_OTHER_PORT = false + PORT_TO_REDIRECT = 80 + ; Permission for unix socket + UNIX_SOCKET_PERMISSION = 666 + ; Local (DMZ) URL for Gitea workers (such as SSH update) accessing web service. + ; In most cases you do not need to change the default value. + ; Alter it only if your SSH server node is not the same as HTTP node. + ; Do not set this variable if PROTOCOL is set to 'unix'. + LOCAL_ROOT_URL = %(PROTOCOL)s://%(HTTP_ADDR)s:%(HTTP_PORT)s/ + ; Disable SSH feature when not available + DISABLE_SSH = false + ; Whether to use the builtin SSH server or not. + START_SSH_SERVER = false + ; Username to use for the builtin SSH server. If blank, then it is the value of RUN_USER. + BUILTIN_SSH_SERVER_USER = + ; Domain name to be exposed in clone URL + {{- if .Values.service.ssh.externalHost }} + SSH_DOMAIN = {{ .Values.service.ssh.externalHost }} + {{- else if .Values.ingress.enabled }} + SSH_DOMAIN = {{ .Values.ingress.hostname }} + {{- else }} + SSH_DOMAIN = {{ template "fullname" . }}-ssh.{{ .Release.Namespace }}.svc.cluster.local + {{- end }} + ; THe network interface the builtin SSH server should listen on + SSH_LISTEN_HOST = + ; Port number to be exposed in clone URL + {{- if .Values.service.ssh.externalPort }} + SSH_PORT = {{ .Values.service.ssh.externalPort }} + {{- else }} + SSH_PORT = {{ .Values.service.ssh.port }} + {{- end }} + ; The port number the builtin SSH server should listen on + SSH_LISTEN_PORT = {{ .Values.service.ssh.port }} + ; Root path of SSH directory, default is '~/.ssh', but you have to use '/home/git/.ssh'. + SSH_ROOT_PATH = + ; Gitea will create a authorized_keys file by default when it is not using the internal ssh server + ; If you intend to use the AuthorizedKeysCommand functionality then you should turn this off. 
+ SSH_CREATE_AUTHORIZED_KEYS_FILE = true + ; For the built-in SSH server, choose the ciphers to support for SSH connections, + ; for system SSH this setting has no effect + SSH_SERVER_CIPHERS = aes128-ctr, aes192-ctr, aes256-ctr, aes128-gcm@openssh.com, arcfour256, arcfour128 + ; For the built-in SSH server, choose the key exchange algorithms to support for SSH connections, + ; for system SSH this setting has no effect + SSH_SERVER_KEY_EXCHANGES = diffie-hellman-group1-sha1, diffie-hellman-group14-sha1, ecdh-sha2-nistp256, ecdh-sha2-nistp384, ecdh-sha2-nistp521, curve25519-sha256@libssh.org + ; For the built-in SSH server, choose the MACs to support for SSH connections, + ; for system SSH this setting has no effect + SSH_SERVER_MACS = hmac-sha2-256-etm@openssh.com, hmac-sha2-256, hmac-sha1, hmac-sha1-96 + ; Directory to create temporary files in when testing public keys using ssh-keygen, + ; default is the system temporary directory. + SSH_KEY_TEST_PATH = + ; Path to ssh-keygen, default is 'ssh-keygen' which means the shell is responsible for finding out which one to call. + SSH_KEYGEN_PATH = ssh-keygen + ; Enable SSH Authorized Key Backup when rewriting all keys, default is true + SSH_BACKUP_AUTHORIZED_KEYS = true + ; Enable exposure of SSH clone URL to anonymous visitors, default is false + SSH_EXPOSE_ANONYMOUS = false + ; Indicate whether to check minimum key size with corresponding type + MINIMUM_KEY_SIZE_CHECK = false + ; Disable CDN even in "prod" mode + OFFLINE_MODE = {{ .Values.config.offlineMode }} + DISABLE_ROUTER_LOG = false + ; Generate steps: + ; $ ./gitea cert -ca=true -duration=8760h0m0s -host=myhost.example.com + ; + ; Or from a .pfx file exported from the Windows certificate store (do + ; not forget to export the private key): + ; $ openssl pkcs12 -in cert.pfx -out cert.pem -nokeys + ; $ openssl pkcs12 -in cert.pfx -out key.pem -nocerts -nodes + CERT_FILE = custom/https/cert.pem + KEY_FILE = custom/https/key.pem + ; Root directory containing templates and static files. + ; default is the path where Gitea is executed + STATIC_ROOT_PATH = + ; Default path for App data + APP_DATA_PATH = data + ; Application level GZIP support + ENABLE_GZIP = false + ; Application profiling (memory and cpu) + ; For "web" command it listens on localhost:6060 + ; For "serve" command it dumps to disk at PPROF_DATA_PATH as (cpuprofile|memprofile)__ + ENABLE_PPROF = false + ; PPROF_DATA_PATH, use an absolute path when you start gitea as service + PPROF_DATA_PATH = data/tmp/pprof + ; Landing page, can be "home", "explore", or "organizations" + LANDING_PAGE = home + ; Enables git-lfs support. true or false, default is false. + LFS_START_SERVER = false + ; Where your lfs files reside, default is data/lfs. + LFS_CONTENT_PATH = data/lfs + ; LFS authentication secret, change this yourself + LFS_JWT_SECRET = + ; LFS authentication validity period (in time.Duration), pushes taking longer than this may fail. 
+ LFS_HTTP_AUTH_EXPIRY = 20m + + + ; Define allowed algorithms and their minimum key length (use -1 to disable a type) + [ssh.minimum_key_sizes] + ED25519 = 256 + ECDSA = 256 + RSA = 2048 + DSA = 1024 + + [database] + {{ if and .Values.externalDB.enabled (not .Values.mariadb.enabled) }} + ; Either "mysql", "postgres", "mssql" or "sqlite3", it's your choice + DB_TYPE = {{ .Values.externalDB.dbType }} + HOST = {{ .Values.externalDB.dbHost }}:{{ .Values.externalDB.dbPort }} + NAME = {{ .Values.externalDB.dbDatabase }} + {{ if .Values.externalDB.existingSecret.enabled }} + USER = EXTERNALDB_USER + PASSWD = EXTERNALDB_PASS + {{ else }} + USER = {{ .Values.externalDB.dbUser }} + PASSWD = {{ .Values.externalDB.dbPassword }} + {{ end }} + {{ else if .Values.mariadb.enabled }} + ; Either "mysql", "postgres", "mssql" or "sqlite3", it's your choice + DB_TYPE = mysql + HOST = {{ template "mariadb.fullname" . }}:3306 + NAME = {{ .Values.mariadb.auth.database }} + USER = {{ .Values.mariadb.auth.username }} + ; Use PASSWD = `your password` for quoting if you use special characters in the password. + PASSWD = {{ .Values.mariadb.auth.password }} + {{ end }} + ; For "postgres" only, either "disable", "require" or "verify-full" + SSL_MODE = {{ .Values.externalDB.dbSSL | default "disable" }} + ; For "sqlite3" and "tidb", use an absolute path when you start gitea as service + PATH = data/gitea.db + ; For "sqlite3" only. Query timeout + SQLITE_TIMEOUT = 500 + ; For iterate buffer, default is 50 + ITERATE_BUFFER_SIZE = 50 + ; Show the database generated SQL + LOG_SQL = true + + [indexer] + ISSUE_INDEXER_PATH = indexers/issues.bleve + ; repo indexer by default disabled, since it uses a lot of disk space + REPO_INDEXER_ENABLED = false + REPO_INDEXER_PATH = indexers/repos.bleve + UPDATE_BUFFER_LEN = 20 + MAX_FILE_SIZE = 1048576 + + [admin] + ; Disallow regular (non-admin) users from creating organizations. + DISABLE_REGULAR_ORG_CREATION = false + + [security] + ; Whether the installer is disabled + INSTALL_LOCK = {{ .Values.config.disableInstaller }} + ; !!CHANGE THIS TO KEEP YOUR USER DATA SAFE!! + {{ if .Values.config.secretKey }} + SECRET_KEY = {{ .Values.config.secretKey }} + {{ else }} + SECRET_KEY = {{ randAlphaNum 64 | quote }} + {{ end }} + + + ; How long to remember that an user is logged in before requiring relogin (in days) + LOGIN_REMEMBER_DAYS = 7 + COOKIE_USERNAME = gitea_awesome + COOKIE_REMEMBER_NAME = gitea_incredible + ; Reverse proxy authentication header name of user name + REVERSE_PROXY_AUTHENTICATION_USER = X-WEBAUTH-USER + ; The minimum password length for new Users + MIN_PASSWORD_LENGTH = 6 + ; Set to true to allow users to import local server paths + IMPORT_LOCAL_PATHS = false + ; Set to true to prevent all users (including admin) from creating custom git hooks + DISABLE_GIT_HOOKS = false + + [openid] + ; + ; OpenID is an open, standard and decentralized authentication protocol. + ; Your identity is the address of a webpage you provide, which describes + ; how to prove you are in control of that page. 
+ ; + ; For more info: https://en.wikipedia.org/wiki/OpenID + ; + ; Current implementation supports OpenID-2.0 + ; + ; Providers tested to work at the time of writing: + ; - Any GNUSocial node (your.hostname.tld/username) + ; - Any SimpleID provider (http://simpleid.koinic.net) + ; - http://openid.org.cn/ + ; - openid.stackexchange.com + ; - login.launchpad.net + ; - .livejournal.com + ; + ; Whether to allow signing in via OpenID + ENABLE_OPENID_SIGNIN = {{ .Values.config.openidSignin }} + ; Whether to allow registering via OpenID + ; Do not include to rely on the DISABLE_REGISTRATION setting + ;ENABLE_OPENID_SIGNUP = true + ; Allowed URI patterns (POSIX regexp). + ; Space separated. + ; Only these would be allowed if non-blank. + ; Example value: trusted.domain.org trusted.domain.net + WHITELISTED_URIS = + ; Forbidden URI patterns (POSIX regexp). + ; Space separated. + ; Only used if WHITELISTED_URIS is blank. + ; Example value: loadaverage.org/badguy stackexchange.com/.*spammer + BLACKLISTED_URIS = + + [service] + ; Time limit to confirm account/email registration + ACTIVE_CODE_LIVE_MINUTES = 180 + ; Time limit to perform the reset of a forgotten password + RESET_PASSWD_CODE_LIVE_MINUTES = 180 + ; Whether a new user needs to confirm their email when registering. + REGISTER_EMAIL_CONFIRM = {{ .Values.config.register_email_confirm | default "false" }} + ; List of domain names that are allowed to be used to register on a Gitea instance + ; gitea.io,example.com + EMAIL_DOMAIN_WHITELIST= + ; Disallow registration, only allow admins to create accounts. + DISABLE_REGISTRATION = {{ .Values.config.disableRegistration }} + ; Allow registration only using third-party services; it works only when DISABLE_REGISTRATION is false + ALLOW_ONLY_EXTERNAL_REGISTRATION = false + ; User must sign in to view anything. + REQUIRE_SIGNIN_VIEW = {{ .Values.config.requireSignin }} + ; Mail notification + ENABLE_NOTIFY_MAIL = false + ; More detail: https://github.com/gogits/gogs/issues/165 + ENABLE_REVERSE_PROXY_AUTHENTICATION = false + ENABLE_REVERSE_PROXY_AUTO_REGISTRATION = false + ; Enable captcha validation for registration + ENABLE_CAPTCHA = false + ; Type of captcha you want to use. Options: image, recaptcha + CAPTCHA_TYPE = image + ; Enable recaptcha to use Google's recaptcha service + ; Go to https://www.google.com/recaptcha/admin to sign up for a key + RECAPTCHA_SECRET = + RECAPTCHA_SITEKEY = + ; Default value for KeepEmailPrivate + ; Each new user will get the value of this setting copied into their profile + DEFAULT_KEEP_EMAIL_PRIVATE = {{ .Values.config.default_keep_email_private | default "false" }} + ; Default value for AllowCreateOrganization + ; Every new user will have rights set to create organizations depending on this setting + DEFAULT_ALLOW_CREATE_ORGANIZATION = true + ; Default value for EnableDependencies + ; Repositories will use dependencies by default depending on this setting + DEFAULT_ENABLE_DEPENDENCIES = true + ; Enable heatmap on user profiles. + ENABLE_USER_HEATMAP = true + ; Enable Timetracking + ENABLE_TIMETRACKING = true + ; Default value for EnableTimetracking + ; Repositories will use timetracking by default depending on this setting + DEFAULT_ENABLE_TIMETRACKING = true + ; Default value for AllowOnlyContributorsToTrackTime + ; Only users with write permissions can track time if this is true + DEFAULT_ALLOW_ONLY_CONTRIBUTORS_TO_TRACK_TIME = true + ; Default value for the domain part of the user's email address in the git log + ; if they have set KeepEmailPrivate to true.
The user's email will be replaced with a + ; concatenation of the user name in lower case, "@" and NO_REPLY_ADDRESS. + NO_REPLY_ADDRESS = {{ .Values.config.no_reply_address | default "noreply.example.com" }} + + [webhook] + ; Hook task queue length, increase if webhook shooting starts hanging + QUEUE_LENGTH = 1000 + ; Deliver timeout in seconds + DELIVER_TIMEOUT = 5 + ; Allow insecure certification + SKIP_TLS_VERIFY = false + ; Number of history information in each page + PAGING_NUM = 10 + + [mailer] + ENABLED = {{ .Values.config.mailer.enabled | default "false" }} + ; Buffer length of channel, keep it as it is if you don't know what it is. + SEND_BUFFER_LEN = 100 + ; Name displayed in mail title + SUBJECT = %(APP_NAME)s + ; Mail server + ; Gmail: smtp.gmail.com:587 + ; QQ: smtp.qq.com:465 + ; Note, if the port ends with "465", SMTPS will be used. Using STARTTLS on port 587 is recommended per RFC 6409. If the server supports STARTTLS it will always be used. + HOST = {{ .Values.config.mailer.host | default "" }} + ; Disable HELO operation when hostnames are different. + DISABLE_HELO = {{ .Values.config.mailer.disable_helo | default "" }} + ; Custom hostname for HELO operation, if no value is provided, one is retrieved from system. + HELO_HOSTNAME = {{ .Values.config.mailer.helo_hostname | default "" }} + ; Do not verify the certificate of the server. Only use this for self-signed certificates + SKIP_VERIFY = {{ .Values.config.mailer.skip_verify | default "false" }} + ; Use client certificate + USE_CERTIFICATE = {{ .Values.config.mailer.use_certificate | default "false" }} + CERT_FILE = {{ .Values.config.mailer.cert_file | default "custom.config.mailer/cert.pem" }} + KEY_FILE = {{ .Values.config.mailer.key_file | default "custom.config.mailer/key.pem" }} + ; Should SMTP connection use TLS + IS_TLS_ENABLED = {{ .Values.config.mailer.is_tls_enabled | default "false" }} + ; Mail from address, RFC 5322. This can be just an email address, or the `"Name" ` format + FROM = {{ .Values.config.mailer.from | default "" }} + ; Mailer user name and password + USER = {{ .Values.config.mailer.user | default "" }} + ; Use PASSWD = `your password` for quoting if you use special characters in the password. + PASSWD = {{ .Values.config.mailer.passwd | default "" }} + ; Send mails as plain text + SEND_AS_PLAIN_TEXT = {{ .Values.config.mailer.send_as_plain_text | default "false" }} + ; Enable sendmail (override SMTP) + MAILER_TYPE = {{ .Values.config.mailer.use_sendmail | default "smtp" }} + ; Specify an alternative sendmail binary + SENDMAIL_PATH = {{ .Values.config.mailer.sendmail_path | default "" }} + ; Specify any extra sendmail arguments + SENDMAIL_ARGS = {{ .Values.config.mailer.sendmail_args | default "" }} + + [cache] + ; Either "memory", "redis", or "memcache", default is "memory" + ADAPTER = memcache + ; For "memory" only, GC interval in seconds, default is 60 + INTERVAL = 60 + ; For "redis" and "memcache", connection host address + ;redis: network=tcp,addr=:6379,password=macaron,db=0,pool_size=100,idle_timeout=180 + HOST = 127.0.0.1:11211 + ; Time to keep items in cache if not used, default is 16 hours. + ; Setting it to 0 disables caching + ITEM_TTL = 16h + + [session] + ; Either "memory", "file", or "redis", default is "memory" + PROVIDER = memory + ; Provider config options + ; memory: doesn't have any config yet + ; file: session file path, e.g. 
`data/sessions` + ; redis: network=tcp,addr=:6379,password=macaron,db=0,pool_size=100,idle_timeout=180 + ; mysql: go-sql-driver/mysql dsn config string, e.g. `root:password@/session_table` + PROVIDER_CONFIG = data/sessions + ; Session cookie name + COOKIE_NAME = i_like_gitea + ; If you use session in https only, default is false + COOKIE_SECURE = false + ; Enable set cookie, default is true + ENABLE_SET_COOKIE = true + ; Session GC time interval in seconds, default is 86400 (1 day) + GC_INTERVAL_TIME = 86400 + ; Session life time in seconds, default is 86400 (1 day) + SESSION_LIFE_TIME = 86400 + + [picture] + AVATAR_UPLOAD_PATH = data/avatars + ; Max Width and Height of uploaded avatars. This is to limit the amount of RAM + ; used when resizing the image. + AVATAR_MAX_WIDTH = 4096 + AVATAR_MAX_HEIGHT = 3072 + ; Chinese users can choose "duoshuo" + ; or a custom avatar source, like: http://cn.gravatar.com/avatar/ + GRAVATAR_SOURCE = gravatar + ; This value will always be true in offline mode. + DISABLE_GRAVATAR = false + ; Federated avatar lookup uses DNS to discover avatar associated + ; with emails, see https://www.libravatar.org + ; This value will always be false in offline mode or when Gravatar is disabled. + ENABLE_FEDERATED_AVATAR = false + + [attachment] + ; Whether attachments are enabled. Defaults to `true` + ENABLED = true + ; Path for attachments. Defaults to `data/attachments` + PATH = data/attachments + ; One or more allowed types, e.g. image/jpeg|image/png + ALLOWED_TYPES = image/jpeg|image/png|application/zip|application/gzip + ; Max size of each file. Defaults to 4MB + MAX_SIZE = 4 + ; Max number of files per upload. Defaults to 5 + MAX_FILES = 5 + + [log] + ROOT_PATH = + ; Either "console", "file", "conn", "smtp" or "database", default is "console" + ; Use comma to separate multiple modes, e.g. "console, file" + MODE = console + ; Buffer length of the channel, keep it as it is if you don't know what it is. + BUFFER_LEN = 10000 + ; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace" + LEVEL = Critical + + ; For "console" mode only + [log.console] + LEVEL = + + ; For "file" mode only + [log.file] + LEVEL = + ; This enables automated log rotate(switch of following options), default is true + LOG_ROTATE = true + ; Max number of lines in a single file, default is 1000000 + MAX_LINES = 1000000 + ; Max size shift of a single file, default is 28 means 1 << 28, 256MB + MAX_SIZE_SHIFT = 28 + ; Segment log daily, default is true + DAILY_ROTATE = true + ; delete the log file after n days, default is 7 + MAX_DAYS = 7 + + ; For "conn" mode only + [log.conn] + LEVEL = + ; Reconnect host for every single message, default is false + RECONNECT_ON_MSG = false + ; Try to reconnect when connection is lost, default is false + RECONNECT = false + ; Either "tcp", "unix" or "udp", default is "tcp" + PROTOCOL = tcp + ; Host address + ADDR = + + ; For "smtp" mode only + [log.smtp] + LEVEL = + ; Name displayed in mail title, default is "Diagnostic message from server" + SUBJECT = Diagnostic message from server + ; Mail server + HOST = + ; Mailer user name and password + USER = + ; Use PASSWD = `your password` for quoting if you use special characters in the password. + PASSWD = + ; Receivers, can be one or more, e.g. 
1@example.com,2@example.com + RECEIVERS = + + ; For "database" mode only + [log.database] + LEVEL = + ; Either "mysql" or "postgres" + DRIVER = + ; Based on xorm, e.g.: root:root@localhost/gitea?charset=utf8 + CONN = + + [cron] + ; Enable running cron tasks periodically. + ENABLED = true + ; Run cron tasks when Gitea starts. + RUN_AT_START = false + + ; Update mirrors + [cron.update_mirrors] + SCHEDULE = @every 10m + + ; Repository health check + [cron.repo_health_check] + SCHEDULE = @every 24h + TIMEOUT = 60s + ; Arguments for command 'git fsck', e.g. "--unreachable --tags" + ; see more on http://git-scm.com/docs/git-fsck + ARGS = + + ; Check repository statistics + [cron.check_repo_stats] + RUN_AT_START = true + SCHEDULE = @every 24h + + ; Clean up old repository archives + [cron.archive_cleanup] + ; Whether to enable the job + ENABLED = true + ; Whether to always run at least once at start up time (if ENABLED) + RUN_AT_START = true + ; Time interval for job to run + SCHEDULE = @every 24h + ; Archives created more than OLDER_THAN ago are subject to deletion + OLDER_THAN = 24h + + ; Synchronize external user data (only LDAP user synchronization is supported) + [cron.sync_external_users] + ; Synchronize external user data when starting server (default false) + RUN_AT_START = false + ; Interval as a duration between each synchronization (default every 24h) + SCHEDULE = @every 24h + ; Create new users, update existing user data and disable users that are not in external source anymore (default) + ; or only create new users if UPDATE_EXISTING is set to false + UPDATE_EXISTING = true + + [git] + ; Disables highlight of added and removed changes + DISABLE_DIFF_HIGHLIGHT = false + ; Max number of lines allowed in a single file in diff view + MAX_GIT_DIFF_LINES = 1000 + ; Max number of allowed characters in a line in diff view + MAX_GIT_DIFF_LINE_CHARACTERS = 5000 + ; Max number of files shown in diff view + MAX_GIT_DIFF_FILES = 100 + ; Arguments for command 'git gc', e.g. "--aggressive --auto" + ; see more on http://git-scm.com/docs/git-gc/ + GC_ARGS = + + ; Operation timeout in seconds + [git.timeout] + MIGRATE = 600 + MIRROR = 300 + CLONE = 300 + PULL = 300 + GC = 60 + + [mirror] + ; Default interval as a duration between each check + DEFAULT_INTERVAL = 8h + ; Min interval as a duration must be > 1m + MIN_INTERVAL = 10m + + [api] + ; Enables Swagger. True or false; default is true. + ENABLE_SWAGGER = true + ; Max number of items in a page + MAX_RESPONSE_ITEMS = 50 + + [i18n] + LANGS = en-US,zh-CN,zh-HK,zh-TW,de-DE,fr-FR,nl-NL,lv-LV,ru-RU,uk-UA,ja-JP,es-ES,pt-BR,pl-PL,bg-BG,it-IT,fi-FI,tr-TR,cs-CZ,sr-SP,sv-SE,ko-KR + NAMES = English,简体中文,繁體中文(香港),繁體中文(台灣),Deutsch,français,Nederlands,latviešu,русский,Українська,日本語,español,português do Brasil,polski,български,italiano,suomi,Türkçe,čeština,српски,svenska,한국어 + + ; Used for datetimepicker + [i18n.datelang] + en-US = en + zh-CN = zh + zh-HK = zh-HK + zh-TW = zh-TW + de-DE = de + fr-FR = fr + nl-NL = nl + lv-LV = lv + ru-RU = ru + uk-UA = uk + ja-JP = ja + es-ES = es + pt-BR = pt-BR + pl-PL = pl + bg-BG = bg + it-IT = it + fi-FI = fi + tr-TR = tr + cs-CZ = cs-CZ + sr-SP = sr + sv-SE = sv + ko-KR = ko + + [U2F] + ; Two Factor authentication with security keys + ; https://developers.yubico.com/U2F/App_ID.html + ;APP_ID = %(PROTOCOL)s://%(DOMAIN)s:%(HTTP_PORT)s/ + ; Comma seperated list of truisted facets + ;TRUSTED_FACETS = %(PROTOCOL)s://%(DOMAIN)s:%(HTTP_PORT)s/ + + ; Extension mapping to highlight class + ; e.g. 
.toml=ini + [highlight.mapping] + + [other] + SHOW_FOOTER_BRANDING = false + ; Show version information about Gitea and Go in the footer + SHOW_FOOTER_VERSION = true + ; Show template execution time in the footer + SHOW_FOOTER_TEMPLATE_LOAD_TIME = true + + [markup.asciidoc] + ENABLED = false + ; List of file extensions that should be rendered by an external command + FILE_EXTENSIONS = .adoc,.asciidoc + ; External command to render all matching extensions + RENDER_COMMAND = "asciidoc --out-file=- -" + ; Don't pass the file on STDIN, pass the filename as argument instead. + IS_INPUT_FILE = false + + [metrics] + ; Enables metrics endpoint. True or false; default is false. + ENABLED = false + ; If you want to add authorization, specify a token here + TOKEN = diff --git a/gitea/templates/gitea/gitea-pvc.yaml b/gitea/templates/gitea/gitea-pvc.yaml new file mode 100644 index 0000000..3f499dd --- /dev/null +++ b/gitea/templates/gitea/gitea-pvc.yaml @@ -0,0 +1,59 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingGiteaClaim) (not .Values.persistence.directGiteaVolumeMount) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: gitea-data + labels: + app: {{ template "fullname" . }} + chart: gitea + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.persistence.annotations }} + annotations: +{{ toYaml .Values.persistence.annotations | indent 4 }} +{{- end }} + +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} +--- +{{- if and .Values.lfs.enabled (not .Values.lfs.existingGiteaClaim) (not .Values.lfs.directGiteaVolumeMount) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: gitea-lfs + labels: + app: {{ template "fullname" . }} + chart: gitea + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.lfs.annotations }} + annotations: +{{ toYaml .Values.lfs.annotations | indent 4 }} +{{- end }} + +spec: + accessModes: + - {{ .Values.lfs.accessMode | quote }} + resources: + requests: + storage: {{ .Values.lfs.size | quote }} +{{- if .Values.lfs.storageClass }} +{{- if (eq "-" .Values.lfs.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.lfs.storageClass }}" +{{- end }} +{{- end }} +{{- end }} diff --git a/gitea/templates/gitea/gitea-svc.yaml b/gitea/templates/gitea/gitea-svc.yaml new file mode 100644 index 0000000..762d6d6 --- /dev/null +++ b/gitea/templates/gitea/gitea-svc.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "fullname" . }}-svc + labels: + app: {{ template "fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: {{ .Values.service.type }} + ports: + - name: http + port: 80 + targetPort: 3000 + {{- if .Values.ingress.tls }} + - name: https + port: 443 + targetPort: 3000 + {{- end }} + - name: ssh + port: 22 + targetPort: ssh + selector: + app: {{ template "fullname" . 
}} diff --git a/gitea/templates/gitea/post-install-job.yaml b/gitea/templates/gitea/post-install-job.yaml new file mode 100644 index 0000000..b00a5e1 --- /dev/null +++ b/gitea/templates/gitea/post-install-job.yaml @@ -0,0 +1,68 @@ +{{- if .Values.config.disableInstaller }} +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ .Release.Name }}" + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": post-install + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + name: "{{ .Release.Name }}" + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + spec: + restartPolicy: Never + containers: + - name: post-install-job + image: "{{ .Values.images.gitea }}" + command: + - '/bin/su' + - '-s' + - '/bin/bash' + - 'git' + - '-c' + - '/app/gitea/gitea admin create-user --admin --username {{ .Values.config.admin_user }} --password {{ .Values.config.admin_pass }} --email {{ .Values.config.admin_user }}@{{ .Values.config.mailer.domain }} || exit 0' + volumeMounts: + - name: gitea-data + mountPath: /data + - name: gitea-config + mountPath: /etc/gitea + volumes: + - name: gitea-data + {{- if .Values.persistence.enabled }} + {{- if .Values.persistence.directGiteaVolumeMount }} +{{ tpl .Values.persistence.directGiteaVolumeMount . | indent 8 }} + {{- else }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingGiteaClaim | default "gitea-data" }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: gitea-lfs + {{- if .Values.lfs.enabled }} + {{- if .Values.lfs.directGiteaVolumeMount }} +{{ tpl .Values.lfs.directGiteaVolumeMount . | indent 8 }} + {{- else }} + persistentVolumeClaim: + claimName: {{ .Values.lfs.existingGiteaClaim | default "gitea-lfs" }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: gitea-config + configMap: + name: {{ template "fullname" . }} +{{ end }} diff --git a/gitea/templates/ingress.yaml b/gitea/templates/ingress.yaml new file mode 100644 index 0000000..08728f8 --- /dev/null +++ b/gitea/templates/ingress.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ template "gitea.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ template "fullname" . }} + labels: + app: "{{ template "fullname" . }}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- if .Values.ingress.hostname }} + - host: {{ .Values.ingress.hostname }} + http: + paths: + - path: / + backend: + serviceName: {{ template "fullname" . 
}}-svc + servicePort: {{ .Values.service.http.port }} + {{- end }} + {{- range .Values.ingress.hosts }} + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: "{{ template "fullname" $ }}-svc" + servicePort: {{ $.Values.service.http.port }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end }} +{{- end }} diff --git a/gitea/templates/init/_container.tpl b/gitea/templates/init/_container.tpl new file mode 100644 index 0000000..892b12d --- /dev/null +++ b/gitea/templates/init/_container.tpl @@ -0,0 +1,42 @@ +{{/* +Create helm partial for gitea server +*/}} +{{- define "init" }} +- name: init + image: {{ .Values.images.gitea }} + imagePullPolicy: {{ .Values.images.pullPolicy }} + env: + {{- if .Values.externalDB.existingSecret.enabled }} + - name: EXTERNALDB_USER + valueFrom: + secretKeyRef: + name: {{ .Values.externalDB.existingSecret.secretName | default (printf "%s-%s" .Release.Name "db") }} + key: {{ .Values.externalDB.existingSecret.usernameKey | default "username" }} + - name: EXTERNALDB_PASS + valueFrom: + secretKeyRef: + name: {{ .Values.externalDB.existingSecret.secretName | default (printf "%s-%s" .Release.Name "db") }} + key: {{ .Values.externalDB.existingSecret.passwordKey | default "password" }} + {{- end }} + {{- if .Values.mariadb.enabled }} + - name: MARIADB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.fullname" . }} + key: mariadb-password + {{- end }} + - name: SCRIPT + value: &script |- + mkdir -p /datatmp/gitea/conf + if [ ! -f /datatmp/gitea/conf/app.ini ]; then + sed "s/MARIADB_PASSWORD/${MARIADB_PASSWORD}/g" < /etc/gitea/app.ini > /datatmp/gitea/conf/app.ini + sed -i "s/EXTERNALDB_PASS/${EXTERNALDB_PASS}/g" /datatmp/gitea/conf/app.ini + sed -i "s/EXTERNALDB_USER/${EXTERNALDB_USER}/g" /datatmp/gitea/conf/app.ini + fi + command: ["/bin/sh",'-c', *script] + volumeMounts: + - name: gitea-data + mountPath: /datatmp + - name: gitea-config + mountPath: /etc/gitea +{{- end }} diff --git a/gitea/templates/memcached/_container.tpl b/gitea/templates/memcached/_container.tpl new file mode 100644 index 0000000..d79bc7b --- /dev/null +++ b/gitea/templates/memcached/_container.tpl @@ -0,0 +1,35 @@ +{{/* +Create helm partial for memcached +*/}} +{{- define "memcached" }} +- name: memcached + image: {{ .Values.images.memcached }} + imagePullPolicy: {{ .Values.images.pullPolicy }} + command: + - memcached + - -m {{ .Values.memcached.maxItemMemory }} + {{- if .Values.memcached.extendedOptions }} + - -o + - {{ .Values.memcached.extendedOptions }} + {{- end }} + {{- if .Values.memcached.verbosity }} + - -{{ .Values.memcached.verbosity }} + {{- end }} + ports: + - name: memcache + containerPort: 11211 + livenessProbe: + tcpSocket: + port: memcache + initialDelaySeconds: 30 + timeoutSeconds: 5 + readinessProbe: + tcpSocket: + port: memcache + initialDelaySeconds: 5 + timeoutSeconds: 1 + securityContext: + runAsUser: 1000 + resources: +{{ toYaml .Values.resources.memcached | indent 4 }} +{{- end }} diff --git a/gitea/values.yaml b/gitea/values.yaml new file mode 100644 index 0000000..d648c0d --- /dev/null +++ b/gitea/values.yaml @@ -0,0 +1,260 @@ +## Gitea image +## ref: https://hub.docker.com/r/gitea/gitea/tags/ +## + +tags: + mariadb: true + +images: + registry: docker.io + gitea: "gitea/gitea:1.13.1" + memcached: "memcached:1.6.9-alpine" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +# Allow configuration of lifecycle hooks +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/ +lifecycle: {} + # postStartCommand: [] + # preStopCommand: [] + +# Config for Gitea +#config: +# # By default installer enabled +# disableInstaller: false +# # Set default user and password if installer disabled +# # User gitea (name 'admin' reserved by gitea) +# admin_user: gitea +# admin_pass: password +# # By default mailer disabled +# mailer: +# enabled: false +# host: mail.example.com:465 +# skip_verify: false +# is_tls_enabled: true +# from: gitea@example.com +# user: gitea +# passwd: password + +## Cache settings for memcache +memcached: + maxItemMemory: 64 + verbosity: v + extendedOptions: modern + +## Configure the ingress resource that allows you to access the +## Gitea installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: true + + ## Set this to true in order to add the corresponding annotations for cert-manager + certManager: false + + ## When the ingress is enabled, a host pointing to this will be created + hostname: gitea.local + + ## Ingress annotations done as key:value pairs + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + annotations: {} + # certmanager.k8s.io/cluster-issuer: letsencrypt-prod + # kubernetes.io/ingress.class: nginx + + ## The list of additional hostnames to be covered with this ingress record.
+ ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + # hosts: + # - name: gitea.local + # path: / + + ## The tls configuration for the ingress + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## Uncomment below to enable tls / https for Let's Encrypt / cert-manager + # tls: + # - hosts: + # - gitea.local + # secretName: gitea.tls + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: gitea.local-tls + # key: + # certificate: + # + +## This chart defaults to using an ingress for http, but change the service type to LoadBalancer if your cluster supports it +service: + type: ClusterIP + + http: + port: 3000 + ## Make the external port available + # externalPort: 8082 + # externalHost: gitea.local + ssh: + port: 22 + ## If serving SSH on a different external port, this is used for determining the ssh url shown in the gui + # externalPort: 22 + # externalHost: gitea.local + # externalIPs: [] + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + gitea: + requests: + memory: 500Mi + cpu: 1000m + limits: + memory: 2Gi + cpu: 1 + memcached: + requests: + memory: 64Mi + cpu: 50m + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: false + # existingGiteaClaim: gitea-data + size: 10Gi + #storageClass: "-" + accessMode: ReadWriteMany + ## additional annotations for PVCs. Uncommenting will prevent the PVC from being deleted. + annotations: + "helm.sh/resource-policy": keep +## if you want to mount a volume directly without using a storageClass or pvcs +# directGiteaVolumeMount: +# glusterfs: +# endpoints: "192.168.1.1 192.168.1.2 192.168.1.3" +# path: giteaData +# directPostgresVolumeMount: +# glusterfs: +# endpoints: "192.168.1.1 192.168.1.2 192.168.1.3" +# path: giteaPostgresData + +## LFS volume +lfs: + enabled: false + # existingGiteaClaim: gitea-lfs + accessMode: ReadWriteMany + size: 20Gi + #storageClass: "-" + annotations: + "helm.sh/resource-policy": keep +## if you want to mount a volume directly without using a storageClass or pvcs +# directGiteaVolumeMount: +# glusterfs: +# endpoints: "192.168.1.1 192.168.1.2 192.168.1.3" +# path: giteaData +# directPostgresVolumeMount: +# glusterfs: +# endpoints: "192.168.1.1 192.168.1.2 192.168.1.3" +# path: giteaPostgresData + + + +## +## MariaDB chart configuration +## +## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml +## +mariadb: + ## Whether to deploy a mariadb server to satisfy the application's database requirements.
To use an external database, set this to false and configure the externalDB parameters below + enabled: true + ## Create a database and a database user + ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run + ## + auth: + database: gitea + username: gitea + ## If the password is not specified, mariadb will generate a random password + ## + password: ThisIsMySuperSecretPassword + + ## MariaDB admin password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run + ## + rootPassword: ThisIsMySuperSecretPassword + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + primary: + persistence: + enabled: true + ## mariadb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + #storageClass: "-" + accessMode: ReadWriteOnce + size: 8Gi + +## Connect to an external database instead +externalDB: + enabled: false + dbUser: "postgres" + dbPassword: "" + dbHost: "service-name.namespace.svc.cluster.local" # or some external host + dbPort: "5432" + dbDatabase: "gitea" + dbSSL: "disable" + existingSecret: + enabled: false + secretName: nameofsecret + usernameKey: username + passwordKey: password + + +## Actual Gitea configuration (modifies the default .ini file for Gitea) +## This will skip the initial installation screen. You must have a secretKey already defined +## and disableInstaller set to True +config: +## secretKey: ThisIsMySuperSecretKeyThatsUsedInterally + disableInstaller: false + offlineMode: false + requireSignin: false + disableRegistration: true + openidSignin: false + +## Common helm annotations +## Node labels and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +nodeSelector: {} +tolerations: [] +affinity: {} + +## Annotations for the deployment and pods. +deploymentAnnotations: {} +podAnnotations: {} diff --git a/nextcloud/.helmignore b/nextcloud/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/nextcloud/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/nextcloud/Chart.lock b/nextcloud/Chart.lock new file mode 100644 index 0000000..9d5f75e --- /dev/null +++ b/nextcloud/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.8.9 +- name: mariadb + repository: https://charts.bitnami.com/bitnami + version: 7.10.2 +- name: redis + repository: https://charts.bitnami.com/bitnami + version: 11.0.5 +digest: sha256:b23a02c88d8d746aa46c03b38f429180ec2f484c3bdcf94ca9024acac7a686f4 +generated: "2020-11-11T12:55:39.484371762Z" diff --git a/nextcloud/Chart.yaml b/nextcloud/Chart.yaml new file mode 100644 index 0000000..68012fd --- /dev/null +++ b/nextcloud/Chart.yaml @@ -0,0 +1,37 @@ +apiVersion: v2 +appVersion: 19.0.3 +dependencies: +- condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.8.9 +- condition: mariadb.enabled + name: mariadb + repository: https://charts.bitnami.com/bitnami + version: 7.10.2 +- condition: redis.enabled + name: redis + repository: https://charts.bitnami.com/bitnami + version: 11.0.5 +description: A file sharing server that puts the control and security of your own + data back into your hands. +home: https://nextcloud.com/ +icon: https://cdn.rawgit.com/docker-library/docs/defa5ffc7123177acd60ddef6e16bddf694cc35f/nextcloud/logo.svg +keywords: +- nextcloud +- storage +- http +- web +- php +maintainers: +- email: skjnldsv@protonmail.com + name: skjnldsv +- email: christian.ingenhaag@googlemail.com + name: chrisingenhaag +- email: jeff@billimek.com + name: billimek +name: nextcloud +sources: +- https://github.com/nextcloud/helm +- https://github.com/nextcloud/docker +version: 2.3.2 diff --git a/nextcloud/README.md b/nextcloud/README.md new file mode 100644 index 0000000..6520e83 --- /dev/null +++ b/nextcloud/README.md @@ -0,0 +1,269 @@ +# nextcloud + +[nextcloud](https://nextcloud.com/) is a file sharing server that puts the control and security of your own data back into your hands. + +## TL;DR; + +```console +helm repo add nextcloud https://nextcloud.github.io/helm/ +helm install my-release nextcloud/nextcloud +``` + +## Introduction + +This chart bootstraps an [nextcloud](https://hub.docker.com/_/nextcloud/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +It also packages the [Bitnami MariaDB chart](https://github.com/kubernetes/charts/tree/master/stable/mariadb) which is required for bootstrapping a MariaDB deployment for the database requirements of the nextcloud application. + +## Prerequisites + +- Kubernetes 1.9+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm repo add nextcloud https://nextcloud.github.io/helm/ +helm install my-release nextcloud/nextcloud +``` + +The command deploys nextcloud on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 
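+After the install completes, a quick way to confirm the release came up is to check its status and watch the pods start. This is a minimal sketch: it assumes the release name `my-release` from above and the `default` namespace, so adjust `--namespace` to match your installation:
+
+```console
+helm status my-release
+kubectl get pods --namespace default -w
+```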
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the nextcloud chart and their default values. + +| Parameter | Description | Default | +| ------------------------------------------------------------ | ------------------------------------------------------- | ------------------------------------------- | +| `image.repository` | nextcloud Image name | `nextcloud` | +| `image.tag` | nextcloud Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `ingress.enabled` | Enable use of ingress controllers | `false` | +| `ingress.servicePort` | Ingress' backend servicePort | `http` | +| `ingress.annotations` | An array of service annotations | `nil` | +| `ingress.labels` | An array of service labels | `nil` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `nextcloud.host` | nextcloud host to create application URLs | `nextcloud.kube.home` | +| `nextcloud.username` | User of the application | `admin` | +| `nextcloud.password` | Application password | `changeme` | +| `nextcloud.update` | Trigger update if custom command is used | `0` | +| `nextcloud.datadir` | nextcloud data dir location | `/var/www/html/data` | +| `nextcloud.tableprefix` | nextcloud db table prefix | `''` | +| `nextcloud.mail.enabled` | Whether to enable/disable email settings | `false` | +| `nextcloud.mail.fromAddress` | nextcloud mail send from field | `nil` | +| `nextcloud.mail.domain` | nextcloud mail domain | `nil` | +| `nextcloud.mail.smtp.host` | SMTP hostname | `nil` | +| `nextcloud.mail.smtp.secure` | SMTP connection `ssl` or empty | `''` | +| `nextcloud.mail.smtp.port` | Optional SMTP port | `nil` | +| `nextcloud.mail.smtp.authtype` | SMTP authentication method | `LOGIN` | +| `nextcloud.mail.smtp.name` | SMTP username | `''` | +| `nextcloud.mail.smtp.password` | SMTP password | `''` | +| `nextcloud.configs` | Config files created in `/var/www/html/config` | `{}` | +| `nextcloud.persistence.subPath` | Set the subPath for nextcloud to use in volume | `nil` | +| `nextcloud.phpConfigs` | PHP Config files created in `/usr/local/etc/php/conf.d` | `{}` | +| `nextcloud.defaultConfigs.\.htaccess` | Default .htaccess to protect `/var/www/html/config` | `true` | +| `nextcloud.defaultConfigs.\.redis\.config\.php` | Default Redis configuration | `true` | +| `nextcloud.defaultConfigs.\.apache-pretty-urls\.config\.php` | Default Apache configuration for rewrite urls | `true` | +| `nextcloud.defaultConfigs.\.apcu\.config\.php` | Default configuration to define APCu as local cache | `true` | +| `nextcloud.defaultConfigs.\.apps\.config\.php` | Default configuration for apps | `true` | +| `nextcloud.defaultConfigs.\.autoconfig\.php` | Default auto-configuration for databases | `true` | +| `nextcloud.defaultConfigs.\.smtp\.config\.php` | Default configuration for smtp | `true` | +| `nextcloud.strategy` | specifies the strategy used to replace old Pods by new ones | `type: Recreate` | +| `nextcloud.extraEnv` | specify additional environment variables | `{}` | +| `nextcloud.extraVolumes` | specify additional volumes for the NextCloud pod | `{}` | +| `nextcloud.extraVolumeMounts` | specify additional volume mounts for the NextCloud pod | 
`{}` | +| `nginx.enabled` | Enable nginx (requires you use php-fpm image) | `false` | +| `nginx.image.repository` | nginx Image name | `nginx` | +| `nginx.image.tag` | nginx Image tag | `alpine` | +| `nginx.image.pullPolicy` | nginx Image pull policy | `IfNotPresent` | +| `nginx.config.default` | Whether to use nextclouds recommended nginx config | `true` | +| `nginx.config.custom` | Specify a custom config for nginx | `{}` | +| `nginx.resources` | nginx resources | `{}` | +| `lifecycle.postStartCommand` | Specify deployment lifecycle hook postStartCommand | `nil` | +| `lifecycle.preStopCommand` | Specify deployment lifecycle hook preStopCommand | `nil` | +| `internalDatabase.enabled` | Whether to use internal sqlite database | `true` | +| `internalDatabase.database` | Name of the existing database | `nextcloud` | +| `externalDatabase.enabled` | Whether to use external database | `false` | +| `externalDatabase.type` | External database type: `mysql`, `postgresql` | `mysql` | +| `externalDatabase.host` | Host of the external database | `nil` | +| `externalDatabase.database` | Name of the existing database | `nextcloud` | +| `externalDatabase.user` | Existing username in the external db | `nextcloud` | +| `externalDatabase.password` | Password for the above username | `nil` | +| `externalDatabase.existingSecret.enabled` | Whether to use a existing secret or not | `false` | +| `externalDatabase.existingSecret.secretName` | Name of the existing secret | `nil` | +| `externalDatabase.existingSecret.usernameKey` | Name of the key that contains the username | `nil` | +| `externalDatabase.existingSecret.passwordKey` | Name of the key that contains the password | `nil` | +| `mariadb.enabled` | Whether to use the MariaDB chart | `false` | +| `mariadb.db.name` | Database name to create | `nextcloud` | +| `mariadb.db.password` | Password for the database | `changeme` | +| `mariadb.db.user` | Database user to create | `nextcloud` | +| `mariadb.rootUser.password` | MariaDB admin password | `nil` | +| `redis.enabled` | Whether to install/use redis for locking | `false` | +| `cronjob.enabled` | Whether to enable/disable cronjob | `false` | +| `cronjob.schedule` | Schedule for the CronJob | `*/15 * * * *` | +| `cronjob.annotations` | Annotations to add to the cronjob | {} | +| `cronjob.curlInsecure` | Set insecure (-k) option to curl | false | +| `cronjob.failedJobsHistoryLimit` | Specify the number of failed Jobs to keep | `5` | +| `cronjob.successfulJobsHistoryLimit` | Specify the number of completed Jobs to keep | `2` | +| `cronjob.resources` | Cronjob Resources | `nil` | +| `cronjob.nodeSelector` | Cronjob Node selector | `nil` | +| `cronjob.tolerations` | Cronjob tolerations | `nil` | +| `cronjob.affinity` | Cronjob affinity | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIp` | +| `service.loadBalancerIP` | LoadBalancerIp for service type LoadBalancer | `nil` | +| `service.nodePort` | NodePort for service type NodePort | `nil` | +| `persistence.enabled` | Enable persistence using PVC | `false` | +| `persistence.annotations` | PVC annotations | `{}` | +| `persistence.storageClass` | PVC Storage Class for nextcloud volume | `nil` (uses alpha storage class annotation) | +| `persistence.existingClaim` | An Existing PVC name for nextcloud volume | `nil` (uses alpha storage class annotation) | +| `persistence.accessMode` | PVC Access Mode for nextcloud volume | `ReadWriteOnce` | +| `persistence.size` | PVC Storage Request for nextcloud volume | `8Gi` | +| `resources` | CPU/Memory resource 
requests/limits | `{}` | +| `livenessProbe.enabled` | Turn on and off liveness probe | `true` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `15` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` | +| `readinessProbe.enabled` | Turn on and off readiness probe | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `15` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` | +| `hpa.enabled` | Boolean to create a HorizontalPodAutoscaler | `false` | +| `hpa.cputhreshold` | CPU threshold percent for the HorizontalPodAutoscale | `60` | +| `hpa.minPods` | Min. pods for the Nextcloud HorizontalPodAutoscaler | `1` | +| `hpa.maxPods` | Max. pods for the Nextcloud HorizontalPodAutoscaler | `10` | +| `deploymentAnnotations` | Annotations to be added at 'deployment' level | not set | +| `podAnnotations` | Annotations to be added at 'pod' level | not set | +| `metrics.enabled` | Start Prometheus metrics exporter | `false` | +| `metrics.https` | Defines if https is used to connect to nextcloud | `false` (uses http) | +| `metrics.timeout` | When the scrape times out | `5s` | +| `metrics.image.repository` | Nextcloud metrics exporter image name | `xperimental/nextcloud-exporter` | +| `metrics.image.tag` | Nextcloud metrics exporter image tag | `v0.3.0` | +| `metrics.image.pullPolicy` | Nextcloud metrics exporter image pull policy | `IfNotPresent` | +| `metrics.podAnnotations` | Additional annotations for metrics exporter | not set | +| `metrics.podLabels` | Additional labels for metrics exporter | not set | +| `metrics.service.type` | Metrics: Kubernetes Service type | `ClusterIP` | +| `metrics.service.loadBalancerIP` | Metrics: LoadBalancerIp for service type LoadBalancer | `nil` | +| `metrics.service.nodePort` | Metrics: NodePort for service type NodePort | `nil` | +| `metrics.service.annotations` | Additional annotations for service metrics exporter | `{prometheus.io/scrape: "true", prometheus.io/port: "9205"}` | +| `metrics.service.labels` | Additional labels for service metrics exporter | `{}` | + +> **Note**: +> +> For nextcloud to function correctly, you should specify the `nextcloud.host` parameter to specify the FQDN (recommended) or the public IP address of the nextcloud service. +> +> Optionally, you can specify the `service.loadBalancerIP` parameter to assign a reserved IP address to the nextcloud service of the chart. However please note that this feature is only available on a few cloud providers (f.e. GKE). +> +> To reserve a public IP address on GKE: +> +> ```bash +> gcloud compute addresses create nextcloud-public-ip +> ``` +> +> The reserved IP address can be associated to the nextcloud service by specifying it as the value of the `service.loadBalancerIP` parameter while installing the chart. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example,
+
+```console
+helm install --name my-release \
+  --set nextcloud.username=admin,nextcloud.password=password,mariadb.rootUser.password=secretpassword \
+  nextcloud/nextcloud
+```
+
+The above command sets the nextcloud administrator account username and password to `admin` and `password` respectively. Additionally, it sets the MariaDB `root` user password to `secretpassword`.
+
+Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
+
+```console
+helm install --name my-release -f values.yaml nextcloud/nextcloud
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Persistence
+
+The [Nextcloud](https://hub.docker.com/_/nextcloud/) image stores the nextcloud data and configurations at the `/var/www/html` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Configuration](#configuration) section for how to enable persistence and configure the PVC.
+
+## Cronjob
+
+This chart can use a Kubernetes `CronJob` resource to execute [background tasks](https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html).
+
+To use this functionality, set the `cronjob.enabled` parameter to `true` and switch the background jobs mode to Webcron on your nextcloud settings page.
+See the [Configuration](#configuration) section for further configuration of the cronjob resource.
+
+> **Note**: For the cronjobs to work correctly, ingress must also be enabled (set `ingress.enabled` to `true`) and `nextcloud.host` must be publicly resolvable.
+
+## Multiple config.php file
+
+Nextcloud supports loading configuration parameters from multiple files.
+You can add arbitrary files ending with `.config.php` in the `config/` directory.
+See the [documentation](https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file) for details.
+
+For example, the following config will configure Nextcloud to use [S3 as primary storage](https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3) by creating the file `/var/www/html/config/s3.config.php`:
+
+```yaml
+nextcloud:
+  configs:
+    s3.config.php: |-
+      <?php
+      $CONFIG = array(
+        'objectstore' => array(
+          'class' => '\\OC\\Files\\ObjectStore\\S3',
+          'arguments' => array(
+            'bucket' => 'my-bucket',
+            'autocreate' => true,
+            'key' => 'xxx',
+            'secret' => 'xxx',
+            'region' => 'us-east-1',
+            'use_ssl' => true
+          )
+        )
+      );
+```
+
+## Hugepages
+
+If your node has hugepages enabled but you do not map any into the container, it could fail to start with a bus error in Apache. This is due
+to Apache attempting to memory-map a file using hugepages. The fix is either to disable hugepages on the node or to map hugepages into the container:
+
+```yaml
+nextcloud:
+  extraVolumes:
+    - name: hugepages
+      emptyDir:
+        medium: HugePages-2Mi
+  extraVolumeMounts:
+    - name: hugepages
+      mountPath: /dev/hugepages
+  resources:
+    requests:
+      hugepages-2Mi: 500Mi
+      # note that Kubernetes currently requires cpu or memory requests and limits before hugepages are allowed.
+      memory: 500Mi
+    limits:
+      # limit and request must be the same for hugepages. They are a fixed resource.
+      hugepages-2Mi: 500Mi
+      # note that Kubernetes currently requires cpu or memory requests and limits before hugepages are allowed.
+ memory: 1Gi +``` diff --git a/nextcloud/charts/mariadb/.helmignore b/nextcloud/charts/mariadb/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/nextcloud/charts/mariadb/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/nextcloud/charts/mariadb/Chart.yaml b/nextcloud/charts/mariadb/Chart.yaml new file mode 100644 index 0000000..ca05720 --- /dev/null +++ b/nextcloud/charts/mariadb/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 10.3.24 +description: Fast, reliable, scalable, and easy to use open-source relational database + system. MariaDB Server is intended for mission-critical, heavy-load production systems + as well as for embedding into mass-deployed software. Highly available MariaDB cluster. +home: https://github.com/bitnami/charts/tree/master/bitnami/mariadb +icon: https://bitnami.com/assets/stacks/mariadb/img/mariadb-stack-220x234.png +keywords: +- mariadb +- mysql +- database +- sql +- prometheus +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mariadb +sources: +- https://github.com/bitnami/bitnami-docker-mariadb +- https://github.com/prometheus/mysqld_exporter +- https://mariadb.org +version: 7.10.2 diff --git a/nextcloud/charts/mariadb/README.md b/nextcloud/charts/mariadb/README.md new file mode 100644 index 0000000..7d4a8ab --- /dev/null +++ b/nextcloud/charts/mariadb/README.md @@ -0,0 +1,343 @@ +# MariaDB + +[MariaDB](https://mariadb.org) is one of the most popular database servers in the world. It’s made by the original developers of MySQL and guaranteed to stay open source. Notable users include Wikipedia, Facebook and Google. + +MariaDB is developed as open source software and as a relational database it provides an SQL interface for accessing data. The latest versions of MariaDB also include GIS and JSON features. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/mariadb +``` + +## Introduction + +This chart bootstraps a [MariaDB](https://github.com/bitnami/bitnami-docker-mariadb) replication cluster deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/mariadb +``` + +The command deploys MariaDB on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
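+
+Because this chart is vendored here as a dependency of the parent nextcloud chart, its parameters are normally set under the `mariadb` key of the nextcloud chart's values rather than by installing it standalone (see the `mariadb.*` parameters in the nextcloud README above). A minimal sketch, with illustrative values only:
+
+```yaml
+mariadb:
+  enabled: true
+  db:
+    name: nextcloud
+    user: nextcloud
+    password: changeme
+  rootUser:
+    password: secretpassword
+```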
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the MariaDB chart and their default values. + +| Parameter | Description | Default | +|----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | MariaDB image registry | `docker.io` | +| `image.repository` | MariaDB Image name | `bitnami/mariadb` | +| `image.tag` | MariaDB Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | MariaDB image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `nameOverride` | String to partially override mariadb.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override mariadb.fullname template with a string | `nil` | +| `podLabels` | Additional pod labels | `{}` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources` | Init container resource requests/limit | `nil` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.clusterIp.master` | Specific cluster IP for master when service type is cluster IP. Use None for headless service | `nil` | +| `service.clusterIp.slave` | Specific cluster IP for slave when service type is cluster IP. 
Use None for headless service | `nil` | +| `service.port` | MySQL service port | `3306` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the mariadb.fullname template | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `rbac.create` | Create and use RBAC resources | `false` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `existingSecret` | Use existing secret for password details (`rootUser.password`, `db.password`, `replication.password` will be ignored and picked up from this secret). The secret has to contain the keys `mariadb-root-password`, `mariadb-replication-password` and `mariadb-password`. | `nil` | +| `secret.annotations` | Secret annotations (evaluated as a template) | `{}` | +| `secret.requirePasswords` | Require all passwords at the secret resource | `true` | +| `rootUser.password` | Password for the `root` user. Ignored if existing secret is provided. | _random 10 character alphanumeric string_ | +| `rootUser.forcePassword` | Force users to specify a password | `false` | +| `rootUser.injectSecretsAsVolume` | Mount admin user password as a file instead of using an environment variable | `false` | +| `rootUser.injectSecretsFile` | Path to file used for secrets as volume | `/opt/bitnami/mariadb/secrets/mariadb-root-password` | +| `db.name` | Name for new database to create | `my_database` | +| `db.user` | Username of new user to create | `""` | +| `db.password` | Password for the new user. Ignored if existing secret is provided. | _random 10 character alphanumeric string if `db.user` is defined_ | +| `db.forcePassword` | Force users to specify a password | `false` | +| `db.injectSecretsAsVolume` | Mount user password as a file instead of using an environment variable | `false` | +| `db.injectSecretsFile` | Path to file used for secrets as volume | `/opt/bitnami/mariadb/secrets/mariadb-password` | +| `replication.enabled` | MariaDB replication enabled | `true` | +| `replication.user` | MariaDB replication user | `replicator` | +| `replication.password` | MariaDB replication user password. Ignored if existing secret is provided. 
| _random 10 character alphanumeric string_ | +| `replication.forcePassword` | Force users to specify a password | `false` | +| `replication.injectSecretsAsVolume` | Mount replication user password as a file instead of using an environment variable | `false` | +| `replication.injectSecretsFile` | Path to file used for secrets as volume | `/opt/bitnami/mariadb/secrets/mariadb-replication-password` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `nil` | +| `master.annotations[].key` | key for the the annotation list item | `nil` | +| `master.annotations[].value` | value for the the annotation list item | `nil` | +| `master.extraFlags` | MariaDB master additional command line flags | `nil` | +| `master.affinity` | Master affinity (in addition to master.antiAffinity when set) | `{}` | +| `master.antiAffinity` | Master pod anti-affinity policy | `soft` | +| `master.nodeSelector` | Master node labels for pod assignment | `{}` | +| `master.tolerations` | List of node taints to tolerate (master) | `[]` | +| `master.updateStrategy` | Master statefulset update strategy policy | `RollingUpdate` | +| `master.persistence.enabled` | Enable persistence using PVC | `true` | +| `master.persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `nil` | +| `master.persistence.subPath` | Subdirectory of the volume to mount | `nil` | +| `master.persistence.mountPath` | Path to mount the volume at | `/bitnami/mariadb` | +| `master.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `master.persistence.storageClass` | Persistent Volume Storage Class | `` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Persistent Volume Size | `8Gi` | +| `master.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `master.extraInitContainers` | Additional init containers as a string to be passed to the `tpl` function (master) | | +| `master.extraEnvVars` | Array containing extra env vars to configure MariaDB master replicas | `nil` | +| `master.extraEnvVarsCM` | Configmap containing extra env vars to configure MariaDB master replicas | `nil` | +| `master.extraEnvVarsSecret` | Secret containing extra env vars to configure MariaDB master replicas | `nil` | +| `master.config` | Config file for the MariaDB Master server | `_default values in the values.yaml file_` | +| `master.resources` | CPU/Memory resource requests/limits for master node | `{}` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (master) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master) | `120` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (master) | `10` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (master) | `1` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe (master) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe (master) | `3` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (master) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master) | `30` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (master) | `10` | +| 
`master.readinessProbe.timeoutSeconds` | When the probe times out (master) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe (master) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe (master) | `3` | +| `master.podDisruptionBudget.enabled` | If true, create a pod disruption budget for master pods. | `false` | +| `master.podDisruptionBudget.minAvailable` | Minimum number / percentage of pods that should remain scheduled | `1` | +| `master.podDisruptionBudget.maxUnavailable` | Maximum number / percentage of pods that may be made unavailable | `nil` | +| `master.service.annotations` | Master service annotations | `{}` | +| `slave.replicas` | Desired number of slave replicas | `1` | +| `slave.annotations[].key` | key for the the annotation list item | `nil` | +| `slave.annotations[].value` | value for the the annotation list item | `nil` | +| `slave.extraFlags` | MariaDB slave additional command line flags | `nil` | +| `slave.affinity` | Slave affinity (in addition to slave.antiAffinity when set) | `{}` | +| `slave.antiAffinity` | Slave pod anti-affinity policy | `soft` | +| `slave.nodeSelector` | Slave node labels for pod assignment | `{}` | +| `slave.tolerations` | List of node taints to tolerate for (slave) | `[]` | +| `slave.updateStrategy` | Slave statefulset update strategy policy | `RollingUpdate` | +| `slave.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` | +| `slave.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `slave.persistence.storageClass` | Persistent Volume Storage Class | `` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Persistent Volume Size | `8Gi` | +| `slave.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `slave.extraInitContainers` | Additional init containers as a string to be passed to the `tpl` function (slave) | `nil` | +| `slave.extraEnvVars` | Array containing extra env vars to configure MariaDB slave replicas | `nil` | +| `slave.extraEnvVarsCM` | ConfigMap containing extra env vars to configure MariaDB slave replicas | `nil` | +| `slave.extraEnvVarsSecret` | Secret containing extra env vars to configure MariaDB slave replicas | `nil` | +| `slave.config` | Config file for the MariaDB Slave replicas | `_default values in the values.yaml file_` | +| `slave.resources` | CPU/Memory resource requests/limits for slave node | `{}` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (slave) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (slave) | `120` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (slave) | `10` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (slave) | `1` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe (slave) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe (slave) | `3` | +| `slave.readinessProbe.enabled` | Turn on and off readiness probe (slave) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (slave) | `45` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (slave) | `10` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (slave) | `1` | +| 
`slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe (slave) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe (slave) | `3` | +| `slave.podDisruptionBudget.enabled` | If true, create a pod disruption budget for slave pods. | `false` | +| `slave.podDisruptionBudget.minAvailable` | Minimum number / percentage of pods that should remain scheduled | `1` | +| `slave.podDisruptionBudget.maxUnavailable` | Maximum number / percentage of pods that may be made unavailable | `nil` | +| `slave.service.annotations` | Slave service annotations | `{}` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Exporter image registry | `docker.io` | +| `metrics.image.repository` | Exporter image name | `bitnami/mysqld-exporter` | +| `metrics.image.tag` | Exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` | +| `metrics.resources` | Exporter resource requests/limit | `nil` | +| `metrics.extraArgs.master` | Extra args to be passed to mysqld_exporter | `[]` | +| `metrics.extraArgs.slave` | Extra args to be passed to mysqld_exporter | `[]` | +| `metrics.livenessProbe.enabled` | Turn on and off liveness probe (metrics) | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (metrics) | `120` | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe (metrics) | `10` | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out (metrics) | `1` | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe (metrics) | `1` | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe (metrics) | `3` | +| `metrics.readinessProbe.enabled` | Turn on and off readiness probe (metrics) | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (metrics) | `30` | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe (metrics) | `10` | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out (metrics) | `1` | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe (metrics) | `1` | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe (metrics) | `3` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | + +The above parameters map to the env variables defined in [bitnami/mariadb](http://github.com/bitnami/bitnami-docker-mariadb). For more information please refer to the [bitnami/mariadb](http://github.com/bitnami/bitnami-docker-mariadb) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example,
+
+```bash
+$ helm install my-release \
+    --set rootUser.password=secretpassword,db.user=app_database \
+    bitnami/mariadb
+```
+
+The above command sets the MariaDB `root` account password to `secretpassword`. Additionally, it creates a standard database user named `app_database` with access to the default `my_database` database.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install my-release -f values.yaml bitnami/mariadb
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Production configuration
+
+This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Force users to specify a password and mount secrets as volumes instead of using environment variables:
+
+```diff
+- rootUser.forcePassword: false
+- rootUser.injectSecretsAsVolume: false
++ rootUser.forcePassword: true
++ rootUser.injectSecretsAsVolume: true
+- db.forcePassword: false
+- db.injectSecretsAsVolume: false
++ db.forcePassword: true
++ db.injectSecretsAsVolume: true
+- replication.forcePassword: false
+- replication.injectSecretsAsVolume: false
++ replication.forcePassword: true
++ replication.injectSecretsAsVolume: true
+```
+
+- Desired number of slave replicas:
+
+```diff
+- slave.replicas: 1
++ slave.replicas: 2
+```
+
+- Start a side-car prometheus exporter:
+
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+### Change MariaDB version
+
+To modify the MariaDB version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/mariadb/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters.
+
+### Initialize a fresh instance
+
+The [Bitnami MariaDB](https://github.com/bitnami/bitnami-docker-mariadb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+Alternatively, you can specify custom scripts using the `initdbScripts` parameter as a dict.
+
+In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
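+
+For instance, a short setup script can be passed inline through `initdbScripts`; a minimal sketch (the script name and its contents are purely illustrative):
+
+```yaml
+initdbScripts:
+  my_init_script.sh: |
+    #!/bin/sh
+    echo "Running a custom initialization script"
+```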
+
+### Extra Init Containers
+
+This feature allows you to specify a template string for an initContainer in the master/slave pod. Use cases include situations where you need some pre-run setup. For example, in IKS (IBM Cloud Kubernetes Service), non-root users do not have write permission on the volume mount path for NFS-powered file storage, so you could use an initContainer to `chown` the mount. See the example below, where we add an initContainer on the master pod that notifies an external service that the database is about to start.
+`values.yaml`
+```yaml
+master:
+  extraInitContainers: |
+    - name: initcontainer
+      image: bitnami/minideb:buster
+      command: ["/bin/sh", "-c"]
+      args:
+        - install_packages curl && curl http://api-service.local/db/starting;
+```
+
+## Persistence
+
+The [Bitnami MariaDB](https://github.com/bitnami/bitnami-docker-mariadb) image stores the MariaDB data and configurations at the `/bitnami/mariadb` path of the container.
+
+The chart mounts a [Persistent Volume](https://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can be defined instead.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as a non-root user by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use a Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Upgrading
+
+It's necessary to set the `rootUser.password` parameter when upgrading for the readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Administrator credentials' section. Please note down the password and run the command below to upgrade your chart:
+
+```bash
+$ helm upgrade my-release bitnami/mariadb --set rootUser.password=[ROOT_PASSWORD]
+```
+
+> **Note**: you need to substitute the placeholder _[ROOT_PASSWORD]_ with the value obtained in the installation notes.
+
+### To 7.0.0
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In https://github.com/helm/charts/pull/17308 the `apiVersion` of the statefulset resources was updated to `apps/v1` in line with the deprecation of the older API versions, resulting in compatibility breakage.
+
+This major version bump signifies this change.
+
+### To 6.0.0
+
+The MariaDB version was updated from 10.1 to 10.3; there are no changes in the chart itself. According to the official documentation, upgrading from 10.1 should be painless. However, there are some things that have changed which could affect an upgrade:
+
+- [Incompatible changes upgrading from MariaDB 10.1 to MariaDB 10.2](https://mariadb.com/kb/en/library/upgrading-from-mariadb-101-to-mariadb-102//#incompatible-changes-between-101-and-102)
+- [Incompatible changes upgrading from MariaDB 10.2 to MariaDB 10.3](https://mariadb.com/kb/en/library/upgrading-from-mariadb-102-to-mariadb-103/#incompatible-changes-between-102-and-103)
+
+### To 5.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 5.0.0.
The following example assumes that the release name is mariadb: + +```console +$ kubectl delete statefulset opencart-mariadb --cascade=false +``` diff --git a/nextcloud/charts/mariadb/ci/values-production-with-rbac.yaml b/nextcloud/charts/mariadb/ci/values-production-with-rbac.yaml new file mode 100644 index 0000000..3e71d77 --- /dev/null +++ b/nextcloud/charts/mariadb/ci/values-production-with-rbac.yaml @@ -0,0 +1,31 @@ +# Test values file for generating all of the yaml and check that +# the rendering is correct + +metrics: + enabled: true + +podDisruptionBudget: + create: true + +master: + extraEnvVars: + - name: TEST + value: "3" + + extraEnvVarsSecret: example-secret + extraEnvVarsCM: example-cm + +slave: + extraEnvVars: + - name: TEST + value: "2" + + extraEnvVarsSecret: example-secret-2 + extraEnvVarsCM: example-cm-2 + +rbac: + create: true + +serviceAccount: + create: true + name: mariadb-galera-service-account diff --git a/nextcloud/charts/mariadb/files/docker-entrypoint-initdb.d/README.md b/nextcloud/charts/mariadb/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..aaddde3 --- /dev/null +++ b/nextcloud/charts/mariadb/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom .sh, .sql or .sql.gz file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-mariadb](https://github.com/bitnami/bitnami-docker-mariadb#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/nextcloud/charts/mariadb/templates/NOTES.txt b/nextcloud/charts/mariadb/templates/NOTES.txt new file mode 100644 index 0000000..9cf93ae --- /dev/null +++ b/nextcloud/charts/mariadb/templates/NOTES.txt @@ -0,0 +1,49 @@ + +Please be patient while the chart is being deployed + +Tip: + + Watch the deployment status using the command: kubectl get pods -w --namespace {{ .Release.Namespace }} -l release={{ .Release.Name }} + +Services: + + echo Master: {{ template "mariadb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.port }} +{{- if .Values.replication.enabled }} + echo Slave: {{ template "slave.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.port }} +{{- end }} + +Administrator credentials: + + Username: root + Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.secretName" . }} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) + +To connect to your database: + + 1. Run a pod that you can use as a client: + + kubectl run {{ template "mariadb.fullname" . }}-client --rm --tty -i --restart='Never' --image {{ template "mariadb.image" . }} --namespace {{ .Release.Namespace }} --command -- bash + + 2. To connect to master service (read/write): + + mysql -h {{ template "mariadb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local -uroot -p {{ .Values.db.name }} + +{{- if .Values.replication.enabled }} + + 3. To connect to slave service (read-only): + + mysql -h {{ template "slave.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local -uroot -p {{ .Values.db.name }} +{{- end }} + +To upgrade this helm chart: + + 1. Obtain the password as described on the 'Administrator credentials' section and set the 'rootUser.password' parameter as shown below: + + ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.secretName" . 
}} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) + helm upgrade {{ .Release.Name }} bitnami/mariadb --set rootUser.password=$ROOT_PASSWORD + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/_helpers.tpl b/nextcloud/charts/mariadb/templates/_helpers.tpl new file mode 100644 index 0000000..b5c1d97 --- /dev/null +++ b/nextcloud/charts/mariadb/templates/_helpers.tpl @@ -0,0 +1,288 @@ +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "mariadb.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mariadb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mariadb.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "master.fullname" -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" (include "mariadb.fullname" .) "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- include "mariadb.fullname" . -}} +{{- end -}} +{{- end -}} + +{{- define "slave.fullname" -}} +{{- printf "%s-%s" (include "mariadb.fullname" .) "slave" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "mariadb.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create pod labels for mariadb +*/}} +{{- define "mariadb.podLabels" -}} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper MariaDB image name +*/}} +{{- define "mariadb.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper metrics image name +*/}} +{{- define "mariadb.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{ template "mariadb.initdbScriptsCM" . }} +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "mariadb.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" .Values.initdbScriptsConfigMap -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "master.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "mariadb.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "mariadb.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mariadb.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mariadb.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for the master +*/}} +{{- define "mariadb.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for the slave +*/}} +{{- define "mariadb.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the Secret used to store the passwords +*/}} +{{- define "mariadb.secretName" -}} +{{- if .Values.existingSecret -}} +{{ .Values.existingSecret }} +{{- else -}} +{{ template "mariadb.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "mariadb.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "mariadb.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/nextcloud/charts/mariadb/templates/initialization-configmap.yaml b/nextcloud/charts/mariadb/templates/initialization-configmap.yaml new file mode 100644 index 0000000..2dbeb2d --- /dev/null +++ b/nextcloud/charts/mariadb/templates/initialization-configmap.yaml @@ -0,0 +1,27 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "master.fullname" . }}-init-scripts + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + chart: "{{ template "mariadb.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + component: "master" +{{- if and (.Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz") (not .Values.initdbScriptsConfigMap) }} +binaryData: +{{- $root := . }} +{{- range $path, $bytes := .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} + {{ base $path }}: {{ $root.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- if and (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}") (not .Values.initdbScriptsConfigMap) }} +{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}").AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{ end }} diff --git a/nextcloud/charts/mariadb/templates/master-configmap.yaml b/nextcloud/charts/mariadb/templates/master-configmap.yaml new file mode 100644 index 0000000..3620743 --- /dev/null +++ b/nextcloud/charts/mariadb/templates/master-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.master.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "master.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + component: "master" + chart: "{{ template "mariadb.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: + my.cnf: |- +{{ .Values.master.config | indent 4 }} +{{- end -}} diff --git a/nextcloud/charts/mariadb/templates/master-pdb.yaml b/nextcloud/charts/mariadb/templates/master-pdb.yaml new file mode 100644 index 0000000..012d09a --- /dev/null +++ b/nextcloud/charts/mariadb/templates/master-pdb.yaml @@ -0,0 +1,25 @@ +{{- if .Values.master.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "mariadb.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + component: "master" + chart: {{ template "mariadb.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: +{{- if .Values.master.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.master.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.master.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.master.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + app: "{{ template "mariadb.name" . }}" + component: "master" + release: {{ .Release.Name | quote }} +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/master-statefulset.yaml b/nextcloud/charts/mariadb/templates/master-statefulset.yaml new file mode 100644 index 0000000..e574444 --- /dev/null +++ b/nextcloud/charts/mariadb/templates/master-statefulset.yaml @@ -0,0 +1,347 @@ +apiVersion: {{ template "mariadb.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "master.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "mariadb.name" . }} + chart: {{ template "mariadb.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + {{- include "mariadb.podLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + app: {{ template "mariadb.name" . }} + release: {{ .Release.Name }} + component: master + serviceName: {{ template "master.fullname" . }} + replicas: 1 + updateStrategy: + type: {{ .Values.master.updateStrategy.type }} + {{- if (eq "Recreate" .Values.master.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + template: + metadata: + {{- if .Values.master.annotations }} + annotations: {{- include "mariadb.tplValue" (dict "value" .Values.master.annotations "context" $) | nindent 8 }} + {{- end }} + labels: + app: {{ template "mariadb.name" . }} + chart: {{ template "mariadb.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + {{- include "mariadb.podLabels" . | nindent 8 }} + spec: + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mariadb.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if eq .Values.master.antiAffinity "hard" }} + affinity: + {{- with .Values.master.affinity }} +{{ toYaml . | indent 8 }} + {{- end }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: {{ template "mariadb.name" . }} + release: {{ .Release.Name }} + {{- else if eq .Values.master.antiAffinity "soft" }} + affinity: + {{- with .Values.master.affinity }} +{{ toYaml . 
| indent 8 }} + {{- end }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: {{ template "mariadb.name" . }} + release: {{ .Release.Name }} + {{- else}} + {{- with .Values.master.affinity }} + affinity: {{ toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{ toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end -}} + {{- with .Values.master.tolerations }} + tolerations: {{ toYaml . | nindent 8 }} + {{- end }} +{{- include "mariadb.imagePullSecrets" . | indent 6 }} + initContainers: + {{- if .Values.master.extraInitContainers }} +{{ tpl .Values.master.extraInitContainers . | indent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }} + - name: volume-permissions + image: {{ template "mariadb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.mountPath }}"] + securityContext: + runAsUser: 0 + resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: data + mountPath: {{ .Values.master.persistence.mountPath }} + {{- end }} + containers: + - name: "mariadb" + image: {{ template "mariadb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + {{- if .Values.image.debug}} + - name: BITNAMI_DEBUG + value: "true" + {{- end }} + {{- if .Values.master.extraFlags }} + - name: MARIADB_EXTRA_FLAGS + value: "{{ .Values.master.extraFlags }}" + {{- end }} + {{- if .Values.rootUser.injectSecretsAsVolume }} + - name: MARIADB_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mariadb/secrets/mariadb-root-password" .Values.rootUser.injectSecretsFile }} + {{- else }} + - name: MARIADB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . }} + key: mariadb-root-password + {{- end }} + {{- if not (empty .Values.db.user) }} + - name: MARIADB_USER + value: "{{ .Values.db.user }}" + {{- if .Values.db.injectSecretsAsVolume }} + - name: MARIADB_PASSWORD_FILE + value: {{ default "/opt/bitnami/mariadb/secrets/mariadb-password" .Values.db.injectSecretsFile }} + {{- else }} + - name: MARIADB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . }} + key: mariadb-password + {{- end }} + {{- end }} + - name: MARIADB_DATABASE + value: "{{ .Values.db.name }}" + {{- if .Values.replication.enabled }} + - name: MARIADB_REPLICATION_MODE + value: "master" + - name: MARIADB_REPLICATION_USER + value: "{{ .Values.replication.user }}" + {{- if .Values.replication.injectSecretsAsVolume }} + - name: MARIADB_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mariadb/secrets/mariadb-replication-password" .Values.replication.injectSecretsFile }} + {{- else }} + - name: MARIADB_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . 
}} + key: mariadb-replication-password + {{- end }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- tpl (toYaml .Values.master.extraEnvVars) $ | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - sh + - -c + - | + password_aux="${MARIADB_ROOT_PASSWORD:-}" + if [ -f "${MARIADB_ROOT_PASSWORD_FILE:-}" ]; then + password_aux=$(cat $MARIADB_ROOT_PASSWORD_FILE) + fi + mysqladmin status -uroot -p$password_aux + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - sh + - -c + - | + password_aux="${MARIADB_ROOT_PASSWORD:-}" + if [ -f "${MARIADB_ROOT_PASSWORD_FILE:-}" ]; then + password_aux=$(cat $MARIADB_ROOT_PASSWORD_FILE) + fi + mysqladmin status -uroot -p$password_aux + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.master.resources }} + resources: {{ toYaml .Values.master.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.master.persistence.mountPath }} + {{- if .Values.master.persistence.subPath }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.master.config }} + - name: config + mountPath: /opt/bitnami/mariadb/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if or .Values.rootUser.injectSecretsAsVolume .Values.db.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} + - name: mariadb-credentials + mountPath: /opt/bitnami/mariadb/secrets/ + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mariadb.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.rootUser.injectSecretsAsVolume }} + - name: MARIADB_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mariadb-root-password" .Values.rootUser.injectSecretsFile }} + {{- else }} + - name: MARIADB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . 
}} + key: mariadb-root-password + {{- end }} + command: + - sh + - -c + - | + password_aux="${MARIADB_ROOT_PASSWORD:-}" + if [ -f "${MARIADB_ROOT_PASSWORD_FILE:-}" ]; then + password_aux=$(cat $MARIADB_ROOT_PASSWORD_FILE) + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.master }} {{ . }} {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{ toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if .Values.rootUser.injectSecretsAsVolume }} + volumeMounts: + - name: mariadb-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + volumes: + {{- if .Values.master.config }} + - name: config + configMap: + name: {{ template "master.fullname" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "mariadb.initdbScriptsCM" . }} + {{- end }} + {{- if or .Values.rootUser.injectSecretsAsVolume .Values.db.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} + - name: mariadb-credentials + secret: + secretName: {{ template "mariadb.fullname" . }} + items: + {{- if .Values.rootUser.injectSecretsAsVolume }} + - key: mariadb-root-password + path: mariadb-root-password + {{- end }} + {{- if .Values.db.injectSecretsAsVolume }} + - key: mariadb-password + path: mariadb-password + {{- end }} + {{- if and .Values.replication.enabled .Values.replication.injectSecretsAsVolume }} + - key: mariadb-replication-password + path: mariadb-replication-password + {{- end }} + {{- end }} +{{- if and .Values.master.persistence.enabled .Values.master.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ .Values.master.persistence.existingClaim }} +{{- else if not .Values.master.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.master.persistence.enabled (not .Values.master.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + labels: + app: "{{ template "mariadb.name" . }}" + component: "master" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "mariadb.master.storageClass" . 
}} + {{- if .Values.master.persistence.selector }} + selector: {{- include "mariadb.tplValue" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }} + {{- end -}} +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/master-svc.yaml b/nextcloud/charts/mariadb/templates/master-svc.yaml new file mode 100644 index 0000000..5b1be80 --- /dev/null +++ b/nextcloud/charts/mariadb/templates/master-svc.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mariadb.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + component: "master" + chart: "{{ template "mariadb.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- if or .Values.metrics.enabled .Values.master.service.annotations }} + annotations: +{{- if .Values.metrics.enabled }} +{{ toYaml .Values.metrics.annotations | indent 4 }} +{{- end }} +{{- if .Values.master.service.annotations }} +{{ toYaml .Values.master.service.annotations | indent 4 }} +{{- end }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "ClusterIP" }} + {{- if .Values.service.clusterIp }} + clusterIP: {{ .Values.service.clusterIp.master }} + {{- end }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.service.port }} + targetPort: mysql + {{- if eq .Values.service.type "NodePort" }} + {{- if .Values.service.nodePort }} + {{- if .Values.service.nodePort.master }} + nodePort: {{ .Values.service.nodePort.master }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + port: 9104 + targetPort: metrics + {{- end }} + selector: + app: "{{ template "mariadb.name" . }}" + component: "master" + release: "{{ .Release.Name }}" diff --git a/nextcloud/charts/mariadb/templates/role.yaml b/nextcloud/charts/mariadb/templates/role.yaml new file mode 100644 index 0000000..296c439 --- /dev/null +++ b/nextcloud/charts/mariadb/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "master.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + chart: "{{ template "mariadb.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/rolebinding.yaml b/nextcloud/charts/mariadb/templates/rolebinding.yaml new file mode 100644 index 0000000..223eda9 --- /dev/null +++ b/nextcloud/charts/mariadb/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "master.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + chart: "{{ template "mariadb.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +subjects: + - kind: ServiceAccount + name: {{ template "mariadb.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "master.fullname" . 
}} +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/secrets.yaml b/nextcloud/charts/mariadb/templates/secrets.yaml new file mode 100644 index 0000000..5cb886a --- /dev/null +++ b/nextcloud/charts/mariadb/templates/secrets.yaml @@ -0,0 +1,44 @@ +{{- if (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mariadb.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + chart: "{{ template "mariadb.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.secret.annotations }} + annotations: {{- include "mariadb.tplValue" (dict "value" .Values.secret.annotations "context" $) | nindent 4 }} + {{- end }} +type: Opaque +{{- if .Values.secret.requirePasswords }} +data: + {{- if not (empty .Values.rootUser.password) }} + mariadb-root-password: "{{ .Values.rootUser.password | b64enc }}" + {{- else if (not .Values.rootUser.forcePassword) }} + mariadb-root-password: "{{ randAlphaNum 10 | b64enc }}" + {{ else }} + mariadb-root-password: {{ required "A MariaDB Root Password is required!" .Values.rootUser.password }} + {{- end }} + {{- if not (empty .Values.db.user) }} + {{- if not (empty .Values.db.password) }} + mariadb-password: "{{ .Values.db.password | b64enc }}" + {{- else if (not .Values.db.forcePassword) }} + mariadb-password: "{{ randAlphaNum 10 | b64enc }}" + {{- else }} + mariadb-password: {{ required "A MariaDB Database Password is required!" .Values.db.password }} + {{- end }} + {{- end }} + {{- if .Values.replication.enabled }} + {{- if not (empty .Values.replication.password) }} + mariadb-replication-password: "{{ .Values.replication.password | b64enc }}" + {{- else if (not .Values.replication.forcePassword) }} + mariadb-replication-password: "{{ randAlphaNum 10 | b64enc }}" + {{- else }} + mariadb-replication-password: {{ required "A MariaDB Replication Password is required!" .Values.replication.password }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/serviceaccount.yaml b/nextcloud/charts/mariadb/templates/serviceaccount.yaml new file mode 100644 index 0000000..1611e9c --- /dev/null +++ b/nextcloud/charts/mariadb/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "mariadb.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + chart: "{{ template "mariadb.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: {{- include "mariadb.tplValue" (dict "value" .Values.serviceAccount.annotations "context" $) | nindent 4 }} +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/servicemonitor.yaml b/nextcloud/charts/mariadb/templates/servicemonitor.yaml new file mode 100644 index 0000000..696a5c0 --- /dev/null +++ b/nextcloud/charts/mariadb/templates/servicemonitor.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "mariadb.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: + app: "{{ template "mariadb.name" . }}" + chart: {{ template "mariadb.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + selector: + matchLabels: + app: {{ template "mariadb.name" . }} + release: {{ .Release.Name | quote }} + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/slave-configmap.yaml b/nextcloud/charts/mariadb/templates/slave-configmap.yaml new file mode 100644 index 0000000..21b0579 --- /dev/null +++ b/nextcloud/charts/mariadb/templates/slave-configmap.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.replication.enabled .Values.slave.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "slave.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + component: "slave" + chart: "{{ template "mariadb.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: + my.cnf: |- +{{ .Values.slave.config | indent 4 }} +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/slave-pdb.yaml b/nextcloud/charts/mariadb/templates/slave-pdb.yaml new file mode 100644 index 0000000..40f3572 --- /dev/null +++ b/nextcloud/charts/mariadb/templates/slave-pdb.yaml @@ -0,0 +1,27 @@ +{{- if .Values.replication.enabled }} +{{- if .Values.slave.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "mariadb.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + component: "slave" + chart: {{ template "mariadb.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: +{{- if .Values.slave.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.slave.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.slave.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.slave.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + app: "{{ template "mariadb.name" . }}" + component: "slave" + release: {{ .Release.Name | quote }} +{{- end }} +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/slave-statefulset.yaml b/nextcloud/charts/mariadb/templates/slave-statefulset.yaml new file mode 100644 index 0000000..cca7fc8 --- /dev/null +++ b/nextcloud/charts/mariadb/templates/slave-statefulset.yaml @@ -0,0 +1,317 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "mariadb.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "slave.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "mariadb.name" . }} + chart: {{ template "mariadb.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- include "mariadb.podLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + app: {{ template "mariadb.name" . }} + release: {{ .Release.Name }} + component: slave + serviceName: {{ template "slave.fullname" . 
}} + replicas: {{ .Values.slave.replicas }} + updateStrategy: + type: {{ .Values.slave.updateStrategy.type }} + {{- if (eq "Recreate" .Values.slave.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + template: + metadata: + {{- if .Values.slave.annotations }} + annotations: {{- include "mariadb.tplValue" (dict "value" .Values.slave.annotations "context" $) | nindent 8 }} + {{- end }} + labels: + app: {{ template "mariadb.name" . }} + chart: {{ template "mariadb.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- include "mariadb.podLabels" . | nindent 8 }} + spec: + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mariadb.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if eq .Values.slave.antiAffinity "hard" }} + affinity: + {{- with .Values.slave.affinity }} +{{ toYaml . | indent 8 }} + {{- end }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: {{ template "mariadb.name" . }} + release: {{ .Release.Name }} + {{- else if eq .Values.slave.antiAffinity "soft" }} + affinity: + {{- with .Values.slave.affinity }} +{{ toYaml . | indent 8 }} + {{- end }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: {{ template "mariadb.name" . }} + release: {{ .Release.Name }} + {{- else}} + {{- with .Values.slave.affinity }} + affinity: {{ toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{ toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end -}} + {{- with .Values.slave.tolerations }} + tolerations: {{ toYaml . | nindent 8 }} + {{- end }} +{{- include "mariadb.imagePullSecrets" . | indent 6 }} + initContainers: + {{- if .Values.master.extraInitContainers }} + {{- tpl .Values.master.extraInitContainers . | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.slave.persistence.enabled }} + - name: volume-permissions + image: {{ template "mariadb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "/bitnami/mariadb"] + securityContext: + runAsUser: 0 + resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: data + mountPath: /bitnami/mariadb + {{- end }} + containers: + - name: "mariadb" + image: {{ template "mariadb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + {{- if .Values.image.debug}} + - name: BITNAMI_DEBUG + value: "true" + {{- end }} + {{- if .Values.slave.extraFlags }} + - name: MARIADB_EXTRA_FLAGS + value: "{{ .Values.slave.extraFlags }}" + {{- end }} + - name: MARIADB_REPLICATION_MODE + value: "slave" + - name: MARIADB_MASTER_HOST + value: {{ template "mariadb.fullname" . 
}} + - name: MARIADB_MASTER_PORT_NUMBER + value: "{{ .Values.service.port }}" + - name: MARIADB_MASTER_ROOT_USER + value: "root" + {{- if .Values.rootUser.injectSecretsAsVolume }} + - name: MARIADB_MASTER_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mariadb/secrets/mariadb-root-password" .Values.rootUser.injectSecretsFile }} + {{- else }} + - name: MARIADB_MASTER_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . }} + key: mariadb-root-password + {{- end }} + - name: MARIADB_REPLICATION_USER + value: "{{ .Values.replication.user }}" + {{- if .Values.replication.injectSecretsAsVolume }} + - name: MARIADB_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mariadb/secrets/mariadb-replication-password" .Values.replication.injectSecretsFile }} + {{- else }} + - name: MARIADB_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . }} + key: mariadb-replication-password + {{- end }} + {{- if .Values.slave.extraEnvVars }} + {{- tpl (toYaml .Values.slave.extraEnvVars) $ | nindent 12 }} + {{- end }} + {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} + envFrom: + {{- if .Values.slave.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.slave.extraEnvVarsCM }} + {{- end }} + {{- if .Values.slave.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.slave.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - sh + - -c + - | + password_aux="${MARIADB_MASTER_ROOT_PASSWORD:-}" + if [ -f "${MARIADB_MASTER_ROOT_PASSWORD_FILE:-}" ]; then + password_aux=$(cat $MARIADB_MASTER_ROOT_PASSWORD_FILE) + fi + mysqladmin status -uroot -p$password_aux + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - sh + - -c + - | + password_aux="${MARIADB_MASTER_ROOT_PASSWORD:-}" + if [ -f "${MARIADB_MASTER_ROOT_PASSWORD_FILE:-}" ]; then + password_aux=$(cat $MARIADB_MASTER_ROOT_PASSWORD_FILE) + fi + mysqladmin status -uroot -p$password_aux + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.slave.resources }} + resources: {{ toYaml .Values.slave.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mariadb + {{- if .Values.slave.config }} + - name: config + mountPath: /opt/bitnami/mariadb/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if or .Values.rootUser.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} + - name: mariadb-credentials + mountPath: /opt/bitnami/mariadb/secrets/ + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mariadb.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.rootUser.injectSecretsAsVolume }} + - name: MARIADB_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mariadb-root-password" .Values.rootUser.injectSecretsFile }} + {{- else }} + - name: MARIADB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.secretName" . }} + key: mariadb-root-password + {{- end }} + command: + - sh + - -c + - | + password_aux="${MARIADB_ROOT_PASSWORD:-}" + if [ -f "${MARIADB_ROOT_PASSWORD_FILE:-}" ]; then + password_aux=$(cat $MARIADB_ROOT_PASSWORD_FILE) + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.slave }} {{ . }} {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{ toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if .Values.rootUser.injectSecretsAsVolume }} + volumeMounts: + - name: mariadb-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + volumes: + {{- if .Values.slave.config }} + - name: config + configMap: + name: {{ template "slave.fullname" . }} + {{- end }} + {{- if or .Values.rootUser.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} + - name: mariadb-credentials + secret: + secretName: {{ template "mariadb.fullname" . }} + items: + {{- if .Values.rootUser.injectSecretsAsVolume }} + - key: mariadb-root-password + path: mariadb-root-password + {{- end }} + {{- if .Values.replication.injectSecretsAsVolume }} + - key: mariadb-replication-password + path: mariadb-replication-password + {{- end }} + {{- end }} +{{- if not .Values.slave.persistence.enabled }} + - name: "data" + emptyDir: {} +{{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: + app: "{{ template "mariadb.name" . }}" + component: "slave" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "mariadb.slave.storageClass" . 
}} + {{- if .Values.slave.persistence.selector }} + selector: {{- include "mariadb.tplValue" (dict "value" .Values.slave.persistence.selector "context" $) | nindent 10 }} + {{- end -}} +{{- end }} +{{- end }} diff --git a/nextcloud/charts/mariadb/templates/slave-svc.yaml b/nextcloud/charts/mariadb/templates/slave-svc.yaml new file mode 100644 index 0000000..12fa7eb --- /dev/null +++ b/nextcloud/charts/mariadb/templates/slave-svc.yaml @@ -0,0 +1,49 @@ +{{- if .Values.replication.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "slave.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: "{{ template "mariadb.name" . }}" + chart: "{{ template "mariadb.chart" . }}" + component: "slave" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- if or .Values.metrics.enabled .Values.slave.service.annotations }} + annotations: +{{- if .Values.metrics.enabled }} +{{ toYaml .Values.metrics.annotations | indent 4 }} +{{- end }} +{{- if .Values.slave.service.annotations }} +{{ toYaml .Values.slave.service.annotations | indent 4 }} +{{- end }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "ClusterIP" }} + {{- if .Values.service.clusterIp }} + clusterIP: {{ .Values.service.clusterIp.slave }} + {{- end }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.service.port }} + targetPort: mysql + {{- if (eq .Values.service.type "NodePort") }} + {{- if .Values.service.nodePort }} + {{- if .Values.service.nodePort.slave }} + nodePort: {{ .Values.service.nodePort.slave }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + port: 9104 + targetPort: metrics + {{- end }} + selector: + app: "{{ template "mariadb.name" . }}" + component: "slave" + release: "{{ .Release.Name }}" +{{- end }} diff --git a/nextcloud/charts/mariadb/values-production.yaml b/nextcloud/charts/mariadb/values-production.yaml new file mode 100644 index 0000000..f760e91 --- /dev/null +++ b/nextcloud/charts/mariadb/values-production.yaml @@ -0,0 +1,614 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Bitnami MariaDB image +## ref: https://hub.docker.com/r/bitnami/mariadb/tags/ +## +image: + registry: docker.io + repository: bitnami/mariadb + tag: 10.3.24-debian-10-r17 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override mariadb.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override mariadb.fullname template +## +# fullnameOverride: + +## Additional pod labels +## +# podLabels: +# extraLabel: extraValue + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +service: + ## Kubernetes service type, ClusterIP and NodePort are supported at present + type: ClusterIP + # clusterIp: + # master: xx.xx.xx.xx + # slave: xx.xx.xx.xx + port: 3306 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + # master: 30001 + # slave: 30002 + +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the mariadb.fullname template + # name: + ## Annotations to add to the service account (evaluated as a template) + ## + annotations: {} + +## Role Based Access +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + create: false + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Use existing secret (ignores root, db and replication passwords) +## +# existingSecret: + +## Allow customization of the secret resource +## +secret: + ## Add custom annotations to the secret (evaluated as a template) + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + annotations: {} + ## Require all passwords at the secret resource + ## + requirePasswords: true + +## MariaDB admin credentials +## +rootUser: + ## MariaDB admin password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-the-root-password-on-first-run + ## + password: "" + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. 
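+ ## Illustrative override (example value only, not an upstream default): with
+ ## forcePassword enabled, supply the password explicitly at install/upgrade
+ ## time, e.g. in a custom values file:
+ ## rootUser:
+ ##   password: "change-me"
+ ## or point existingSecret at a pre-created Secret that already contains a
+ ## mariadb-root-password key.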
+ ## + forcePassword: true + ## Mount admin password as a file instead of using an environment variable + ## + injectSecretsAsVolume: true + ## Specify custom secrets file location if injectSecretsAsVolume is true, + ## defaults to /opt/bitnami/mariadb/secrets/mariadb-root-password + # injectSecretsFile: "/vault/secrets/root-password" + +## Custom user/db credentials +## +db: + ## MariaDB username and password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#creating-a-database-user-on-first-run + ## + user: "" + password: "" + ## Database to create + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#creating-a-database-on-first-run + ## + name: my_database + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. + ## + forcePassword: true + ## Mount user password as a file instead of using an environment variable + ## + injectSecretsAsVolume: true + ## Specify custom secrets file location if injectSecretsAsVolume is true, + ## defaults to /opt/bitnami/mariadb/secrets/mariadb-password + # injectSecretsFile: "/vault/secrets/user-password" + +## Replication configuration +## +replication: + ## Enable replication. This enables the creation of replicas of MariaDB. If false, only a + ## master deployment would be created + ## + enabled: true + ## MariaDB replication user + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster + ## + user: replicator + ## MariaDB replication user password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster + ## + password: "" + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. + ## + forcePassword: true + ## Mount replication user password as a file instead of using an environment variable + ## + injectSecretsAsVolume: true + ## Specify custom secrets file location if injectSecretsAsVolume is true, + ## defaults to //opt/bitnami/mariadb/secrets/mariadb-replication-password + # injectSecretsFile: "/vault/secrets/replication-password" + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." +# +## ConfigMap with scripts to be run at first boot +## Note: This will override initdbScripts +# initdbScriptsConfigMap: + +master: + ## Mariadb Master additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + # annotations: + # key: value + # another-key: another-value + + ## MariaDB additional command line flags + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + + ## Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## An array to add extra environment variables + ## For example: + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: + + ## Kept for backwards compatibility. You can now disable it by removing it. 
+ ## if you wish to set it through master.affinity.podAntiAffinity instead. + ## + antiAffinity: soft + + ## Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## updateStrategy for MariaDB Master StatefulSet + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + updateStrategy: + type: RollingUpdate + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + # Enable persistence using an existing PVC + # existingClaim: + # Subdirectory of the volume to mount + # subPath: + mountPath: /bitnami/mariadb + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## Persistent Volume Claim annotations + ## + annotations: {} + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: 8Gi + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + + extraInitContainers: [] + + ## An array to add extra environment variables + ## For example: + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + # extraEnvVars: + + ## Configure MySQL with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + config: |- + [mysqld] + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mariadb + plugin_dir=/opt/bitnami/mariadb/plugin + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + tmpdir=/opt/bitnami/mariadb/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + log-error=/opt/bitnami/mariadb/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mariadb/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + + ## Configure master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + livenessProbe: + enabled: true + ## + ## Initializing the database could take some time + initialDelaySeconds: 120 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 + + ## Allow customization of the service resource + ## + service: + ## Add custom annotations to the service + ## + annotations: {} + +slave: + replicas: 2 + + ## Mariadb Slave additional pod annotations + ## ref: 
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + # annotations: + # key: value + # another-key: another-value + + ## MariaDB additional command line flags + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: --max-connect-errors=1000 --max_connections=155" + + ## An array to add extra environment variables + ## For example: + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: + + ## Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Kept for backwards compatibility. You can now disable it by removing it. + ## if you wish to set it through slave.affinity.podAntiAffinity instead. + ## + antiAffinity: soft + + ## Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## updateStrategy for MariaDB Slave StatefulSet + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + updateStrategy: + type: RollingUpdate + + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + # storageClass: "-" + annotations: + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: 8Gi + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + + ## Add extra init containers + ## + extraInitContainers: [] + + ## An array to add extra environment variables + ## For example: + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + # extraEnvVars: + + ## Configure MySQL slave with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + config: |- + [mysqld] + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mariadb + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + tmpdir=/opt/bitnami/mariadb/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + log-error=/opt/bitnami/mariadb/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + default-character-set=UTF8 + + [manager] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + + ## + ## Configure slave resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + livenessProbe: + enabled: true + ## + ## Initializing the database could take some time + initialDelaySeconds: 120 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + initialDelaySeconds: 45 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 + + ## Allow customization of the service resource + ## + service: + ## Add custom annotations to the service + ## + annotations: {} + +metrics: + enabled: true + 
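+ ## Illustrative override (assumes the Prometheus Operator CRDs are installed
+ ## in the cluster): the exporter sidecar can additionally be registered via
+ ## the ServiceMonitor settings further below, e.g.:
+ ## serviceMonitor:
+ ##   enabled: true
+ ##   interval: 30s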
image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.12.1-debian-10-r199 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9104" + + ## Extra args to be passed to mysqld_exporter + ## ref: https://github.com/prometheus/mysqld_exporter/ + ## + extraArgs: + master: [] + slave: [] + # - --collect.auto_increment.columns + # - --collect.binlog_size + # - --collect.engine_innodb_status + # - --collect.engine_tokudb_status + # - --collect.global_status + # - --collect.global_variables + # - --collect.info_schema.clientstats + # - --collect.info_schema.innodb_metrics + # - --collect.info_schema.innodb_tablespaces + # - --collect.info_schema.innodb_cmp + # - --collect.info_schema.innodb_cmpmem + # - --collect.info_schema.processlist + # - --collect.info_schema.processlist.min_time + # - --collect.info_schema.query_response_time + # - --collect.info_schema.tables + # - --collect.info_schema.tables.databases + # - --collect.info_schema.tablestats + # - --collect.info_schema.userstats + # - --collect.perf_schema.eventsstatements + # - --collect.perf_schema.eventsstatements.digest_text_limit + # - --collect.perf_schema.eventsstatements.limit + # - --collect.perf_schema.eventsstatements.timelimit + # - --collect.perf_schema.eventswaits + # - --collect.perf_schema.file_events + # - --collect.perf_schema.file_instances + # - --collect.perf_schema.indexiowaits + # - --collect.perf_schema.tableiowaits + # - --collect.perf_schema.tablelocks + # - --collect.perf_schema.replication_group_member_stats + # - --collect.slave_status + # - --collect.slave_hosts + # - --collect.heartbeat + # - --collect.heartbeat.database + # - --collect.heartbeat.table + + livenessProbe: + enabled: true + ## + ## Initializing the database could take some time + initialDelaySeconds: 120 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + # scrapeTimeout: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus diff --git a/nextcloud/charts/mariadb/values.schema.json b/nextcloud/charts/mariadb/values.schema.json new file mode 100644 index 0000000..b1b72e1 --- /dev/null +++ b/nextcloud/charts/mariadb/values.schema.json @@ -0,0 +1,169 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "rootUser": { + "type": "object", + "properties": { + "password": { + "type": 
"string", + "title": "MariaDB admin password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + } + } + }, + "db": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "MariaDB custom database", + "description": "Name of the custom database to be created during the 1st initialization of MariaDB", + "form": true + }, + "user": { + "type": "string", + "title": "MariaDB custom user", + "description": "Name of the custom user to be created during the 1st initialization of MariaDB. This user only has permissions on the MariaDB custom database", + "form": true + }, + "password": { + "type": "string", + "title": "Password for MariaDB custom user", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + } + } + }, + "replication": { + "type": "object", + "title": "Replication configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable replication configuration" + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/nextcloud/charts/mariadb/values.yaml 
b/nextcloud/charts/mariadb/values.yaml new file mode 100644 index 0000000..7a8563a --- /dev/null +++ b/nextcloud/charts/mariadb/values.yaml @@ -0,0 +1,602 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Bitnami MariaDB image +## ref: https://hub.docker.com/r/bitnami/mariadb/tags/ +## +image: + registry: docker.io + repository: bitnami/mariadb + tag: 10.3.24-debian-10-r17 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override mariadb.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override mariadb.fullname template +## +# fullnameOverride: + +## Additional pod labels +## +# podLabels: +# extraLabel: extraValue + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +service: + ## Kubernetes service type, ClusterIP and NodePort are supported at present + type: ClusterIP + # clusterIp: + # master: xx.xx.xx.xx + # slave: xx.xx.xx.xx + port: 3306 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + # master: 30001 + # slave: 30002 + +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the mariadb.fullname template + # name: + ## Annotations to add to the service account (evaluated as a template) + ## + annotations: {} + +## Role Based Access +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + create: false + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Use existing secret (ignores root, db and replication passwords) +## +# existingSecret: + +## Allow customization of the secret resource +## +secret: + ## Add custom annotations to the secret (evaluated as a template) + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + annotations: {} + ## Require all passwords at the secret resource + ## + requirePasswords: true + +## MariaDB admin credentials +## +rootUser: + ## MariaDB admin password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-the-root-password-on-first-run + ## + password: "" + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. + ## + forcePassword: false + ## Mount admin password as a file instead of using an environment variable + ## + injectSecretsAsVolume: false + ## Specify custom secrets file location if injectSecretsAsVolume is true, + ## defaults to /opt/bitnami/mariadb/secrets/mariadb-root-password + # injectSecretsFile: "/vault/secrets/root-password" + +## Custom user/db credentials +## +db: + ## MariaDB username and password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#creating-a-database-user-on-first-run + ## + user: "" + password: "" + ## Database to create + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#creating-a-database-on-first-run + ## + name: my_database + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. + ## + forcePassword: false + ## Mount user password as a file instead of using an environment variable + ## + injectSecretsAsVolume: false + ## Specify custom secrets file location if injectSecretsAsVolume is true, + ## defaults to /opt/bitnami/mariadb/secrets/mariadb-password + # injectSecretsFile: "/vault/secrets/user-password" + +## Replication configuration +## +replication: + ## Enable replication. This enables the creation of replicas of MariaDB. If false, only a + ## master deployment would be created + ## + enabled: true + ## MariaDB replication user + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster + ## + user: replicator + ## MariaDB replication user password + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster + ## + password: "" + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. 
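+ ## Illustrative override (not an upstream default): for a single-instance
+ ## deployment, replication can be disabled so that only the master
+ ## StatefulSet is created:
+ ## replication:
+ ##   enabled: false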
+ ## + forcePassword: false + ## Mount replication user password as a file instead of using an environment variable + ## + injectSecretsAsVolume: false + ## Specify custom secrets file location if injectSecretsAsVolume is true, + ## defaults to //opt/bitnami/mariadb/secrets/mariadb-replication-password + # injectSecretsFile: "/vault/secrets/replication-password" + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." +# +## ConfigMap with scripts to be run at first boot +## Note: This will override initdbScripts +# initdbScriptsConfigMap: + +master: + ## Mariadb Master additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + # annotations: + # key: value + # another-key: another-value + + ## MariaDB additional command line flags + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + + ## Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Kept for backwards compatibility. You can now disable it by removing it. + ## if you wish to set it through master.affinity.podAntiAffinity instead. + ## + antiAffinity: soft + + ## Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## updateStrategy for MariaDB Master StatefulSet + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + updateStrategy: + type: RollingUpdate + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + # Enable persistence using an existing PVC + # existingClaim: + # Subdirectory of the volume to mount + # subPath: + mountPath: /bitnami/mariadb + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## Persistent Volume Claim annotations + ## + annotations: {} + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: 8Gi + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + + ## Add extra init Containers (evaluated as a templ + ## + extraInitContainers: [] + + ## An array to add extra environment variables + ## For example: + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: + + ## Configure MySQL with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + config: |- + [mysqld] + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mariadb + plugin_dir=/opt/bitnami/mariadb/plugin + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + tmpdir=/opt/bitnami/mariadb/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + log-error=/opt/bitnami/mariadb/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mariadb/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + + ## Configure master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + livenessProbe: + enabled: true + ## + ## Initializing the database could take some time + initialDelaySeconds: 120 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 + + ## Allow customization of the service resource + ## + service: + ## Add custom annotations to the service + ## + annotations: {} + # external-dns.alpha.kubernetes.io/hostname: db.example.com + +slave: + replicas: 1 + + ## Mariadb Slave additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + # annotations: + # key: value + # another-key: another-value + + ## MariaDB additional command line flags + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + + ## Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Kept for backwards compatibility. You can now disable it by removing it. + ## if you wish to set it through slave.affinity.podAntiAffinity instead. 
+ ## + antiAffinity: soft + + ## Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## updateStrategy for MariaDB Slave StatefulSet + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + updateStrategy: + type: RollingUpdate + + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + # storageClass: "-" + annotations: + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: 8Gi + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + + ## Add extra init containers + ## + extraInitContainers: + + ## An array to add extra environment variables + ## For example: + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: + + ## Configure MySQL slave with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + config: |- + [mysqld] + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mariadb + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + tmpdir=/opt/bitnami/mariadb/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + log-error=/opt/bitnami/mariadb/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + default-character-set=UTF8 + + [manager] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + + ## + ## Configure slave resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + livenessProbe: + enabled: true + ## + ## Initializing the database could take some time + initialDelaySeconds: 120 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + initialDelaySeconds: 45 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 + + ## Allow customization of the service resource + ## + service: + ## Add custom annotations to the service + ## + annotations: {} + # external-dns.alpha.kubernetes.io/hostname: rodb.example.com + +metrics: + enabled: false + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.12.1-debian-10-r199 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9104" + + ## Extra args to be passed to mysqld_exporter + ## ref: https://github.com/prometheus/mysqld_exporter/ + ## + extraArgs: + master: [] + slave: [] + # - --collect.auto_increment.columns + # - --collect.binlog_size + # - --collect.engine_innodb_status + # - --collect.engine_tokudb_status + # - --collect.global_status + # - --collect.global_variables + # - --collect.info_schema.clientstats + # - --collect.info_schema.innodb_metrics + # - --collect.info_schema.innodb_tablespaces + # - --collect.info_schema.innodb_cmp + # - --collect.info_schema.innodb_cmpmem + # - --collect.info_schema.processlist + # - --collect.info_schema.processlist.min_time + # - --collect.info_schema.query_response_time + # - --collect.info_schema.tables + # - --collect.info_schema.tables.databases + # - --collect.info_schema.tablestats + # - --collect.info_schema.userstats + # - --collect.perf_schema.eventsstatements + # - --collect.perf_schema.eventsstatements.digest_text_limit + # - --collect.perf_schema.eventsstatements.limit + # - --collect.perf_schema.eventsstatements.timelimit + # - --collect.perf_schema.eventswaits + # - --collect.perf_schema.file_events + # - --collect.perf_schema.file_instances + # - --collect.perf_schema.indexiowaits + # - --collect.perf_schema.tableiowaits + # - --collect.perf_schema.tablelocks + # - --collect.perf_schema.replication_group_member_stats + # - --collect.slave_status + # - --collect.slave_hosts + # - --collect.heartbeat + # - --collect.heartbeat.database + # - --collect.heartbeat.table + + livenessProbe: + enabled: true + ## + ## Initializing the database could take some time + initialDelaySeconds: 120 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + ## + ## Default Kubernetes values + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + # scrapeTimeout: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus diff --git a/nextcloud/charts/postgresql/.helmignore b/nextcloud/charts/postgresql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/nextcloud/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/nextcloud/charts/postgresql/Chart.yaml b/nextcloud/charts/postgresql/Chart.yaml new file mode 100644 index 0000000..adeccc6 --- /dev/null +++ b/nextcloud/charts/postgresql/Chart.yaml @@ -0,0 +1,25 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.9.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. +home: https://github.com/bitnami/charts/tree/master/bitnami/postgresql +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +- https://www.postgresql.org/ +version: 9.8.9 diff --git a/nextcloud/charts/postgresql/README.md b/nextcloud/charts/postgresql/README.md new file mode 100644 index 0000000..38f30de --- /dev/null +++ b/nextcloud/charts/postgresql/README.md @@ -0,0 +1,717 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```console +$ kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP 
server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. | `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-postgres-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case`postgres` is the admin username). | _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. 
| `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` | +| `postgresqlMaxConnections` | Maximum total connections | `nil` | +| `postgresqlPostgresConnectionLimit` | Maximum total connections for the postgres user | `nil` | +| `postgresqlDbUserConnectionLimit` | Maximum total connections for the non-admin user | `nil` | +| `postgresqlTcpKeepalivesInterval` | TCP keepalives interval | `nil` | +| `postgresqlTcpKeepalivesIdle` | TCP keepalives idle | `nil` | +| `postgresqlTcpKeepalivesCount` | TCP keepalives count | `nil` | +| `postgresqlStatementTimeout` | Statement timeout | `nil` | +| `postgresqlPghbaRemoveFilters` | Comma-separated list of patterns to remove from the pg_hba.conf file | `nil` | +| `customLivenessProbe` | Override default liveness probe | `nil` | +| `customReadinessProbe` | Override default readiness probe | `nil` | +| `audit.logHostname` | Add client hostnames to the log file | `false` | +| `audit.logConnections` | Add client log-in operations to the log file | `false` | +| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` | +| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `nil` | +| `audit.clientMinMessages` | Message log level to share with the user | `nil` | +| `audit.logLinePrefix` | Template string for the log line prefix | `nil` | +| `audit.logTimezone` | Timezone for the log timestamps | `nil` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. 
| `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. | `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `masterAsStandBy.enabled` | Whether to enable current cluster's Master as standby server of another cluster or not. | `false` | +| `masterAsStandBy.masterHost` | The Host of replication Master in the other cluster. | `nil` | +| `masterAsStandBy.masterPort ` | The Port of replication Master in the other cluster. 
| `nil` | +| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.resources` | CPU/Memory resource requests/limits override for slaves. Will fallback to `values.resources` if not defined. | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `slave.persistence.enabled` | Whether to enable slave replicas persistence | `true` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the pod | `1001` | +| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `containerSecurityContext.enabled` | Enable container security context | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | `true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. | `nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List | `nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | +| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template). | `nil` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. 
+ +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. + +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. 
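+
+The Parameters table also lists a `postgresqlExtendedConf` dict for the same purpose. As a rough sketch, and assuming it follows the same camelCase convention described above for `postgresqlConfiguration`, a couple of extra settings could be passed like this (the values are only examples):
+
+```yaml
+# Extra settings appended to the default configuration rather than replacing it.
+# Keys are assumed to use the camelCase form, as with postgresqlConfiguration.
+postgresqlExtendedConf:
+  maxConnections: "200"
+  sharedBuffers: "256MB"
+```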
+ +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +* First, create the secret with the cetificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +* Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). 
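+
+As a sketch, enabling the exporter together with a ServiceMonitor (assuming the Prometheus operator is installed; all values below are illustrative) could look like:
+
+```yaml
+# Start the Prometheus metrics exporter and create a ServiceMonitor for the operator.
+metrics:
+  enabled: true
+  serviceMonitor:
+    enabled: true
+    # namespace: monitoring   # optional: where the ServiceMonitor is created
+    # interval: 30s           # optional: falls back to the Prometheus default
+```
+
+If you scrape via pod annotations instead of the operator, the defaults listed for `metrics.service.annotations` (`prometheus.io/scrape` and `prometheus.io/port`) already point Prometheus at port 9187.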
+ +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please covert them to sql and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. 
If you pass any replication environment variable, it will be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
+- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,containerSecurityContext.enabled=false,shmVolume.chmod.enabled=false
+
+### Deploy chart using Docker Official PostgreSQL Image
+
+From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image.
+Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data dir cannot be the mount point directly; it has to be a subdirectory.
+
+```
+image.repository=postgres
+image.tag=10.6
+postgresqlDataDir=/data/pgdata
+persistence.mountPath=/data/
+```
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrade
+
+It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart:
+
+```bash
+$ helm upgrade my-release stable/postgresql \
+  --set postgresqlPassword=[POSTGRESQL_PASSWORD] \
+  --set replication.password=[REPLICATION_PASSWORD]
+```
+
+> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_ and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes.
+
+## 9.0.0
+
+In this version the chart was adapted to follow the Helm label best practices, see [PR 3021](https://github.com/bitnami/charts/pull/3021). That means backward compatibility is not guaranteed when upgrading the chart to this major version.
+
+As a workaround, you can delete the existing statefulset (using the `--cascade=false` flag, pods are not deleted) before upgrading the chart.
For example, this can be a valid workflow: + +- Deploy an old version (8.X.X) + +```console +$ helm install postgresql bitnami/postgresql --version 8.10.14 +``` + +- Old version is up and running + +```console +$ helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +postgresql default 1 2020-08-04 13:39:54.783480286 +0000 UTC deployed postgresql-8.10.14 11.8.0 + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +postgresql-postgresql-0 1/1 Running 0 76s +``` + +- The upgrade to the latest one (9.X.X) is going to fail + +```console +$ helm upgrade postgresql bitnami/postgresql +Error: UPGRADE FAILED: cannot patch "postgresql-postgresql" with kind StatefulSet: StatefulSet.apps "postgresql-postgresql" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden +``` + +- Delete the statefulset + +```console +$ kubectl delete statefulsets.apps --cascade=false postgresql-postgresql +statefulset.apps "postgresql-postgresql" deleted +``` + +- Now the upgrade works + +```console +$ helm upgrade postgresql bitnami/postgresql +$ helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +postgresql default 3 2020-08-04 13:42:08.020385884 +0000 UTC deployed postgresql-9.1.2 11.8.0 +``` + +- We can kill the existing pod and the new statefulset is going to create a new one: + +```console +$ kubectl delete pod postgresql-postgresql-0 +pod "postgresql-postgresql-0" deleted + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +postgresql-postgresql-0 1/1 Running 0 19s +``` + +Please, note that without the `--cascade=false` both objects (statefulset and pod) are going to be removed and both objects will be deployed again with the `helm upgrade` command + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). 
+ +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This releases make it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. + +#### Breaking changes + +- `affinty` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + +- Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running:
+
+```console
+$ kubectl get svc
+```
+
+- Install (not upgrade) the new version
+
+```console
+$ helm repo update
+$ helm install my-release bitnami/postgresql
+```
+
+- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
+
+```console
+$ kubectl exec -it NAME bash
+```
+
+- Once logged in, create a dump file of the previous database using `pg_dump`. For that, connect to the previous postgresql chart:
+
+```console
+$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
+```
+
+After running the above command you will be prompted for a password; this is the previous chart's password (`OLD_PASSWORD`).
+This operation could take some time depending on the database size.
+
+- Once you have the backup file, you can restore it with a command like the one below:
+
+```console
+$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
+```
+
+In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in the NOTES.txt).
+
+If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps below:
+
+```console
+$ psql -U postgres
+postgres=# drop database DATABASE_NAME;
+postgres=# create database DATABASE_NAME;
+postgres=# create user USER_NAME;
+postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD';
+postgres=# grant all privileges on database DATABASE_NAME to USER_NAME;
+postgres=# alter database DATABASE_NAME owner to USER_NAME;
+```
diff --git a/nextcloud/charts/postgresql/charts/common/.helmignore b/nextcloud/charts/postgresql/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/nextcloud/charts/postgresql/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/nextcloud/charts/postgresql/charts/common/Chart.yaml b/nextcloud/charts/postgresql/charts/common/Chart.yaml
new file mode 100644
index 0000000..5566cdc
--- /dev/null
+++ b/nextcloud/charts/postgresql/charts/common/Chart.yaml
@@ -0,0 +1,22 @@
+annotations:
+  category: Infrastructure
+apiVersion: v1
+appVersion: 0.8.1
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/master/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: common
+sources:
+- https://github.com/bitnami/charts
+- http://www.bitnami.com/
+version: 0.8.1
diff --git a/nextcloud/charts/postgresql/charts/common/README.md b/nextcloud/charts/postgresql/charts/common/README.md
new file mode 100644
index 0000000..9bcdfd6
--- /dev/null
+++ b/nextcloud/charts/postgresql/charts/common/README.md
@@ -0,0 +1,286 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+ +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------------|-----------------------------------------------------------------|------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|-----------------------------------------------------------------|----------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
+
+### Labels
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context |
+| `common.labels.matchLabels` | Return the labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
+
+### Names
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
+| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
+| `common.names.chart` | Chart name plus version | `.` Chart context |
+
+### Secrets
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $`, see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key` | Generate the secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"`, see [ExistingSecret](#existingsecret) for the structure. |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; `value` is the value to be rendered as a template and `context` is usually the chart context (`$` or `.`) |
+
+### Utils
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.utils.fieldToEnvVar` | Build an environment variable name given a field. | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value.
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. 
+ example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possiblity of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Notable changes + +N/A diff --git a/nextcloud/charts/postgresql/charts/common/templates/_affinities.tpl b/nextcloud/charts/postgresql/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..40f575c --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
}} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_capabilities.tpl b/nextcloud/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..143bef2 --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,33 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
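+
+Returns "extensions/v1beta1" on Kubernetes versions older than 1.14, and "networking.k8s.io/v1beta1" otherwise.
+Usage: {{ include "common.capabilities.ingress.apiVersion" . }}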
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_errors.tpl b/nextcloud/charts/postgresql/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..d6d3ec6 --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_errors.tpl @@ -0,0 +1,20 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: you must provide your current passwords when upgrade the release%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_images.tpl b/nextcloud/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..aafde9f --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_labels.tpl b/nextcloud/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_names.tpl b/nextcloud/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_secrets.tpl b/nextcloud/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..8eee91d --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
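+
+Illustrative example of the values structure a caller might pass as "existingSecret"
+(see the ExistingSecret section of the library README):
+  existingSecret:
+    name: mySecret
+    keyMapping:
+      password: myPasswordKey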
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_storage.tpl b/nextcloud/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_tplvalues.tpl b/nextcloud/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_utils.tpl b/nextcloud/charts/postgresql/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..74774a3 --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,45 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . 
-}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_validations.tpl b/nextcloud/charts/postgresql/charts/common/templates/_validations.tpl new file mode 100644 index 0000000..05d1edb --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_validations.tpl @@ -0,0 +1,278 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . 
-}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s=$%s' to the command.%s" .valueKey .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} + +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/templates/_warnings.tpl b/nextcloud/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/nextcloud/charts/postgresql/charts/common/values.yaml b/nextcloud/charts/postgresql/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/nextcloud/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
+exampleValue: common-chart diff --git a/nextcloud/charts/postgresql/ci/commonAnnotations.yaml b/nextcloud/charts/postgresql/ci/commonAnnotations.yaml new file mode 100644 index 0000000..f697782 --- /dev/null +++ b/nextcloud/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,3 @@ +commonAnnotations: + helm.sh/hook: 'pre-install, pre-upgrade' + helm.sh/hook-weight: '-1' diff --git a/nextcloud/charts/postgresql/ci/default-values.yaml b/nextcloud/charts/postgresql/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/nextcloud/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/nextcloud/charts/postgresql/ci/shmvolume-disabled-values.yaml b/nextcloud/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 0000000..347d3b4 --- /dev/null +++ b/nextcloud/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/nextcloud/charts/postgresql/files/README.md b/nextcloud/charts/postgresql/files/README.md new file mode 100644 index 0000000..1813a2f --- /dev/null +++ b/nextcloud/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. diff --git a/nextcloud/charts/postgresql/files/conf.d/README.md b/nextcloud/charts/postgresql/files/conf.d/README.md new file mode 100644 index 0000000..184c187 --- /dev/null +++ b/nextcloud/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/nextcloud/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/nextcloud/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..cba3809 --- /dev/null +++ b/nextcloud/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. 
\ No newline at end of file diff --git a/nextcloud/charts/postgresql/requirements.lock b/nextcloud/charts/postgresql/requirements.lock new file mode 100644 index 0000000..ee4ad0e --- /dev/null +++ b/nextcloud/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.8.1 +digest: sha256:0ad66414833efe58cf2fa5e725687e015940f3b9280f9f6ac2687c6686e2459e +generated: "2020-10-06T05:32:15.133183533Z" diff --git a/nextcloud/charts/postgresql/requirements.yaml b/nextcloud/charts/postgresql/requirements.yaml new file mode 100644 index 0000000..2c28bfe --- /dev/null +++ b/nextcloud/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami diff --git a/nextcloud/charts/postgresql/templates/NOTES.txt b/nextcloud/charts/postgresql/templates/NOTES.txt new file mode 100644 index 0000000..596e969 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,59 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} + +{{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . 
}}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- include "common.warnings.rollingTag" .Values.image -}} + +{{- $passwordValidationErrors := include "common.validations.values.postgresql.passwords" (dict "secret" (include "postgresql.fullname" .) "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $passwordValidationErrors) "context" $) -}} diff --git a/nextcloud/charts/postgresql/templates/_helpers.tpl b/nextcloud/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 0000000..b6a683a --- /dev/null +++ b/nextcloud/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,488 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if 
.Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if we should use an existingSecret. +*/}} +{{- define "postgresql.useExistingSecret" -}} +{{- if or .Values.global.postgresql.existingSecret .Values.existingSecret -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if not (include "postgresql.useExistingSecret" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. 
+*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificates files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. +*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/nextcloud/charts/postgresql/templates/configmap.yaml b/nextcloud/charts/postgresql/templates/configmap.yaml new file mode 100644 index 0000000..bc78771 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/nextcloud/charts/postgresql/templates/extended-config-configmap.yaml b/nextcloud/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 0000000..c679380 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/extra-list.yaml b/nextcloud/charts/postgresql/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/initialization-configmap.yaml b/nextcloud/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 0000000..2652ce7 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/metrics-configmap.yaml b/nextcloud/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 0000000..6216eca --- /dev/null +++ b/nextcloud/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/metrics-svc.yaml b/nextcloud/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 0000000..9181ac8 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/networkpolicy.yaml b/nextcloud/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 0000000..d3f1488 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,38 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + role: slave + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes + - ports: + - port: 9187 + {{- end }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/podsecuritypolicy.yaml b/nextcloud/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..4a12da4 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/prometheusrule.yaml b/nextcloud/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 0000000..0afd8f4 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/role.yaml b/nextcloud/charts/postgresql/templates/role.yaml new file mode 100644 index 0000000..24148aa --- /dev/null +++ b/nextcloud/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . }} + {{- end }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/rolebinding.yaml b/nextcloud/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 0000000..a105fb4 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . 
) .Values.serviceAccount.name }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/secrets.yaml b/nextcloud/charts/postgresql/templates/secrets.yaml new file mode 100644 index 0000000..1aef20b --- /dev/null +++ b/nextcloud/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,21 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/nextcloud/charts/postgresql/templates/serviceaccount.yaml b/nextcloud/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 0000000..1e2a1f2 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/servicemonitor.yaml b/nextcloud/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 0000000..e118002 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/statefulset-slaves.yaml b/nextcloud/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 0000000..2f5fbff --- /dev/null +++ b/nextcloud/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,403 @@ +{{- if .Values.replication.enabled }} +{{- $slaveResources := coalesce .Values.slave.resources .Values.resources -}} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.slave.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . 
}} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if $slaveResources }} + resources: {{- toYaml $slaveResources | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.postgresqlMaxConnections }} + - name: POSTGRESQL_MAX_CONNECTIONS + value: {{ .Values.postgresqlMaxConnections | quote }} + {{- end }} + {{- if .Values.postgresqlPostgresConnectionLimit }} + - name: POSTGRESQL_POSTGRES_CONNECTION_LIMIT + value: {{ .Values.postgresqlPostgresConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlDbUserConnectionLimit }} + - name: POSTGRESQL_USERNAME_CONNECTION_LIMIT + value: {{ .Values.postgresqlDbUserConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesInterval }} + - name: POSTGRESQL_TCP_KEEPALIVES_INTERVAL + value: {{ .Values.postgresqlTcpKeepalivesInterval | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesIdle }} + - name: POSTGRESQL_TCP_KEEPALIVES_IDLE + value: {{ .Values.postgresqlTcpKeepalivesIdle | quote }} + {{- end }} + {{- if .Values.postgresqlStatementTimeout }} + - name: POSTGRESQL_STATEMENT_TIMEOUT + value: {{ .Values.postgresqlStatementTimeout | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeealivesCount }} + - name: POSTGRESQL_TCP_KEEPALIVES_COUNT + value: {{ .Values.postgresqlTcpKeealivesCount | quote }} + {{- end }} + {{- if .Values.postgresqlPghbaRemoveFilters }} + - name: POSTGRESQL_PGHBA_REMOVE_FILTERS + value: {{ .Values.postgresqlPghbaRemoveFilters | quote }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "common.tplvalues.render" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . 
}} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if or (not .Values.persistence.enabled) (not .Values.slave.persistence.enabled) }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if and .Values.persistence.enabled .Values.slave.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/statefulset.yaml b/nextcloud/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 0000000..563eb32 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,580 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . 
}} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.masterAsStandBy.enabled }} + - name: POSTGRES_MASTER_HOST + value: {{ .Values.masterAsStandBy.masterHost }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ .Values.masterAsStandBy.masterPort | quote }} + {{- end }} + {{- if or .Values.replication.enabled .Values.masterAsStandBy.enabled }} + - name: POSTGRES_REPLICATION_MODE + {{- if .Values.masterAsStandBy.enabled }} + value: "slave" + {{- else }} + value: "master" + {{- end }} + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) | quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end }} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote }} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.postgresqlMaxConnections }} + - name: POSTGRESQL_MAX_CONNECTIONS + value: {{ .Values.postgresqlMaxConnections | quote }} + {{- end }} + {{- if .Values.postgresqlPostgresConnectionLimit }} + - name: POSTGRESQL_POSTGRES_CONNECTION_LIMIT + value: {{ .Values.postgresqlPostgresConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlDbUserConnectionLimit }} + - name: POSTGRESQL_USERNAME_CONNECTION_LIMIT + value: {{ .Values.postgresqlDbUserConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesInterval }} + - name: POSTGRESQL_TCP_KEEPALIVES_INTERVAL + value: {{ .Values.postgresqlTcpKeepalivesInterval | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesIdle }} + - name: POSTGRESQL_TCP_KEEPALIVES_IDLE + value: {{ .Values.postgresqlTcpKeepalivesIdle | quote }} + {{- end }} + {{- if .Values.postgresqlStatementTimeout }} + - name: POSTGRESQL_STATEMENT_TIMEOUT + value: {{ .Values.postgresqlStatementTimeout | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeealivesCount }} + - name: POSTGRESQL_TCP_KEEPALIVES_COUNT + value: {{ .Values.postgresqlTcpKeealivesCount | quote }} + {{- end }} + {{- if .Values.postgresqlPghbaRemoveFilters }} + - name: POSTGRESQL_PGHBA_REMOVE_FILTERS + value: {{ .Values.postgresqlPghbaRemoveFilters | quote }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "common.tplvalues.render" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: {{- omit .Values.metrics.securityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) 
}} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . 
}} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/svc-headless.yaml b/nextcloud/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 0000000..0d50de1 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + type: ClusterIP + clusterIP: None + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other Postgresql pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/nextcloud/charts/postgresql/templates/svc-read.yaml b/nextcloud/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 0000000..5ee051c --- /dev/null +++ b/nextcloud/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "common.tplvalues.render" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: slave +{{- end }} diff --git a/nextcloud/charts/postgresql/templates/svc.yaml b/nextcloud/charts/postgresql/templates/svc.yaml new file mode 100644 index 0000000..3dbfaa1 --- /dev/null +++ b/nextcloud/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "common.tplvalues.render" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master diff --git a/nextcloud/charts/postgresql/values-production.yaml b/nextcloud/charts/postgresql/values-production.yaml new file mode 100644 index 0000000..3e144c1 --- /dev/null +++ b/nextcloud/charts/postgresql/values-production.yaml @@ -0,0 +1,711 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r48 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + ## + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +## +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + ## + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + ## + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + ## + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret +## + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. +## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## Configure current cluster's master server to be the standby server in other cluster. +## This will allow cross cluster replication and provide cross cluster high availability. +## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. +## +masterAsStandBy: + enabled: false + # masterHost: + # masterPort: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." 
+ +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## Audit settings +## https://github.com/bitnami/bitnami-docker-postgresql#auditing +## +audit: + ## Log client hostnames + ## + logHostname: false + ## Log connections to the server + ## + logConnections: false + ## Log disconnections + ## + logDisconnections: false + ## Operation to audit using pgAudit (default if not set) + ## + pgAuditLog: "" + ## Log catalog using pgAudit + ## + pgAuditLogCatalog: "off" + ## Log level for clients + ## + clientMinMessages: error + ## Template for log line prefix (default if not set) + ## + logLinePrefix: "" + ## Log timezone + ## + logTimezone: "" + +## Shared preload libraries +## +postgresqlSharedPreloadLibraries: "pgaudit" + +## Maximum total connections +## +postgresqlMaxConnections: + +## Maximum connections for the postgres user +## +postgresqlPostgresConnectionLimit: + +## Maximum connections for the created user +## +postgresqlDbUserConnectionLimit: + +## TCP keepalives interval +## +postgresqlTcpKeepalivesInterval: + +## TCP keepalives idle +## +postgresqlTcpKeepalivesIdle: + +## TCP keepalives count +## +postgresqlTcpKeepalivesCount: + +## Statement timeout +## +postgresqlStatementTimeout: + +## Remove pg_hba.conf lines with the following comma-separated patterns +## (cannot be used with custom pg_hba.conf) +## +postgresqlPghbaRemoveFilters: + +## PostgreSQL service configuration +## +service: + ## PosgresSQL service type + ## + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + ## + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + ## + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + 
extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + + # Override the resource configuration for slave + resources: {} + # requests: + # memory: 256Mi + # cpu: 250m + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Custom Liveness probe +## +customLivenessProbe: {} + +## Custom Rediness probe +## +customReadinessProbe: {} + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + ## + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r242 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/nextcloud/charts/postgresql/values.schema.json b/nextcloud/charts/postgresql/values.schema.json new file mode 100644 index 0000000..7b5e2ef --- /dev/null +++ b/nextcloud/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to 
RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/nextcloud/charts/postgresql/values.yaml b/nextcloud/charts/postgresql/values.yaml new file mode 100644 index 0000000..7b6a6fe --- /dev/null +++ b/nextcloud/charts/postgresql/values.yaml @@ -0,0 +1,728 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r48 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + ## + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +## +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + ## + synchronousCommit: 'off' + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + ## + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + ## + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret +## + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## Configure current cluster's master server to be the standby server in other cluster. +## This will allow cross cluster replication and provide cross cluster high availability. +## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. +## +masterAsStandBy: + enabled: false + # masterHost: + # masterPort: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Audit settings +## https://github.com/bitnami/bitnami-docker-postgresql#auditing +## +audit: + ## Log client hostnames + ## + logHostname: false + ## Log connections to the server + ## + logConnections: false + ## Log disconnections + ## + logDisconnections: false + ## Operation to audit using pgAudit (default if not set) + ## + pgAuditLog: "" + ## Log catalog using pgAudit + ## + pgAuditLogCatalog: "off" + ## Log level for clients + ## + clientMinMessages: error + ## Template for log line prefix (default if not set) + ## + logLinePrefix: "" + ## Log timezone + ## + logTimezone: "" + +## Shared preload libraries +## +postgresqlSharedPreloadLibraries: "pgaudit" + +## Maximum total connections +## +postgresqlMaxConnections: + +## Maximum connections for the postgres user +## +postgresqlPostgresConnectionLimit: + +## Maximum connections for the created user +## +postgresqlDbUserConnectionLimit: + +## TCP keepalives interval +## +postgresqlTcpKeepalivesInterval: + +## TCP keepalives idle +## +postgresqlTcpKeepalivesIdle: + +## TCP keepalives count +## +postgresqlTcpKeepalivesCount: + +## Statement timeout +## +postgresqlStatementTimeout: + +## Remove pg_hba.conf lines with the following comma-separated patterns +## (cannot be used with custom pg_hba.conf) +## +postgresqlPghbaRemoveFilters: + +## Optional duration in seconds the pod needs to terminate 
gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: '' + server: '' + port: '' + prefix: '' + suffix: '' + baseDN: '' + bindDN: '' + bind_password: + search_attr: '' + search_filter: '' + scheme: '' + tls: false + +## PostgreSQL service configuration +## +service: + ## PosgresSQL service type + ## + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ ## + subPath: '' + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + ## + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + ## + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + extraInitContainers: [] + + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + + # Override the resource configuration for slave + resources: {} + # requests: + # memory: 256Mi + # cpu: 250m + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. 
Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Custom Liveness probe +## +customLivenessProbe: {} + +## Custom Rediness probe +## +customReadinessProbe: {} + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. + preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: '' + # + # Certificate filename + certFilename: '' + # + # Certificate Key filename + certKeyFilename: '' + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9187' + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
+ ## + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r242 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Array with extra yaml to deploy with the chart. Evaluated as a template +## +extraDeploy: [] diff --git a/nextcloud/charts/redis/.helmignore b/nextcloud/charts/redis/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/nextcloud/charts/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/nextcloud/charts/redis/Chart.yaml b/nextcloud/charts/redis/Chart.yaml new file mode 100644 index 0000000..5e3983d --- /dev/null +++ b/nextcloud/charts/redis/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 6.0.8 +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/redis +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +- http://redis.io/ +version: 11.0.5 diff --git a/nextcloud/charts/redis/README.md b/nextcloud/charts/redis/README.md new file mode 100644 index 0000000..58fe239 --- /dev/null +++ b/nextcloud/charts/redis/README.md @@ -0,0 +1,667 @@ +# Redis + +[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +## TL;DR + +```bash +# Testing configuration +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/redis +``` + +```bash +# Production configuration +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/redis --values values-production.yaml +``` + +## Introduction + +This chart bootstraps a [Redis](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +### Choose between Redis Helm Chart and Redis Cluster Helm Chart + +You can choose any of the two Redis Helm charts for deploying a Redis cluster. +While [Redis Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) will deploy a master-slave cluster using Redis Sentinel, the [Redis Cluster Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) will deploy a Redis Cluster topology with sharding. +The main features of each chart are the following: + +| Redis | Redis Cluster | +|-----------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![Redis Topology](img/redis-topology.png) | ![Redis Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/redis +``` + +The command deploys Redis on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the Redis chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | Redis password (overrides `password`) | `nil` | +| `image.registry` | Redis Image registry | `docker.io` | +| `image.repository` | Redis Image name | `bitnami/redis` | +| `image.tag` | Redis Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `2` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | Redis password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common Redis node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.runAsUser` | User ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.sysctls` | Set namespaced sysctls for the container (both redis master and slave pods) | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Redis exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | 
Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | {} | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.podLabels` | Additional labels for Redis master pod | {} | +| `master.podAnnotations` | Additional annotations for Redis master pod | {} | +| `master.extraEnvVars` | Additional Environement Variables passed to the pod of the master's stateful set set | `[]` +| `master.extraEnvVarCMs` | Additional Environement Variables ConfigMappassed to the pod of the master's stateful set set | `[]` +| `master.extraEnvVarsSecret` | Additional Environement Variables Secret passed to the master's stateful set | `[]` +| `podDisruptionBudget.enabled` | Pod Disruption Budget toggle | `false` | +| `podDisruptionBudget.minAvailable` | Minimum available pods | `1` | +| `podDisruptionBudget.maxUnavailable` | Maximum unavailable pods | `nil` | +| `redisPort` | Redis port (in both master and slaves) | `6379` | +| `tls.enabled` | Enable TLS support for replication traffic | `false` | +| `tls.authClients` | Require clients to authenticate or not | `true` | +| `tls.certificatesSecret` | Name of the secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `nil` | +| `tls.certKeyFilename` | Certificate key filename | `nil` | +| `tls.certCAFilename` | CA Certificate filename |`nil` | +| `tls.dhParamsFilename` | DH params (in order to support DH based ciphers) |`nil` | +| `master.command` | Redis master entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | +| `master.preExecCmds` | Text to inset into the startup script immediately prior to `master.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `master.configmap` | Additional Redis configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of Redis commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | Redis master additional command line flags | [] | +| `master.nodeSelector` | Redis master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for Redis master pod assignment | [] | +| `master.affinity` | Affinity settings for Redis master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `5` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.shareProcessNamespace` | Redis Master pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. 
| `false` | +| `master.priorityClassName` | Redis Master pod priorityClassName | {} | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.command` | Redis slave entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | +| `slave.preExecCmds` | Text to inset into the startup script immediately prior to `slave.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `slave.configmap` | Additional Redis configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of Redis commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | Redis slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `5` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `1` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | +| `slave.shareProcessNamespace` | Redis slave pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. | `false` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.extraEnvVars` | Additional Environement Variables passed to the pod of the slave's stateful set set | `[]` +| `slave.extraEnvVarCMs` | Additional Environement Variables ConfigMappassed to the pod of the slave's stateful set set | `[]` +| `masslaveter.extraEnvVarsSecret` | Additional Environement Variables Secret passed to the slave's stateful set | `[]` +| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` | +| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.tolerations` | Toleration labels for Redis slave pod assignment | [] | +| `slave.spreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for Redis slave pod | {} | +| `slave.priorityClassName` | Redis Slave pod priorityClassName | {} | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | Redis Sentinel port | `26379` | +| `sentinel.configmap` | Additional Redis configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for Redis read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for 
Redis sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | Redis sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | Redis Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | Redis Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + bitnami/redis +``` + +The above command sets the Redis server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example,
+
+```bash
+$ helm install my-release -f values.yaml bitnami/redis
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using the chart defaults causes pod failure for the Redis pod as it attempts to write to the `/bitnami` directory. Consider installing Redis with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information.
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Production configuration
+
+This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Number of slaves:
+```diff
+- cluster.slaveCount: 2
++ cluster.slaveCount: 3
+```
+
+- Enable NetworkPolicy:
+```diff
+- networkPolicy.enabled: false
++ networkPolicy.enabled: true
+```
+
+- Start a side-car prometheus exporter:
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+### Change Redis version
+
+To modify the Redis version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/redis/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters.
+
+### Cluster topologies
+
+#### Default: Master-Slave
+
+When installing the chart with `cluster.enabled=true`, it will deploy a Redis master StatefulSet (only one master node allowed) and a Redis slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed:
+
+ - Redis Master service: Points to the master, where read-write operations can be performed
+ - Redis Slave service: Points to the slaves, where only read operations are allowed.
+
+In case the master crashes, the slaves will wait until the master node is respawned by the Kubernetes Controller Manager.
+
+#### Master-Slave with Sentinel
+
+When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis master StatefulSet (only one master allowed) and a Redis slave StatefulSet. In this case, the pods will contain an extra container with Redis Sentinel. This container will form a cluster of Redis Sentinel nodes, which will promote a new master in case the current one fails. In addition to this, only one service is exposed:
+
+ - Redis service: Exposes port 6379 for Redis read-only operations and port 26379 for accessing Redis Sentinel.
+
+For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis Sentinel cluster and query the current master using the command below (using redis-cli or similar):
+
+```
+SENTINEL get-master-addr-by-name <master-set-name>
+```
+This command will return the address of the current master, which can be accessed from inside the cluster.
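+
+As an illustration, one possible values override for this topology might look like the following sketch. It only uses parameters from the table above; `mymaster` and a quorum of `2` are the chart defaults, so adjust the values to your environment:
+
+```yaml
+# Sketch of a Sentinel-enabled deployment (parameters documented in the table above)
+cluster:
+  enabled: true
+  slaveCount: 3
+sentinel:
+  enabled: true
+  masterSet: mymaster   # name to pass to SENTINEL get-master-addr-by-name
+  quorum: 2
+```
+
+With a release deployed like this, running `SENTINEL get-master-addr-by-name mymaster` through `redis-cli` against port 26379 of the exposed service returns the host and port of the current master.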
+
+In case the current master crashes, the Sentinel containers will elect a new master node.
+
+### Using password file
+To use a password file for Redis you need to create a secret containing the password.
+
+> *NOTE*: The file containing the password must be called `redis-password`.
+
+Then deploy the Helm Chart using the secret name as a parameter:
+
+```console
+usePassword=true
+usePasswordFile=true
+existingSecret=redis-password-file
+sentinel.enabled=true
+metrics.enabled=true
+```
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of the secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+- `tls.certCAFilename`: CA Certificate filename. No defaults.
+
+For example:
+
+First, create the secret with the certificate files:
+
+```console
+kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem
+```
+
+Then, use the following parameters:
+
+```console
+tls.enabled="true"
+tls.certificatesSecret="certificates-tls-secret"
+tls.certFilename="cert.pem"
+tls.certKeyFilename="cert.key"
+tls.certCAFilename="ca.pem"
+```
+
+> **Note TLS and Prometheus Metrics**: The current version of the Redis Metrics Exporter (v1.6.1 at the time of writing) does not fully support the use of TLS. With both features enabled, the metrics pod is likely to not work as expected. See Redis Metrics Exporter issue [387](https://github.com/oliver006/redis_exporter/issues/387) for more information.
+
+### Metrics
+
+The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint.
+
+### Host Kernel Settings
+Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages.
+To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example:
+```
+sysctlImage:
+  enabled: true
+  mountHostSys: true
+  command:
+    - /bin/sh
+    - -c
+    - |-
+      install_packages procps
+      sysctl -w net.core.somaxconn=10000
+      echo never > /host-sys/kernel/mm/transparent_hugepage/enabled
+```
+
+Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example:
+
+```yaml
+securityContext:
+  sysctls:
+  - name: net.core.somaxconn
+    value: "10000"
+```
+
+Note that this will not disable transparent huge pages.
+
+## Persistence
+
+By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation.
+
+### Existing PersistentVolumeClaim
+
+1. Create the PersistentVolume
+2. Create the PersistentVolumeClaim
+3. Install the chart
+
+```bash
+$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/redis
+```
+
+## Backup and restore
+
+### Backup
+
+To perform a backup, you will need to connect to one of the nodes and execute:
+
+```bash
+$ kubectl exec -it my-redis-master-0 bash
+
+$ redis-cli
+127.0.0.1:6379> auth your_current_redis_password
+OK
+127.0.0.1:6379> save
+OK
+```
+
+Then you will need to get the created dump file from the redis node:
+
+```bash
+$ kubectl cp my-redis-master-0:/data/dump.rdb dump.rdb -c redis
+```
+
+### Restore
+
+To restore in a new cluster, you will need to change a parameter in the redis.conf file and then upload the `dump.rdb` to the volume.
+
+Follow these steps:
+
+- First, set the `appendonly` parameter to `no` in `values.yaml` (if it is already `no`, you can skip this step).
+
+```yaml
+configmap: |-
+  # Disable AOF https://redis.io/topics/persistence#append-only-file
+  appendonly no
+  # Disable automatic RDB snapshots
+  save ""
+```
+
+- Start the new cluster to create the PVCs. For example:
+
+```bash
+helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3
+```
+
+- Now that the PVCs have been created, delete the release and copy the `dump.rdb` onto the persisted data using a helper pod.
+
+```
+$ helm delete new-redis
+
+$ kubectl run --generator=run-pod/v1 -i --rm --tty volpod --overrides='
+{
+    "apiVersion": "v1",
+    "kind": "Pod",
+    "metadata": {
+        "name": "redisvolpod"
+    },
+    "spec": {
+        "containers": [{
+            "command": [
+                "tail",
+                "-f",
+                "/dev/null"
+            ],
+            "image": "bitnami/minideb",
+            "name": "mycontainer",
+            "volumeMounts": [{
+                "mountPath": "/mnt",
+                "name": "redisdata"
+            }]
+        }],
+        "restartPolicy": "Never",
+        "volumes": [{
+            "name": "redisdata",
+            "persistentVolumeClaim": {
+                "claimName": "redis-data-new-redis-master-0"
+            }
+        }]
+    }
+}' --image="bitnami/minideb"
+
+$ kubectl cp dump.rdb redisvolpod:/mnt/dump.rdb
+$ kubectl delete pod volpod
+```
+
+- Start the cluster again:
+
+```
+helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3
+```
+
+## NetworkPolicy
+
+To enable network policy for Redis, install
+[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin),
+and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting
+the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+    kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+
+With NetworkPolicy enabled, only pods with the generated client label will be
+able to connect to Redis. This label will be displayed in the output
+after a successful install.
+
+With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to Redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in the matched namespace.
+For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true`, the fields should be set as follows:
+
+```
+networkPolicy:
+  enabled: true
+  ingressNSMatchLabels:
+    redis: external
+  ingressNSPodMatchLabels:
+    redis-client: "true"
+```
+
+## Upgrading an existing Release to a new major version
+
+A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
+incompatible breaking change that requires manual action.
+
+### To 11.0.0
+
+When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then restore the data in this new version.
+
+### To 10.0.0
+
+For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable it to account for the following cases:
+* Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced.
+* Where redis clients need to be updated to support sentinel authentication.
+
+If using a master/slave topology, or with `usePassword: false`, no action is required.
+
+### To 8.0.18
+
+For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details.
+
+### To 7.0.0
+
+This version causes a change in the Redis Master StatefulSet definition, so the command `helm upgrade` would not work out of the box. As an alternative, one of the following could be done:
+
+ - Recommended: Create a clone of the Redis Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC.
+
+   ```
+   helm install my-release bitnami/redis --set persistence.existingClaim=<cloned-pvc-name>
+   ```
+
+ - Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis Master StatefulSet. As a consequence, the following commands can be used to upgrade the release:
+
+   ```
+   helm delete --purge <release-name>
+   helm install <release-name> bitnami/redis
+   ```
+
+Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as is done for the master.
+
+Some values have changed as well:
+
+ - `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves)
+ - `master.securityContext` and `slave.securityContext` have been changed to `securityContext` (same values for both master and slaves)
+
+By default, the upgrade will not change the cluster topology. In case you want to use Redis Sentinel, you must explicitly set `sentinel.enabled` to `true`.
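+
+As a rough sketch of that rename (the values shown are simply the chart defaults from the parameters table), settings that used to live under `master.port`/`slave.port` and `master.securityContext`/`slave.securityContext` now collapse into shared top-level keys:
+
+```yaml
+# Pre-7.0.0 layout (sketch):
+#   master:
+#     port: 6379
+#     securityContext:
+#       enabled: true
+#   slave:
+#     port: 6379
+#
+# From 7.0.0 onwards, one set of values applies to both master and slave pods:
+redisPort: 6379
+securityContext:
+  enabled: true
+  fsGroup: 1001
+  runAsUser: 1001
+```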
+
+### To 6.0.0
+
+Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most deployments). If your installation still requires that init container, execute `helm upgrade` with `--set volumePermissions.enabled=true`.
+
+### To 5.0.0
+
+The default image in this release may be switched out for any image containing the `redis-server`
+and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command`
+must be specified.
+
+#### Breaking changes
+- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`.
+- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values.
+- `master.persistence.path` now defaults to `/data`.
+
+### 4.0.0
+
+This version removes the `chart` label from the `spec.selector.matchLabels`
+which is immutable since `StatefulSet apps/v1beta2`. It had been inadvertently
+added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726.
+
+It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set.
+
+Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable.
+
+In order to upgrade, delete the Redis StatefulSet before upgrading:
+```bash
+$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master
+```
+And edit the Redis slave (and metrics if enabled) deployment:
+```bash
+kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+```
+
+## Notable changes
+
+### 11.0.0
+When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled within the group. To avoid breaking compatibility, the settings for these nodes are given through the `slave.xxxx` parameters in `values.yaml`.
+
+### 9.0.0
+The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter).
+
+### 7.0.0
+In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all.
+
+This version also allows enabling Redis Sentinel containers inside of the Redis Pods (feature disabled by default). In case the master crashes, a new Redis node will be elected as master. In order to query the current master (no redis master service is exposed), you first need to query the Sentinel cluster.
Find more information [in this section](#master-slave-with-sentinel). diff --git a/nextcloud/charts/redis/ci/extra-flags-values.yaml b/nextcloud/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/nextcloud/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/nextcloud/charts/redis/ci/production-sentinel-values.yaml b/nextcloud/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..afa5c48 --- /dev/null +++ b/nextcloud/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,682 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + ## + priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. 
+    path: /data
+    ## The subdirectory of the volume to mount to, useful in dev environments
+    ## and one PV for multiple services.
+    subPath: ""
+    ## redis data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    accessModes:
+      - ReadWriteOnce
+    size: 8Gi
+    ## Persistent Volume selectors
+    ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector
+    matchLabels: {}
+    matchExpressions: {}
+
+  ## Update strategy, can be set to RollingUpdate or OnDelete (defaults to RollingUpdate).
+  ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+  statefulset:
+    updateStrategy: RollingUpdate
+    ## Partition update strategy
+    ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
+    # rollingUpdatePartition:
+
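+  ## For illustration only: a partitioned rolling update of the slave StatefulSet could be
+  ## requested with overrides along these lines (the partition value below is hypothetical):
+  # statefulset:
+  #   updateStrategy: RollingUpdate
+  #   rollingUpdatePartition: 2
+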
+## Prometheus Exporter / Metrics
+##
+metrics:
+  enabled: true
+
+  image:
+    registry: docker.io
+    repository: bitnami/redis-exporter
+    tag: 1.5.3-debian-10-r14
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+
+  ## Metrics exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  # resources: {}
+
+  ## Extra arguments for Metrics exporter, for example:
+  ## extraArgs:
+  ##   check-keys: myKey,myOtherKey
+  # extraArgs: {}
+
+  ## Metrics exporter pod Annotations and Labels
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "9121"
+  # podLabels: {}
+
+  # Enable this if you're using https://github.com/coreos/prometheus-operator
+  serviceMonitor:
+    enabled: false
+    ## Specify a namespace if needed
+    # namespace: monitoring
+    # fallback to the prometheus default unless specified
+    # interval: 10s
+    ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr)
+    ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1)
+    ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters)
+    selector:
+      prometheus: kube-prometheus
+
+  ## Custom PrometheusRule to be defined
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  prometheusRule:
+    enabled: false
+    additionalLabels: {}
+    namespace: ""
+    ## Redis prometheus rules
+    ## These are just example rules, please adapt them to your needs.
+    ## Make sure to constrain the rules to the current Redis service.
+    # rules:
+    #  - alert: RedisDown
+    #    expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0
+    #    for: 2m
+    #    labels:
+    #      severity: error
+    #    annotations:
+    #      summary: Redis instance {{ "{{ $labels.instance }}" }} down
+    #      description: Redis instance {{ "{{ $labels.instance }}" }} is down
+    #  - alert: RedisMemoryHigh
+    #    expr: >
+    #      redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100
+    #      /
+    #      redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"}
+    #      > 90 <= 100
+    #    for: 2m
+    #    labels:
+    #      severity: error
+    #    annotations:
+    #      summary: Redis instance {{ "{{ $labels.instance }}" }} is using too much memory
+    #      description: |
+    #        Redis instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory.
+    #  - alert: RedisKeyEviction
+    #    expr: |
+    #      increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0
+    #    for: 1s
+    #    labels:
+    #      severity: error
+    #    annotations:
+    #      summary: Redis instance {{ "{{ $labels.instance }}" }} has evicted keys
+    #      description: |
+    #        Redis instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes.
+    rules: []
+
+  ## Metrics exporter pod priorityClassName
+  # priorityClassName: {}
+  service:
+    type: ClusterIP
+    ## Use loadBalancerIP to request a specific static IP,
+    ## otherwise leave blank
+    # loadBalancerIP:
+    annotations: {}
+    labels: {}
+
+##
+## Init container parameters:
+## volumePermissions: Change the owner of the persistent volume mountpoint to runAsUser:fsGroup
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  resources: {}
+  # resources:
+  #   requests:
+  #     memory: 128Mi
+  #     cpu: 100m
+
+## Redis config file
+## ref: https://redis.io/topics/config
+##
+configmap: |-
+  # Enable AOF https://redis.io/topics/persistence#append-only-file
+  appendonly yes
+  # Disable RDB persistence, AOF persistence already enabled.
+  save ""
+
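+## For illustration only: if periodic RDB snapshots are preferred over pure AOF persistence,
+## the block above could instead carry standard redis.conf directives such as the following
+## (the snapshot thresholds below are hypothetical):
+# configmap: |-
+#   appendonly no
+#   save 900 1
+#   save 300 10
+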
+## Sysctl InitContainer
+## used to perform sysctl operations to modify kernel settings (sometimes needed to avoid warnings)
+sysctlImage:
+  enabled: false
+  command: []
+  registry: docker.io
+  repository: bitnami/minideb
+  tag: buster
+  pullPolicy: Always
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+  mountHostSys: false
+  resources: {}
+  # resources:
+  #   requests:
+  #     memory: 128Mi
+  #     cpu: 100m
+
+## PodSecurityPolicy configuration
+## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+##
+podSecurityPolicy:
+  ## Specifies whether a PodSecurityPolicy should be created
+  ##
+  create: false
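+
+## For illustration only: a minimal set of overrides for this bundled chart, using the
+## keys documented above (the secret name below is hypothetical):
+# usePassword: true
+# existingSecret: my-redis-secret
+# existingSecretPasswordKey: redis-password
+# master:
+#   persistence:
+#     size: 16Gi
+# metrics:
+#   serviceMonitor:
+#     enabled: true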
diff --git a/nextcloud/charts/redis/img/redis-cluster-topology.png b/nextcloud/charts/redis/img/redis-cluster-topology.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0a02a9f8835381302731c9cb000b2835a45e7c9
Binary files /dev/null and b/nextcloud/charts/redis/img/redis-cluster-topology.png differ
+{{/*
+Return the appropriate apiVersion for NetworkPolicy.
+*/}}
+{{- define "networkPolicy.apiVersion" -}}
+{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiGroup for PodSecurityPolicy.
+*/}}
+{{- define "podSecurityPolicy.apiGroup" -}}
+{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "policy" -}}
+{{- else -}}
+{{- print "extensions" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for PodSecurityPolicy.
+*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert file.
+*/}}
+{{- define "redis.tlsCert" -}}
+{{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert key file.
+*/}}
+{{- define "redis.tlsCertKey" -}}
+{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "redis.tlsCACert" -}}
+{{- required "Certificate CA filename is required when TLS is enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+
+{{/*
+Return the path to the DH params file.
+*/}}
+{{- define "redis.tlsDHParams" -}}
+{{- if .Values.tls.dhParamsFilename -}}
+{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "redis.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "redis.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password secret.
+*/}}
+{{- define "redis.secretName" -}}
+{{- if .Values.existingSecret -}}
+{{- printf "%s" .Values.existingSecret -}}
+{{- else -}}
+{{- printf "%s" (include "redis.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password key to be retrieved from Redis secret.
+*/}}
+{{- define "redis.secretPasswordKey" -}}
+{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}}
+{{- printf "%s" .Values.existingSecretPasswordKey -}}
+{{- else -}}
+{{- printf "redis-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return Redis password
+*/}}
+{{- define "redis.password" -}}
+{{- if not (empty .Values.global.redis.password) }}
+    {{- .Values.global.redis.password -}}
+{{- else if not (empty .Values.password) -}}
+    {{- .Values.password -}}
+{{- else -}}
+    {{- randAlphaNum 10 -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return sysctl image
+*/}}
+{{- define "redis.sysctl.image" -}}
+{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}}
+{{- $repositoryName := .Values.sysctlImage.repository -}}
+{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis - spreadConstrainsts K8s version */}} +{{- define "redis.validateValues.spreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.slave.spreadConstraints -}} +redis: spreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. 
+Usage: +{{ include "redis.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "redis.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/nextcloud/charts/redis/templates/configmap-scripts.yaml b/nextcloud/charts/redis/templates/configmap-scripts.yaml new file mode 100644 index 0000000..d78fe3d --- /dev/null +++ b/nextcloud/charts/redis/templates/configmap-scripts.yaml @@ -0,0 +1,349 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-scripts + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + + export REDIS_REPLICATION_MODE="slave" + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + if [[ ${BASH_REMATCH[2]} == "0" ]]; then + if [[ ! -f /data/redisboot.lock ]]; then + export REDIS_REPLICATION_MODE="master" + else + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli -a $REDIS_PASSWORD -h {{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} info" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h {{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -p {{ .Values.sentinel.port }} info" + fi + if [[ ! ($($sentinel_info_command)) ]]; then + export REDIS_REPLICATION_MODE="master" + rm /data/redisboot.lock + fi + fi + fi + fi + + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + echo "I am master" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + else + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli -a $REDIS_PASSWORD -h {{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h {{ template "redis.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + fi + + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "slave" ]]; then + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + else + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + fi + + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + + touch /data/redisboot.lock + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" + } + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ ! 
-f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + export REDIS_REPLICATION_MODE="slave" + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + if [[ ${BASH_REMATCH[2]} == "0" ]]; then + if [[ ! -f /data/sentinelboot.lock ]]; then + export REDIS_REPLICATION_MODE="master" + else + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli -a $REDIS_PASSWORD -h {{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} info" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h {{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -p {{ .Values.sentinel.port }} info" + fi + if [[ ! ($($sentinel_info_command)) ]]; then + export REDIS_REPLICATION_MODE="master" + rm /data/sentinelboot.lock + fi + fi + fi + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-node-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }}" + else + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli -a $REDIS_PASSWORD -h {{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h {{ template "redis.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}" + fi + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + touch /data/sentinelboot.lock + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} +{{- else }} + start-master.sh: | + #!/bin/bash + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if .Values.cluster.enabled }} + start-slave.sh: | + #!/bin/bash + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! 
-f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} + +{{- end -}} diff --git a/nextcloud/charts/redis/templates/configmap.yaml b/nextcloud/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..092fb94 --- /dev/null +++ b/nextcloud/charts/redis/templates/configmap.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{- tpl .Values.configmap . | nindent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{- tpl .Values.master.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{- tpl .Values.slave.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{- tpl .Values.sentinel.configmap . | nindent 4 }} +{{- end }} +{{- end }} diff --git a/nextcloud/charts/redis/templates/headless-svc.yaml b/nextcloud/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..549a05d --- /dev/null +++ b/nextcloud/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/nextcloud/charts/redis/templates/health-configmap.yaml b/nextcloud/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..35819e0 --- /dev/null +++ b/nextcloud/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,201 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} +{{- if .Values.usePassword }} + no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning) +{{- end }} + response=$( + timeout -s 3 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD $no_auth_warning \ +{{- end }} + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} +{{- if .Values.usePassword }} + no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning) +{{- end }} + response=$( + timeout -s 3 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD $no_auth_warning \ +{{- end }} + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . 
}} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} +{{- if .Values.usePassword }} + no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning) +{{- end }} + response=$( + timeout -s 3 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD $no_auth_warning \ +{{- end }} + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} +{{- if .Values.usePassword }} + no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning) +{{- end }} + response=$( + timeout -s 3 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD $no_auth_warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} +{{- if .Values.usePassword }} + no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning) +{{- end }} + response=$( + timeout -s 3 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD $no_auth_warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? 
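+ # Readiness requires both the local instance and the master to respond; a non-zero status from either check is propagated as the script's exit status.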
+ exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/nextcloud/charts/redis/templates/metrics-prometheus.yaml b/nextcloud/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..551059a --- /dev/null +++ b/nextcloud/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,33 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "metrics" + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/nextcloud/charts/redis/templates/metrics-svc.yaml b/nextcloud/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..f30e1fd --- /dev/null +++ b/nextcloud/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,31 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "metrics" + {{- if .Values.metrics.service.labels -}} + {{- toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{- toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/nextcloud/charts/redis/templates/networkpolicy.yaml b/nextcloud/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..c56a9d3 --- /dev/null +++ b/nextcloud/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,74 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/nextcloud/charts/redis/templates/pdb.yaml b/nextcloud/charts/redis/templates/pdb.yaml new file mode 100644 index 0000000..8021430 --- /dev/null +++ b/nextcloud/charts/redis/templates/pdb.yaml @@ -0,0 +1,21 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/nextcloud/charts/redis/templates/prometheusrule.yaml b/nextcloud/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..9076a97 --- /dev/null +++ b/nextcloud/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{- tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/nextcloud/charts/redis/templates/psp.yaml b/nextcloud/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..08e0840 --- /dev/null +++ b/nextcloud/charts/redis/templates/psp.yaml @@ -0,0 +1,43 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/nextcloud/charts/redis/templates/redis-master-statefulset.yaml b/nextcloud/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..36dc4f5 --- /dev/null +++ b/nextcloud/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,346 @@ +{{- if or (not .Values.cluster.enabled) (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master + {{- if .Values.master.podLabels }} + {{- toYaml .Values.master.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- toYaml .Values.master.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: {{- toYaml .Values.securityContext.sysctls | nindent 8 }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . 
}} + {{- if .Values.master.priorityClassName }} + priorityClassName: "{{ .Values.master.priorityClassName }}" + {{- end }} + {{- with .Values.master.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. 
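+ # Note: add1 only pads the probe's own timeout; the un-incremented value is what gets passed to the health script below.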
+ timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.master.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.master.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . }}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: {{- toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.master.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.master.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} + diff --git a/nextcloud/charts/redis/templates/redis-master-svc.yaml b/nextcloud/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..09eab2a --- /dev/null +++ b/nextcloud/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,40 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{- toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{- toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{- toYaml . | nindent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/nextcloud/charts/redis/templates/redis-node-statefulset.yaml b/nextcloud/charts/redis/templates/redis-node-statefulset.yaml new file mode 100644 index 0000000..ae4e6ce --- /dev/null +++ b/nextcloud/charts/redis/templates/redis-node-statefulset.yaml @@ -0,0 +1,450 @@ +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-node + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: node + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . 
}} + role: node + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: {{- toYaml .Values.securityContext.sysctls | nindent 8 }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + env: + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.slave.persistence.path }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: {{ template "sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.port | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.dhParams" . }} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.sentinel.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.sentinel.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . 
}} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/nextcloud/charts/redis/templates/redis-role.yaml b/nextcloud/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..c741268 --- /dev/null +++ b/nextcloud/charts/redis/templates/redis-role.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . }}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{- toYaml .Values.rbac.role.rules | nindent 2 }} +{{- end -}} +{{- end -}} diff --git a/nextcloud/charts/redis/templates/redis-rolebinding.yaml b/nextcloud/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..3657f14 --- /dev/null +++ b/nextcloud/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . 
}} +{{- end -}} diff --git a/nextcloud/charts/redis/templates/redis-serviceaccount.yaml b/nextcloud/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..5c9707f --- /dev/null +++ b/nextcloud/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/nextcloud/charts/redis/templates/redis-slave-statefulset.yaml b/nextcloud/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..8291e6a --- /dev/null +++ b/nextcloud/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,354 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: {{- toYaml .Values.securityContext.sysctls | nindent 8 }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . 
}} + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.slave.shareProcessNamespace }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-slave.sh + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.slave.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} + envFrom: + {{- if .Values.slave.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.slave.extraEnvVarsCM }} + {{- end }} + {{- if .Values.slave.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.slave.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . 
}} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . 
}} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/nextcloud/charts/redis/templates/redis-slave-svc.yaml b/nextcloud/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..dab36c3 --- /dev/null +++ b/nextcloud/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{- toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: {{- toYaml .Values.slave.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/nextcloud/charts/redis/templates/redis-with-sentinel-svc.yaml b/nextcloud/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..f587373 --- /dev/null +++ b/nextcloud/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,40 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{- toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: {{- toYaml .Values.sentinel.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/nextcloud/charts/redis/templates/secret.yaml b/nextcloud/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..4c39ffd --- /dev/null +++ b/nextcloud/charts/redis/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/nextcloud/charts/redis/values-production.yaml b/nextcloud/charts/redis/values-production.yaml new file mode 100644 index 0000000..e6bf6b4 --- /dev/null +++ b/nextcloud/charts/redis/values-production.yaml @@ -0,0 +1,784 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.8-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.8-debian-10-r1 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespaces. 
Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. + authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis + ## + preExecCmds: "" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. 
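As the comments above note, commands are disabled by renaming them to an empty string. A values override extending the defaults might look like this (the extra flag values are illustrative, not chart defaults):

```yaml
master:
  extraFlags:
    - "--maxmemory 256mb"               # illustrative memory cap
    - "--maxmemory-policy volatile-lru"
  disableCommands:                      # a user-supplied list replaces the default, so repeat FLUSHDB/FLUSHALL
    - FLUSHDB
    - FLUSHALL
    - CONFIG
```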
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
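For example, to keep a LoadBalancer-type master service internal, an annotation along these lines can be set; the annotation key is cloud specific, the AWS one is shown purely as an illustration:

```yaml
master:
  service:
    type: LoadBalancer
    annotations:
      # provider-specific key; AWS shown as an example only
      service.beta.kubernetes.io/aws-load-balancer-internal: "true"
    # loadBalancerSourceRanges: ["10.0.0.0/8"]   # optionally restrict client CIDRs
```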
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + ## + priorityClassName: {} + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
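Both the master and slave persistence blocks in this file accept an explicit storage class, size and selectors; a typical override, assuming a storage class named `local-path` exists in the cluster, might be:

```yaml
master:
  persistence:
    enabled: true
    storageClass: "local-path"   # assumed to exist; "-" disables dynamic provisioning
    size: 16Gi
slave:
  persistence:
    enabled: true
    storageClass: "local-path"
    size: 16Gi
```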
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis + ## + preExecCmds: "" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. 
+ path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.11.1-debian-10-r12 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current redis service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . 
}}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $labels.instance }}" }} down + # description: Redis instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 =< 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/nextcloud/charts/redis/values.schema.json b/nextcloud/charts/redis/values.schema.json new file mode 100644 index 0000000..3188d0c --- /dev/null +++ b/nextcloud/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "slave/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + 
"title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/nextcloud/charts/redis/values.yaml b/nextcloud/charts/redis/values.yaml new file mode 100644 index 0000000..e442d93 --- /dev/null +++ b/nextcloud/charts/redis/values.yaml @@ -0,0 +1,784 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.8-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.8-debian-10-r1 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
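When a role is needed, for instance to let the pods use a PodSecurityPolicy, the rules follow the usual RBAC role specification, as sketched below; the policy name is hypothetical:

```yaml
serviceAccount:
  create: true
rbac:
  create: true
  role:
    rules:
      - apiGroups: ["extensions"]
        resources: ["podsecuritypolicies"]
        verbs: ["use"]
        resourceNames: ["redis-restricted"]   # hypothetical PSP name
```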
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. + authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis + ## + preExecCmds: "" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. 
+ # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + ## + priorityClassName: {} + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis + ## + preExecCmds: "" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
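Putting this comment together with the probe notes below: a custom probe is only honoured when the built-in probe is disabled, and sharing the process namespace helps reap zombies left by long exec probes. A sketch for the slave pods:

```yaml
slave:
  shareProcessNamespace: true
  livenessProbe:
    enabled: false          # required, otherwise customLivenessProbe is ignored
  customLivenessProbe:
    tcpSocket:
      port: 6379
    initialDelaySeconds: 10
    periodSeconds: 5
```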
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
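A partitioned rolling update of the slave statefulset, for example to try a new image on the highest ordinals first, could be expressed as follows (the partition value is illustrative):

```yaml
slave:
  statefulset:
    updateStrategy: RollingUpdate
    rollingUpdatePartition: 1   # pods with ordinal >= 1 are updated, ordinal 0 is held back
```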
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.11.1-debian-10-r12 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current redis service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $labels.instance }}" }} down + # description: Redis instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 =< 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. 
+ # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/nextcloud/templates/NOTES.txt b/nextcloud/templates/NOTES.txt new file mode 100644 index 0000000..f1428c5 --- /dev/null +++ b/nextcloud/templates/NOTES.txt @@ -0,0 +1,94 @@ +{{- if or .Values.mariadb.enabled .Values.externalDatabase.host -}} + +{{- if empty .Values.nextcloud.host -}} +################################################################################# +### WARNING: You did not provide an external host in your 'helm install' call ### +################################################################################# + +This deployment will be incomplete until you configure nextcloud with a resolvable +host. To configure nextcloud with the URL of your service: + +1. Get the nextcloud URL by running: + + {{- if contains "NodePort" .Values.service.type }} + + export APP_PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nextcloud.fullname" . 
}} -o jsonpath="{.spec.ports[0].nodePort}") + export APP_HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + + {{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nextcloud.fullname" . }}' + + export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nextcloud.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + export APP_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "nextcloud.fullname" . }} -o jsonpath="{.data.nextcloud-password}" | base64 --decode) + {{- if .Values.mariadb.db.password }} + export APP_DATABASE_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "nextcloud.mariadb.fullname" . }} -o jsonpath="{.data.mariadb-password}" | base64 --decode) + {{- end }} + {{- end }} + +2. Complete your nextcloud deployment by running: + +{{- if .Values.mariadb.enabled }} + + helm upgrade {{ .Release.Name }} nextcloud/nextcloud \ + --set nextcloud.host=$APP_HOST,nextcloud.password=$APP_PASSWORD{{ if .Values.mariadb.db.password }},mariadb.db.password=$APP_DATABASE_PASSWORD{{ end }} +{{- else }} + + ## PLEASE UPDATE THE EXTERNAL DATABASE CONNECTION PARAMETERS IN THE FOLLOWING COMMAND AS NEEDED ## + + helm upgrade {{ .Release.Name }} nextcloud/nextcloud \ + --set nextcloud.password=$APP_PASSWORD,nextcloud.host=$APP_HOST,service.type={{ .Values.service.type }},mariadb.enabled=false{{- if not (empty .Values.externalDatabase.host) }},externalDatabase.host={{ .Values.externalDatabase.host }}{{- end }}{{- if not (empty .Values.externalDatabase.user) }},externalDatabase.user={{ .Values.externalDatabase.user }}{{- end }}{{- if not (empty .Values.externalDatabase.password) }},externalDatabase.password={{ .Values.externalDatabase.password }}{{- end }}{{- if not (empty .Values.externalDatabase.database) }},externalDatabase.database={{ .Values.externalDatabase.database }}{{- end }} +{{- end }} + +{{- else -}} +1. Get the nextcloud URL by running: + +{{- if eq .Values.service.type "ClusterIP" }} + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "nextcloud.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:8080/ + kubectl port-forward $POD_NAME 8080:80 +{{- else }} + + echo http://{{ .Values.nextcloud.host }}/ +{{- end }} + +2. Get your nextcloud login credentials by running: + + echo User: {{ .Values.nextcloud.username }} + echo Password: $(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "nextcloud.fullname" . }} -o jsonpath="{.data.nextcloud-password}" | base64 --decode) +{{- end }} + +{{- else -}} + +####################################################################################################### +## WARNING: You did not provide an external database host in your 'helm install' call ## +## Running Nextcloud with the integrated sqlite database is not recommended for production instances ## +####################################################################################################### + +For better performance etc. you have to configure nextcloud with a resolvable database +host. To configure nextcloud to use and external database host: + + +1. 
Complete your nextcloud deployment by running: + +{{- if contains "NodePort" .Values.service.type }} + export APP_HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nextcloud.fullname" . }}' + + export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nextcloud.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") +{{- else }} + + export APP_HOST=127.0.0.1 +{{- end }} + export APP_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "nextcloud.fullname" . }} -o jsonpath="{.data.nextcloud-password}" | base64 --decode) + + ## PLEASE UPDATE THE EXTERNAL DATABASE CONNECTION PARAMETERS IN THE FOLLOWING COMMAND AS NEEDED ## + + helm upgrade {{ .Release.Name }} nextcloud/nextcloud \ + --set nextcloud.password=$APP_PASSWORD,nextcloud.host=$APP_HOST,service.type={{ .Values.service.type }},mariadb.enabled=false{{- if not (empty .Values.externalDatabase.user) }},externalDatabase.user={{ .Values.externalDatabase.user }}{{- end }}{{- if not (empty .Values.externalDatabase.password) }},externalDatabase.password={{ .Values.externalDatabase.password }}{{- end }}{{- if not (empty .Values.externalDatabase.database) }},externalDatabase.database={{ .Values.externalDatabase.database }}{{- end }},externalDatabase.host=YOUR_EXTERNAL_DATABASE_HOST +{{- end }} diff --git a/nextcloud/templates/_helpers.tpl b/nextcloud/templates/_helpers.tpl new file mode 100644 index 0000000..87c6e91 --- /dev/null +++ b/nextcloud/templates/_helpers.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nextcloud.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "nextcloud.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nextcloud.mariadb.fullname" -}} +{{- printf "%s-%s" .Release.Name "mariadb" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* +Create a default fully qualified redis app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nextcloud.redis.fullname" -}} +{{- printf "%s-%s" .Release.Name "redis" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "nextcloud.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/nextcloud/templates/config.yaml b/nextcloud/templates/config.yaml new file mode 100644 index 0000000..7fb2ade --- /dev/null +++ b/nextcloud/templates/config.yaml @@ -0,0 +1,127 @@ +{{- if .Values.nextcloud.configs -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "nextcloud.fullname" . }}-config + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: +{{- range $key, $value := .Values.nextcloud.configs }} + {{ $key }}: |- +{{ $value | indent 4 }} +{{- end }} +{{- if .Values.nextcloud.defaultConfigs }} +{{- if index .Values.nextcloud.defaultConfigs ".htaccess" }} + .htaccess: |- + # line below if for Apache 2.4 + + Require all denied + + # line below if for Apache 2.2 + + deny from all + + # section for Apache 2.2 and 2.4 + + IndexIgnore * + +{{- end }} +{{- if index .Values.nextcloud.defaultConfigs "redis.config.php" }} + redis.config.php: |- + '\OC\Memcache\Redis', + 'memcache.locking' => '\OC\Memcache\Redis', + 'redis' => array( + 'host' => getenv('REDIS_HOST'), + 'port' => getenv('REDIS_HOST_PORT') ?: 6379, + ), + ); + } +{{- end }} +{{- if index .Values.nextcloud.defaultConfigs "apache-pretty-urls.config.php" }} + apache-pretty-urls.config.php: |- + '/', + ); +{{- end }} +{{- if index .Values.nextcloud.defaultConfigs "apcu.config.php" }} + apcu.config.php: |- + '\OC\Memcache\APCu', + ); +{{- end }} +{{- if index .Values.nextcloud.defaultConfigs "apps.config.php" }} + apps.config.php: |- + array ( + 0 => array ( + "path" => OC::$SERVERROOT."/apps", + "url" => "/apps", + "writable" => false, + ), + 1 => array ( + "path" => OC::$SERVERROOT."/custom_apps", + "url" => "/custom_apps", + "writable" => true, + ), + ), + ); +{{- end }} +{{- if index .Values.nextcloud.defaultConfigs "autoconfig.php" }} + autoconfig.php: |- + 'smtp', + 'mail_smtphost' => getenv('SMTP_HOST'), + 'mail_smtpport' => getenv('SMTP_PORT') ?: (getenv('SMTP_SECURE') ? 465 : 25), + 'mail_smtpsecure' => getenv('SMTP_SECURE') ?: '', + 'mail_smtpauth' => getenv('SMTP_NAME') && getenv('SMTP_PASSWORD'), + 'mail_smtpauthtype' => getenv('SMTP_AUTHTYPE') ?: 'LOGIN', + 'mail_smtpname' => getenv('SMTP_NAME') ?: '', + 'mail_smtppassword' => getenv('SMTP_PASSWORD') ?: '', + 'mail_from_address' => getenv('MAIL_FROM_ADDRESS'), + 'mail_domain' => getenv('MAIL_DOMAIN'), + ); + } +{{- end }} +{{- end }} +{{- end }} diff --git a/nextcloud/templates/cronjob.yaml b/nextcloud/templates/cronjob.yaml new file mode 100644 index 0000000..3a2eded --- /dev/null +++ b/nextcloud/templates/cronjob.yaml @@ -0,0 +1,75 @@ +{{- if .Values.cronjob.enabled }} +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: {{ template "nextcloud.fullname" . }}-cron + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + annotations: +{{ toYaml .Values.cronjob.annotations | indent 4 }} +spec: + schedule: "{{ .Values.cronjob.schedule }}" + concurrencyPolicy: Forbid + {{- with .Values.cronjob.failedJobsHistoryLimit }} + failedJobsHistoryLimit: {{ . 
}} + {{- end }} + {{- with .Values.cronjob.successfulJobsHistoryLimit }} + successfulJobsHistoryLimit: {{ . }} + {{- end }} + jobTemplate: + metadata: + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + spec: + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + spec: + restartPolicy: Never + {{- if (default .Values.image.pullSecrets .Values.cronjob.image.pullSecrets) }} + imagePullSecrets: + {{- range (default .Values.image.pullSecrets .Values.cronjob.image.pullSecrets) }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ default .Values.image.repository .Values.cronjob.image.repository }}:{{ default .Values.image.tag .Values.cronjob.image.tag }}" + imagePullPolicy: {{ default .Values.image.pullPolicy .Values.cronjob.image.pullPolicy }} + command: [ "curl" ] + args: + {{- if .Values.ingress.enabled }} + {{- if .Values.cronjob.curlInsecure }} + - "-k" + {{- end }} + - "--fail" + - "-L" + {{- if .Values.ingress.tls }} + - "https://{{ .Values.nextcloud.host }}/cron.php" + {{- else }} + - "http://{{ .Values.nextcloud.host }}/cron.php" + {{- end }} + {{- else }} + - "http://{{ template "nextcloud.fullname" . }}:{{ .Values.service.port }}/cron.php" + {{- end }} + resources: +{{ toYaml (default .Values.resources .Values.cronjob.resources) | indent 16 }} + {{- with (default .Values.nodeSelector .Values.cronjob.nodeSelector) }} + nodeSelector: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with (default .Values.affinity .Values.cronjob.affinity) }} + affinity: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with (default .Values.tolerations .Values.cronjob.tolerations) }} + tolerations: +{{ toYaml . | indent 12 }} + {{- end }} +{{- end }} diff --git a/nextcloud/templates/db-secret.yaml b/nextcloud/templates/db-secret.yaml new file mode 100644 index 0000000..790b65b --- /dev/null +++ b/nextcloud/templates/db-secret.yaml @@ -0,0 +1,22 @@ +{{- if or .Values.mariadb.enabled .Values.externalDatabase.enabled }} +{{- if not .Values.externalDatabase.existingSecret.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-%s" .Release.Name "db" }} + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +type: Opaque +data: + {{- if .Values.mariadb.enabled }} + db-password: {{ default "" .Values.mariadb.db.password | b64enc | quote }} + db-username: {{ default "" .Values.mariadb.db.user | b64enc | quote }} + {{- else }} + db-password: {{ default "" .Values.externalDatabase.password | b64enc | quote }} + db-username: {{ default "" .Values.externalDatabase.user | b64enc | quote }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/nextcloud/templates/deployment.yaml b/nextcloud/templates/deployment.yaml new file mode 100644 index 0000000..fad0d7a --- /dev/null +++ b/nextcloud/templates/deployment.yaml @@ -0,0 +1,360 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "nextcloud.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" .
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/component: app + {{- if .Values.deploymentAnnotations }} + annotations: +{{ toYaml .Values.deploymentAnnotations | indent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + strategy: +{{ toYaml .Values.nextcloud.strategy | indent 4 }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: app + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: app + {{- if .Values.redis.enabled }} + {{ template "nextcloud.redis.fullname" . }}-client: "true" + {{- end }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.lifecycle }} + lifecycle: + {{- if .Values.lifecycle.postStartCommand }} + postStart: + exec: + command: + {{- toYaml .Values.lifecycle.postStartCommand | nindent 16 -}} + {{- end }} + {{- if .Values.lifecycle.preStopCommand }} + preStop: + exec: + command: + {{- toYaml .Values.lifecycle.preStopCommand | nindent 16 -}} + {{- end }} + {{- end }} + env: + {{- if .Values.internalDatabase.enabled }} + - name: SQLITE_DATABASE + value: {{ .Values.internalDatabase.name | quote }} + {{- else if .Values.mariadb.enabled }} + - name: MYSQL_HOST + value: {{ template "nextcloud.mariadb.fullname" . 
}} + - name: MYSQL_DATABASE + value: {{ .Values.mariadb.db.name | quote }} + - name: MYSQL_USER + valueFrom: + secretKeyRef: + name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf "%s-%s" .Release.Name "db") }} + key: {{ .Values.externalDatabase.existingSecret.usernameKey | default "db-username" }} + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf "%s-%s" .Release.Name "db") }} + key: {{ .Values.externalDatabase.existingSecret.passwordKey | default "db-password" }} + {{- else }} + {{- if eq .Values.externalDatabase.type "postgresql" }} + - name: POSTGRES_HOST + value: {{ .Values.externalDatabase.host | quote }} + - name: POSTGRES_DB + value: {{ .Values.externalDatabase.database | quote }} + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf "%s-%s" .Release.Name "db") }} + key: {{ .Values.externalDatabase.existingSecret.usernameKey | default "db-username" }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf "%s-%s" .Release.Name "db") }} + key: {{ .Values.externalDatabase.existingSecret.passwordKey | default "db-password" }} + {{- else }} + - name: MYSQL_HOST + value: {{ .Values.externalDatabase.host | quote }} + - name: MYSQL_DATABASE + value: {{ .Values.externalDatabase.database | quote }} + - name: MYSQL_USER + valueFrom: + secretKeyRef: + name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf "%s-%s" .Release.Name "db") }} + key: {{ .Values.externalDatabase.existingSecret.usernameKey | default "db-username" }} + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf "%s-%s" .Release.Name "db") }} + key: {{ .Values.externalDatabase.existingSecret.passwordKey | default "db-password" }} + {{- end }} + {{- end }} + - name: NEXTCLOUD_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ template "nextcloud.fullname" . }} + key: nextcloud-username + - name: NEXTCLOUD_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "nextcloud.fullname" . }} + key: nextcloud-password + - name: NEXTCLOUD_TRUSTED_DOMAINS + value: {{ .Values.nextcloud.host }} + {{- if ne (int .Values.nextcloud.update) 0 }} + - name: NEXTCLOUD_UPDATE + value: {{ .Values.nextcloud.update | quote }} + {{- end }} + - name: NEXTCLOUD_DATA_DIR + value: {{ .Values.nextcloud.datadir | quote }} + {{- if .Values.nextcloud.tableprefix }} + - name: NEXTCLOUD_TABLE_PREFIX + value: {{ .Values.nextcloud.tableprefix | quote }} + {{- end }} + {{- if .Values.nextcloud.mail.enabled }} + - name: MAIL_FROM_ADDRESS + value: {{ .Values.nextcloud.mail.fromAddress | quote }} + - name: MAIL_DOMAIN + value: {{ .Values.nextcloud.mail.domain | quote }} + - name: SMTP_HOST + value: {{ .Values.nextcloud.mail.smtp.host | quote }} + - name: SMTP_SECURE + value: {{ .Values.nextcloud.mail.smtp.secure | quote }} + - name: SMTP_PORT + value: {{ .Values.nextcloud.mail.smtp.port | quote }} + - name: SMTP_AUTHTYPE + value: {{ .Values.nextcloud.mail.smtp.authtype | quote }} + - name: SMTP_NAME + valueFrom: + secretKeyRef: + name: {{ template "nextcloud.fullname" . }} + key: smtp-username + - name: SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "nextcloud.fullname" . 
}} + key: smtp-password + {{- end }} + {{- if .Values.redis.enabled }} + - name: REDIS_HOST + value: {{ template "nextcloud.redis.fullname" . }}-master + - name: REDIS_HOST_PORT + value: {{ .Values.redis.redisPort | quote }} + {{- end }} + {{- if .Values.nextcloud.extraEnv }} +{{ toYaml .Values.nextcloud.extraEnv | indent 8 }} + {{- end }} + {{- if not .Values.nginx.enabled }} + ports: + - name: http + containerPort: 80 + protocol: TCP + {{- end }} + {{- if and .Values.livenessProbe.enabled (not .Values.nginx.enabled) }} + livenessProbe: + httpGet: + path: /status.php + port: http + httpHeaders: + - name: Host + value: {{ .Values.nextcloud.host | quote }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if and .Values.readinessProbe.enabled (not .Values.nginx.enabled) }} + readinessProbe: + httpGet: + path: /status.php + port: http + httpHeaders: + - name: Host + value: {{ .Values.nextcloud.host | quote }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: nextcloud-data + mountPath: /var/www/ + subPath: {{ ternary "root" (printf "%s/%s" .Values.nextcloud.persistence.subPath "root") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: /var/www/html + subPath: {{ ternary "html" (printf "%s/%s" .Values.nextcloud.persistence.subPath "html") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: {{ .Values.nextcloud.datadir }} + subPath: {{ ternary "data" (printf "%s/%s" .Values.nextcloud.persistence.subPath "data") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: /var/www/html/config + subPath: {{ ternary "config" (printf "%s/%s" .Values.nextcloud.persistence.subPath "config") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: /var/www/html/custom_apps + subPath: {{ ternary "custom_apps" (printf "%s/%s" .Values.nextcloud.persistence.subPath "custom_apps") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: /var/www/tmp + subPath: {{ ternary "tmp" (printf "%s/%s" .Values.nextcloud.persistence.subPath "tmp") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: /var/www/html/themes + subPath: {{ ternary "themes" (printf "%s/%s" .Values.nextcloud.persistence.subPath "themes") (empty .Values.nextcloud.persistence.subPath) }} + {{- range $key, $value := .Values.nextcloud.configs }} + - name: nextcloud-config + mountPath: /var/www/html/config/{{ $key }} + subPath: {{ $key }} + {{- end }} + {{- if .Values.nextcloud.configs }} + {{- range $key, $value := .Values.nextcloud.defaultConfigs }} + {{- if $value }} + - name: nextcloud-config + mountPath: /var/www/html/config/{{ $key }} + subPath: {{ $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.nextcloud.phpConfigs }} + - name: nextcloud-phpconfig + mountPath: 
/usr/local/etc/php/conf.d/{{ $key }} + subPath: {{ $key }} + {{- end }} + {{- if .Values.nextcloud.extraVolumeMounts }} +{{ toYaml .Values.nextcloud.extraVolumeMounts | indent 8 }} + {{- end }} + {{- if .Values.nginx.enabled }} + - name: {{ .Chart.Name }}-nginx + image: "{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /status.php + port: http + httpHeaders: + - name: Host + value: {{ .Values.nextcloud.host | quote }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /status.php + port: http + httpHeaders: + - name: Host + value: {{ .Values.nextcloud.host | quote }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.nginx.resources | indent 10 }} + volumeMounts: + - name: nextcloud-data + mountPath: /var/www/ + subPath: {{ ternary "root" (printf "%s/%s" .Values.nextcloud.persistence.subPath "root") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: /var/www/html + subPath: {{ ternary "html" (printf "%s/%s" .Values.nextcloud.persistence.subPath "html") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: {{ .Values.nextcloud.datadir }} + subPath: {{ ternary "data" (printf "%s/%s" .Values.nextcloud.persistence.subPath "data") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: /var/www/html/config + subPath: {{ ternary "config" (printf "%s/%s" .Values.nextcloud.persistence.subPath "config") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: /var/www/html/custom_apps + subPath: {{ ternary "custom_apps" (printf "%s/%s" .Values.nextcloud.persistence.subPath "custom_apps") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: /var/www/tmp + subPath: {{ ternary "tmp" (printf "%s/%s" .Values.nextcloud.persistence.subPath "tmp") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-data + mountPath: /var/www/html/themes + subPath: {{ ternary "themes" (printf "%s/%s" .Values.nextcloud.persistence.subPath "themes") (empty .Values.nextcloud.persistence.subPath) }} + - name: nextcloud-nginx-config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + - name: nextcloud-data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "nextcloud.fullname" . }}-nextcloud{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.nextcloud.configs }} + - name: nextcloud-config + configMap: + name: {{ template "nextcloud.fullname" . }}-config + {{- end }} + {{- if .Values.nextcloud.phpConfigs }} + - name: nextcloud-phpconfig + configMap: + name: {{ template "nextcloud.fullname" . }}-phpconfig + {{- end }} + {{- if .Values.nginx.enabled }} + - name: nextcloud-nginx-config + configMap: + name: {{ template "nextcloud.fullname" . }}-nginxconfig + {{- end }} + {{- if .Values.nextcloud.extraVolumes }} +{{ toYaml .Values.nextcloud.extraVolumes | indent 6 }} + {{- end }} + {{- if .Values.nginx.enabled }} + # Will mount configuration files as www-data (id: 82) for nextcloud + securityContext: + fsGroup: 82 + {{- else }} + # Will mount configuration files as www-data (id: 33) for nextcloud + securityContext: + fsGroup: 33 + {{- end }} diff --git a/nextcloud/templates/hpa.yaml b/nextcloud/templates/hpa.yaml new file mode 100644 index 0000000..52a3004 --- /dev/null +++ b/nextcloud/templates/hpa.yaml @@ -0,0 +1,20 @@ +{{- if .Values.hpa.enabled -}} +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "nextcloud.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/component: app +spec: + scaleTargetRef: + kind: Deployment + apiVersion: apps/v1 + name: {{ template "nextcloud.fullname" . }} + minReplicas: {{ .Values.hpa.minPods }} + maxReplicas: {{ .Values.hpa.maxPods }} + targetCPUUtilizationPercentage: {{ .Values.hpa.cputhreshold }} +{{- end }} \ No newline at end of file diff --git a/nextcloud/templates/ingress.yaml b/nextcloud/templates/ingress.yaml new file mode 100644 index 0000000..980c79a --- /dev/null +++ b/nextcloud/templates/ingress.yaml @@ -0,0 +1,31 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: {{ template "nextcloud.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/component: app +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} +{{- if .Values.ingress.annotations }} + annotations: +{{ toYaml .Values.ingress.annotations | indent 4 }} +{{- end }} +spec: + rules: + - host: {{ .Values.nextcloud.host }} + http: + paths: + - backend: + serviceName: {{ template "nextcloud.fullname" . }} + servicePort: {{ .Values.service.port }} +{{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} +{{- end -}} +{{- end }} diff --git a/nextcloud/templates/metrics-deployment.yaml b/nextcloud/templates/metrics-deployment.yaml new file mode 100644 index 0000000..66eb842 --- /dev/null +++ b/nextcloud/templates/metrics-deployment.yaml @@ -0,0 +1,55 @@ +{{- if .Values.metrics.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "nextcloud.fullname" . 
}}-metrics + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/component: metrics +spec: + replicas: {{ .Values.metrics.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: metrics + template: + metadata: + annotations: {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} + {{- end }} + spec: + containers: + - name: metrics-exporter + image: "{{ .Values.metrics.image.repository }}:{{ .Values.metrics.image.tag }}" + imagePullPolicy: {{ .Values.metrics.image.pullPolicy }} + env: + - name: NEXTCLOUD_USERNAME + valueFrom: + secretKeyRef: + name: {{ template "nextcloud.fullname" . }} + key: nextcloud-username + - name: NEXTCLOUD_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "nextcloud.fullname" . }} + key: nextcloud-password + - name: NEXTCLOUD_SERVER + value: http{{ if .Values.metrics.https }}s{{ end }}://{{ .Values.nextcloud.host }} + - name: NEXTCLOUD_TIMEOUT + value: {{ .Values.metrics.timeout }} + ports: + - name: metrics + containerPort: 9205 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 10 }} + {{- end }} +{{- end }} diff --git a/nextcloud/templates/metrics-service.yaml b/nextcloud/templates/metrics-service.yaml new file mode 100644 index 0000000..b349751 --- /dev/null +++ b/nextcloud/templates/metrics-service.yaml @@ -0,0 +1,31 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nextcloud.fullname" . }}-metrics + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- if .Values.metrics.service.labels -}} + {{ toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" -}} {{ if .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: metrics + port: 9205 + targetPort: metrics + selector: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/nextcloud/templates/nextcloud-pvc.yaml b/nextcloud/templates/nextcloud-pvc.yaml new file mode 100644 index 0000000..8c1789a --- /dev/null +++ b/nextcloud/templates/nextcloud-pvc.yaml @@ -0,0 +1,31 @@ +{{- if .Values.persistence.enabled -}} +{{- if not .Values.persistence.existingClaim -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "nextcloud.fullname" . }}-nextcloud + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" .
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/component: app +{{- if .Values.persistence.annotations }} + annotations: +{{ toYaml .Values.persistence.annotations | indent 4 }} +{{- end }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/nextcloud/templates/nginx-config.yaml b/nextcloud/templates/nginx-config.yaml new file mode 100644 index 0000000..c4b2087 --- /dev/null +++ b/nextcloud/templates/nginx-config.yaml @@ -0,0 +1,185 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "nextcloud.fullname" . }}-nginxconfig + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: + nginx.conf: |- +{{- if .Values.nginx.config.default }} + worker_processes auto; + + error_log /var/log/nginx/error.log warn; + pid /var/run/nginx.pid; + + + events { + worker_connections 1024; + } + + + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + #tcp_nopush on; + + keepalive_timeout 65; + + #gzip on; + + upstream php-handler { + server 127.0.0.1:9000; + } + + server { + listen 80; + + # Add headers to serve security related headers + # Before enabling Strict-Transport-Security headers please read into this + # topic first. + #add_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload;" always; + # + # WARNING: Only add the preload option once you read about + # the consequences in https://hstspreload.org/. This option + # will add the domain to a hardcoded list that is shipped + # in all major browsers and getting removed from this list + # could take several months. + add_header Referrer-Policy "no-referrer" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-Download-Options "noopen" always; + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-Permitted-Cross-Domain-Policies "none" always; + add_header X-Robots-Tag "none" always; + add_header X-XSS-Protection "1; mode=block" always; + + # Remove X-Powered-By, which is an information leak + fastcgi_hide_header X-Powered-By; + + # Path to the root of your installation + root /var/www/html; + + location = /robots.txt { + allow all; + log_not_found off; + access_log off; + } + + # The following 2 rules are only needed for the user_webfinger app. + # Uncomment it if you're planning to use this app. + #rewrite ^/.well-known/host-meta /public.php?service=host-meta last; + #rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json last; + + # The following rule is only needed for the Social app. + # Uncomment it if you're planning to use this app. 
+ #rewrite ^/.well-known/webfinger /public.php?service=webfinger last; + + location = /.well-known/carddav { + return 301 $scheme://$host:$server_port/remote.php/dav; + } + + location = /.well-known/caldav { + return 301 $scheme://$host:$server_port/remote.php/dav; + } + + # set max upload size + client_max_body_size 10G; + fastcgi_buffers 64 4K; + + # Enable gzip but do not remove ETag headers + gzip on; + gzip_vary on; + gzip_comp_level 4; + gzip_min_length 256; + gzip_proxied expired no-cache no-store private no_last_modified no_etag auth; + gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy; + + # Uncomment if your server is build with the ngx_pagespeed module + # This module is currently not supported. + #pagespeed off; + + location / { + rewrite ^ /index.php; + } + + location ~ ^\/(?:build|tests|config|lib|3rdparty|templates|data)\/ { + deny all; + } + location ~ ^\/(?:\.|autotest|occ|issue|indie|db_|console) { + deny all; + } + + location ~ ^\/(?:index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|oc[ms]-provider\/.+)\.php(?:$|\/) { + fastcgi_split_path_info ^(.+?\.php)(\/.*|)$; + set $path_info $fastcgi_path_info; + try_files $fastcgi_script_name =404; + include fastcgi_params; + fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; + fastcgi_param PATH_INFO $path_info; + # fastcgi_param HTTPS on; + + # Avoid sending the security headers twice + fastcgi_param modHeadersAvailable true; + + # Enable pretty urls + fastcgi_param front_controller_active true; + fastcgi_pass php-handler; + fastcgi_intercept_errors on; + fastcgi_request_buffering off; + } + + location ~ ^\/(?:updater|oc[ms]-provider)(?:$|\/) { + try_files $uri/ =404; + index index.php; + } + + # Adding the cache control header for js, css and map files + # Make sure it is BELOW the PHP block + location ~ \.(?:css|js|woff2?|svg|gif|map)$ { + try_files $uri /index.php$request_uri; + add_header Cache-Control "public, max-age=15778463"; + # Add headers to serve security related headers (It is intended to + # have those duplicated to the ones above) + # Before enabling Strict-Transport-Security headers please read into + # this topic first. + #add_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload;" always; + # + # WARNING: Only add the preload option once you read about + # the consequences in https://hstspreload.org/. This option + # will add the domain to a hardcoded list that is shipped + # in all major browsers and getting removed from this list + # could take several months. 
+ add_header Referrer-Policy "no-referrer" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-Download-Options "noopen" always; + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-Permitted-Cross-Domain-Policies "none" always; + add_header X-Robots-Tag "none" always; + add_header X-XSS-Protection "1; mode=block" always; + + # Optional: Don't log access to assets + access_log off; + } + + location ~ \.(?:png|html|ttf|ico|jpg|jpeg|bcmap)$ { + try_files $uri /index.php$request_uri; + # Optional: Don't log access to other assets + access_log off; + } + } + } +{{- else }} +{{ .Values.nginx.config.custom | indent 4 }} +{{- end }} +{{- end }} diff --git a/nextcloud/templates/php-config.yaml b/nextcloud/templates/php-config.yaml new file mode 100644 index 0000000..3ff9e95 --- /dev/null +++ b/nextcloud/templates/php-config.yaml @@ -0,0 +1,16 @@ +{{- if .Values.nextcloud.phpConfigs -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "nextcloud.fullname" . }}-phpconfig + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: +{{- range $key, $value := .Values.nextcloud.phpConfigs }} + {{ $key }}: |- +{{ $value | indent 4 }} +{{- end }} +{{- end }} diff --git a/nextcloud/templates/secrets.yaml b/nextcloud/templates/secrets.yaml new file mode 100644 index 0000000..8473eba --- /dev/null +++ b/nextcloud/templates/secrets.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "nextcloud.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +type: Opaque +data: + nextcloud-username: {{ .Values.nextcloud.username | b64enc | quote }} + {{ if .Values.nextcloud.password }} + nextcloud-password: {{ .Values.nextcloud.password | b64enc | quote }} + {{ else }} + nextcloud-password: {{ randAlphaNum 10 | b64enc | quote }} + {{ end }} + {{- if .Values.nextcloud.mail.enabled }} + smtp-username: {{ default "" .Values.nextcloud.mail.smtp.name | b64enc | quote }} + smtp-password: {{ default "" .Values.nextcloud.mail.smtp.password | b64enc | quote }} + {{- end }} diff --git a/nextcloud/templates/service.yaml b/nextcloud/templates/service.yaml new file mode 100644 index 0000000..68f67e9 --- /dev/null +++ b/nextcloud/templates/service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nextcloud.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "nextcloud.name" . }} + helm.sh/chart: {{ include "nextcloud.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/component: app +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if eq .Values.service.type "NodePort" }} + nodePort: {{ default "" .Values.service.nodePort}} + {{- end }} + selector: + app.kubernetes.io/name: {{ include "nextcloud.name" . 
}} diff --git a/nextcloud/values.yaml b/nextcloud/values.yaml new file mode 100644 index 0000000..546238c --- /dev/null +++ b/nextcloud/values.yaml @@ -0,0 +1,399 @@ +## Official nextcloud image version +## ref: https://hub.docker.com/r/library/nextcloud/tags/ +## +image: + repository: nextcloud + tag: 19.0.3-apache + pullPolicy: IfNotPresent + # pullSecrets: + # - myRegistrKeySecretName + +nameOverride: "" +fullnameOverride: "" + +# Number of replicas to be deployed +replicaCount: 1 + +## Allowing use of ingress controllers +## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## +ingress: + enabled: false + annotations: {} + # nginx.ingress.kubernetes.io/proxy-body-size: 4G + # kubernetes.io/tls-acme: "true" + # certmanager.k8s.io/cluster-issuer: letsencrypt-prod + # nginx.ingress.kubernetes.io/server-snippet: |- + # server_tokens off; + # proxy_hide_header X-Powered-By; + + # rewrite ^/.well-known/webfinger /public.php?service=webfinger last; + # rewrite ^/.well-known/host-meta /public.php?service=host-meta last; + # rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json; + # location = /.well-known/carddav { + # return 301 $scheme://$host/remote.php/dav; + # } + # location = /.well-known/caldav { + # return 301 $scheme://$host/remote.php/dav; + # } + # location = /robots.txt { + # allow all; + # log_not_found off; + # access_log off; + # } + # location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ { + # deny all; + # } + # location ~ ^/(?:autotest|occ|issue|indie|db_|console) { + # deny all; + # } + # tls: + # - secretName: nextcloud-tls + # hosts: + # - nextcloud.kube.home + labels: {} + + +# Allow configuration of lifecycle hooks +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/ +lifecycle: {} + # postStartCommand: [] + # preStopCommand: [] + +nextcloud: + host: nextcloud.kube.home + username: admin + password: changeme + update: 0 + datadir: /var/www/html/data + tableprefix: + persistence: + subPath: + mail: + enabled: false + fromAddress: user + domain: domain.com + smtp: + host: domain.com + secure: ssl + port: 465 + authtype: LOGIN + name: user + password: pass + # PHP Configuration files + # Will be injected in /usr/local/etc/php/conf.d + phpConfigs: {} + # Default config files + # IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself + # Default confgurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config + defaultConfigs: + # To protect /var/www/html/config + .htaccess: true + # Redis default configuration + redis.config.php: true + # Apache configuration for rewrite urls + apache-pretty-urls.config.php: true + # Define APCu as local cache + apcu.config.php: true + # Apps directory configs + apps.config.php: true + # Used for auto configure database + autoconfig.php: true + # SMTP default configuration + smtp.config.php: true + # Extra config files created in /var/www/html/config/ + # ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file + configs: {} + + # For example, to use S3 as primary storage + # ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3 + # + # configs: + # s3.config.php: |- + # array( + # 'class' => '\\OC\\Files\\ObjectStore\\S3', + # 'arguments' => array( + # 'bucket' => 'my-bucket', + # 'autocreate' => true, + # 'key' => 'xxx', 
+ # 'secret' => 'xxx', + # 'region' => 'us-east-1', + # 'use_ssl' => true + # ) + # ) + # ); + + ## Strategy used to replace old pods + ## IMPORTANT: use with care, it is suggested to leave this as-is for upgrade purposes + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + strategy: + type: Recreate + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + + ## + ## Extra environment variables + extraEnv: + # - name: SOME_SECRET_ENV + # valueFrom: + # secretKeyRef: + # name: nextcloud + # key: secret_key + + # Extra mounts for the pods. Example shown is for connecting a legacy NFS volume + # to NextCloud pods in Kubernetes. This can then be configured in External Storage + extraVolumes: + # - name: nfs + # nfs: + # server: "10.0.0.1" + # path: "/nextcloud_data" + # readOnly: false + extraVolumeMounts: + # - name: nfs + # mountPath: "/legacy_data" + +nginx: + ## You need to set an fpm version of the image for nextcloud if you want to use nginx! + enabled: false + image: + repository: nginx + tag: alpine + pullPolicy: IfNotPresent + + config: + # This generates the default nginx config as per the nextcloud documentation + default: true + # custom: |- + # worker_processes 1;.. + + resources: {} + +internalDatabase: + enabled: true + name: nextcloud + +## +## External database configuration +## +externalDatabase: + enabled: false + + ## Supported database engines: mysql or postgresql + type: mysql + + ## Database host + host: + + ## Database user + user: nextcloud + + ## Database password + password: + + ## Database name + database: nextcloud + + ## Use an existing secret + existingSecret: + enabled: false + # secretName: nameofsecret + # usernameKey: username + # passwordKey: password + +## +## MariaDB chart configuration +## +mariadb: + ## Whether to deploy a mariadb server to satisfy the application's database requirements. To use an external database set this to false and configure the externalDatabase parameters + enabled: false + + db: + name: nextcloud + user: nextcloud + password: changeme + + replication: + enabled: false + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + master: + persistence: + enabled: false + # storageClass: "" + accessMode: ReadWriteOnce + size: 8Gi + +postgresql: + enabled: false + global: + postgresql: + postgresqlUsername: nextcloud + postgresqlPassword: changeme + postgresqlDatabase: nextcloud + persistence: + enabled: false + # storageClass: "" + +redis: + enabled: false + usePassword: false + +## Cronjob to execute Nextcloud background tasks +## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#webcron +## +cronjob: + enabled: false + # Nextcloud image is used as default but only curl is needed + image: {} + # repository: nextcloud + # tag: 16.0.3-apache + # pullPolicy: IfNotPresent + # pullSecrets: + # - myRegistryKeySecretName + # Every 5 minutes + # Note: Setting this to any other value than 5 minutes might + # cause issues with how nextcloud background jobs are executed + schedule: "*/5 * * * *" + annotations: {} + # Set curl's insecure option if you use e.g. self-signed certificates + curlInsecure: false + failedJobsHistoryLimit: 5 + successfulJobsHistoryLimit: 2 + # If not set, nextcloud deployment one will be set + # resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user.
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # If not set, nextcloud deployment one will be set + # nodeSelector: {} + + # If not set, nextcloud deployment one will be set + # tolerations: [] + + # If not set, nextcloud deployment one will be set + # affinity: {} + +service: + type: ClusterIP + port: 8080 + loadBalancerIP: nil + nodePort: nil + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + # Nextcloud Data (/var/www/html) + enabled: false + annotations: {} + ## nextcloud data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + size: 8Gi + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +## Liveness and readiness probe values +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + +## Enable pod autoscaling using HorizontalPodAutoscaler +## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ +## +hpa: + enabled: false + cputhreshold: 60 + minPods: 1 + maxPods: 10 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + replicaCount: 1 + # The metrics exporter needs to know how you serve Nextcloud either http or https + https: false + timeout: 5s + + image: + repository: xperimental/nextcloud-exporter + tag: v0.3.0 + pullPolicy: IfNotPresent + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Metrics exporter pod Annotation and Labels + # podAnnotations: {} + + # podLabels: {} + + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9205" + labels: {} diff --git a/nfs-client-provisioner/.helmignore b/nfs-client-provisioner/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/nfs-client-provisioner/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/nfs-client-provisioner/Chart.yaml b/nfs-client-provisioner/Chart.yaml new file mode 100644 index 0000000..206f0cd --- /dev/null +++ b/nfs-client-provisioner/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +appVersion: 3.1.0 +deprecated: true +description: DEPRECATED - nfs-client is an automatic provisioner that used your *already + configured* NFS server, automatically creating Persistent Volumes. +home: https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client +keywords: +- nfs +- storage +name: nfs-client-provisioner +sources: +- https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client +version: 1.2.11 diff --git a/nfs-client-provisioner/README.md b/nfs-client-provisioner/README.md new file mode 100644 index 0000000..4aee358 --- /dev/null +++ b/nfs-client-provisioner/README.md @@ -0,0 +1,82 @@ +# ⚠️ Repo Archive Notice + +As of Nov 13, 2020, charts in this repo will no longer be updated. +For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/). + +# nfs-client-provisioner + +The [NFS client provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) is an automatic provisioner for Kubernetes that uses your *already configured* NFS server, automatically creating Persistent Volumes. 
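+
+For example, once the provisioner is running, a workload can request NFS-backed storage simply by referencing the chart's storage class in a PersistentVolumeClaim. A minimal sketch, assuming the default `storageClass.name` of `nfs-client` and a hypothetical claim name:
+
+```yaml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: test-claim                 # hypothetical name, adjust to your workload
+spec:
+  storageClassName: nfs-client     # default storageClass.name created by this chart
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Mi                 # the provisioner creates a matching PV on the NFS share
+```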
+ +## DEPRECATION NOTICE + +This chart is deprecated and no longer supported. + +## TL;DR; + +```console +$ helm install --set nfs.server=x.x.x.x --set nfs.path=/exported/path stable/nfs-client-provisioner +``` + +For **arm** deployments, override `image.repository` with `--set image.repository=quay.io/external_storage/nfs-client-provisioner-arm`. + +## Introduction + +This chart installs a custom [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/) into a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. It also installs an [NFS client provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) into the cluster, which dynamically creates persistent volumes from a single NFS share. + +## Prerequisites + +- Kubernetes 1.9+ +- Existing NFS Share + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release --set nfs.server=x.x.x.x --set nfs.path=/exported/path stable/nfs-client-provisioner +``` + +The command deploys the given storage class in the default configuration. It can be used afterwards to provision persistent volumes. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of this chart and their default values. + +| Parameter | Description | Default | +| ----------------------------------- | ----------------------------------------------------------- | ------------------------------------------------- | +| `replicaCount` | Number of provisioner instances to be deployed | `1` | +| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` | +| `image.repository` | Provisioner image | `quay.io/external_storage/nfs-client-provisioner` | +| `image.tag` | Version of provisioner image | `v3.1.0-k8s1.11` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `storageClass.name` | Name of the storageClass | `nfs-client` | +| `storageClass.defaultClass` | Set as the default StorageClass | `false` | +| `storageClass.allowVolumeExpansion` | Allow expanding the volume | `true` | +| `storageClass.reclaimPolicy` | Method used to reclaim an obsoleted volume | `Delete` | +| `storageClass.provisionerName` | Name of the provisioner | null | +| `storageClass.archiveOnDelete` | Archive pvc when deleting | `true` | +| `storageClass.accessModes` | Set access mode for PV | `ReadWriteOnce` | +| `nfs.server` | Hostname of the NFS server | null (ip or hostname) | +| `nfs.path` | Basepath of the mount point to be used | `/ifs/kubernetes` | +| `nfs.mountOptions` | Mount options (e.g. 'nfsvers=3') | null | +| `resources` | Resources required (e.g.
CPU, memory) | `{}` | +| `rbac.create` | Use Role-based Access Control | `true` | +| `podSecurityPolicy.enabled` | Create & use Pod Security Policy resources | `false` | +| `priorityClassName` | Set pod priorityClassName | null | +| `serviceAccount.create` | Should we create a ServiceAccount | `true` | +| `serviceAccount.name` | Name of the ServiceAccount to use | null | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `affinity` | Affinity settings | `{}` | +| `tolerations` | List of node taints to tolerate | `[]` | diff --git a/nfs-client-provisioner/ci/test-values.yaml b/nfs-client-provisioner/ci/test-values.yaml new file mode 100644 index 0000000..4237de5 --- /dev/null +++ b/nfs-client-provisioner/ci/test-values.yaml @@ -0,0 +1,5 @@ +nfs: + server: 127.0.0.1 +podSecurityPolicy: + enabled: true +buildMode: true diff --git a/nfs-client-provisioner/templates/_helpers.tpl b/nfs-client-provisioner/templates/_helpers.tpl new file mode 100644 index 0000000..d8d4e51 --- /dev/null +++ b/nfs-client-provisioner/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nfs-client-provisioner.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "nfs-client-provisioner.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nfs-client-provisioner.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nfs-client-provisioner.provisionerName" -}} +{{- if .Values.storageClass.provisionerName -}} +{{- printf .Values.storageClass.provisionerName -}} +{{- else -}} +cluster.local/{{ template "nfs-client-provisioner.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "nfs-client-provisioner.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "nfs-client-provisioner.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/nfs-client-provisioner/templates/clusterrole.yaml b/nfs-client-provisioner/templates/clusterrole.yaml new file mode 100644 index 0000000..fa43e0d --- /dev/null +++ b/nfs-client-provisioner/templates/clusterrole.yaml @@ -0,0 +1,30 @@ +{{- if .Values.rbac.create }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + app: {{ template "nfs-client-provisioner.name" . 
}} + chart: {{ template "nfs-client-provisioner.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nfs-client-provisioner.fullname" . }}-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "nfs-client-provisioner.fullname" . }}] +{{- end }} +{{- end }} diff --git a/nfs-client-provisioner/templates/clusterrolebinding.yaml b/nfs-client-provisioner/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..e04c719 --- /dev/null +++ b/nfs-client-provisioner/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + app: {{ template "nfs-client-provisioner.name" . }} + chart: {{ template "nfs-client-provisioner.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: run-{{ template "nfs-client-provisioner.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nfs-client-provisioner.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ template "nfs-client-provisioner.fullname" . }}-runner + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/nfs-client-provisioner/templates/deployment.yaml b/nfs-client-provisioner/templates/deployment.yaml new file mode 100644 index 0000000..9c5a34d --- /dev/null +++ b/nfs-client-provisioner/templates/deployment.yaml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "nfs-client-provisioner.fullname" . }} + labels: + app: {{ template "nfs-client-provisioner.name" . }} + chart: {{ template "nfs-client-provisioner.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + replicas: {{ .Values.replicaCount }} + strategy: + type: {{ .Values.strategyType }} + selector: + matchLabels: + app: {{ template "nfs-client-provisioner.name" . }} + release: {{ .Release.Name }} + template: + metadata: + annotations: + {{- if and (.Values.tolerations) (semverCompare "<1.6-0" .Capabilities.KubeVersion.GitVersion) }} + scheduler.alpha.kubernetes.io/tolerations: '{{ toJson .Values.tolerations }}' + {{- end }} + labels: + app: {{ template "nfs-client-provisioner.name" . }} + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ template "nfs-client-provisioner.serviceAccountName" . 
}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: {{ template "nfs-client-provisioner.provisionerName" . }} + - name: NFS_SERVER + value: {{ .Values.nfs.server }} + - name: NFS_PATH + value: {{ .Values.nfs.path }} + {{- with .Values.resources }} + resources: +{{ toYaml . | indent 12 }} + {{- end }} + volumes: + - name: nfs-client-root +{{- if .Values.buildMode }} + emptyDir: {} +{{- else if .Values.nfs.mountOptions }} + persistentVolumeClaim: + claimName: pvc-{{ template "nfs-client-provisioner.fullname" . }} +{{- else }} + nfs: + server: {{ .Values.nfs.server }} + path: {{ .Values.nfs.path }} +{{- end }} + {{- if and (.Values.tolerations) (semverCompare "^1.6-0" .Capabilities.KubeVersion.GitVersion) }} + tolerations: +{{ toYaml .Values.tolerations | indent 6 }} + {{- end }} diff --git a/nfs-client-provisioner/templates/persistentvolume.yaml b/nfs-client-provisioner/templates/persistentvolume.yaml new file mode 100644 index 0000000..d13ad0b --- /dev/null +++ b/nfs-client-provisioner/templates/persistentvolume.yaml @@ -0,0 +1,25 @@ +{{ if .Values.nfs.mountOptions -}} +apiVersion: v1 +kind: PersistentVolume +metadata: + name: pv-{{ template "nfs-client-provisioner.fullname" . }} + labels: + nfs-client-provisioner: {{ template "nfs-client-provisioner.fullname" . }} +spec: + capacity: + storage: 10Mi + volumeMode: Filesystem + accessModes: + - {{ .Values.storageClass.accessModes }} + persistentVolumeReclaimPolicy: {{ .Values.storageClass.reclaimPolicy }} + storageClassName: "" + {{- if .Values.nfs.mountOptions }} + mountOptions: + {{- range .Values.nfs.mountOptions }} + - {{ . }} + {{- end }} + {{- end }} + nfs: + server: {{ .Values.nfs.server }} + path: {{ .Values.nfs.path }} +{{ end -}} diff --git a/nfs-client-provisioner/templates/persistentvolumeclaim.yaml b/nfs-client-provisioner/templates/persistentvolumeclaim.yaml new file mode 100644 index 0000000..9bcf430 --- /dev/null +++ b/nfs-client-provisioner/templates/persistentvolumeclaim.yaml @@ -0,0 +1,17 @@ +{{ if .Values.nfs.mountOptions -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvc-{{ template "nfs-client-provisioner.fullname" . }} +spec: + accessModes: + - {{ .Values.storageClass.accessModes }} + volumeMode: Filesystem + storageClassName: "" + selector: + matchLabels: + nfs-client-provisioner: {{ template "nfs-client-provisioner.fullname" . }} + resources: + requests: + storage: 10Mi +{{ end -}} diff --git a/nfs-client-provisioner/templates/podsecuritypolicy.yaml b/nfs-client-provisioner/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..830fad6 --- /dev/null +++ b/nfs-client-provisioner/templates/podsecuritypolicy.yaml @@ -0,0 +1,31 @@ +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "nfs-client-provisioner.fullname" . 
}} + labels: + app: {{ template "nfs-client-provisioner.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'secret' + - 'nfs' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} diff --git a/nfs-client-provisioner/templates/role.yaml b/nfs-client-provisioner/templates/role.yaml new file mode 100644 index 0000000..0cccdcb --- /dev/null +++ b/nfs-client-provisioner/templates/role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + app: {{ template "nfs-client-provisioner.name" . }} + chart: {{ template "nfs-client-provisioner.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "nfs-client-provisioner.fullname" . }}] +{{- end }} +{{- end }} diff --git a/nfs-client-provisioner/templates/rolebinding.yaml b/nfs-client-provisioner/templates/rolebinding.yaml new file mode 100644 index 0000000..57c1c87 --- /dev/null +++ b/nfs-client-provisioner/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + app: {{ template "nfs-client-provisioner.name" . }} + chart: {{ template "nfs-client-provisioner.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nfs-client-provisioner.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }} + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/nfs-client-provisioner/templates/serviceaccount.yaml b/nfs-client-provisioner/templates/serviceaccount.yaml new file mode 100644 index 0000000..2940896 --- /dev/null +++ b/nfs-client-provisioner/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{ if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "nfs-client-provisioner.name" . }} + chart: {{ template "nfs-client-provisioner.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nfs-client-provisioner.serviceAccountName" . }} +{{- end -}} diff --git a/nfs-client-provisioner/templates/storageclass.yaml b/nfs-client-provisioner/templates/storageclass.yaml new file mode 100644 index 0000000..81953c0 --- /dev/null +++ b/nfs-client-provisioner/templates/storageclass.yaml @@ -0,0 +1,26 @@ +{{ if .Values.storageClass.create -}} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + labels: + app: {{ template "nfs-client-provisioner.name" . }} + chart: {{ template "nfs-client-provisioner.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ .Values.storageClass.name }} +{{- if .Values.storageClass.defaultClass }} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{{- end }} +provisioner: {{ template "nfs-client-provisioner.provisionerName" . }} +allowVolumeExpansion: {{ .Values.storageClass.allowVolumeExpansion }} +reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }} +parameters: + archiveOnDelete: "{{ .Values.storageClass.archiveOnDelete }}" +{{- if .Values.nfs.mountOptions }} +mountOptions: + {{- range .Values.nfs.mountOptions }} + - {{ . }} + {{- end }} +{{- end }} +{{ end -}} diff --git a/nfs-client-provisioner/values.yaml b/nfs-client-provisioner/values.yaml new file mode 100644 index 0000000..90b985f --- /dev/null +++ b/nfs-client-provisioner/values.yaml @@ -0,0 +1,78 @@ +# Default values for nfs-client-provisioner. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 +strategyType: Recreate + +image: + repository: quay.io/external_storage/nfs-client-provisioner + tag: v3.1.0-k8s1.11 + pullPolicy: IfNotPresent + +nfs: + server: + path: /ifs/kubernetes + mountOptions: + +# For creating the StorageClass automatically: +storageClass: + create: true + + # Set a provisioner name. If unset, a name will be generated. + # provisionerName: + + # Set StorageClass as the default StorageClass + # Ignored if storageClass.create is false + defaultClass: false + + # Set a StorageClass name + # Ignored if storageClass.create is false + name: nfs-client + + # Allow volume to be expanded dynamically + allowVolumeExpansion: true + + # Method used to reclaim an obsoleted volume + reclaimPolicy: Delete + + # When set to false your PVs will not be archived by the provisioner upon deletion of the PVC. + archiveOnDelete: true + + # Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany + accessModes: ReadWriteOnce + +## For RBAC support: +rbac: + # Specifies whether RBAC resources should be created + create: true + +# If true, create & use Pod Security Policy resources +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +## Set pod priorityClassName +# priorityClassName: "" + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/opendkim/.helmignore b/opendkim/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/opendkim/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/opendkim/Chart.yaml b/opendkim/Chart.yaml new file mode 100644 index 0000000..8ce0781 --- /dev/null +++ b/opendkim/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: opendkim +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. 
+# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. +appVersion: 2.11.0 diff --git a/opendkim/templates/NOTES.txt b/opendkim/templates/NOTES.txt new file mode 100644 index 0000000..1d8ab87 --- /dev/null +++ b/opendkim/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "opendkim.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "opendkim.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "opendkim.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "opendkim.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/opendkim/templates/_helpers.tpl b/opendkim/templates/_helpers.tpl new file mode 100644 index 0000000..21a670b --- /dev/null +++ b/opendkim/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "opendkim.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "opendkim.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "opendkim.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "opendkim.labels" -}} +helm.sh/chart: {{ include "opendkim.chart" . }} +{{ include "opendkim.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "opendkim.selectorLabels" -}} +app.kubernetes.io/name: {{ include "opendkim.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "opendkim.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "opendkim.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/opendkim/templates/configmap.yaml b/opendkim/templates/configmap.yaml new file mode 100644 index 0000000..8ed9da5 --- /dev/null +++ b/opendkim/templates/configmap.yaml @@ -0,0 +1,10 @@ +{{- range $key, $val := .Values.opendkim.configmaps }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: opendkim-{{ $key }} +data: +{{ $key | indent 2 }}: | +{{ $val | indent 4 }} +{{- end }} diff --git a/opendkim/templates/deployment.yaml b/opendkim/templates/deployment.yaml new file mode 100644 index 0000000..454150e --- /dev/null +++ b/opendkim/templates/deployment.yaml @@ -0,0 +1,91 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "opendkim.fullname" . }} + labels: + {{- include "opendkim.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "opendkim.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "opendkim.selectorLabels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "opendkim.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + initContainers: + - name: init-opendkim + image: "{{ .Values.opendkim.image.repository }}:{{ .Values.opendkim.image.tag }}" + command: ['sh', '-c', 'cp -a /etc/opendkim/* /tmp/ ; chmod 750 /tmp/ ; chmod 600 /tmp/keys/* ; chown opendkim:opendkim -R /tmp/ ; exit 0'] + volumeMounts: + - name: opendkim-data + mountPath: /tmp + - name: opendkim-default-private + mountPath: "/etc/opendkim/keys/default.private" + subPath: default-private + - name: opendkim-default-public + mountPath: "/etc/opendkim/keys/default.txt" + subPath: default-public + - name: opendkim-opendkim + mountPath: "/etc/opendkim/opendkim.conf" + subPath: opendkim + - name: opendkim-keytable + mountPath: "/etc/opendkim/KeyTable" + subPath: keytable + - name: opendkim-signingtable + mountPath: "/etc/opendkim/SigningTable" + subPath: signingtable + - name: opendkim-trustedhosts + mountPath: "/etc/opendkim/TrustedHosts" + subPath: trustedhosts + containers: + - name: opendkim + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.opendkim.image.repository }}:{{ .Values.opendkim.image.tag }}" + imagePullPolicy: {{ .Values.opendkim.image.pullPolicy }} + #command: ['sh','-c','sleep 10000'] + ports: + - name: dkim + containerPort: 8891 + protocol: TCP + volumeMounts: + - name: opendkim-data + mountPath: /etc/opendkim + volumes: + - name: opendkim-data + emptyDir: {} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- range $key, $val := .Values.opendkim.configmaps }} + - name: opendkim-{{ $key }} + configMap: + name: opendkim-{{ $key }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/opendkim/templates/persistent-volume-claim.yaml b/opendkim/templates/persistent-volume-claim.yaml new file mode 100644 index 0000000..ac0324e --- /dev/null +++ b/opendkim/templates/persistent-volume-claim.yaml @@ -0,0 +1,24 @@ +{{- if .Values.persistence.enabled }} +{{- if not .Values.persistence.existingClaim -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "opendkim.fullname" . }} + labels: + app: {{ template "opendkim.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} +{{- if .Values.persistence.storageClass }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- else }} + storageClassName: "" +{{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- end -}} +{{- end }} diff --git a/opendkim/templates/service.yaml b/opendkim/templates/service.yaml new file mode 100644 index 0000000..ff8563f --- /dev/null +++ b/opendkim/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opendkim.fullname" . }} + labels: + {{- include "opendkim.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: 8891 + targetPort: 8891 + protocol: TCP + name: dkim + selector: + {{- include "opendkim.selectorLabels" . 
| nindent 4 }} diff --git a/opendkim/templates/serviceaccount.yaml b/opendkim/templates/serviceaccount.yaml new file mode 100644 index 0000000..375ad60 --- /dev/null +++ b/opendkim/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "opendkim.serviceAccountName" . }} + labels: + {{- include "opendkim.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/opendkim/values.yaml b/opendkim/values.yaml new file mode 100644 index 0000000..a341e20 --- /dev/null +++ b/opendkim/values.yaml @@ -0,0 +1,96 @@ +# Default values for opendkim. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 +persistence: + enabled: false + existingClaim: mailboxes + +opendkim: + image: + repository: registry.geekhome.org/opendkim + tag: 2.11.0 + pullPolicy: Always + configmaps: + opendkim: | + PidFile /var/run/opendkim/opendkim.pid + Mode sv + Syslog yes + SyslogSuccess yes + LogWhy yes + UserID opendkim:opendkim + Socket inet:8891 + Umask 002 + SendReports yes + SoftwareHeader yes + Canonicalization relaxed/relaxed + Domain example.com + Selector default + MinimumKeyBits 1024 + KeyTable refile:/etc/opendkim/KeyTable + SigningTable refile:/etc/opendkim/SigningTable + ExternalIgnoreList refile:/etc/opendkim/TrustedHosts + InternalHosts refile:/etc/opendkim/TrustedHosts + OversignHeaders From + keytable: | + default._domainkey.example.com example.com:default:/etc/opendkim/keys/default.private + signingtable: | + *@example.com default._domainkey.example.com + trustedhosts: | + 127.0.0.1 + ::1 + *.example.com + default-private: | + -----BEGIN RSA PRIVATE KEY----- + YOUR_DKIM_PRIVATE_KEY + -----END RSA PRIVATE KEY----- + default-public: | + default._domainkey IN TXT ( "v=DKIM1; k=rsa; " + "p=YOUR_DKIM_PUBLIC_KEY" ) ; ----- DKIM key default for example.com + +service: + type: ClusterIP + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/opendmarc/.helmignore b/opendmarc/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/opendmarc/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/opendmarc/Chart.yaml b/opendmarc/Chart.yaml new file mode 100644 index 0000000..2cfa022 --- /dev/null +++ b/opendmarc/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: opendmarc +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. +appVersion: 1.3.2 diff --git a/opendmarc/templates/NOTES.txt b/opendmarc/templates/NOTES.txt new file mode 100644 index 0000000..f6b8572 --- /dev/null +++ b/opendmarc/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "opendmarc.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "opendmarc.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "opendmarc.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "opendmarc.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/opendmarc/templates/_helpers.tpl b/opendmarc/templates/_helpers.tpl new file mode 100644 index 0000000..da5b4ee --- /dev/null +++ b/opendmarc/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "opendmarc.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "opendmarc.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "opendmarc.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "opendmarc.labels" -}} +helm.sh/chart: {{ include "opendmarc.chart" . }} +{{ include "opendmarc.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "opendmarc.selectorLabels" -}} +app.kubernetes.io/name: {{ include "opendmarc.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "opendmarc.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "opendmarc.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/opendmarc/templates/configmap.yaml b/opendmarc/templates/configmap.yaml new file mode 100644 index 0000000..a28f8db --- /dev/null +++ b/opendmarc/templates/configmap.yaml @@ -0,0 +1,11 @@ +{{- range $key, $val := .Values.opendmarc.configmaps }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: opendmarc-{{ $key }} +data: +{{ $key | indent 2 }}: | +{{ $val | indent 4 }} +{{- end }} +--- diff --git a/opendmarc/templates/deployment.yaml b/opendmarc/templates/deployment.yaml new file mode 100644 index 0000000..0ff87c9 --- /dev/null +++ b/opendmarc/templates/deployment.yaml @@ -0,0 +1,66 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "opendmarc.fullname" . }} + labels: + {{- include "opendmarc.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "opendmarc.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "opendmarc.selectorLabels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "opendmarc.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: opendmarc + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.opendmarc.image.repository }}:{{ .Values.opendmarc.image.tag }}" + imagePullPolicy: {{ .Values.opendmarc.image.pullPolicy }} + ports: + - name: dmarc + containerPort: 8893 + protocol: TCP + volumeMounts: + {{- range $key, $val := .Values.opendmarc.configmaps }} + - name: opendmarc-{{ $key }} + mountPath: "/etc/opendmarc/{{ $key }}.conf" + subPath: {{ $key }} + {{- end }} + volumes: + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- range $key, $val := .Values.opendmarc.configmaps }} + - name: opendmarc-{{ $key }} + configMap: + name: opendmarc-{{ $key }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/opendmarc/templates/persistent-volume-claim.yaml b/opendmarc/templates/persistent-volume-claim.yaml new file mode 100644 index 0000000..47668b7 --- /dev/null +++ b/opendmarc/templates/persistent-volume-claim.yaml @@ -0,0 +1,24 @@ +{{- if .Values.persistence.enabled }} +{{- if not .Values.persistence.existingClaim -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "opendmarc.fullname" . }} + labels: + app: {{ template "opendmarc.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} +{{- if .Values.persistence.storageClass }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- else }} + storageClassName: "" +{{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- end -}} +{{- end }} diff --git a/opendmarc/templates/service.yaml b/opendmarc/templates/service.yaml new file mode 100644 index 0000000..ab003eb --- /dev/null +++ b/opendmarc/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opendmarc.fullname" . }} + labels: + {{- include "opendmarc.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: 8893 + targetPort: 8893 + protocol: TCP + name: dmarc + selector: + {{- include "opendmarc.selectorLabels" . | nindent 4 }} diff --git a/opendmarc/templates/serviceaccount.yaml b/opendmarc/templates/serviceaccount.yaml new file mode 100644 index 0000000..a6d0721 --- /dev/null +++ b/opendmarc/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "opendmarc.serviceAccountName" . }} + labels: + {{- include "opendmarc.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/opendmarc/values.yaml b/opendmarc/values.yaml new file mode 100644 index 0000000..b4f8f85 --- /dev/null +++ b/opendmarc/values.yaml @@ -0,0 +1,69 @@ +# Default values for opendmarc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 +persistence: + enabled: false + existingClaim: mailboxes + +opendmarc: + image: + repository: registry.geekhome.org/opendmarc + tag: 1.3.2 + pullPolicy: Always + configmaps: + opendmarc: | + AuthservID mail.example.com + Socket inet:8893 + SoftwareHeader true + SPFIgnoreResults true + SPFSelfValidate true + RequiredHeaders true + Syslog true + UserID opendmarc:mail +service: + type: ClusterIP + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/openfaas/Chart.yaml b/openfaas/Chart.yaml new file mode 100644 index 0000000..53f3920 --- /dev/null +++ b/openfaas/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +description: OpenFaaS - Serverless Functions Made Simple +home: https://www.openfaas.com +icon: https://raw.githubusercontent.com/openfaas/media/master/OpenFaaS_logo_stacked_opaque.png +keywords: +- serverless +- functions +- platform +- faas +maintainers: +- email: alex@openfaas.com + name: alexellis +- email: roesler.lucas@gmail.com + name: lucasroesler +- email: alistair.hey@gmail.com + name: Waterdrips +name: openfaas +sources: +- https://github.com/openfaas/faas +- https://github.com/openfaas/faas-netes +version: 6.2.3 diff --git a/openfaas/OWNERS b/openfaas/OWNERS new file mode 100644 index 0000000..b44a9d9 --- /dev/null +++ b/openfaas/OWNERS @@ -0,0 +1,8 @@ +approvers: +- alexellis +- rimusz +- LucasRoesler +reviewers: +- alexellis +- rimusz +- LucasRoesler diff --git a/openfaas/README.md b/openfaas/README.md new file mode 100644 index 0000000..de57234 --- /dev/null +++ b/openfaas/README.md @@ -0,0 +1,459 @@ +# OpenFaaS - Serverless Functions Made Simple + +OpenFaaS logo + +[OpenFaaS](https://github.com/openfaas/faas) (Functions as a Service) is a framework for building serverless functions with Docker and Kubernetes which has first class support for metrics. Any process can be packaged as a function enabling you to consume a range of web events without repetitive boiler-plate coding. + +## Highlights + +* Ease of use through UI portal and *one-click* install +* Write functions in any language for Linux or Windows and package in Docker/OCI image format +* Portable - runs on existing hardware or public/private cloud. 
Native [Kubernetes](https://github.com/openfaas/faas-netes) support, Docker Swarm also available +* [Operator / CRD option available](https://github.com/openfaas/faas-netes/) +* [faas-cli](http://github.com/openfaas/faas-cli) available with stack.yml for creating and managing functions +* Auto-scales according to metrics from Prometheus +* Scales to zero and back again and can be tuned at a per-function level +* Works with service-meshes + * Tested with [Istio](https://istio.io) including mTLS + * Tested with [Linkerd2](https://github.com/openfaas-incubator/openfaas-linkerd2) including mTLS and traffic splitting with SMI + +## Deploy OpenFaaS + +### 1) Install with arkade + +It is recommended that you use arkade to install OpenFaaS. arkade is a CLI tool which automates the helm CLI and chart download and installation. The `openfaas` app also has a number of options available via `arkade install openfaas --help` + +The installation with arkade is as simple as the following which installs OpenFaaS, sets up an Ingress record, and a TLS certificate with cert-manager. + +```bash +arkade install openfaas +arkade install openfaas-ingress \ + --domain openfaas.example.com \ + --email wm@example.com +``` + +See a complete example here: [Get TLS for OpenFaaS the easy way with arkade](https://blog.alexellis.io/tls-the-easy-way-with-openfaas-and-k3sup/) + +If you wish to continue without using arkade, read on for instructions. + +### 2) Install with helm + +These instructions are for Intel (normal computers), jump to the end of the document for ARM and Raspberry Pi. + +To use the chart, you will need Helm, we recommend helm 3: + +Get it from arkade: + +```bash +arkade get helm +``` + +Or use the helm3 installer: + +```bash +curl -sSLf https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash +``` + +We recommend creating two namespaces, one for the OpenFaaS *core services* and one for the *functions*: + +```sh +kubectl apply -f https://raw.githubusercontent.com/openfaas/faas-netes/master/namespaces.yml +``` + +You will now have `openfaas` and `openfaas-fn`. If you want to change the names or to install into multiple installations then edit `namespaces.yml` from the `faas-netes` repo. + +Add the OpenFaaS `helm` chart: + +```sh +helm repo add openfaas https://openfaas.github.io/faas-netes/ +``` + +Now decide how you want to expose the services and edit the `helm upgrade` command as required. + +* To use NodePorts (default) pass no additional flags +* To use a LoadBalancer add `--set serviceType=LoadBalancer` +* To use an IngressController add `--set ingress.enabled=true` + +> Note: even without a LoadBalancer or IngressController you can access your gateway at any time via `kubectl port-forward`. + +### Deploy + +Note that the commands will differ slightly between versions, if not specified, the instructions are for helm 2. + +Now deploy OpenFaaS from the helm chart repo: + +```sh +helm repo update \ + && helm upgrade openfaas --install openfaas/openfaas \ + --namespace openfaas \ + --set functionNamespace=openfaas-fn \ + --set generateBasicAuth=true +``` + +> The above command will also update your helm repo to pull in any new releases. 
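+ +If you want to wait for the core services to become ready before continuing, a quick check with plain `kubectl` is shown below (this assumes the `openfaas` namespace and the `gateway` Deployment name created by this chart): + +```sh +kubectl rollout status -n openfaas deploy/gateway +```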
+ +Retrieve the OpenFaaS credentials with: + +```sh +PASSWORD=$(kubectl -n openfaas get secret basic-auth -o jsonpath="{.data.basic-auth-password}" | base64 --decode) && \ +echo "OpenFaaS admin password: $PASSWORD" +``` + +#### Generate basic-auth credentials + +The chart has a pre-install hook which can generate basic-auth credentials; enable it with `--set generateBasicAuth=true`. + +Alternatively, you can set `generateBasicAuth` to `false` and generate or supply the basic-auth credentials yourself. This is the option you may want if you are using `helm template`. + +```sh +# generate a random password +PASSWORD=$(head -c 12 /dev/urandom | shasum | cut -d' ' -f1) +kubectl -n openfaas create secret generic basic-auth \ +--from-literal=basic-auth-user=admin \ +--from-literal=basic-auth-password="$PASSWORD" + +echo "OpenFaaS admin password: $PASSWORD" +``` + +#### Tuning cold-start + +The concept of a cold-start in OpenFaaS only applies if you A) use faas-idler and B) set a specific function to scale to zero. Otherwise there is no cold-start, because at least one replica of your function remains available. + +There are three ways to reduce the Kubernetes cold-start for a pre-pulled image, which is around 1-2 seconds. + +1) Don't set the function to scale down to zero, just set a minimum availability, i.e. 1/1 replicas +2) Use async invocations via the `/async-function/` route on the gateway, so that the latency is hidden from the caller +3) Tune the readinessProbes to be aggressively low values. This will reduce the cold-start at the cost of more `kubelet` CPU usage + +To achieve a cold-start of around 1s, set the following in `values.yaml`: + +```yaml +faasnetes: + +# redacted + readinessProbe: + initialDelaySeconds: 0 + timeoutSeconds: 1 + periodSeconds: 1 + livenessProbe: + initialDelaySeconds: 0 + timeoutSeconds: 1 + periodSeconds: 1 +# redacted + imagePullPolicy: "IfNotPresent" # Image pull policy for deployed functions +``` + + +In addition: + +* Pre-pull images on each node +* Use an in-cluster registry to reduce the pull latency for images +* Set the `imagePullPolicy` to `IfNotPresent` so that the `kubelet` only pulls images which are not already available +* Explore alternatives such as not scaling to absolute zero, and using async calls which do not show the cold start + +#### httpProbe vs. execProbe + +A note on health-checking probes for functions: + +* httpProbe - (`default`) most efficient. (compatible with Istio >= 1.1.5) +* execProbe - least efficient option, but compatible with Istio < 1.1.5 + +Use `--set faasnetes.httpProbe=true/false` to toggle between http / exec probes. + +### Verify the installation + +Once all the services are up and running, log into your gateway using the OpenFaaS CLI. This will cache your credentials into your `~/.openfaas/config.yml` file. 
+ +Fetch your public IP or NodePort via `kubectl get svc -n openfaas gateway-external -o wide` and set it as an environmental variable as below: + +```sh +export OPENFAAS_URL=http://127.0.0.1:31112 +``` + +If using a remote cluster, you can port-forward the gateway to your local machine: + +```sh +export OPENFAAS_URL=http://127.0.0.1:8080 +kubectl port-forward -n openfaas svc/gateway 8080:8080 & +``` + +Now log in with the CLI and check connectivity: + +```sh +echo -n $PASSWORD | faas-cli login -g $OPENFAAS_URL -u admin --password-stdin + +faas-cli version +``` + +## OpenFaaS Operator and Function CRD + +If you would like to work with Function CRDs there is an alternative controller to faas-netes named [OpenFaaS Operator](https://github.com/openfaas-incubator/openfaas-operator) which can be swapped in at deployment time. +The OpenFaaS Operator is suitable for development and testing and may replace the faas-netes controller in the future. +The Operator is compatible with Kubernetes 1.9 or later. + +To use it, add the flag: `--set operator.create=true` when installing with Helm. + +### faas-netes vs OpenFaaS Operator + +The faas-netes controller is the most tested, stable and supported version of the OpenFaaS integration with Kubernetes. In contrast the OpenFaaS Operator is based upon the codebase and features from `faas-netes`, but offers a tighter integration with Kubernetes through [CustomResourceDefinitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/). This means you can type in `kubectl get functions` for instance. + +See also: [Introducing the OpenFaaS Operator](https://www.openfaas.com/blog/kubernetes-operator-crd/) + +## Deployment with `helm template` + +This option is good for those that have issues with or concerns about installing Tiller, the server/cluster component of helm. Using the `helm` CLI, we can pre-render and then apply the templates using `kubectl`. + +1. Clone the faas-netes repository + ```sh + git clone https://github.com/openfaas/faas-netes.git + cd faas-netes + ``` + +2. Render the chart to a Kubernetes manifest called `openfaas.yaml` + + Helm 3: + ```sh + helm template \ + openfaas chart/openfaas/ \ + --namespace openfaas \ + --set basic_auth=true \ + --set functionNamespace=openfaas-fn > openfaas.yaml + ``` + + Helm 2: + + ```sh + helm template chart/openfaas \ + --name openfaas \ + --namespace openfaas \ + --set basic_auth=true \ + --set functionNamespace=openfaas-fn > openfaas.yaml + ``` + + You can set the values and overrides just as you would in the install/upgrade commands above. + +3. Install the components using `kubectl` + + ```sh + kubectl apply -f namespaces.yml,openfaas.yaml + ``` + +Now [verify your installation](#verify-the-installation). + +## Test a local helm chart + +You can run the following command from within the `faas-netes/chart` folder in the `faas-netes` repo. + +```sh +helm upgrade openfaas --install chart/openfaas \ + --namespace openfaas \ + --set basic_auth=true \ + --set functionNamespace=openfaas-fn +``` + +## Exposing services + +### NodePorts + +By default a NodePort will be created for the API Gateway. + +### Metrics + +You temporarily access the Prometheus metrics by using `port-forward` + +```sh +kubectl --namespace openfaas port-forward deployment/prometheus 31119:9090 +``` + +Then open `http://localhost:31119` to directly query the OpenFaaS metrics scraped by Prometheus. 
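+ +For example, with the `port-forward` above still running, you can query one of the gateway's standard counters through the Prometheus HTTP API (metric names may vary slightly between OpenFaaS versions): + +```sh +curl -s 'http://localhost:31119/api/v1/query?query=gateway_function_invocation_total' +```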
+ +### LB + +If you're running on a cloud such as AKS or GKE, you will need to pass an additional flag of `--set serviceType=LoadBalancer` to tell `helm` to create LoadBalancer objects instead. An alternative to using multiple LoadBalancers is to install an Ingress controller. + +### Deploy with an IngressController + +In order to make use of automatic ingress settings you will need an IngressController in your cluster such as Traefik or Nginx. + +To enable ingress, pass `--set ingress.enabled=true` when running the installation via `helm`. + +By default services will be exposed with the following hostnames (can be changed, see values.yaml for details): + +* `gateway.openfaas.local` + +### Endpoint load-balancing + +Some configurations in combination with client-side KeepAlive settings may cause load to be spread unevenly between replicas of a function. If you experience this, there are three ways to work around it: + +* [Install Linkerd2](https://github.com/openfaas-incubator/openfaas-linkerd2) which takes over load-balancing from the Kubernetes L4 Service (recommended) +* Disable KeepAlive in the client-side code (not recommended) +* Configure the gateway to pass invocations through to the faas-netes provider (alternative to using Linkerd2) + + ```sh + --set gateway.directFunctions=false + ``` + + In this mode, all invocations will pass through the gateway to faas-netes, which will look up endpoint IPs directly from Kubernetes. The additional hop may add some latency, but it will do fair load-balancing, even with KeepAlive. + +### SSL / TLS + +If you require TLS/SSL then please make use of an IngressController. A full guide is provided to [enable TLS for the OpenFaaS Gateway using cert-manager and Let's Encrypt](https://docs.openfaas.com/reference/ssl/kubernetes-with-cert-manager/). + +### Service meshes +If you use a service mesh like Linkerd or Istio in your cluster, then you should enable the `directFunctions` mode using: + +```sh +--set gateway.directFunctions=true +``` + +### Istio mTLS + +To install OpenFaaS with Istio mTLS, pass `--set istio.mtls=true` and disable the HTTP probes: + +```sh +helm upgrade openfaas --install chart/openfaas \ + --namespace openfaas \ + --set basic_auth=true \ + --set functionNamespace=openfaas-fn \ + --set exposeServices=false \ + --set faasnetes.httpProbe=false \ + --set httpProbe=false \ + --set gateway.directFunctions=true \ + --set istio.mtls=true +``` + +The above command will enable mTLS for the openfaas control plane services and functions excluding NATS. + +> Note that the above instructions were tested on GKE 1.13 and Istio 1.2 + +## Zero scale + +### Scale-up from zero (on by default) + +Scaling up from zero replicas is enabled by default; to turn it off, set `scaleFromZero` to `false` in the helm chart options for the `gateway` component. + +```sh +--set gateway.scaleFromZero=true/false +``` + +### Scale-down to zero (off by default) + +Scaling down to zero replicas can be achieved either through the REST API and your own controller, or by using the [faas-idler](https://github.com/openfaas-incubator/faas-idler) component. + +By default the faas-idler is set to only do a dryRun and to not scale any functions down. + +```sh +--set faasIdler.dryRun=true/false +``` + +The faas-idler will only scale down functions which have marked themselves as eligible for this behaviour through the use of a label: `com.openfaas.scale.zero=true`. 
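+ +As a sketch of how a function opts in, the label can be set at deploy time; the example below assumes the `figlet` function from the OpenFaaS function store and a `faas-cli` version that supports `--label` on `store deploy`: + +```sh +faas-cli store deploy figlet --label com.openfaas.scale.zero=true +```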
+ +See also: [faas-idler README](https://docs.openfaas.com/architecture/autoscaling/#zero-scale). + +## Removing OpenFaaS + +All control plane components can be cleaned up with helm: + +Helm 3: + +```sh +helm delete openfaas --namespace openfaas +``` + +Helm 2: + +```sh +helm delete --purge openfaas +``` + +Follow this with the command below to remove all other associated objects: + +```sh +kubectl delete namespace openfaas openfaas-fn +``` + +In some cases your additional functions may need to be deleted before deleting the chart, either with `faas-cli` or manually using `kubectl delete`. + +## ARM + +If you would like to deploy OpenFaaS to ARM, i.e. Raspberry Pi, ARM64 machines provided by Packet.net or Scaleway, or AWS Graviton, then you should use the appropriate values.yaml file. + +* `values-armhf.yml` - for Raspberry Pi and other ARMv7 boards (run `uname -a` to find out which you have) +* `values-arm64.yml` - for everything else (`arm64` or `aarch64`) + +It is recommended that you install OpenFaaS to ARM machines [using k3sup](https://k3sup.dev/) instead of helm directly, since it will determine the correct values to be used. + +See also: [Kubernetes and Raspberry Pi in the docs](https://docs.openfaas.com/deployment/kubernetes) + +## Kubernetes versioning +This Helm chart currently supports Kubernetes 1.16+ + +Note that OpenFaaS itself may support a wider range of versions, [see here](../../README.md#kubernetes-versions) + +## Getting help + +Feel free to seek out help in the [OpenFaaS Slack workspace](https://slack.openfaas.io/). Please do not raise issues for technical support unless you suspect, and can provide instructions for reproducing, an error in the chart. + +## Configuration + +Additional OpenFaaS options in `values.yaml`. + +| Parameter | Description | Default | +| ----------------------- | ---------------------------------- | ---------------------------------------------------------- | +| `functionNamespace` | Functions namespace, preferred `openfaas-fn` | `default` | +| `clusterRole` | Use a `ClusterRole` for the Operator or faas-netes. Set to `true` for multiple namespace support | `false` | +| `createCRDs` | Create the CRDs for OpenFaaS Functions and Profiles | `true` | +| `basic_auth` | Enable basic authentication on the gateway and Prometheus. Warning: do not disable. | `true` | +| `async` | Enables asynchronous function invocations. 
If `.nats.external.enabled` is `false`, also deploys NATS Streaming | `true` | +| `exposeServices` | Expose `NodePorts/LoadBalancer` | `true` | +| `serviceType` | Type of external service to use `NodePort/LoadBalancer` | `NodePort` | +| `generateBasicAuth` | Generate admin password for basic authentication | `false` | +| `rbac` | Enable RBAC | `true` | +| `httpProbe` | Setting to true will use HTTP for readiness and liveness probe on the OpenFaaS system Pods (compatible with Istio >= 1.1.5) | `true` | +| `psp` | Enable [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for OpenFaaS accounts | `false` | +| `securityContext` | Deploy with a `securityContext` set, this can be disabled for use with Istio sidecar injection | `true` | +| `openfaasImagePullPolicy` | Image pull policy for openfaas components, can change to `IfNotPresent` in offline env | `Always` | +| `kubernetesDNSDomain` | Domain name of the Kubernetes cluster | `cluster.local` | +| `operator.create` | Use the OpenFaaS operator CRD controller, default uses faas-netes as the Kubernetes controller | `false` | +| `ingress.enabled` | Create ingress resources | `false` | +| `faasnetes.httpProbe` | Use a httpProbe instead of exec | `false` | +| `ingressOperator.create` | Create the ingress-operator component | `false` | +| `ingressOperator.replicas` | Replicas of the ingress-operator| `1` | +| `ingressOperator.image` | Container image used in ingress-operator| `openfaas/ingress-operator:0.6.2` | +| `ingressOperator.resources` | Limits and requests for memory and CPU usage | Memory Requests: 25Mi | +| `faasnetes.readTimeout` | Queue worker read timeout | `60s` | +| `faasnetes.writeTimeout` | Queue worker write timeout | `60s` | +| `faasnetes.imagePullPolicy` | Image pull policy for deployed functions | `Always` | +| `faasnetes.setNonRootUser` | Force all function containers to run with user id `12000` | `false` | +| `gateway.directFunctions` | Invoke functions directly without using the provider | `true` | +| `gateway.replicas` | Replicas of the gateway, pick more than `1` for HA | `1` | +| `gateway.readTimeout` | Queue worker read timeout | `65s` | +| `gateway.writeTimeout` | Queue worker write timeout | `65s` | +| `gateway.upstreamTimeout` | Maximum duration of upstream function call, should be lower than `readTimeout`/`writeTimeout` | `60s` | +| `gateway.scaleFromZero` | Enables an intercepting proxy which will scale any function from 0 replicas to the desired amount | `true` | +| `gateway.maxIdleConns` | Set max idle connections from gateway to functions | `1024` | +| `gateway.maxIdleConnsPerHost` | Set max idle connections from gateway to functions per host | `1024` | +| `gateway.logsProviderURL` | Set a custom logs provider url | `""` | +| `queueWorker.durableQueueSubscriptions` | Whether to use a durable queue subscription | `false` | +| `queueWorker.queueGroup` | The name of the queue group used to process asynchronous function invocations | `faas` | +| `queueWorker.replicas` | Replicas of the queue-worker, pick more than `1` for HA | `1` | +| `queueWorker.ackWait` | Max duration of any async task/request | `60s` | +| `nats.channel` | The name of the NATS Streaming channel to use for asynchronous function invocations | `faas-request` | +| `nats.external.clusterName` | The name of the externally-managed NATS Streaming server | `` | +| `nats.external.enabled` | Whether to use an externally-managed NATS Streaming server | `false` | +| `nats.external.host` | The host at which the 
+| `nats.external.host` | The host at which the externally-managed NATS Streaming server can be reached | `""` |
+| `nats.external.port` | The port at which the externally-managed NATS Streaming server can be reached | `""` |
+| `nats.enableMonitoring` | Enable the NATS monitoring endpoints on port `8222` for NATS Streaming deployments managed by this chart | `false` |
+| `nats.metrics.enabled` | Export Prometheus metrics for NATS (no multi-arch support) | `false` |
+| `nats.metrics.image` | Container image used for the NATS Prometheus exporter (not multi-arch) | `synadia/prometheus-nats-exporter:0.6.2` |
+| `faasIdler.create` | Create the faas-idler component | `true` |
+| `faasIdler.inactivityDuration` | Duration after which faas-idler will scale a function down to 0 replicas | `15m` |
+| `faasIdler.reconcileInterval` | The time between each reconciliation | `1m` |
+| `faasIdler.dryRun` | When set to `false`, the OpenFaaS API will be called to scale down idle functions; by default this is set to only print in the logs. | `true` |
+| `prometheus.create` | Create the Prometheus component | `true` |
+| `alertmanager.create` | Create the AlertManager component | `true` |
+| `istio.mtls` | Create Istio policies and destination rules to enforce mTLS for OpenFaaS components and functions | `false` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+See `values.yaml` for detailed configuration.
diff --git a/openfaas/templates/NOTES.txt b/openfaas/templates/NOTES.txt
new file mode 100644
index 0000000..c79da8b
--- /dev/null
+++ b/openfaas/templates/NOTES.txt
@@ -0,0 +1,9 @@
+To verify that openfaas has started, run:
+
+  kubectl -n {{ .Release.Namespace }} get deployments -l "release={{ .Release.Name }}, app={{ template "openfaas.name" . }}"
+
+{{- if .Values.generateBasicAuth }}
+To retrieve the admin password, run:
+
+  echo $(kubectl -n {{ .Release.Namespace }} get secret basic-auth -o jsonpath="{.data.basic-auth-password}" | base64 --decode)
+{{- end }}
diff --git a/openfaas/templates/_helpers.tpl b/openfaas/templates/_helpers.tpl
new file mode 100644
index 0000000..28810ad
--- /dev/null
+++ b/openfaas/templates/_helpers.tpl
@@ -0,0 +1,20 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "openfaas.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "openfaas.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
diff --git a/openfaas/templates/alertmanager-cfg.yaml b/openfaas/templates/alertmanager-cfg.yaml
new file mode 100644
index 0000000..24c5b8f
--- /dev/null
+++ b/openfaas/templates/alertmanager-cfg.yaml
@@ -0,0 +1,47 @@
+{{- $functionNs := default .Release.Namespace .Values.functionNamespace }}
+{{- if .Values.alertmanager.create }}
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  labels:
+    app: {{ template "openfaas.name" .
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: alertmanager-config + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: alertmanager-config + namespace: {{ .Release.Namespace | quote }} +data: + alertmanager.yml: | + route: + group_by: ['alertname', 'cluster', 'service'] + group_wait: 5s + group_interval: 10s + repeat_interval: 30s + receiver: scale-up + routes: + - match: + service: gateway + receiver: scale-up + severity: major + + inhibit_rules: + - source_match: + severity: 'critical' + target_match: + severity: 'warning' + equal: ['alertname', 'cluster', 'service'] + + receivers: + - name: 'scale-up' + webhook_configs: + - url: http://gateway.{{ .Release.Namespace }}:8080/system/alert + send_resolved: true + {{- if .Values.basic_auth }} + http_config: + basic_auth: + username: admin + password_file: /var/secrets/basic-auth-password + {{- end -}} +{{- end }} \ No newline at end of file diff --git a/openfaas/templates/alertmanager-dep.yaml b/openfaas/templates/alertmanager-dep.yaml new file mode 100644 index 0000000..d14ec77 --- /dev/null +++ b/openfaas/templates/alertmanager-dep.yaml @@ -0,0 +1,108 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.alertmanager.create }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: alertmanager + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: alertmanager + namespace: {{ .Release.Namespace | quote }} +spec: + replicas: 1 + selector: + matchLabels: + app: alertmanager + template: + metadata: + labels: + app: alertmanager + annotations: + sidecar.istio.io/inject: "true" + checksum/alertmanager-config: {{ include (print $.Template.BasePath "/alertmanager-cfg.yaml") . | sha256sum | quote }} + spec: + containers: + - name: alertmanager + image: {{ .Values.alertmanager.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + command: + - "alertmanager" + - "--config.file=/alertmanager.yml" + - "--storage.path=/alertmanager" + - "--cluster.listen-address=" + livenessProbe: + {{- if .Values.httpProbe }} + httpGet: + path: /-/ready + port: 9093 + {{- else }} + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=30 + - --spider + - http://localhost:9093/-/ready + {{- end }} + timeoutSeconds: 30 + readinessProbe: + {{- if .Values.httpProbe }} + httpGet: + path: /-/ready + port: 9093 + {{- else }} + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=30 + - --spider + - http://localhost:9093/-/ready + {{- end }} + timeoutSeconds: 30 + ports: + - containerPort: 9093 + protocol: TCP + resources: + {{- .Values.alertmanager.resources | toYaml | nindent 12 }} + volumeMounts: + - mountPath: /alertmanager.yml + name: alertmanager-config + subPath: alertmanager.yml + {{- if .Values.basic_auth }} + - name: auth + readOnly: true + mountPath: "/var/secrets" + {{- end }} + volumes: + - name: alertmanager-config + configMap: + name: alertmanager-config + items: + - key: alertmanager.yml + path: alertmanager.yml + mode: 0644 + {{- if .Values.basic_auth }} + - name: auth + secret: + secretName: basic-auth + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} +{{- end }} diff --git a/openfaas/templates/alertmanager-svc.yaml b/openfaas/templates/alertmanager-svc.yaml new file mode 100644 index 0000000..d85b443 --- /dev/null +++ b/openfaas/templates/alertmanager-svc.yaml @@ -0,0 +1,22 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.alertmanager.create }} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: alertmanager + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: alertmanager + namespace: {{ .Release.Namespace | quote }} +spec: + type: ClusterIP + ports: + - port: 9093 + protocol: TCP + selector: + app: alertmanager +{{- end }} \ No newline at end of file diff --git a/openfaas/templates/basic-auth-plugin-dep.yaml b/openfaas/templates/basic-auth-plugin-dep.yaml new file mode 100644 index 0000000..5ccb571 --- /dev/null +++ b/openfaas/templates/basic-auth-plugin-dep.yaml @@ -0,0 +1,106 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.basic_auth }} +{{- if not .Values.oauth2Plugin.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: basic-auth-plugin + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: basic-auth-plugin + namespace: {{ .Release.Namespace | quote }} +spec: + replicas: {{ .Values.basicAuthPlugin.replicas }} + selector: + matchLabels: + app: basic-auth-plugin + template: + metadata: + annotations: + prometheus.io.scrape: "false" + labels: + app: basic-auth-plugin + spec: + {{- if .Values.basic_auth }} + volumes: + - name: auth + secret: + secretName: basic-auth + {{- end }} + containers: + - name: basic-auth-plugin + resources: + {{- .Values.basicAuthPlugin.resources | toYaml | nindent 12 }} + image: {{ .Values.basicAuthPlugin.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + {{- if .Values.securityContext }} + securityContext: + readOnlyRootFilesystem: true + runAsUser: 10001 + {{- end }} + livenessProbe: + {{- if .Values.httpProbe }} + httpGet: + path: /health + port: 8080 + {{- else }} + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=5 + - --spider + - http://localhost:8080/health + {{- end }} + timeoutSeconds: 5 + readinessProbe: + {{- if .Values.httpProbe }} + httpGet: + path: /health + port: 8080 + {{- else }} + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=5 + - --spider + - http://localhost:8080/health + {{- end }} + timeoutSeconds: 5 + env: + {{- if .Values.basic_auth }} + - name: secret_mount_path + value: "/var/secrets" + - name: basic_auth + value: "{{ .Values.basic_auth }}" + volumeMounts: + - name: auth + readOnly: true + mountPath: "/var/secrets" + ports: + - name: http + containerPort: 8080 + protocol: TCP + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} + +{{- end }} +{{- end }} diff --git a/openfaas/templates/basic-auth-plugin-svc.yaml b/openfaas/templates/basic-auth-plugin-svc.yaml new file mode 100644 index 0000000..9bf3152 --- /dev/null +++ b/openfaas/templates/basic-auth-plugin-svc.yaml @@ -0,0 +1,25 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.basic_auth }} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: basic-auth-plugin + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: basic-auth-plugin + namespace: {{ .Release.Namespace | quote }} +spec: + type: ClusterIP + ports: + - port: 8080 + targetPort: http + protocol: TCP + name: http + selector: + app: basic-auth-plugin + +{{- end }} \ No newline at end of file diff --git a/openfaas/templates/controller-rbac.yaml b/openfaas/templates/controller-rbac.yaml new file mode 100644 index 0000000..81008bf --- /dev/null +++ b/openfaas/templates/controller-rbac.yaml @@ -0,0 +1,227 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if eq .Values.operator.create false }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: faas-controller + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ .Release.Name }}-controller + namespace: {{ .Release.Namespace | quote }} +{{- if .Values.rbac }} +{{- if .Values.clusterRole }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: faas-controller + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ .Release.Name }}-controller + namespace: {{ $functionNs | quote }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - create + - delete + - update + - apiGroups: + - extensions + - apps + resources: + - deployments + verbs: + - get + - list + - watch + - create + - delete + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "openfaas.com" + resources: + - "profiles" + verbs: + - "get" + - "list" + - "watch" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: faas-controller + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ .Release.Name }}-controller + namespace: {{ $functionNs | quote }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-controller +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-controller + namespace: {{ .Release.Namespace | quote }} +{{- else }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: faas-controller + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ .Release.Name }}-controller + namespace: {{ $functionNs | quote }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - create + - delete + - update + - apiGroups: + - extensions + - apps + resources: + - deployments + verbs: + - get + - list + - watch + - create + - delete + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + - endpoints + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: faas-controller + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ .Release.Name }}-controller + namespace: {{ $functionNs | quote }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Release.Name }}-controller +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-controller + namespace: {{ .Release.Namespace | quote }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: faas-controller + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ .Release.Name }}-profiles + namespace: {{ .Release.Namespace | quote }} +rules: + - apiGroups: + - "openfaas.com" + resources: + - "profiles" + verbs: + - "get" + - "list" + - "watch" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: faas-controller + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ .Release.Name }}-profiles + namespace: {{ .Release.Namespace | quote }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Release.Name }}-profiles +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-controller + namespace: {{ .Release.Namespace | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/openfaas/templates/faas-idler-dep.yaml b/openfaas/templates/faas-idler-dep.yaml new file mode 100644 index 0000000..3dd3210 --- /dev/null +++ b/openfaas/templates/faas-idler-dep.yaml @@ -0,0 +1,75 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.faasIdler.create }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: faas-idler + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: faas-idler + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + replicas: {{ .Values.faasIdler.replicas }} + selector: + matchLabels: + app: faas-idler + template: + metadata: + annotations: + prometheus.io.scrape: "false" + labels: + app: faas-idler + spec: + containers: + - name: faas-idler + resources: + {{- .Values.faasIdler.resources | toYaml | nindent 12 }} + image: {{ .Values.faasIdler.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + env: + - name: gateway_url + value: "http://gateway.{{ .Release.Namespace }}:8080/" + - name: prometheus_host + value: "prometheus.{{ .Release.Namespace }}" + - name: prometheus_port + value: "9090" + - name: inactivity_duration + value: {{ .Values.faasIdler.inactivityDuration }} + - name: reconcile_interval + value: {{ .Values.faasIdler.reconcileInterval }} +{{- if .Values.faasIdler.writeDebug }} + - name: write_debug + value: {{ .Values.faasIdler.writeDebug }} +{{- end }} + command: + - /home/app/faas-idler + - -dry-run={{ .Values.faasIdler.dryRun }} + +{{- if .Values.basic_auth }} + volumeMounts: + - name: auth + readOnly: true + mountPath: "/var/secrets/" + volumes: + - name: auth + secret: + secretName: basic-auth + +{{- end }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/openfaas/templates/function-crd.yaml b/openfaas/templates/function-crd.yaml new file mode 100644 index 0000000..5bbae9f --- /dev/null +++ b/openfaas/templates/function-crd.yaml @@ -0,0 +1,106 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.operator.create }} +{{- if .Values.createCRDs }} + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + creationTimestamp: null + name: functions.openfaas.com +spec: + group: openfaas.com + names: + kind: Function + listKind: FunctionList + plural: functions + singular: function + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Function describes an OpenFaaS function + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FunctionSpec is the spec for a Function resource + type: object + required: + - image + - name + properties: + annotations: + type: object + additionalProperties: + type: string + constraints: + type: array + items: + type: string + environment: + type: object + additionalProperties: + type: string + handler: + type: string + image: + type: string + labels: + type: object + additionalProperties: + type: string + limits: + description: FunctionResources is used to set CPU and memory limits + and requests + type: object + properties: + cpu: + type: string + memory: + type: string + name: + type: string + readOnlyRootFilesystem: + type: boolean + requests: + description: FunctionResources is used to set CPU and memory limits + and requests + type: object + properties: + cpu: + type: string + memory: + type: string + secrets: + type: array + items: + type: string + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + + +--- +{{- end }} +{{- end }} diff --git a/openfaas/templates/gateway-dep.yaml b/openfaas/templates/gateway-dep.yaml new file mode 100644 index 0000000..b3b056a --- /dev/null +++ b/openfaas/templates/gateway-dep.yaml @@ -0,0 +1,260 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: gateway + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: gateway + namespace: {{ .Release.Namespace | quote }} +spec: + replicas: {{ .Values.gateway.replicas }} + selector: + matchLabels: + app: gateway + template: + metadata: + annotations: + prometheus.io.scrape: "true" + prometheus.io.port: "8082" + labels: + app: gateway + spec: + {{- if .Values.operator.create }} + serviceAccountName: {{ .Release.Name }}-operator + {{- else }} + serviceAccountName: {{ .Release.Name }}-controller + {{- end }} + volumes: + - name: faas-netes-temp-volume + emptyDir: {} + {{- if .Values.basic_auth }} + - name: auth + secret: + secretName: basic-auth + {{- end }} + containers: + - name: gateway + resources: + {{- .Values.gateway.resources | toYaml | nindent 12 }} + image: {{ .Values.gateway.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + {{- if .Values.securityContext }} + securityContext: + readOnlyRootFilesystem: true + runAsUser: 10001 + {{- end }} + livenessProbe: + {{- if .Values.httpProbe }} + httpGet: + path: /healthz + port: 8080 + {{- else }} + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=5 + - --spider + - http://localhost:8080/healthz + {{- end }} + timeoutSeconds: 5 + readinessProbe: + {{- if .Values.httpProbe }} + httpGet: + path: /healthz + port: 8080 + {{- else }} + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=5 + - --spider + - http://localhost:8080/healthz + {{- end }} + timeoutSeconds: 5 + env: + {{- if .Values.gateway.logsProviderURL }} + - name: logs_provider_url + value: "{{ .Values.gateway.logsProviderURL }}" + {{- end }} + - name: read_timeout + value: "{{ .Values.gateway.readTimeout }}" + - name: write_timeout + value: "{{ .Values.gateway.writeTimeout }}" + - name: upstream_timeout + value: "{{ .Values.gateway.upstreamTimeout }}" + - name: functions_provider_url + value: 
"http://127.0.0.1:8081/" + - name: direct_functions + {{- if .Values.gateway.directFunctions }} + value: "{{.Values.gateway.directFunctions}}" + {{- else}} + value: "false" + {{- end }} + - name: direct_functions_suffix + value: "{{ $functionNs }}.svc.{{ .Values.kubernetesDNSDomain }}" + - name: function_namespace + value: {{ $functionNs | quote }} + {{- if .Values.nats.external.enabled }} + - name: faas_nats_address + value: "{{ .Values.nats.external.host }}" + - name: faas_nats_port + value: "{{ .Values.nats.external.port }}" + - name: faas_nats_cluster_name + value: "{{ .Values.nats.external.clusterName }}" + - name: faas_nats_channel + value: "{{ .Values.nats.channel }}" + {{- else }} + {{- if .Values.async }} + - name: faas_nats_address + value: "nats.{{ .Release.Namespace }}.svc.{{ .Values.kubernetesDNSDomain }}" + - name: faas_nats_port + value: "4222" + - name: faas_nats_channel + value: "{{ .Values.nats.channel }}" + {{- end }} + {{- end }} + {{- if .Values.basic_auth }} + - name: basic_auth + value: "true" + - name: secret_mount_path + value: "/var/secrets" + {{- if .Values.oauth2Plugin.enabled }} + - name: auth_proxy_url + value: "http://oauth2-plugin.{{ .Release.Namespace }}:8080/validate" + - name: auth_pass_body + value: "false" + {{- else }} + - name: auth_proxy_url + value: "http://basic-auth-plugin.{{ .Release.Namespace }}:8080/validate" + - name: auth_pass_body + value: "false" + {{- end }} + {{- end }} + - name: scale_from_zero + value: "{{ .Values.gateway.scaleFromZero }}" + - name: max_idle_conns + value: "{{ .Values.gateway.maxIdleConns }}" + - name: max_idle_conns_per_host + value: "{{ .Values.gateway.maxIdleConnsPerHost }}" + {{- if .Values.basic_auth }} + volumeMounts: + - name: auth + readOnly: true + mountPath: "/var/secrets" + {{- end }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + {{- if .Values.operator.create }} + - name: operator + resources: + {{- .Values.operator.resources | toYaml | nindent 12 }} + image: {{ .Values.operator.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + command: + - ./faas-netes + - -operator=true + env: + - name: port + value: "8081" + - name: function_namespace + value: {{ $functionNs | quote }} + - name: profiles_namespace + value: {{ .Release.Namespace | quote }} + - name: read_timeout + value: "{{ .Values.faasnetes.readTimeout }}" + - name: write_timeout + value: "{{ .Values.faasnetes.writeTimeout }}" + - name: image_pull_policy + value: {{ .Values.faasnetes.imagePullPolicy | quote }} + - name: http_probe + value: "{{ .Values.faasnetes.httpProbe }}" + - name: set_nonroot_user + value: "{{ .Values.faasnetes.setNonRootUser }}" + - name: readiness_probe_initial_delay_seconds + value: "{{ .Values.faasnetes.readinessProbe.initialDelaySeconds }}" + - name: readiness_probe_timeout_seconds + value: "{{ .Values.faasnetes.readinessProbe.timeoutSeconds }}" + - name: readiness_probe_period_seconds + value: "{{ .Values.faasnetes.readinessProbe.periodSeconds }}" + - name: liveness_probe_initial_delay_seconds + value: "{{ .Values.faasnetes.livenessProbe.initialDelaySeconds }}" + - name: liveness_probe_timeout_seconds + value: "{{ .Values.faasnetes.livenessProbe.timeoutSeconds }}" + - name: liveness_probe_period_seconds + value: "{{ .Values.faasnetes.livenessProbe.periodSeconds }}" + - name: cluster_role + value: "{{ .Values.clusterRole }}" + ports: + - containerPort: 8081 + protocol: TCP + {{- else }} + - name: faas-netes + resources: + {{- .Values.faasnetes.resources | toYaml | nindent 12 }} + image: {{ 
.Values.faasnetes.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + {{- if .Values.securityContext }} + securityContext: + readOnlyRootFilesystem: true + runAsUser: 10001 + {{- end }} + env: + - name: port + value: "8081" + - name: function_namespace + value: {{ $functionNs | quote }} + - name: read_timeout + value: "{{ .Values.faasnetes.readTimeout }}" + - name: profiles_namespace + value: {{ .Release.Namespace | quote }} + - name: write_timeout + value: "{{ .Values.faasnetes.writeTimeout }}" + - name: image_pull_policy + value: {{ .Values.faasnetes.imagePullPolicy | quote }} + - name: http_probe + value: "{{ .Values.faasnetes.httpProbe }}" + - name: set_nonroot_user + value: "{{ .Values.faasnetes.setNonRootUser }}" + - name: readiness_probe_initial_delay_seconds + value: "{{ .Values.faasnetes.readinessProbe.initialDelaySeconds }}" + - name: readiness_probe_timeout_seconds + value: "{{ .Values.faasnetes.readinessProbe.timeoutSeconds }}" + - name: readiness_probe_period_seconds + value: "{{ .Values.faasnetes.readinessProbe.periodSeconds }}" + - name: liveness_probe_initial_delay_seconds + value: "{{ .Values.faasnetes.livenessProbe.initialDelaySeconds }}" + - name: liveness_probe_timeout_seconds + value: "{{ .Values.faasnetes.livenessProbe.timeoutSeconds }}" + - name: liveness_probe_period_seconds + value: "{{ .Values.faasnetes.livenessProbe.periodSeconds }}" + - name: cluster_role + value: "{{ .Values.clusterRole }}" + volumeMounts: + - mountPath: /tmp + name: faas-netes-temp-volume + ports: + - containerPort: 8081 + protocol: TCP + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} diff --git a/openfaas/templates/gateway-external-svc.yaml b/openfaas/templates/gateway-external-svc.yaml new file mode 100644 index 0000000..0c19617 --- /dev/null +++ b/openfaas/templates/gateway-external-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.exposeServices }} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: gateway + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.gatewayExternal.annotations }} + annotations: {{ toYaml .Values.gatewayExternal.annotations | nindent 4 }} +{{- end }} + name: gateway-external + namespace: {{ .Release.Namespace | quote }} +spec: + type: {{ .Values.serviceType }} + ports: + - name: http + port: 8080 + protocol: TCP + targetPort: 8080 + {{- if contains "NodePort" .Values.serviceType }} + nodePort: {{ .Values.gateway.nodePort }} + {{- end }} + selector: + app: gateway + {{- end }} diff --git a/openfaas/templates/gateway-svc.yaml b/openfaas/templates/gateway-svc.yaml new file mode 100644 index 0000000..16ab7a9 --- /dev/null +++ b/openfaas/templates/gateway-svc.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: gateway + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: gateway + namespace: {{ .Release.Namespace | quote }} +spec: + type: ClusterIP + ports: + - name: http + port: 8080 + targetPort: http + protocol: TCP + selector: + app: gateway diff --git a/openfaas/templates/ingress-operator-crd.yaml b/openfaas/templates/ingress-operator-crd.yaml new file mode 100644 index 0000000..29ee69f --- /dev/null +++ b/openfaas/templates/ingress-operator-crd.yaml @@ -0,0 +1,94 @@ +{{- if .Values.ingressOperator.create }} +{{- if .Values.createCRDs }} + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + creationTimestamp: null + name: functioningresses.openfaas.com +spec: + group: openfaas.com + names: + kind: FunctionIngress + listKind: FunctionIngressList + plural: functioningresses + singular: functioningress + scope: Namespaced + versions: + - name: v1alpha2 + schema: + openAPIV3Schema: + description: FunctionIngress describes an OpenFaaS function + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FunctionIngressSpec is the spec for a FunctionIngress resource. + It must be created in the same namespace as the gateway, i.e. openfaas. + type: object + required: + - domain + - function + properties: + bypassGateway: + description: BypassGateway, when true creates an Ingress record directly + for the Function name without using the gateway in the hot path + type: boolean + domain: + description: Domain such as "api.example.com" + type: string + function: + description: Function such as "nodeinfo" + type: string + ingressType: + description: IngressType such as "nginx" + type: string + path: + description: Path such as "/v1/profiles/view/(.*)", or leave empty + for default + type: string + tls: + description: Enable TLS via cert-manager + type: object + properties: + enabled: + type: boolean + issuerRef: + description: ObjectReference is a reference to an object with + a given name and kind. + type: object + required: + - name + properties: + kind: + type: string + name: + type: string + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +{{- end }} +{{- end }} diff --git a/openfaas/templates/ingress-operator-dep.yaml b/openfaas/templates/ingress-operator-dep.yaml new file mode 100644 index 0000000..0b4fb96 --- /dev/null +++ b/openfaas/templates/ingress-operator-dep.yaml @@ -0,0 +1,41 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.ingressOperator.create }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: ingress-operator + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: ingress-operator + namespace: {{ .Release.Namespace | quote }} +spec: + replicas: {{ .Values.ingressOperator.replicas }} + selector: + matchLabels: + app: ingress-operator + template: + metadata: + annotations: + prometheus.io.scrape: "true" + labels: + app: ingress-operator + spec: + serviceAccountName: ingress-operator + containers: + - name: operator + resources: + {{- .Values.ingressOperator.resources | toYaml | nindent 10 }} + image: {{ .Values.ingressOperator.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + command: + - ./ingress-operator + - -logtostderr + env: + - name: function_namespace + value: {{ $functionNs | quote }} + - name: ingress_namespace + value: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/openfaas/templates/ingress-operator-rbac.yaml b/openfaas/templates/ingress-operator-rbac.yaml new file mode 100644 index 0000000..c25f6f4 --- /dev/null +++ b/openfaas/templates/ingress-operator-rbac.yaml @@ -0,0 +1,65 @@ +{{- if .Values.ingressOperator.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ingress-operator + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: ingress-operator + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + +{{- if .Values.rbac }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ingress-operator-rw + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: ingress-operator + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: ["openfaas.com"] + resources: ["functioningresses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: ["extensions", "networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: ["certmanager.k8s.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: [""] + resources: ["pods", "pods/log", "namespaces", "endpoints"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ingress-operator-rw + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: ingress-operator + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-operator-rw +subjects: +- kind: ServiceAccount + name: ingress-operator + namespace: {{ .Release.Namespace | quote }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/openfaas/templates/ingress.yaml b/openfaas/templates/ingress.yaml new file mode 100644 index 0000000..b011649 --- /dev/null +++ b/openfaas/templates/ingress.yaml @@ -0,0 +1,31 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "openfaas.name" . }}-ingress + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host.host }} + http: + paths: + - path: {{ $host.path }} + backend: + serviceName: {{ $host.serviceName }} + servicePort: {{ $host.servicePort }} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/openfaas/templates/istio-mtls.yaml b/openfaas/templates/istio-mtls.yaml new file mode 100644 index 0000000..e0b4688 --- /dev/null +++ b/openfaas/templates/istio-mtls.yaml @@ -0,0 +1,58 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.istio.mtls -}} +# enforce mTLS to openfaas control plane +apiVersion: authentication.istio.io/v1alpha1 +kind: Policy +metadata: + name: default + namespace: {{ .Release.Namespace }} +spec: + peers: + - mtls: {} +--- +# enforce mTLS to openfaas control plane +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: default + namespace: {{ .Release.Namespace }} +spec: + host: "*.{{ .Release.Namespace }}.svc.cluster.local" + trafficPolicy: + tls: + mode: ISTIO_MUTUAL +--- +# enforce mTLS to functions +apiVersion: authentication.istio.io/v1alpha1 +kind: Policy +metadata: + name: default + namespace: {{ $functionNs }} +spec: + peers: + - mtls: {} +--- +# enforce mTLS to functions +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: default + namespace: {{ $functionNs | quote }} +spec: + host: "*.{{ $functionNs }}.svc.cluster.local" + trafficPolicy: + tls: + mode: ISTIO_MUTUAL +--- +# disable mTLS to nats, the nats protocol is not supported by Istio +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: "nats-no-mtls" + namespace: {{ .Release.Namespace }} +spec: + host: "nats.{{ .Release.Namespace }}.svc.cluster.local" + trafficPolicy: + tls: + mode: DISABLE +{{- end -}} \ No newline at end of file diff --git a/openfaas/templates/nats-dep.yaml b/openfaas/templates/nats-dep.yaml new file mode 100644 index 0000000..cebdf0d --- /dev/null +++ b/openfaas/templates/nats-dep.yaml @@ -0,0 +1,82 @@ +{{- if and .Values.async (not .Values.nats.external.enabled) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: nats + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: nats + namespace: {{ .Release.Namespace | quote }} +spec: + replicas: 1 + selector: + matchLabels: + app: nats + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + prometheus.io.scrape: {{ .Values.nats.metrics.enabled | quote }} + {{- if .Values.nats.metrics.enabled }} + prometheus.io.port: "7777" + {{- end }} + labels: + app: nats + spec: + containers: + - name: nats + resources: + {{- .Values.nats.resources | toYaml | nindent 12 }} + image: {{ .Values.nats.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + ports: + - containerPort: 4222 + protocol: TCP + {{- if .Values.nats.enableMonitoring }} + - containerPort: 8222 + protocol: TCP + {{- end }} + command: ["/nats-streaming-server"] + args: + - --store + - memory + - --cluster_id + - faas-cluster + {{- if or .Values.nats.enableMonitoring .Values.nats.metrics.enabled }} + - -m + - "8222" + {{- end }} + {{- if .Values.nats.metrics.enabled }} + - name: metrics + image: {{ .Values.nats.metrics.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + ports: + - containerPort: 7777 + protocol: TCP + args: + - -port + - "7777" + - -connz + - -routez + - -subz + - -varz + - -channelz + - -serverz + - http://localhost:8222 + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} +{{- end }} diff --git a/openfaas/templates/nats-svc.yaml b/openfaas/templates/nats-svc.yaml new file mode 100644 index 0000000..22ce202 --- /dev/null +++ b/openfaas/templates/nats-svc.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.async (not .Values.nats.external.enabled) }} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: nats + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: nats + namespace: {{ .Release.Namespace | quote }} +spec: + type: ClusterIP + ports: + - port: 4222 + protocol: TCP + name: clients + {{- if .Values.nats.enableMonitoring }} + - port: 8222 + protocol: TCP + name: monitoring + {{- end }} + {{- if .Values.nats.metrics.enabled }} + - port: 7777 + protocol: TCP + name: metrics + {{- end }} + selector: + app: nats +{{- end }} diff --git a/openfaas/templates/oauth2-plugin-dep.yaml b/openfaas/templates/oauth2-plugin-dep.yaml new file mode 100644 index 0000000..de902ab --- /dev/null +++ b/openfaas/templates/oauth2-plugin-dep.yaml @@ -0,0 +1,139 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.oauth2Plugin.enabled }} +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: oauth2-plugin + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: oauth2-plugin + namespace: {{ .Release.Namespace | quote }} +spec: + replicas: {{ .Values.oauth2Plugin.replicas }} + selector: + matchLabels: + app: oauth2-plugin + template: + metadata: + annotations: + prometheus.io.scrape: "false" + labels: + app: oauth2-plugin + spec: + volumes: + - name: oauth2-plugin-temp-volume + emptyDir: {} + {{- if .Values.basic_auth }} + - name: auth + secret: + secretName: basic-auth + {{- end }} + containers: + - name: oauth2-plugin + resources: + {{- .Values.oauth2Plugin.resources | toYaml | nindent 12 }} + image: {{ .Values.oauth2Plugin.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + {{- if .Values.securityContext }} + securityContext: + readOnlyRootFilesystem: true + runAsUser: 10001 + {{- end }} + livenessProbe: + {{- if .Values.httpProbe }} + httpGet: + path: /health + port: 8080 + {{- else }} + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=5 + - --spider + - http://localhost:8080/health + {{- end }} + timeoutSeconds: 5 + readinessProbe: + {{- if .Values.httpProbe }} + httpGet: + path: /health + port: 8080 + {{- else }} + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=5 + - --spider + - http://localhost:8080/health + {{- end }} + timeoutSeconds: 5 + args: + - "-license={{- .Values.oauth2Plugin.license}}" + - "-provider={{- .Values.oauth2Plugin.provider}}" + env: + - name: client_id + value: "{{- .Values.oauth2Plugin.clientID}}" + - name: client_secret + value: "{{- .Values.oauth2Plugin.clientSecret}}" + - name: cookie_domain + value: "{{- .Values.oauth2Plugin.cookieDomain}}" + - name: base_host + value: "{{- .Values.oauth2Plugin.baseHost}}" + - name: port + value: "8080" + - name: authorize_url + value: "{{- .Values.oauth2Plugin.authorizeURL}}" + - name: welcome_page_url + value: "{{- .Values.oauth2Plugin.welcomePageURL}}" + - name: public_key_path + value: "" # leave blank if using jwks + - name: audience + value: "{{- .Values.oauth2Plugin.audience}}" + - name: token_url + value: "{{- .Values.oauth2Plugin.tokenURL}}" + - name: scopes + value: "{{- .Values.oauth2Plugin.scopes}}" + - name: jwks_url + value: "{{- .Values.oauth2Plugin.jwksURL}}" + - name: insecure_tls + value: "{{- .Values.oauth2Plugin.insecureTLS}}" + {{- if .Values.basic_auth }} + - name: secret_mount_path + value: "/var/secrets" + {{- end }} + volumeMounts: + - name: oauth2-plugin-temp-volume + mountPath: /tmp + {{- if .Values.basic_auth }} + - name: auth + readOnly: true + mountPath: "/var/secrets" + {{- end }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + +{{- end }} diff --git a/openfaas/templates/oauth2-plugin-svc.yaml b/openfaas/templates/oauth2-plugin-svc.yaml new file mode 100644 index 0000000..19693ee --- /dev/null +++ b/openfaas/templates/oauth2-plugin-svc.yaml @@ -0,0 +1,26 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.oauth2Plugin.enabled }} +--- + +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: oauth2-plugin + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: oauth2-plugin + namespace: {{ .Release.Namespace | quote }} +spec: + type: ClusterIP + ports: + - port: 8080 + targetPort: http + protocol: TCP + name: http + selector: + app: oauth2-plugin + +{{- end }} diff --git a/openfaas/templates/operator-rbac.yaml b/openfaas/templates/operator-rbac.yaml new file mode 100644 index 0000000..36c191f --- /dev/null +++ b/openfaas/templates/operator-rbac.yaml @@ -0,0 +1,124 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.operator.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-operator + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: openfaas-operator + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.rbac }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }}-operator-rw + namespace: {{ $functionNs | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: openfaas-operator + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: ["openfaas.com"] + resources: ["functions"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: ["apps", "extensions"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: [""] + resources: ["pods", "pods/log", "namespaces", "endpoints"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-operator-rw + namespace: {{ $functionNs | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: openfaas-operator + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Release.Name }}-operator-rw +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }}-operator + namespace: {{ .Release.Namespace | quote }} +{{- if .Values.clusterRole}} +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-operator-controller + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: openaas-operator + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: + - apiGroups: ["openfaas.com"] + resources: ["functions"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["openfaas.com"] + resources: ["profiles"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["pods", "pods/log", "namespaces", "endpoints"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-operator-controller + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: openfaas-operator + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-operator-controller +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }}-operator + namespace: {{ .Release.Namespace | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/openfaas/templates/profile-crd.yaml b/openfaas/templates/profile-crd.yaml new file mode 100644 index 0000000..bc433da --- /dev/null +++ b/openfaas/templates/profile-crd.yaml @@ -0,0 +1,830 @@ +{{- if .Values.createCRDs }} + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + creationTimestamp: null + name: profiles.openfaas.com +spec: + group: openfaas.com + names: + kind: Profile + listKind: ProfileList + plural: profiles + singular: profile + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Profile and ProfileSpec are used to customise the Pod template + for functions + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'ProfileSpec is an openfaas api extensions that can be predefined + and applied to functions by annotating them with `com.openfaas/profile: + name1,name2`' + type: object + properties: + affinity: + description: "If specified, the pod's scheduling constraints \n copied + to the Pod Affinity, this will replace any existing value or previously + applied Profile. 
We use a replacement strategy because it is not + clear that merging affinities will actually produce a meaning Affinity + definition, it would likely result in an impossible to satisfy constraint" + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + type: array + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements + by node's fields. + type: array + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + type: array + items: + type: string + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + type: array + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + type: array + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements + by node's fields. + type: array + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + type: array + items: + type: string + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. 
for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + type: array + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. 
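+ # Illustrative sketch (not part of the generated schema): a hard pod-affinity term of the
+ # shape described above, co-locating with pods carrying an assumed app=cache label on the
+ # same node (topologyKey uses the well-known kubernetes.io/hostname label).
+ #   podAffinity:
+ #     requiredDuringSchedulingIgnoredDuringExecution:
+ #       - labelSelector:
+ #           matchLabels:
+ #             app: cache
+ #         topologyKey: kubernetes.io/hostname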
+ type: array + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
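+ # Illustrative sketch (not part of the generated schema): a weighted anti-affinity entry of
+ # the shape described above, preferring to spread pods with an assumed app=gateway label
+ # across zones via the well-known topology.kubernetes.io/zone label.
+ #   podAntiAffinity:
+ #     preferredDuringSchedulingIgnoredDuringExecution:
+ #       - weight: 100
+ #         podAffinityTerm:
+ #           labelSelector:
+ #             matchLabels:
+ #               app: gateway
+ #           topologyKey: topology.kubernetes.io/zone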
+ type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + type: array + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. 
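+ # Illustrative sketch (not part of the generated schema): the hard variant described above,
+ # refusing to co-schedule two pods with the same (assumed) app label onto one node.
+ #   podAntiAffinity:
+ #     requiredDuringSchedulingIgnoredDuringExecution:
+ #       - labelSelector:
+ #           matchLabels:
+ #             app: queue-worker
+ #         topologyKey: kubernetes.io/hostname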
+ type: array + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + podSecurityContext: + description: "SecurityContext holds pod-level security attributes + and common container settings. Optional: Defaults to empty. See + type description for default values of each field. \n each non-nil + value will be merged into the function's PodSecurityContext, the + value will replace any existing value or previously applied Profile" + type: object + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit is + set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions of + any volume." + type: integer + format: int64 + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. 
This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, configmaps + and emptydir. Valid values are "OnRootMismatch" and "Always". + If not specified defaults to "Always".' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + type: integer + format: int64 + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + type: integer + format: int64 + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + type: object + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. + type: array + items: + type: integer + format: int64 + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. + type: array + items: + description: Sysctl defines a kernel parameter to be set + type: object + required: + - name + - value + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + type: object + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + runtimeClassName: + description: "RuntimeClassName refers to a RuntimeClass object in + the node.k8s.io group, which should be used to run this pod. If + no RuntimeClass resource matches the named class, the pod will not + be run. If unset or empty, the \"legacy\" RuntimeClass will be used, + which is an implicit class with an empty definition that uses the + default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + This is a beta feature as of Kubernetes v1.14. \n copied to the + Pod RunTimeClass, this will replace any existing value or previously + applied Profile." + type: string + tolerations: + description: "If specified, the function's pod tolerations. \n merged + into the Pod Tolerations" + type: array + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +{{- end }} diff --git a/openfaas/templates/prometheus-cfg.yaml b/openfaas/templates/prometheus-cfg.yaml new file mode 100644 index 0000000..c34e419 --- /dev/null +++ b/openfaas/templates/prometheus-cfg.yaml @@ -0,0 +1,82 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.prometheus.create }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus-config + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: prometheus-config + namespace: {{ .Release.Namespace | quote }} +data: + prometheus.yml: | + global: + scrape_interval: 15s + evaluation_interval: 15s + external_labels: + monitor: 'faas-monitor' + + rule_files: + - 'alert.rules.yml' + + scrape_configs: + - job_name: 'prometheus' + scrape_interval: 5s + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'kubernetes-pods' + scrape_interval: 5s + honor_labels: false + kubernetes_sd_configs: + - role: pod + namespaces: + names: + - {{ .Release.Namespace }} +{{- if ne $functionNs (.Release.Namespace | toString) }} + - {{ $functionNs }} +{{- end }} + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + + alerting: + alertmanagers: + - static_configs: + - targets: + - alertmanager:9093 + + alert.rules.yml: | + groups: + - name: openfaas + rules: + - alert: service_down + expr: up == 0 + - alert: APIHighInvocationRate + expr: sum(rate(gateway_function_invocation_total{code="200"}[10s])) BY (function_name) > 5 + for: 5s + labels: + service: gateway + severity: major + annotations: + description: High invocation total on "{{ "{{" }}$labels.function_name{{ "}}" }}" + summary: High invocation total on "{{ "{{" }}$labels.function_name{{ "}}" }}" +{{- end }} \ No newline at end of file diff --git a/openfaas/templates/prometheus-dep.yaml b/openfaas/templates/prometheus-dep.yaml new file mode 100644 index 0000000..f13ec5c --- /dev/null +++ b/openfaas/templates/prometheus-dep.yaml @@ -0,0 +1,108 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.prometheus.create }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: prometheus + namespace: {{ .Release.Namespace | quote }} +spec: + replicas: 1 + selector: + matchLabels: + app: prometheus + template: + metadata: + labels: + app: prometheus + annotations: + sidecar.istio.io/inject: "true" + checksum/prometheus-config: {{ include (print $.Template.BasePath "/prometheus-cfg.yaml") . 
| sha256sum | quote }} + spec: + serviceAccountName: {{ .Release.Name }}-prometheus + containers: + - name: prometheus + resources: + {{- .Values.prometheus.resources | toYaml | nindent 12 }} + image: {{ .Values.prometheus.image }} + command: + - "prometheus" + - "--config.file=/etc/prometheus/prometheus.yml" + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + livenessProbe: + {{- if .Values.httpProbe }} + httpGet: + path: /-/healthy + port: 9090 + {{- else }} + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=30 + - --spider + - http://localhost:9090/-/healthy + {{- end }} + timeoutSeconds: 30 + readinessProbe: + {{- if .Values.httpProbe }} + httpGet: + path: /-/healthy + port: 9090 + {{- else }} + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=30 + - --spider + - http://localhost:9090/-/healthy + {{- end }} + timeoutSeconds: 30 + ports: + - containerPort: 9090 + protocol: TCP + volumeMounts: + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + - mountPath: /etc/prometheus/alert.rules.yml + name: prometheus-config + subPath: alert.rules.yml + - mountPath: /prometheus/data + name: prom-data + volumes: + - name: prometheus-config + configMap: + name: prometheus-config + items: + - key: prometheus.yml + path: prometheus.yml + mode: 0644 + - key: alert.rules.yml + path: alert.rules.yml + mode: 0644 + - name: prom-data + emptyDir: {} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + +{{- end }} diff --git a/openfaas/templates/prometheus-rbac.yaml b/openfaas/templates/prometheus-rbac.yaml new file mode 100644 index 0000000..98d2f48 --- /dev/null +++ b/openfaas/templates/prometheus-rbac.yaml @@ -0,0 +1,162 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.prometheus.create }} + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-prometheus + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +--- + +{{- if .Values.clusterRole }} + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-prometheus + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: [""] + resources: + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-prometheus + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-prometheus +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }}-prometheus + namespace: {{ .Release.Namespace | quote }} +{{- if ne $functionNs (.Release.Namespace | toString) }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-prometheus + namespace: {{ $functionNs | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-prometheus +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }}-prometheus + namespace: {{ .Release.Namespace | quote }} +{{- end }} + +{{- else -}} + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }}-prometheus + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: [""] + resources: + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-prometheus + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Release.Name }}-prometheus +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }}-prometheus + namespace: {{ .Release.Namespace | quote }} +{{- if ne $functionNs (.Release.Namespace | toString) }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }}-prometheus-fn + namespace: {{ $functionNs | quote }} + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: [""] + resources: + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-prometheus-fn + namespace: {{ $functionNs | quote }} + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Release.Name }}-prometheus-fn +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }}-prometheus + namespace: {{ .Release.Namespace | quote }} +{{- end }} + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/openfaas/templates/prometheus-svc.yaml b/openfaas/templates/prometheus-svc.yaml new file mode 100644 index 0000000..17e8ac9 --- /dev/null +++ b/openfaas/templates/prometheus-svc.yaml @@ -0,0 +1,22 @@ +{{- $functionNs := default .Release.Namespace .Values.functionNamespace }} +{{- if .Values.prometheus.create }} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: prometheus + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: prometheus + namespace: {{ .Release.Namespace | quote }} +spec: + type: ClusterIP + ports: + - port: 9090 + protocol: TCP + selector: + app: prometheus +{{- end }} \ No newline at end of file diff --git a/openfaas/templates/psp.yaml b/openfaas/templates/psp.yaml new file mode 100644 index 0000000..7ac977f --- /dev/null +++ b/openfaas/templates/psp.yaml @@ -0,0 +1,69 @@ +{{- if .Values.psp }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ .Release.Name }}-psp + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' +spec: + privileged: false + hostIPC: false + hostNetwork: false + hostPID: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_ADMIN + - NET_RAW + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 1 + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - '*' +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-psp + labels: + app: {{ template "openfaas.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ .Release.Name }}-psp +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-psp + namespace: {{ .Release.Namespace | quote }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-psp +subjects: + # bind the PSP cluster role to all service accounts in the OF namespace + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts:{{ .Release.Namespace }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/openfaas/templates/queueworker-dep.yaml b/openfaas/templates/queueworker-dep.yaml new file mode 100644 index 0000000..250225f --- /dev/null +++ b/openfaas/templates/queueworker-dep.yaml @@ -0,0 +1,87 @@ +{{- if .Values.async }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: queue-worker + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: queue-worker + namespace: {{ .Release.Namespace | quote }} +spec: + replicas: {{ .Values.queueWorker.replicas }} + selector: + matchLabels: + app: queue-worker + template: + metadata: + annotations: + prometheus.io.scrape: "false" + labels: + app: queue-worker + spec: + {{- if .Values.basic_auth }} + volumes: + - name: auth + secret: + secretName: basic-auth + {{- end }} + containers: + - name: queue-worker + resources: + {{- .Values.queueWorker.resources | toYaml | nindent 12 }} + image: {{ .Values.queueWorker.image }} + imagePullPolicy: {{ .Values.openfaasImagePullPolicy }} + env: + {{- if .Values.nats.external.enabled }} + - name: faas_nats_address + value: "{{ .Values.nats.external.host }}" + - name: faas_nats_port + value: "{{ .Values.nats.external.port }}" + - name: faas_nats_cluster_name + value: "{{ .Values.nats.external.clusterName }}" + {{- else }} + - name: faas_nats_address + value: "nats.{{ .Release.Namespace }}.svc.{{ .Values.kubernetesDNSDomain }}" + {{- end}} + - name: faas_nats_channel + value: "{{ .Values.nats.channel }}" + - name: faas_nats_queue_group + value: "{{ .Values.queueWorker.queueGroup }}" + - name: faas_gateway_address + value: "gateway.{{ .Release.Namespace }}.svc.{{ .Values.kubernetesDNSDomain }}" + - name: "gateway_invoke" + value: "{{ .Values.queueWorker.gatewayInvoke }}" + {{- if .Values.functionNamespace }} + - name: faas_function_suffix + value: ".{{ .Values.functionNamespace }}.svc.{{ .Values.kubernetesDNSDomain }}" + {{- end }} + - name: max_inflight + value: "{{ .Values.queueWorker.maxInflight }}" + - name: ack_wait # Max duration of any async task / request + value: {{ .Values.queueWorker.ackWait }} + {{- if .Values.basic_auth }} + - name: secret_mount_path + value: "/var/secrets" + - name: basic_auth + value: "{{ .Values.basic_auth }}" + volumeMounts: + - name: auth + readOnly: true + mountPath: "/var/secrets" + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} +{{- end }} diff --git a/openfaas/templates/secret.yaml b/openfaas/templates/secret.yaml new file mode 100644 index 0000000..951800a --- /dev/null +++ b/openfaas/templates/secret.yaml @@ -0,0 +1,19 @@ +{{- if .Values.generateBasicAuth }} +apiVersion: v1 +kind: Secret +metadata: + name: basic-auth + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "openfaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: gateway + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": "pre-install" +data: + basic-auth-user: {{ "admin" | b64enc | quote }} + # kubectl -n openfaas get secret basic-auth -o jsonpath="{.data.basic-auth-password}" | base64 --decode + basic-auth-password: {{ randAlphaNum 12 | b64enc | quote }} +{{- end }} diff --git a/openfaas/values-arm64.yaml b/openfaas/values-arm64.yaml new file mode 100644 index 0000000..041defd --- /dev/null +++ b/openfaas/values-arm64.yaml @@ -0,0 +1,48 @@ +basic_auth: true + +clusterRole: false +createCRDs: true + +nodeSelector: + beta.kubernetes.io/arch: arm64 + +gateway: + image: openfaas/gateway:0.20.1-arm64 + directFunctions: true + +oauth2Plugin: + enabled: false + +faasnetes: + image: ghcr.io/openfaas/faas-netes:0.12.12 + +operator: + image: ghcr.io/openfaas/faas-netes:0.12.12 + create: false + +queueWorker: + image: openfaas/queue-worker:0.11.2 + +prometheus: + image: prom/prometheus:v2.11.0 + resources: + requests: + memory: "100Mi" + +alertmanager: + image: prom/alertmanager:v0.18.0 + +faasIdler: + image: openfaas/faas-idler:0.4.0-arm64 + +basicAuthPlugin: + image: openfaas/basic-auth-plugin:0.20.1-arm64 + replicas: 1 + +ingressOperator: + create: false + +# Unfortunately the exporter is not multi-arch (yet) +nats: + metrics: + enabled: false diff --git a/openfaas/values-armhf.yaml b/openfaas/values-armhf.yaml new file mode 100644 index 0000000..46703cf --- /dev/null +++ b/openfaas/values-armhf.yaml @@ -0,0 +1,48 @@ +basic_auth: true + +clusterRole: false +createCRDs: true + +nodeSelector: + beta.kubernetes.io/arch: arm + +gateway: + image: openfaas/gateway:0.20.2-armhf + directFunctions: true + +oauth2Plugin: + enabled: false + +faasnetes: + image: ghcr.io/openfaas/faas-netes:0.12.12 + +operator: + image: ghcr.io/openfaas/faas-netes:0.12.12 + create: false + +queueWorker: + image: openfaas/queue-worker:0.11.2 + +prometheus: + image: prom/prometheus:v2.11.0 + resources: + requests: + memory: "100Mi" + +alertmanager: + image: prom/alertmanager:v0.18.0 + +faasIdler: + image: openfaas/faas-idler:0.4.0-armhf + +basicAuthPlugin: + image: openfaas/basic-auth-plugin:0.20.1-armhf + replicas: 1 + +ingressOperator: + create: false + +# Unfortunately the exporter is not multi-arch (yet) +nats: + metrics: + enabled: false diff --git a/openfaas/values.yaml b/openfaas/values.yaml new file mode 100644 index 0000000..bc6cc8c --- /dev/null +++ b/openfaas/values.yaml @@ -0,0 +1,203 @@ +functionNamespace: openfaas-fn # Default namespace for functions + +async: true + +exposeServices: true +serviceType: NodePort +httpProbe: true # Setting to true will use HTTP for readiness and liveness probe on the OpenFaaS system Pods (incompatible with Istio < 1.1.5) +rbac: true +clusterRole: false # Set to true to have OpenFaaS administrate multiple namespaces +createCRDs: true + +# create pod security policies for OpenFaaS control plane +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: false +securityContext: true +basic_auth: true +generateBasicAuth: false + +# image pull policy for openfaas components, can change to `IfNotPresent` in offline env +openfaasImagePullPolicy: "Always" + +gatewayExternal: + annotations: {} + +gateway: + image: openfaas/gateway:0.20.2 + readTimeout : "65s" + writeTimeout : "65s" + upstreamTimeout : "60s" # Must be smaller than read/write_timeout + replicas: 1 + scaleFromZero: true + # change the port when creating multiple 
releases in the same baremetal cluster + nodePort: 31112 + maxIdleConns: 1024 + maxIdleConnsPerHost: 1024 + directFunctions: false + # Custom logs provider url. For example openfaas-loki would be + # "http://ofloki-openfaas-loki.openfaas:9191/" + logsProviderURL: "" + resources: + requests: + memory: "120Mi" + cpu: "50m" + +basicAuthPlugin: + image: openfaas/basic-auth-plugin:0.20.1 + replicas: 1 + resources: + requests: + memory: "50Mi" + cpu: "20m" + +oauth2Plugin: + enabled: false + provider: "" # Leave blank, or put "azure" + license: "example" + insecureTLS: false + scopes: "openid profile email" + jwksURL: https://example.eu.auth0.com/.well-known/jwks.json + tokenURL: https://example.eu.auth0.com/oauth/token + audience: https://example.eu.auth0.com/api/v2/ + authorizeURL: https://example.eu.auth0.com/authorize + welcomePageURL: https://gw.oauth.example.com + cookieDomain: ".oauth.example.com" + baseHost: "http://auth.oauth.example.com" + clientSecret: SECRET + clientID: ID + resources: + requests: + memory: "120Mi" + cpu: "50m" + replicas: 1 + image: openfaas/openfaas-oidc-plugin:0.3.7 + securityContext: true + +faasnetes: + image: ghcr.io/openfaas/faas-netes:0.12.12 + readTimeout : "60s" + writeTimeout : "60s" + imagePullPolicy : "Always" # Image pull policy for deployed functions + httpProbe: true # Setting to true will use HTTP for readiness and liveness probe on Pods (incompatible with Istio < 1.1.5) + setNonRootUser: false + readinessProbe: + initialDelaySeconds: 2 + timeoutSeconds: 1 # Tuned-in to run checks early and quickly to support fast cold-start from zero replicas + periodSeconds: 2 # Reduce to 1 for a faster cold-start, increase higher for lower-CPU usage + livenessProbe: + initialDelaySeconds: 2 + timeoutSeconds: 1 + periodSeconds: 2 # Reduce to 1 for a faster cold-start, increase higher for lower-CPU usage + resources: + requests: + memory: "120Mi" + cpu: "50m" + +# replaces faas-netes with openfaas-operator +operator: + image: ghcr.io/openfaas/faas-netes:0.12.12 + create: false + # set this to false when creating multiple releases in the same cluster + # must be true for the first one only + createCRD: true + resources: + requests: + memory: "120Mi" + cpu: "50m" + +queueWorker: + image: openfaas/queue-worker:0.11.2 + # Control HA of queue-worker + replicas: 1 + # Control the concurrent invocations + maxInflight: 1 + gatewayInvoke: true + queueGroup: "faas" + ackWait : "60s" + resources: + requests: + memory: "120Mi" + cpu: "50m" + +# monitoring and auto-scaling components +# both components +prometheus: + image: prom/prometheus:v2.11.0 + create: true + resources: + requests: + memory: "512Mi" + +alertmanager: + image: prom/alertmanager:v0.18.0 + create: true + resources: + requests: + memory: "25Mi" + limits: + memory: "50Mi" + +# async provider +nats: + channel: "faas-request" + external: + clusterName: "" + enabled: false + host: "" + port: "" + image: nats-streaming:0.17.0 + enableMonitoring: false + metrics: + enabled: false + image: synadia/prometheus-nats-exporter:0.6.2 + resources: + requests: + memory: "120Mi" + +# ingress configuration +ingress: + enabled: false + # Used to create Ingress record (should be used with exposeServices: false). + hosts: + - host: gateway.openfaas.local # Replace with gateway.example.com if public-facing + serviceName: gateway + servicePort: 8080 + path: / + annotations: + kubernetes.io/ingress.class: nginx + tls: + # Secrets must be manually created in the namespace. 
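+ # Illustrative sketch only (assumes the conventional Ingress TLS block shape; check the
+ # chart's ingress template before relying on it). The secret name is a placeholder and
+ # must be created manually in the release namespace, e.g. via cert-manager or kubectl:
+ #   tls:
+ #     - secretName: openfaas-gateway-tls
+ #       hosts:
+ #         - gateway.openfaas.local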
+ +# ingressOperator (optional) – component to have specific FQDN and TLS for Functions +# https://github.com/openfaas-incubator/ingress-operator +ingressOperator: + image: openfaas/ingress-operator:0.6.6 + replicas: 1 + create: false + resources: + requests: + memory: "25Mi" + +# faas-idler configuration +faasIdler: + image: openfaas/faas-idler:0.4.0 + replicas: 1 + create: true + inactivityDuration: 30m # If a function is inactive for 15 minutes, it may be scaled to zero + reconcileInterval: 2m # The interval between each attempt to scale functions to zero + dryRun: true # Set to false to enable the idler to apply changes and scale to zero + resources: + requests: + memory: "64Mi" + +nodeSelector: + beta.kubernetes.io/arch: amd64 + +tolerations: [] + +affinity: {} + +kubernetesDNSDomain: cluster.local + +istio: + mtls: false diff --git a/openldap/.helmignore b/openldap/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/openldap/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/openldap/Chart.yaml b/openldap/Chart.yaml new file mode 100644 index 0000000..250fa0a --- /dev/null +++ b/openldap/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +name: openldap +home: https://www.openldap.org +version: 1.2.7 +appVersion: 2.4.48 +description: DEPRECATED - Community developed LDAP software +icon: http://www.openldap.org/images/headers/LDAPworm.gif +keywords: + - ldap + - openldap +sources: + - https://github.com/kubernetes/charts +deprecated: true +engine: gotpl diff --git a/openldap/README.md b/openldap/README.md new file mode 100644 index 0000000..5f4ee63 --- /dev/null +++ b/openldap/README.md @@ -0,0 +1,110 @@ +# ⚠️ Repo Archive Notice + +As of Nov 13, 2020, charts in this repo will no longer be updated. +For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/). + +# OpenLDAP Helm Chart + +## DEPRECATION NOTICE + +This chart is deprecated and no longer supported. + +## Prerequisites Details +* Kubernetes 1.8+ +* PV support on the underlying infrastructure + +## Chart Details +This chart will do the following: + +* Instantiate an instance of OpenLDAP server + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/openldap +``` + +## Configuration + +We use the docker images provided by https://github.com/osixia/docker-openldap. The docker image is highly configurable and well documented. Please consult to documentation for the docker image for more information. + +The following table lists the configurable parameters of the openldap chart and their default values. 
+ +| Parameter | Description | Default | +| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `replicaCount` | Number of replicas | `1` | +| `strategy` | Deployment strategy | `{}` | +| `image.repository` | Container image repository | `osixia/openldap` | +| `image.tag` | Container image tag | `1.1.10` | +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `extraLabels` | Labels to add to the Resources | `{}` | +| `podAnnotations` | Annotations to add to the pod | `{}` | +| `existingSecret` | Use an existing secret for admin and config user passwords | `""` | +| `service.annotations` | Annotations to add to the service | `{}` | +| `service.clusterIP` | IP address to assign to the service | `nil` | +| `service.externalIPs` | Service external IP addresses | `[]` | +| `service.ldapPort` | External service port for LDAP | `389` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` | +| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | `[]` | +| `service.sslLdapPort` | External service port for SSL+LDAP | `636` | +| `service.type` | Service type | `ClusterIP` | +| `env` | List of key value pairs as env variables to be sent to the docker image. See https://github.com/osixia/docker-openldap for available ones | `[see values.yaml]` | +| `logLevel` | Set the container log level. Valid values: `none`, `error`, `warning`, `info`, `debug`, `trace` | `info` | +| `tls.enabled` | Set to enable TLS/LDAPS - should also set `tls.secret` | `false` | +| `tls.secret` | Secret containing TLS cert and key (eg, generated via cert-manager) | `""` | +| `tls.CA.enabled` | Set to enable custom CA crt file - should also set `tls.CA.secret` | `false` | +| `tls.CA.secret` | Secret containing CA certificate (ca.crt) | `""` | +| `adminPassword` | Password for admin user. Unset to auto-generate the password | None | +| `configPassword` | Password for config user. Unset to auto-generate the password | None | +| `customLdifFiles` | Custom ldif files to seed the LDAP server. List of filename -> data pairs | None | +| `persistence.enabled` | Whether to use PersistentVolumes or not | `false` | +| `persistence.storageClass` | Storage class for PersistentVolumes. | `` | +| `persistence.accessMode` | Access mode for PersistentVolumes | `ReadWriteOnce` | +| `persistence.size` | PersistentVolumeClaim storage size | `8Gi` | +| `persistence.existingClaim` | An Existing PVC name for openLDAPA volume | None | +| `resources` | Container resource requests and limits in yaml | `{}` | +| `initResources` | initContainer resource requests and limits in yaml | `{}` | +| `test.enabled` | Conditionally provision test resources | `false` | +| `test.image.repository` | Test container image requires bats framework | `dduportal/bats` | +| `test.image.tag` | Test container tag | `0.4.0` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/openldap +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + + +## Cleanup orphaned Persistent Volumes + +Deleting the Deployment will not delete associated Persistent Volumes if persistence is enabled. 
+ +Do the following after deleting the chart release to clean up orphaned Persistent Volumes. + +```bash +$ kubectl delete pvc -l release=${RELEASE-NAME} +``` + +## Custom Secret + +`existingSecret` can be used to override the default secret.yaml provided + +## Testing + +Helm tests are included and they confirm connection to slapd. + +```bash +helm install . --set test.enabled=true +helm test +RUNNING: foolish-mouse-openldap-service-test-akmms +PASSED: foolish-mouse-openldap-service-test-akmms +``` + +It will confirm that we can do an ldapsearch with the default credentials diff --git a/openldap/templates/NOTES.txt b/openldap/templates/NOTES.txt new file mode 100644 index 0000000..09cf0ed --- /dev/null +++ b/openldap/templates/NOTES.txt @@ -0,0 +1,20 @@ +OpenLDAP has been installed. You can access the server from within the k8s cluster using: + + {{ template "openldap.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ldapPort }} + + +You can access the LDAP adminPassword and configPassword using: + + kubectl get secret --namespace {{ .Release.Namespace }} {{ template "openldap.secretName" . }} -o jsonpath="{.data.LDAP_ADMIN_PASSWORD}" | base64 --decode; echo + kubectl get secret --namespace {{ .Release.Namespace }} {{ template "openldap.secretName" . }} -o jsonpath="{.data.LDAP_CONFIG_PASSWORD}" | base64 --decode; echo + + +You can access the LDAP service, from within the cluster (or with kubectl port-forward) with a command like (replace password and domain): + ldapsearch -x -H ldap://{{ template "openldap.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ldapPort }} -b dc=example,dc=org -D "cn=admin,dc=example,dc=org" -w $LDAP_ADMIN_PASSWORD + + +Test server health using Helm test: + helm test {{ .Release.Name }} + + +You can also consider installing the helm chart for phpldapadmin to manage this instance of OpenLDAP, or install Apache Directory Studio, and connect using kubectl port-forward. diff --git a/openldap/templates/_helpers.tpl b/openldap/templates/_helpers.tpl new file mode 100644 index 0000000..75a118d --- /dev/null +++ b/openldap/templates/_helpers.tpl @@ -0,0 +1,40 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "openldap.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "openldap.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "openldap.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* +Generate chart secret name +*/}} +{{- define "openldap.secretName" -}} +{{ default (include "openldap.fullname" .) 
.Values.existingSecret }} +{{- end -}} diff --git a/openldap/templates/configmap-customldif.yaml b/openldap/templates/configmap-customldif.yaml new file mode 100644 index 0000000..f060d1d --- /dev/null +++ b/openldap/templates/configmap-customldif.yaml @@ -0,0 +1,23 @@ +# +# A ConfigMap spec for openldap slapd that map directly to files under +# /container/service/slapd/assets/config/bootstrap/ldif/custom +# +{{- if .Values.customLdifFiles }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "openldap.fullname" . }}-customldif + labels: + app: {{ template "openldap.name" . }} + chart: {{ template "openldap.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.extraLabels }} +{{ toYaml .Values.extraLabels | indent 4 }} +{{- end }} +data: +{{- range $key, $val := .Values.customLdifFiles }} + {{ $key }}: |- +{{ $val | indent 4}} +{{- end }} +{{- end }} diff --git a/openldap/templates/configmap-env.yaml b/openldap/templates/configmap-env.yaml new file mode 100644 index 0000000..d8fe9a4 --- /dev/null +++ b/openldap/templates/configmap-env.yaml @@ -0,0 +1,20 @@ +# +# A ConfigMap spec for openldap slapd that map directly to env variables in the Pod. +# List of environment variables supported is from the docker image: +# https://github.com/osixia/docker-openldap#beginner-guide +# Note that passwords are defined as secrets +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "openldap.fullname" . }}-env + labels: + app: {{ template "openldap.name" . }} + chart: {{ template "openldap.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.extraLabels }} +{{ toYaml .Values.extraLabels | indent 4 }} +{{- end }} +data: +{{ toYaml .Values.env | indent 2 }} diff --git a/openldap/templates/deployment.yaml b/openldap/templates/deployment.yaml new file mode 100644 index 0000000..3110ebd --- /dev/null +++ b/openldap/templates/deployment.yaml @@ -0,0 +1,177 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "openldap.fullname" . }} + labels: + app: {{ template "openldap.name" . }} + chart: {{ template "openldap.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.extraLabels }} +{{ toYaml .Values.extraLabels | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicaCount }} +{{- if .Values.strategy }} + strategy: +{{ toYaml .Values.strategy | indent 4 }} +{{- end }} + selector: + matchLabels: + app: {{ template "openldap.name" . }} + release: {{ .Release.Name }} + template: + metadata: + annotations: + checksum/configmap-env: {{ include (print $.Template.BasePath "/configmap-env.yaml") . | sha256sum }} +{{- if .Values.customLdifFiles}} + checksum/configmap-customldif: {{ include (print $.Template.BasePath "/configmap-customldif.yaml") . | sha256sum }} +{{- end }} +{{- if .Values.podAnnotations}} +{{ toYaml .Values.podAnnotations | indent 8}} +{{- end }} + labels: + app: {{ template "openldap.name" . 
}} + release: {{ .Release.Name }} + spec: + {{- if or .Values.customLdifFiles .Values.tls.enabled }} + initContainers: + {{- end }} + {{- if .Values.customLdifFiles }} + - name: {{ .Chart.Name }}-init-ldif + image: busybox + command: ['sh', '-c', 'cp /customldif/* /ldifworkingdir'] + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + - name: customldif + mountPath: /customldif + - name: ldifworkingdir + mountPath: /ldifworkingdir + resources: +{{ toYaml .Values.initResources | indent 10 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: {{ .Chart.Name }}-init-tls + image: busybox + command: ['sh', '-c', 'cp /tls/* /certs'] + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + - name: tls + mountPath: /tls + - name: certs + mountPath: /certs + resources: +{{ toYaml .Values.initResources | indent 10 }} + {{- if .Values.tls.CA.enabled }} + - name: {{ .Chart.Name }}-init-catls + image: busybox + command: ['sh', '-c', 'cp /catls/ca.crt /certs'] + volumeMounts: + - name: catls + mountPath: /catls + - name: certs + mountPath: /certs + resources: +{{ toYaml .Values.initResources | indent 10 }} + {{- end }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - -l + - {{ .Values.logLevel }} +{{- if .Values.customLdifFiles }} + - --copy-service +{{- end }} + ports: + - name: ldap-port + containerPort: 389 + - name: ssl-ldap-port + containerPort: 636 + envFrom: + - configMapRef: + name: {{ template "openldap.fullname" . }}-env + - secretRef: + name: {{ template "openldap.secretName" . }} + volumeMounts: + - name: data + mountPath: /var/lib/ldap + subPath: data + - name: data + mountPath: /etc/ldap/slapd.d + subPath: config-data + {{- if .Values.customLdifFiles }} + - name: ldifworkingdir + mountPath: /container/service/slapd/assets/config/bootstrap/ldif/custom + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /container/service/slapd/assets/certs + {{- end }} + env: + {{- if .Values.tls.enabled }} + - name: LDAP_TLS_CRT_FILENAME + value: tls.crt + - name: LDAP_TLS_KEY_FILENAME + value: tls.key + {{- if .Values.tls.CA.enabled }} + - name: LDAP_TLS_CA_CRT_FILENAME + value: ca.crt + {{- end }} + {{- end }} + livenessProbe: + tcpSocket: + port: ldap-port + initialDelaySeconds: 20 + periodSeconds: 10 + failureThreshold: 10 + readinessProbe: + tcpSocket: + port: ldap-port + initialDelaySeconds: 20 + periodSeconds: 10 + failureThreshold: 10 + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if .Values.customLdifFiles }} + - name: customldif + configMap: + name: {{ template "openldap.fullname" . }}-customldif + - name: ldifworkingdir + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: tls + secret: + secretName: {{ .Values.tls.secret }} + {{- if .Values.tls.CA.enabled }} + - name: catls + secret: + secretName: {{ .Values.tls.CA.secret }} + {{- end }} + {{- end }} + - name: certs + emptyDir: + medium: Memory + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "openldap.fullname" .) 
diff --git a/openldap/templates/pvc.yaml b/openldap/templates/pvc.yaml
new file mode 100644
index 0000000..96d6c86
--- /dev/null
+++ b/openldap/templates/pvc.yaml
@@ -0,0 +1,27 @@
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ template "openldap.fullname" . }}
+  labels:
+    app: {{ template "openldap.name" . }}
+    chart: {{ template "openldap.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+{{- if .Values.extraLabels }}
+{{ toYaml .Values.extraLabels | indent 4 }}
+{{- end }}
+spec:
+  accessModes:
+    - {{ .Values.persistence.accessMode | quote }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size | quote }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/openldap/templates/secret.yaml b/openldap/templates/secret.yaml
new file mode 100644
index 0000000..9c7953a
--- /dev/null
+++ b/openldap/templates/secret.yaml
@@ -0,0 +1,18 @@
+{{ if not .Values.existingSecret }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "openldap.fullname" . }}
+  labels:
+    app: {{ template "openldap.name" . }}
+    chart: {{ template "openldap.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+{{- if .Values.extraLabels }}
+{{ toYaml .Values.extraLabels | indent 4 }}
+{{- end }}
+type: Opaque
+data:
+  LDAP_ADMIN_PASSWORD: {{ .Values.adminPassword | default (randAlphaNum 32) | b64enc | quote }}
+  LDAP_CONFIG_PASSWORD: {{ .Values.configPassword | default (randAlphaNum 32) | b64enc | quote }}
+{{ end }}
diff --git a/openldap/templates/service.yaml b/openldap/templates/service.yaml
new file mode 100644
index 0000000..e1bb2d3
--- /dev/null
+++ b/openldap/templates/service.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.service.annotations }}
+  annotations:
+{{ toYaml .Values.service.annotations | indent 4 }}
+{{- end }}
+  name: {{ template "openldap.fullname" . }}
+  labels:
+    app: {{ template "openldap.name" . }}
+    chart: {{ template "openldap.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+{{- if .Values.extraLabels }}
+{{ toYaml .Values.extraLabels | indent 4 }}
+{{- end }}
+spec:
+  {{- with .Values.service.clusterIP }}
+  clusterIP: {{ . | quote }}
+  {{- end }}
+{{- if .Values.service.externalIPs }}
+  externalIPs:
+{{ toYaml .Values.service.externalIPs | indent 4 }}
+{{- end }}
+{{- if .Values.service.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.service.loadBalancerIP | quote }}
+{{- end }}
+{{- if .Values.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
+{{- end }}
+  ports:
+    - name: ldap-port
+      protocol: TCP
+      port: {{ .Values.service.ldapPort }}
+      targetPort: ldap-port
+    - name: ssl-ldap-port
+      protocol: TCP
+      port: {{ .Values.service.sslLdapPort }}
+      targetPort: ssl-ldap-port
+  selector:
+    app: {{ template "openldap.name" . }}
+    release: {{ .Release.Name }}
+  type: {{ .Values.service.type }}
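The service exposes ldap-port and ssl-ldap-port and defaults to ClusterIP. To publish it outside the cluster, the same keys can be overridden, for example as below; the source range is only an illustration and LoadBalancer support depends on the environment:

    service:
      type: LoadBalancer
      ldapPort: 389
      sslLdapPort: 636
      loadBalancerSourceRanges:
        - 10.0.0.0/8  # illustrative source range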
diff --git a/openldap/templates/tests/openldap-test-runner.yaml b/openldap/templates/tests/openldap-test-runner.yaml
new file mode 100644
index 0000000..cfcaf21
--- /dev/null
+++ b/openldap/templates/tests/openldap-test-runner.yaml
@@ -0,0 +1,50 @@
+{{- if .Values.test.enabled -}}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ template "openldap.fullname" . }}-test-{{ randAlphaNum 5 | lower }}"
+  labels:
+    app: {{ template "openldap.name" . }}
+    chart: {{ template "openldap.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+{{- if .Values.extraLabels }}
+{{ toYaml .Values.extraLabels | indent 4 }}
+{{- end }}
+  annotations:
+    "helm.sh/hook": test-success
+spec:
+  initContainers:
+    - name: test-framework
+      image: {{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}
+      command:
+        - "bash"
+        - "-c"
+        - |
+          set -ex
+          # copy bats to tools dir
+          cp -R /usr/local/libexec/ /tools/bats/
+      volumeMounts:
+        - mountPath: /tools
+          name: tools
+  containers:
+    - name: {{ .Release.Name }}-test
+      image: {{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}
+      envFrom:
+        - secretRef:
+            name: {{ template "openldap.secretName" . }}
+      command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
+      volumeMounts:
+        - mountPath: /tests
+          name: tests
+          readOnly: true
+        - mountPath: /tools
+          name: tools
+  volumes:
+    - name: tests
+      configMap:
+        name: {{ template "openldap.fullname" . }}-tests
+    - name: tools
+      emptyDir: {}
+  restartPolicy: Never
+{{- end -}}
diff --git a/openldap/templates/tests/openldap-tests.yaml b/openldap/templates/tests/openldap-tests.yaml
new file mode 100644
index 0000000..1cdeb80
--- /dev/null
+++ b/openldap/templates/tests/openldap-tests.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.test.enabled -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "openldap.fullname" . }}-tests
+  labels:
+    app: {{ template "openldap.name" . }}
+    chart: {{ template "openldap.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+{{- if .Values.extraLabels }}
+{{ toYaml .Values.extraLabels | indent 4 }}
+{{- end }}
+data:
+  run.sh: |-
+    @test "Testing connecting to slapd server" {
+      # Ideally these tools would be baked into the test image, but there is no generic image
+      # available with both bats and ldap-utils installed, and publishing one just for this is not worth it yet.
+      apt-get update && apt-get install -y ldap-utils
+      ldapsearch -x -H ldap://{{ template "openldap.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ldapPort }} -b "dc=example,dc=org" -D "cn=admin,dc=example,dc=org" -w $LDAP_ADMIN_PASSWORD
+    }
+{{- end -}}
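The test Pod and its bats script are rendered only when tests are enabled; once the release is installed with values like the sketch below, helm test <release> runs the ldapsearch connectivity check:

    test:
      enabled: true
      image:
        repository: dduportal/bats
        tag: 0.4.0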
diff --git a/openldap/values.yaml b/openldap/values.yaml
new file mode 100644
index 0000000..9601802
--- /dev/null
+++ b/openldap/values.yaml
@@ -0,0 +1,120 @@
+# Default values for openldap.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+# Define the deployment strategy - IMPORTANT: set rollingUpdate: null when using the Recreate strategy.
+# This prevents merging with existing map keys, which is forbidden.
+strategy: {}
+  # type: RollingUpdate
+  # rollingUpdate:
+  #   maxSurge: 1
+  #   maxUnavailable: 0
+  #
+  # or
+  #
+  # type: Recreate
+  # rollingUpdate: null
+image:
+  # From repository https://github.com/osixia/docker-openldap
+  repository: osixia/openldap
+  tag: 1.2.4
+  pullPolicy: IfNotPresent
+
+# Specifies an existing secret to be used for admin and config user passwords
+existingSecret: ""
+
+# Settings for enabling TLS
+tls:
+  enabled: false
+  secret: ""  # The name of a kubernetes.io/tls type secret to use for TLS
+  CA:
+    enabled: false
+    secret: ""  # The name of a generic secret to use for custom CA certificate (ca.crt)
+## Add additional labels to all resources
+extraLabels: {}
+## Add additional annotations to pods
+podAnnotations: {}
+service:
+  annotations: {}
+
+  ldapPort: 389
+  sslLdapPort: 636  # Only used if tls.enabled is true
+  ## List of IP addresses at which the service is available
+  ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+  ##
+  externalIPs: []
+
+  loadBalancerIP: ""
+  loadBalancerSourceRanges: []
+  type: ClusterIP
+
+# Default configuration for openldap as environment variables. These get injected directly in the container.
+# Use the env variables from https://github.com/osixia/docker-openldap#beginner-guide
+env:
+  LDAP_ORGANISATION: "Example Inc."
+  LDAP_DOMAIN: "example.org"
+  LDAP_BACKEND: "hdb"
+  LDAP_TLS: "true"
+  LDAP_TLS_ENFORCE: "false"
+  LDAP_REMOVE_CONFIG_AFTER_SETUP: "true"
+
+# Default passwords to use, stored as a secret. If unset, passwords are auto-generated.
+# You can override these at install time with
+# helm install openldap --set openldap.adminPassword=<secret>,openldap.configPassword=<secret>
+# adminPassword: admin
+# configPassword: config
+
+# Custom openldap configuration files used to override default settings
+# customLdifFiles:
+#   01-default-users.ldif: |-
+#     # Predefine users here
+## Persist data to a persistent volume
+persistence:
+  enabled: false
+  ## database data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner. (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+  accessMode: ReadWriteOnce
+  size: 8Gi
+  # existingClaim: ""
+
+resources: {}
+  # requests:
+  #   cpu: "100m"
+  #   memory: "256Mi"
+  # limits:
+  #   cpu: "500m"
+  #   memory: "512Mi"
+
+initResources: {}
+  # requests:
+  #   cpu: "100m"
+  #   memory: "128Mi"
+  # limits:
+  #   cpu: "100m"
+  #   memory: "128Mi"
+
+nodeSelector: {}

+tolerations: []
+
+affinity: {}
+
+## test container details
+test:
+  enabled: false
+  image:
+    repository: dduportal/bats
+    tag: 0.4.0
+
+# Set the container log level
+# Valid log levels: none, error, warning, info (default), debug, trace
+logLevel: info
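Tying the defaults above together, a small persistent installation could be configured with an override like the following sketch; the passwords are placeholders, and when left unset they are generated randomly and stored in the chart's Secret:

    adminPassword: "change-me"       # placeholder value
    configPassword: "change-me-too"  # placeholder value
    persistence:
      enabled: true
      size: 8Gi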
diff --git a/peertube/.helmignore b/peertube/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/peertube/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/peertube/Chart.yaml b/peertube/Chart.yaml
new file mode 100644
index 0000000..2e5e219
--- /dev/null
+++ b/peertube/Chart.yaml
@@ -0,0 +1,15 @@
+apiVersion: v2
+name: peertube
+description: A PeerTube Helm chart for Kubernetes
+dependencies:
+- condition: postgresql.enabled
+  name: postgresql
+  repository: https://charts.bitnami.com/bitnami
+  version: 10.2.1
+- condition: redis.enabled
+  name: redis
+  repository: https://charts.bitnami.com/bitnami
+  version: 12.3.2
+type: application
+version: 0.1.0
+appVersion: 3.0.0
diff --git a/peertube/charts/postgresql-10.2.1.tgz b/peertube/charts/postgresql-10.2.1.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..8ba24509191e67d18a6c5959ebbb2188d50f8f7a
Binary files /dev/null and b/peertube/charts/postgresql-10.2.1.tgz differ
ztnxp6lV^>bP$^l4$^3A?6){RkKab*2QI3=bmvqR4-&!N*qeu{urzy{*AY#Pg7)&C_ zn95)jCBsdUv6#x}N)yxKJx#*a8cEm?jcC_VRiKR($$xG5ggRAp3~cV_#gSt;c?!UbY_la-6mv|6i>&@}6c9&jmU9|JZxh-ZpM!zwfUYNC6V( zc-NNQ=AP?|;v%ox8ch?!NgLn>K}GG3B*rUpOLAqab^rSThvYtFy==>N3S>Vd@-834 zd2vY23}3z0QBo-R1yd6xJVUnU$|?CJN~SuM8SedqbkS1zKYjiD)i1A;Np|}_R?Gi) z-yeQIcmCV^{`>EG-{k*3o-Vv__d*3}c*6&ZsTsVq+z>_2;nPo;Puj0PKM6NW*-0YN zW4d8#&&YzOdM|fHfA{>7KD_y# zSEo_b@As{}9c5Uc$WWvly-hFZI&#+@St?UhnoDs6`^iOe5u7soWn4$ql~}%$=A~Hv zx9yC2956kO9)SfPs*Yz+7It}2l_I!U!G8<=PJ7WA7ugZm6+9pP!is2uhFP21g6sfZ zuw8pce+!197czb|YPp8}W`8YR%6hLieIwbN1~zFqC;N&)pqzgBd_s;8sVtDzj2Ryek=7m>(;D0^JRc6Z;D%dZFD+ztD(4vr ziv?8Bd@kV?>Qr%Bp2Eh@CA_{U+?9LBqA1P!3ejb}`dRmVTh%V3OXz#CXBDOrAu~+0 z9GX5{!YP_k31DDffc@lgI&&*;z6e{F&~JL+{@=TI9lzJ2!`5Tx#q-mS{rCSnJAGIM z$(X6E1&Mue-;fkC^C-E3lR{1&+o0K?%C+DY);=jFmhGCV&7W zW0~G-Yr0!`^bi9h(=!3|3k&qA=hyiT=8s*&hsL36^{Tf6?djM{u;z7OvfY{R)*0jcxTcI@YlZYoL;}1iwlF&pXu(EPL$%LOx>QCJh1ZGH8#ciFKIfQwoDG_k&uN@?X&^GUqHgHz#)2vo z09Rr0s8(C~zAHM0a_GH{hFxZ=Q-M`B`ceioC2B_#4q|DYN*QemT2rz${<=jJTXXH{i~%eqw8~jdP<3BGH(`<^{t0 z+tl;ZmB6|M_Sjr-NXN6iRXFdFZYg&riCcLG6Rj)>x=vbZ0oD zt)*&o-tXxQXKhKl)#$3cNHk~PVhAfa(@CL`ij8o4#}f}DlG{7fco?DE-cd-!c!kx{ z+E~kKG1oN;ELh2%)L%b8={CwPkrm>m4~5HCvc`DZ*bMlJ_4{g$+n9^KVjbV)NmJxi zM779JIexUibmbat#>`Ffj0;^Kus64($|bDm92=Wzd$;!bt-Osvk7b!>;6QLOTcMlX z%DfmV{G&u+@>!^yw#<+IqrNrAtX<-{_E*E#FE?&sy>5+*H`v*|DL-wE51U-Gz`K`r zPT)n!bGwM0awWyYIqfP=$L(Y)xII+nyt0!TkDikjxxn|YhsQY>=bV5od!HyS3~=GJ zoL$6LJ`G{kcP{;NwC*oV*AR=g_r8BiEyUqvd%txa#yNj*XJnaL{)S3Usue~b!_QB;A?T3Cu>))9(vEoRFQQ8*vOG)_jtwZX z>8%deV`fbncgRQgYS7tV6=<~yUA7Zn+20IB5Av*|%Ulqh_B&YQ2-#$lWkw0@a+*Jnp-*Hd?WywhaV zbB~oo$vo#`^ns1+YH<5qkLFle3$ zR4d^!iW;F#Yq~nEN)n^io>9$~eMr=v7;$kCMTU*mHS5|jY}9O~m-L<`y*R@GXCm(H z$LiT(6cuHjpU9l2Yhfj14`;w!^CwlsDpM70Q9XX1pR<{M418F_VR81D5*eAtyDsOH zMnlDqhi|1hQK(Uv#||EUHd_P4lwz|8fE#paBvwNjaxCOC&;e-S8UZ=1!T0?nQIz4J zY+!ZZ@^)RpfCHKI5g$6OLrN;=XK24zCdLb+7;rI^YT|rXT#TJ=(!yo*vMl^u(DIOp z6n$d@Jc_y%_ca--+X|vHv{ z%$0}6=}po^g2iJLX?FzpGG|(^W5FNCQoF<$HGl?wlX*G8aFz(v(k>hi1B-Ry&Mq{& zc}vlMosN&D%DA?0YRG05p{a}4(_C8p)q=PaccO@;DQf+T%y1pJF#WL9SgUkZ1t zqf=zrN5u`kBLZmt=i1KR^FQ$#J^cGe9z_K+<8|$x_-rhLnvhwmr7C1|6*~vkaYXO7 z$l>tXg$82h5YPp}00G^lUF1f$70eSpgGvd%(s4qvtL+I za6xLL)s}x>h19w>{bYZJGk3_a$XmyLz7p9ky0rqe3r%3V0F^yx3xx!y zKjW-IrNt!;*aB8sP4&?`lvCy#&?q#g^a3P{WFR(!M7gL=DRx2234-Ah)V6_hCP*kp zwziSBb>+^<3aK@xSdf|PmMn!aa;4|7VO4gZZWrn#c^+5vK91wX5y2iSHxCB}?+NE7 zB32ZeS9K(OSFk@ytipUcXPwh0{~1 z6k6tZo0NU~`}do)Z@+hgw0#@q?OU6k`PK>ih&IPVtC5CMrFQ0&y4o%GH@Q(DjMZ5b z@Pg=68Ou=_-6EUMPe97pE|gIln9O{(RZ71!N2(t>o!vQX{?O?V&p)0kx?z5&`3@ch zQUBdGF_-medETn1k~_R8q%zQG=7&d5&dLF%Ca20Fw;G)->w=|Nxr5f+0I`HGS_%u^ z51qdftMifuAO7933XW-f_8r7=hN;YOCt8Gv-i;8^Ya*hz9Ypl*01>^#hzO}Aeho2P z-qA@yFWFD_qv+E+0dM6LCj(T_JKTkX{e%5omxW+@5RwGJInzK361yfEa>4Z&v)Y?} z#Lc)IP{Iex$UZMeR~u$Z9m`>C#u)p2J3G5z#$3aMeUYki#`V4~3bh@!H)pE-r*zE3 z2(?S8;Bs5o?{($~FLAV7Fw&|BsR~&+aMl?PhR95*Y*Sh0vLCpi9y)u9TCq~;4PeS~ zQFY}<0NiNIhjxj@ROYRW3T3a~y-A|z>C=zoPvikVM}@$Beh>7S`sR`FzfYe=Q4A*v zrNAs2=lFS%p4&67Sn@d|{{FqpwWnz5^NK*Ku>xNkIMDeID<(D2)B9$anH z<);8C^4U%TpMFAxs}sqD1TvNdSi$|EoYTk-t$|Y}EKg-Hk%Xmsb4KyysV5jM)GUPf4K2+#Sn_9HBr<`ExdF?V5)4CXG}nmc?FLM$J&U$LITvTz@j~d?_SnjyWJ)6yXSv=}sAWFI*aCMz z{~pGL)F!S0adh5mw71Df78Iu-ELQ?!I|k}6o*u+T`k#mYq@--$o^Q{$=i74+&wl~{ O0RR7pT=t0o-UR?DoDmcN literal 0 HcmV?d00001 diff --git a/peertube/charts/redis-12.3.2.tgz b/peertube/charts/redis-12.3.2.tgz new file mode 100644 index 0000000000000000000000000000000000000000..5f86dcd5d0a9415d6c3b2bfeaafb5e47e01b891f GIT binary patch literal 66001 zcmV)fK&8JQiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc 
zjaXp=zh|mhgED*u7$4lo^vs0)>ck(6$gl%9>iFIHULwwDnbdZs9h9@?*k>}LDnZcu z9oU{BD}M8m?Q1#2L47cfXi4Nj$CQuQgj`m!d)(_PS@c1Y4KH|KHgvjvNN4u^;TO8G zym=~qOt-nQ<8{pC3(Dnt*OSW?n!XL%0v#)OE{#zh-;`-OwR${~y$?~+osM`w zBXpuTQD_#kXToj>V+yN&Io;?+l#x~9vQzHb)9r4GS}`a&JXkdFFk5xyce7KROzY+S zQXX4RZJFU4n)Fesb5p3rhXdhHfJ(>v_QLpU^Gi^?)!p^G-{9v{i>+>o(&WTu*6WUt zcp@k}YzHOI4A5JMK)r(-nHO8s)a^1%B{$8U?#>%qp6z4p3XrZv+Y>R9?A^<#$6sgS z{mu0L@ju#@8sDYycvwd|#2Y#ZgY$=Ypcww@pY+RG9{H4_ztwU+V;I-X3v%C}HM9wA zVSFZE&}QyVn1ikJKDNs}%S;pRuNxbmP9NWCmEt%N^%TB^$)v9JZvag|M?G$%$_-3# z8B9MH(e+hKGAOO>n^ee4G$*pkC*1V3sT5Ctvf}h0_seHGU9f}Jv^4+9>->qV??y2T zAQ*b$j=yuj4SHB({eT2mvE9`b7X1cr;FLe)7kJ6%6`UJl~%{)<7*Utal zAyw;O`?(RrHgrJi310E=ES+dUD-WwxLZH8|p>!B=t*#&*KH;GL;qtHMO9<{iy~bxv z<%_x)Gy9eoU7^qU>qJ3|V@3kL_GvkyYN=V`swz^~@5u99J`FZ!(QBD8lNjw|*Fw>I zC&A71dq-fti?#K1+^D;L+=%{Q?g=h4;$S}KzG6aTU#HPFz)tpD$0|qvPxiILUOc@o z!J-dw4^!MT-}LT>wOT6~9Qx=V`uMXdSmYkM{LijN_dlwf{=YHS88Y~EO#RWW?4J6Q zIqRR>N9@G*flmW z$S%cxGHk(fqgN+Faffl08w=Tkzn z>Mguq<(&|l#hhHiw?$~HlOUFVv5)VDJ=&-Lc@8bKMY{YD{+@q@ER}qFIn-!|o2)67 zyea%#weq;|lNuq;+FQa~LDtY>1@^9!y!v0|Cb@R|81$bJfZK_`!^xjbP2AM=!P|UO zKWr6OhdYR@av9TIK}I^VXKp889m-}y=f+erC@$E_W4%+m>EiB=M4L&!DkLbato&Uk zYe92yXyRr1XD!ml$IZ9Xm)fs46IiW@J?KL+JgRS#e@mNY3VSc4KfBY17pu*n=GxxP z9MhH@1%$t4Z6y{+36?_WH7OO?C=^J-v{;;vRswnqWFiB(-hY}A%KwL}F{E-kwpx4% zYS)^0?|yfEAN2hX%LlNEMqbDSltHwGCBA?#F&`)iMZ!B7IDb{Itpty=ne?VeSAOdq z;z2M5&xE+$RT);WQW2lqak_%8&`(ll83zgVI^ZKY;7ZKHX&GmMuK zxFWepHHhj0&3w>QWMITFjTawl#vKc%yfy$GC;m(OspU5t@2B>?EVR^|CeE(z6MP%A zrSHAO7w$1%Rg}`>j)l4<3}hzXZ+63qyoNVo-bo!er`}&be58ihOYZ_#wgT@Y+bh9t z?Ww0P^zW|@b`w7H7kBQm;;U<#T z$p*ZmcX6LZt4ZMy2FBpyhhv6QamP7-@W4rmq9JF3_`%t@Oe%@t6fMwZ14EgAg@tjg zzVz0;uq5#QyZZ{*#sag&4142m^FWG%6SZ6CR+fXuTZ07nF5Cr!-Wb0sIrdUz@Wm&N z$f>jco}!M%MmHr?5$;BNibqATtqZKB;euXVsQpQtn`y>hN{@&qB?6UNQMCX!LUfBk zyX>7aIIqM{heSev?YscR?$Hs~t`Zxk{(@ckL0H7RzLhJZu;{TTf=hE9GL!MF>a|{zjXiW?CngensG)?3X?36X~K-_@)`#mx|i?!S6 zLtjs6r;iR9A6m0q2t`DiMHt>tJoHnLmJl9wga#pU$R*hmOC&F(GrUTATIxw6>PjOg zs%>eIM3Op(h{5KE!|5%;#(A0z7zeu`5OUNU41Z9`z^G+Hc{y)JJcv?kmiAIZv>hGr zUsyqygsw&{emHZ82WB~}B3~3KW;=_A?}hSn5B*3ZwG=IQskpbCJ*l~cbwFCTv-1<$K%C@b@DDclg)LeIHRMN>zDjxni6AUF4Hh;;{KRk@O$s!4wQ zLv3Z4x_f8~4*e*IzzurR8D74UxT;>!$_-=(>lGEs>eClh>GqCoa@WTp`G1Ck|M@e0 z5RDF98MU**SqG#?l*lXr6i(IcNQ2rtg2f`jPSQLlUVL8oo@<9S!Q*q@=X_r0{?ncN zB!}&%>wOWkIW#i8GGOC&dQ|j&jUw~(GD$A({hyZtII<1Pok})9v%XaXICIK4)(H}4 z{;kmPBB*`^d7jAVl%#$fwF~g6)8X^gl>1U;pD{y8Xyrf)3DRLl;ERmXot(gF;2L>q za$b!kwP%rY(~sw?vQX>t%Ca&aiPxx;DDBE=Q^v3byzNX6K4x<}x-*aafA2-R^dkQ{ zf1aYUN=*-bZ!{u}J^mn`M2h06ol$2nlgXvb47-pHawA5u$)6{d;+)bT|H63^J%RbiJEtUg&#_?593UGl;g%vTqh1<;s}WE)Vhq!Bp3A$GKG!`U zo3O2wWr~TM@#0`dAsb{5-7jmAdQuLP8UQOOxn7P9NB^%+$y7pgg=|5-?B?^s`A<2_ zfO`XyP0S&YTMe(YRjHc+|C2|aUi}}i+#yU|CGvI#Gz^B+j`1|N z_#CXzc5@3bs~p1dDql$*)hpv+=b>O5LI)9LWu`}d;46G$+pefx5IVf##Tl(B&UPUF^quFBEb@mWdFp9EVqNBnx8MxTYvTzv-mVU08){^P( zT0e~n-oX_>RXN|fsqG1fWXd!$nW)i_Fm znNjm-Ydv|?>N$N@T+aL8=?M_#)cp6h`C=s!Yblv7>ZUiq)LE_F@cbeyF9$@|EFF@+dEsX(8UkS z<6E7z`Lf=3xHcX}B9u$q`K3EQ4v`Vb{1{|;*(*bBN`WnZcbEU=!JnPKX%OA7}c-s?i9VO-()1VCjcb;L-abeVTh*XOSjOU$3) zr1c(542^nn?duvdb2zrL@LUFD1wz68c9l)=;wl!L(#tl9`x^7F7uqCv&83+5^zW;Le820EVayq6snTe5EqxFwtc8bd! zzkc)^0(_$ihgrt1Bh*9-08g{sk7@>FLAkLIepqGnneS!+kW$bUYNGW+{PzzT{OI$d zdT~utx(_EPon#-wql9WHlpi)4X@;lE`&&uHqkXFfsdU8H;ghNrj>IS*?*xgEG{IJ z6eMQu>uo%g;r)?gE-)=$QW`|Vy9R%*L@&tF&CnvN$!MRvEe2oLuacj36~&ULJxf{S z1l3u|@G7Ti z!*o=KmBH`f&|eREGW4wS2Hqshf{BuYL+emeEDP25P3989sqKn$M3D`jN8=e_kWT)? 
zkVd`5G!$<=zCH(Igs@`&=k?>I>#`vxmG&L7VJA+asDqQ zH@lYP(yW7Of;@IyaxFF+8(fz-rX~9;9{x=1PG7>uO&214&xTM}oj^-W>I8SO=8$gH z_jnH04NXjLI>iI7O35pNO%S7;0W z*78|o9Gr)9jTOy36~3T_X7_~*}B-^VX-pCbYIlg$BbVQbH@9Y)2% z;;$(>b%jjhMi3^WP`C?llDwe!Ek=NCRZ*`g1CHtD3f(K6&bk1pRb z_~L`ZN_PI03anL-hXj(reG7d zFfw~8P`|OQqT5-oa(*}->4j|CwDjEsSogQ%Ei0JyxFZ(KTE@#0@sSyBU}t8ID&|1+ z8@Hz?7}w98y|3Ww>t7;V%9gum6V#ELa(Sl)Y#ELi>=!`P_ zt7LjS>5XE%Y$&Jh)xD1~fqPsB#ozvXbyR3)kLxjwsuEEX~SEftE*Q`bzi zr)n`=&O>YFhtE?zJyK@#9zUY27uzKLh81YoK0+1zGKFa;0t;)4X7Q&_;v9yXNR}ZO4A(B*t*T~rfJI8l(GBzt-b#-O%U}&-$+8v|u1p<9vDD6>Bn6IG+Ie2WOaW zAhRot$jDd1_x@KqxT*D7f3M%bAoWjX{2BGKmcl}4FgWV7^9FWtZbSxqh-SjQ$_8^b zxD18Y2P9CS4K#r2x12yrm|zh`h!NLyr4mwQX4w};o_ar0aH1}j6jD_9TW&MGAw%8(-}xle4iU9Vk*(gA+o`Kw6KJt}VJM%p zv}hD8y!JU51vrE>TvG=c+xixOq>A&R0lvN19u5DDGVBa;6r;^Xk~9{BbYX2tIeGU+ zd^e7AxcV1tc}SV&!isL%@(OTA8yOMA%|iKf_Vc)`mg)m$+DQ(dj1j1wbmy8}PdEwD zH4S;g>-_`Jxb}_cXYqOodi3Ce2#hH{`_$KkY{Rzw$xPF{B)>p=0*H|mc~}3j5ReM2 z51j1vp`GQ;J>!_;6fwWeb%eu~$67svc$=4@t<_eM(KacX_#@&=$0EsSiXgE7!GSo$ z-i!Uj)aXFotbQrN%76OUJD~1fvt;Lij%!_}KO7lY(7@5rBHsKLr}qwNdQ$By$O(h! zF)}LQirJWF@NUPgycA)Qd0Osm(7V@23o%NiqoSq{8c>dgp8~(~H#(d2%TE0QK~!W) z@l51fM|XGs-6=V7aWK<9Y|V|fJf~(dDo0w$(btrro0*^um>w7Xqh3JL`@kp_{Zr(!F|>CnIG@W5aDWG=A>CZyTp=tr#GySzcpY|1&FHvl80 zrKO`cqa|TEFitU~571%5eLBvgk}lRZg*deyb1FWwgP^EOr0ck4PBV6}Fwh=GuNV-y zzUZBk!ymk%rA<(trkoD<61yi(e;1F&G`#uu_T;Dey_(E*o%Lc`bbV~AvD$q3)Wq1PfiH7{*ly6}_xb%}Xt zF^y0no1Z8@N6^LkBeSy8S4l*jF-N%i%WTNYGN|hOO6ya&>0j7zO#377L+@TVcpsY< z7K~v$>?tT25|tvQC$vJS0W3y7@tAjOE5YL_qsquoDkoCor)NXi3c|_wMngQaLMmFI zPtOV;tHQYr7|Pj1+YAW##2Z`)`stIEJ7v2XFJ^BO48>pDtYW4zVU|UzOjn;X+ku(1 zHLB^&vUvD!z7}sn&fSIHjAH>_-pag{g$<38J&>D*fvs|-4*{To$(k4b5Asih!LxG1v;e) z8CK_65roO{^nMaz1Di%o!bI;2BcMx~Pv*DF55?w2syiY$2e2TY^51p7_7{97o6!k2 zw8HQ`ShzP2HvIetxQk?`lBR+};mga!$FTU^j~I6zBCM~&_-in{@C5OgCrrG$8b<(} zKHJQRg9`-y>5a)LR3A@a@U&OtC`TS6uQX?F5We zF}LsvGO;q1nGQ3`fdzOFsop^!$}D6f!Lbu0f=a@*)H-XpsWY*kTJ>b;TjqwQ33g>> z3CZ{&dnξ#NMCc%PnlI;|jUWRUAXMWjj$Qfs(WQkLpk+`w`ux^m*Y<~bb?A(%8$ zq}Ga?(VU%B7)_-e;f<*Elyc{D$k=D+VE@VE*=HDj%!&{EB4g|n6^U;s(3l@xS+F$Z zQ&(^QrRXQ-sRG|QcQ^n~31ngr5paD600`sQ>hc4TqHYhOUQY8kNZml=BuTj9zkxqC zOvGU}#E^fHqKDyz&xT&^lTe#p{>%x%9oU+d|G(8QCN?6UjeoHs43NSFS?Z$0{l7*q z35DB21I&FnyX_Q`aZ)_JJ6Ai0q#R;<)oOHH9`HA`yt2p$2h@RV;HT-3lLph0FREc) zTwaJ=RKv;L)c7!{Zs+(j9>=)wdYWzx`NPR&xsS%ShToEo@Wx1tjkrk`!AZ^w!JAY} zVFe|T66xTL_)w1WaYA1?*AG&lJ}eIkEN+rjXw-`crf(gltC)4#-qUApinc8et%RakeDeN^ zM2QFEg99_@t6~eDgDS>Z-lb1QtW@10L&TaS-Br$3L4b@T^Ee#CbC(uv`mP!@#tpf1 z!0@$}y!Wt{eh2(+r{WblA1S)g zrAW>!ynq6Fc8D83qM$T1=PhXsXVixy^pih&B>Wrc$K?tSab)QEN($?WT)GQ;#Jq?G zKgft+f%X2jsC^ZsG{&tw0s< zC&p^vemU;nRLY`yKk= zZ~NBP1Ub!Q70k~T;$VuNE<3m{fsOKy?byc|I8?);#(NdD0)}Q|0MqX&P|9AHPyWyfo z>;A7>xU;y+2ZVFnbNvPuqk{;T`+z>60=LT=T=4N&Kaqy=8$ptkFs5344{jNKtjqDJ z+JFK%D)brQ9x+%LvC7xzW_MLU!FO3w_nS(2Ds<&PE(;<&^Oy)KR(}k7E(2m}e~$rV ze;l#5ww>9 zZ=PrpM~TrOl?RCVTortAjxEJ1bB5B>4)AC!4 z3)QUSwdQ1%4ki>yYm!#Bh;lgc!&?SFVujmE6sYa4v7|^AtP{DKsev0Dv$`t_GQZMQ ztm>!I;d>?Hj|vM9#RCp(8t(0`-+3&b<4-W5d+Dwk`qbM>=1 z+Cz#jU=5F_@GT7*+EoxH5M^EKZsN5ec=1w31rcN#_%Kk=;x);{;r*9(aEel1!olhI zgQ8!@MzK3dFJ+4fbO-Q^c{RcSr5H&lz)qmC(MDrO+ae%`7w_XR7z`(l{8S95nCJm5 z3gcWx(~jNdGg!7nr(vlH%VtIKoi-jdN*baE$3e4GG^7|I0LnLG(T^Bo@82a5oOp!!;&u{P!duI}O0*V4@DzHR zMP5eU3<*4)R(6mQ0&^venH1XaU@*kCg_)({#>1U0 z{4*i&R)mvu02=U@@^-oQCobh2UU_^Xs+sApe&3Jv;|4t}^otPmY%uy~7~fk`_s=kV zaTXVpNdkn(|BM;^yj1$VcRuZ+8qUL35MQobFgKgNM9Wj^ad=y;ZtHqB} zgQin2pM^<%M`Q^B3<4dVd{uv-fwN+ozSPG1#v#iF^hF=zyyx`Afq_>8ERu+Z@z$F+ z>IFZoMLveu?u)C>_|vGq?ZpSJ64NI#7n-*NABct^P6ZdFtvkb%$(r{tDD;^rnkKMb zD)bZD6WHSq6HncRwp$zFZPX>vY2|LFrTo`im{nHIOvH-nVKpJCT#auHB2vPnvY6KW 
z+A3tunWujU8bs!*O}%~LVWc<_#1()JxD50#zU+5y^E$kT3)qQy8amo4uFkDw_%Mak zA>8pAi@WW4XfGezDt4ukV$E?v{YjBtQXH5%Bfr~|fI}c; z-8bQ2^{=tuR-?)us*9839Wc_$wMRie$^lCXlQIfDnH0qsltfY^xJx3T!G!cnV*S)6 z=tBsZlGzR+bzZhM{bWahOE$wx$Bzz!N)U5q>HtFlqs$%2mQuJlbo2vH-8zQRKcx_8 z)d3W`lLp2ehj0)Hw$ewj=8C;}P%!a8oac}6c2r`y z+GnKq2tG3@JRgE+jLn_UVW)a;E_Pvrwj?AhVESyJOHx2>)I%ibZJcp~$L<}tgI0K; z_zH%1a7p=PQ zqAyxBA$Gun9R||^tr*Sda-I`yj*bNPnZj-P6^dKRzl{OLTt?Nw7|5585bmyme&(DD zh$%s0aB#XL>#a8o)&dk&<_JLw%?In;65($7<`9K*h2=KP#8fo+2d*QT1`KwJ*Z1Zn z46B2J^beBGXG5}f1nEc*xVp~a{sNA~67x6Y9I*R~Iofy?+~ zy`r85IARC3J1bf99oPb(&`<^Xa4*qc1|vTv9+ zmQWzBL{UxWh&DNI9P+E4M_lv^+bsfZxR^2e!j?`KQcfZ@=>hpQtvIG@M+{?0KXPuK ztvNl2Px`NMmQBI-_vBOuHG8$(y;Y8oU`7!~*f%|8$_L@C4O}sH#z+7e z^KkG$`!(TElnewnzm8uOc`mk4mI=okJ5I6_@n2a~?2f;AlR}s|+%|H2Rud~FX1vA3 zb4qBynRsHic#U;DsdNJ2_t-ifv)Fbn&Y^v3Y1Y<*u4%Sv4Y%thXVh$}a=Wy0r*E6U zU((yl{K$KC)dwk%yZ;>W9 z7?QJu@_UqaXduPMdO%(H`+H;-pml;)a5)Aau0S$UOGH`Sr|*F`0WFo6s+TjC5E++6 ze2&@Q2t*tvky0+IIy?#q#lT~sSsOuQCnoggD+j)!f5oW5!Y9U9N0DOO{P;nJ3$V!9 zhSLW*go4aC7y~d?XNnF#n=(NS;~J3g2LUK7=_!dk{(DSjK0DNHOJr+9a2^1i`vF2}6B`-Z})%|ChJ45uJ2-8AYvl5Bw?~lZX@c8C7;FufKR^V>Q>h z4i4~Bz}Zaq&oLF6E#Uy+Yq`(j+HOG*sv$$W+J|W3BN&03LOcm7( zRg84-hKkKQo7JH4F-@(J*!k#Bv6S(6F7DWz*DG+ctO_I0oDd!o59ukaJlO5Yc}Tng zuJLqEp^4B9{gDs(17cXXS%_3d(#&9E4x273s3%-xAl%BfC_JyIq5whuCM!bf`wS)H z^pVI}XRnAak+^^9asBoPUe&&_1oR$LokJu@=|0xeMC{tYLc?IRjz8GPbrTH@BTQXS z+`d1awDZ?cOD~DKdhxt5Ovs!qfeo-}P?ff!EtX?8hZZR>jFIc?t(HI9R zAKXewpVF%Xnteyu#Un|(21b#PYi54xT2A8n%}>V=N;#Mxix{H&tF1iQk{apMeJfn% z1)LXX;PYcpRm%`L%@}G!&^1z;MmL8}x1K?Kg(l%f1$OX&({)apH~1gg@ChaWTFvt- z35HLF}8ADA680Y-_Y)E*X8h z(0mPSFoTa%vnVJM^;KGr?_~^U>t&W$OgLP?a;r64P(gmKNpf}y;?ojo#>tLhSR^iY z3(d~XHZ*?+2flq!<( zMlhU#fe&{rJ8sBkzCW6_?@Wtzs$SGJcQI%KyWdML6a7<%juT;_NLYm26g`N$jLQe7 zP74b}WA0lJG9`P|;3)kx$jbl%PQrj9Lewj!}P*9e^NH z9)A%Q%3(vrdXe+s^kDLm<50e`P-7^Y$p<*YtxoG3pXQN?^j#4Txx~~nAr*$QZ4fy= zb=dvzrAHntRO2E5s_3u0pY%fme{n-=sQV|DQU<~6Zwm@90+<`n z4ST4xdrWyyb=<&YH3vtT1AF{s$yE+^cJ`5|oHZIivfY+>5*nwUKHp}&dymit)RYc- z4tn;nxd^USI39qM6Er;O*!X4YbI3ZLp*(?-rVG9WPJ&`;iE$$!eNbFeskG$ij2Rdh zeTAG+M@FhFBXpUsIUv0+5rPtmw7SHf6ul*mPF}$l^B|DtTwYjgoBm9$Nc4Nc6eTuq zZhcaRvFHO7QHqC4;!X+f4Yt8RoM8)%$;)6YcWzi9*wbM)_>80-qe3c>o{epd`3_a| z#QK{`rnrD#n9&hesDUi`>c;WOkV6O}nrlJaD=sr*NXgns!U_ZrifYeQ7Tege8p1Lj zASYydglfnlu-G19d&NFAX`gW`7dOIeljsoUb`J%Zi`wAmMQ}O~TYQdfQb`&u43yGT zViXPZm}Qiekue7E)`fwOo-gPURMqlrVJOQ98|lTJ@s zS3sv#>;d>uD@ynb-6=~VbVF+z4%&tC3u2IQKt26t*ZwBJoD7s=FbIUUyEB4`|1 zi6)cY7=6Z#9PlMB7BQpJ1;YVc8WJ|m6-nzVV2GM8<$JIU5rQDv1d2V;wGEV4xx$k# zrA8@w@tsXjH!?$B`%mCX6=Z;1?hGfC9Zbv4q4HR1rjqB-Z1K3D?(n99H;ATxaIbYxoTbzyy%J$Gjmo%PmPR%7leAPp9RGdV+y zC_=&+n92%&OW;zL+<>*a?y^1+a{34*>EevTO;lu?Wq!?yB3t-N$wY$=LWmv-$5K+I z8tlbz9J!VnP7|iir9A>8oKJ#IHE+cxQpPn{6tBgCl3?l$F(Z(Vu}AL{3r)(jZXtp^ z7he1jxt78;GgZl?O@RXqPle-)*I?v-L3dPbQQ`3COfY}=vXR2B%2BLdNFnU(aW_Cd zGcvkLB?BEx51WLcX&$176fPe{zXgLCBSN-i;4I{79mEj|hA*O?uuoE_&BRK0FgRatuG)<|D+%iFxW~|dKfm6)z$xxF-ZJrQI z<4i`p#Ht^p{P4JQFJsMJ-uN1tZ>QSElze^38S9OqISBE=IdM)#K;(KsS3vuk7QNY$ zWSKOP`TZ!icz~Z(AxOlKoEf>hKHSfXvJ%9Sbex%|Cor2=+NdwqfeQ{7pZke-poEod z0n1D_@yuK;=TP#N)DnED#JVc%sG+hLS%6@q$x(^snSTCh!J6YU0K0GfR$&|z_ome4 zLoS{fUu_yjWN(D{DA^BX9y+^4%xk{93Y?8@wJ~c}+PKsxsTX57o2?`=OJEQGF^<{t z3rB8T*|@oo2p_Lr2!~^3wmf#5_CN%|E`0b>pk4dYaEqm%F?&SWH0gKr`V0pG4c(}H zqr`lYb#JsvhA)k6#~ddg(>P~jV7PKD`fXteEBzK3FoOLDCbL_1q*#tg*{G+6WQEpm zJe+fwRq2Kd_Y61W_z9g+m}>6~86Jc={FG45Plx-1^PI>oHo&QJO-p&yWR+-0P`^3B zS&Ka@%o_`QBTzaaj{V^X7Lyid!4Hz{{5&x9cK%Nhr`2={fgjewS1b%mS8FN*+lP14acZw#Sb?SN|5M!|*h}*QLs*^0IIB?bth>4EVjV0duPLuEwIh3>=K6 z^Wwr1^2%bx${N4rjT@QUDngu%gs=T!kL4dNk+ 
z?Fx7mP`I#0Ci_2`wX3^RPJC(Rb*t<94)N|6WBz-9U+;2p?Iplajijbb{w|UFznbRZ&m- zjj8DH81!Atq0N$T3)t4x^TV8I)A6^43KL93iYQSv>T9KyL*^K7jQisIraJEkKau8n z#E*$5lTj20D5#rrs(|SV$!-({;y^XdaL!i7&Sp2C`Do_re;Z%+YmH=;{@ffI!#W>2 z$Lok|8I2x=*{0O+cAg=cO+zAE8DHbi7(3qC>1vNA&q^Gb`4xAP8=wT zNaCN!!svRLVAycTxIyB1Ni4g{duHJmD$3gk*A{q#hbg#O3AQH5Fx?@k9PF-jzfEv* zb4Eu(6}c~D@5kb!FtT(yJR*)7o7n6dv1Q~VkQ+j5<9)GFNn&jd08n`_>%4xN-0KcrSlN!M_AI0t$rb9>CeR1$TLP&WjE_ z_0WQLn`?BkGVgeP^s9~Jl9e1iD#O@^Bx%!Ig)Qv}0vSbqM zI-mAzu>yl31*E`zFgSgu0agNxiYbiLC<2*R>QnPb3<(i8t=2$L_Hs_$qg&?uQiD$B zaN!oc99OI}gLfOJw3rjb8o2OXMkh$KzQWB$*Owe7tQ$p;i2$B?w&22=lJb(m+LHVT z=xT4mbnFyl)nSZ)?!1KE1R;)wP$0+0pgyFwWa2fcU>CQrtH7`N5ej6whsjn<` z0HQaLFjS&MV_X=-nff@HSE+55upp2mpi#rvd|m`O@UoMFE=uP)c#s;G^9R&W8ksC+ z+7VeJj;oz^5bhf+Fz$MIE;KWiJA~ODMnV4lyc8>}L50vhBGaJd%3ZeDrYoo;$>VwUS>88S=qSF|biYN+Tx-*dIr$5GJeC_mfmU zgAY(U7srWOV+bEZzSk5K)_}Pqh;dw!>Wg3&l~lt>VU|VHpb_t)k01V-VG>HGvxu-A zYUh=*ujD`){T?VAft)&#OP-N!h2+-k)o9Rpa7_V$ck`+cqK4b>LOVt^3L#lJm}JFs zHwoK=a|KvUxdIFe32?HXS4@2nJtLJB7;=h?KCKZ@ChT9uMbMfnQQo zJ(Pn6>nN#I@{rAuN{Wj8IT8Cu@R2zMLzNrdk-f)jHVCZq)i#XH)J( z=NAcUNPt6WqaS5T14<*0bseuRP{SCv)PrD?aB=5w%6G({s8S?Zxa$}9^jMy$igUd4m}$O9xA zK*Gz~P_3o#(I7)lT4;}G@D48mNhSf()S!YEgBQVxsvJUoIY=y^JtKGWQi1VeJrfk( zGzV@*03P^8$9)u5$cK*{$|6G0os_TVMU|5SGDj;q(3B;T2PmqHGItmPju`R~{u`|c zC>*2$ur*O#8_~6aFAAWLMmVBLWW*i|4Wo@Lr~4B@_){96sEUV?*J9Nb>gZO^_bO-INHo zj4|HN$u+5*h;BAno85`{ICyVd8dYH-k6{L(cDu}!!G;uaj39(ih{Zw8F*hR0@b+W-D1FTjzqqrHC!vU=ZIl*M1A`Vm{3pkcn4fDWkR0>2bR9uD(A4bEe zhRuvJsQ^-88AwLt=(u~URSgBWkx=pl*1*W2)8-{g0c=x)S1U0MBnGG@5$-`cJqGE9 zn}qi>v1P{4gP8zbRCiB-DvULys;)G1U>q{7N(7Zs1BYTE?U`x7k>SWl!|LA#ho-K9 zECEt13qlA$`C0LC(G?MfL!AT0QYZ^VV%-trj2b!6@-ivx4IROqHR$KeeL5ohL`^f1VJp=Jrj6lbgT?`F zzD@BFvn6(VAqXxB9swr6lSNsPjV09_C>+CrfoPXLFDJIoKmuB4-dk+83EDm zi4aD@&|f?&iSoHiki6;m3l3{&k;$&XVO1sa&&>4bHZM%;=*iSn{bs8;1H7^n3_#?D zJvfn-gft|^AhQKPmIS0+@-ZT72$e$J*x)3X28m~Na~)CbA~PN@@M?&J^3qIuSgkn2;_@bCi6GHGMFC6==RRQ@#V_#d>&K0QY25L4WWLBvNQ8YD2L z<;LnAQD0MnQteF&!MH;eP2t9|fcW~u6BMh@)Ck*wk@4mI0h#K9i9M1b>QJOOp}8Sw zKA1Gn$PvcDs6Ew^Q_#>`s?n^2EP$V5M52Ib71D zK|aXlP+j6$2BN!ROry-6f@`SGNJ%0NdFY&2A(GV0v8gsoLbv^J_6#?lPGD>`Agt~h zBO@atBPS~h{XZikBmDmv_Uw!Vd*<-r*><~q`0$*B40~2~PIdyw=&lKL_6sU1BPV2Z zd93io{g3>TKskp@=UPr2WFtZOS5fjKjFyVZfE*MR;0p4Pe@5PzidkhCk(#Er^&RQha|Fj6!cV*ZdX_bf(daHb(E0t(c4S4+ z|Kau{_rGU;Nl9R|Ah{R;6r<#7L?Y9rSQL$mR3vFoEQko{jbd@r8>bnZ*jO&Wi7Y3& zdC07rvx?9(5MDeLbnZ~DNy3mb4GfRfi{4Hrn}?s&(46TgJ0*Nlt(PINBqdpJFL@|{ z&eS3g_(SyHm5iU$zcBQG+d(X*e>{#0;kQ~VMU`a6%M}TXqQGuz4S!oSSMPZ(IQEsO z3$gTqmqk67laheUdGf%)WWs}s(Fow zjwSH~1p(!E(%e!~5K5x@AEQehT68MB7D?!zRivxDu)$!T2ObFbLm!*_qPZWb4$-0o ztVKqGn3IGeGn~SO*9terO{0KRgTKG#>aFWT^i&U83Pwg`Vc#T-R}%Fhus#FnVBy`2 z^iQW+R@?xo;zc*7X!)+9tqMhrck>U|;uLLILAm#|~q zcky7b8!PDt5+XG5vSKPqqwb5ej6yi~D$Zr57}7&(`faVUH_qN-s^}UG&;S< z8Vca8ga62*JYiWi-U#w_wF!+8kq|#b&%!Az&13{WF?is^$K?{kj$#DJn_&*Hl76Qc zW~9VTVBC`E<^n3@w=fTB)iW5-XX#0>PZW|05we-G&^6L|Q1XJ-s6V9hsm66okg?I= zg{!9kP)MpX;uoz2?U9VnBrl(W)Csgxh2%~$0mldr><5Npt&zt;H^YbaZ51SjwYta@ zjDRvMkX#`^{5bVL4YMz+Ci04@N{lgYI6i1Y6;Ldp3J{QP+n~5Yq!9c#PcujglA94M zkumG0LG(gIVQDGCix5$KYer$XyE-2SNISp8asu*R2n2+X4HtzbEFYpjQL`erC0XX& zL_&^CjDV;eO~yg9GGb{4`4@Cq zA%Ia1?ja?7ih$)5l@~Gd0S8aD>5?!ql9!MOe-nfXQD&=%;ldLuC^j$_a+DBY#C=VXK2*G$*%AQL4}L#6(XGQ54~>A78LcIp_tmC1%PQY5JuC> zC(%M@fP^7M%S;*0B6$?Fc0|n~QzHk(LS=%&$=VZ04seD~lW$kl^qAURO#%}{98anm z-NHyZdhA3jbJQOtGB+ksWNTO7kWe$J^OTxNkx4bFb|{8Q#S*Cw(ZNcaq(qIAOSOba z>6*-8&p`P%7&orx5b!Z#(2vqXlh_sBeDe%*pw}-#eI2*D7y}#mh2!WOJ>@!b4weWVoGSC2=9@(e zH^3%XA877|_ArhMVWN$TJ1Rsez>u8W#*iRG0V>eI$UI7j8lI9~!z6l|RGa|=ZCpg5 zGmv5oSx&Wt*;OIg(G?SZRBMEdlCc!-F3t{W)bAvdTcAv 
zfM_F=&=E3c>bd2Tf+CCT$A(C72u%%N$f_lRZHAkXWRBD}*|RLV(OdhD#`n`|gGMA! zWa2J09T06I@FJ(T3rlXSsSa?eVvv|F2SuCWOFSsWg`t{5*VPtyAOn|eS_tw0ImpGM zHqjTJloV$CPeRlxF^c`a0p-vVIIF4(iN-HwE+AN4jJsZtyjF#u%K>|aBg^kfGzMv2uiI2r*cBb z!|7Qf|RYr%RU?7Pm-UF-prP)Q%}0 zW&EqKyxd|uEaVm$&Ws@=7rHjO921|mM-!l=2FTcPjp;;s!oH4@3+1gy$3vteogZsr zPy-TM7$!VLneqRS`T>$HL%I+SC~hCe1_cy`Z1dVcqM}N2ed0kG?V@A^)RkoZprG;r zfwO8ODF%z3_OKE&!*Qwv3E+WV3yWTOf9?OovtW zal&xZ!)*MF*uSClj|&ER!ax#MsSu=__cFYYp>~|3=`-;&4R4b>2c@TceXwdz6eiH7$shA<-JZ>!t* zmtJQJX%pxzLt74}CpuY1<($(KK{N2mTmV#!OFT5SaTK&&DX~!6!hx#_iMtfEusNdB zAjx_Bn%hL&M90KsrQ>c8et{fTdJ^KZ$mreMGu#dpTVD+evbs2xv69FK{m8;5wPYH3 zU|wRByrhimMxnlppUufiL@IbAq;#Ih%neIyIxYa|L5Z$H>5QoIR{9s60sTAHE**@9 zJgiBl`tiV@Lj^7#(Q+TUmK%f6__P*rSv4q*Ee}jf#5o;p8R@x+Gb|>9tT9>%>-l0- z;5cX&FiOP`&7mu%zp_TPiB>bf5N&aou!|Crb1Yy~R2Rz=6i9q9E;B-7m^ICz?j=GF zHS9PKpfn)H@syxTD>QCS2ZXXy5p}9ICNBb(YcT2}sIvsRRg%bo>WNr+M)*g~(a1*F z3Rg9nNn)aY&{&k7P>og$g*Zk6>KPfKI52urB4!j^Xnd4!XhVz$iPpqu%@Px7RYoET zDO&XwW7!r%e?gJV0;CvXT~blyNj#Jo6aV6k>u~RiX9?pyn=GSiY$Y$nx@{f5S%A-dMYqOL^cBjR~|_y)EWjP8Knl)Zh9I< z$RrmbP<@gDFDgo-k6gBl>xFaIqFf-Kg3BAn!x$OY8y8!YWYT&@$SFtoq$7M%oP3fo zV-nd9v&j2L(0SwOb*IW3tE{MkJDTRWJPu9qp=(iVsfFw6neTy+Q^7x4U?TG&E52k;^NVkU5-J80@z7aXX!Rq$9OBeP(WNI-JX%^_#E(9|y z938x2Oz$u%n;wftKcZhbqF*_pUpb;*Iig?bN&O0zg=sDQ5}NBQomTQk&P@29k#3P) z4Ns&A(l86lc=LeWmTk*41_9c$Fk(CsKo?&C9liq<;W__)e=n7!Fjme$(~&5tiSmf> zgikPve2+!YLRoj}0&XAYuD8~6A(ZZ592ZLShpM17GLA!1t)YoNd;}~G!0H%T8+`V( z`vIY$xMd~)DG|H@iBUelqDA5eyTguoZ8Q61F_dXWIWBmVgLJ4?dLfDzt6P#eNlDko zu}T440a(}iRY)SdV^K5Nfbck4-IbsBpwx22V>-!D(GvJ^j~Y3Joe3_+U9SZ_=!k=o z!JsH`ilS%iCoPd;s(}$8x2};Jqd#z?k5Pvznm`zmg2YE16iG5yRSTvWuoNvF5C1RY zj512p6p|F%knF#z%yF~|6@3*2rb(*6%9IwH2BzWxJ}=!lWRz!CyFQ0;K^F&lWFqvI zD0py6(kZK5!v{R&m{_BrL~LRHm2eLI}dNu9&w?*C~gwK!*FJ zGuK4vuJ133TrfxgP@0rET%?CUf_$W97iL%{fYi@^2y9e~1YT^N)KxI`P?C^oVOPj{ z@FeU&rZFPM9F+k{QN1#!%n>xp2<0YBks-he7=eRG53QqEY816s0HR$v4~q}8f`NIY zr;&;C=IKc_63t045`hi0w9tI?i7jB{NRVOZ(gOu_hu`o-FqAAMF(E+V8aNrLH&KyM z!#gG%!)r}~em?`*lp0em3NUXTfb$)Uhp}nTTUru9OACrSH}%7Eid*IpOLDM%q2f!% z?##fgKzfidyw_(O5W_g%GzS85tT>qk5$h~hNad76+ks)go{?ehKfROT{7h39Ml*Mu z4@Q_qj@g8@CV&KqOdY|ZaNmt=6B#wN2K3joQU5hBc2yB zd0JL0JRe)Eb2W|&l{D~fL-d#7V1-SUh9sqsEXS}N$IgOm*(_C38R3vNeOk7EMk>e$ zk$Y`d{!n}EV<5RgYX)%5zuz7kM~b%1Xm~222#lhFY~bg`pi1>Ph5)$Otnk4o>B4Nx zF>J^a6iuO2ZZam~BHc&;0gM)NNOM}0Rj1J&kcpv@S2!S`+_5f!5$mH^BB_1y0A@{M z|1UF@-cpQN-Qeqp5?gtoNUwM%HHw_98T?696j8z<4II+FBVC0S8c2c$;qZ~2TcR)y zhbT?^d68#1mo7%Tf(jL@jgm7?jdMh1jz~IV7F!R}CSiBvMz={ALcidOP&8gduwo1| zb5fE;H!*`|t5Iw$tSXJ!+@QoxVgiskM%m!4XlwxRBNX}`!j@W4OaqXLM;TS%X=oTz zT6RI^7&b&tE!4}>9p zHU*2OK!J=g@5d}EuCLI@$RhkE)q?vgqc7>| zjHIZwAw>uO?{>ASkrfq1r;(%&CNGbOi`JP+6~_mrLm<(JHxe_5@>~YOhw3CDLFBaJ zjN3old0Kz3AI=%d`@QMZ+>CCLRjYv-5WQU;qx1Z~jvUAE@ch3ynGXAr{J%Z%yRy1+ z^of?U;rTwXY)o-YLc(AAB_tfx|M+7Q5)!O$uJ|}1q1RPv$z;{v1c_8aa%K_!SF$6+ zCn;)jW^uB^!2|*}mS?;&<5z4VrzSfb$&L&}H+YW4-#9WH8P<#}t39*Mo{^kcn3bEH zS(xdtgPG!9KgVGF!nVZSh=42I3@lAGD zXOE~Sqmm4d_%pl1Ho|7N<2R5Y#*bOZxO4DnW=V2p5t(|jBZDWComJ@Z7u)%e$7|1) z7_Q7G4WETOfv6sTQwhoJ4PitN|_oaSLA9c1+S}guD#SVYDyq?YH4msRk?5C#HqQ0qt+#R zCzKBNPDpl)Dz9tG9X@`-D8F~&++;`Tq^j{seHlNyNb=TCnak9TcQj6!oa`ta<(0Eb zM`X<{lI6;pB6rC|N2Y7kq^j}RP2=Vim(+@-imO4Lo$MG@%uX2YuJ?`~-N=ux_ekDk z$Eb=qYK_0dBUO65Zl#vZ92+WUJr%(+$0SG5h=|9CObye$ZWEsbV6aZNA_1#1d<)49#6*T ztRk_(>lvLpSC!d`0j9E$7wuJz6Y83(CpX#a#`=rqPIZ-J2i2NVyM6W?_na&>Fp9Ga zex{*riler0a;Y+$9bH+&am42q2O$XFv9N}Hy*9h+5B+cZAe zQOe~^u}{hvo9rlUoKRQfsjV%oNp_UNd5q=C>@|T&bCMmS9OE0@MRO;^BR6|YaMUAMH8mp`M|B`LxpcTk2sBN~%qg49t1@37EXvHu z%H)Eeq3p2xTiKaI>X^v!7A9vFB|A## 
ztwdK8zQ%Ybv*hD%3MVBbfP}K*!n&s0zWs1s^^%|8U-v_M@yq-L{Rf}Bvbki`ba@6ymw=-9M?Uc zpI21gullqzF1i1k{VyK%*0p~NKH2v1BM)xu)4ILky=yi$ZkS$l!dqDtM;Gi~bMi&s zUpna5PcA=k(Aig9am5v9U*f*xiYu-V*4hh;i;HtUT?!Ht6K@TapVGJQbrrcouDIfg z^+N{qD=sc(`z`F9n3(u*@Ah7O`}TeBvOiF>UX#<#xNF4<_r6`VE5qrb- z$$iesNEmW`?~aa+uePj8djE0%RX19vt^M%BffrwVaiFnr^lLX--+J6XP2IkwZ%0Q* z#?9qtWjGVoU31RmUw!3!Y27;4zFpt0U%&pZJb&-b6=U;W>KJqCzRV#Ddmp#&i<@R$ zd&3Qv_0RNxl$7I_xQ$ke5Z^mDthbwDVe+X?scud_PnKb_pyr?FTQr!GV8AwjCupjMj1GuU&D>J-@cH0 z?M7zHN~YYtMb=edtOms`1Y2aQ%;nB{(8dwvsRzGX3d&6vra6! z{G^vxv|Ti;<&1YOL5&+fTYAssZLJ;sx30V9(wlBNbLQM_$-;XNm+T#U`IcuxnQPas z{bbv=-jBR@^`nnwZM**X?Tgq|1^1oUUOe#rDUTfc+}xkn-nl4$!IMwsZc}a>mXmYF z$a%Y7axWS=fA{XDZOZgh_qkgaZyB`jUneZQa%$SlXU}?I+7s)xta4v>%PqrN7T!~I z`AO$F9H+vEo9-TW_r^KBx9>P(*O$%kzrXyFJ7o6v9s71{{b=5fmmlRf2Y&u~!Zt6iz zU3AgzZ=d-1uBx}PcYS%*_J_nRZSE)KR@;anF1P!wRqoNBfByNHHrJT;MT6RlP9AyL zWA*QjZTrW#?HgBZnf2|KHuqDrTW!OKyza?--Sf!K{x;_IrS>ZpELfahG-*rQPb*ii zcD25-b@u+J-d=kA@yEaLxc`H@HU?5tQ&Ycs>(0w}_J8_?j@uUP|8Q-`Z5x;DRj#x% zt0sT)>8BUJ{(AYg+UYfO=jJ{8?6cJ~XWCj89KUPmRr^;Jls&kq?bG(lUUpsl%rh7K z_|s2!SIyziv`zopt+(F#TFW>6_ia9IS9{0cr$)8TKeyj8#~kzhFTdQBf9ZYOHr;)~ zf&~li+8D6xT2Zil(cu116%;MHvh

    *UWxwY+K z_PMWQ?%ce!YuDbjF>w5@6|HcLRRu3zbLH@loBN$`rtOCOOTTumx#XCo`7KZH?BDQm z$J=W<#w>noZ|2)ew>&p*_}-tt8QOB}J1=H#yztT;&+n_;+vgc)>(@-{;8(`%@9!y} z{J_LVXTP&R%B?A%GNu3fkMc<^^EYg1yJgFuGRN4~zg@iUrKNWF^zSx~8ouD2e@$}# zy8DMs-Yu8yzZcFkY3FfgvE{7Sd*`^DY@58-p4i;dvSk07qU}{J+ZPSK_dm0ei>|)% z;-ioI?6vgOH{@R`T)TMjC3oBbwq>4u+5USM?Z0mDl70WW>(`}Qo{$EuTR8CimSf*r z^IH1qtB*eJq%m#d)3bXqo zI=IpW`zNf}xilp;weY9^Zn%3RxM23CyL!L+>Z^Bcyr{^1j`BU*@^HMlY`WdPn7} z3$NU?sV1xb-{pJz$bVaR%{g1wU31PR@7oLZ_dj!AQrWPbllpP(FFo<`MOS&-S|-A8 z@>}+PvTw)goAU3RxP9Z3a6QXc-;#2{kpI51?5g)3@s4Zz;faRrTaK#O-0|az%}bUn z`E<{hZI>@vbn=1)3(nqI@AY{8amO7moa_Di)t%cmuX3*W%eKtfFL!Kd8@#K%V`u4m zc004S*Y=N|o3{7r)k|)>txv_~Pj>8B^y7~|-nY8rwe;10-JI1F3T5v;#vb_OuLJVb z^)pv|z3#yoqxbHKB_s$kcO;jb>dvVBnNfP(!yuix3f^4QA!a)Tye}4BS z+l;v%T=mNA#}aL}2fyBN&BBGHJfHfvTW4+B`Qwk5{F86`X~XdG)2BCoP|$Ys&AmVQ z?;z*aty?cW!#dUZ-oxCJbG9BkcHFoh-mib?(}LgjoZQ^peC;i_4Eu3k^`PTU8t_7G z)yU+Q(g(H-+IRg+9b4KyO1WUjxfktbZp)oB3a{nrPhY%z>jO`GwDz-?uR1E>jH{06 z=;)aIh^OM6tG0df(CYdB5+rOiAIe&JJ^bthvyLaz~*Ji?Rwu;RwXTLLS z`m;^rcQ<~%W8ei1eU@F&cq81hMf=NKt=p@l8wU0}_uO;WZ`^oO{#MEVpC|hcxxV-1 zCk?!$t!>b&uf2BNk|oC!6cqfrqxH9c&9yJfZ#nz&lLsA}F#Wl?r{6hxQE7R3pUSrU z6Du}<|IVaY%b$IAz?U1_+TVM`d(?k^-MnMdrcDV6o=5sW;@NQD-e10Z?~&v3^Yc^E z(k@%QdUcKT)l=%WQ!gAj^6riIoK8r~=5x6AVyE+toxOt(&)Hhq>zHGfKl$WY`T6;~ ze|~q?D&Jd)%{$*34bkp>Gs{>1)^Wl7lLyWGnai)(d`iE5NB`@c1xuGMy>E4n`qi8F zo%!Zf>CaF8`Qtt-%5RoGD44b<`Gseny^wqU*-6VjYJ2FT9shc%4D1k?8`SfPVUp^vUg^`ymQ;;zNepl%%DMo23*k4XGK-pgLCaSZ}1eCmQHM` z{OP|NUi|Tk*Tzques!kzjXR!wc0l{WfiF&zp8NYp&&`9k=85N)mJjI%`GXs7y6HvN zs!#s&lmfRj?ToAB^dYC;IKr^p%ZB~7Vdal6*Nq>4@uQDs`EHr`^B2$GcH3?Dow<3b z-JPNQ7a}p%;~BL%D{acdy+$t_aPdtyohd8IQ3?+?{e0|QS6|)dm5=Ao9slWvuN%+T`0?Y% zHn#;^PCN6=t()#1w{q$O34g19_rVGKTlehQliT>+fV4AS^Z!=<#S>K%e%n{K`$w<8 z;oJBB{>t@3$F+?=e^+C_eb=w)5M8Z%mv3LXbhz}9f5%73TbuTM`@|35J+osh{KPvQ z;<-0xRopPJ-@IMh3qO9oo`7Nc(Ru*h!S_hZi-SORB zJD0AUoEB(ogcx`Ih7I37-e$YUYw95ZWM!T9&ASi5FTi={9ffi73Hvfnf;f5mLwR4c zztpjRU*G=yQz8AhXZ)Rk?>{NrmihJH_xC>z;^bYEmi_4dOTx^iPmd8kc)Vh7pNH4{ zcf+lA^{Z+V9b&Jr&-r!toZWl&l(iM+eE-7_YhJm2=-nHAr|eph4@dg$p~dH|>y>cz zIm1tyGiS~wZ}rjlPk-vv@#Dvv$)T@2;cqKetay0GWx)w&Ty>29{YS6Mf6=z3Z7!sz zHLdf{?lxY3l_6?b@a&|k`Qbiv*3KkXD6TU zYW>>TI`|de{{CWl!e1v(URqPI`lHkD{^vi3?H~EhV@&Jb_Kq=&`|iv9d}%_0BuStD z_rGJ?3bS*S|MtmKzXC1ui&hovceM_F<(~ch8_E;@di{`!>WLGFv>ZF3wx;Hi|8BT- zT-&FoZLeyDbCV%kQ&Lhw*_!`6mAT-mtL`?lHN6v3tk%EZdh0p1=}&@|1>-Jg{BA&2 z#fv|G^X}bMbB66&aWXv6^4~l96bwm#cYfx~nPbL{yFUNrh`pj`46rv_>yUDeYxYd_ILIwPuY)3_~C~in!aCi+;z*A zSz8vIH*nGqweV-f3n_Wb?t9k*}m(u9QDE)1R8vTs+3&zJJxjAw?lEUa0u zV1d(d^Y{NzHh%j0&6%6NOu2G@_JDnl4BS_ryQ}ep_Tubs-hF7a(+Os_{5)a$^UYKC z4w!L%O3Djw-&_0EL$eA7CnuM+xsTue(05zf_ODv~w7a$SW&8L`PtM<+@XLpdgZ^2$ zY{A;KqqZq(azVkM+JBa>+5gMOi@4WrENgR**}mhuEp1y@t$xPc+SqO%e--!Ijmum1 z?K*4Z+^mz%y)dO^;XQ*|f9>11Z(rZL4-ULBeU`0d;W%gO6Dte8bhkdSa`ozV`}pGq zwf<_qz3H>w>W+^Fo$DvVZ(;4biHip)vMPhuMBM~9yRKcon^KwArB3O zjAPo1N3tvnh7Gg$-+y$?`sXe^;!16JTUdKwJ%-S@4B0B zPHj2k;{Dg9?Hc{qr%QczY|P)+=e+aI^Symf-}!y|U-HsR#oWh_)=i###buZEf~0QF z)^%s)H|ORETYyobTY0H<+T-BD3kS@2rlI8B2WQy%O@o^|_I#P!+VSj8KKs?zUVCx= ziRWa@`K+_12xoB_<|byLfS8%h}lp`;yAGFG^V0`PfZy$YE*{zjR8<1ZC^N0`FiAg@4YvsvU1&RBa~B4Jr#1ZkNQ72BdPT27Z#j; z{)de&#)d(;?SYTJ@wR6>o;y3T~^jR;f}lR>a}Ojp7k3x zlvGxp!YrtoK_~C~wPbotme)!?Q)^#Utt6HL70k_0kKA@i^<^1zsTz_5Cc!+T#vwfA#k8kDHIKs;U}1 zWXNBh+;c|)u-p6Ie*5ja_kQzG^2Ve~-K~SK+<))o?ooZ4O9vxO#ERAd{T6(ZnOKIp z*uQmYb+5L*WiI&Lwn6QeCGNg@%&G8nRsEJbGA>>}|I%KR!QcFW@i;fI+WT^lBq0W23wF-_GTgU79T4cj4c zURpk+&siA>Ti0Dvq^emiw|iB~z6O?M?|Agly1li@1JOkK_iMlG()~ZamOf_e*tJ*p z&phR{(=MxAmbd1^4+k;~b5@q`l-E6(Q1;HQ|F^S!;lTCl*T3-AoulV`{dVf-UwpBA 
z<;qjexayb}Hk`>Y%y~cl_+#Mfw^Ijq(8s=FRdMWR_qLA zKi%|0Vvhg)%O=eX9rNKcjicXva7N#=&t6b*Yff2P>Cb4t?;Jaz--$b48QSJtf9-is zetG*mNXYKGYw+}E8Y=eQf9z9pe;)Slhh{DMQ zy))nakCXm=|CCE-J~#K%w?+>tnLhotog*qwKj)mQZXY@CvsLZq7Bx3NyK{tzg|MEH zKJ)5-KQPU`eaosb6%|YJN58$i=<-wg_rDT8+_vfNOh6-+l82^viDg zZT~NGc0Bis>-|UhC+Gfr-_8+F%U`^D`m?*MtIaD;{-|l|iQLEKpQn|Slq}8hpZ%AkkG^i{(v+5k=dFZ%*RJ-CuNUs$ ze|mebzin+_bZ6s^kFL1lici{ZnOK#aoP7MrCy#bI@7TQh^8ci4nf2nTf)7@$p6_mb z;>ll^F15Rl{q@&hS4tn}Z>zoG@x~o9)vw>~{fKwtjrk?1+c)NKnf2nzf_=_b+l50a zm*pKZYu2piZ(jGtjcHT#h%^O)I`Sd!N#0*QDXwH`=E^1^L%it5$urWmS?a z%Xd}T@7mhFXzTLJ&t3ceWA%53^SbBs^E}eu^9baxGd!=KRa{b1*0%io?HfZ|KAyFz z;LACWbxfaK-m6zH=Le7bH+i4D;f5Pd+xgX7MSJ_qaJIhD-XSurZ+tlxa_TE5r+xqZ z_kZ^4Qw|8YT;qLR_jLuFTv1TFFXZOUn|BY`&urJr zSwGM&`Ax~*=51{$F`wJ{@M}w&*@4=9b6>rAbET#X@6*{(;uGWY-<=k)$@61u9#jvucpaFlM>yDnN$QE_wd z#e6}H^XjE_<37C3^moWLH?*_2H+OSO|2${PmB9zF=67>|;IYQ3tEpU)Se?4lbVJvR z*O~s;){PT8PCM!ek0+)0p)hR>r-dUoH<=yUNg?-LaA9xK)kYj z)nKt_w9}3qJN89=dUG!*UCw}VZ#)D+!pNsX+Lo*=8K*t^_Wcsa_J3%pCiZmHk9eiD zRQo_-p<}Lj_>Mh$wr<`$v0bktVPFotZ3}xM)SzlB3PpX7L8xk+sL8UHYbjx{(5M$u zHJA+R&$IgvZ#hPM*~GY5b-dPeQJl?&*|TN=W@i5V601Az7fM$A7>wb)Z6g2e++wss z;M*r>vX;i2`aGv+*REZn3JSS5pGQVUMG*!XyVP8AmVN8(cK7m9jxG4T{eB_w^7;up zz?LoB`{j#Q+fmhn>h2dw67ZQhvVTs*2b-IyyRc^`}Hd zMHz>^9Dn}&c{evVAg=h(>T}`kgDFr~?deCk#L|mr-BDy$KK^^y_s{oT?*%<@ba8R1 zpIqo+Z9PHNDQj&`PEM1bllS4-VPRo}$){gP&I{HSKRxO4zjNO2|pY#f#_p`S}^g4=&l7lmwRfGT*;2Uv)mRu06M~#LM@M zAO^T_(5UjslU2ccHmNx7f^|QsqzTXCe=OB^u6+CS93w_2WsaVxUQkU!eTX%gOs+Q* zd(uDSw}X_gy-kcUPgeE$`T0F61;>fJXw8k^bdX{k{W-fu-d{vS?1{aVx!0jbtxayj zLC)FF??{OF$Mw|vSIjC8tP=O~^76WnwRP*((ujA@nkuBf^)&b=9;6s^+NOyBrl#_y zrl#d_uDz0G-ektdw8LIn!LQsV>je^tBP>Z$NfX|_eGAjmGiS~Wyp}v!w@B!@$h5h0xAt^4 zz{D~=J$)@oEQLVJz#oGHo0F4wm~Q9_cyP$wIP(3z&vSaN3?^@YiRHJ$E`2&IAJ!NJY2UzhtC3I9%70jlZnVG@-#njZ)IO^j8AqJv* zHA`e|Sj)Bfnwl>C5+Q4MF7i=axX__rBD>U#TV|F)N*ELwmoiD@{pZhKy?aZK+zc$+ z=bUYLFX+*PZ=MK0?x=Mm2EKcCVWNb@4wzH67v^3Y9QamHTROS75M~a^4|4OajoLih1p1p5hh=YT}-SbgwA!*?* zG5^q^iGzcK_k!FE_AZXF2@5OxJjdno;OE4FZOTetg4%S(h!hkQFxl+NjgzGhxQ0<$ zYxZt9NIAV`;Ou90m#V?f>k#ws-Q4Et@gh4iGb?Z1noOZk?iHRs@Zf>hiO>d@ehHl} zS1ClmDKN{FnYlB%w`p z)}`EIt;+jtQm(wZvAv-7?(E*yC0*~1KFa-cdPTeezCS_0rjaUg=Oj(P0>_RYFL2d4 zu-`T^j`rnn4^ZQuEu=B+b=l1M2dv?-k12ys~j} z^SgIx?A99l-2Bk4TAfG!PfN|JnXd;|g*$m|O-BQGVoOH8+%D_L`2uS zx5la=aPbMobNT(YY1b}q5G$yi-qVt1rl_bmw)^o(m?)3*v*>uyl4f>ort6Y*Z=aqc zzP`D$bVbQMH4|%V>j~b!`>tENR%A)vtL=7ncIl=!RLW4wcJb_stV4;($r4u9)_Km^ zhOmH{v3>iLFIUcnwTK$hpKPA4WV4TO*gMNOOi@=)ub@`3)GVWF@Q@*_ZZ!tw&R13* zpEB3rO8R`u^sKByUD>d;glTd5+L$i8X*nJw41DLp#FR(sjf0<#LIEbTEH#3XHeePzau9s8oQ zb25X$h>VPcW!#Ap5>}ty+-YuZ9>ZWTzV`Pk0)Pkzf-*}Ywe$1y-`6aD2Vc6y5pS)$ zy`|_+PR}1G%9x7aA<0|*`+d^9QYNAPbz){;FQ;u<^WE&MbT5s~p~|GNLbj*6bSp1z8nxvs9JiJ{Iv@c;KTz){`6XnRi^GURV!!gAM-r2IGf z{~LL)81nz82S#`pL{cN8eK|Uz+z6HzRol&-;j&_xr_$;SED!Mjgs)vn=u7#J2 z4JXXT%Gt@&z{4)W(~;~H8RYBe&TvfI+tZfiVCP`Y3i8&|Hn&<%VVamPw+>u=%%wRSMB7_%BDwBFvWFGX#>T_OHat+@ZaKx+*otIl59t!3EsSXl zOa0~E#@gndG()Quj=J8?Gy`uAjq6SgiFTu?=-Rj$nK(yssRk6=sPHJ)2qU7Sj-9R! 
z1u|68weT=k(X|OSCc1`Mx%!%T2YN&;cQlzw{gqVjCtiyslLqhZ@)^?7D z#@5zBD!Mjwwk|DD8;r7dj9Bhs>7;9K>1<(OqsO3AT!_}zIx4z0?i`vk+g+Ds1qH1L zj0|$}c4yIDec2YY5Y`H7g07P*LC1q^??}@LG@&?p83uh3}>3L2gGrT&?Xb9D!MkFD^zqX$SS%P!OoGsOe0qlwzoIT-TScbciqFLU0j@~pk7gt}limpZ2a*}N*Dcsv3#5ls428Ki%LN2~x zdJe|nMqJ3#JlepJ=IIVam^=CU+OCN34W(HdTIjiO!`RwJuH4||P?$ro9mhF{=3#3` z3=E@lLxc3lHUvvc6rJ$1+|q7lW#K-YxgY~*faW$d6wwo}oyh=>j|fJaV%5B|@6^Yb3m{3hs|AL`}R zGgtI|$+DN~KC1Ya1-G}QTlzWrWNzQoxMKH;LjkGFf+y||CPRfu2M^gUcJ$P%oV;LB zodnxX{ZsJk0`22UER%`z(=#P>oR>|Pd8J$1_aZRkSRlFgcKo}gwTzNJKwGc&#d%3B zy-Ioe4CVJ%XSy!=pxk6XcKURa)B*=9Wu;vUr*BL4Ew-}K4J?{5{l?~hu~0dok!lkj zZW!MEcw<7s_}jT=3UzgLu-q&mI&SQ^adpRoTsiIcCUiHICzh8RtzW+$R`ziS)@WsyF z{_>ME?CG+y$$l52FE=&KYi@3qSsJaMY@;NlxoY#2d4>UB-adt;%{6OGUH0NP-n-}F z<>hsWYB=t+-?iDZXD`6vs){@d({ppva&lz5o?V!?Wy==d(tt6^HcB>W@+qaI+OY7v zX3ZKIJ+j1`n!QOocFdYJYu3cU)bw3tUiRUQjg7u1LuOw~nU|OQ}E`oz|D6%yZl+ z`|#1DiQ8|G22RFZT017oAXLh9@GJ5A$6H$YEl(d*B}s4FvuCTcU*wp9Z(nS4P2;9X zNo{)Yz^ks#?zL|*sr*j15%G4GPVm#S1{Ri<+cGlN7Z)$AE9+wQbP%Xd&(5Ade}201 zBw}@Hm6$>-fR`$o=D3r}>UyMe_3G7Sh4-5qgL12C_Aa)zWo?yP91bj5B5P`Dsy#R5 z`pug&|=4)nP>m3F9SXji54z&06{9` z+TT+4?E|Qd$E-QB9us#h93vt=d9n`a`ZOyWn~z7+sa~F*8#ZsAIDW!}Gv5tPA6TM6 zxbox-8`g|Jf9BM=dv(9KyrHfBu=JbSTrIr#q^llWVxi04^|`qUxtwi{TW2qN@$Oxk zLi~4k&a4@03px(Ym@(t-@gUb(($Yz(`Su$(Zam$2gHW-g*oC^FgsYujXADQXamK*Q|1JWH`x zZ{BQ8eYxw{@#A^-+ui*AweaF5a~$iZWZfZ0#>DLZ{dXtMtn%>YD-$B4qd%&I&aL*S zKmG9|YrLrF)>Lz&i4!L-gP?*onODjhgp|`}3N7f)r ziSDWQDdV(z)ae|WuuEh4dK31W&8g0B30Gwokc(ciJCPRCAF zgD|DcSEKDg(ahW|kKJd_;=lIyZ%sAV1&czi?@;&rTK;C?$u$GfPJ8hLjzq>7M}m^L zxcIbb(*V4v$)(!bG_lilBD;!hlII$T=f?H!Zj*Vn_QVPO@nT|0speAyi$az~N8|JN z@3*c7<}CU0gl!!AwX{v(?94p2gScTzAnR>`Y2at4^MyXH^Z%ao&rc-Pv$dT5dzvRS&|M_wSjcprE8w zbKRTQuP-B!4z$UT)OHnbt$+A1-#ObbpO`)q)+zJy%sCRr#yJwIdfz=ytCo*(;z+R8 z=UFDc>^yo=N^V2fB@aT`#>vvfFPFZFc=v3Iea4zE4SrlMH}c~RwY-MTZqr~kovwNF z)~(l@ix#lo+`gQl;%HS3EH3pTP#>S5JSu18w#mfbAnBTCD32EvHH=9+`0nBl`3znJC%PoT$K?Xw2I)V%FBy`NZ!k z+ixU%XsK>F+2Ds@F-zmh7qIDO~N2^aj*R2+Bt z9(Bx6{(WWnRB0{0S>NUQLe*Symc4s+!5Uq}-z^iT7DpyN=x!>n4A6ejR=5BCmoK() zaZ5jb{0Lv|9|P~=A9^f}>(|%S(;Fk=bJVZu)s5}b6%^9$7rI(zAf9B0jvlS<`_vlN z+qGPz;m010MT;hj1nAGzVLV&X*&}bv6p6kdKYRAh<>d;Z3p6!P?dsT}?)m;rHseZQ8DL~2ouLd?ZJlkr z=DRTmFX!OkAR>|**E^@br!(=|HJi@MBn_#_4@*{xUGPhrEUmfk)G4X$YOXd+rglh3 zh?h@stP|maZ}Gx|yMlh0^}U)ITI5Y27J;Enp$UA}qC7D{jEz`Ai_BH#PP*DKkk?mKnrRAa-^uP-*7=Co(@Y$?vkk=^!O zH2ZyHd{1Ceu`iYS`$E6Mkfe@|kX2_nH~LLi5(tEcwL5q2Y<_Zvt(|@6PR;$##}3cC z=rTnz>HF>rF<)NxwdP7==iNF<${^-c_KB7hqi2^^GA^j#MH|-@6bFh;mAzC7R$5g9 zYBIW{iTCc`$4+C{Dbx)_Suq ztL7}0?sEt0*S^JbnQeFHTUlFgNj2Ba(x=Qwl{Hbsi<+!0D4y=Rv}cy-z-P}oo$F&3 zhj(3iR7SL}2E@v|4Dw4$rwtAc<`dJ$eU4u0^4k)1yr{|Mg5vo)Iy!bvPP61Tbd`p8 zuef^c8cf*(ib9gp(j;?DyyqNml<@cWN7+D%N#uJ#lK#7H+LDhGr^?}#@uD{p3X08_ z$RtL;-)H=|(xd*+`}a(OoY@08h(vmCP&D_dM|~;%>DlFJyIgOw-rSz^xvkEUBf%QG ztN2<}R8-!PBQ6|?v*ymuQy!n9Ey9bMOmZZ&EL^l`x{S;Myy(+?5$~QII&wrxZbME& zL5FBaXlQeL`GHq%yCkj~b5$O`kSxEp%(wmehQ8 z$4pJ@fB>Lo?_xE)_@ql7Dp#wj%#Sv-Tv|6SfB&J!nf`0G$S;i!sLQv`-=DrrgD@8H zDz4l##nRVTrntD+s@i?$&6^G~5~526zqS~&-z2vgE6X_U+|ziLyJ-FT_3vLMX}$dT z`c{6+f)!CwCgeMNHa1r#DygVo;?dIwi{G8!(8RbXcbt63!rFRcoAFi;Z|_aJ^@3ie zE`EEdm~hytdj3YR=w9xMlOeOuM!ZYCv0d%lz`T2L@cl|owrV)Cx-7aUq%O=Z&$sx2 z;=7AKOyy(Wn}2@yTp_gK$i}L`>2fzz=FXc}M!qxmb9a-p~wHmeHCFopMd(zC4T35|xlyd;PlIfr5f9sWuC% zJyaUy%+|<$`0yd}&F-KVot;T##A-!-{aKTxG?s;gEOf~+Zoao=Y-1vmo`sV znl^j(#izuocBGux1+ zO}r){(jd9=(Lv|z=W!`%X_A9|wejK=6Rb1{9$7wqQ4{tqjyScepuF5DfB*jMxI3?I zY;W%FrVY&X_xC?P*x$K+#!Q2150z$=DI~5NCl>hr#btQImF?GOW=h*6%Y@zkB%xp` zD=#mft~^OzUjFot!Ea&Dc#&3b;X25PT7W2mM>p^I<`;u z@d?U?hb1fbGoDw(M-N{7G4Kv<`26|vZUn-{+qa#Z_Tpz}D9=|_6|GK{W7py4_s@3R 
zxvHalW&5Tn^Y+A596B_y{8{uO4t{l#W^Hw~Sp2u9=gf`=MPvn6C#P4v4i=?eH&p1S zc0O}*adAnSeA2!;Rc>2uZv6e)uox8|Vo}EZ@AcjtAy52n%=Rr-K9`)FEJZ8GXU3=m z1r}v!(4RPnO_{44yNISvKdNlBYoUo%`$~#qSd7YS(lN||_YtHQ?lgV3CUp+j1;jGxMV$rZg1XKFa$5kHn zn-7+FWhm1N7GCKl#P}93tS<3deELGjI7h<9(>p`vmHTjsMa2`#J-$?S6XJY}#Ur{N z>Cg+Vb`z|9i^c0t?0gpgW8jHH(H#2Go7X%D)93}HqIfjUn>RQQ;c_KxV%6+2ByyPyG>^PCoba;txad>e5 zjhd1jC$=^n9`G&3eQqk*VODiRju~@O&ap>}xk%+~YE$T~)6PYE?vP1ALBxh5tL^OU zE`9I&1S`ASlx(jAXam~X)0W2cT4idkzFQF&6B!fJ^7h=jO^JzO^mE;Yoc4PY;HLor zfQ0Bcn^f5?Sy?hytE%SDpFieYc(;X%%hZt2Q0wPjirJ023gu73=Pg;Xg!$!ZL}gRc zyzZv**4QV@wiomde0i>cv# zd~!Oh?NOPMg@r|cwoGZ{hrj~`1HYnYVZUcLEMm zNI>LG7nfAWPw6o&v8Q(}bUayBHeGf5jsDOZYsZY!4t=N*`~5>TtR7fZpDRj|)>1Su zn5}jG{n-95@6t@;`T-3MiK3#S{QcXmF-@;}5Q^x}&d(uUOE&L(STcR?+^suy%<>`- zwxmLthkcF)c0MebIBC*4WqX;$;Too2UyQ3Ov#T;zxVi7E2MsT?#ig+Fc&%yD-o+7H zQ(w(m9Nx7oGSWCUHa0+i?zWvfRq!*4L|QGQX00rqXGp!Y&wRs^j{(Y)CQXttW&*ID za^Qe+U6|cEV}+Z0C98IJe0+VY?hHG~%GOp%Ny(BUo0lrTbnBr*3pj6|%vt1fWR_gw zthJv$EGno;40Pz}E`NTlr2N^|#^XUP9rcIvTNd2hxya}JJx8rPSP!d+J=12aEYjMN zrcN_W$P5t#LnMpOjQ~SfV`^+~sY9j-J3~Nl{w9ez>#tw8EC0G^vPu1e2Y{mDSaWl8 z&G?l0XGB{o*W|Za+7;F8T`WB~IB4hU8qwYpa_8~}vAV-PTDNn}{5i8Srng$k=f9~e zD2VQOp#HV5Prbt&!ugg3PWV1B@S!EmY-_5;)EBYFiHBP&p@Kc>>1%&m8k2we^rQBM z*Qw%@CxXfNYPgp8?4>@H%wWp^ifB#))rs-AIn^@xluRCPD^A|3-dwZ*PzRsX&t~xE^>grmdYzrDI+t2VK#;Bhfwk*|hSqh^|{kB^U-mX=nX z&cqx^m5dlu60RPDQ+B;RX6L7(qSE%TB)d(9p7JSg6{{wZT7VCGX(em6X4NLUR~n6t zjT0p$rR6GdVWvA=kMyM%c=^Zm5HwbwebB7^D%NqA#^oFnwpBHAQI2F!I?+^Cvp()y zFZlI+^VE$%L-gl5+Yj|V{##O;rHmCiTH0cbyli72%&&@2RwepCEv#YBuhjZeNkIl_bPuR81 z@0%<0iNWHnmW#V~OP+((M?giT>fonzU0UO@3Dc)fA9|YYcoIcnP8!Tp&&te{?&#>S zs=hnECb9PsUcoRkBsO*l`F0lQYUpvO$g?nTpuZ=d7(BMsGKRWWGTYH$$r9Pd#>QTf z(ndZYv*%LZUHO%*l?OdaN=mG=4QX9>Y{cI*H?M=G6&@ z=i$X}+`@$xU*0}_ItbM8;TboBn#Ax%T?NCP9VaOFOBv5iZI7MFX{6c&1OSHgC!25H zbXZjpJLkiP59!L23JMAqEL^zWrO+}e>P*`qi|(iAGF?lJH}gi+Gw6xK=`@R}d%Brd>;sdLzs`5^qlA32kr-rwkN|M$J{QmXB z0xd0DF4w>>et>J2k)W0vJ0mJ8N|AoBa;k!9a-zZ-UycN!C3aQo+fSfp(KB}K(gh0_ zvXrxL)z{a3Qs@DIB~U;w_4Oy0Ln-oP11|PV!s>@d*?4e zmNUuJ)HI)XXm0C`p#R8(~5t-jg7 z=b6{i6c`tX_6Xu4(vX{wWyFxzTz!@_KxyI>_H|P#H%-Z>AF+J!^~Yi5kne+GrYnvV z{J6O1xmP*;sd#*R{DX!Dqi?6{g0FsjeJlGZCD}$PKR^HX^02ymYu^je1|oowk@VAZ z5!Es8E+m$gYWr$P&6+i9S>gTW?|q*R9XYb8k$P>Lk}dPYOY6+%F1BHwTv>SRr!NzW zRa8{cl_xFo{(YIV^OW0Uy^6v9P`iwTxNp4%=k7c0)}6qp=u_2@n#^LeOIpXR8K}3; zQ0{tuQQ=ZgZU=tv)TUFkaU!yd!zD75=bM@NKk7ayt-x>n_z#iyeXY4a-uk2)gg)F? zR#q2XSoHhvS1)f6%f8_K?scaBCS`l$=+6s80Cn|A(HANjLwhUw-ei+A&q24O+>P$f zkL`UPmLpjs7CI{kb^0w+zt(g{bHU_^QdrC2-F08jsk^-Ndk4hyeZH#V7F?#=SC1Xo! 
zUPz8)_b~{5K2B_Ez@t(Sem<86st8=flc5dFTex!LjFef$+|Gx=RO&1_%X(vl$Iz|V zN3l!lH*cOuf}qCilvxXul*VmWb)F$-In!9-F|jlD(W>ImhreI`+xZg1~9Cmp{%j$cz`nv~S0l9lC;_oU2PU}CbHQJ|<- zlNkO;Pa%9~N1kQkwAr(v`%!kgRh{=FDA}fNPR&!#k?bxFGVNMO5Q*$4D)fpvF{i&i zRc^6%c$aUTj{cTUd24!W5}y{V@b#S-A0MAxYkM=Vp)>5`D|^AG;vy&%bE5OS^25g` zL#NN1w+O%KV3_HHTig1WhrGsIs;*9~l|7u?NZoYw=+QPA`npefrl~cFl?Bo7Uzii* za~k$8{dir;nA4Un_)z?PfNaQ>4Qo!zw^}ZJl_j~{VYbMX%F1oqx3|21o&Jf0;56%* zC=)C38nbU@Ieme;`Zkyc2G!)4r^zq1s%D1fNWRWso04z@k@p`zTC85Zmzrstdbp^_ zyy+Cpx*A}b?y!3cbKgZMcms3a=r3qx_5F9dom3^} z>e~rO5`M)Z+O)^|@+YgURh{=-jj$%nsY&cS9ca4Bqxe$Km+==r)hu>*b9?c*lV#i2 z*Vp#=wUZ*1C1;j3eIW zv@LigQj^F&9c{X5MX~eCmoN75)*O_f~`^BViE0po#H#UEoFX{ei z{sFHsFJ9f)E@McQxpL)-8-eipW2evY;jquE@i~&;w)`du7Ca`wQmwf7l_af-&9^eO zCf~{vef|1%mO-f0CP{_V9Xn>#g(ETe50L$acbE@4T2VZ?l#}9c#Kc|7rR2HA&a6 z+i|)d&pzyPRBA|wed%!W#J_PO8&6E$6|y}iNA~KqYgZZ?q-UzRB-|j~dm?uzFK^jnl@MMVXyM_5=|mIYiMd*I-~mshvU)V$;A<|eZJM#6;2Qh>hxtjm`#x7{n4 ztm>4t)*$pDprut?5S4n=qd`Pu4ATO%9O z+hy+IF+&QsB7t)MU~_w6u1#d5aer_3WHU1}PUpiZums+8iZ&hf8qK#%oZ9xVWZw7h z-|v<){1XxrTi_~Gs>Wc?(fIl5>XYCG{ru$T&Yf!oYqw#huUZTX9IRf6YY-F_=ahME z*-X0Xp`w^E-Erq;JDY#4kc1=r21^jXy)qe4csS>D)sOb#c9EYuO``sPNZtY@wJZtE z*rXW+(y0HIkJ-=t{|xo@3`h9?85#WY|NAfbQ&IwKsf<973Roc#*U?Dm0?8MUVBSa) zBMpGg;6MN;804V2(U8QdjBtp>qL9!u48tpH02Dfr$|WHQR*_^-5S>VY*c$w3;Ar%D zE*q^MgboEcj^aqVDm2;AP^OfipAHs}mhfVrBiZAIhjtfNQW8g7K_i+A)xZlxQ0Hgc z7~aBP3Aa8xK`1TCQ{cBamXU{-{{pZ;!`cfbodqyQ#u1^?gSmm4Sa^IQgQhiO{T*_3gM^}dMKM0h8+p-Z>XuQX(E_-($G+RSkgTHCZTcN#YZHuQ$y1M zr~wr6&~yL_8&IQy9Eih^7tJQev_o9FvP=_e3N5^XK)G zoSDDU6(AWaI4m@06M*DPMdraLHp1D8hSC@7rl1Dj@ z!*i(cYKQ|c+i=60=db7p(&dS~&w;xTBC8{N`Z;06@jAvREOxU0Ln(Fz=JF>*_aBp8 zejk6I{_=D7{DcPoDHZF~cviTN}9mBIKgXJbOCj3>l_g&On+d=2Onm_}O$L&Q)o z3Jqd#Ie5T8900<&3>KG$2Xw^&BzK}YnFFz$DO4(j4G|f15*rT~!UD560FXfn6(+p= z`Z~tw16aVLQX#5m6rIS%13E}5U={}t=o;!7nV>h>oS)NED6JEgZwARyDnxJS3=%Xv zx$;jkKJk*m@UuQ)*^|+nPYf2H5M$}m`2E;hp*fWykQfY80xE?FV*T2ad9_IBV}2!s zvf; z&ET+hu$<`>qP>+66}RM_FEn{Gz(DQ+IwL{y3Q++nB^;tdY&3T@3j#?Ljq)4$T;XhI8HExBT^|4or6R7 z1xI0`=NhZ)h0cPSBxppRpv8Cq`>9D~5J9TA zI5zq}O&I4Q_}(8UixVc;Ln+j;dNc+Jq9W-LnM`VwCg6kKT13D_xUB}V8?bW6MwKxn z)i;C1f`}L*fZiu`tps>GHV**3xBT4y@I}Cs2sS`Pazs+-7&NFGO{}) zzwD4m%+>@PVDTAVFLe4a5#T>Y^P`qQ|QINnp=>6S`dfr>8`648VmjF8$0!CsPXx@7~Uyc@{LL3I4f`I5)&`lLi+zfM53P{ro3WZ>j5kzF- zuv$2*79LP!gUgsKMhHaYXd>@oZ(uLLnyjL@I4ck&{^1IHSeRQ5G5f!|zOazRpbl|t z4_-cf^fgVuo&%5>@L5F33;Z%a|1l_#bz3;IJ`Mnx6eNYPVD4%h!sO^k7U7{%ApwP- zbQ*`8R4gAQ!qF*^K&5bq6*#;Cmko_<6{dN^UkoBbnp`@p{86Y-5VABvDX00;4wFFw zcsVg)d7lGCa>RLq!hVkiU18~y5%Ea}-7qY9EI&gmd>2Q6Qt*`d#6bxl(1{Eh4JK`< zdWKX50wI_(AlwQ~XO7AO5PV8lkS3c!10!L&ftd>Uv<4jkb|i7=xgMn&c4CprctBAH zRzT4_x%}j~BiH0VtibS-mlDwywXi(G=wr`6Uvd^dcQv}`JiPD^FgEd#_}I{{^QtTe zWN?BNC$%;60cpj6>qc^cy}$SAcZCb__77FQfm%B!5HcR zktuYDqye~jTk@eX9wNr?IS?W%-D~WDU54Hy`*GTG^p?TQ;zOLI)Tu61tigNDV&@1YzxlNMQy;EELB9 zNWpGMouTHjofSOL!p3WHl(gXm6?`n|H~_MOxbV#qaKn{fp;W!+sqTrh>h)Kl-g2YfNBM8T) zL_>g%wyr)cP!Sqk%X!1fMgQV!`?qe0zls7qVj*y!*#9<)OyIs=|}x zNecih|1ju08?Pnr9Ueg>GT^n~K-8!qG&vBi1@ljO5DNCfM2QYSG$tns<+H4a2OLLB{bUV;pa|yy;=x^G1jYq&UFFiQs8xWg+;>+{sB?@SI>5SKzq7QYnfH zAbdtZ-S}%zG9RyCgu_|}6rqKI6*yRT#W4b5HH58&cA3rMKI zs*agL_@Xs>S77oWvVn3*qQ0(Ba zg4skW=7piBi(>^tR3o861GEft$Aa$=aw90@VN0SZtRVne!h5MXif;^ZX+|@yp`@Eb zWO~9{FNaUynM0ZkQZnjt>u947M_F*C(G7M#X-seQ#&~^`*89KPGQ1re)E7u#LbyPP1LDXK$lxuQ!bSn7$K8xN2kgcuMMb+k2+|7jU3t`-;ag%W`OFssu@ z=nw(z{O`Rx#)h;O31QRxs^~@8zqB2~mM=$Vd?M2U}rytOy`Hax1XU5&+(J2_rCC!S5Il zRSj4oSY#?t2UC<3af+i=Q&iNzE*J(BumMsO9i&kZEhn79rZ6xgHLpD`9UBF(0A4np zdIuor2+|duOQmW6bjFD85DW>R@Mi=w0*(ei3}!I+mQbv{q23{uCIBo5V(J16I$-YO 
z3C{wglRy@{@Ix)xI$CSMj{#M{l?l;3IUq4qeV8|h(8nK&ICL*iV(tucbM%zLat$nj z_^>D($dyh+Pb!-d4dDS}TMD0Yjo3B5EW&%`FVPWnP@~-=B?0jQ0B9r3<5-z>B+OOV zLZk$1LclDFTHVlce+(AJ#5e?jvZAmASU7=6LeK~ftN{l_Y48Xww!1gB@X$$hirid4w6n~dN76)NU*z5cy`8#_I^9Pu2BSjn*gNmRo0%O)v zV)FXonVMOGqv4MNKbnXIOSuRHP2oiG@7fUfJnR7;P*dZ%dhooZISiQjV(L0X4+p|Q z7RCu|Mu&E->EYfWO90%(th&6OfgwxokTrPdJwyTYANjToi$TNlivWO3fv6-8h%8u! zm7)Zd|4-h0g+_-M)diGNXOM|{@(9lqKU!r=XMyUN{pYXN#Y{h#O@GK1;)xpDheTn_ zG=b;w3r(JM3Ied61|kfKZ#ogj*cS^zv2Dbwh;LUxfFOv1sMb6;8-T&mfYoC@(bhnD zD}^2mu_&;V#9+fTmqdFl&T`8Va5BFMwu4q7IXR{vSVk5sr=Yha(R4Ct!KP0W>huGZczI0cM!j^C~(* zQFvqzv2vK$6X(S(pCTIJ*oN zr%Aw&zY(%#C;<2OU6Y0y92KegDNOSV!~cch|HAP9Pht4KQ2751DEu${{J#W02i#Z; z8pH{PxNN{W5($^X0<-{Uh{K`~(a#Cce~$b->pvO`2*SEU^$9Qt59ny>7-(t>hTJhm zxzw*#G~{9oX-tu#!o9-_0>$cf}5pokPOe1L!Wk%KvF=o<%s7z`0Z zaiLHY5?#;{iqfD(xx#nV5kf#Yfht4Yk64R8kvq+WKl24(K8xKCcs1BW7RZFf0{8|f zj`9Mk=P8=#=<1+~kFYE|0^OmIfhY!7jl~l?NBKgK$Y4PXw$@NTI0h5e01&>1$x+S> zI()~5iIF^gRmkE23gU5OfDS<<>|xvWAnB1CmqORqy~zC2&IpJXVHcI75T=;8G#%+fhs`7UydjsJV^8 z1|axf2*Cx}0LcGNUjtw>G=z~>4!;^KuAvcDVYye+0}Fb{U;&zz@T5o4bN|wvQbIum z9w|cx%@chX zE(Np&nj|w=ctDpXP=X1;0wwUN4To5WVFqHcFy#P!1`a3g{3=`;4TL2W522usKuin( zk5?J(Mw4G27pDls#UbI{gxVoNY$A(-NF#r@Em+wVYEF1@oe@0BjuI3s*de;crb1)D zmK+7J{9&5v9~i}f*#B$?76Llj+QRK?{iHh@82KOQO}JrGz(lC$k-I z(-VLgTF^kM41mTZ24ldXaN|P9B3Aer@K1Z>y@!%R^r;FW?!xjNKYlbi9?d^GSB_AW zbvT765c5OeU=158FEqshbju;Q9z^noB{F{wg9B3k(Wb97psiL1Oo5Smt;7Bwwb#KQ z%&Cw$1Mr;x^Y&P7J*YP(d~aFaT&2I+w%4(*(kv{IL}LcfitNPDsO@jQ(%> z5E0mX9Meh2+k_V-0b+9ikRMow7b(-;O@r400yRWX*bqR4z;Fl%q=NL&QCwAc4lIA- z_k#Ju+0!YUA>%n)FkC$R7=_Q=AQp|nL&YqEL3$7xa)uE>hgf_EI4rgXhW#SIFJkgR zAToY%7A!Ij-2KSLM~f-(4{-w;g-#(sfkS{nAeYSp5Cky-G~n(pbYMg%TVVzLZ<@k% zbd7&v3KKqbBZCR?eVy>Am>JB7p!1Fyg7T9YR1(BOq^Omf%tZskAy9uXh0MW>7%*pH6NxBT$TMWXqo9!3zyf$7wc*C#)kLD$ zK_o1g9I%|hMZ5?jKz?vG6v6l#YrT{=Tsl0@KwkVjcra)uOvF$vR2ickOVOx-e|RK> zUgIHBqXhL5iYcWn5ETr)jYwdZ5#gHtz?TgTW`+5&0kJqiaL3%$<`1h2*!`41Cal8Z z7<6hB4(S^J&?3a`Ckl0L0jzjYGpsBGt%e#%H-ffHkjjEUQWTHk!h6St8H*ws?8_64 zR|elze%vp(oaepBgji$-i^hvA9!@K%P)XI^=`m2EzQQ#8s=>

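The surviving ingress hunk below still references $fullName and $svcPort and closes three {{- end }} blocks, so the lost opening almost certainly followed the stock `helm create` ingress scaffold. A minimal sketch of those missing lines, assuming that standard scaffold rather than the author's verbatim code:

{{/* assumed preamble of peertube/templates/ingress.yaml (standard helm-create scaffold, reconstructed) */}}
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "peertube.fullname" . -}}
{{- $svcPort := .Values.service.port -}}

The recovered portion of the template picks up immediately after these lines.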
+{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    {{- include "peertube.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.ingress.tls }}
+  tls:
+    {{- range .Values.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+  {{- end }}
+  rules:
+    {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+          {{- range .paths }}
+          - path: {{ . }}
+            backend:
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+          {{- end }}
+    {{- end }}
+  {{- end }}
diff --git a/peertube/templates/persistentvolumeclaim.yaml b/peertube/templates/persistentvolumeclaim.yaml
new file mode 100644
index 0000000..54cdb25
--- /dev/null
+++ b/peertube/templates/persistentvolumeclaim.yaml
@@ -0,0 +1,28 @@
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ template "peertube.fullname" . }}
+  labels:
+    app: {{ template "peertube.name" . }}
+    chart: {{ template "peertube.chart" . }}
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+  annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+  accessModes:
+    - {{ .Values.persistence.accessMode | quote }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size | quote }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/peertube/templates/service.yaml b/peertube/templates/service.yaml
new file mode 100644
index 0000000..587e68a
--- /dev/null
+++ b/peertube/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "peertube.fullname" . }}
+  labels:
+    {{- include "peertube.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    {{- include "peertube.selectorLabels" . | nindent 4 }}
diff --git a/peertube/templates/serviceaccount.yaml b/peertube/templates/serviceaccount.yaml
new file mode 100644
index 0000000..4321b71
--- /dev/null
+++ b/peertube/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "peertube.serviceAccountName" . }}
+  labels:
+    {{- include "peertube.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/peertube/templates/tests/test-connection.yaml b/peertube/templates/tests/test-connection.yaml
new file mode 100644
index 0000000..49f49f4
--- /dev/null
+++ b/peertube/templates/tests/test-connection.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "peertube.fullname" . }}-test-connection"
+  labels:
+    {{- include "peertube.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['{{ include "peertube.fullname" . 
}}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/peertube/values.yaml b/peertube/values.yaml new file mode 100644 index 0000000..f97d444 --- /dev/null +++ b/peertube/values.yaml @@ -0,0 +1,566 @@ +# Default values for peertube. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: chocobozzz/peertube + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "v3.0.0-buster" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +configAsCode: + enabled: true + config: + listen: + hostname: '0.0.0.0' + port: 9000 + # Correspond to your reverse proxy server_name/listen configuration + webserver: + https: true + hostname: 'peertube.example.com' + port: 443 + rates_limit: + api: + # 50 attempts in 10 seconds + window: 10 seconds + max: 50 + login: + # 15 attempts in 5 min + window: 5 minutes + max: 15 + signup: + # 2 attempts in 5 min (only succeeded attempts are taken into account) + window: 5 minutes + max: 2 + ask_send_email: + # 3 attempts in 5 min + window: 5 minutes + max: 3 + # Proxies to trust to get real client IP + # If you run PeerTube just behind a local proxy (nginx), keep 'loopback' + # If you run PeerTube behind a remote proxy, add the proxy IP address (or subnet) + trust_proxy: + - 'loopback' + - 'linklocal' + - 'uniquelocal' + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/16' + # Your database name will be database.name OR "peertube"+database.suffix + database: + hostname: 'peertube-postgresql' + port: 5432 + ssl: false + suffix: '' + username: 'postgres' + password: 'peertube' + pool: + max: 5 + # Redis server for short time storage + # You can also specify a 'socket' path to a unix socket but first need to + # comment out hostname and port + redis: + hostname: 'peertube-redis-master' + port: 6379 + auth: null + db: 0 + # SMTP server to send emails + smtp: + # smtp or sendmail + transport: smtp + # Path to sendmail command. Required if you use sendmail transport + sendmail: null + hostname: mail.example.com + port: 465 # If you use StartTLS: 587 + username: peertube + password: password + tls: true # If you use StartTLS: false + disable_starttls: true + ca_file: null # Used for self signed certificates + from_address: 'peertube@example.com' + email: + body: + signature: "PeerTube" + subject: + prefix: "[PeerTube]" + # From the project root directory + storage: + tmp: '/var/www/peertube/storage/tmp/' # Use to download data (imports etc), store uploaded files before processing... 
+ avatars: '/var/www/peertube/storage/avatars/' + videos: '/var/www/peertube/storage/videos/' + streaming_playlists: '/var/www/peertube/storage/streaming-playlists/' + redundancy: '/var/www/peertube/storage/redundancy/' + logs: '/var/www/peertube/storage/logs/' + previews: '/var/www/peertube/storage/previews/' + thumbnails: '/var/www/peertube/storage/thumbnails/' + torrents: '/var/www/peertube/storage/torrents/' + captions: '/var/www/peertube/storage/captions/' + cache: '/var/www/peertube/storage/cache/' + plugins: '/var/www/peertube/storage/plugins/' + # Overridable client files : logo.svg, favicon.png and icons/*.png (PWA) in client/dist/assets/images + # Could contain for example assets/images/favicon.png + # If the file exists, peertube will serve it + # If not, peertube will fallback to the default fil + client_overrides: '/var/www/peertube/storage/client-overrides/' + log: + level: 'info' # debug/info/warning/error + rotation: + enabled : true # Enabled by default, if disabled make sure that 'storage.logs' is pointing to a folder handled by logrotate + maxFileSize: 12MB + maxFiles: 20 + anonymizeIP: false + trending: + videos: + interval_days: 7 # Compute trending videos for the last x days + # Cache remote videos on your server, to help other instances to broadcast the video + # You can define multiple caches using different sizes/strategies + # Once you have defined your strategies, choose which instances you want to cache in admin -> manage follows -> following + redundancy: + videos: + check_interval: '1 hour' # How often you want to check new videos to cache + strategies: # Just uncomment strategies you want + # - + # size: '10GB' + # # Minimum time the video must remain in the cache. Only accept values > 10 hours (to not overload remote instances) + # min_lifetime: '48 hours' + # strategy: 'most-views' # Cache videos that have the most views + # - + # size: '10GB' + # # Minimum time the video must remain in the cache. Only accept values > 10 hours (to not overload remote instances) + # min_lifetime: '48 hours' + # strategy: 'trending' # Cache trending videos + # - + # size: '10GB' + # # Minimum time the video must remain in the cache. Only accept values > 10 hours (to not overload remote instances) + # min_lifetime: '48 hours' + # strategy: 'recently-added' # Cache recently added videos + # min_views: 10 # Having at least x views + # Other instances that duplicate your content + remote_redundancy: + videos: + # 'nobody': Do not accept remote redundancies + # 'anybody': Accept remote redundancies from anybody + # 'followings': Accept redundancies from instance followings + accept_from: 'followings' + csp: + enabled: false + report_only: true # CSP directives are still being tested, so disable the report only mode at your own risk! + report_uri: + tracker: + # If you disable the tracker, you disable the P2P aspect of PeerTube + enabled: true + # Only handle requests on your videos. + # If you set this to false it means you have a public tracker. 
+ # Then, it is possible that clients overload your instance with external torrents + private: true + # Reject peers that do a lot of announces (could improve privacy of TCP/UDP peers) + reject_too_many_announces: false + history: + videos: + # If you want to limit users videos history + # -1 means there is no limitations + # Other values could be '6 months' or '30 days' etc (PeerTube will periodically delete old entries from database) + max_age: -1 + views: + videos: + # PeerTube creates a database entry every hour for each video to track views over a period of time + # This is used in particular by the Trending page + # PeerTube could remove old remote video views if you want to reduce your database size (video view counter will not be altered) + # -1 means no cleanup + # Other values could be '6 months' or '30 days' etc (PeerTube will periodically delete old entries from database) + remote: + max_age: '30 days' + plugins: + # The website PeerTube will ask for available PeerTube plugins and themes + # This is an unmoderated plugin index, so only install plugins/themes you trust + index: + enabled: true + check_latest_versions_interval: '12 hours' # How often you want to check new plugins/themes versions + url: 'https://packages.joinpeertube.org' + federation: + videos: + federate_unlisted: false + ############################################################################### + # + # From this point, all the following keys can be overridden by the web interface + # (local-production.json file). If you need to change some values, prefer to + # use the web interface because the configuration will be automatically + # reloaded without any need to restart PeerTube. + # + # /!\ If you already have a local-production.json file, the modification of the + # following keys will have no effect /!\. + # + ############################################################################### + cache: + previews: + size: 500 # Max number of previews you want to cache + captions: + size: 500 # Max number of video captions/subtitles you want to cache + admin: + # Used to generate the root user at first startup + # And to receive emails from the contact form + email: 'peertube@example.com' + contact_form: + enabled: true + signup: + enabled: false + limit: 10 # When the limit is reached, registrations are disabled. -1 == unlimited + requires_email_verification: false + filters: + cidr: # You can specify CIDR ranges to whitelist (empty = no filtering) or blacklist + whitelist: [] + blacklist: [] + user: + # Default value of maximum video BYTES the user can upload (does not take into account transcoded files). + # -1 == unlimited + video_quota: -1 + video_quota_daily: -1 + # If enabled, the video will be transcoded to mp4 (x264) with "faststart" flag + # In addition, if some resolutions are enabled the mp4 video file will be transcoded to these new resolutions. + # Please, do not disable transcoding since many uploaded videos will not work + transcoding: + enabled: true + # Allow your users to upload .mkv, .mov, .avi, .wmv, .flv, .f4v, .3g2, .3gp, .mts, m2ts, .mxf, .nut videos + allow_additional_extensions: true + # If a user uploads an audio file, PeerTube will create a video by merging the preview file and the audio file + allow_audio_files: true + threads: 2 + resolutions: # Only created if the original video has a higher resolution, uses more storage! 
+ 0p: true # audio-only (creates mp4 without video stream, always created when enabled) + 240p: true + 360p: true + 480p: true + 720p: true + 1080p: true + 2160p: true + # Generate videos in a WebTorrent format (what we do since the first PeerTube release) + # If you also enabled the hls format, it will multiply videos storage by 2 + # If disabled, breaks federation with PeerTube instances < 2.1 + webtorrent: + enabled: true + # /!\ Requires ffmpeg >= 4.1 + # Generate HLS playlists and fragmented MP4 files. Better playback than with WebTorrent: + # * Resolution change is smoother + # * Faster playback in particular with long videos + # * More stable playback (less bugs/infinite loading) + # If you also enabled the webtorrent format, it will multiply videos storage by 2 + hls: + enabled: true + live: + enabled: true + # Limit lives duration + # Set null to disable duration limit + max_duration: -1 # For example: '5 hours' + # Limit max number of live videos created on your instance + # -1 == unlimited + max_instance_lives: 10 + # Limit max number of live videos created by a user on your instance + # -1 == unlimited + max_user_lives: 2 + # Allow your users to save a replay of their live + # PeerTube will transcode segments in a video file + # If the user daily/total quota is reached, PeerTube will stop the live + # /!\ transcoding.enabled (and not live.transcoding.enabled) has to be true to create a replay + allow_replay: true + rtmp: + port: 1935 + # Allow to transcode the live streaming in multiple live resolutions + transcoding: + enabled: true + threads: 2 + resolutions: + 240p: true + 360p: true + 480p: true + 720p: true + 1080p: true + 2160p: true + import: + # Add ability for your users to import remote videos (from YouTube, torrent...) + videos: + http: # Classic HTTP or all sites supported by youtube-dl https://rg3.github.io/youtube-dl/supportedsites.html + enabled: true + # You can use an HTTP/HTTPS/SOCKS proxy with youtube-dl + proxy: + enabled: false + url: "" + torrent: # Magnet URI or torrent file (use classic TCP/UDP/WebSeed to download the file) + enabled: true + auto_blacklist: + # New videos automatically blacklisted so moderators can review before publishing + videos: + of_users: + enabled: false + # Instance settings + instance: + name: 'PeerTube' + short_description: 'PeerTube, a federated (ActivityPub) video streaming platform using P2P (BitTorrent) directly in the web browser with WebTorrent and Angular.' + description: 'Welcome to PeerTube instance!' # Support markdown + terms: 'No terms for now.' # Support markdown + code_of_conduct: '' # Supports markdown + # Who moderates the instance? What is the policy regarding NSFW videos? Political videos? etc + moderation_information: '' # Supports markdown + # Why did you create this instance? + creation_reason: '' + # Who is behind the instance? A single person? A non profit? + administrator: '' + # How long do you plan to maintain this instance? + maintenance_lifetime: '' + # How will you pay the PeerTube instance server? With your own funds? With users donations? Advertising? + business_model: '' + # If you want to explain on what type of hardware your PeerTube instance runs + # Example: "2 vCore, 2GB RAM..." + hardware_information: '' # Supports Markdown + # What are the main languages of your instance? 
To interact with your users for example + # Uncomment or add the languages you want + # List of supported languages: https://peertube.cpy.re/api/v1/videos/languages + languages: + # - en + # - es + # - fr + # You can specify the main categories of your instance (dedicated to music, gaming or politics etc) + # Uncomment or add the category ids you want + # List of supported categories: https://peertube.cpy.re/api/v1/videos/categories + categories: + # - 1 # Music + # - 2 # Films + # - 3 # Vehicles + # - 4 # Art + # - 5 # Sports + # - 6 # Travels + # - 7 # Gaming + # - 8 # People + # - 9 # Comedy + # - 10 # Entertainment + # - 11 # News & Politics + # - 12 # How To + # - 13 # Education + # - 14 # Activism + # - 15 # Science & Technology + # - 16 # Animals + # - 17 # Kids + # - 18 # Food + default_client_route: '/videos/trending' + # Whether or not the instance is dedicated to NSFW content + # Enabling it will allow other administrators to know that you are mainly federating sensitive content + # Moreover, the NSFW checkbox on video upload will be automatically checked by default + is_nsfw: false + # By default, "do_not_list" or "blur" or "display" NSFW videos + # Could be overridden per user with a setting + default_nsfw_policy: 'display' + customizations: + javascript: '' # Directly your JavaScript code (without