Compare commits

...

132 Commits

Author SHA1 Message Date
ace
c669ac9e36 bitwarden: bump to 1.34.3, helm chart 2.0.40 2025-08-05 19:29:38 +03:00
ace
cce620dca0 bitwarden: bump to 1.34.2, helm chart 2.0.39 2025-07-27 23:58:40 +03:00
ace
8bc72c25d6 gitea: bump to 1.24.3, helm chart 12.1.2
peertube: bump to 7.2.3, helm chart 0.4.5

mastodon: bump to v4.4.2, helm chart 6.5.1

gitea-act-runner: bump to 0.2.12, helm chart 0.1.13
2025-07-25 15:20:04 +03:00
ace
4cd0b9b606 mastodon: bump to v4.4.1, helm chart v6.5.0 2025-07-14 00:14:39 +03:00
ace
dd02301ecf mastodon: bump to v4.2.22, helm chart v5.1.9 2025-07-13 20:56:03 +03:00
ace
e380042c04 cert-manager: bump to v1.18.2, helm chart v1.18.2 2025-07-07 11:40:06 +03:00
ace
55fd39b008 adguard-home: bump to v0.107.63, helm chart v2.3.33
cert-manager: bump to v1.18.1, helm chart v1.18.1

gitea: bump to v1.24.2, helm chart v12.1.1

rspamd: bump to v3.12.1, helm chart v0.5.7

peertube: bump to v7.2.1, helm chart v0.4.4
2025-06-28 16:28:45 +03:00
ace
7ed4837d00 adguard-home: bump to v0.107.62, helm chart v2.3.32
roundcube: bump to v1.6.11, helm chart v0.4.6

cert-manager: bump to v1.17.2, helm chart v1.17.2

bitwarden: bump to v1.34.1, helm chart v2.0.38
2025-06-11 00:56:12 +03:00
ace
a85d4b1c04 fix dns integration for cert-manager 2025-06-11 00:55:44 +03:00
ace
160fdef1cf add gitignore 2025-05-26 19:40:42 +03:00
ace
ab2040d287 add k8s_cluster_name variable and bump gitea helm chart to 12.0.0 2025-05-23 13:05:30 +03:00
ace
8a727e5dbf knot: backup config 2025-05-23 13:05:14 +03:00
ace
f6e52e1f65 fix dns integration via external-dns deployment 2025-05-23 13:05:08 +03:00
ace
74ae2c4694 pwgen: rewrite checks and passwords generation 2025-05-23 13:05:03 +03:00
ace
e67b5702d5 gitea: bump to v1.23.6 with helm chart v11.0.1
cert-manager: bump to v1.17.1, helm chart v1.17.1
2025-03-27 12:37:13 +03:00
ace
cfbdf9cc99 ingress-nginx: bump v1.12.1 with helm chart v4.12.1
wikijs: bump to v2.5.307, helm chart v2.3.19

peertube: bump to v7.1.0, helm chart v0.4.2

adguard-home: bump to v0.107.59, helm chart v2.3.31
2025-03-25 16:27:56 +03:00
ace
ee7630d087 mastodon: bump to v4.2.19, helm chart v5.1.7 2025-03-14 23:39:20 +03:00
ace
2b2d7369ed rspamd: bump to v3.11.1, helm chart v0.5.6 2025-03-09 17:02:22 +03:00
ace
d5d7e94c44 gitea-act-runner: bump to v0.2.11, helm chart v0.1.12
gitea: bump to v1.23.4 with helm chart v11.0.0

mastodon: bump to v4.2.17, helm chart v5.1.6

adguard-home: bump to v0.107.57, helm chart v2.3.30
2025-03-02 17:25:16 +03:00
ace
b46b8db671 postfix: bump to v3.5.25, helm chart v0.1.7
dovecot: bump to v2.3.16, helm chart v0.1.8

rspamd: bump to v3.11.0, helm chart v0.5.5

roundcube: bump to v1.6.10, helm chart v0.4.5
2025-02-15 22:27:38 +03:00
ace
a97fca288f bitwarden: bump to v1.33.2, helm chart v2.0.37 2025-02-12 15:31:17 +03:00
ace
66e627144f bitwarden: bump to v1.33.1, helm chart v2.0.36
wikijs: bump to v2.5.306, helm chart v2.3.18

gitea: bump to v1.22.3 with helm chart v10.6.0
2025-02-06 00:36:46 +03:00
ace
b561d64770 peertube: bump to v7.0.1, helm chart v0.4.1
bitwarden: bump to v1.33.0, helm chart v2.0.35

adguard-home: bump to v0.107.56, helm chart v2.3.29

cert-manager: bump to v1.16.3, helm chart v1.16.3

metallb: bump to v0.14.9, helm chart v0.14.9
2025-01-26 17:13:27 +03:00
ace
e2201d03f7 peertube: bump to v7.0.0, helm chart v0.4.0
bitwarden: bump to v1.32.7, helm chart v2.0.34
2024-12-22 15:13:02 +03:00
ace
ee0305870a bitwarden: bump to v1.32.5, helm chart v2.0.32 2024-11-19 17:59:02 +03:00
ace
d8f13a79d4 bitwarden: bump to v1.32.4, helm chart v2.0.31
adguard-home: bump to v0.107.54, helm chart v2.3.28
2024-11-11 13:34:56 +03:00
ace
53334d338a rspamd: bump to v3.10.2, helm chart v0.5.4
peertube: bump to v6.3.3, helm chart v0.3.8

bitwarden: bump to v1.32.3, helm chart v2.0.30
2024-11-03 17:07:06 +03:00
ace
9efcd2ffa1 gitea: bump to v1.22.3 with helm chart v10.5.0 2024-10-21 11:47:12 +03:00
ace
9a390d4637 bitwarden: bump to v1.32.2, helm chart v2.0.29
wikijs: bump to v2.5.305, helm chart v2.3.17
2024-10-14 11:20:34 +03:00
ace
d32fd50715 peertube: bump to v6.3.2, helm chart v0.3.7 2024-10-08 14:42:07 +03:00
ace
9309c4de87 peertube: bump to v6.3.1, helm chart v0.3.6 2024-10-08 10:58:12 +03:00
ace
f4646f1a49 gitea-act-runner: bump to v0.2.11, helm chart v0.1.11 2024-10-06 22:08:57 +03:00
ace
1fd0d78314 bitwarden: bump to v1.32.1, helm chart v2.0.28
adguard-home: bump to v0.107.53, helm chart v2.3.27
2024-10-04 04:57:34 +03:00
ace
5adaf91a44 wikijs: bump to v2.5.304, helm chart v2.3.16 2024-09-22 22:22:09 +03:00
ace
4db7240f5a gitea: bump to v1.22.2 with helm chart v10.4.1 2024-09-12 14:51:01 +03:00
ace
526bc6c2c0 roundcube: bump to v1.6.9, helm chart v0.4.4 2024-09-03 19:41:26 +03:00
ace
70a50a5c15 update dovecot, postfix and rspamd 2024-08-24 03:30:37 +03:00
ace
4c0646972c postgres-operator: bump to v1.13.0, helm chart v1.13.0
postgres-operator-ui: bump to v1.13.0, helm chart v1.13.0
2024-08-24 03:12:47 +03:00
ace
e221d7fa65 mastodon: bump to v4.2.12, helm chart v5.1.5 2024-08-19 19:25:12 +03:00
ace
bc214c1763 cert-manager: bump to v1.15.3, helm chart v1.15.3
mastodon: bump to v4.2.11, helm chart v5.1.4
2024-08-19 14:16:20 +03:00
ace
21614ffc2e update services versions 2024-08-13 02:50:15 +03:00
ace
2a83565d59 bitwarden: bump to v1.31.0, helm chart v2.0.26
peertube: bump to v6.2.1, helm chart v0.3.5
2024-08-03 00:42:44 +03:00
ace
87d7312099 mastodon: bump to v4.2.10, helm chart v5.1.3 2024-07-05 01:20:13 +03:00
ace
7d4b66a777 nextcloud: bump to v29.0.3, helm chart v5.0.2 2024-06-30 01:15:18 +03:00
ace
e05607693a cert-manager: bump to v1.14.6, helm chart v1.14.6
postgres-operator: bump to v1.12.2, helm chart v1.12.2

postgres-operator-ui: bump to v1.12.2, helm chart v1.12.2
2024-06-16 17:39:16 +03:00
ace
c16ab291dd fix selinux for haproxy 2024-06-16 17:39:10 +03:00
ace
5a980d28ad fix selinux for nginx 2024-06-16 17:39:08 +03:00
ace
4bdaff7cca fix vps playbook 2024-06-01 19:09:58 +03:00
ace
2991123422 mastodon: bump to v4.2.9, helm chart v5.1.2
postgres-operator: bump to v1.12.0, helm chart v1.12.0

postgres-operator-ui: bump to v1.12.0, helm chart v1.12.0
2024-06-01 03:42:30 +03:00
ace
e9c70618f6 roundcube: fix tls 2024-06-01 03:42:18 +03:00
ace
6b2f7f716d mass update 2024-05-25 18:12:30 +03:00
ace
d057e60ea4 wikijs: bump to v2.5.303, helm chart v2.3.15
adguard-home: bump to v0.107.50, helm chart v2.3.26

roundcube: bump to v1.6.7, helm chart v0.4.2
2024-05-25 18:12:25 +03:00
ace
4a74d0649e keycloak: fix clients 2024-05-13 04:20:08 +03:00
ace
2dc354fad5 make postgres more tunable 2024-05-08 00:39:16 +03:00
ace
826aa160a9 peertube: bump to v6.1.0, helm chart v0.3.4 2024-05-08 00:35:01 +03:00
ace
6e7a28c2ae update and fix sample vars 2024-05-06 03:00:59 +03:00
ace
d3db3af028 make postgres more tunable 2024-05-06 03:00:42 +03:00
ace
d9dd334988 fix tsig key name 2024-05-06 03:00:27 +03:00
ace
5d57ca8dce fix core infra dns 2024-05-06 03:00:19 +03:00
ace
7e508f906f fix core infra dns 2024-05-06 03:00:14 +03:00
ace
3ac1b27b4c add keycloak 2024-05-06 03:00:10 +03:00
ace
14c3ebe2cc add metallb_enabled: true to group values 2024-05-05 23:04:53 +03:00
ace
c8831139e0 fix dkim, tsig and vapid keys generation 2024-05-05 23:04:47 +03:00
ace
6cfac8f302 enable postgresql in place upgrade and bump postgresql version to 16 2024-05-05 20:13:28 +03:00
ace
ef847dfe87 postgres-operator: bump to v1.11.0, helm chart v1.11.0
postgres-operator-ui: bump to v1.11.0, helm chart v1.11.0
2024-05-05 18:45:14 +03:00
ace
9659378110 wikijs: bump to v2.5.302, helm chart v2.3.14
cert-manager: bump to v1.14.5, helm chart v1.14.5

metallb: bump to v0.14.5, helm chart v0.14.5
2024-04-27 12:07:09 +03:00
ace
0cbd95af0e gitea: bump to v1.21.11 with helm chart v10.1.4 2024-04-16 22:17:27 +03:00
ace
0d70f46844 peertube: bump to v6.0.4, helm chart v0.3.3
adguard-home: bump to v0.107.48, helm chart v2.3.24

gitea-act-runner: bump to v0.2.10, helm chart v0.1.10
2024-04-12 01:04:15 +03:00
ace
340996d22f gitea-act-runner: bump to v0.2.7, helm chart v0.1.9
nextcloud: bump to v28.0.3, helm chart v4.6.4
2024-03-27 18:05:32 +03:00
ace
8b92348013 bitwarden: bump to v1.30.5, helm chart v2.0.25 2024-03-03 00:44:27 +03:00
ace
587050e55a rspamd: bump to v3.8.4, helm chart v0.5.2 2024-03-02 17:05:16 +03:00
ace
281ca92f36 gitea: bump to v1.21.7 with helm chart v10.1.3 2024-02-26 19:02:47 +03:00
ace
531edc036e cert-manager: bump to v1.14.3, helm chart v1.14.3 2024-02-23 18:43:43 +03:00
ace
5331f1e18f rspamd: bump to v3.8.3 with helm chart v0.5.1 2024-02-23 18:18:02 +03:00
ace
c2dc1d3bde gitea: bump to v1.21.6 with helm chart v10.1.2 2024-02-23 18:09:15 +03:00
ace
11e938cc41 cert-manager: bump to v1.14.2, helm chart v1.14.2 2024-02-11 15:25:10 +03:00
ace
1c2befb922 bitwarden: bump to v1.30.3, helm chart v2.0.24
gitea: bump to v1.21.5 with helm chart v10.1.1
2024-02-02 12:16:01 +03:00
ace
022d974f55 bitwarden: bump to v1.30.2, helm chart v2.0.23
wikijs: bump to v2.5.301, helm chart v2.3.13

external-dns: migrate to bitnami/external-dns helm v6.31.0
2024-02-01 15:19:31 +03:00
ace
9d1480d728 external-dns: migrate to bitnami/external-dns helm chart with working ingress class support 2024-02-01 15:19:28 +03:00
ace
55aa5ef74f dovecot: bump to v2.3.16, helm chart v0.1.6
postfix: bump to v3.5.9, helm chart v0.1.5

rspamd: bump to v3.8.1, helm chart v0.5.0
2024-01-26 02:07:24 +03:00
ace
ace8287788 gitea: bump to v1.21.4 with helm chart v10.1.0 2024-01-22 21:36:11 +03:00
ace
bf1fae3c45 knot: rewrite role 2024-01-20 04:26:37 +03:00
ace
37225e7895 peertube: bump to v6.0.3, helm chart v0.3.2 2024-01-19 17:28:09 +03:00
ace
3cf9589a41 gitea: bump to v1.21.3 with helm chart v10.0.2 2023-12-24 01:55:48 +03:00
ace
3fbcbf17ed rspamd: bump to v3.7.5, helm chart v0.4.9 2023-12-16 23:30:09 +03:00
ace
980f006c07 bitwarden: bump to v1.30.1, helm chart v2.0.22
adguard-home: bump to v0.107.43, helm chart v2.3.23

peertube: bump to v6.0.2, helm chart v0.3.1

nextcloud: bump to v27.1.4, helm chart v4.5.5
2023-12-14 21:51:17 +03:00
ace
b71f5d4cb3 peertube: bump to v6.0.1, helm chart v0.3.0 2023-11-29 17:00:08 +03:00
ace
308792df46 gitea: bump to v1.21.1 with helm chart v9.6.1 2023-11-28 20:58:42 +03:00
ace
390b7800f4 gitea: bump to v1.20.5 with helm chart v9.5.1 2023-11-18 22:07:46 +03:00
ace
1ceec4f8e8 roundcube: bump to v1.6.5, helm chart v0.4.0 2023-11-18 20:57:41 +03:00
ace
a0613f6c2c adguard-home: bump to v0.107.41, helm chart v2.3.22
roundcube: bump to v1.6.5, helm chart v0.4.0
2023-11-18 20:57:32 +03:00
ace
5736e94fad nextcloud: bump to v27.1.3, helm chart v4.3.6
rspamd: bump to v3.7.3, helm chart v0.4.7
2023-10-30 10:41:15 +03:00
ace
a398ec5f9c adguard-home: bump to v0.107.40, helm chart v2.3.21
nextcloud: bump to v27.1.2, helm chart v4.3.5
2023-10-22 17:56:17 +03:00
ace
bfa2cd4258 adguard-home: bump to v0.107.39, helm chart v2.3.20 2023-10-13 18:26:09 +03:00
ace
5039fb107f gitea: bump to v1.20.5 with helm chart v9.5.0 2023-10-10 14:09:17 +03:00
ace
de2c464903 rspamd: bump to v3.7.1, helm chart v0.4.6 2023-10-08 02:15:53 +03:00
ace
91d40a2372 gitea-act-runner: bump to v0.2.6, helm chart v0.1.8 2023-10-02 20:07:38 +03:00
ace
acec270b9b cert-manager: bump to v1.13.1, helm chart v1.13.1
gitea-act-runner: bump to v0.2.6, helm chart v0.1.7
2023-10-02 01:05:23 +03:00
ace
993269a308 gitea-act-runner: bump to v0.2.5, helm chart v0.1.6 2023-09-15 23:35:39 +03:00
ace
5f333565a3 metallb: fix for kubernetes running without kube-proxy 2023-09-12 23:24:02 +03:00
ace
0bff6e9a26 adguard-home: bump to v0.107.38, helm chart v2.3.19
cert-manager: bump to v1.12.4, helm chart v1.12.4

metallb: bump to v0.13.11, helm chart v0.13.11
2023-09-12 23:23:56 +03:00
ace
f1d03ff98e postgres-operator-ui: bump to v1.10.1, helm chart v1.10.1
postgres-operator: bump to v1.10.1, helm chart v1.10.1

adguard-home: bump to v0.107.37, helm chart v2.3.18
2023-09-09 16:55:41 +03:00
ace
6a1d272453 postgres: fix postgres-operator-ui ingress path 2023-09-09 16:55:38 +03:00
ace
d4b3f63495 gitea: bump to v1.20.4 with helm chart v9.4.0 2023-09-08 19:49:56 +03:00
ace
f246848075 gitea: bump to v1.20.3 with helm chart v9.2.0 2023-08-22 22:20:55 +03:00
ace
57ddce5a22 add minio helm repo 2023-08-18 14:32:49 +03:00
ace
984bd5c7f1 harbor: bump to v2.8.4, helm chart v1.12.4 2023-08-18 14:32:44 +03:00
ace
b2f3716e80 nextcloud: bump to v27.0.2, helm chart v3.5.22 2023-08-17 18:32:16 +03:00
ace
5b387889f0 wikijs: bump to v2.5.300, helm chart v2.3.12 2023-08-12 18:13:05 +03:00
ace
f64d588e5d rspamd: bump to v3.6, helm chart v0.4.5 2023-08-11 14:17:07 +03:00
ace
79b2817a20 ingress-nginx: bump v1.3.0 with helm chart v4.2.3 2023-08-03 03:19:54 +03:00
ace
f4cf2abdf1 adguard-home: bump to v0.107.36, helm chart v2.3.17 2023-08-03 00:31:06 +03:00
ace
cd40d6b633 gitea-act-runner: bump to v0.2.5, helm chart v0.1.5 2023-08-02 12:31:41 +03:00
ace
1fc8edc277 rspamd: bump to v3.5, helm chart v0.4.4
postfix: bump to v3.5.9, helm chart v0.1.4

dovecot: bump to v2.3.16, helm chart v0.1.5
2023-08-01 18:34:58 +03:00
ace
9e773269be gitea: bump to v1.20.2 with helm chart v9.1.0 2023-08-01 15:30:59 +03:00
ace
df9bf23816 nextcloud: bump to v27.0.1, helm chart v3.5.20 2023-07-29 22:45:23 +03:00
ace
8ab7aa470c bitwarden: bump to v1.29.1, helm chart v2.0.21
gitea-act-runner: bump to v0.2.4, helm chart v0.1.4
2023-07-29 20:03:04 +03:00
ace
a15dd5c7e2 adguard-home: bump to v0.107.35, helm chart v2.3.16
minio: bump to RELEASE.2023-07-07T07-13-57Z, helm chart v5.0.13
2023-07-27 22:54:32 +03:00
ace
c16674aa9c gitea: bump to v1.20.1 with helm chart v9.0.4 2023-07-23 22:48:19 +03:00
ace
6663242b5b fix readme 2023-07-18 21:35:07 +03:00
ace
62654976bf mastodon: bump to v4.1.4, helm chart v4.0.1 2023-07-10 11:55:37 +03:00
ace
48b06e879a cert-manager: bump to v1.12.2, helm chart v1.12.2 2023-07-05 16:56:23 +03:00
ace
8fa86ea81a cert-manager: fix mistype 2023-07-05 16:56:18 +03:00
ace
773e9b9cd9 update requirements.txt 2023-07-05 16:55:59 +03:00
ace
eb123edc9f gitea-act-runner: bump to v0.2.3, helm chart v0.1.3 2023-07-04 21:14:36 +03:00
ace
8e9fd451af adguard-home: bump to v0.107.33, helm chart v2.3.15 2023-07-04 21:01:21 +03:00
ace
b5d37d2f5a peertube: bump to v5.2.0, helm chart v0.2.2 2023-06-24 18:08:18 +03:00
ace
9d3eef1f7c gitea-act-runner: bump to v0.2.0, helm chart v0.1.2 2023-06-18 20:23:28 +03:00
ace
c413885893 adguard-home: bump to v0.107.32, helm chart v2.3.14 2023-06-17 04:16:58 +03:00
ace
ca9fceb972 update minio vars example 2023-06-14 23:18:19 +03:00
ace
1eb2302c31 add minio 2023-06-14 23:02:55 +03:00
ace
c99f760481 fix ghp repo url 2023-06-14 23:02:48 +03:00
88 changed files with 1471 additions and 649 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.venv

View File

@@ -1,4 +1,4 @@
## Ansible playbooks repository
Used for Geek Home Platform deployment
Used for [Geek Home Platform](https://geekhome.org/) deployment
[Documentation](https://wikijs.geekhome.org/home)

View File

@@ -1,4 +1,5 @@
# Common #
k8s_cluster_name: mycluster
namespace: ghp
docker_registry: gitea.geekhome.org/ghp
domain: example.com
@@ -11,6 +12,7 @@ web_proxy_internal_ip: INTERNAL_VPS_IP
# Core infrastructure #
## MetalLB ##
metallb_enabled: true
metallb_advertisement_type: "BGP"
metallb_address_pool:
- 192.168.250.0/24
@@ -21,13 +23,13 @@ metallb_bgp_aggregation_length: 24
## Nginx Ingress ##
### Internal ###
internal_ingress_class: "ghp-internal-nginx"
internal_ingress_class: "{{ namespace }}-internal-nginx"
internal_loadbalancer_ip: "192.168.250.0"
### External ###
internal_ingress_class: "ghp-external-nginx"
external_ingress_class: "{{ namespace }}-external-nginx"
external_loadbalancer_ip: "192.168.250.10"
### Local ###
internal_ingress_class: "ghp-local-nginx"
local_ingress_class: "{{ namespace }}-local-nginx"
local_loadbalancer_ip: "192.168.250.20"
## External-dns ##
@@ -36,6 +38,8 @@ dns_namespace: dns
# Shared infrastructure #
## PostgreSQL ##
postgres_enabled: true
postgres_operator_enabled: true
postgres_operator_ui_enabled: true
postgres_db_namespace: "{{ namespace }}"
## OpenLDAP ##
@@ -51,6 +55,13 @@ openldap_simple_users:
- { name: testuser1, sn: 6001, uid: 6001, gid: 6001 }
- { name: testuser2, sn: 6002, uid: 6002, gid: 6002 }
## MinIO ##
minio_enabled: true
minio_publish: false
minio_console_publish: false
#minio_size: "100Gi"
#minio_storage: "nfs-hdd"
## Harbor ##
harbor_enabled: false
harbor_publish: false

View File

@@ -1,5 +1,5 @@
# MetalLB balancer
metallb_version: 0.13.10
metallb_version: 0.14.9
# NFS provisioners
nfs_client_provisioner_hdd_version: 4.0.18
@@ -12,64 +12,66 @@ ceph_csi_rbd_version: 3.8.0
ceph_csi_cephfs_version: 3.8.0
# Cert-manager
cert_manager_version: 1.12.1
cert_manager_version: 1.18.2
# External-DNS
external_dns_version: 6.8.1
local_dns_version: 6.8.1
internal_dns_version: 6.8.1
service_dns_version: 6.8.1
external_dns_version: 6.31.0
local_dns_version: 6.31.0
internal_dns_version: 6.31.0
service_dns_version: 6.31.0
# Ingress Nginx
external_ingress_nginx_version: 4.2.0
internal_ingress_nginx_version: 4.2.0
local_ingress_nginx_version: 4.2.0
external_ingress_nginx_version: 4.12.1
internal_ingress_nginx_version: 4.12.1
local_ingress_nginx_version: 4.12.1
# PostgreSQL operator
postgres_operator_version: 1.10.0
postgres_operator_ui_version: 1.10.0
postgres_operator_version: 1.13.0
postgres_operator_ui_version: 1.13.0
# OpenLDAP
openldap_version: 1.2.7
# MinIO
minio_version: 5.0.13
# Adguard Home
adguard_version: 2.3.13
adguard_version: 2.3.33
# Bitwarden (aka Vaultwarden)
bitwarden_version: 2.0.20
bitwarden_version: 2.0.40
# Gitea
gitea_ingress_nginx_version: 4.2.0
gitea_dns_version: 6.8.1
gitea_version: 8.3.0
gitea_ingress_nginx_version: 4.12.1
gitea_dns_version: 6.31.0
gitea_version: 12.1.2
# Gitea Act Runner
gitea_act_runner_version: 0.1.1
gitea_act_runner_version: 0.1.13
# Docker and Helm chart registries
harbor_version: 1.12.2
harbor_version: 1.12.4
# Mastodon
mastodon_version: 4.0.0
mastodon_version: 6.5.1
# Nextcloud
nextcloud_version: 3.5.13
nextcloud_version: 5.0.2
# Email
dovecot_version: 0.1.4
postfix_version: 0.1.3
roundcube_version: 0.3.0
rspamd_version: 0.4.3
dovecot_version: 0.1.8
postfix_version: 0.1.7
roundcube_version: 0.4.6
rspamd_version: 0.5.7
# Pypi server
pypiserver_version: 2.5.0
# WikiJS
wikijs_version: 2.3.11
wikijs_version: 2.3.19
# PeerTube
peertube_version: 0.2.1
peertube_version: 0.4.5
# Playmaker android APK repository
playmaker_version: 0.1.3

View File

@@ -33,5 +33,7 @@ ddclient_hosts:
- "{% if mastodon_publish %}{{ mastodon_short_name | default('mastodon') }}.{{ domain }}{% else %}omitme{% endif %}"
- "{% if harbor_publish %}{{ harbor_short_name | default('harbor') }}.{{ domain }}{% else %}omitme{% endif %}"
- "{% if roundcube_publish %}{{ roundcube_short_name | default('webmail') }}.{{ domain }}{% else %}omitme{% endif %}"
- "{% if minio_publish %}{{ minio_short_name | default('minio') }}.{{ domain }}{% else %}omitme{% endif %}"
- "{% if minio_console_publish %}{{ minio_console_short_name | default('minio') }}.{{ minio_short_name | default('minio') }}.{{ domain }}{% else %}omitme{% endif %}"
- "{{ harbor_readonly_ingress | default('omitme') }}"
- "{{ wikijs_readonly_ingress | default('omitme') }}"

View File

@@ -0,0 +1,46 @@
keycloak_values: {}
keycloak_realms: {}
# - id: myrealm
# realm: myrealm
keycloak_clients: {}
# - client_id: gitea
# realm: myrealm
# public_client: true
# - client_id: gitea
# realm: myrealm
# public_client: false
keycloak_clients_default_protocol_mappings: {}
# - config:
# access.token.claim: true
# claim.name: "groups"
# id.token.claim: true
# jsonType.label: String
# user.attribute: groups
# userinfo.token.claim: true
# name: groups
# protocol: openid-connect
# protocolMapper: oidc-usermodel-attribute-mapper
keycloak_groups: {}
# - name: admins
# realm: myrealm
# - name: devops
# realm: myrealm
keycloak_users: {}
# - username: John Doe
# realm: myrealm
# firstName: John
# lastName: Doe
# credentials:
# - type: password
# value: my_very_strong_password
# temporary: true
# groups:
# - name: admins
# state: present
# - name: devops
# state: present

View File

@@ -0,0 +1 @@
minio_values: {}

View File

@@ -1,43 +1 @@
nextcloud_values:
nextcloud:
configs:
mail.fix.config.php: |-
<?php
$CONFIG = array (
"mail_smtptimeout" => 60,
);
fix.config.php: |-
<?php
$CONFIG = array (
'trusted_proxies' => ['{{ web_proxy_internal_ip }}'],
'overwriteprotocol' => 'https',
'overwrite.cli.url' => 'https://nextcloud.{{ domain }}',
'mail_smtpstreamoptions' =>
array (
'ssl' =>
array (
'allow_self_signed' => true,
'verify_peer' => false,
'verify_peer_name' => false,
),
),
);
rgw.config.php: |-
<?php
$CONFIG = array (
'objectstore_multibucket' => array(
'class' => '\\OC\\Files\\ObjectStore\\S3',
'arguments' => array(
'bucket' => 'nextcloud',
'autocreate' => true,
'key' => 'K4PNZLSTLIDQJMZUV27P',
'secret' => 'iPScsni8RS2aT9MFymfQYLPD7W8dVrRqFpafBKDc',
'hostname' => 'sds1-osd1.lan',
'port' => 8080,
'use_ssl' => false,
'num_buckets' => 16,
'region' => 'us-east-1',
'use_path_style' => true
),
),
);
nextcloud_values: {}

View File

@@ -12,20 +12,14 @@ knot_conf: |
any: debug
key:
- id: k8s
- id: k8s-{{ k8s_cluster_name }}-{{ namespace }}
algorithm: hmac-sha512
secret: {{ k8s_tsig }}
- id: vps
- id: ddclient-{{ k8s_cluster_name }}-{{ namespace }}
algorithm: hmac-sha512
secret: {{ ddclient_tsig }}
remote:
# - id: slave
# address: 192.168.1.1@53
#
# - id: master
# address: 192.168.2.1@53
remote:
- id: dns_server
address: 127.0.0.1@53
@@ -34,24 +28,15 @@ knot_conf: |
- id: dns_zone_sbm
parent: [dns_server]
acl:
- id: deny_all
deny: on # no action specified and deny on implies denial of all actions
- id: key_rule
key: [vps, k8s] # Access based just on TSIG key
key: [k8s-{{ k8s_cluster_name }}-{{ namespace }},ddclient-{{ k8s_cluster_name }}-{{ namespace }}] # Access based just on TSIG key
address: 192.168.0.0/16
action: [transfer, notify, update]
# - id: acl_slave
# address: 192.168.1.1
# action: transfer
# - id: acl_master
# address: 192.168.2.1
# action: notify
template:
- id: default
storage: "/var/lib/knot"
@@ -73,14 +58,3 @@ knot_conf: |
dnssec-signing: on
dnssec-policy: rsa
zonefile-load: difference
# # Master zone
# - domain: example.com
# notify: slave
# acl: acl_slave
# # Slave zone
# - domain: example.net
# master: master
# acl: acl_master

View File

@@ -37,7 +37,7 @@ haproxy_config: |
log global
option tcplog
timeout server 1m
timeout connect 7s
timeout connect 10s
server postfix {{ postfix_loadbalancer_ip }}:2525 send-proxy
frontend ft_submission
@@ -54,7 +54,7 @@ haproxy_config: |
log global
option tcplog
timeout server 1m
timeout connect 7s
timeout connect 10s
server postfix {{ postfix_loadbalancer_ip }}:10587 send-proxy
frontend ft_submissions
@@ -71,7 +71,7 @@ haproxy_config: |
log global
option tcplog
timeout server 1m
timeout connect 7s
timeout connect 10s
server postfix {{ postfix_loadbalancer_ip }}:10465 send-proxy
frontend ft_imap

View File

@@ -75,6 +75,12 @@ nginx:
{% if roundcube_publish %}
{{ roundcube_short_name | default('webmail') }}.{{ domain }} https_{{ namespace }};
{% endif %}
{% if minio_publish %}
{{ minio_short_name | default('minio') }}.{{ domain }} https_{{ namespace }};
{% endif %}
{% if minio_console_publish %}
{{ minio_console_short_name | default('console') }}.{{ minio_short_name | default('minio') }}.{{ domain }} https_{{ namespace }};
{% endif %}
{% if wikijs_readonly_ingress %}
{{ wikijs_readonly_ingress }} https_{{ namespace }};
{% endif %}

View File

@@ -3,85 +3,91 @@
connection: local
tasks:
- name: Deploy MetalLB
import_role:
import_role:
name: metallb
when: metallb_enabled | default(true)
tags: metallb
- name: Deploy External Ingress Nginx
import_role:
import_role:
name: external-ingress-nginx
when: external_ingress_nginx_enabled | default(true)
tags:
tags:
- external-ingress-nginx
- ingress-nginx
- name: Deploy Internal Ingress Nginx
import_role:
import_role:
name: internal-ingress-nginx
when: internal_ingress_nginx_enabled | default(true)
tags:
tags:
- internal-ingress-nginx
- ingress-nginx
- name: Deploy Local Ingress Nginx
import_role:
import_role:
name: local-ingress-nginx
when: local_ingress_nginx_enabled | default(true)
tags:
tags:
- local-ingress-nginx
- ingress-nginx
- name: Deploy Internal DNS
import_role:
import_role:
name: internal-dns
when: internal_dns_enabled | default(true)
tags:
when:
- internal_dns_enabled | default(true)
- domain is defined
tags:
- internal-dns
- dns
- name: Deploy Local DNS
import_role:
import_role:
name: local-dns
when: local_dns_enabled | default(true)
tags:
when:
- local_dns_enabled | default(true)
- local_domain is defined
tags:
- local-dns
- dns
- name: Deploy Service DNS
import_role:
import_role:
name: service-dns
when: service_dns_enabled | default(true)
tags:
when:
- service_dns_enabled | default(true)
- domain is defined
tags:
- service-dns
- dns
- name: Deploy Cert-manager
import_role:
import_role:
name: cert-manager
when: cert_manager_enabled | default(true)
tags: cert-manager
- name: Deploy NFS-client-provisioner
import_role:
import_role:
name: nfs-client-provisioner
when: nfs_client_provisioner_enabled | default(true)
tags: nfs-client-provisioner
- name: Deploy CSI Ceph RBD
import_role:
import_role:
name: ceph-csi-rbd
when: ceph_csi_rbd_enabled | default(false)
tags: ceph-csi-rbd
- name: Deploy CSI CephFS
import_role:
import_role:
name: ceph-csi-cephfs
when: ceph_csi_cephfs_enabled | default(false)
tags: ceph-csi-cephfs
- name: Deploy Metrics-server
import_role:
import_role:
name: metrics-server
when: metrics_server_enabled | default(true)
tags: metrics-server

View File

@@ -1,4 +1,5 @@
---
- hosts: knot_dns
become: true
roles:
- knot

View File

@@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- keycloak

5
playbooks/ghp/minio.yaml Normal file
View File

@@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- minio

View File

@@ -3,14 +3,25 @@
connection: local
tasks:
- name: Deploy PostgreSQL
import_role:
import_role:
name: postgres
when: postgres_enabled | default(true)
tags: postgres
- name: Deploy OpenLDAP
import_role:
import_role:
name: openldap
when: openldap_enabled | default(true)
tags: openldap
- name: Deploy MinIO
import_role:
name: minio
when: minio_enabled | default(true)
tags: minio
- name: Deploy Keycloak
import_role:
name: keycloak
when: keycloak_enabled | default(false)
tags: keycloak

View File

@@ -1,15 +1,18 @@
---
- hosts: web_proxy
become: true
roles:
- nginx
tags: web-proxy
- hosts: mail_proxy
become: true
roles:
- haproxy
tags: mail-proxy
- hosts: ddclient
become: true
roles:
- { role: docker, when: ddclient_container_engine == "docker" }
- { role: podman, when: ddclient_container_engine == "podman" }

View File

@@ -1,28 +1,29 @@
ansible==7.5.0
ansible-core==2.14.5
cachetools==5.3.0
certifi==2023.5.7
cffi==1.15.1
charset-normalizer==3.1.0
cryptography==40.0.2
google-auth==2.18.0
idna==3.4
Jinja2==3.1.2
kubernetes==26.1.0
MarkupSafe==2.1.2
ansible==9.5.1
ansible-core==2.16.6
cachetools==5.3.3
certifi==2024.2.2
cffi==1.16.0
charset-normalizer==3.3.2
cryptography==42.0.7
google-auth==2.29.0
idna==3.7
Jinja2==3.1.4
kubernetes==29.0.0
MarkupSafe==2.1.5
netaddr==1.2.1
oauthlib==3.2.2
openshift==0.13.1
packaging==23.1
pyasn1==0.5.0
pyasn1-modules==0.3.0
pycparser==2.21
python-dateutil==2.8.2
openshift==0.13.2
packaging==24.0
pyasn1==0.6.0
pyasn1_modules==0.4.0
pycparser==2.22
python-dateutil==2.9.0.post0
python-string-utils==1.0.0
PyYAML==6.0
requests==2.30.0
requests-oauthlib==1.3.1
resolvelib==0.8.1
PyYAML==6.0.1
requests==2.31.0
requests-oauthlib==2.0.0
resolvelib==1.0.1
rsa==4.9
six==1.16.0
urllib3==1.26.15
websocket-client==1.5.1
urllib3==2.2.1
websocket-client==1.8.0

View File

@@ -19,7 +19,7 @@
group: cert-manager.io
- set_fact:
adguard_combined_values: "{{ adguard_default_values | combine(adguard_values, recursive=true) }}"
adguard_combined_values: "{{ adguard_default_values | combine(adguard_values | default({}), recursive=true) }}"
- name: Deploy Adguard Home
kubernetes.core.helm:

View File

@@ -5,7 +5,7 @@
- postgres_enabled is defined and postgres_enabled
- set_fact:
bitwarden_combined_values: "{{ bitwarden_default_values | combine(bitwarden_values, recursive=true) }}"
bitwarden_combined_values: "{{ bitwarden_default_values | combine(bitwarden_values | default({}), recursive=true) }}"
- name: Deploy Bitwarden
kubernetes.core.helm:

View File

@@ -1,5 +1,5 @@
- set_fact:
ceph_csi_cephfs_combined_values: "{{ ceph_csi_cephfs_default_values | combine(ceph_csi_cephfs_values, recursive=true) }}"
ceph_csi_cephfs_combined_values: "{{ ceph_csi_cephfs_default_values | combine(ceph_csi_cephfs_values | default({}), recursive=true) }}"
- name: Deploy CSI CephFS {{ ceph_csi_cephfs_version }}
kubernetes.core.helm:

View File

@@ -1,5 +1,5 @@
- set_fact:
ceph_csi_rbd_combined_values: "{{ ceph_csi_rbd_default_values | combine(ceph_csi_rbd_values, recursive=true) }}"
ceph_csi_rbd_combined_values: "{{ ceph_csi_rbd_default_values | combine(ceph_csi_rbd_values | default({}), recursive=true) }}"
- name: Deploy CSI Ceph RBD {{ ceph_csi_rbd_version }}
kubernetes.core.helm:

View File

@@ -1,6 +1,7 @@
cert_manager_chart_ref: "jetstack/cert-manager"
cert_manager_namespace: "cert-manager"
ceph_manager_lets_encrypt_mailbox: "admin@{{ domain }}"
cert_manager_lets_encrypt_mailbox: "admin@{{ domain }}"
cert_manager_base64_tsig_key: "{{ k8s_tsig | b64encode }}"
cert_manager_default_values:
installCRDs: true
crds:
enabled: true

View File

@@ -1,5 +1,5 @@
- set_fact:
cert_manager_combined_values: "{{ cert_manager_default_values | combine(cert_manager_values, recursive=true) }}"
cert_manager_combined_values: "{{ cert_manager_default_values | combine(cert_manager_values | default({}), recursive=true) }}"
- name: Deploy Cert-manager {{ cert_manager_version }}
kubernetes.core.helm:
@@ -50,7 +50,7 @@
rfc2136:
nameserver: "{{ external_dns_ip | default(dns_ip) }}:53"
tsigAlgorithm: HMACSHA512
tsigKeyName: k8s
tsigKeyName: "k8s-{{ k8s_cluster_name }}-{{ namespace }}"
tsigSecretSecretRef:
key: tsig-secret-key
name: tsig-secret
@@ -81,7 +81,7 @@
rfc2136:
nameserver: "{{ external_dns_ip | default(dns_ip) }}:53"
tsigAlgorithm: HMACSHA512
tsigKeyName: k8s
tsigKeyName: "k8s-{{ k8s_cluster_name }}-{{ namespace }}"
tsigSecretSecretRef:
key: tsig-secret-key
name: tsig-secret

View File

@@ -10,7 +10,7 @@ dovecot_default_values:
existingSecret: "{{ mail_short_name | default('mail') }}.{{ domain }}-secret"
dovecot:
configmaps:
dovecot:
dovecot:
dovecot: |
protocols = imap lmtp sieve
mail_max_userip_connections = 1000
@@ -51,14 +51,12 @@ dovecot_default_values:
auth-ldap: |
passdb {
driver = ldap
# Path for LDAP configuration file, see example-config/dovecot-ldap.conf.ext
args = /etc/dovecot/ldap.conf
}
userdb {
driver = ldap
args = /etc/dovecot/ldap.conf
}
10-auth: |
auth_default_realm = {{ domain }}
@@ -80,7 +78,7 @@ dovecot_default_values:
mbox_write_locks = fcntl
10-master: |
protocol imap {
mail_plugins = virtual
mail_plugins = virtual
}
service imap-login {
inet_listener imap {
@@ -130,13 +128,13 @@ dovecot_default_values:
}
service auth {
inet_listener {
inet_listener {
port = 12345
}
unix_listener auth-userdb {
mode = 0660
user = vmail
#group =
#group =
}
# Postfix smtp-auth
@@ -158,7 +156,7 @@ dovecot_default_values:
ssl = required
#verbose_ssl = yes
ssl_prefer_server_ciphers = yes
ssl_min_protocol = TLSv1.2
ssl_min_protocol = TLSv1.3
ssl_cert = </tls/tls.crt
ssl_key = </tls/tls.key
10-logging: |
@@ -170,7 +168,202 @@ dovecot_default_values:
hostname = {{ domain }}
rejection_reason = Your message to was automatically rejected:%n%r
protocol lda {
mail_plugins = virtual sieve
mail_plugins = virtual sieve
}
20-lmtp: |
protocol lmtp {
mail_plugins = virtual sieve
postmaster_address = postmaster@{{ domain }}
}
20-managesieve: |
service managesieve-login {
inet_listener sieve {
port = 4190
ssl = yes
}
service_count = 1
vsz_limit = 64M
}
service managesieve {
process_limit = 1024
}
service:
type: LoadBalancer
loadBalancerIP: "{{ dovecot_loadbalancer_ip | default(omit) }}"
# WIP
dovecot_oidc: false
dovecot_oidc_default_values:
replicaCount: 1
persistence:
enabled: true
existingClaim: mailboxes
tls:
enabled: true
existingSecret: "{{ mail_short_name | default('mail') }}.{{ domain }}-secret"
dovecot:
configmaps:
dovecot:
dovecot: |
protocols = imap lmtp sieve
mail_max_userip_connections = 1000
mail_plugins = virtual
auth_debug = yes
auth_verbose = yes
#haproxy_trusted_networks = 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16
#haproxy_timeout = 30s
dict {
#quota = mysql:/etc/dovecot/dovecot-dict-sql.conf.ext
#expire = sqlite:/etc/dovecot/dovecot-dict-sql.conf.ext
}
# Most of the actual configuration gets included below. The filenames are
# first sorted by their ASCII value and parsed in that order. The 00-prefixes
# in filenames are intended to make it easier to understand the ordering.
!include conf.d/*.conf
# A config file can also tried to be included without giving an error if
# it's not found:
!include_try local.conf
oauth2: |
introspection_mode = post
introspection_url = https://{{ dovecot_oidc_username }}:{{ dovecot_oidc_password }}@keycloak.{{ domain }}/auth/realms/{{ keycloak_realm }}/protocol/openid-connect/token/introspect
grant_url = https://keycloak.{{ domain }}/auth/realms/{{ keycloak_realm }}/protocol/openid-connect/token
#client_id = dovecot
#client_secret = X10dQgQprHLxZj8nsvB2fEpJwuBr0hWq
tokeninfo_url = https://keycloak.{{ domain }}/auth/realms/{{ keycloak_realm }}/protocol/openid-connect/token
#tls_ca_cert_file = /etc/ssl/certs/ca-certificates.crt
rawlog_dir = /tmp/oauth2
debug = yes
#use_grant_password = no
#username_attribute = username
#pass_attrs = pass=%{oauth2:access_token}
ldap: |
confd:
auth-ldap: |
auth-oauth2: |
passdb {
driver = oauth2
mechanisms = xoauth2 oauthbearer
args = /etc/dovecot/oauth2.conf
}
userdb {
driver = static
args = uid=vmail gid=vmail home=/home/vmail/%u
}
10-auth: |
auth_default_realm = {{ domain }}
auth_username_format = %Lu
auth_mechanisms = plain login xoauth2 oauthbearer
10-mail: |
mail_location = maildir:%h
namespace inbox {
inbox = yes
}
mail_uid = vmail
mail_gid = vmail
first_valid_uid = 1000
last_valid_uid = 1000
first_valid_gid = 1000
last_valid_gid = 1000
protocol !indexer-worker {
}
mbox_write_locks = fcntl
10-master: |
protocol imap {
mail_plugins = virtual
}
service imap-login {
inet_listener imap {
#port = 143
}
inet_listener imaps {
#port = 993
#ssl = yes
}
inet_listener imap_haproxy {
port = 1109
#haproxy = yes
}
inet_listener imaps_haproxy {
port = 10993
ssl = yes
#haproxy = yes
}
}
service pop3-login {
inet_listener pop3 {
#port = 110
}
inet_listener pop3s {
#port = 995
#ssl = yes
}
}
service lmtp {
inet_listener lmtp {
port = 24
}
unix_listener /var/spool/postfix/private/dovecot-lmtp {
mode = 0600
group = postfix
user = postfix
}
user = vmail
}
service imap {
}
service pop3 {
}
service auth {
inet_listener {
port = 12345
}
unix_listener auth-userdb {
mode = 0660
user = vmail
#group =
}
# Postfix smtp-auth
unix_listener /var/spool/postfix/private/auth {
mode = 0660
user = postfix
group = postfix
}
}
service auth-worker {
}
service dict {
unix_listener dict {
}
}
10-ssl: |
ssl = required
#verbose_ssl = yes
ssl_prefer_server_ciphers = yes
ssl_min_protocol = TLSv1.3
ssl_cert = </tls/tls.crt
ssl_key = </tls/tls.key
10-logging: |
log_path = /dev/stderr
info_log_path = /dev/stdout
debug_log_path = /dev/stdout
15-lda: |
postmaster_address = postmaster@{{ domain }}
hostname = {{ domain }}
rejection_reason = Your message to was automatically rejected:%n%r
protocol lda {
mail_plugins = virtual sieve
}
20-lmtp: |
protocol lmtp {

View File

@@ -1,5 +1,10 @@
- set_fact:
dovecot_combined_values: "{{ dovecot_default_values | combine(dovecot_values, recursive=true) }}"
dovecot_combined_values: "{{ dovecot_default_values | combine(dovecot_values | default({}), recursive=true) }}"
when: not mail_oidc
- set_fact:
dovecot_combined_values: "{{ dovecot_oidc_default_values | combine(dovecot_oidc_values | default({}), recursive=true) }}"
when: mail_oidc
- name: Deploy Dovecot
kubernetes.core.helm:
@@ -9,5 +14,4 @@
chart_ref: "{{ dovecot_chart_ref }}"
chart_version: "{{ dovecot_version | default(omit) }}"
release_values: "{{ dovecot_combined_values | from_yaml }}"
wait: true

View File

@@ -1,7 +1,9 @@
external_dns_chart_ref: "ghp/external-dns"
external_dns_chart_ref: "bitnami/external-dns"
external_dns_tsigKeyname: "k8s-{{ k8s_cluster_name }}-{{ namespace }}"
external_dns_tsigSecretAlg: "hmac-sha512"
external_dns_default_values:
fullnameOverride: "{{ external_dns_name | default(namespace + '-external-dns') }}"
ingressClass: "{{ external_ingress_class }}"
ingressClassFilters: ["{{ external_ingress_class }}"]
domainFilters: ["{{ external_domain | default(domain) }}"]
provider: rfc2136
rfc2136:
@@ -9,8 +11,8 @@ external_dns_default_values:
port: 53
zone: "{{ external_domain | default(domain) }}"
tsigSecret: "{{ k8s_tsig }}"
tsigSecretAlg: "{{ external_dns_tsigSecretAlg | default('hmac-sha512') }}"
tsigKeyname: "{{ external_dns_tsigKeyname | default('k8s') }}"
tsigSecretAlg: "{{ external_dns_tsigSecretAlg }}"
tsigKeyname: "{{ external_dns_tsigKeyname }}"
tsigAxfr: true
## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
minTTL: "30s"

View File

@@ -1,5 +1,5 @@
- set_fact:
external_dns_combined_values: "{{ external_dns_default_values | combine(external_dns_values, recursive=true) }}"
external_dns_combined_values: "{{ external_dns_default_values | combine(external_dns_values | default({}), recursive=true) }}"
- name: Deploy external DNS
kubernetes.core.helm:

View File

@@ -1,5 +1,5 @@
- set_fact:
external_ingress_nginx_combined_values: "{{ external_ingress_nginx_default_values | combine(external_ingress_nginx_values, recursive=true) }}"
external_ingress_nginx_combined_values: "{{ external_ingress_nginx_default_values | combine(external_ingress_nginx_values | default({}), recursive=true) }}"
- name: Deploy external Nginx Ingress
kubernetes.core.helm:

View File

@@ -2,10 +2,17 @@ gitea_act_runner_enabled: true
gitea_act_runner_chart_ref: "ghp/gitea-act-runner"
gitea_act_runner_gitea_instance_short_name: "gitea"
gitea_act_runner_default_values:
giteaInstance: "{{ gitea_act_runner_gitea_instance_short_name }}.{{ domain }}"
giteaInstance: "https://{{ gitea_act_runner_gitea_instance_short_name }}.{{ domain }}"
token: "{{ gitea_act_runner_token }}"
persistence:
enabled: true
accessMode: "{{ gitea_act_runner_storage_mode | default('ReadWriteMany') }}"
size: "{{ gitea_act_runner_size | default('25Gi') }}"
storageClass: "{{ gitea_act_runner_storage | default('nfs-ssd') }}"
configAsCode:
enabled: true
config:
container:
valid_volumes:
- /data/containers/*

View File

@@ -1,5 +1,5 @@
- set_fact:
gitea_act_runner_combined_values: "{{ gitea_act_runner_default_values | combine(gitea_act_runner_values, recursive=true) }}"
gitea_act_runner_combined_values: "{{ gitea_act_runner_default_values | combine(gitea_act_runner_values | default({}), recursive=true) }}"
- name: Deploy Gitea Act Runner
kubernetes.core.helm:

View File

@@ -6,6 +6,8 @@ gitea_use_external_db: true
gitea_short_name: "gitea"
gitea_ingress_class: "{{ gitea_namespace | default(namespace) }}-{{ 'public' if gitea_publish_web else 'private' }}-gitea-ingress-nginx"
gitea_default_values:
strategy:
type: "Recreate"
service:
http:
clusterIP:
@@ -30,6 +32,12 @@ gitea_default_values:
hosts:
- "{{ gitea_short_name }}.{{ domain }}"
valkey-cluster:
enabled: false
postgresql-ha:
enabled: false
persistence:
enabled: true
accessModes:
@@ -60,6 +68,12 @@ gitea_default_values:
config:
# APP_NAME: "Gitea: Git with a cup of tea"
RUN_MODE: prod
session:
PROVIDER: "memory"
queue:
TYPE: "level"
cache:
ADAPTER: "memory"
server:
LFS_START_SERVER: true
ROOT_URL: "https://{{ gitea_short_name}}.{{ domain }}"
@@ -73,12 +87,11 @@ gitea_default_values:
ALLOWED_HOST_LIST: "*"
mailer:
ENABLED: "true"
HOST: "{{ mail_short_name | default('mail') }}.{{ mail_domain | default(domain) }}:465"
IS_TLS_ENABLED: "true"
SMTP_ADDR: "{{ mail_short_name | default('mail') }}.{{ mail_domain | default(domain) }}"
FROM: "gitea@{{ mail_domain | default(domain) }}"
USER: "{{ gitea_ldap_user | default('gitea') }}"
PASSWD: "{{ gitea_ldap_pass | default(gitea_ldap_password) }}"
MAILER_TYPE: "smtp"
PROTOCOL: "smtps"
actions:
ENABLED: "true"
packages:
@@ -139,10 +152,12 @@ gitea_ingress_nginx_default_values:
22: "{{ gitea_namespace | default(namespace) }}/{{ namespace }}-gitea-ssh:22"
gitea_dns_chart_ref: "ghp/external-dns"
gitea_dns_chart_ref: "bitnami/external-dns"
gitea_dns_tsigKeyname: "k8s-{{ k8s_cluster_name }}-{{ namespace }}"
gitea_dns_tsigSecretAlg: "hmac-sha512"
gitea_dns_default_values:
fullnameOverride: "{{ gitea_dns_name | default(namespace + '-gitea-internal-dns') }}"
ingressClass: "{{ gitea_ingress_class }}"
ingressClassFilters: ["{{ gitea_ingress_class }}"]
domainFilters: ["{{ domain }}"]
provider: rfc2136
rfc2136:
@@ -150,8 +165,8 @@ gitea_dns_default_values:
port: 53
zone: "{{ domain }}"
tsigSecret: "{{ k8s_tsig }}"
tsigSecretAlg: "{{ gitea_dns_tsigSecretAlg | default('hmac-sha512') }}"
tsigKeyname: "{{ gitea_dns_tsigKeyname | default('k8s') }}"
tsigSecretAlg: "{{ gitea_dns_tsigSecretAlg }}"
tsigKeyname: "{{ gitea_dns_tsigKeyname }}"
tsigAxfr: true
## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
minTTL: "30s"

View File

@@ -5,20 +5,20 @@
- postgres_enabled is defined and postgres_enabled
- set_fact:
gitea_combined_values: "{{ gitea_default_values | combine(gitea_values, recursive=true) }}"
gitea_combined_values: "{{ gitea_default_values | combine(gitea_values | default({}), recursive=true) }}"
- set_fact:
gitea_combined_values: "{{ gitea_combined_values | combine(gitea_external_db_values, recursive=true) }}"
gitea_combined_values: "{{ gitea_combined_values | combine(gitea_external_db_values | default({}), recursive=true) }}"
when: gitea_use_external_db
- set_fact:
gitea_dns_combined_values: "{{ gitea_dns_default_values | combine(gitea_dns_values, recursive=true) }}"
gitea_dns_combined_values: "{{ gitea_dns_default_values | combine(gitea_dns_values | default({}), recursive=true) }}"
- set_fact:
gitea_ingress_nginx_combined_values: "{{ gitea_ingress_nginx_default_values | combine(gitea_ingress_nginx_values, recursive=true) }}"
gitea_ingress_nginx_combined_values: "{{ gitea_ingress_nginx_default_values | combine(gitea_ingress_nginx_values | default({}), recursive=true) }}"
- set_fact:
gitea_ingress_nginx_combined_values: "{{ gitea_ingress_nginx_combined_values | combine(gitea_publish_ingress_nginx_values, recursive=true) }}"
gitea_ingress_nginx_combined_values: "{{ gitea_ingress_nginx_combined_values | combine(gitea_publish_ingress_nginx_values | default({}), recursive=true) }}"
when: gitea_publish_web
- name: Deploy Nginx Ingress for Gitea

View File

@@ -13,6 +13,13 @@
- restart haproxy
when: haproxy_config is defined
- name: set haproxy_connect_any flag on and keep it persistent across reboots
ansible.posix.seboolean:
name: haproxy_connect_any
state: yes
persistent: yes
when: ansible_selinux is defined and ansible_selinux != False and ansible_selinux.status == 'enabled'
- name: start haproxy service
systemd:
name: haproxy

View File

@@ -5,7 +5,7 @@
- postgres_enabled is defined and postgres_enabled
- set_fact:
harbor_combined_values: "{{ harbor_default_values | combine(harbor_values, recursive=true) }}"
harbor_combined_values: "{{ harbor_default_values | combine(harbor_values | default({}), recursive=true) }}"
- name: Deploy Harbor
kubernetes.core.helm:

View File

@@ -1,5 +1,5 @@
helm_repos:
- { name: 'ghp', url: 'https://git.geekhome.org' }
- { name: 'ghp', url: 'https://gitea.geekhome.org/api/packages/ghp/helm' }
- { name: 'jetstack', url: 'https://charts.jetstack.io' }
- { name: 'bitnami', url: 'https://charts.bitnami.com/bitnami' }
- { name: 'ingress-nginx', url: 'https://kubernetes.github.io/ingress-nginx' }
@@ -11,4 +11,4 @@ helm_repos:
- { name: 'metallb', url: 'https://metallb.github.io/metallb' }
- { name: 'nfs-subdir-external-provisioner', url: 'https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner' }
- { name: 'metrics-server', url: 'https://kubernetes-sigs.github.io/metrics-server/' }
- { name: 'minio', url: 'https://charts.min.io/' }

View File

@@ -1,7 +1,9 @@
internal_dns_chart_ref: "ghp/external-dns"
internal_dns_chart_ref: "bitnami/external-dns"
internal_dns_tsigKeyname: "k8s-{{ k8s_cluster_name }}-{{ namespace }}"
internal_dns_tsigSecretAlg: "hmac-sha512"
internal_dns_default_values:
fullnameOverride: "{{ internal_dns_name | default(namespace + '-internal-dns') }}"
ingressClass: "{{ internal_ingress_class }}"
ingressClassFilters: ["{{ internal_ingress_class }}"]
domainFilters: ["{{ internal_domain | default(domain) }}"]
provider: rfc2136
rfc2136:
@@ -9,8 +11,8 @@ internal_dns_default_values:
port: 53
zone: "{{ internal_domain | default(domain) }}"
tsigSecret: "{{ k8s_tsig }}"
tsigSecretAlg: "{{ internal_dns_tsigSecretAlg | default('hmac-sha512') }}"
tsigKeyname: "{{ internal_dns_tsigKeyname | default('k8s') }}"
tsigSecretAlg: "{{ internal_dns_tsigSecretAlg }}"
tsigKeyname: "{{ internal_dns_tsigKeyname }}"
tsigAxfr: true
## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
minTTL: "30s"

View File

@@ -1,5 +1,5 @@
- set_fact:
internal_dns_combined_values: "{{ internal_dns_default_values | combine(internal_dns_values, recursive=true) }}"
internal_dns_combined_values: "{{ internal_dns_default_values | combine(internal_dns_values | default({}), recursive=true) }}"
- name: Deploy internal DNS
kubernetes.core.helm:

View File

@@ -1,5 +1,5 @@
- set_fact:
internal_ingress_nginx_combined_values: "{{ internal_ingress_nginx_default_values | combine(internal_ingress_nginx_values, recursive=true) }}"
internal_ingress_nginx_combined_values: "{{ internal_ingress_nginx_default_values | combine(internal_ingress_nginx_values | default({}), recursive=true) }}"
- name: Deploy internal Nginx Ingress
kubernetes.core.helm:

View File

@@ -0,0 +1,63 @@
keycloak_enabled: true
keycloak_publish: false
keycloak_console_publish: false
keycloak_use_external_db: true
keycloak_chart_ref: "codecentric/keycloakx"
keycloak_short_name: "keycloak"
keycloak_console_short_name: "console"
keycloak_default_values:
command:
- /opt/keycloak/bin/kc.sh
- start
- --http-enabled=true
- --http-port=8080
- --hostname={{ keycloak_short_name }}.{{ domain }}
- --hostname-strict=false
- --hostname-strict-https=false
database:
database: "keycloak"
hostname: "{{ postgres_db_team | default(namespace) }}-postgres.{{ postgres_db_namespace | default(namespace) }}"
username: "{{ keycloak_db_username | default(omit) }}"
password: "{{ keycloak_db_password | default(omit) }}"
port: 5432
vendor: postgres
extraEnv: |
- name: KEYCLOAK_ADMIN
value: admin
- name: KEYCLOAK_ADMIN_PASSWORD
value: {{ keycloak_admin_password }}
- name: JAVA_OPTS_APPEND
value: >-
-Djgroups.dns.query={{ keycloak_short_name }}-keycloakx-headless
ingress:
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
enabled: true
ingressClassName: "{{ external_ingress_class if minio_publish else internal_ingress_class }}"
rules:
- host: "{{ keycloak_short_name }}.{{ domain }}"
paths:
- path: /auth/
pathType: Prefix
servicePort: http
tls:
- hosts:
- "{{ keycloak_short_name }}.{{ domain }}"
secretName: "{{ keycloak_short_name }}.{{ domain }}-tls"
keycloak_realms: {}
keycloak_clients: {}
keycloak_clients_default_protocol_mappings: {}
# - config:
# access.token.claim: true
# claim.name: "groups"
# id.token.claim: true
# jsonType.label: String
# user.attribute: groups
# userinfo.token.claim: true
# name: groups
# protocol: openid-connect
# protocolMapper: oidc-usermodel-attribute-mapper
keycloak_users: {}
keycloak_groups: {}

View File

@@ -0,0 +1,96 @@
- name: Import secret.yaml to obtain secrets
include_tasks: secrets.yaml
when:
- keycloak_use_external_db
- postgres_enabled is defined and postgres_enabled
- set_fact:
keycloak_combined_values: "{{ keycloak_default_values | combine(keycloak_values | default({}), recursive=true) }}"
- name: Deploy Keycloak
kubernetes.core.helm:
release_namespace: "{{ keycloak_namespace | default(namespace) }}"
release_name: "{{ keycloak_name | default('keycloak') }}"
chart_ref: "{{ keycloak_chart_ref }}"
chart_version: "{{ keycloak_version | default(omit) }}"
release_values: "{{ keycloak_combined_values | from_yaml }}"
- name: Wait Keycloak until HTTP status is 200
uri:
url: "https://{{ keycloak_short_name }}.{{ domain }}/auth"
return_content: yes
validate_certs: no
status_code:
- 200
until: uri_output.status == 200
retries: 24 # Retries for 24 * 5 seconds = 120 seconds = 2 minutes
delay: 5 # Every 5 seconds
register: uri_output
- name: Create or update Keycloak client, authentication with credentials
community.general.keycloak_client:
client_id: admin-cli
auth_keycloak_url: "https://{{ keycloak_short_name }}.{{ domain }}/auth"
auth_realm: master
auth_username: admin
auth_password: "{{ keycloak_admin_password }}"
state: present
- name: Create or update Keycloak realms
community.general.keycloak_realm:
auth_client_id: admin-cli
auth_keycloak_url: "https://{{ keycloak_short_name }}.{{ domain }}/auth"
auth_realm: master
auth_username: admin
auth_password: "{{ keycloak_admin_password }}"
id: "{{ item.id }}"
realm: "{{ item.realm }}"
state: "{{ item.state | default('present') }}"
enabled: "{{ item.enabled | default(true) }}"
loop: "{{ keycloak_realms }}"
- name: Create or update Keycloak clients
community.general.keycloak_client:
auth_client_id: admin-cli
auth_keycloak_url: "https://{{ keycloak_short_name }}.{{ domain }}/auth"
auth_realm: master
auth_username: admin
auth_password: "{{ keycloak_admin_password }}"
client_id: "{{ item.client_id }}"
realm: "{{ item.realm }}"
name: "{{ \"${client_\" + item.client_id + \"}\" }}"
protocol: openid-connect
public_client: "{{ item.public_client | default(false) }}"
standard_flow_enabled: "{{ item.standard_flow_enabled | default(true) }}"
implicit_flow_enabled: "{{ item.implicit_flow_enabled | default(true) }}"
direct_access_grants_enabled: "{{ item.direct_access_grants_enabled | default(true) }}"
state: "{{ item.state | default('present') }}"
protocol_mappers: "{{ keycloak_clients_default_protocol_mappings }}"
loop: "{{ keycloak_clients }}"
- name: Create Keycloak groups
community.general.keycloak_group:
auth_client_id: admin-cli
auth_keycloak_url: "https://{{ keycloak_short_name }}.{{ domain }}/auth"
auth_realm: master
auth_username: admin
auth_password: "{{ keycloak_admin_password }}"
realm: "{{ item.realm }}"
name: "{{ item.name }}"
state: "{{ item.state | default('present') }}"
loop: "{{ keycloak_groups }}"
- name: Create Keycloak users
community.general.keycloak_user:
auth_client_id: admin-cli
auth_keycloak_url: "https://{{ keycloak_short_name }}.{{ domain }}/auth"
auth_realm: master
auth_username: admin
auth_password: "{{ keycloak_admin_password }}"
realm: "{{ item.realm }}"
state: "{{ item.state | default('present') }}"
username: "{{ item.username }}"
firstName: "{{ item.firstName }}"
lastName: "{{ item.lastName }}"
email: "{{ item.email | default( item.username + '@' + domain) }}"
enabled: "{{ item.enabled | default(true) }}"
emailVerified: "{{ item.emailVerified | default(true) }}"
credentials: "{{ item.credentials }}"
groups: "{{ item.groups }}"
loop: "{{ keycloak_users }}"

View File

@@ -0,0 +1,25 @@
- block:
- name: Set DB namespace for secret lookup
set_fact:
db_namespace: "{{ keycloak_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}"
- name: Set DB secret name for lookup
set_fact:
db_secret_name: "keycloak.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do"
- name: Lookup Keycloak DB secret
set_fact:
keycloak_db_secret: "{{ lookup('k8s', kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}"
- debug:
msg: "{{ keycloak_db_secret }}"
verbosity: 2
- name: Set Keycloak DB username
set_fact:
keycloak_db_username: "{{ keycloak_db_secret.data.username | b64decode }}"
- name: Set Keycloak DB password
set_fact:
keycloak_db_password: "{{ keycloak_db_secret.data.password | b64decode }}"

View File

@@ -1,4 +1,15 @@
---
knot_version: ""
# XDP datapath options
# Note: rfc2136 aka nsupdate aka dynamic update
# not working with XDP
knot_xdp: false
knot_xdp_interface: "eth0"
# QUIC protocol
knot_quic: false
knot_conf: |
# This is a sample of a minimal configuration file for Knot DNS.
# See knot.conf(5) or refer to the server documentation.
@@ -8,7 +19,16 @@ knot_conf: |
user: knot:knot
listen: [ 0.0.0.0@53, ::@53 ]
udp-max-payload: 1232
{% if knot_quic %}
listen-quic: [ 0.0.0.0, :: ]
{% endif %}
{% if knot_xdp %}
xdp:
listen: {{ knot_xdp_interface }}
udp: true
tcp: true
quic: true
{% endif %}
log:
- target: syslog
any: debug

View File

@@ -0,0 +1,6 @@
---
- name: Restart knot
systemd:
name: knot
state: restarted
daemon_reload: true

View File

@@ -1,4 +0,0 @@
---
- name: restart knot
service: "name=knot state=restarted"
become: true

View File

@@ -0,0 +1,28 @@
---
- name: Install knot {{- knot_version }} packages
dnf:
name: "{{ knot_packages }}"
state: "{{ 'latest' if knot_version == 'latest' else 'present' }}"
update_cache: true
- name: Include XDP configuratio
when: knot_xdp
block:
- name: Create override directory for knot systemd unit
file:
name: /etc/systemd/system/knot.service.d
state: directory
- name: Create override.conf for knot for XDP
copy:
dest: /etc/systemd/system/knot.service.d/override.conf
content: |
[Service]
CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN CAP_SYS_ADMIN CAP_IPC_LOCK CAP_SYS_RESOURCE
AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN CAP_SYS_ADMIN CAP_IPC_LOCK CAP_SYS_RESOURCE
notify: Restart knot
- name: Install deps for XDP
dnf:
name: "{{ knot_deps_packages }}"
state: present
update_cache: true

View File

@@ -1,7 +0,0 @@
---
# RedHat Family (RedHat, Fendora, CentOS, Amazon, etc)
- name: packages (RedHat)
yum:
name: knot
state: present
when: ansible_os_family == "RedHat"

View File

@@ -0,0 +1,41 @@
---
- name: Make sure handlers are flushed immediately
meta: flush_handlers
- name: Load a variable file based on the OS type
include_vars: "{{ lookup('first_found', params) }}"
vars:
params:
files:
- "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yaml"
- "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_version'] }}.yaml"
- "{{ ansible_facts['os_family'] }}-{{ ansible_facts['distribution_major_version'] }}.yaml"
- "{{ ansible_facts['os_family'] }}-{{ ansible_facts['distribution_version'] }}.yaml"
- "{{ ansible_facts['distribution'] }}.yaml"
- "{{ ansible_facts['os_family'] }}.yaml"
- main.yaml
paths:
- "vars"
tags: knot_vars
- name: Include knot install for {{ ansible_distribution }}
include_tasks: "{{ ansible_facts['os_family'] }}.yaml"
- name: Configure knot
copy:
content: "{{ knot_conf }}"
dest: /etc/knot/knot.conf
mode: 0640
owner: "root"
group: "knot"
validate: "knotc -c %s conf-check"
backup: true
notify: Restart knot
- name: Enable and start knot
systemd:
name: "knot"
enabled: true
state: started
masked: false
daemon_reload: true

View File

@@ -1,24 +0,0 @@
---
- name: install
become: true
include: install.yml
# Configuration
- name: configure knot
become: true
copy:
content: "{{ knot_conf }}"
dest: /etc/knot/knot.conf
mode: 0640
owner: "root"
group: "knot"
validate: "knotc -c %s conf-check"
notify: restart knot
- name: enable knot
become: true
systemd:
name: "knot"
enabled: yes
state: started
daemon_reload: yes

View File

@@ -0,0 +1,7 @@
knot_package_name: knot
knot_packages:
- "{{ knot_package_name + '-' + knot_version if (knot_version is defined and (knot_version != '*' and knot_version != '' and knot_version != 'latest')) else knot_package_name }}"
- "{{ knot_package_name + '-' + 'tools' + '-' + knot_version if (knot_version is defined and (knot_version != '*' and knot_version != '' and knot_version != 'latest')) else knot_package_name }}"
knot_deps_packages:
- xdp-tools
- bpftool

View File

@@ -0,0 +1 @@
---

View File

@@ -1,7 +1,9 @@
local_dns_chart_ref: "ghp/external-dns"
local_dns_chart_ref: "bitnami/external-dns"
local_dns_tsigKeyname: "k8s-{{ k8s_cluster_name }}-{{ namespace }}"
local_dns_tsigSecretAlg: "hmac-sha512"
local_dns_default_values:
fullnameOverride: "{{ local_dns_name | default(namespace + '-local-dns') }}"
ingressClass: "{{ local_ingress_class }}"
ingressClassFilters: ["{{ local_ingress_class }}"]
domainFilters: ["{{ local_domain }}"]
provider: rfc2136
rfc2136:
@@ -9,8 +11,8 @@ local_dns_default_values:
port: 53
zone: "{{ local_domain }}"
tsigSecret: "{{ k8s_tsig }}"
tsigSecretAlg: "{{ local_dns_tsigSecretAlg | default('hmac-sha512') }}"
tsigKeyname: "{{ local_dns_tsigKeyname | default('k8s') }}"
tsigSecretAlg: "{{ local_dns_tsigSecretAlg }}"
tsigKeyname: "{{ local_dns_tsigKeyname }}"
tsigAxfr: true
## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
minTTL: "30s"

View File

@@ -1,5 +1,5 @@
- set_fact:
local_dns_combined_values: "{{ local_dns_default_values | combine(local_dns_values, recursive=true) }}"
local_dns_combined_values: "{{ local_dns_default_values | combine(local_dns_values | default({}), recursive=true) }}"
- name: Deploy local DNS
kubernetes.core.helm:

View File

@@ -1,5 +1,5 @@
- set_fact:
local_ingress_nginx_combined_values: "{{ local_ingress_nginx_default_values | combine(local_ingress_nginx_values, recursive=true) }}"
local_ingress_nginx_combined_values: "{{ local_ingress_nginx_default_values | combine(local_ingress_nginx_values | default({}), recursive=true) }}"
- name: Deploy local Nginx Ingress
kubernetes.core.helm:

View File

@@ -1 +1,4 @@
mail_short_name: "mail"
# WIP
mail_oidc: false

View File

@@ -37,11 +37,15 @@
storageClassName: "{{ mailbox_storage | default('nfs-hdd') }}"
- name: Deploy Postfix
vars:
postfix_oidc: "{{ mail_oidc }}"
import_role:
name: postfix
tags: postfix
- name: Deploy Dovecot
vars:
dovecot_oidc: "{{ mail_oidc }}"
import_role:
name: dovecot
tags: dovecot

View File

@@ -26,43 +26,26 @@ mastodon_default_values:
- "{{ mastodon_short_name }}.{{ domain }}"
mastodon:
# create an initial administrator user; the password is autogenerated and will
# have to be reset
createAdmin:
enabled: true
username: "{{ mastodon_admin_user | default(mastodon_admin_username) | default('mastodon') }}"
password: "{{ mastodon_admin_pass | default(mastodon_admin_password) }}"
email: "{{ mastodon_admin_email }}"
# available locales: https://github.com/tootsuite/mastodon/blob/master/config/application.rb#L43
locale: en
local_domain: "{{ mastodon_short_name }}.{{ domain }}"
cron:
# run `tootctl media remove` every week
removeMedia:
enabled: true
schedule: "0 0 * * 0"
web:
port: 3000
streaming:
port: 4000
# this should be set manually since os.cpus() returns the number of CPUs on
# the node running the pod, which is unrelated to the resources allocated to
# the pod by k8s
workers: 2
sidekiq:
concurrency: 25
# these must be set manually; autogenerated keys are rotated on each upgrade
secrets:
secret_key_base: "{{ mastodon_vapid_public_key_base64 | hash('sha256') }}"
otp_secret: "{{ mastodon_vapid_public_key_base64 | hash('sha256') | hash('sha256') }}"
vapid:
private_key: "{{ mastodon_vapid_private_key_base64 | b64decode }}"
public_key: "{{ mastodon_vapid_public_key_base64 | b64decode }}"
activeRecordEncryption:
primaryKey: "{{ mastodon_primary_key_secret }}"
deterministicKey: "{{ mastodon_deterministic_key_secret }}"
keyDerivationSalt: "{{ mastodon_key_derivation_salt_secret }}"
smtp:
auth_method: login
ca_file:
@@ -95,11 +78,6 @@ mastodon_default_values:
storage: "{{ mastodon_system_size | default('100Gi') }}"
elasticsearch:
# `false` will disable full-text search
#
# if you enable ES after the initial install, you will need to manually run
# RAILS_ENV=production bundle exec rake chewy:sync
# (https://docs.joinmastodon.org/admin/optional/elasticsearch/)
enabled: "{{ mastodon_enable_elasticsearch }}"
master:
name: master
@@ -116,20 +94,14 @@ mastodon_default_values:
##
replicas: 1
# https://github.com/bitnami/charts/tree/master/bitnami/postgresql#parameters
postgresql:
# Disable for external PostgreSQL
enabled: false
postgresqlHostname: "{{ namespace }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local"
# you must set a password; the password generated by the postgresql chart will
# be rotated on each upgrade:
# https://github.com/bitnami/charts/tree/master/bitnami/postgresql#upgrade
auth:
database: mastodon
username: "{{ mastodon_db_username }}"
password: "{{ mastodon_db_password }}"
# https://github.com/bitnami/charts/tree/master/bitnami/redis#parameters
redis:
architecture: standalone
enabled: true

View File

@@ -5,7 +5,7 @@
- postgres_enabled is defined and postgres_enabled
- set_fact:
mastodon_combined_values: "{{ mastodon_default_values | combine(mastodon_values, recursive=true) }}"
mastodon_combined_values: "{{ mastodon_default_values | combine(mastodon_values | default({}), recursive=true) }}"
- name: Deploy Mastodon
kubernetes.core.helm:
@@ -15,38 +15,3 @@
chart_ref: "{{ mastodon_chart_ref }}"
chart_version: "{{ mastodon_version | default(omit) }}"
release_values: "{{ mastodon_combined_values | from_yaml }}"
- name: Search for mastodon web pod
kubernetes.core.k8s_info:
kind: Pod
namespace: "{{ mastodon_namespace | default(namespace) }}"
label_selectors:
- app.kubernetes.io/component=web
- app.kubernetes.io/instance=mastodon
register: mastodon_web_pod_name
- name: Remove mastodon web pod for restart
kubernetes.core.k8s:
state: absent
api_version: v1
kind: Pod
namespace: "{{ mastodon_namespace | default(namespace) }}"
name: "{{ mastodon_web_pod_name.resources[0].metadata.name }}"
- name: Search for mastodon streaming pod
kubernetes.core.k8s_info:
kind: Pod
namespace: "{{ mastodon_namespace | default(namespace) }}"
label_selectors:
- app.kubernetes.io/component=streaming
- app.kubernetes.io/instance=mastodon
register: mastodon_streaming_pod_name
- name: Remove mastodon streaming pod for restart
kubernetes.core.k8s:
state: absent
api_version: v1
kind: Pod
namespace: "{{ mastodon_namespace | default(namespace) }}"
name: "{{ mastodon_streaming_pod_name.resources[0].metadata.name }}"

View File

@@ -1,5 +1,5 @@
- set_fact:
metallb_combined_values: "{{ metallb_default_values | combine(metallb_values, recursive=true) }}"
metallb_combined_values: "{{ metallb_default_values | combine(metallb_values | default({}), recursive=true) }}"
- name: Deploy MetalLB
kubernetes.core.helm:
@@ -11,15 +11,25 @@
release_values: "{{ metallb_combined_values | from_yaml }}"
wait: true
- name: Check if kube-proxy ConfigMap exist
check_mode: false
shell: |
kubectl get configmap kube-proxy -n kube-system
register: _kube_proxy_configmap
failed_when: false
changed_when: false
- name: Check for strict arp
check_mode: false
shell: |
kubectl get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
kubectl diff -f - -n kube-system
register: check_strict_arp
when: metallb_strict_arp
changed_when: check_strict_arp.rc != 0
register: check_strict_arp_status
when:
- _kube_proxy_configmap.rc == 0
- metallb_strict_arp
changed_when: check_strict_arp_status.rc != 0
- name: Apply strict arp
shell: |
@@ -28,8 +38,9 @@
kubectl apply -f - -n kube-system \
&& kubectl -n kube-system delete pods --selector=k8s-app=kube-proxy
when:
- strict_arp_for_metallb
- metallb_strict_arp.changed
- _kube_proxy_configmap.rc == 0
- metallb_strict_arp
- metallb_strict_arp_status.changed
- name: Apply MetalLB L2 definitions
k8s:

View File

@@ -1,5 +1,5 @@
- set_fact:
metrics_server_combined_values: "{{ metrics_server_default_values | combine(metrics_server_values, recursive=true) }}"
metrics_server_combined_values: "{{ metrics_server_default_values | combine(metrics_server_values | default({}), recursive=true) }}"
- name: Deploy Metrics server
kubernetes.core.helm:

View File

@@ -0,0 +1,61 @@
minio_enabled: true
minio_publish: false
minio_console_publish: false
minio_chart_ref: "minio/minio"
minio_short_name: "minio"
minio_console_short_name: "console"
minio_default_values:
mode: distributed ## other supported values are "standalone"
resources:
requests:
memory: 512Mi
replicas: 4
rootUser: "admin"
rootPassword: "{{ minio_admin_password }}"
persistence:
enabled: true
storageClass: "{{ minio_storage | default('nfs-hdd') }}"
accessMode: "{{ minio_storage_mode | default('ReadWriteOnce') }}"
size: "{{ minio_size | default('100Gi') }}"
ingress:
enabled: true
ingressClassName: "{{ external_ingress_class if minio_publish else internal_ingress_class }}"
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# kubernetes.io/ingress.allow-http: "false"
# kubernetes.io/ingress.global-static-ip-name: ""
# nginx.ingress.kubernetes.io/secure-backends: "true"
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0
path: /
hosts:
- "{{ minio_short_name }}.{{ domain }}"
tls:
- secretName: "{{ minio_short_name }}.{{ domain }}-tls"
hosts:
- "{{ minio_short_name }}.{{ domain }}"
consoleIngress:
enabled: true
ingressClassName: "{{ external_ingress_class if minio_console_publish else internal_ingress_class }}"
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
path: /
hosts:
- "{{ minio_console_short_name }}.{{ minio_short_name }}.{{ domain }}"
tls:
- secretName: "{{ minio_console_short_name }}.{{ minio_short_name }}.{{ domain }}-tls"
hosts:
- "{{ minio_console_short_name }}.{{ minio_short_name }}.{{ domain }}"

View File

@@ -0,0 +1,12 @@
- set_fact:
minio_combined_values: "{{ minio_default_values | combine(minio_values | default({}), recursive=true) }}"
- name: Deploy MinIO
kubernetes.core.helm:
create_namespace: true
release_namespace: "{{ minio_namespace | default(namespace) }}"
release_name: "{{ minio_name | default('minio') }}"
chart_ref: "{{ minio_chart_ref }}"
chart_version: "{{ minio_version | default(omit) }}"
release_values: "{{ minio_combined_values | from_yaml }}"
wait: true

View File

@@ -5,7 +5,7 @@
- postgres_enabled is defined and postgres_enabled
- set_fact:
nextcloud_combined_values: "{{ nextcloud_default_values | combine(nextcloud_values, recursive=true) }}"
nextcloud_combined_values: "{{ nextcloud_default_values | combine(nextcloud_values | default({}), recursive=true) }}"
- name: Deploy Nextcloud
kubernetes.core.helm:

View File

@@ -1,5 +1,5 @@
- set_fact:
nfs_client_provisioner_hdd_combined_values: "{{ nfs_client_provisioner_hdd_default_values | combine(nfs_client_provisioner_hdd_values, recursive=true) }}"
nfs_client_provisioner_hdd_combined_values: "{{ nfs_client_provisioner_hdd_default_values | combine(nfs_client_provisioner_hdd_values | default({}), recursive=true) }}"
- name: Deploy NFS client provisioner for HDD storage
kubernetes.core.helm:
@@ -12,7 +12,7 @@
wait: true
- set_fact:
nfs_client_provisioner_ssd_combined_values: "{{ nfs_client_provisioner_ssd_default_values | combine(nfs_client_provisioner_ssd_values, recursive=true) }}"
nfs_client_provisioner_ssd_combined_values: "{{ nfs_client_provisioner_ssd_default_values | combine(nfs_client_provisioner_ssd_values | default({}), recursive=true) }}"
- name: Deploy NFS client provisioner for SSD storage
kubernetes.core.helm:

View File

@@ -10,3 +10,17 @@
register: install_nginx_result
tags:
- nginx-install
- name: set httpd_can_network_connect flag on and keep it persistent across reboots
ansible.posix.seboolean:
name: httpd_can_network_connect
state: yes
persistent: yes
when: ansible_selinux is defined and ansible_selinux != False and ansible_selinux.status == 'enabled'
- name: set httpd_can_network_relay flag on and keep it persistent across reboots
ansible.posix.seboolean:
name: httpd_can_network_relay
state: yes
persistent: yes
when: ansible_selinux is defined and ansible_selinux != False and ansible_selinux.status == 'enabled'

View File

@@ -31,7 +31,7 @@
group: cert-manager.io
- set_fact:
openldap_combined_values: "{{ openldap_default_values | combine(openldap_values, recursive=true) }}"
openldap_combined_values: "{{ openldap_default_values | combine(openldap_values | default({}), recursive=true) }}"
- name: Deploy OpenLDAP
kubernetes.core.helm:

View File

@@ -5,7 +5,7 @@
- postgres_enabled is defined and postgres_enabled
- set_fact:
peertube_combined_values: "{{ peertube_default_values | combine(peertube_values, recursive=true) }}"
peertube_combined_values: "{{ peertube_default_values | combine(peertube_values | default({}), recursive=true) }}"
- name: Deploy PeerTube
kubernetes.core.helm:

View File

@@ -1,5 +1,5 @@
- set_fact:
playmaker_combined_values: "{{ playmaker_default_values | combine(playmaker_values, recursive=true) }}"
playmaker_combined_values: "{{ playmaker_default_values | combine(playmaker_values | default({}), recursive=true) }}"
- name: Deploy Docker playmaker
kubernetes.core.helm:

View File

@@ -57,12 +57,12 @@ postfix_default_values:
virtual_transport = lmtp:inet:{{ dovecot_short_name | default('dovecot') }}.{{ namespace }}.svc.cluster.local:24
# Certs and TLS options
smtpd_tls_cert_file = /tls/tls.crt
smtpd_tls_cert_file = /tls/tls.crt
smtpd_tls_key_file = /tls/tls.key
smtpd_use_tls = yes
smtpd_tls_auth_only = yes
smtpd_tls_security_level = may
smtp_tls_loglevel = 1
smtp_tls_loglevel = 1
smtpd_tls_loglevel = 1
smtpd_tls_received_header = yes
smtpd_tls_session_cache_timeout = 3600s
@@ -117,7 +117,7 @@ postfix_default_values:
manpage_directory = /usr/share/man
newaliases_path = /usr/bin/newaliases
mailq_path = /usr/bin/mailq
master: |
#
# Postfix master process configuration file. For details on the format
@@ -287,6 +287,290 @@ postfix_default_values:
query_filter = mail=%s
result_attribute = cn
cache = no
service:
type: LoadBalancer
loadBalancerIP: "{{ postfix_loadbalancer_ip | default(omit) }}"
# WIP
postfix_oidc: false
postfix_oidc_default_values:
replicaCount: 1
persistence:
enabled: true
existingClaim: mailboxes
tls:
enabled: true
existingSecret: "{{ mail_short_name | default('mail') }}.{{ domain }}-secret"
postfix:
configmaps:
main: |
#smtp_host_lookup = native
compatibility_level = 2
maillog_file = /dev/stdout
# Use ipv4 and listen on all interfaces
inet_protocols = ipv4
inet_interfaces = all
queue_directory = /var/spool/postfix
command_directory = /usr/sbin
daemon_directory = /usr/libexec/postfix
data_directory = /var/lib/postfix
mail_owner = postfix
# Postfix full server name for mail send/recieve
myhostname = {{ mail_short_name | default('mail') }}.{{ domain }}
# Set domain name
mydomain = {{ domain }}
# Local name for mail send
myorigin = $mydomain
# Local mail delivery
mydestination = $myhostname, localhost.$mydomain, localhost
# Transport type
local_transport = virtual
# Local users map
#local_recipient_maps = $virtual_mailbox_maps
# Reject code
unknown_local_recipient_reject_code = 550
# Virtual domain list
virtual_mailbox_domains = {{ domain }}
virtual_mailbox_base = /var/mail/vhosts
# Allowed users map
#virtual_mailbox_maps = ldap:/etc/postfix/ldap-local-recipients.cf
# Dovecot socket for mail delivery
#virtual_transport = lmtp:unix:private/dovecot-lmtp
virtual_transport = lmtp:inet:{{ dovecot_short_name | default('dovecot') }}.{{ namespace }}.svc.cluster.local:24
# Certs and TLS options
smtpd_tls_cert_file = /tls/tls.crt
smtpd_tls_key_file = /tls/tls.key
smtpd_use_tls = yes
smtpd_tls_auth_only = yes
smtpd_tls_security_level = may
smtp_tls_loglevel = 1
smtpd_tls_loglevel = 1
smtpd_tls_received_header = yes
smtpd_tls_session_cache_timeout = 3600s
smtp_tls_note_starttls_offer = yes
tls_random_source = dev:/dev/urandom
smtp_tls_security_level = may
# DANE-Settings
#smtp_dns_support_level=dnssec
#smtp_host_lookup=dns
#smtp_tls_security_level = dane
#smtp_tls_loglevel=1
# Filters for mail
smtpd_helo_required = yes
#smtpd_recipient_restrictions = permit_sasl_authenticated, permit_mynetworks, reject_unauth_destination, reject_unknown_sender_domain, reject_invalid_helo_hostname, reject_unauth_destination
smtpd_recipient_restrictions = permit_sasl_authenticated, permit_mynetworks, reject_unauth_destination, reject_non_fqdn_sender, reject_unknown_sender_domain, reject_invalid_helo_hostname, reject_non_fqdn_helo_hostname, reject_unauth_destination
smtpd_discard_ehlo_keywords = ''
# SASL auth with dovecot options
smtpd_sasl_auth_enable = yes
smtpd_sasl_security_options = noanonymous
broken_sasl_auth_clients = yes
smtpd_sasl_type = dovecot
smtpd_sasl_path = inet:{{ dovecot_short_name | default('dovecot') }}.{{ namespace }}.svc.cluster.local:12345
smtpd_sasl_local_domain = $myorigin
milter_protocol = 6
milter_mail_macros = i {mail_addr} {client_addr} {client_name} {auth_authen}
smtpd_milters = inet:{{ rspamd_short_name | default('rspamd') }}.{{ namespace }}.svc.cluster.local:11332
non_smtpd_milters = $smtpd_milters
milter_default_action = accept
smtpd_tls_CAfile = /etc/ssl/certs/ca-bundle.crt
smtp_tls_CAfile = /etc/ssl/certs/ca-bundle.crt
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtpd_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtp_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtp_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtp_tls_exclude_ciphers = EXP, MEDIUM, LOW, DES, 3DES, SSLv2
smtpd_tls_exclude_ciphers = EXP, MEDIUM, LOW, DES, 3DES, SSLv2
tls_high_cipherlist = kEECDH:+kEECDH+SHA:kEDH:+kEDH+SHA:+kEDH+CAMELLIA:kECDH:+kECDH+SHA:kRSA:+kRSA+SHA:+kRSA+CAMELLIA:!aNULL:!eNULL:!SSLv2:!RC4:!MD5:!DES:!EXP:!SEED:!IDEA:!3DES
tls_medium_cipherlist = kEECDH:+kEECDH+SHA:kEDH:+kEDH+SHA:+kEDH+CAMELLIA:kECDH:+kECDH+SHA:kRSA:+kRSA+SHA:+kRSA+CAMELLIA:!aNULL:!eNULL:!SSLv2:!MD5:!DES:!EXP:!SEED:!IDEA:!3DES
smtp_tls_ciphers = high
smtpd_tls_ciphers = high
sendmail_path = /usr/sbin/sendmail
html_directory = no
setgid_group = postdrop
manpage_directory = /usr/share/man
newaliases_path = /usr/bin/newaliases
mailq_path = /usr/bin/mailq
master: |
#
# Postfix master process configuration file. For details on the format
# of the file, see the master(5) manual page (command: "man 5 master").
#
# Do not forget to execute "postfix reload" after editing this file.
#
# ==========================================================================
# service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (yes) (never) (100)
# ==========================================================================
smtp inet n - n - - smtpd
#smtp inet n - n - 1 postscreen
smtpd pass - - n - - smtpd
dnsblog unix - - n - 0 dnsblog
tlsproxy unix - - n - 0 tlsproxy
submission inet n - n - - smtpd
# -o syslog_name=postfix/submission
# -o smtpd_tls_security_level=encrypt
# -o smtpd_sasl_auth_enable=yes
# -o smtpd_reject_unlisted_recipient=no
# -o smtpd_client_restrictions=$mua_client_restrictions
# -o smtpd_helo_restrictions=$mua_helo_restrictions
# -o smtpd_sender_restrictions=$mua_sender_restrictions
# -o smtpd_recipient_restrictions=permit_sasl_authenticated,reject
# -o milter_macro_daemon_name=ORIGINATING
smtps inet n - n - - smtpd
# -o syslog_name=postfix/smtps
-o smtpd_tls_wrappermode=yes
-o smtpd_sasl_auth_enable=yes
# -o smtpd_reject_unlisted_recipient=no
# -o smtpd_client_restrictions=$mua_client_restrictions
# -o smtpd_helo_restrictions=$mua_helo_restrictions
# -o smtpd_sender_restrictions=$mua_sender_restrictions
-o smtpd_recipient_restrictions=permit_sasl_authenticated,reject
# -o milter_macro_daemon_name=ORIGINATING
#628 inet n - n - - qmqpd
pickup unix n - n 60 1 pickup
cleanup unix n - n - 0 cleanup
qmgr unix n - n 300 1 qmgr
#qmgr unix n - n 300 1 oqmgr
tlsmgr unix - - n 1000? 1 tlsmgr
rewrite unix - - n - - trivial-rewrite
bounce unix - - n - 0 bounce
defer unix - - n - 0 bounce
trace unix - - n - 0 bounce
verify unix - - n - 1 verify
flush unix n - n 1000? 0 flush
proxymap unix - - n - - proxymap
proxywrite unix - - n - 1 proxymap
smtp unix - - n - - smtp
relay unix - - n - - smtp
# -o smtp_helo_timeout=5 -o smtp_connect_timeout=5
showq unix n - n - - showq
error unix - - n - - error
retry unix - - n - - error
discard unix - - n - - discard
local unix - n n - - local
virtual unix - n n - - virtual
lmtp unix - - n - - lmtp
anvil unix - - n - 1 anvil
scache unix - - n - 1 scache
postlog unix-dgram n - n - 1 postlogd
2525 inet n - n - 1 postscreen
#-o postscreen_upstream_proxy_protocol=haproxy
-o postscreen_cache_map=btree:$data_directory/postscreen_2525_cache
-o syslog_name=postfix/2525
10587 inet n - n - - smtpd
-o syslog_name=postfix/10587
-o smtpd_tls_security_level=encrypt
-o smtpd_tls_wrappermode=no
-o smtpd_sasl_auth_enable=yes
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
-o smtpd_recipient_restrictions=permit_mynetworks,permit_sasl_authenticated,reject
-o milter_macro_daemon_name=ORIGINATING
-o smtpd_sasl_type=dovecot
-o smtpd_sasl_path=inet:{{ dovecot_short_name | default('dovecot') }}.{{ namespace }}.svc.cluster.local:12345
#-o smtpd_upstream_proxy_protocol=haproxy
10465 inet n - n - - smtpd
-o syslog_name=postfix/10465
-o smtpd_tls_wrappermode=yes
-o smtpd_sasl_auth_enable=yes
-o smtpd_recipient_restrictions=permit_mynetworks,permit_sasl_authenticated,reject
-o milter_macro_daemon_name=ORIGINATING
-o smtpd_sasl_type=dovecot
-o smtpd_sasl_path=inet:{{ dovecot_short_name | default('dovecot') }}.{{ namespace }}.svc.cluster.local:12345
#-o smtpd_upstream_proxy_protocol=haproxy
#
# ====================================================================
# Interfaces to non-Postfix software. Be sure to examine the manual
# pages of the non-Postfix software to find out what options it wants.
#
# Many of the following services use the Postfix pipe(8) delivery
# agent. See the pipe(8) man page for information about ${recipient}
# and other message envelope options.
# ====================================================================
#
# maildrop. See the Postfix MAILDROP_README file for details.
# Also specify in main.cf: maildrop_destination_recipient_limit=1
#
#maildrop unix - n n - - pipe
# flags=DRhu user=vmail argv=/usr/local/bin/maildrop -d ${recipient}
#
# ====================================================================
#
# Recent Cyrus versions can use the existing "lmtp" master.cf entry.
#
# Specify in cyrus.conf:
# lmtp cmd="lmtpd -a" listen="localhost:lmtp" proto=tcp4
#
# Specify in main.cf one or more of the following:
# mailbox_transport = lmtp:inet:localhost
# virtual_transport = lmtp:inet:localhost
#
# ====================================================================
#
# Cyrus 2.1.5 (Amos Gouaux)
# Also specify in main.cf: cyrus_destination_recipient_limit=1
#
#cyrus unix - n n - - pipe
# user=cyrus argv=/usr/lib/cyrus-imapd/deliver -e -r ${sender} -m ${extension} ${user}
#
# ====================================================================
#
# Old example of delivery via Cyrus.
#
#old-cyrus unix - n n - - pipe
# flags=R user=cyrus argv=/usr/lib/cyrus-imapd/deliver -e -m ${extension} ${user}
#
# ====================================================================
#
# See the Postfix UUCP_README file for configuration details.
#
#uucp unix - n n - - pipe
# flags=Fqhu user=uucp argv=uux -r -n -z -a$sender - $nexthop!rmail ($recipient)
#
# ====================================================================
#
# Other external delivery methods.
#
#ifmail unix - n n - - pipe
# flags=F user=ftn argv=/usr/lib/ifmail/ifmail -r $nexthop ($recipient)
#
#bsmtp unix - n n - - pipe
# flags=Fq. user=bsmtp argv=/usr/local/sbin/bsmtp -f $sender $nexthop $recipient
#
#scalemail-backend unix - n n - 2 pipe
# flags=R user=scalemail argv=/usr/lib/scalemail/bin/scalemail-store
# ${nexthop} ${user} ${extension}
#
#mailman unix - n n - - pipe
# flags=FR user=list argv=/usr/lib/mailman/bin/postfix-to-mailman.py
# ${nexthop} ${user}
#dane unix - - n - - smtp
# -o smtp_dns_support_level=dnssec
# -o smtp_tls_security_level=dane
#policyd-spf unix - n n - 0 spawn user=nobody argv=/usr/libexec/postfix/policyd-spf
ldap-local-recipients: |
service:
type: LoadBalancer
loadBalancerIP: "{{ postfix_loadbalancer_ip | default(omit) }}"

View File

@@ -1,5 +1,10 @@
- set_fact:
postfix_combined_values: "{{ postfix_default_values | combine(postfix_values, recursive=true) }}"
postfix_combined_values: "{{ postfix_default_values | combine(postfix_values | default({}), recursive=true) }}"
when: not mail_oidc
- set_fact:
postfix_combined_values: "{{ postfix_oidc_default_values | combine(postfix_oidc_values | default({}), recursive=true) }}"
when: mail_oidc
- name: Deploy Postfix
kubernetes.core.helm:

View File

@@ -1,3 +1,8 @@
postgres_enabled: true
postgres_operator_enabled: true
postgres_operator_ui_enabled: true
postgres_operator_chart_ref: "ghp/postgres-operator"
postgres_operator_ui_chart_ref: "ghp/postgres-operator-ui"
postgres_operator_ui_short_name: "postgres-operator-ui"
postgres_operator_default_values:
configKubernetes:
@@ -5,16 +10,24 @@ postgres_operator_default_values:
storage_resize_mode: pvc
watched_namespace: "{{ postgres_operator_watch_namespace | default(namespace) }}"
configMajorVersionUpgrade:
# "off": no upgrade, "manual": manifest triggers action, "full": minimal version violation triggers too
major_version_upgrade_mode: "full"
postgres_operator_ui_default_values:
replicaCount: 1
envs:
# IMPORTANT: While operator chart and UI chart are idendependent, this is the interface between
# UI and operator API. Insert the service name of the operator API here!
appUrl: "https://{{ postgres_operator_ui_short_name }}.{{ domain }}"
operatorApiUrl: "http://postgres-operator:8080"
operatorClusterNameLabel: "cluster-name"
resourcesVisible: "False"
targetNamespace: "{{ namespace }}"
teams:
- "acid"
- "{{ postgres_db_team | default(namespace) }}"
# configure UI ingress. If needed: "enabled: true"
ingress:
@@ -24,7 +37,7 @@ postgres_operator_ui_default_values:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
hosts:
- host: "{{ postgres_operator_ui_short_name }}.{{ domain }}"
paths: [""]
paths: ["/"]
tls:
- secretName: "{{ postgres_operator_ui_short_name }}.{{ domain }}-tls"
hosts:
@@ -43,7 +56,7 @@ postgres_db_definitions:
spec:
teamId: "{{ postgres_db_team | default(namespace) }}"
postgresql:
version: "12"
version: "16"
numberOfInstances: 2
volume:
size: "{{ postgres_size | default('10Gi') }}"
@@ -55,6 +68,7 @@ postgres_db_definitions:
roundcube: []
harbor: []
mastodon: []
keycloak: []
databases:
gitea: gitea
bitwarden: bitwarden
@@ -66,6 +80,7 @@ postgres_db_definitions:
harbor_notary_server: harbor
harbor_notary_signer: harbor
mastodon: mastodon
keycloak: keycloak
preparedDatabases:
peertube:
defaultUsers: true

View File

@@ -43,7 +43,8 @@
ALLOW_NOSSL: "true"
- set_fact:
postgres_operator_combined_values: "{{ postgres_operator_default_values | combine(postgres_operator_values, recursive=true) }}"
postgres_operator_combined_values: "{{ postgres_operator_default_values | combine(postgres_operator_values | default({}), recursive=true) }}"
when: postgres_operator_enabled
- name: Deploy Postgres Operator
kubernetes.core.helm:
@@ -54,9 +55,11 @@
chart_version: "{{ postgres_operator_version | default(omit) }}"
release_values: "{{ postgres_operator_combined_values | from_yaml }}"
wait: true
when: postgres_operator_enabled
- set_fact:
postgres_operator_ui_combined_values: "{{ postgres_operator_ui_default_values | combine(postgres_operator_ui_values, recursive=true) }}"
postgres_operator_ui_combined_values: "{{ postgres_operator_ui_default_values | combine(postgres_operator_ui_values | default({}), recursive=true) }}"
when: postgres_operator_ui_enabled
- name: Deploy Postgres Operator UI
kubernetes.core.helm:
@@ -67,6 +70,7 @@
chart_version: "{{ postgres_operator_ui_version | default(omit) }}"
release_values: "{{ postgres_operator_ui_combined_values | from_yaml }}"
wait: true
when: postgres_operator_ui_enabled
- name: Create Postgres databases
k8s:

View File

@@ -14,10 +14,15 @@ default_accounts:
- { name: mastodon_ldap }
- { name: harbor_admin }
- { name: systemuser }
- { name: minio_admin }
- { name: keycloak_admin }
secret_keys:
- { name: peertube }
- { name: harbor }
- { name: mastodon_primary_key }
- { name: mastodon_deterministic_key }
- { name: mastodon_key_derivation_salt }
htpasswd_accounts:
- { name: pypiserver_admin }

View File

@@ -1,47 +1,36 @@
- name: Test if DKIM private key exists
shell: grep -c "dkim_private_key_base64" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: dkim_private_key_test_grep
- name: Test if DKIM public key exists
shell: grep -c "dkim_public_key_base64" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: dkim_public_key_test_grep
- name: Create DKIM keys
docker_container:
name: ddclient
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "sh dkim-key.sh {{ mail_domain | default(domain) }}"
register: dkim_container_output
when: dkim_private_key_test_grep.stdout == '0' or dkim_public_key_test_grep.stdout == '0'
- name: Set ddclient_key
set_fact:
dkim_keys: "{{ dkim_container_output.ansible_facts.docker_container.Output | from_yaml }}"
when: dkim_private_key_test_grep.stdout == '0' or dkim_public_key_test_grep.stdout == '0'
- name: Show DKIM private key
debug:
msg: "ddclient private key: {{ dkim_keys['dkim'][0]['default.private'] | b64decode }}"
verbosity: 2
when: dkim_private_key_test_grep.stdout == '0'
- name: Show DKIM public key
debug:
msg: "ddclient public key: {{ dkim_keys['dkim'][0]['default.txt'] | b64decode }}"
verbosity: 2
when: dkim_public_key_test_grep.stdout == '0'
- name: Write DKIM private key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "dkim_private_key_base64: \"{{ dkim_keys['dkim'][0]['default.private'] }}\""
when: dkim_private_key_test_grep.stdout == '0'
- name: Write DKIM public key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "dkim_public_key_base64: \"{{ dkim_keys['dkim'][0]['default.txt'] }}\""
when: dkim_public_key_test_grep.stdout == '0'
- name: Generate DKIM keys
when: passwords['dkim_public_key_base64'] is not defined or passwords['dkim_private_key_base64'] is not defined
block:
- name: Create DKIM keys
docker_container:
name: ddclient
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "sh dkim-key.sh {{ mail_domain | default(domain) }}"
register: dkim_container_output
- name: Set dkim_keys
set_fact:
dkim_keys: "{{ dkim_container_output.container.Output | from_yaml }}"
- name: Show DKIM private key
debug:
msg: "ddclient private key: {{ dkim_keys['dkim'][0]['default.private'] | b64decode }}"
verbosity: 2
- name: Show DKIM public key
debug:
msg: "ddclient public key: {{ dkim_keys['dkim'][0]['default.txt'] | b64decode }}"
verbosity: 2
- name: Write DKIM private key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "dkim_private_key_base64: \"{{ dkim_keys['dkim'][0]['default.private'] }}\""
- name: Write DKIM public key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "dkim_public_key_base64: \"{{ dkim_keys['dkim'][0]['default.txt'] }}\""

View File

@@ -1,46 +1,36 @@
- name: Test if password exists in file for {{ item.name }}
shell: grep -c "^{{ item.name }}_password" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: password_test_grep
- name: Test if password htpasswd hash exists in file for {{ item.name }}
shell: grep -c "^{{ item.name }}_htpasswd_hash" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: htpasswd_hash_test_grep
- name: Create password for {{ item.name }}
shell: "< /dev/urandom tr -dc A-Za-z0-9 | head -c${1:-64};echo;"
register: password
when: password_test_grep.stdout == '0'
- name: Show password json for {{ item.name }}
debug:
msg: "{{ password }}"
verbosity: 2
when: password_test_grep.stdout == '0'
- name: Create bcrypt hash from password for {{ item.name }}
docker_container:
name: slappasswd
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "htpasswd -B -n -i -b -C 16 {{ item.name }} {{ password.stdout | default(item.name + '_password') }}"
register: docker_container_output
when: htpasswd_hash_test_grep.stdout == '0'
- name: Show docker_container_output for {{ item.name }}
debug:
msg: "{{ docker_container_output }}"
verbosity: 2
- name: Write password for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_password: \"{{ password.stdout }}\""
when: password_test_grep.stdout == '0'
- name: Write htpasswd hash for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_htpasswd_hash: \"{{ docker_container_output.container.Output.split('\n')[0].split(':')[1] }}\""
when: htpasswd_hash_test_grep.stdout == '0'
- name: Generate htpasswd for {{ item.name }}
when: passwords[item.name + '_password'] is not defined or passwords[item.name + '_htpasswd_hash'] is not defined
block:
- name: Create password for {{ item.name }}
shell: "< /dev/urandom tr -dc A-Za-z0-9 | head -c${1:-64};echo;"
register: password
- name: Show password json for {{ item.name }}
debug:
msg: "{{ password }}"
verbosity: 2
- name: Create bcrypt hash from password for {{ item.name }}
docker_container:
name: slappasswd
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "htpasswd -B -n -i -b -C 16 {{ item.name }} {{ password.stdout | default(item.name + '_password') }}"
register: docker_container_output
- name: Show docker_container_output for {{ item.name }}
debug:
msg: "{{ docker_container_output }}"
verbosity: 2
- name: Write password for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_password: \"{{ password.stdout }}\""
- name: Write htpasswd hash for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_htpasswd_hash: \"{{ docker_container_output.container.Output.split('\n')[0].split(':')[1] }}\""

View File

@@ -1,7 +1,22 @@
- name: Create passwords.yaml file
- name: Check that passwords.yaml exists
stat:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
register: passwords_file
- name: Create passwords.yaml file if not exists
file:
name: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
state: touch
when: not passwords_file.stat.exists
- name: Read passwords.yaml file
slurp:
src: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
register: passwords_b64
- name: Set facts about passwords
set_fact:
passwords: "{{ passwords_b64['content'] | b64decode | from_yaml }}"
- name: Create files directory for ddclient tsig
file:

View File

@@ -1,49 +1,34 @@
- name: Test if password exists in file for {{ item.name }}
shell: grep -c "^{{ item.name }}_password" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: password_test_grep
- name: Test if password pbkdf2-sha512 hash exists in file for {{ item.name }}
shell: grep -c "^{{ item.name }}_pbkdf2_sha512_hash" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: pbkdf2_sha512_hash_test_grep
- name: Generate password for {{ item.name }}
when: passwords[item.name + '_password'] is not defined
block:
- name: Create password for {{ item.name }}
shell: "< /dev/urandom tr -dc A-Za-z0-9 | head -c${1:-64};echo;"
register: password
- name: Show password json for {{ item.name }}
debug:
msg: "{{ password }}"
verbosity: 2
- name: Create password for {{ item.name }}
shell: "< /dev/urandom tr -dc A-Za-z0-9 | head -c${1:-64};echo;"
register: password
when: password_test_grep.stdout == '0'
- name: Write password for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_password: \"{{ password.stdout }}\""
- name: Show password json for {{ item.name }}
debug:
msg: "{{ password }}"
verbosity: 2
when: password_test_grep.stdout == '0'
- name: Create PBKDF2-SHA512 hash from password for {{ item.name }}
docker_container:
name: slappasswd
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "slappasswd -o module-load=pw-pbkdf2 -h {PBKDF2-SHA512} -s {{ password.stdout | default(item.name + '_password') }}"
register: docker_container_output
when: pbkdf2_sha512_hash_test_grep.stdout == '0'
- debug:
msg: "{{ docker_container_output }}"
- name: Show docker_container_output for {{ item.name }}
debug:
msg: "{{ docker_container_output }}"
verbosity: 2
- name: Write password for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_password: \"{{ password.stdout }}\""
when: password_test_grep.stdout == '0'
- name: Write PBKDF2-SHA512 hash for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_pbkdf2_sha512_hash: \"{{ docker_container_output.container.Output.split('\n')[0] }}\""
when: pbkdf2_sha512_hash_test_grep.stdout == '0'
- name: Generate password for {{ item.name }}
when: passwords[item.name + '_pbkdf2_sha512_hash'] is not defined
block:
- name: Create PBKDF2-SHA512 hash from password for {{ item.name }}
docker_container:
name: slappasswd
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "slappasswd -o module-load=pw-pbkdf2 -h {PBKDF2-SHA512} -s {{ password.stdout | default(item.name + '_password') }}"
register: docker_container_output
- name: Write PBKDF2-SHA512 hash for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_pbkdf2_sha512_hash: \"{{ docker_container_output.container.Output.split('\n')[0] }}\""

View File

@@ -1,20 +1,16 @@
- name: Test if secret exists in file for {{ item.name }}
shell: grep -c "^{{ item.name }}_secret" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: secret_test_grep
- name: Create secret for {{ item.name }}
shell: "openssl rand -hex 32"
register: secret
when: secret_test_grep.stdout == '0'
- name: Show secret json for {{ item.name }}
debug:
msg: "{{ secret }}"
verbosity: 2
when: secret_test_grep.stdout == '0'
- name: Write secret for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_secret: \"{{ secret.stdout }}\""
when: secret_test_grep.stdout == '0'
when: passwords[item.name + '_secret'] is not defined
block:
- name: Create secret for {{ item.name }}
shell: "openssl rand -hex 32"
register: secret
- name: Show secret json for {{ item.name }}
debug:
msg: "{{ secret }}"
verbosity: 2
- name: Write secret for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_secret: \"{{ secret.stdout }}\""

View File

@@ -1,99 +1,78 @@
- name: Test if k8s TSIG key exists
shell: grep -c "k8s_tsig" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: k8s_tsig_test_grep
- name: Generate K8s TSIG for Knot DNS
when: passwords['k8s_tsig'] is not defined
block:
- name: Generate k8s TSIG key for Knot DNS
docker_container:
name: keymgr
image: "{{ docker_registry }}/tsig"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "keymgr -t k8s-{{ k8s_cluster_name }}-{{ namespace }} hmac-sha512"
register: knot_container_output
- name: Set k8s_key
set_fact:
k8s_key: "{{ knot_container_output.container.Output | from_yaml }}"
- name: Show k8s TSIG key
debug:
msg: "Knot k8s key for k8s-{{ k8s_cluster_name }}-{{ namespace }}: {{ k8s_key['key'][0]['secret'] }}"
- name: Write TSIG for Kubernetes
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "k8s_tsig: \"{{ k8s_key['key'][0]['secret'] }}\""
- name: Test if ddclient TSIG key exists
shell: grep -c "ddclient_tsig" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: ddclient_tsig_test_grep
- name: Generate ddclient private and public TSIG keys for Knot DNS
when:
- passwords['ddclient_tsig_public_key_base64'] is not defined or passwords['ddclient_tsig_private_key_base64'] is not defined
block:
- name: Generate TSIG key for ddclient
docker_container:
name: ddclient
image: "{{ docker_registry }}/tsig"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "bash tsig-key.sh ddclient-{{ k8s_cluster_name }}-{{ namespace }}"
register: ddclient_container_output
- name: Set ddclient_key
set_fact:
ddclient_key: "{{ ddclient_container_output.container.Output | from_yaml }}"
- name: Show ddclient TSIG public key file
debug:
msg: "ddclient key for ddclient-{{ k8s_cluster_name }}-{{ namespace }}: {{ ddclient_key['tsig'][0]['key'] | b64decode }}"
verbosity: 2
- name: Show ddclient TSIG private key file
debug:
msg: "ddclient key for ddclient-{{ k8s_cluster_name }}-{{ namespace }}: {{ ddclient_key['tsig'][0]['private'] | b64decode }}"
verbosity: 2
- name: Write ddclient TSIG public key file in base64
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "ddclient_tsig_public_key_base64: \"{{ ddclient_key['tsig'][0]['key'] }}\""
- name: Show ddclient TSIG key
debug:
msg: "{{ ddclient_tsig_key }}"
verbosity: 2
- name: Write ddclient TSIG key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "ddclient_tsig: \"{{ ddclient_tsig_key['Key'] }}\""
- name: Test if ddclient TSIG public key exists
shell: grep -c "ddclient_tsig_public_key_base64" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: ddclient_tsig_public_key_test_grep
- name: Write ddclient TSIG private key file in base64
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "ddclient_tsig_private_key_base64: \"{{ ddclient_key['tsig'][0]['private'] }}\""
- name: Test if ddclient TSIG private key exists
shell: grep -c "ddclient_tsig_private_key_base64" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: ddclient_tsig_private_key_test_grep
- name: Generate k8s TSIG key for Knot DNS
docker_container:
name: keymgr
image: "{{ docker_registry }}/tsig"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "keymgr -t k8s hmac-sha512"
register: knot_container_output
when: k8s_tsig_test_grep.stdout == '0'
- name: Set k8s_key
set_fact:
k8s_key: "{{ knot_container_output.ansible_facts.docker_container.Output | from_yaml }}"
when: k8s_tsig_test_grep.stdout == '0'
- name: Show k8s TSIG key
debug:
msg: "Knot k8s key: {{ k8s_key['key'][0]['secret'] }}"
when: k8s_tsig_test_grep.stdout == '0'
- name: Write TSIG for Kubernetes
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "k8s_tsig: \"{{ k8s_key['key'][0]['secret'] }}\""
when: k8s_tsig_test_grep.stdout == '0'
- name: Generate TSIG key for ddclient
docker_container:
name: ddclient
image: "{{ docker_registry }}/tsig"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "bash tsig-key.sh {{ namespace }}"
register: ddclient_container_output
when: ddclient_tsig_public_key_test_grep.stdout == '0' or ddclient_tsig_private_key_test_grep.stdout == '0'
- name: Set ddclient_key
set_fact:
ddclient_key: "{{ ddclient_container_output.ansible_facts.docker_container.Output | from_yaml }}"
when: ddclient_tsig_public_key_test_grep.stdout == '0' or ddclient_tsig_private_key_test_grep.stdout == '0'
- name: Show ddclient TSIG public key file
debug:
msg: "ddclient key: {{ ddclient_key['tsig'][0]['key'] | b64decode }}"
verbosity: 2
when: ddclient_tsig_public_key_test_grep.stdout == '0'
- name: Show ddclient TSIG private key file
debug:
msg: "ddclient key: {{ ddclient_key['tsig'][0]['private'] | b64decode }}"
verbosity: 2
when: ddclient_tsig_private_key_test_grep.stdout == '0'
- name: Write ddclient TSIG public key file in base64
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "ddclient_tsig_public_key_base64: \"{{ ddclient_key['tsig'][0]['key'] }}\""
when: ddclient_tsig_public_key_test_grep.stdout == '0'
- name: Write ddclient TSIG private key file in base64
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "ddclient_tsig_private_key_base64: \"{{ ddclient_key['tsig'][0]['private'] }}\""
when: ddclient_tsig_private_key_test_grep.stdout == '0'
- name: Set ddclient TSIG key
set_fact:
ddclient_tsig_key: "{{ ddclient_key['tsig'][0]['private'] | b64decode | from_yaml }}"
when: ddclient_tsig_test_grep.stdout == '0'
- name: Show ddclient TSIG key
debug:
msg: "{{ ddclient_tsig_key }}"
verbosity: 2
when: ddclient_tsig_test_grep.stdout == '0'
- name: Write ddclient TSIG key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "ddclient_tsig: \"{{ ddclient_tsig_key['Key'] }}\""
when: ddclient_tsig_test_grep.stdout == '0'
- name: Set ddclient TSIG key
set_fact:
ddclient_tsig_key: "{{ ddclient_key['tsig'][0]['private'] | b64decode | from_yaml }}"

View File

@@ -1,47 +1,36 @@
- name: Test if VAPID private key exists
shell: grep -c "^{{ item.name }}_vapid_private_key_base64" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: vapid_private_key_test_grep
- name: Test if VAPID public key exists
shell: grep -c "^{{ item.name }}_vapid_public_key_base64" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: vapid_public_key_test_grep
- name: Create VAPID keys
docker_container:
name: vapid
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "/vapid"
register: vapid_container_output
when: vapid_private_key_test_grep.stdout == '0' or vapid_public_key_test_grep.stdout == '0'
- name: Set VAPID keys fact
set_fact:
vapid_keys: "{{ vapid_container_output.ansible_facts.docker_container.Output | from_yaml }}"
when: vapid_private_key_test_grep.stdout == '0' or vapid_public_key_test_grep.stdout == '0'
- name: Show VAPID private key
debug:
msg: "vapid private key: {{ vapid_keys['vapidPrivateKey'] }}"
verbosity: 2
when: vapid_private_key_test_grep.stdout == '0'
- name: Show VAPID public key
debug:
msg: "vapid public key: {{ vapid_keys['vapidPublicKey'] }}"
verbosity: 2
when: vapid_public_key_test_grep.stdout == '0'
- name: Write VAPID private key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_vapid_private_key_base64: \"{{ vapid_keys['vapidPrivateKey'] | b64encode }}\""
when: vapid_private_key_test_grep.stdout == '0'
- name: Write VAPID public key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_vapid_public_key_base64: \"{{ vapid_keys['vapidPublicKey'] | b64encode }}\""
when: vapid_public_key_test_grep.stdout == '0'
- name: Generate VAPID keys
when: passwords[item.name + '_vapid_public_key_base64'] is not defined or passwords[item.name + '_vapid_private_key_base64'] is not defined
block:
- name: Create VAPID keys
docker_container:
name: vapid
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "/vapid"
register: vapid_container_output
- name: Set VAPID keys fact
set_fact:
vapid_keys: "{{ vapid_container_output.container.Output | from_yaml }}"
- name: Show VAPID private key
debug:
msg: "vapid private key: {{ vapid_keys['vapidPrivateKey'] }}"
verbosity: 2
- name: Show VAPID public key
debug:
msg: "vapid public key: {{ vapid_keys['vapidPublicKey'] }}"
verbosity: 2
- name: Write VAPID private key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_vapid_private_key_base64: \"{{ vapid_keys['vapidPrivateKey'] | b64encode }}\""
- name: Write VAPID public key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_vapid_public_key_base64: \"{{ vapid_keys['vapidPublicKey'] | b64encode }}\""

View File

@@ -1,7 +1,7 @@
- set_fact:
pypiserver_combined_values: "{{ pypiserver_default_values | combine(pypiserver_values, recursive=true) }}"
pypiserver_combined_values: "{{ pypiserver_default_values | combine(pypiserver_values | default({}), recursive=true) }}"
- name: Deploy Docker pypiserver
- name: Deploy pypiserver
kubernetes.core.helm:
create_namespace: true
release_namespace: "{{ pypiserver_namespace | default(namespace) }}"

View File

@@ -15,10 +15,14 @@ roundcube_default_values:
value: "{{ roundcube_db_password }}"
- name: ROUNDCUBEMAIL_DB_NAME
value: roundcube
- name: ROUNDCUBEMAIL_IMAP_HOST
value: "ssl://{{ mail_short_name | default('mail') }}.{{ domain }}:993"
- name: ROUNDCUBEMAIL_DEFAULT_HOST
value: "ssl://{{ mail_short_name | default('mail') }}.{{ domain }}"
- name: ROUNDCUBEMAIL_DEFAULT_PORT
value: "993"
- name: ROUNDCUBEMAIL_SMTP_HOST
value: "ssl://{{ mail_short_name | default('mail') }}.{{ domain }}:465"
- name: ROUNDCUBEMAIL_SMTP_SERVER
value: "ssl://{{ mail_short_name | default('mail') }}.{{ domain }}"
- name: ROUNDCUBEMAIL_SMTP_PORT

View File

@@ -5,7 +5,7 @@
- postgres_enabled is defined and postgres_enabled
- set_fact:
roundcube_combined_values: "{{ roundcube_default_values | combine(roundcube_values, recursive=true) }}"
roundcube_combined_values: "{{ roundcube_default_values | combine(roundcube_values | default({}), recursive=true) }}"
- name: Deploy RoundCube
kubernetes.core.helm:

View File

@@ -1,5 +1,5 @@
- set_fact:
rspamd_combined_values: "{{ rspamd_default_values | combine(rspamd_values, recursive=true) }}"
rspamd_combined_values: "{{ rspamd_default_values | combine(rspamd_values | default({}), recursive=true) }}"
- name: Deploy Rspamd
kubernetes.core.helm:

View File

@@ -1,4 +1,6 @@
service_dns_chart_ref: "ghp/external-dns"
service_dns_chart_ref: "bitnami/external-dns"
service_dns_tsigKeyname: "k8s-{{ k8s_cluster_name }}-{{ namespace }}"
service_dns_tsigSecretAlg: "hmac-sha512"
service_dns_default_values:
fullnameOverride: "{{ service_dns_name | default(namespace + '-service-dns') }}"
domainFilters: ["{{ service_domain | default(domain) }}"]
@@ -9,8 +11,8 @@ service_dns_default_values:
port: 53
zone: "{{ service_domain | default(domain) }}"
tsigSecret: "{{ k8s_tsig }}"
tsigSecretAlg: "{{ service_dns_tsigSecretAlg | default('hmac-sha512') }}"
tsigKeyname: "{{ service_dns_tsigKeyname | default('k8s') }}"
tsigSecretAlg: "{{ service_dns_tsigSecretAlg }}"
tsigKeyname: "{{ service_dns_tsigKeyname }}"
tsigAxfr: true
## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
minTTL: "30s"

View File

@@ -1,5 +1,5 @@
- set_fact:
service_dns_combined_values: "{{ service_dns_default_values | combine(service_dns_values, recursive=true) }}"
service_dns_combined_values: "{{ service_dns_default_values | combine(service_dns_values | default({}), recursive=true) }}"
- name: Deploy service DNS
kubernetes.core.helm:

View File

@@ -5,7 +5,7 @@
- postgres_enabled is defined and postgres_enabled
- set_fact:
wikijs_combined_values: "{{ wikijs_default_values | combine(wikijs_values, recursive=true) }}"
wikijs_combined_values: "{{ wikijs_default_values | combine(wikijs_values | default({}), recursive=true) }}"
- name: Deploy WikiJS
kubernetes.core.helm: