commit b4b740a239ede765c5aaf3e3f6c33928f347367e Author: ace Date: Sat Jan 9 20:54:42 2021 +0300 GHP publish diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 0000000..0e2916b --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,10 @@ +[defaults] +host_key_checking = False +pipelining = True +callback_whitelist = timer, profile_tasks +forks = 50 +roles_path = roles + +[ssh_connection] +pipelining = True +ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null diff --git a/inventory/ghp/sample/group_vars/all/all.yaml b/inventory/ghp/sample/group_vars/all/all.yaml new file mode 100644 index 0000000..24881e1 --- /dev/null +++ b/inventory/ghp/sample/group_vars/all/all.yaml @@ -0,0 +1,128 @@ +# Common # +namespace: ghp +docker_registry: registry.ghp.0xace.cc +domain: example.com +mail_domain: "{{ domain }}" +local_domain: lan +dns_ip: YOUR_RFC2136_DNS_IP +mail_proxy_public_ip: PUBLIC_VPS_IP +mail_proxy_private_ip: "{{ dns_ip }}" +web_proxy_internal_ip: INTERNAL_VPS_IP + +# Core infrastructure # +## Nginx Ingress ## +### Internal ### +internal_ingress_class: "ghp-internal-nginx" +internal_loadbalancer_ip: "192.168.250.0" +### External ### +external_ingress_class: "ghp-external-nginx" +external_loadbalancer_ip: "192.168.250.10" +### Local ### +local_ingress_class: "ghp-local-nginx" +local_loadbalancer_ip: "192.168.250.20" + +## External-dns ## +dns_namespace: dns + +# Shared infrastructure # +## PostgreSQL ## +postgres_enabled: true +postgres_db_namespace: "{{ namespace }}" + +## OpenLDAP ## +openldap_enabled: true +#openldap_size: "10Gi" +#openldap_storage: "nfs-ssd" +openldap_loadbalancer_ip: "192.168.250.2" +openldap_domain: "dc=example,dc=com" +openldap_custom_users: + - { name: myuser1 } + - { name: myuser2 } +openldap_simple_users: + - { name: testuser1, sn: 6001, uid: 6001, gid: 6001 } + - { name: testuser2, sn: 6002, uid: 6002, gid: 6002 } + +## Docker-registry ## +registry_enabled: true +#registry_size: "100Gi" 
+#registry_storage: "nfs-hdd" +registry_publish: false + +## ChartMuseum ## +chartmuseum_enabled: true +#chartmuseum_size: "10Gi" +#chartmuseum_storage: "nfs-hdd" +#chartmuseum_publish: false +#chartmuseum_login: admin +#chartmuseum_pass: + +# End User Applications # +## Email ## +mail_enabled: true +#mailbox_size: "50Gi" +#mailbox_storage: "nfs-hdd" +roundcube_enabled: true +roundcube_publish: false +postfix_loadbalancer_ip: "192.168.250.3" +dovecot_loadbalancer_ip: "192.168.250.4" + +## Nextcloud ## +nextcloud_enabled: true +#nextcloud_size: "20Gi" +#nextcloud_storage: "nfs-ssd" +#nextcloud_pass: +#nextcloud_mail_pass: +nextcloud_publish: true + +## Bitwarden Password Manager ## +bitwarden_enabled: true +#bitwarden_size: "8Gi" +#bitwarden_storage: "nfs-ssd" +#bitwarden_smtp_pass: +bitwarden_publish: false + +## Gitea ## +gitea_enabled: true +#gitea_size: "20Gi" +#gitea_storage: "nfs-ssd" +#gitea_lfs: true +#gitea_lfs_size: "50Gi" +#gitea_lfs_storage: "nfs-hdd" +gitea_publish_web: false +gitea_publish_ssh: false +gitea_loadbalancer_ip: "192.168.250.5" + +## Drone ## +drone_enabled: true +#drone_size: "10Gi" +#drone_storage: "nfs-ssd" +#drone_gitea_client_id: +#drone_gitea_client_secret: +drone_publish: false + +### WikiJS ### +wikijs_enabled: true +wikijs_publish: false + +### Playmaker ### +playmaker_enabled: false +playmaker_publish: false + +### Pypiserver ### +pypiserver_enabled: false +pypiserver_publish: false + +### PeerTube ### +peertube_enabled: false +peertube_publish: false +#peertube_size: "100Gi" +#peertube_storage: "nfs-hdd" + +### Adguard Home ### +adguard_enabled: false +adguard_publish: false +adguard_loadbalancer_ip: "192.168.250.6" +#adguard_config_size: "20Mi" +#adguard_config_storage: "nfs-ssd" +#adguard_work_size: "10Gi" +#adguard_work_storage: "nfs-ssd" diff --git a/inventory/ghp/sample/group_vars/all/passwords.yaml b/inventory/ghp/sample/group_vars/all/passwords.yaml new file mode 100644 index 0000000..e69de29 diff --git 
a/inventory/ghp/sample/group_vars/ddclient.yaml b/inventory/ghp/sample/group_vars/ddclient.yaml new file mode 100644 index 0000000..706353b --- /dev/null +++ b/inventory/ghp/sample/group_vars/ddclient.yaml @@ -0,0 +1,36 @@ +ddclient_conf: | + daemon=300 + syslog=yes + mail-failure=root + pid=/var/run/ddclient/ddclient.pid + ssl=yes + debug=yes + verbose=yes + + {% for host in ddclient_hosts %} + {% if host != 'omitme' %} + use=web + web=checkip.dyndns.org + protocol=nsupdate + server={{ external_dns_ip | default(dns_ip) }} + login=/usr/bin/nsupdate + password=/config/Kvps.key + zone={{ domain }} + ttl=60 + {{ host }} + + {% endif %} + {% endfor %} + +ddclient_hosts: + - "{% if nextcloud_publish | default(false) %}nextcloud.{{ domain }}{% else %}omitme{% endif %}" + - "{% if drone_publish | default(false) %}drone.{{ domain }}{% else %}omitme{% endif %}" + - "{% if gitea_publish | default(false) %}gitea.{{ domain }}{% else %}omitme{% endif %}" + - "{% if bitwarden_publish | default(false) %}bitwarden.{{ domain }}{% else %}omitme{% endif %}" + - "{% if wikijs_publish | default(false) %}wikijs.{{ domain }}{% else %}omitme{% endif %}" + - "{% if chartmuseum_publish | default(false) %}charts.{{ domain }}{% else %}omitme{% endif %}" + - "{% if registry_publish | default(false) %}registry.{{ domain }}{% else %}omitme{% endif %}" + - "{% if peertube_publish | default(false) %}peertube.{{ domain }}{% else %}omitme{% endif %}" + - "{{ registry_readonly_ingress | default('omitme') }}" + - "{{ chartmuseum_readonly_ingress | default('omitme') }}" + - "{{ wikijs_readonly_ingress | default('omitme') }}" diff --git a/inventory/ghp/sample/group_vars/k8s/adguard.yaml b/inventory/ghp/sample/group_vars/k8s/adguard.yaml new file mode 100644 index 0000000..15b25a9 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/adguard.yaml @@ -0,0 +1 @@ +adguard_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/bitwarden.yaml b/inventory/ghp/sample/group_vars/k8s/bitwarden.yaml new 
file mode 100644 index 0000000..c16718f --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/bitwarden.yaml @@ -0,0 +1 @@ +bitwarden_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/cert-manager.yaml b/inventory/ghp/sample/group_vars/k8s/cert-manager.yaml new file mode 100644 index 0000000..630cac5 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/cert-manager.yaml @@ -0,0 +1 @@ +cert_manager_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/chartmuseum.yaml b/inventory/ghp/sample/group_vars/k8s/chartmuseum.yaml new file mode 100644 index 0000000..94a6136 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/chartmuseum.yaml @@ -0,0 +1 @@ +chartmuseum_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/dovecot.yaml b/inventory/ghp/sample/group_vars/k8s/dovecot.yaml new file mode 100644 index 0000000..6dd1960 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/dovecot.yaml @@ -0,0 +1 @@ +dovecot_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/drone.yaml b/inventory/ghp/sample/group_vars/k8s/drone.yaml new file mode 100644 index 0000000..4fa8c89 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/drone.yaml @@ -0,0 +1,2 @@ +drone_values: {} +drone_runner_kube_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/external-dns.yaml b/inventory/ghp/sample/group_vars/k8s/external-dns.yaml new file mode 100644 index 0000000..c500f15 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/external-dns.yaml @@ -0,0 +1 @@ +external_dns_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/external-ingress-nginx.yaml b/inventory/ghp/sample/group_vars/k8s/external-ingress-nginx.yaml new file mode 100644 index 0000000..0064009 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/external-ingress-nginx.yaml @@ -0,0 +1 @@ +external_ingress_nginx_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/gitea.yaml b/inventory/ghp/sample/group_vars/k8s/gitea.yaml new file mode 100644 index 0000000..648a1c4 
--- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/gitea.yaml @@ -0,0 +1,3 @@ +gitea_values: {} +gitea_ingress_nginx_values: {} +gitea_dns_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/internal-dns.yaml b/inventory/ghp/sample/group_vars/k8s/internal-dns.yaml new file mode 100644 index 0000000..f361121 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/internal-dns.yaml @@ -0,0 +1 @@ +internal_dns_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/internal-ingress-nginx.yaml b/inventory/ghp/sample/group_vars/k8s/internal-ingress-nginx.yaml new file mode 100644 index 0000000..cc3266e --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/internal-ingress-nginx.yaml @@ -0,0 +1 @@ +internal_ingress_nginx_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/local-dns.yaml b/inventory/ghp/sample/group_vars/k8s/local-dns.yaml new file mode 100644 index 0000000..0f0edee --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/local-dns.yaml @@ -0,0 +1 @@ +local_dns_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/local-ingress-nginx.yaml b/inventory/ghp/sample/group_vars/k8s/local-ingress-nginx.yaml new file mode 100644 index 0000000..7e46ba8 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/local-ingress-nginx.yaml @@ -0,0 +1 @@ +local_ingress_nginx_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/metallb.yaml b/inventory/ghp/sample/group_vars/k8s/metallb.yaml new file mode 100644 index 0000000..da313d7 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/metallb.yaml @@ -0,0 +1,13 @@ +metallb_values: + configInline: + peers: + - peer-address: 192.168.5.1 + peer-asn: 64601 + my-asn: 65500 + address-pools: + - name: default + protocol: bgp + bgp-advertisements: + - aggregation-length: 24 + addresses: + - 192.168.250.0/24 diff --git a/inventory/ghp/sample/group_vars/k8s/metrics-server.yaml b/inventory/ghp/sample/group_vars/k8s/metrics-server.yaml new file mode 100644 index 0000000..6b1f75b --- /dev/null 
+++ b/inventory/ghp/sample/group_vars/k8s/metrics-server.yaml @@ -0,0 +1 @@ +metrics_server_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/nextcloud.yaml b/inventory/ghp/sample/group_vars/k8s/nextcloud.yaml new file mode 100644 index 0000000..a974cad --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/nextcloud.yaml @@ -0,0 +1,43 @@ +nextcloud_values: + nextcloud: + configs: + mail.fix.config.php: |- + 60, + ); + fix.config.php: |- + ['{{ web_proxy_internal_ip }}'], + 'overwriteprotocol' => 'https', + 'overwrite.cli.url' => 'https://nextcloud.{{ domain }}', + 'mail_smtpstreamoptions' => + array ( + 'ssl' => + array ( + 'allow_self_signed' => true, + 'verify_peer' => false, + 'verify_peer_name' => false, + ), + ), + ); + rgw.config.php: |- + array( + 'class' => '\\OC\\Files\\ObjectStore\\S3', + 'arguments' => array( + 'bucket' => 'nextcloud', + 'autocreate' => true, + 'key' => 'K4PNZLSTLIDQJMZUV27P', + 'secret' => 'iPScsni8RS2aT9MFymfQYLPD7W8dVrRqFpafBKDc', + 'hostname' => 'sds1-osd1.lan', + 'port' => 8080, + 'use_ssl' => false, + 'num_buckets' => 16, + 'region' => 'us-east-1', + 'use_path_style' => true + ), + ), + ); diff --git a/inventory/ghp/sample/group_vars/k8s/nfs-client-provisioner.yaml b/inventory/ghp/sample/group_vars/k8s/nfs-client-provisioner.yaml new file mode 100644 index 0000000..8476763 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/nfs-client-provisioner.yaml @@ -0,0 +1,15 @@ +nfs_client_provisioner_hdd_values: + replicaCount: 1 + strategyType: Recreate + nfs: + server: + path: + defaultClass: false + +nfs_client_provisioner_ssd_values: + replicaCount: 1 + strategyType: Recreate + nfs: + server: + path: + defaultClass: true diff --git a/inventory/ghp/sample/group_vars/k8s/opendkim.yaml b/inventory/ghp/sample/group_vars/k8s/opendkim.yaml new file mode 100644 index 0000000..2830efa --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/opendkim.yaml @@ -0,0 +1 @@ +opendkim_values: {} diff --git 
a/inventory/ghp/sample/group_vars/k8s/opendmarc.yaml b/inventory/ghp/sample/group_vars/k8s/opendmarc.yaml new file mode 100644 index 0000000..10d461c --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/opendmarc.yaml @@ -0,0 +1 @@ +opendmarc_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/openldap.yaml b/inventory/ghp/sample/group_vars/k8s/openldap.yaml new file mode 100644 index 0000000..928cd87 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/openldap.yaml @@ -0,0 +1,58 @@ +openldap_values: + customLdifFiles: + 04-custom-users.ldif: |- + dn: uid=myuser1,ou=users,{{ openldap_domain }} + changetype: add + uid: myuser1 + cn: myuser1 + sn: 5001 + objectClass: top + objectClass: posixAccount + objectClass: inetOrgPerson + loginShell: /bin/bash + homeDirectory: /home/myuser1 + uidNumber: 5001 + gidNumber: 5001 + userPassword: {{ myuser1_pbkdf2_sha512_hash }} + mail: myuser1@{{ domain }} + mail: myuser1_second_mail@{{ domain }} + gecos: myuser1 description + + dn: uid=myuser2,ou=users,{{ openldap_domain }} + changetype: add + uid: myuser2 + cn: myuser2 + sn: 5002 + objectClass: top + objectClass: posixAccount + objectClass: inetOrgPerson + loginShell: /bin/bash + homeDirectory: /home/myuser2 + uidNumber: 5002 + gidNumber: 5002 + userPassword: {{ myuser2_pbkdf2_sha512_hash }} + mail: myuser2@{{ domain }} + mail: myuser2_second_mail@{{ domain }} + gecos: myuser2 description + + + 05-autogen-simple-users.ldif: |- + {% for user in openldap_simple_users %} + dn: uid={{ user.name }},ou=users,{{ openldap_domain }} + changetype: add + uid: {{ user.name }} + cn: {{ user.name }} + sn: {{ user.sn }} + objectClass: top + objectClass: posixAccount + objectClass: inetOrgPerson + loginShell: /bin/bash + homeDirectory: /home/{{ user.name }} + uidNumber: {{ user.uid }} + gidNumber: {{ user.gid }} + userPassword: {{ hostvars[inventory_hostname][user.name + '_pbkdf2_sha512_hash'] | default('nopass') }} + mail: {{ user.name }}@{{ domain }} + gecos: {{ user.name }} 
user + + {% endfor %} + diff --git a/inventory/ghp/sample/group_vars/k8s/peertube.yaml b/inventory/ghp/sample/group_vars/k8s/peertube.yaml new file mode 100644 index 0000000..f4838ff --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/peertube.yaml @@ -0,0 +1 @@ +peertube_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/postfix.yaml b/inventory/ghp/sample/group_vars/k8s/postfix.yaml new file mode 100644 index 0000000..4eac63b --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/postfix.yaml @@ -0,0 +1 @@ +postfix_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/postgres.yaml b/inventory/ghp/sample/group_vars/k8s/postgres.yaml new file mode 100644 index 0000000..63fe8d4 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/postgres.yaml @@ -0,0 +1,2 @@ +postgres_operator_values: {} +postgres_operator_ui_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/registry.yaml b/inventory/ghp/sample/group_vars/k8s/registry.yaml new file mode 100644 index 0000000..1cde5a6 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/registry.yaml @@ -0,0 +1 @@ +registry_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/roundcube.yaml b/inventory/ghp/sample/group_vars/k8s/roundcube.yaml new file mode 100644 index 0000000..60fd651 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/roundcube.yaml @@ -0,0 +1 @@ +roundcube_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/rspamd.yaml b/inventory/ghp/sample/group_vars/k8s/rspamd.yaml new file mode 100644 index 0000000..313cd7b --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/rspamd.yaml @@ -0,0 +1 @@ +rspamd_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/service-dns.yaml b/inventory/ghp/sample/group_vars/k8s/service-dns.yaml new file mode 100644 index 0000000..8ff9996 --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/service-dns.yaml @@ -0,0 +1 @@ +service_dns_values: {} diff --git a/inventory/ghp/sample/group_vars/k8s/wikijs.yaml 
b/inventory/ghp/sample/group_vars/k8s/wikijs.yaml new file mode 100644 index 0000000..a626cdb --- /dev/null +++ b/inventory/ghp/sample/group_vars/k8s/wikijs.yaml @@ -0,0 +1 @@ +wikijs_values: {} diff --git a/inventory/ghp/sample/group_vars/knot_dns.yaml b/inventory/ghp/sample/group_vars/knot_dns.yaml new file mode 100644 index 0000000..426a0c5 --- /dev/null +++ b/inventory/ghp/sample/group_vars/knot_dns.yaml @@ -0,0 +1,87 @@ +knot_conf: | + # This is a sample of a minimal configuration file for Knot DNS. + # See knot.conf(5) or refer to the server documentation. + + server: + rundir: "/run/knot" + user: knot:knot + listen: [ 0.0.0.0@53, ::@53 ] + udp-max-payload: 1232 + + log: + - target: syslog + any: debug + + key: + - id: k8s + algorithm: hmac-sha512 + secret: {{ k8s_tsig }} + + - id: vps + algorithm: hmac-sha512 + secret: {{ ddclient_tsig }} + + remote: + # - id: slave + # address: 192.168.1.1@53 + # + # - id: master + # address: 192.168.2.1@53 + remote: + - id: dns_server + address: 127.0.0.1@53 + + submission: + - id: dns_zone_sbm + parent: [dns_server] + + + acl: + - id: deny_all + deny: on # no action specified and deny on implies denial of all actions + + - id: key_rule + key: [vps, k8s] # Access based just on TSIG key + address: 192.168.0.0/16 + action: [transfer, notify, update] + + # - id: acl_slave + # address: 192.168.1.1 + # action: transfer + + # - id: acl_master + # address: 192.168.2.1 + # action: notify + + template: + - id: default + storage: "/var/lib/knot" + file: "%s.zone" + + policy: + - id: rsa + algorithm: RSASHA512 + ksk-size: 4096 + zsk-size: 2048 + nsec3: on + ksk-submission: dns_zone_sbm + + zone: + - domain: "{{ domain }}" + storage: "/var/lib/knot/zones/" + file: "{{ domain }}.zone" + acl: [deny_all, key_rule] + dnssec-signing: on + dnssec-policy: rsa + zonefile-load: difference + + # # Master zone + # - domain: example.com + # notify: slave + # acl: acl_slave + + # # Slave zone + # - domain: example.net + # master: master + # acl: 
acl_master + diff --git a/inventory/ghp/sample/group_vars/mail_proxy.yaml b/inventory/ghp/sample/group_vars/mail_proxy.yaml new file mode 100644 index 0000000..742f100 --- /dev/null +++ b/inventory/ghp/sample/group_vars/mail_proxy.yaml @@ -0,0 +1,102 @@ +haproxy_config: | + global + chroot /var/lib/haproxy + daemon + group haproxy + maxconn 200000 + nbproc {{ ansible_processor_count }} + pidfile /var/run/haproxy.pid + user haproxy + stats socket /var/run/haproxy.stat + stats bind-process 1 + log 127.0.0.1 local0 + + defaults + log global + maxconn 200000 + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 10m + timeout server 10m + timeout check 10s + + frontend ft_smtp + bind {{ mail_proxy_public_ip }}:25 + bind {{ mail_proxy_private_ip }}:25 + mode tcp + timeout client 1m + log global + option tcplog + default_backend bk_smtp + + backend bk_smtp + mode tcp + log global + option tcplog + timeout server 1m + timeout connect 7s + server postfix {{ postfix_loadbalancer_ip }}:2525 send-proxy + + frontend ft_submission + bind {{ mail_proxy_public_ip }}:587 + bind {{ mail_proxy_private_ip }}:587 + mode tcp + timeout client 1m + log global + option tcplog + default_backend bk_submission + + backend bk_submission + mode tcp + log global + option tcplog + timeout server 1m + timeout connect 7s + server postfix {{ postfix_loadbalancer_ip }}:10587 send-proxy + + frontend ft_submissions + bind {{ mail_proxy_public_ip }}:465 + bind {{ mail_proxy_private_ip }}:465 + mode tcp + timeout client 1m + log global + option tcplog + default_backend bk_submissions + + backend bk_submissions + mode tcp + log global + option tcplog + timeout server 1m + timeout connect 7s + server postfix {{ postfix_loadbalancer_ip }}:10465 send-proxy + + frontend ft_imap + bind {{ mail_proxy_public_ip }}:143 + bind {{ mail_proxy_private_ip }}:143 + mode tcp + default_backend bk_imap + + backend bk_imap + mode tcp + balance leastconn + stick 
store-request src + stick-table type ip size 200k expire 30m + server imap1 {{ dovecot_loadbalancer_ip }}:1109 send-proxy-v2 + + frontend ft_imaps + bind {{ mail_proxy_public_ip }}:993 + bind {{ mail_proxy_private_ip }}:993 + mode tcp + default_backend bk_imaps + + backend bk_imaps + mode tcp + balance leastconn + stick store-request src + stick-table type ip size 200k expire 30m + server imaps1 {{ dovecot_loadbalancer_ip }}:10993 send-proxy-v2 + diff --git a/inventory/ghp/sample/group_vars/web_proxy.yaml b/inventory/ghp/sample/group_vars/web_proxy.yaml new file mode 100644 index 0000000..e5ecd7e --- /dev/null +++ b/inventory/ghp/sample/group_vars/web_proxy.yaml @@ -0,0 +1,97 @@ +nginx: + nginx.conf: | + user nginx; + worker_processes {{ ansible_processor_count }}; + error_log /var/log/nginx/error.log debug; + pid /var/run/nginx.pid; + + events { + worker_connections 4096; + } + + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + sendfile on; + #tcp_nopush on; + keepalive_timeout 65; + #gzip on; + include /etc/nginx/conf.d/*.conf; + } + stream { + server { + listen 443; + proxy_pass $upstream; + ssl_preread on; + proxy_protocol on; + } + map $ssl_preread_server_name $upstream { + include /etc/nginx/stream.d/*.map; + } + + include /etc/nginx/stream.d/*.conf; + } + + stream.d: + - name: "k8s-ghp-{{ namespace }}.map" + data: | + {% if gitea_publish_ssh %} + default gitea_ssh_{{ namespace }}; + {% endif %} + {% if gitea_publish_web %} + gitea.{{ domain }} gitea_web_{{ namespace }}; + {% endif %} + {% if bitwarden_publish %} + bitwarden.{{ domain }} https_{{ namespace }}; + {% endif %} + {% if wikijs_publish %} + wikijs.{{ domain }} https_{{ namespace }}; + {% endif %} + {% if drone_publish %} + drone.{{ domain }} https_{{ 
namespace }}; + {% endif %} + {% if nextcloud_publish %} + nextcloud.{{ domain }} https_{{ namespace }}; + {% endif %} + {% if registry_publish %} + registry.{{ domain }} https_{{ namespace }}; + {% endif %} + {% if registry_readonly_ingress %} + {{ registry_readonly_ingress }} https_{{ namespace }}; + {% endif %} + {% if chartmuseum_publish %} + charts.{{ domain }} https_{{ namespace }}; + {% endif %} + {% if chartmuseum_readonly_ingress %} + {{ chartmuseum_readonly_ingress }} https_{{ namespace }}; + {% endif %} + {% if wikijs_readonly_ingress %} + {{ wikijs_readonly_ingress }} https_{{ namespace }}; + {% endif %} + {% if peertube_publish %} + peertube.{{ domain }} https_{{ namespace }}; + {% endif %} + - name: "k8s-ghp-{{ namespace }}.conf" + data: |- + {% if gitea_publish_ssh %} + upstream gitea_ssh_{{ namespace }} { + server {{ gitea_loadbalancer_ip }}:22; + } + {% endif %} + + {% if gitea_publish_web %} + upstream gitea_web_{{ namespace }} { + server {{ gitea_loadbalancer_ip }}:443; + } + {% endif %} + + upstream https_{{ namespace }} { + server {{ external_loadbalancer_ip }}:443; + } + diff --git a/inventory/ghp/sample/hosts b/inventory/ghp/sample/hosts new file mode 100644 index 0000000..dc8055f --- /dev/null +++ b/inventory/ghp/sample/hosts @@ -0,0 +1,16 @@ +[vps:children] +knot_dns +web_proxy +mail_proxy +ddclient + +[ddclient] + +[web_proxy] + +[mail_proxy] + +[knot_dns] + +[k8s] +localhost ansible_python_interpreter="python" diff --git a/playbooks/ghp/bootstrap.yaml b/playbooks/ghp/bootstrap.yaml new file mode 100644 index 0000000..3669a38 --- /dev/null +++ b/playbooks/ghp/bootstrap.yaml @@ -0,0 +1,27 @@ +- hosts: localhost + connection: local + pre_tasks: + - name: Check docker is working + shell: docker info + register: docker_info + changed_when: "docker_info.rc != 0" + failed_when: "docker_info.rc != 0" + - name: Check Helm installed + shell: helm version + register: helm_version + changed_when: "helm_version.rc != 0" + failed_when: "helm_version.rc 
!= 0" + - name: Helm version + debug: + msg: "{{ helm_version.stdout }}" + - name: Check kubectl installed and have access to cluster + shell: kubectl get nodes + register: kubectl_cluster_nodes + changed_when: "kubectl_cluster_nodes.rc != 0" + failed_when: "kubectl_cluster_nodes.rc != 0" + - name: Kubectl nodes output + debug: + msg: "{{ kubectl_cluster_nodes.stdout.split('\n') }}" + roles: + - helm-repos + - pwgen diff --git a/playbooks/ghp/cert-manager.yaml b/playbooks/ghp/cert-manager.yaml new file mode 100644 index 0000000..bbbb0aa --- /dev/null +++ b/playbooks/ghp/cert-manager.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - cert-manager diff --git a/playbooks/ghp/chartmuseum.yaml b/playbooks/ghp/chartmuseum.yaml new file mode 100644 index 0000000..905c4d0 --- /dev/null +++ b/playbooks/ghp/chartmuseum.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - chartmuseum diff --git a/playbooks/ghp/core-infra.yaml b/playbooks/ghp/core-infra.yaml new file mode 100644 index 0000000..faebb60 --- /dev/null +++ b/playbooks/ghp/core-infra.yaml @@ -0,0 +1,75 @@ +--- +- hosts: k8s + connection: local + tasks: + - name: Deploy MetalLB + import_role: + name: metallb + when: metallb_enabled | default(true) + tags: metallb + + - name: Deploy External Ingress Nginx + import_role: + name: external-ingress-nginx + when: external_ingress_nginx_enabled | default(true) + tags: + - external-ingress-nginx + - ingress-nginx + + - name: Deploy Internal Ingress Nginx + import_role: + name: internal-ingress-nginx + when: internal_ingress_nginx_enabled | default(true) + tags: + - internal-ingress-nginx + - ingress-nginx + + - name: Deploy Local Ingress Nginx + import_role: + name: local-ingress-nginx + when: local_ingress_nginx_enabled | default(true) + tags: + - local-ingress-nginx + - ingress-nginx + + - name: Deploy Internal DNS + import_role: + name: internal-dns + when: internal_dns_enabled | default(true) + tags: + - internal-dns + - dns 
+ + - name: Deploy Local DNS + import_role: + name: local-dns + when: local_dns_enabled | default(true) + tags: + - local-dns + - dns + + - name: Deploy Service DNS + import_role: + name: service-dns + when: service_dns_enabled | default(true) + tags: + - service-dns + - dns + + - name: Deploy Cert-manager + import_role: + name: cert-manager + when: cert_manager_enabled | default(true) + tags: cert-manager + + - name: Deploy NFS-client-provisioner + import_role: + name: nfs-client-provisioner + when: nfs_client_provisioner_enabled | default(true) + tags: nfs-client-provisioner + + - name: Deploy Metrics-server + import_role: + name: metrics-server + when: metrics_server_enabled | default(true) + tags: metrics-server diff --git a/playbooks/ghp/dns.yaml b/playbooks/ghp/dns.yaml new file mode 100644 index 0000000..0a0f35d --- /dev/null +++ b/playbooks/ghp/dns.yaml @@ -0,0 +1,4 @@ +--- +- hosts: knot_dns + roles: + - knot diff --git a/playbooks/ghp/dovecot.yaml b/playbooks/ghp/dovecot.yaml new file mode 100644 index 0000000..7018b88 --- /dev/null +++ b/playbooks/ghp/dovecot.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - dovecot diff --git a/playbooks/ghp/drone.yaml b/playbooks/ghp/drone.yaml new file mode 100644 index 0000000..408dabc --- /dev/null +++ b/playbooks/ghp/drone.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - drone diff --git a/playbooks/ghp/external-ingress-nginx.yaml b/playbooks/ghp/external-ingress-nginx.yaml new file mode 100644 index 0000000..d0ff41a --- /dev/null +++ b/playbooks/ghp/external-ingress-nginx.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - external-ingress-nginx diff --git a/playbooks/ghp/gitea.yaml b/playbooks/ghp/gitea.yaml new file mode 100644 index 0000000..7cf56bc --- /dev/null +++ b/playbooks/ghp/gitea.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - gitea diff --git a/playbooks/ghp/internal-dns.yaml b/playbooks/ghp/internal-dns.yaml 
new file mode 100644 index 0000000..9e51c10 --- /dev/null +++ b/playbooks/ghp/internal-dns.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - internal-dns diff --git a/playbooks/ghp/internal-ingress-nginx.yaml b/playbooks/ghp/internal-ingress-nginx.yaml new file mode 100644 index 0000000..037d0ad --- /dev/null +++ b/playbooks/ghp/internal-ingress-nginx.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - internal-ingress-nginx diff --git a/playbooks/ghp/local-dns.yaml b/playbooks/ghp/local-dns.yaml new file mode 100644 index 0000000..97bcfa9 --- /dev/null +++ b/playbooks/ghp/local-dns.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - local-dns diff --git a/playbooks/ghp/local-ingress-nginx.yaml b/playbooks/ghp/local-ingress-nginx.yaml new file mode 100644 index 0000000..4a979be --- /dev/null +++ b/playbooks/ghp/local-ingress-nginx.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - local-ingress-nginx diff --git a/playbooks/ghp/mail.yaml b/playbooks/ghp/mail.yaml new file mode 100644 index 0000000..3f799a8 --- /dev/null +++ b/playbooks/ghp/mail.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - mail diff --git a/playbooks/ghp/metallb.yaml b/playbooks/ghp/metallb.yaml new file mode 100644 index 0000000..d6011f1 --- /dev/null +++ b/playbooks/ghp/metallb.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - metallb diff --git a/playbooks/ghp/metrics-server.yaml b/playbooks/ghp/metrics-server.yaml new file mode 100644 index 0000000..071f1a3 --- /dev/null +++ b/playbooks/ghp/metrics-server.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - metrics-server diff --git a/playbooks/ghp/nextcloud.yaml b/playbooks/ghp/nextcloud.yaml new file mode 100644 index 0000000..450c5d4 --- /dev/null +++ b/playbooks/ghp/nextcloud.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - nextcloud diff --git 
a/playbooks/ghp/nfs-client-provisioner.yaml b/playbooks/ghp/nfs-client-provisioner.yaml new file mode 100644 index 0000000..c202625 --- /dev/null +++ b/playbooks/ghp/nfs-client-provisioner.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - nfs-client-provisioner diff --git a/playbooks/ghp/opendkim.yaml b/playbooks/ghp/opendkim.yaml new file mode 100644 index 0000000..b74dcfb --- /dev/null +++ b/playbooks/ghp/opendkim.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - opendkim diff --git a/playbooks/ghp/opendmarc.yaml b/playbooks/ghp/opendmarc.yaml new file mode 100644 index 0000000..bded453 --- /dev/null +++ b/playbooks/ghp/opendmarc.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - opendmarc diff --git a/playbooks/ghp/openldap.yaml b/playbooks/ghp/openldap.yaml new file mode 100644 index 0000000..67535fb --- /dev/null +++ b/playbooks/ghp/openldap.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - openldap diff --git a/playbooks/ghp/playmaker.yaml b/playbooks/ghp/playmaker.yaml new file mode 100644 index 0000000..7b4edc6 --- /dev/null +++ b/playbooks/ghp/playmaker.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - playmaker diff --git a/playbooks/ghp/postfix.yaml b/playbooks/ghp/postfix.yaml new file mode 100644 index 0000000..169b472 --- /dev/null +++ b/playbooks/ghp/postfix.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - postfix diff --git a/playbooks/ghp/postgres.yaml b/playbooks/ghp/postgres.yaml new file mode 100644 index 0000000..34bfa5f --- /dev/null +++ b/playbooks/ghp/postgres.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - postgres diff --git a/playbooks/ghp/pypiserver.yaml b/playbooks/ghp/pypiserver.yaml new file mode 100644 index 0000000..659922d --- /dev/null +++ b/playbooks/ghp/pypiserver.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - pypiserver diff --git 
a/playbooks/ghp/registry.yaml b/playbooks/ghp/registry.yaml new file mode 100644 index 0000000..a5de87e --- /dev/null +++ b/playbooks/ghp/registry.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - registry diff --git a/playbooks/ghp/roundcube.yaml b/playbooks/ghp/roundcube.yaml new file mode 100644 index 0000000..c75ca04 --- /dev/null +++ b/playbooks/ghp/roundcube.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - roundcube diff --git a/playbooks/ghp/rspamd.yaml b/playbooks/ghp/rspamd.yaml new file mode 100644 index 0000000..2f4ab67 --- /dev/null +++ b/playbooks/ghp/rspamd.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - rspamd diff --git a/playbooks/ghp/shared-infra.yaml b/playbooks/ghp/shared-infra.yaml new file mode 100644 index 0000000..d1e1767 --- /dev/null +++ b/playbooks/ghp/shared-infra.yaml @@ -0,0 +1,27 @@ +--- +- hosts: k8s + connection: local + tasks: + - name: Deploy PostgreSQL + import_role: + name: postgres + when: postgres_enabled | default(true) + tags: postgres + + - name: Deploy OpenLDAP + import_role: + name: openldap + when: openldap_enabled | default(true) + tags: openldap + + - name: Deploy Docker registry + import_role: + name: registry + when: registry_enabled | default(true) + tags: registry + + - name: Deploy ChartMuseum + import_role: + name: chartmuseum + when: chartmuseum_enabled | default(true) + tags: chartmuseum diff --git a/playbooks/ghp/site.yaml b/playbooks/ghp/site.yaml new file mode 100644 index 0000000..855864a --- /dev/null +++ b/playbooks/ghp/site.yaml @@ -0,0 +1,12 @@ +--- +- name: Deploy Core Infrastructure + import_playbook: core-infra.yaml + tags: core-infra + +- name: Deploy Shared Infrastructure + import_playbook: shared-infra.yaml + tags: shared-infra + +- name: Deploy End User Applications + import_playbook: user-apps.yaml + tags: user-apps diff --git a/playbooks/ghp/user-apps.yaml b/playbooks/ghp/user-apps.yaml new file mode 100644 index 
0000000..fd0606c --- /dev/null +++ b/playbooks/ghp/user-apps.yaml @@ -0,0 +1,63 @@ +--- +- hosts: k8s + connection: local + tasks: + - name: Deploy Mail + import_role: + name: mail + when: mail_enabled | default(true) + tags: mail + + - name: Deploy Nextcloud + import_role: + name: nextcloud + when: nextcloud_enabled | default(true) + tags: nextcloud + + - name: Deploy Bitwarden + import_role: + name: bitwarden + when: bitwarden_enabled | default(true) + tags: bitwarden + + - name: Deploy Gitea + import_role: + name: gitea + when: gitea_enabled | default(true) + tags: gitea + + - name: Deploy Drone + import_role: + name: drone + when: drone_enabled | default(true) + tags: drone + + - name: Deploy WikiJS + import_role: + name: wikijs + when: wikijs_enabled | default(true) + tags: wikijs + + - name: Deploy Playmaker + import_role: + name: playmaker + when: playmaker_enabled | default(false) + tags: playmaker + + - name: Deploy Pypiserver + import_role: + name: pypiserver + when: pypiserver_enabled | default(false) + tags: pypiserver + + - name: Deploy PeerTube + import_role: + name: peertube + when: peertube_enabled | default(false) + tags: peertube + + - name: Deploy Adguard Home + import_role: + name: adguard-home + when: adguard_enabled | default(false) + tags: adguard diff --git a/playbooks/ghp/vps.yaml b/playbooks/ghp/vps.yaml new file mode 100644 index 0000000..db7fc1f --- /dev/null +++ b/playbooks/ghp/vps.yaml @@ -0,0 +1,17 @@ +--- +- hosts: web_proxy + roles: + - nginx + tags: web-proxy + +- hosts: mail_proxy + roles: + - haproxy + tags: mail-proxy + +- hosts: ddclient + roles: + - docker + - role: ddclient + dockerize: true + tags: ddclient diff --git a/playbooks/ghp/wikijs.yaml b/playbooks/ghp/wikijs.yaml new file mode 100644 index 0000000..ae2acce --- /dev/null +++ b/playbooks/ghp/wikijs.yaml @@ -0,0 +1,5 @@ +--- +- hosts: k8s + connection: local + roles: + - wikijs diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 
0000000..39715eb --- /dev/null +++ b/requirements.txt @@ -0,0 +1,37 @@ +ansible==2.10.3 +ansible-base==2.10.3 +attrs==20.3.0 +cachetools==4.1.1 +certifi==2020.6.20 +cffi==1.14.3 +chardet==3.0.4 +cryptography==3.2.1 +docker==4.3.1 +google-auth==1.23.0 +idna==2.10 +importlib-metadata==2.0.0 +Jinja2==2.11.2 +jsonschema==3.2.0 +kubernetes==11.0.0 +kubernetes-validate==1.18.0 +MarkupSafe==1.1.1 +oauthlib==3.1.0 +openshift==0.11.2 +packaging==20.4 +pyasn1==0.4.8 +pyasn1-modules==0.2.8 +pycparser==2.20 +pyparsing==2.4.7 +pyrsistent==0.17.3 +python-dateutil==2.8.1 +python-string-utils==1.0.0 +PyYAML==5.3.1 +requests==2.24.0 +requests-oauthlib==1.3.0 +rsa==4.6 +ruamel.yaml==0.16.12 +ruamel.yaml.clib==0.2.2 +six==1.15.0 +urllib3==1.25.11 +websocket-client==0.57.0 +zipp==3.4.0 diff --git a/roles/adguard-home/defaults/main.yaml b/roles/adguard-home/defaults/main.yaml new file mode 100644 index 0000000..e702c7d --- /dev/null +++ b/roles/adguard-home/defaults/main.yaml @@ -0,0 +1,276 @@ +adguard_enabled: false +adguard_publish: false +adguard_default_values: + # upgrade strategy type (e.g. 
Recreate or RollingUpdate) + strategyType: RollingUpdate + configAsCode: + enabled: true + resources: {} + # requests: + # memory: 128Mi + # cpu: 100m + image: + repository: busybox + tag: latest + pullPolicy: Always + config: + bind_host: 0.0.0.0 + bind_port: 3000 + users: + - name: admin + password: "{{ adguard_admin_htpasswd_hash }}" + http_proxy: "" + language: "en" + rlimit_nofile: 0 + debug_pprof: false + web_session_ttl: 720 + dns: + bind_host: 0.0.0.0 + port: 53 + statistics_interval: 1 + querylog_enabled: true + querylog_interval: 90 + querylog_size_memory: 1000 + anonymize_client_ip: false + protection_enabled: true + blocking_mode: default + blocking_ipv4: "" + blocking_ipv6: "" + blocked_response_ttl: 10 + parental_block_host: family-block.dns.adguard.com + safebrowsing_block_host: standard-block.dns.adguard.com + ratelimit: 0 + ratelimit_whitelist: [] + refuse_any: true + upstream_dns: + - https://dns10.quad9.net/dns-query + bootstrap_dns: + - 9.9.9.10 + - 149.112.112.10 + - 2620:fe::10 + - 2620:fe::fe:10 + all_servers: false + fastest_addr: false + allowed_clients: [] + # - 10.0.0.1 + # - 10.0.1.1/24 + disallowed_clients: [] + # - 10.0.1.1 + # - 10.0.11.1/24 + blocked_hosts: [] + # - example.org + # - '*.example.org' + # - '||example.org^' + cache_size: 4194304 + cache_ttl_min: 0 + cache_ttl_max: 0 + bogus_nxdomain: [] + aaaa_disabled: false + enable_dnssec: false + edns_client_subnet: false + filtering_enabled: true + filters_update_interval: 8 + parental_enabled: false + safesearch_enabled: false + safebrowsing_enabled: false + safebrowsing_cache_size: 1048576 + safesearch_cache_size: 1048576 + parental_cache_size: 1048576 + cache_time: 30 + rewrites: [] + # - domain: example.org + # answer: 127.0.0.1 + # - domain: '*.example.org' + # answer: 127.0.0.1 + blocked_services: + - facebook + - origin + - twitter + - snapchat + - skype + - whatsapp + - instagram + - youtube + - netflix + - twitch + - discord + - amazon + - ebay + - cloudflare + - steam + 
- epic_games + - reddit + - ok + - vk + - mail_ru + - tiktok + tls: + enabled: true + server_name: "{{ adguard_dns_name | default('dns.' + domain) }}" + force_https: false + port_https: 443 + port_dns_over_tls: 853 + allow_unencrypted_doh: false + strict_sni_check: false + certificate_chain: "" + private_key: "" + certificate_path: "/certs/tls.crt" + private_key_path: "/certs/tls.key" + filters: + - enabled: true + url: https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt + name: AdGuard DNS filter + id: 1 + - enabled: false + url: https://adaway.org/hosts.txt + name: AdAway + id: 2 + - enabled: false + url: https://www.malwaredomainlist.com/hostslist/hosts.txt + name: MalwareDomainList.com Hosts List + id: 4 + whitelist_filters: [] + # - enabled: true + # url: https://easylist-downloads.adblockplus.org/exceptionrules.txt + # name: Allow nonintrusive advertising + # id: 1595760241 + user_rules: [] + # - '||example.org^' + # - '@@||example.org^' + # - 127.0.0.1 example.org + # - '! Here goes a comment' + # - '# Also a comment' + dhcp: + enabled: false + interface_name: "" + gateway_ip: "" + subnet_mask: "" + range_start: "" + range_end: "" + lease_duration: 86400 + icmp_timeout_msec: 1000 + clients: [] + # - name: myuser + # tags: + # - user_admin + # ids: + # - 192.168.91.1 + # use_global_settings: true + # filtering_enabled: false + # parental_enabled: false + # safesearch_enabled: false + # safebrowsing_enabled: false + # use_global_blocked_services: true + # blocked_services: [] + # upstreams: [] + log_file: "" + verbose: false + schema_version: 6 + + tlsSecretName: "{{ adguard_dns_name | default('dns.' 
+ domain) }}-secret" + timezone: "UTC" + ingress: + enabled: true + annotations: + cert-manager.io/acme-challenge-type: dns01 + cert-manager.io/acme-dns01-provider: rfc2136 + cert-manager.io/cluster-issuer: letsencrypt-prod + kubernetes.io/ingress.class: "{{ external_ingress_class if adguard_publish else internal_ingress_class }}" + kubernetes.io/tls-acme: "true" + path: / + hosts: + - adguard.{{ domain }} + tls: + - secretName: adguard.{{ domain }}-tls + hosts: + - adguard.{{ domain }} + + service: + type: ClusterIP + # externalTrafficPolicy: Local + # externalIPs: [] + # loadBalancerIP: "" + # a fixed LoadBalancer IP + # loadBalancerSourceRanges: [] + annotations: + # metallb.universe.tf/address-pool: network-services + # metallb.universe.tf/allow-shared-ip: adguard-home-svc + + serviceTCP: + enabled: true + type: LoadBalancer + # externalTrafficPolicy: Local + # externalIPs: [] + loadBalancerIP: "{{ adguard_loadbalancer_ip }}" + # a fixed LoadBalancer IP + # loadBalancerSourceRanges: [] + annotations: + # metallb.universe.tf/address-pool: network-services + metallb.universe.tf/allow-shared-ip: adguard-home-svc + + serviceUDP: + enabled: true + type: LoadBalancer + # externalTrafficPolicy: Local + # externalIPs: [] + loadBalancerIP: "{{ adguard_loadbalancer_ip }}" + # a fixed LoadBalancer IP + # loadBalancerSourceRanges: [] + annotations: + # metallb.universe.tf/address-pool: network-services + metallb.universe.tf/allow-shared-ip: adguard-home-svc + + serviceDNSOverTLS: + enabled: true + ## Enable if you use AdGuard as a DNS over TLS/HTTPS server + type: LoadBalancer + # externalTrafficPolicy: Local + # externalIPs: [] + loadBalancerIP: "{{ adguard_loadbalancer_ip }}" + # a fixed LoadBalancer IP + # loadBalancerSourceRanges: [] + annotations: + # metallb.universe.tf/address-pool: network-services + metallb.universe.tf/allow-shared-ip: adguard-home-svc + + serviceDNSOverHTTPS: + enabled: true + ## Enable if you use AdGuard as a DNS over TLS/HTTPS server + type: 
LoadBalancer + # externalTrafficPolicy: Local + # externalIPs: [] + loadBalancerIP: "{{ adguard_loadbalancer_ip }}" + # a fixed LoadBalancer IP + # loadBalancerSourceRanges: [] + annotations: + # metallb.universe.tf/address-pool: network-services + metallb.universe.tf/allow-shared-ip: adguard-home-svc + external-dns.alpha.kubernetes.io/hostname: "{{ adguard_dns_name | default('dns.' + domain) }}" + + serviceDHCP: + enabled: false + ## Enable if you use AdGuard as a DHCP Server + type: NodePort + # externalTrafficPolicy: Local + # externalIPs: [] + loadBalancerIP: "" + # a fixed LoadBalancer IP + annotations: {} + # metallb.universe.tf/address-pool: network-services + # metallb.universe.tf/allow-shared-ip: adguard-home-svc + + persistence: + config: + enabled: true + accessMode: "{{ adguard_config_storage_mode | default('ReadWriteMany') }}" + size: "{{ adguard_config_size | default('20Mi') }}" + storageClass: "{{ adguard_config_storage | default('nfs-ssd') }}" + ## Do not delete the pvc upon helm uninstall + skipuninstall: false + work: + enabled: true + accessMode: "{{ adguard_work_storage_mode | default('ReadWriteMany') }}" + size: "{{ adguard_work_size | default('10Gi') }}" + storageClass: "{{ adguard_work_storage | default('nfs-ssd') }}" + ## Do not delete the pvc upon helm uninstall + skipuninstall: false diff --git a/roles/adguard-home/tasks/main.yaml b/roles/adguard-home/tasks/main.yaml new file mode 100644 index 0000000..e1ae581 --- /dev/null +++ b/roles/adguard-home/tasks/main.yaml @@ -0,0 +1,32 @@ +- name: Request cert for Adguard Home + k8s: + state: present + definition: + apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + name: "{{ adguard_dns_name | default('dns.' + domain) }}-crt" + namespace: "{{ adguard_namespace | default(namespace) }}" + spec: + secretName: "{{ adguard_dns_name | default('dns.' + domain) }}-secret" + dnsNames: + - "{{ adguard_dns_name | default('dns.' 
+ domain) }}" + issuerRef: + name: letsencrypt-prod + # We can reference ClusterIssuers by changing the kind here. + # The default value is Issuer (i.e. a locally namespaced Issuer) + kind: ClusterIssuer + group: cert-manager.io + +- set_fact: + adguard_combined_values: "{{ adguard_default_values | combine(adguard_values, recursive=true) }}" + +- name: Deploy Adguard Home + community.kubernetes.helm: + create_namespace: true + release_namespace: "{{ adguard_namespace | default(namespace) }}" + release_name: "{{ adguard_name | default('adguard') }}" + chart_ref: "{{ adguard_chart | default('ghp/adguard-home') }}" + chart_version: "{{ adguard_version | default(omit) }}" + release_values: "{{ adguard_combined_values | from_yaml }}" + diff --git a/roles/bitwarden/defaults/main.yaml b/roles/bitwarden/defaults/main.yaml new file mode 100644 index 0000000..fb0de71 --- /dev/null +++ b/roles/bitwarden/defaults/main.yaml @@ -0,0 +1,40 @@ +bitwarden_enabled: true +bitwarden_publish: false +bitwarden_use_external_db: true +bitwarden_default_values: + env: + SIGNUPS_ALLOWED: true + INVITATIONS_ALLOWED: true + DATABASE_URL: "postgresql://{{ bitwarden_db_username }}:{{ bitwarden_db_password }}@{{ postgres_db_team | default(namespace) }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local:5432/bitwarden?sslmode=require" + DOMAIN: "https://bitwarden.{{ domain }}" + SMTP_FROM: "bitwarden@{{ domain }}" + SMTP_HOST: "mail.{{ domain }}" + SMTP_PASSWORD: "{{ bitwarden_ldap_pass | default(bitwarden_ldap_password) }}" + SMTP_SSL: "true" + SMTP_EXPLICIT_TLS: "true" + SMTP_PORT: "465" + SMTP_USERNAME: "bitwarden@{{ domain }}" + SMTP_TIMEOUT: "120" + LOG_LEVEL: "debug" + EXTENDED_LOGGING: "true" + ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: "{{ external_ingress_class if bitwarden_publish else internal_ingress_class }}" + cert-manager.io/cluster-issuer: "letsencrypt-prod" + cert-manager.io/acme-dns01-provider: "rfc2136" + 
cert-manager.io/acme-challenge-type: "dns01" + kubernetes.io/tls-acme: "true" + path: / + hosts: + - "bitwarden.{{ domain }}" + tls: + - secretName: "bitwarden.{{ domain }}-tls" + hosts: + - "bitwarden.{{ domain }}" + persistence: + enabled: true + accessMode: "{{ bitwarden_storage_mode | default('ReadWriteMany') }}" + size: "{{ bitwarden_size | default('8Gi') }}" + storageClass: "{{ bitwarden_storage | default('nfs-ssd') }}" + diff --git a/roles/bitwarden/tasks/main.yaml b/roles/bitwarden/tasks/main.yaml new file mode 100644 index 0000000..55b3b39 --- /dev/null +++ b/roles/bitwarden/tasks/main.yaml @@ -0,0 +1,19 @@ +- name: Import secret.yaml to obtain secrets + include_tasks: secrets.yaml + when: + - bitwarden_use_external_db + - postgres_enable + +- set_fact: + bitwarden_combined_values: "{{ bitwarden_default_values | combine(bitwarden_values, recursive=true) }}" + +- name: Deploy Bitwarden + community.kubernetes.helm: + create_namespace: true + release_namespace: "{{ bitwarden_namespace | default(namespace) }}" + release_name: "{{ bitwarden_name | default('bitwarden') }}" + chart_ref: "{{ bitwarden_chart | default('ghp/bitwarden') }}" + chart_version: "{{ bitwarden_version | default(omit) }}" + release_values: "{{ bitwarden_combined_values | from_yaml }}" + wait: true + diff --git a/roles/bitwarden/tasks/secrets.yaml b/roles/bitwarden/tasks/secrets.yaml new file mode 100644 index 0000000..fa58db1 --- /dev/null +++ b/roles/bitwarden/tasks/secrets.yaml @@ -0,0 +1,25 @@ +- block: + - name: Set DB namespace for secret lookup + set_fact: + db_namespace: "{{ bitwarden_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}" + + - name: Set DB secret name for lookup + set_fact: + db_secret_name: "bitwarden.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do" + + - name: Lookup Bitwarden DB secret + set_fact: + bitwarden_db_secret: "{{ lookup('k8s', 
kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}" + + - debug: + msg: "{{ bitwarden_db_secret }}" + verbosity: 2 + + - name: Set Bitwarden DB username + set_fact: + bitwarden_db_username: "{{ bitwarden_db_secret.data.username | b64decode }}" + + - name: Set Bitwarden DB password + set_fact: + bitwarden_db_password: "{{ bitwarden_db_secret.data.password | b64decode }}" + diff --git a/roles/cert-manager/defaults/main.yaml b/roles/cert-manager/defaults/main.yaml new file mode 100644 index 0000000..37f0878 --- /dev/null +++ b/roles/cert-manager/defaults/main.yaml @@ -0,0 +1,6 @@ +cert_manager_version: v1.1.0 +cert_manager_namespace: cert-manager +lets_encrypt_mailbox: "admin@{{ domain }}" +cert_manager_base64_tsig_key: "{{ k8s_tsig | b64encode }}" +cert_manager_default_values: + installCRDs: true diff --git a/roles/cert-manager/tasks/main.yaml b/roles/cert-manager/tasks/main.yaml new file mode 100644 index 0000000..0d9723e --- /dev/null +++ b/roles/cert-manager/tasks/main.yaml @@ -0,0 +1,88 @@ +- set_fact: + cert_manager_combined_values: "{{ cert_manager_default_values | combine(cert_manager_values, recursive=true) }}" + +- name: Deploy Cert-manager {{ cert_manager_version }} + community.kubernetes.helm: + create_namespace: true + release_namespace: "{{ cert_manager_namespace | default('cert-manager') }}" + release_name: "{{ cert_manager_name | default('cert-manager') }}" + chart_ref: "{{ cert_manager_chart | default('jetstack/cert-manager') }}" + chart_version: "{{ cert_manager_version }}" + release_values: "{{ cert_manager_combined_values | from_yaml | default(omit) }}" + wait: true + +- name: Create secret for DNS RFC2136 (NSUPDATE) + k8s: + state: present + definition: + apiVersion: v1 + data: + tsig-secret-key: "{{ cert_manager_base64_tsig_key }}" + kind: Secret + metadata: + name: tsig-secret + namespace: cert-manager + type: Opaque + +- name: Create Production ClusterIssuer for Let's Encrypt + k8s: + state: present + definition: + 
apiVersion: cert-manager.io/v1alpha2 + kind: ClusterIssuer + metadata: + name: letsencrypt-prod + spec: + acme: + # The ACME server URL + server: https://acme-v02.api.letsencrypt.org/directory + # Email address used for ACME registration + email: "{{ lets_encrypt_mailbox }}" + # Name of a secret used to store the ACME account private key + privateKeySecretRef: + name: letsencrypt-prod + # Enable the HTTP-01 challenge provider + solvers: + #- http01: + # ingress: + # class: nginx + - dns01: + rfc2136: + nameserver: "{{ external_dns_ip | default(dns_ip) }}:53" + tsigAlgorithm: HMACSHA512 + tsigKeyName: k8s + tsigSecretSecretRef: + key: tsig-secret-key + name: tsig-secret + +- name: Create Staging ClusterIssuer for Let's Encrypt + k8s: + state: present + definition: + apiVersion: cert-manager.io/v1alpha2 + kind: ClusterIssuer + metadata: + name: letsencrypt-staging + spec: + acme: + # The ACME server URL + server: https://acme-staging-v02.api.letsencrypt.org/directory + # Email address used for ACME registration + email: "{{ lets_encrypt_mailbox }}" + # Name of a secret used to store the ACME account private key + privateKeySecretRef: + name: letsencrypt-staging + # Enable the HTTP-01 challenge provider + solvers: + #- http01: + # ingress: + # class: nginx + - dns01: + rfc2136: + nameserver: "{{ external_dns_ip | default(dns_ip) }}:53" + tsigAlgorithm: HMACSHA512 + tsigKeyName: k8s + tsigSecretSecretRef: + key: tsig-secret-key + name: tsig-secret + diff --git a/roles/chartmuseum/defaults/main.yaml b/roles/chartmuseum/defaults/main.yaml new file mode 100644 index 0000000..6b8fba2 --- /dev/null +++ b/roles/chartmuseum/defaults/main.yaml @@ -0,0 +1,86 @@ +chartmuseum_enabled: true +chartmuseum_publish: false +chartmuseum_default_values: + env: + open: + # storage backend, can be one of: local, alibaba, amazon, google, microsoft, oracle + STORAGE: local + # levels of nested repos for multitenancy. 
The default depth is 0 (singletenant server) + DEPTH: 0 + # sets the base context path + CONTEXT_PATH: / + # show debug messages + DEBUG: false + # output structured logs as json + LOG_JSON: true + # disable use of index-cache.yaml + DISABLE_STATEFILES: false + # disable Prometheus metrics + DISABLE_METRICS: true + # disable all routes prefixed with /api + DISABLE_API: false + # allow chart versions to be re-uploaded + ALLOW_OVERWRITE: true + # allow anonymous GET operations when auth is used + AUTH_ANONYMOUS_GET: true + secret: + # username for basic http authentication + BASIC_AUTH_USER: "{{ chartmuseum_admin_login | default('admin') }}" + # password for basic http authentication + BASIC_AUTH_PASS: "{{ chartmuseum_admin_pass | default(chartmuseum_admin_password) }}" + + persistence: + enabled: true + accessMode: "{{ chartmuseum_storage_mode | default('ReadWriteMany') }}" + size: "{{ chartmuseum_size | default('10Gi') }}" + labels: {} + path: /storage + storageClass: "{{ chartmuseum_storage | default('nfs-hdd') }}" + + ## Ingress for load balancer + ingress: + enabled: true + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + cert-manager.io/acme-dns01-provider: "rfc2136" + cert-manager.io/acme-challenge-type: "dns01" + kubernetes.io/ingress.class: "{{ external_ingress_class if chartmuseum_publish else internal_ingress_class }}" + kubernetes.io/tls-acme: "true" + hosts: + - name: charts.{{ domain }} + path: / + tls: true + tlsSecret: charts.{{ domain }}-tls + +chartmuseum_readonly_ingress_definition: | + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + annotations: + cert-manager.io/acme-challenge-type: dns01 + cert-manager.io/acme-dns01-provider: rfc2136 + cert-manager.io/cluster-issuer: letsencrypt-prod + kubernetes.io/ingress.class: "{{ external_ingress_class }}" + nginx.ingress.kubernetes.io/proxy-body-size: "0" + nginx.ingress.kubernetes.io/proxy-read-timeout: "600" + nginx.ingress.kubernetes.io/proxy-send-timeout: "600" + 
nginx.ingress.kubernetes.io/configuration-snippet: |- + limit_except GET { + deny all; + } + name: chartmuseum-public + namespace: "{{ chartmuseum_namespace | default(namespace) }}" + spec: + rules: + - host: "{{ chartmuseum_readonly_ingress }}" + http: + paths: + - backend: + serviceName: chartmuseum-chartmuseum + servicePort: 8080 + path: / + tls: + - hosts: + - "{{ chartmuseum_readonly_ingress }}" + secretName: "{{ chartmuseum_readonly_ingress }}-tls" + diff --git a/roles/chartmuseum/tasks/main.yaml b/roles/chartmuseum/tasks/main.yaml new file mode 100644 index 0000000..a8fcf8a --- /dev/null +++ b/roles/chartmuseum/tasks/main.yaml @@ -0,0 +1,20 @@ +- set_fact: + chartmuseum_combined_values: "{{ chartmuseum_default_values | combine(chartmuseum_values, recursive=true) }}" + +- name: Deploy ChartMuseum + community.kubernetes.helm: + create_namespace: true + release_namespace: "{{ chartmuseum_namespace | default(namespace) }}" + release_name: "{{ chartmuseum_name | default('chartmuseum') }}" + chart_ref: "{{ chartmuseum_chart | default('ghp/chartmuseum') }}" + chart_version: "{{ chartmuseum_version | default(omit) }}" + release_values: "{{ chartmuseum_combined_values | from_yaml }}" + wait: true + +- name: Deploy readonly public ingress for ChartMuseum + when: chartmuseum_readonly_ingress is defined + k8s: + state: present + definition: + "{{ chartmuseum_readonly_ingress_definition }}" + diff --git a/roles/ddclient/defaults/main.yml b/roles/ddclient/defaults/main.yml new file mode 100644 index 0000000..236f0a2 --- /dev/null +++ b/roles/ddclient/defaults/main.yml @@ -0,0 +1,3 @@ +dockerize: false +namespace: ddclient +ddclient_image_tag: v3.9.1-ls45 diff --git a/roles/ddclient/handlers/main.yml b/roles/ddclient/handlers/main.yml new file mode 100644 index 0000000..b719212 --- /dev/null +++ b/roles/ddclient/handlers/main.yml @@ -0,0 +1,27 @@ +--- +- name: start ddclient + systemd: + name: ddclient + state: started + enabled: yes + +- name: restart ddclient + systemd: 
+ name: ddclient + state: restarted + enabled: yes + +- name: restart docker ddclient + community.general.docker_container: + name: "{{ namespace }}-ddclient" + image: "{{ docker_registry }}/ddclient:{{ ddclient_image_tag | default('v3.9.1-ls45') }}" + state: started + restart: yes + container_default_behavior: no_defaults + detach: true + restart_policy: unless-stopped + volumes: + - "/opt/{{ namespace }}/ddclient.conf:/config/ddclient.conf" + - "/opt/{{ namespace }}/Kvps.key:/config/Kvps.key" + - "/opt/{{ namespace }}/Kvps.private:/config/Kvps.private" + diff --git a/roles/ddclient/tasks/configure.yml b/roles/ddclient/tasks/configure.yml new file mode 100644 index 0000000..caf28fd --- /dev/null +++ b/roles/ddclient/tasks/configure.yml @@ -0,0 +1,18 @@ +--- +- block: + - name: copy public key for ddclient + copy: + dest: /etc/Kvps.key + src: files/Kvps.key + + - name: copy private key for ddclient + copy: + dest: /etc/Kvps.private + src: files/Kvps.private + + - name: configure ddlient.conf + copy: + content: "{{ ddclient_conf }}" + dest: "/etc/ddclient.conf" + notify: restart ddclient + diff --git a/roles/ddclient/tasks/docker.yml b/roles/ddclient/tasks/docker.yml new file mode 100644 index 0000000..8172396 --- /dev/null +++ b/roles/ddclient/tasks/docker.yml @@ -0,0 +1,35 @@ +--- +- name: Create configuration dir for {{ namespace }} + file: + name: "/opt/{{ namespace }}" + state: directory + +- name: Copy ddclient configuration for {{ namespace }} + copy: + dest: "/opt/{{ namespace }}/ddclient.conf" + content: "{{ ddclient_conf }}" + notify: restart docker ddclient + +- name: Copy Kvps.key for {{ namespace }} + copy: + dest: "/opt/{{ namespace }}/Kvps.key" + content: "{{ ddclient_tsig_public_key_base64 | b64decode }}" + notify: restart docker ddclient + +- name: Copy Kvps.private for {{ namespace }} + copy: + dest: "/opt/{{ namespace }}/Kvps.private" + content: "{{ ddclient_tsig_private_key_base64 | b64decode }}" + notify: restart docker ddclient + +- name: Start 
ddclient in docker for {{ namespace }} + docker_container: + name: "{{ namespace }}-ddclient" + image: "{{ docker_registry }}/ddclient:{{ ddclient_image_tag }}" + state: started + container_default_behavior: no_defaults + restart_policy: unless-stopped + volumes: + - "/opt/{{ namespace }}/ddclient.conf:/config/ddclient.conf" + - "/opt/{{ namespace }}/Kvps.key:/config/Kvps.key" + - "/opt/{{ namespace }}/Kvps.private:/config/Kvps.private" diff --git a/roles/ddclient/tasks/install.yml b/roles/ddclient/tasks/install.yml new file mode 100644 index 0000000..8548bef --- /dev/null +++ b/roles/ddclient/tasks/install.yml @@ -0,0 +1,11 @@ +--- +- block: + - name: installing ddclient + package: + name: + - ddclient + state: present + notify: start ddclient + register: install_ddlient_result + tags: + - ddclient-install diff --git a/roles/ddclient/tasks/main.yml b/roles/ddclient/tasks/main.yml new file mode 100644 index 0000000..15965b3 --- /dev/null +++ b/roles/ddclient/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- block: + - import_tasks: install.yml + when: not dockerize + - import_tasks: configure.yml + when: not dockerize + become: true + +- block: + - import_tasks: docker.yml + when: dockerize + become: true diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml new file mode 100644 index 0000000..5bd08e1 --- /dev/null +++ b/roles/docker/defaults/main.yml @@ -0,0 +1 @@ +install_docker_ce_repo: 'yes' diff --git a/roles/docker/tasks/docker.yml b/roles/docker/tasks/docker.yml new file mode 100644 index 0000000..df5f426 --- /dev/null +++ b/roles/docker/tasks/docker.yml @@ -0,0 +1,54 @@ +--- +- block: + - name: Install packages for Docker + yum: + name: + - device-mapper-persistent-data + - lvm2 + - libselinux-python + state: present + + - name: add docker-ce repo + yum_repository: + name: docker-ce-stable + file: docker-ce + description: Docker CE Stable - $basearch + enabled: yes + baseurl: https://download.docker.com/linux/centos/7/$basearch/stable + gpgkey: 
https://download.docker.com/linux/centos/gpg + gpgcheck: yes + when: install_docker_ce_repo == 'yes' + become: yes + + - name: Install Docker + package: + name: docker-ce + state: present + become: yes + + - name: Create /etc/docker directory + file: + path: /etc/docker + state: directory + + - name: Deploy Docker daemon.json + template: + src: daemon.json.j2 + dest: /etc/docker/daemon.json + register: daemon_config_result + + - name: Start Docker service + service: + name: docker + state: started + enabled: yes + become: yes + + - name: Restart Docker + systemd: + state: restarted + name: docker + when: daemon_config_result.changed + + tags: + - docker diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml new file mode 100644 index 0000000..c2c20c5 --- /dev/null +++ b/roles/docker/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- block: + - import_tasks: docker.yml + tags: + - docker diff --git a/roles/docker/templates/daemon.json.j2 b/roles/docker/templates/daemon.json.j2 new file mode 100644 index 0000000..644e843 --- /dev/null +++ b/roles/docker/templates/daemon.json.j2 @@ -0,0 +1,18 @@ +{% if docker is defined %} + +{% if docker.insecure_registries is defined %} +{% set insecure_registries = docker.insecure_registries %} +{% endif %} + +{% endif %} +{ + "exec-opts": ["native.cgroupdriver=systemd"], + "log-driver": "json-file", + {% if insecure_registries is defined %} + "insecure-registries" : ["{{ insecure_registries }}"], + {% endif %} + "log-opts": { + "max-size": "100m" + } +} + diff --git a/roles/dovecot/defaults/main.yaml b/roles/dovecot/defaults/main.yaml new file mode 100644 index 0000000..adef54b --- /dev/null +++ b/roles/dovecot/defaults/main.yaml @@ -0,0 +1,201 @@ +dovecot_default_values: + replicaCount: 1 + persistence: + enabled: true + existingClaim: mailboxes + + tls: + enabled: true + existingSecret: mail.{{ domain }}-secret + + dovecot: + image: + repository: "{{ docker_registry }}/dovecot" + tag: latest + pullPolicy: Always + 
configmaps: + dovecot: + dovecot: | + protocols = imap lmtp sieve + mail_max_userip_connections = 1000 + mail_plugins = virtual + + haproxy_trusted_networks = 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 + haproxy_timeout = 30s + dict { + #quota = mysql:/etc/dovecot/dovecot-dict-sql.conf.ext + #expire = sqlite:/etc/dovecot/dovecot-dict-sql.conf.ext + } + + # Most of the actual configuration gets included below. The filenames are + # first sorted by their ASCII value and parsed in that order. The 00-prefixes + # in filenames are intended to make it easier to understand the ordering. + !include conf.d/*.conf + + # A config file can also tried to be included without giving an error if + # it's not found: + !include_try local.conf + ldap: | + uris = ldaps://openldap.{{ domain }} + dn = uid=ldapbind,ou=services,{{ openldap_domain }} + dnpass = {{ ldapbind_pass | default(ldapbind_password) }} + auth_bind = yes + auth_bind_userdn = uid=%n,ou=users,{{ openldap_domain }} + tls = no + ldap_version = 3 + base = ou=users,{{ openldap_domain }} + deref = never + scope = subtree + user_filter = (&(objectClass=posixAccount)(mail=%u)) + user_attrs = cn=home=/home/vmail/%$ + pass_filter = (&(objectClass=posixAccount)(mail=%u)) + pass_attrs = uid=user,userPassword=password + #default_pass_scheme = CRYPT + confd: + auth-ldap: | + passdb { + driver = ldap + + # Path for LDAP configuration file, see example-config/dovecot-ldap.conf.ext + args = /etc/dovecot/ldap.conf + } + userdb { + driver = ldap + args = /etc/dovecot/ldap.conf + + } + 10-auth: | + auth_default_realm = {{ domain }} + auth_username_format = %Lu + auth_mechanisms = plain login + 10-mail: | + mail_location = maildir:%h + namespace inbox { + inbox = yes + } + mail_uid = vmail + mail_gid = vmail + first_valid_uid = 1000 + last_valid_uid = 1000 + first_valid_gid = 1000 + last_valid_gid = 1000 + protocol !indexer-worker { + } + mbox_write_locks = fcntl + 10-master: | + protocol imap { + mail_plugins = virtual + } + service 
imap-login { + inet_listener imap { + #port = 143 + } + inet_listener imaps { + #port = 993 + #ssl = yes + } + inet_listener imap_haproxy { + port = 1109 + haproxy = yes + } + inet_listener imaps_haproxy { + port = 10993 + ssl = yes + haproxy = yes + } + } + + service pop3-login { + inet_listener pop3 { + #port = 110 + } + inet_listener pop3s { + #port = 995 + #ssl = yes + } + } + + service lmtp { + inet_listener lmtp { + port = 24 + } + unix_listener /var/spool/postfix/private/dovecot-lmtp { + mode = 0600 + group = postfix + user = postfix + } + user = vmail + } + + service imap { + } + + service pop3 { + } + + service auth { + inet_listener { + port = 12345 + } + unix_listener auth-userdb { + mode = 0660 + user = vmail + #group = + } + + # Postfix smtp-auth + unix_listener /var/spool/postfix/private/auth { + mode = 0660 + user = postfix + group = postfix + } + } + + service auth-worker { + } + + service dict { + unix_listener dict { + } + } + 10-ssl: | + ssl = required + #verbose_ssl = yes + ssl_prefer_server_ciphers = yes + ssl_min_protocol = TLSv1.2 + ssl_cert = 60, + ); + fix.config.php: |- + ['{{ web_proxy_internal_ip }}'], + 'overwriteprotocol' => 'https', + 'overwrite.cli.url' => 'https://nextcloud.{{ domain }}', + 'mail_smtpstreamoptions' => + array ( + 'ssl' => + array ( + 'allow_self_signed' => true, + 'verify_peer' => false, + 'verify_peer_name' => false, + ), + ), + ); + strategy: + type: RollingUpdate + internalDatabase: + enabled: false + name: nextcloud + # Disable Mariadb setup + mariadb: + enabled: false + # Enable Redis + redis: + enabled: true + usePassword: false + cluster: + enabled: false + ## External database configuration + externalDatabase: + enabled: true + ## Supported database engines: mysql or postgresql + type: postgresql + ## Database host + host: "{{ namespace }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local" + ## Database name + database: nextcloud + user: "{{ nextcloud_db_username }}" + password: "{{ 
nextcloud_db_password }}" + + ## Cronjob to execute Nextcloud background tasks + ## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#cron-jobs + ## + cronjob: + enabled: true + # Nexcloud image is used as default but only curl is needed + image: + repository: nextcloud + tag: 19.0-apache + schedule: "*/5 * * * *" + annotations: {} + # Set curl's insecure option if you use e.g. self-signed certificates + curlInsecure: false + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + service: + type: ClusterIP + port: 8080 + loadBalancerIP: nil + persistence: + # Nextcloud Data (/var/www/html) + enabled: true + storageClass: "{{ gitea_storage | default('nfs-ssd') }}" + accessMode: "{{ gitea_storage_mode | default('ReadWriteMany') }}" + size: "{{ gitea_size | default('20Gi') }}" + livenessProbe: + enabled: false + readinessProbe: + enabled: false + diff --git a/roles/nextcloud/tasks/main.yaml b/roles/nextcloud/tasks/main.yaml new file mode 100644 index 0000000..3e130a6 --- /dev/null +++ b/roles/nextcloud/tasks/main.yaml @@ -0,0 +1,19 @@ +- name: Import secret.yaml to obtain secrets + include_tasks: secrets.yaml + when: + - nextcloud_use_external_db + - postgres_enable + +- set_fact: + nextcloud_combined_values: "{{ nextcloud_default_values | combine(nextcloud_values, recursive=true) }}" + +- name: Deploy Nextcloud + community.kubernetes.helm: + create_namespace: true + release_namespace: "{{ nextcloud_namespace | default(namespace) }}" + release_name: "{{ nextcloud_name | default('nextcloud') }}" + chart_ref: "{{ nextcloud_chart | default('nextcloud/nextcloud') }}" + chart_version: "{{ nextcloud_version | default(omit) }}" + release_values: "{{ nextcloud_combined_values | from_yaml }}" + wait: false + diff --git a/roles/nextcloud/tasks/secrets.yaml b/roles/nextcloud/tasks/secrets.yaml new file mode 100644 index 0000000..54f91e9 --- /dev/null +++ b/roles/nextcloud/tasks/secrets.yaml @@ -0,0 +1,25 @@ +- 
block: + - name: Set DB namespace for secret lookup + set_fact: + db_namespace: "{{ nextcloud_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}" + + - name: Set DB secret name for lookup + set_fact: + db_secret_name: "nextcloud.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do" + + - name: Lookup Nextcloud DB secret + set_fact: + nextcloud_db_secret: "{{ lookup('k8s', kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}" + + - debug: + msg: "{{ nextcloud_db_secret }}" + verbosity: 2 + + - name: Set Nextcloud DB username + set_fact: + nextcloud_db_username: "{{ nextcloud_db_secret.data.username | b64decode }}" + + - name: Set Nextcloud DB password + set_fact: + nextcloud_db_password: "{{ nextcloud_db_secret.data.password | b64decode }}" + diff --git a/roles/nfs-client-provisioner/defaults/main.yaml b/roles/nfs-client-provisioner/defaults/main.yaml new file mode 100644 index 0000000..6a1c04c --- /dev/null +++ b/roles/nfs-client-provisioner/defaults/main.yaml @@ -0,0 +1,70 @@ +nfs_client_provisioner_namespace: nfs-client-provisioner +nfs_client_provisioner_hdd_default_values: + replicaCount: 1 + strategyType: Recreate + nfs: + server: + path: + + # For creating the StorageClass automatically: + storageClass: + create: true + + # Set a provisioner name. If unset, a name will be generated. + # provisionerName: + + # Set StorageClass as the default StorageClass + # Ignored if storageClass.create is false + defaultClass: false + + # Set a StorageClass name + # Ignored if storageClass.create is false + name: nfs-hdd + + # Allow volume to be expanded dynamically + allowVolumeExpansion: true + + # Method used to reclaim an obsoleted volume + reclaimPolicy: Delete + + # When set to false your PVs will not be archived by the provisioner upon deletion of the PVC. 
+ archiveOnDelete: false + + # Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany + accessModes: ReadWriteMany + + +nfs_client_provisioner_ssd_default_values: + replicaCount: 1 + strategyType: Recreate + nfs: + server: + path: + + # For creating the StorageClass automatically: + storageClass: + create: true + + # Set a provisioner name. If unset, a name will be generated. + # provisionerName: + + # Set StorageClass as the default StorageClass + # Ignored if storageClass.create is false + defaultClass: true + + # Set a StorageClass name + # Ignored if storageClass.create is false + name: nfs-ssd + + # Allow volume to be expanded dynamically + allowVolumeExpansion: true + + # Method used to reclaim an obsoleted volume + reclaimPolicy: Delete + + # When set to false your PVs will not be archived by the provisioner upon deletion of the PVC. + archiveOnDelete: false + + # Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany + accessModes: ReadWriteMany + diff --git a/roles/nfs-client-provisioner/tasks/main.yaml b/roles/nfs-client-provisioner/tasks/main.yaml new file mode 100644 index 0000000..07b2048 --- /dev/null +++ b/roles/nfs-client-provisioner/tasks/main.yaml @@ -0,0 +1,25 @@ +- set_fact: + nfs_client_provisioner_hdd_combined_values: "{{ nfs_client_provisioner_hdd_default_values | combine(nfs_client_provisioner_hdd_values, recursive=true) }}" + +- name: Deploy NFS client provisioner for HDD storage + community.kubernetes.helm: + create_namespace: true + release_namespace: "{{ nfs_client_provisioner_hdd_namespace | default(nfs_client_provisioner_namespace) | default(namespace) }}" + release_name: "{{ nfs_client_provisioner_hdd_name | default('nfs-client-provisioner-hdd') }}" + chart_ref: "{{ nfs_client_provisioner_hdd_chart | default('stable/nfs-client-provisioner') }}" + chart_version: "{{ nfs_client_provisioner_hdd_version | default(omit) }}" + release_values: "{{ nfs_client_provisioner_hdd_combined_values | from_yaml }}" + wait: true + +- 
set_fact: + nfs_client_provisioner_ssd_combined_values: "{{ nfs_client_provisioner_ssd_default_values | combine(nfs_client_provisioner_ssd_values, recursive=true) }}" + +- name: Deploy NFS client provisioner for SSD storage + community.kubernetes.helm: + create_namespace: true + release_namespace: "{{ nfs_client_provisioner_ssd_namespace | default(nfs_client_provisioner_namespace) | default(namespace) }}" + release_name: "{{ nfs_client_provisioner_ssd_name | default('nfs-client-provisioner-ssd') }}" + chart_ref: "{{ nfs_client_provisioner_ssd_chart | default('stable/nfs-client-provisioner') }}" + chart_version: "{{ nfs_client_provisioner_ssd_version | default(omit) }}" + release_values: "{{ nfs_client_provisioner_ssd_combined_values | from_yaml }}" + wait: true diff --git a/roles/nginx/defaults/main.yml b/roles/nginx/defaults/main.yml new file mode 100644 index 0000000..aaa7cbc --- /dev/null +++ b/roles/nginx/defaults/main.yml @@ -0,0 +1,3 @@ +registry_readonly_ingress: false +wikijs_readonly_ingress: false +chartmuseum_readonly_ingress: false diff --git a/roles/nginx/handlers/main.yml b/roles/nginx/handlers/main.yml new file mode 100644 index 0000000..df9aae0 --- /dev/null +++ b/roles/nginx/handlers/main.yml @@ -0,0 +1,13 @@ +--- +- name: start nginx + systemd: + name: nginx + state: started + enabled: yes + +- name: reload nginx + systemd: + name: nginx + state: reloaded + enabled: yes + diff --git a/roles/nginx/tasks/configure.yml b/roles/nginx/tasks/configure.yml new file mode 100644 index 0000000..9b7723b --- /dev/null +++ b/roles/nginx/tasks/configure.yml @@ -0,0 +1,39 @@ +--- +- block: + - name: configure nginx.conf + copy: + content: "{{ nginx['nginx.conf'] }}" + dest: "/etc/nginx/nginx.conf" + notify: reload nginx + when: nginx['nginx.conf'] is defined + + - name: add configs nginx to conf.d + copy: + content: "{{ item.data }}" + dest: "/etc/nginx/conf.d/{{ item.name }}" + loop: "{{ nginx['conf.d'] }}" + notify: reload nginx + when: nginx['conf.d'] is 
defined + + - name: add configs nginx to stream.d + copy: + content: "{{ item.data }}" + dest: "/etc/nginx/stream.d/{{ item.name }}" + loop: "{{ nginx['stream.d'] }}" + notify: reload nginx + when: nginx['stream.d'] is defined + + - name: check if ssl dir exist + file: path=/etc/nginx/ssl state=directory + when: nginx.ssl is defined + + - name: add ssl certs and keys + copy: + content: "{{ item.data }}" + dest: "/etc/nginx/ssl/{{ item.name }}" + loop: "{{ nginx.ssl }}" + notify: reload nginx + when: nginx.ssl is defined + + tags: + - nginx-configure diff --git a/roles/nginx/tasks/install.yml b/roles/nginx/tasks/install.yml new file mode 100644 index 0000000..22581dc --- /dev/null +++ b/roles/nginx/tasks/install.yml @@ -0,0 +1,11 @@ +--- +- block: + - name: installing nginx + package: + name: + - nginx + state: present + notify: start nginx + register: install_nginx_result + tags: + - nginx-install diff --git a/roles/nginx/tasks/main.yml b/roles/nginx/tasks/main.yml new file mode 100644 index 0000000..e29051b --- /dev/null +++ b/roles/nginx/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- block: + - import_tasks: install.yml + - import_tasks: configure.yml + + become: true + tags: + - nginx diff --git a/roles/opendkim/defaults/main.yaml b/roles/opendkim/defaults/main.yaml new file mode 100644 index 0000000..c7b7f6c --- /dev/null +++ b/roles/opendkim/defaults/main.yaml @@ -0,0 +1,45 @@ +opendkim_default_values: + replicaCount: 1 + persistence: + enabled: false + existingClaim: mailboxes + opendkim: + image: + repository: "{{ docker_registry }}/opendkim" + tag: latest + pullPolicy: Always + configmaps: + opendkim: | + PidFile /var/run/opendkim/opendkim.pid + Mode sv + Syslog yes + SyslogSuccess yes + LogWhy yes + UserID opendkim:opendkim + Socket inet:8891 + Umask 002 + SendReports yes + SoftwareHeader yes + Canonicalization relaxed/relaxed + Domain {{ domain }} + Selector default + MinimumKeyBits 1024 + KeyTable refile:/etc/opendkim/KeyTable + SigningTable 
refile:/etc/opendkim/SigningTable + ExternalIgnoreList refile:/etc/opendkim/TrustedHosts + InternalHosts refile:/etc/opendkim/TrustedHosts + OversignHeaders From + keytable: | + default._domainkey.{{ domain }} {{ domain }}:default:/etc/opendkim/keys/default.private + signingtable: | + *@{{ domain }} default._domainkey.{{ domain }} + trustedhosts: | + 127.0.0.1 + ::1 + *.{{ domain }} + default-private: | + {{ dkim_private_key_base64 | b64decode }} + default-public: | + {{ dkim_public_key_base64 | b64decode }} + service: + type: ClusterIP diff --git a/roles/opendkim/tasks/main.yaml b/roles/opendkim/tasks/main.yaml new file mode 100644 index 0000000..5feefaf --- /dev/null +++ b/roles/opendkim/tasks/main.yaml @@ -0,0 +1,13 @@ +- set_fact: + opendkim_combined_values: "{{ opendkim_default_values | combine(opendkim_values, recursive=true) }}" + +- name: Deploy OpenDKIM + community.kubernetes.helm: + create_namespace: true + release_namespace: "{{ opendkim_namespace | default(mail_namespace) | default(namespace) }}" + release_name: "{{ opendkim_name | default('opendkim') }}" + chart_ref: "{{ opendkim_chart | default('ghp/opendkim') }}" + chart_version: "{{ opendkim_version | default(omit) }}" + release_values: "{{ opendkim_combined_values | from_yaml }}" + wait: true + diff --git a/roles/opendmarc/defaults/main.yaml b/roles/opendmarc/defaults/main.yaml new file mode 100644 index 0000000..d344421 --- /dev/null +++ b/roles/opendmarc/defaults/main.yaml @@ -0,0 +1,24 @@ +opendmarc_default_values: + replicaCount: 1 + persistence: + enabled: false + existingClaim: mailboxes + + opendmarc: + image: + repository: "{{ docker_registry }}/opendmarc" + tag: latest + pullPolicy: Always + configmaps: + opendmarc: | + AuthservID mail.{{ domain }} + Socket inet:8893 + SoftwareHeader true + SPFIgnoreResults true + SPFSelfValidate true + RequiredHeaders true + Syslog true + UserID opendmarc:mail + service: + type: ClusterIP + diff --git a/roles/opendmarc/tasks/main.yaml 
b/roles/opendmarc/tasks/main.yaml new file mode 100644 index 0000000..fd29751 --- /dev/null +++ b/roles/opendmarc/tasks/main.yaml @@ -0,0 +1,13 @@ +- set_fact: + opendmarc_combined_values: "{{ opendmarc_default_values | combine(opendmarc_values, recursive=true) }}" + +- name: Deploy OpenDMARC + community.kubernetes.helm: + create_namespace: true + release_namespace: "{{ opendmarc_namespace | default(mail_namespace) | default(namespace) }}" + release_name: "{{ opendmarc_name | default('opendmarc') }}" + chart_ref: "{{ opendmarc_chart | default('ghp/opendmarc') }}" + chart_version: "{{ opendmarc_version | default(omit) }}" + release_values: "{{ opendmarc_combined_values | from_yaml }}" + wait: true + diff --git a/roles/openldap/defaults/main.yaml b/roles/openldap/defaults/main.yaml new file mode 100644 index 0000000..3754308 --- /dev/null +++ b/roles/openldap/defaults/main.yaml @@ -0,0 +1,279 @@ +openldap_default_values: + replicaCount: 1 + + # Define deployment strategy - IMPORTANT: use rollingUpdate: null when use Recreate strategy. + # It prevents from merging with existing map keys which are forbidden. 
+ strategy: + type: RollingUpdate + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + # + # or + # + # type: Recreate + # rollingUpdate: null + image: + # From repository https://github.com/osixia/docker-openldap + repository: osixia/openldap + tag: 1.4.0 + pullPolicy: Always + + # Spcifies an existing secret to be used for admin and config user passwords + existingSecret: "" + + # settings for enabling TLS + tls: + enabled: true + secret: "openldap.{{ domain }}-secret" # The name of a kubernetes.io/tls type secret to use for TLS + CA: + enabled: true + secret: "openldap.{{ domain }}-ca" # The name of a generic secret to use for custom CA certificate (ca.crt) + + ## Add additional labels to all resources + extraLabels: {} + ## Add additional annotations to pods + podAnnotations: {} + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: openldap.{{ domain }} + clusterIP: "" + + ldapPort: 389 + sslLdapPort: 636 # Only used if tls.enabled is true + ## List of IP addresses at which the service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "{{ openldap_loadbalancer_ip | default(omit) }}" + loadBalancerSourceRanges: [] + type: LoadBalancer + + # Default configuration for openldap as environment variables. These get injected directly in the container. + # Use the env variables from https://github.com/osixia/docker-openldap#beginner-guide + env: + LDAP_ORGANISATION: "{{ ldap_org | default('GHP') }}" + LDAP_DOMAIN: "{{ ldap_domain | default(domain) }}" + LDAP_BACKEND: "mdb" + LDAP_TLS: "true" + LDAP_TLS_ENFORCE: "false" + LDAP_RFC2307BIS_SCHEMA: "true" + LDAP_TLS_VERIFY_CLIENT: "try" + + # Default Passwords to use, stored as a secret. If unset, passwords are auto-generated. 
+ # You can override these at install time with + # helm install openldap --set openldap.adminPassword=,openldap.configPassword= + adminPassword: "{{ openldap_admin_pass | default(openldap_admin_password) }}" + configPassword: "{{ openldap_config_pass | default(openldap_config_password) }}" + + # Custom openldap configuration files used to override default settings + customLdifFiles: + 01-pw-pbkdf2.ldif: |- + dn: cn=module{0},cn=config + changetype: modify + add: olcModuleLoad + olcModuleLoad: pw-pbkdf2 + 02-acl.ldif: |- + dn: olcDatabase={1}mdb,cn=config + changetype: modify + add: olcAccess + olcAccess: {1}to * by users read by anonymous auth by * none + 03-default-users.ldif: |- + dn: ou=groups,{{ openldap_domain }} + changetype: add + objectClass: organizationalUnit + objectClass: top + ou: groups + + dn: ou=users,{{ openldap_domain }} + changetype: add + objectClass: organizationalUnit + objectClass: top + ou: users + + dn: ou=services,{{ openldap_domain }} + changetype: add + objectClass: organizationalUnit + objectClass: top + ou: services + + dn: uid=admin,ou=users,{{ openldap_domain }} + changetype: add + uid: admin + cn: admin + sn: 3 + objectClass: top + objectClass: posixAccount + objectClass: inetOrgPerson + loginShell: /bin/bash + homeDirectory: /home/admin + uidNumber: 14583103 + gidNumber: 14564103 + userPassword: {{ openldap_admin_pbkdf2_sha512_hash }} + gecos: Admin user + + dn: uid=systemuser,ou=services,{{ openldap_domain }} + changetype: add + uid: systemuser + cn: systemuser + sn: 4 + objectClass: top + objectClass: posixAccount + objectClass: inetOrgPerson + loginShell: /bin/bash + homeDirectory: /home/systemuser + uidNumber: 14583104 + gidNumber: 14564104 + userPassword: {{ systemuser_pbkdf2_sha512_hash }} + mail: systemuser@{{ domain }} + gecos: System user + + dn: uid=nextcloud,ou=users,{{ openldap_domain }} + changetype: add + uid: nextcloud + cn: nextcloud + sn: 6 + objectClass: top + objectClass: posixAccount + objectClass: 
inetOrgPerson + loginShell: /bin/bash + homeDirectory: /home/nextcloud + uidNumber: 14583106 + gidNumber: 14564106 + userPassword: {{ nextcloud_ldap_pbkdf2_sha512_hash }} + mail: nextcloud@{{ domain }} + gecos: Nexcloud user + + dn: uid=ldapbind,ou=services,{{ openldap_domain }} + changetype: add + uid: ldapbind + cn: ldapbind + sn: 7 + objectClass: top + objectClass: posixAccount + objectClass: inetOrgPerson + loginShell: /sbin/nologin + homeDirectory: /home/ldapbind + uidNumber: 14583107 + gidNumber: 14564107 + userPassword: {{ ldapbind_pbkdf2_sha512_hash }} + gecos: LDAP bind user + + dn: uid=bitwarden,ou=users,{{ openldap_domain }} + changetype: add + uid: bitwarden + cn: bitwarden + sn: 8 + objectClass: top + objectClass: posixAccount + objectClass: inetOrgPerson + loginShell: /bin/bash + homeDirectory: /home/bitwarden + uidNumber: 14583108 + gidNumber: 14564108 + userPassword: {{ bitwarden_ldap_pbkdf2_sha512_hash }} + mail: bitwarden@{{ domain }} + gecos: Bitwarden user + + dn: uid=gitea,ou=users,{{ openldap_domain }} + changetype: add + uid: gitea + cn: gitea + sn: 9 + objectClass: top + objectClass: posixAccount + objectClass: inetOrgPerson + loginShell: /bin/bash + homeDirectory: /home/gitea + uidNumber: 14583109 + gidNumber: 14564109 + userPassword: {{ gitea_ldap_pbkdf2_sha512_hash }} + mail: gitea@{{ domain }} + gecos: Gitea user + + dn: uid=wikijs,ou=users,{{ openldap_domain }} + changetype: add + uid: wikijs + cn: wikijs + sn: 10 + objectClass: top + objectClass: posixAccount + objectClass: inetOrgPerson + loginShell: /bin/bash + homeDirectory: /home/wikijs + uidNumber: 14583110 + gidNumber: 14564110 + userPassword: {{ wikijs_ldap_pbkdf2_sha512_hash }} + mail: wikijs@{{ domain }} + gecos: WikiJS user + + dn: uid=peertube,ou=users,{{ openldap_domain }} + changetype: add + uid: peertube + cn: peertube + sn: 11 + objectClass: top + objectClass: posixAccount + objectClass: inetOrgPerson + loginShell: /bin/bash + homeDirectory: /home/peertube + uidNumber: 
14583111 + gidNumber: 14564111 + userPassword: {{ peertube_ldap_pbkdf2_sha512_hash }} + mail: peertube@{{ domain }} + gecos: PeerTube user + + dn: cn=admin,ou=groups,{{ openldap_domain }} + changetype: add + objectClass: groupOfUniqueNames + cn: admin + description: Admin users + uniqueMember: cn=admin,{{ openldap_domain }} + + 06-memberof.ldif: |- + dn: cn=services,ou=groups,{{ openldap_domain }} + changetype: add + objectClass: groupOfUniqueNames + cn: services + description: System users + uniqueMember: uid=systemuser,ou=services,{{ openldap_domain }} + uniqueMember: uid=ldapbind,ou=services,{{ openldap_domain }} + uniqueMember: uid=nextcloud,ou=users,{{ openldap_domain }} + uniqueMember: uid=bitwarden,ou=users,{{ openldap_domain }} + uniqueMember: uid=gitea,ou=users,{{ openldap_domain }} + uniqueMember: uid=wikijs,ou=users,{{ openldap_domain }} + uniqueMember: uid=peertube,ou=users,{{ openldap_domain }} + + dn: cn=users,ou=groups,{{ openldap_domain }} + changetype: add + objectClass: groupOfUniqueNames + cn: users + description: Simple users + {% for user in openldap_simple_users %} + uniqueMember: uid={{ user.name }},ou=users,{{ openldap_domain }} + {% endfor %} + {% for user in openldap_custom_users %} + uniqueMember: uid={{ user.name }},ou=users,{{ openldap_domain }} + {% endfor %} + + ## Persist data to a persistent volume + persistence: + enabled: true + ## database data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "{{ openldap_storage | default('nfs-ssd') }}" + accessMode: "{{ openldap_storage_mode | default('ReadWriteMany') }}" + size: "{{ openldap_size | default('8Gi') }}" + # existingClaim: "" + +## test container details +test: + enabled: false + diff --git a/roles/openldap/tasks/main.yaml b/roles/openldap/tasks/main.yaml new file mode 100644 index 0000000..9b39d95 --- /dev/null +++ b/roles/openldap/tasks/main.yaml @@ -0,0 +1,44 @@ +- name: Create Let's Encrypt ISRG Root X1 CA secret for OpenLDAP + k8s: + state: present + definition: + apiVersion: v1 + data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZhekNDQTFPZ0F3SUJBZ0lSQUlJUXo3RFNRT05aUkdQZ3UyT0Npd0F3RFFZSktvWklodmNOQVFFTEJRQXcKVHpFTE1Ba0dBMVVFQmhNQ1ZWTXhLVEFuQmdOVkJBb1RJRWx1ZEdWeWJtVjBJRk5sWTNWeWFYUjVJRkpsYzJWaApjbU5vSUVkeWIzVndNUlV3RXdZRFZRUURFd3hKVTFKSElGSnZiM1FnV0RFd0hoY05NVFV3TmpBME1URXdORE00CldoY05NelV3TmpBME1URXdORE00V2pCUE1Rc3dDUVlEVlFRR0V3SlZVekVwTUNjR0ExVUVDaE1nU1c1MFpYSnUKWlhRZ1UyVmpkWEpwZEhrZ1VtVnpaV0Z5WTJnZ1IzSnZkWEF4RlRBVEJnTlZCQU1UREVsVFVrY2dVbTl2ZENCWQpNVENDQWlJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dJUEFEQ0NBZ29DZ2dJQkFLM29KSFAwRkRmem01NHJWeWdjCmg3N2N0OTg0a0l4dVBPWlhvSGozZGNLaS92VnFidllBVHlqYjNtaUdiRVNUdHJGai9SUVNhNzhmMHVveG15RisKMFRNOHVrajEzWG5mczdqL0V2RWhta3ZCaW9aeGFVcG1abXlQZmp4d3Y2MHBJZ2J6NU1EbWdLN2lTNCszbVg2VQpBNS9UUjVkOG1VZ2pVK2c0cms4S2I0TXUwVWxYaklCMHR0b3YwRGlOZXdOd0lSdDE4akE4K28rdTNkcGpxK3NXClQ4S09FVXQrend2by83VjNMdlN5ZTByZ1RCSWxESENOQXltZzRWTWs3QlBaN2htL0VMTktqRCtKbzJGUjNxeUgKQjVUMFkzSHNMdUp2VzVpQjRZbGNOSGxzZHU4N2tHSjU1dHVrbWk4bXhkQVE0UTdlMlJDT0Z2dTM5NmozeCtVQwpCNWlQTmdpVjUrSTNsZzAyZFo3N0RuS3hIWnU4QS9sSkJkaUIzUVcwS3RaQjZhd0JkcFVLRDlqZjFiMFNIelV2CktCZHMwcGpCcUFsa2QyNUhON3JPckZsZWFKMS9jdGFKeFFaQktUNVpQdDBtOVNUSkVhZGFvMHhBSDBhaG1iV24KT2xGdWhqdWVmWEtuRWdWNFdlMCtVWGdWQ3dPUGpkQXZCYkkrZTBvY1MzTUZFdnpHNnVCUUUzeERrM1N6eW5UbgpqaDhCQ05BdzFGdHhOclFIdXNFd01GeEl0NEk3bUtaOVlJcWlveW1DekxxOWd3UWJvb01EUWFIV0JmRWJ3cmJ3CnFIeUdPMGFvU0NxSTNIYW
FkcjhmYXFVOUdZL3JPUE5rM3NnckRRb28vL2ZiNGhWQzFDTFFKMTNoZWY0WTUzQ0kKclU3bTJZczZ4dDBuVVc3L3ZHVDFNME5QQWdNQkFBR2pRakJBTUE0R0ExVWREd0VCL3dRRUF3SUJCakFQQmdOVgpIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJSNXRGbm1lN2JsNUFGemdBaUl5QnBZOXVtYmJqQU5CZ2txCmhraUc5dzBCQVFzRkFBT0NBZ0VBVlI5WXFieXlxRkRRRExIWUdta2dKeWtJckdGMVhJcHUrSUxsYVMvVjlsWkwKdWJoekVGblRJWmQrNTB4eCs3TFNZSzA1cUF2cUZ5RldoZkZRRGxucnp1Qlo2YnJKRmUrR25ZK0VnUGJrNlpHUQozQmViWWh0RjhHYVYwbnh2d3VvNzd4L1B5OWF1Si9HcHNNaXUvWDErbXZvaUJPdi8yWC9xa1NzaXNSY09qL0tLCk5GdFkyUHdCeVZTNXVDYk1pb2d6aVV3dGhEeUMzKzZXVndXNkxMdjN4TGZIVGp1Q3ZqSElJbk56a3RIQ2dLUTUKT1JBekk0Sk1QSitHc2xXWUhiNHBob3dpbTU3aWF6dFhPb0p3VGR3Sng0bkxDZ2ROYk9oZGpzbnZ6cXZIdTdVcgpUa1hXU3RBbXpPVnl5Z2hxcFpYakZhSDNwTzNKTEYrbCsvK3NLQUl1dnRkN3UrTnhlNUFXMHdkZVJsTjhOd2RDCmpOUEVscHpWbWJVcTRKVWFnRWl1VERrSHpzeEhwRktWSzdxNCs2M1NNMU45NVIxTmJkV2hzY2RDYitaQUp6VmMKb3lpM0I0M25qVE9RNXlPZisxQ2NlV3hHMWJRVnM1WnVmcHNNbGpxNFVpMC8xbHZoK3dqQ2hQNGtxS09KMnF4cQo0Umdxc2FoRFlWdlRIOXc3alhieUxlaU5kZDhYTTJ3OVUvdDd5MEZmLzl5aTBHRTQ0WmE0ckYyTE45ZDExVFBBCm1SR3VuVUhCY25XRXZnSkJRbDluSkVpVTBac252Z2MvdWJoUGdYUlI0WHEzN1owajRyN2cxU2dFRXp3eEE1N2QKZW15UHhnY1l4bi9lUjQ0L0tKNEVCcytsVkRSM3ZleUptK2tYUTk5YjIxLytqaDVYb3MxQW5YNWlJdHJlR0NjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + kind: Secret + metadata: + name: "openldap.{{ domain }}-ca" + namespace: "{{ openldap_namespace | default(namespace) }}" + +- name: Request cert for OpenLDAP + k8s: + state: present + definition: + apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + name: "openldap.{{ domain }}-crt" + namespace: "{{ openldap_namespace | default(namespace) }}" + spec: + secretName: "openldap.{{ domain }}-secret" + dnsNames: + - "openldap.{{ domain }}" + issuerRef: + name: letsencrypt-prod + # We can reference ClusterIssuers by changing the kind here. + # The default value is Issuer (i.e. 
a locally namespaced Issuer) + kind: ClusterIssuer + group: cert-manager.io + +- set_fact: + openldap_combined_values: "{{ openldap_default_values | combine(openldap_values, recursive=true) }}" + +- name: Deploy OpenLDAP + community.kubernetes.helm: + create_namespace: true + release_namespace: "{{ openldap_namespace | default(namespace) }}" + release_name: "{{ openldap_name | default('openldap') }}" + chart_ref: "{{ openldap_chart | default('ghp/openldap') }}" + chart_version: "{{ openldap_version | default(omit) }}" + release_values: "{{ openldap_combined_values | from_yaml }}" + diff --git a/roles/peertube/defaults/main.yaml b/roles/peertube/defaults/main.yaml new file mode 100644 index 0000000..c5032d2 --- /dev/null +++ b/roles/peertube/defaults/main.yaml @@ -0,0 +1,453 @@ +peertube_enabled: false +peertube_publish: false +peertube_use_external_db: true +peertube_default_values: + replicaCount: 1 + image: + repository: chocobozzz/peertube + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "v3.0.0-buster" + imagePullSecrets: [] + nameOverride: "" + fullnameOverride: "" + configAsCode: + enabled: true + config: + listen: + hostname: '0.0.0.0' + port: 9000 + # Correspond to your reverse proxy server_name/listen configuration + webserver: + https: true + hostname: 'peertube.{{ domain }}' + port: 443 + rates_limit: + api: + # 50 attempts in 10 seconds + window: 10 seconds + max: 50 + login: + # 15 attempts in 5 min + window: 5 minutes + max: 15 + signup: + # 2 attempts in 5 min (only succeeded attempts are taken into account) + window: 5 minutes + max: 2 + ask_send_email: + # 3 attempts in 5 min + window: 5 minutes + max: 3 + # Proxies to trust to get real client IP + # If you run PeerTube just behind a local proxy (nginx), keep 'loopback' + # If you run PeerTube behind a remote proxy, add the proxy IP address (or subnet) + trust_proxy: + - 'loopback' + - 'linklocal' + - 'uniquelocal' + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/16' + # Your database name will be database.name OR "peertube"+database.suffix + database: + hostname: '{{ namespace }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local' + port: 5432 + ssl: require + suffix: '' + username: '{{ peertube_db_username }}' + password: '{{ peertube_db_password }}' + pool: + max: 5 + # Redis server for short time storage + # You can also specify a 'socket' path to a unix socket but first need to + # comment out hostname and port + redis: + hostname: 'peertube-redis-master' + port: 6379 + auth: null + db: 0 + # SMTP server to send emails + smtp: + # smtp or sendmail + transport: smtp + # Path to sendmail command. 
Required if you use sendmail transport + sendmail: null + hostname: "mail.{{ domain }}" + port: 465 # If you use StartTLS: 587 + username: peertube + password: "{{ peertube_ldap_password }}" + tls: true # If you use StartTLS: false + disable_starttls: true + ca_file: null # Used for self signed certificates + from_address: 'peertube@{{ domain }}' + email: + body: + signature: "PeerTube" + subject: + prefix: "[PeerTube]" + # From the project root directory + storage: + tmp: '/var/www/peertube/storage/tmp/' # Use to download data (imports etc), store uploaded files before processing... + avatars: '/var/www/peertube/storage/avatars/' + videos: '/var/www/peertube/storage/videos/' + streaming_playlists: '/var/www/peertube/storage/streaming-playlists/' + redundancy: '/var/www/peertube/storage/redundancy/' + logs: '/var/www/peertube/storage/logs/' + previews: '/var/www/peertube/storage/previews/' + thumbnails: '/var/www/peertube/storage/thumbnails/' + torrents: '/var/www/peertube/storage/torrents/' + captions: '/var/www/peertube/storage/captions/' + cache: '/var/www/peertube/storage/cache/' + plugins: '/var/www/peertube/storage/plugins/' + # Overridable client files : logo.svg, favicon.png and icons/*.png (PWA) in client/dist/assets/images + # Could contain for example assets/images/favicon.png + # If the file exists, peertube will serve it + # If not, peertube will fallback to the default fil + client_overrides: '/var/www/peertube/storage/client-overrides/' + log: + level: 'info' # debug/info/warning/error + rotation: + enabled : true # Enabled by default, if disabled make sure that 'storage.logs' is pointing to a folder handled by logrotate + maxFileSize: 12MB + maxFiles: 20 + anonymizeIP: false + trending: + videos: + interval_days: 7 # Compute trending videos for the last x days + # Cache remote videos on your server, to help other instances to broadcast the video + # You can define multiple caches using different sizes/strategies + # Once you have defined your 
strategies, choose which instances you want to cache in admin -> manage follows -> following + redundancy: + videos: + check_interval: '1 hour' # How often you want to check new videos to cache + strategies: # Just uncomment strategies you want + # - + # size: '10GB' + # # Minimum time the video must remain in the cache. Only accept values > 10 hours (to not overload remote instances) + # min_lifetime: '48 hours' + # strategy: 'most-views' # Cache videos that have the most views + # - + # size: '10GB' + # # Minimum time the video must remain in the cache. Only accept values > 10 hours (to not overload remote instances) + # min_lifetime: '48 hours' + # strategy: 'trending' # Cache trending videos + # - + # size: '10GB' + # # Minimum time the video must remain in the cache. Only accept values > 10 hours (to not overload remote instances) + # min_lifetime: '48 hours' + # strategy: 'recently-added' # Cache recently added videos + # min_views: 10 # Having at least x views + # Other instances that duplicate your content + remote_redundancy: + videos: + # 'nobody': Do not accept remote redundancies + # 'anybody': Accept remote redundancies from anybody + # 'followings': Accept redundancies from instance followings + accept_from: 'followings' + csp: + enabled: false + report_only: true # CSP directives are still being tested, so disable the report only mode at your own risk! + report_uri: + tracker: + # If you disable the tracker, you disable the P2P aspect of PeerTube + enabled: true + # Only handle requests on your videos. + # If you set this to false it means you have a public tracker. 
+ # Then, it is possible that clients overload your instance with external torrents + private: true + # Reject peers that do a lot of announces (could improve privacy of TCP/UDP peers) + reject_too_many_announces: false + history: + videos: + # If you want to limit users videos history + # -1 means there is no limitations + # Other values could be '6 months' or '30 days' etc (PeerTube will periodically delete old entries from database) + max_age: -1 + views: + videos: + # PeerTube creates a database entry every hour for each video to track views over a period of time + # This is used in particular by the Trending page + # PeerTube could remove old remote video views if you want to reduce your database size (video view counter will not be altered) + # -1 means no cleanup + # Other values could be '6 months' or '30 days' etc (PeerTube will periodically delete old entries from database) + remote: + max_age: '30 days' + plugins: + # The website PeerTube will ask for available PeerTube plugins and themes + # This is an unmoderated plugin index, so only install plugins/themes you trust + index: + enabled: true + check_latest_versions_interval: '12 hours' # How often you want to check new plugins/themes versions + url: 'https://packages.joinpeertube.org' + federation: + videos: + federate_unlisted: false + ############################################################################### + # + # From this point, all the following keys can be overridden by the web interface + # (local-production.json file). If you need to change some values, prefer to + # use the web interface because the configuration will be automatically + # reloaded without any need to restart PeerTube. + # + # /!\ If you already have a local-production.json file, the modification of the + # following keys will have no effect /!\. 
+ # + ############################################################################### + cache: + previews: + size: 500 # Max number of previews you want to cache + captions: + size: 500 # Max number of video captions/subtitles you want to cache + admin: + # Used to generate the root user at first startup + # And to receive emails from the contact form + email: 'peertube@{{ domain }}' + contact_form: + enabled: true + signup: + enabled: false + limit: 10 # When the limit is reached, registrations are disabled. -1 == unlimited + requires_email_verification: false + filters: + cidr: # You can specify CIDR ranges to whitelist (empty = no filtering) or blacklist + whitelist: [] + blacklist: [] + user: + # Default value of maximum video BYTES the user can upload (does not take into account transcoded files). + # -1 == unlimited + video_quota: -1 + video_quota_daily: -1 + # If enabled, the video will be transcoded to mp4 (x264) with "faststart" flag + # In addition, if some resolutions are enabled the mp4 video file will be transcoded to these new resolutions. + # Please, do not disable transcoding since many uploaded videos will not work + transcoding: + enabled: true + # Allow your users to upload .mkv, .mov, .avi, .wmv, .flv, .f4v, .3g2, .3gp, .mts, m2ts, .mxf, .nut videos + allow_additional_extensions: true + # If a user uploads an audio file, PeerTube will create a video by merging the preview file and the audio file + allow_audio_files: true + threads: 2 + resolutions: # Only created if the original video has a higher resolution, uses more storage! 
+ 0p: true # audio-only (creates mp4 without video stream, always created when enabled)
+ 240p: true
+ 360p: true
+ 480p: true
+ 720p: true
+ 1080p: true
+ 2160p: true
+ # Generate videos in a WebTorrent format (what we do since the first PeerTube release)
+ # If you also enabled the hls format, it will multiply videos storage by 2
+ # If disabled, breaks federation with PeerTube instances < 2.1
+ webtorrent:
+ enabled: true
+ # /!\ Requires ffmpeg >= 4.1
+ # Generate HLS playlists and fragmented MP4 files. Better playback than with WebTorrent:
+ # * Resolution change is smoother
+ # * Faster playback in particular with long videos
+ # * More stable playback (less bugs/infinite loading)
+ # If you also enabled the webtorrent format, it will multiply videos storage by 2
+ hls:
+ enabled: true
+ live:
+ enabled: true
+ # Limit the duration of live streams
+ # Set null to disable duration limit
+ max_duration: -1 # For example: '5 hours'
+ # Limit max number of live videos created on your instance
+ # -1 == unlimited
+ max_instance_lives: 10
+ # Limit max number of live videos created by a user on your instance
+ # -1 == unlimited
+ max_user_lives: 2
+ # Allow your users to save a replay of their live
+ # PeerTube will transcode segments in a video file
+ # If the user daily/total quota is reached, PeerTube will stop the live
+ # /!\ transcoding.enabled (and not live.transcoding.enabled) has to be true to create a replay
+ allow_replay: true
+ rtmp:
+ port: 1935
+ # Allow transcoding of the live stream into multiple resolutions
+ transcoding:
+ enabled: true
+ threads: 2
+ resolutions:
+ 240p: true
+ 360p: true
+ 480p: true
+ 720p: true
+ 1080p: true
+ 2160p: true
+ import:
+ # Add ability for your users to import remote videos (from YouTube, torrent...) 
+ videos: + http: # Classic HTTP or all sites supported by youtube-dl https://rg3.github.io/youtube-dl/supportedsites.html + enabled: true + # You can use an HTTP/HTTPS/SOCKS proxy with youtube-dl + proxy: + enabled: false + url: "" + torrent: # Magnet URI or torrent file (use classic TCP/UDP/WebSeed to download the file) + enabled: true + auto_blacklist: + # New videos automatically blacklisted so moderators can review before publishing + videos: + of_users: + enabled: false + # Instance settings + instance: + name: 'GHP PeerTube' + short_description: 'PeerTube, a federated (ActivityPub) video streaming platform using P2P (BitTorrent) directly in the web browser with WebTorrent and Angular.' + description: 'Welcome to GHP PeerTube instance!' # Support markdown + terms: 'No terms for now.' # Support markdown + code_of_conduct: '' # Supports markdown + # Who moderates the instance? What is the policy regarding NSFW videos? Political videos? etc + moderation_information: '' # Supports markdown + # Why did you create this instance? + creation_reason: '' + # Who is behind the instance? A single person? A non profit? + administrator: '' + # How long do you plan to maintain this instance? + maintenance_lifetime: '' + # How will you pay the PeerTube instance server? With your own funds? With users donations? Advertising? + business_model: '' + # If you want to explain on what type of hardware your PeerTube instance runs + # Example: "2 vCore, 2GB RAM..." + hardware_information: '' # Supports Markdown + # What are the main languages of your instance? 
To interact with your users for example + # Uncomment or add the languages you want + # List of supported languages: https://peertube.cpy.re/api/v1/videos/languages + languages: + # - en + # - es + # - fr + # You can specify the main categories of your instance (dedicated to music, gaming or politics etc) + # Uncomment or add the category ids you want + # List of supported categories: https://peertube.cpy.re/api/v1/videos/categories + categories: + # - 1 # Music + # - 2 # Films + # - 3 # Vehicles + # - 4 # Art + # - 5 # Sports + # - 6 # Travels + # - 7 # Gaming + # - 8 # People + # - 9 # Comedy + # - 10 # Entertainment + # - 11 # News & Politics + # - 12 # How To + # - 13 # Education + # - 14 # Activism + # - 15 # Science & Technology + # - 16 # Animals + # - 17 # Kids + # - 18 # Food + default_client_route: '/videos/recently-added' + # Whether or not the instance is dedicated to NSFW content + # Enabling it will allow other administrators to know that you are mainly federating sensitive content + # Moreover, the NSFW checkbox on video upload will be automatically checked by default + is_nsfw: false + # By default, "do_not_list" or "blur" or "display" NSFW videos + # Could be overridden per user with a setting + default_nsfw_policy: 'display' + customizations: + javascript: '' # Directly your JavaScript code (without