GHP publish

Author: ace
Date: 2021-01-09 20:54:42 +03:00
Commit: b4b740a239
173 changed files with 5392 additions and 0 deletions

@@ -0,0 +1,276 @@
adguard_enabled: false
adguard_publish: false
adguard_default_values:
# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: RollingUpdate
configAsCode:
enabled: true
resources: {}
# requests:
# memory: 128Mi
# cpu: 100m
image:
repository: busybox
tag: latest
pullPolicy: Always
config:
bind_host: 0.0.0.0
bind_port: 3000
users:
- name: admin
password: "{{ adguard_admin_htpasswd_hash }}"
http_proxy: ""
language: "en"
rlimit_nofile: 0
debug_pprof: false
web_session_ttl: 720
dns:
bind_host: 0.0.0.0
port: 53
statistics_interval: 1
querylog_enabled: true
querylog_interval: 90
querylog_size_memory: 1000
anonymize_client_ip: false
protection_enabled: true
blocking_mode: default
blocking_ipv4: ""
blocking_ipv6: ""
blocked_response_ttl: 10
parental_block_host: family-block.dns.adguard.com
safebrowsing_block_host: standard-block.dns.adguard.com
ratelimit: 0
ratelimit_whitelist: []
refuse_any: true
upstream_dns:
- https://dns10.quad9.net/dns-query
bootstrap_dns:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
all_servers: false
fastest_addr: false
allowed_clients: []
# - 10.0.0.1
# - 10.0.1.1/24
disallowed_clients: []
# - 10.0.1.1
# - 10.0.11.1/24
blocked_hosts: []
# - example.org
# - '*.example.org'
# - '||example.org^'
cache_size: 4194304
cache_ttl_min: 0
cache_ttl_max: 0
bogus_nxdomain: []
aaaa_disabled: false
enable_dnssec: false
edns_client_subnet: false
filtering_enabled: true
filters_update_interval: 8
parental_enabled: false
safesearch_enabled: false
safebrowsing_enabled: false
safebrowsing_cache_size: 1048576
safesearch_cache_size: 1048576
parental_cache_size: 1048576
cache_time: 30
rewrites: []
# - domain: example.org
# answer: 127.0.0.1
# - domain: '*.example.org'
# answer: 127.0.0.1
blocked_services:
- facebook
- origin
- twitter
- snapchat
- skype
- whatsapp
- instagram
- youtube
- netflix
- twitch
- discord
- amazon
- ebay
- cloudflare
- steam
- epic_games
- reddit
- ok
- vk
- mail_ru
- tiktok
tls:
enabled: true
server_name: "{{ adguard_dns_name | default('dns.' + domain) }}"
force_https: false
port_https: 443
port_dns_over_tls: 853
allow_unencrypted_doh: false
strict_sni_check: false
certificate_chain: ""
private_key: ""
certificate_path: "/certs/tls.crt"
private_key_path: "/certs/tls.key"
filters:
- enabled: true
url: https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt
name: AdGuard DNS filter
id: 1
- enabled: false
url: https://adaway.org/hosts.txt
name: AdAway
id: 2
- enabled: false
url: https://www.malwaredomainlist.com/hostslist/hosts.txt
name: MalwareDomainList.com Hosts List
id: 4
whitelist_filters: []
# - enabled: true
# url: https://easylist-downloads.adblockplus.org/exceptionrules.txt
# name: Allow nonintrusive advertising
# id: 1595760241
user_rules: []
# - '||example.org^'
# - '@@||example.org^'
# - 127.0.0.1 example.org
# - '! Here goes a comment'
# - '# Also a comment'
dhcp:
enabled: false
interface_name: ""
gateway_ip: ""
subnet_mask: ""
range_start: ""
range_end: ""
lease_duration: 86400
icmp_timeout_msec: 1000
clients: []
# - name: myuser
# tags:
# - user_admin
# ids:
# - 192.168.91.1
# use_global_settings: true
# filtering_enabled: false
# parental_enabled: false
# safesearch_enabled: false
# safebrowsing_enabled: false
# use_global_blocked_services: true
# blocked_services: []
# upstreams: []
log_file: ""
verbose: false
schema_version: 6
tlsSecretName: "{{ adguard_dns_name | default('dns.' + domain) }}-secret"
timezone: "UTC"
ingress:
enabled: true
annotations:
cert-manager.io/acme-challenge-type: dns01
cert-manager.io/acme-dns01-provider: rfc2136
cert-manager.io/cluster-issuer: letsencrypt-prod
kubernetes.io/ingress.class: "{{ external_ingress_class if adguard_publish else internal_ingress_class }}"
kubernetes.io/tls-acme: "true"
path: /
hosts:
- adguard.{{ domain }}
tls:
- secretName: adguard.{{ domain }}-tls
hosts:
- adguard.{{ domain }}
service:
type: ClusterIP
# externalTrafficPolicy: Local
# externalIPs: []
# loadBalancerIP: ""
# a fixed LoadBalancer IP
# loadBalancerSourceRanges: []
annotations:
# metallb.universe.tf/address-pool: network-services
# metallb.universe.tf/allow-shared-ip: adguard-home-svc
serviceTCP:
enabled: true
type: LoadBalancer
# externalTrafficPolicy: Local
# externalIPs: []
loadBalancerIP: "{{ adguard_loadbalancer_ip }}"
# a fixed LoadBalancer IP
# loadBalancerSourceRanges: []
annotations:
# metallb.universe.tf/address-pool: network-services
metallb.universe.tf/allow-shared-ip: adguard-home-svc
serviceUDP:
enabled: true
type: LoadBalancer
# externalTrafficPolicy: Local
# externalIPs: []
loadBalancerIP: "{{ adguard_loadbalancer_ip }}"
# a fixed LoadBalancer IP
# loadBalancerSourceRanges: []
annotations:
# metallb.universe.tf/address-pool: network-services
metallb.universe.tf/allow-shared-ip: adguard-home-svc
serviceDNSOverTLS:
enabled: true
## Enable if you use AdGuard as a DNS over TLS/HTTPS server
type: LoadBalancer
# externalTrafficPolicy: Local
# externalIPs: []
loadBalancerIP: "{{ adguard_loadbalancer_ip }}"
# a fixed LoadBalancer IP
# loadBalancerSourceRanges: []
annotations:
# metallb.universe.tf/address-pool: network-services
metallb.universe.tf/allow-shared-ip: adguard-home-svc
serviceDNSOverHTTPS:
enabled: true
## Enable if you use AdGuard as a DNS over TLS/HTTPS server
type: LoadBalancer
# externalTrafficPolicy: Local
# externalIPs: []
loadBalancerIP: "{{ adguard_loadbalancer_ip }}"
# a fixed LoadBalancer IP
# loadBalancerSourceRanges: []
annotations:
# metallb.universe.tf/address-pool: network-services
metallb.universe.tf/allow-shared-ip: adguard-home-svc
external-dns.alpha.kubernetes.io/hostname: "{{ adguard_dns_name | default('dns.' + domain) }}"
serviceDHCP:
enabled: false
## Enable if you use AdGuard as a DHCP Server
type: NodePort
# externalTrafficPolicy: Local
# externalIPs: []
loadBalancerIP: ""
# a fixed LoadBalancer IP
annotations: {}
# metallb.universe.tf/address-pool: network-services
# metallb.universe.tf/allow-shared-ip: adguard-home-svc
persistence:
config:
enabled: true
accessMode: "{{ adguard_config_storage_mode | default('ReadWriteMany') }}"
size: "{{ adguard_config_size | default('20Mi') }}"
storageClass: "{{ adguard_config_storage | default('nfs-ssd') }}"
## Do not delete the pvc upon helm uninstall
skipuninstall: false
work:
enabled: true
accessMode: "{{ adguard_work_storage_mode | default('ReadWriteMany') }}"
size: "{{ adguard_work_size | default('10Gi') }}"
storageClass: "{{ adguard_work_storage | default('nfs-ssd') }}"
## Do not delete the pvc upon helm uninstall
skipuninstall: false
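These defaults are deep-merged with user overrides via Ansible's combine filter in the tasks that follow, so an inventory only needs to set the keys it changes (the tasks assume adguard_values is defined, even if empty). A minimal sketch of a hypothetical override:

adguard_values:
  configAsCode:
    config:
      dns:
        upstream_dns:
          - https://dns.quad9.net/dns-query

With combine(..., recursive=true) this replaces only the upstream_dns list and leaves every other default above intact.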

@@ -0,0 +1,32 @@
- name: Request cert for Adguard Home
k8s:
state: present
definition:
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: "{{ adguard_dns_name | default('dns.' + domain) }}-crt"
namespace: "{{ adguard_namespace | default(namespace) }}"
spec:
secretName: "{{ adguard_dns_name | default('dns.' + domain) }}-secret"
dnsNames:
- "{{ adguard_dns_name | default('dns.' + domain) }}"
issuerRef:
name: letsencrypt-prod
# We can reference ClusterIssuers by changing the kind here.
# The default value is Issuer (i.e. a locally namespaced Issuer)
kind: ClusterIssuer
group: cert-manager.io
- set_fact:
adguard_combined_values: "{{ adguard_default_values | combine(adguard_values, recursive=true) }}"
- name: Deploy Adguard Home
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ adguard_namespace | default(namespace) }}"
release_name: "{{ adguard_name | default('adguard') }}"
chart_ref: "{{ adguard_chart | default('ghp/adguard-home') }}"
chart_version: "{{ adguard_version | default(omit) }}"
release_values: "{{ adguard_combined_values | from_yaml }}"

@@ -0,0 +1,40 @@
bitwarden_enabled: true
bitwarden_publish: false
bitwarden_use_external_db: true
bitwarden_default_values:
env:
SIGNUPS_ALLOWED: true
INVITATIONS_ALLOWED: true
DATABASE_URL: "postgresql://{{ bitwarden_db_username }}:{{ bitwarden_db_password }}@{{ postgres_db_team | default(namespace) }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local:5432/bitwarden?sslmode=require"
DOMAIN: "https://bitwarden.{{ domain }}"
SMTP_FROM: "bitwarden@{{ domain }}"
SMTP_HOST: "mail.{{ domain }}"
SMTP_PASSWORD: "{{ bitwarden_ldap_pass | default(bitwarden_ldap_password) }}"
SMTP_SSL: "true"
SMTP_EXPLICIT_TLS: "true"
SMTP_PORT: "465"
SMTP_USERNAME: "bitwarden@{{ domain }}"
SMTP_TIMEOUT: "120"
LOG_LEVEL: "debug"
EXTENDED_LOGGING: "true"
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "{{ external_ingress_class if bitwarden_publish else internal_ingress_class }}"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
cert-manager.io/acme-dns01-provider: "rfc2136"
cert-manager.io/acme-challenge-type: "dns01"
kubernetes.io/tls-acme: "true"
path: /
hosts:
- "bitwarden.{{ domain }}"
tls:
- secretName: "bitwarden.{{ domain }}-tls"
hosts:
- "bitwarden.{{ domain }}"
persistence:
enabled: true
accessMode: "{{ bitwarden_storage_mode | default('ReadWriteMany') }}"
size: "{{ bitwarden_size | default('8Gi') }}"
storageClass: "{{ bitwarden_storage | default('nfs-ssd') }}"

@@ -0,0 +1,19 @@
- name: Import secret.yaml to obtain secrets
include_tasks: secrets.yaml
when:
- bitwarden_use_external_db
- postgres_enable
- set_fact:
bitwarden_combined_values: "{{ bitwarden_default_values | combine(bitwarden_values, recursive=true) }}"
- name: Deploy Bitwarden
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ bitwarden_namespace | default(namespace) }}"
release_name: "{{ bitwarden_name | default('bitwarden') }}"
chart_ref: "{{ bitwarden_chart | default('ghp/bitwarden') }}"
chart_version: "{{ bitwarden_version | default(omit) }}"
release_values: "{{ bitwarden_combined_values | from_yaml }}"
wait: true

@@ -0,0 +1,25 @@
- block:
- name: Set DB namespace for secret lookup
set_fact:
db_namespace: "{{ bitwarden_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}"
- name: Set DB secret name for lookup
set_fact:
db_secret_name: "bitwarden.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do"
- name: Lookup Bitwarden DB secret
set_fact:
bitwarden_db_secret: "{{ lookup('k8s', kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}"
- debug:
msg: "{{ bitwarden_db_secret }}"
verbosity: 2
- name: Set Bitwarden DB username
set_fact:
bitwarden_db_username: "{{ bitwarden_db_secret.data.username | b64decode }}"
- name: Set Bitwarden DB password
set_fact:
bitwarden_db_password: "{{ bitwarden_db_secret.data.password | b64decode }}"
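The secret name follows the Zalando postgres-operator convention <user>.<team>-postgres.credentials.postgresql.acid.zalan.do. With namespace and postgres_db_team both set to a hypothetical ghp, the facts above resolve roughly to:

db_namespace: ghp
db_secret_name: bitwarden.ghp-postgres.credentials.postgresql.acid.zalan.do

The k8s lookup returns the whole Secret object, whose data fields are base64-encoded; hence the b64decode filters.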

@@ -0,0 +1,6 @@
cert_manager_version: v1.1.0
cert_manager_namespace: cert-manager
lets_encrypt_mailbox: "admin@{{ domain }}"
cert_manager_base64_tsig_key: "{{ k8s_tsig | b64encode }}"
cert_manager_default_values:
installCRDs: true

@@ -0,0 +1,88 @@
- set_fact:
cert_manager_combined_values: "{{ cert_manager_default_values | combine(cert_manager_values, recursive=true) }}"
- name: Deploy Cert-manager {{ cert_manager_version }}
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ cert_manager_namespace | default('cert-manager') }}"
release_name: "{{ cert_manager_name | default('cert-manager') }}"
chart_ref: "{{ cert_manager_chart | default('jetstack/cert-manager') }}"
chart_version: "{{ cert_manager_version }}"
release_values: "{{ cert_manager_combined_values | from_yaml | default(omit) }}"
wait: true
- name: Create secret for DNS RFC2136 (NSUPDATE)
k8s:
state: present
definition:
apiVersion: v1
data:
tsig-secret-key: "{{ cert_manager_base64_tsig_key }}"
kind: Secret
metadata:
name: tsig-secret
namespace: cert-manager
type: Opaque
- name: Create Production ClusterIssuer for Let's Encrypt
k8s:
state: present
definition:
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: "{{ lets_encrypt_mailbox }}"
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-prod
# ACME challenge solvers: DNS-01 via RFC2136 (an HTTP-01 example is left commented out below)
solvers:
#- http01:
# ingress:
# class: nginx
- dns01:
rfc2136:
nameserver: "{{ external_dns_ip | default(dns_ip) }}:53"
tsigAlgorithm: HMACSHA512
tsigKeyName: k8s
tsigSecretSecretRef:
key: tsig-secret-key
name: tsig-secret
- name: Create Staging ClusterIssuer for Let's Encrypt
k8s:
state: present
definition:
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# The ACME server URL
server: https://acme-staging-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: "{{ lets_encrypt_mailbox }}"
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-staging
# ACME challenge solvers: DNS-01 via RFC2136 (an HTTP-01 example is left commented out below)
solvers:
#- http01:
# ingress:
# class: nginx
- dns01:
rfc2136:
nameserver: "{{ external_dns_ip | default(dns_ip) }}:53"
tsigAlgorithm: HMACSHA512
tsigKeyName: k8s
tsigSecretSecretRef:
key: tsig-secret-key
name: tsig-secret
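Both issuers are cluster-scoped, so any Ingress in any namespace can request a certificate by annotation; pointing cert-manager.io/cluster-issuer at letsencrypt-staging first is a cheap way to test the DNS-01 setup without burning production rate limits. A minimal sketch of the annotation set used throughout this repo:

metadata:
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-staging
    cert-manager.io/acme-challenge-type: dns01
    cert-manager.io/acme-dns01-provider: rfc2136
    kubernetes.io/tls-acme: "true"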

@@ -0,0 +1,86 @@
chartmuseum_enabled: true
chartmuseum_publish: false
chartmuseum_default_values:
env:
open:
# storage backend, can be one of: local, alibaba, amazon, google, microsoft, oracle
STORAGE: local
# levels of nested repos for multitenancy. The default depth is 0 (single-tenant server)
DEPTH: 0
# sets the base context path
CONTEXT_PATH: /
# show debug messages
DEBUG: false
# output structured logs as json
LOG_JSON: true
# disable use of index-cache.yaml
DISABLE_STATEFILES: false
# disable Prometheus metrics
DISABLE_METRICS: true
# disable all routes prefixed with /api
DISABLE_API: false
# allow chart versions to be re-uploaded
ALLOW_OVERWRITE: true
# allow anonymous GET operations when auth is used
AUTH_ANONYMOUS_GET: true
secret:
# username for basic http authentication
BASIC_AUTH_USER: "{{ chartmuseum_admin_login | default('admin') }}"
# password for basic http authentication
BASIC_AUTH_PASS: "{{ chartmuseum_admin_pass | default(chartmuseum_admin_password) }}"
persistence:
enabled: true
accessMode: "{{ chartmuseum_storage_mode | default('ReadWriteMany') }}"
size: "{{ chartmuseum_size | default('10Gi') }}"
labels: {}
path: /storage
storageClass: "{{ chartmuseum_storage | default('nfs-hdd') }}"
## Ingress for load balancer
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
cert-manager.io/acme-dns01-provider: "rfc2136"
cert-manager.io/acme-challenge-type: "dns01"
kubernetes.io/ingress.class: "{{ external_ingress_class if chartmuseum_publish else internal_ingress_class }}"
kubernetes.io/tls-acme: "true"
hosts:
- name: charts.{{ domain }}
path: /
tls: true
tlsSecret: charts.{{ domain }}-tls
chartmuseum_readonly_ingress_definition: |
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
cert-manager.io/acme-challenge-type: dns01
cert-manager.io/acme-dns01-provider: rfc2136
cert-manager.io/cluster-issuer: letsencrypt-prod
kubernetes.io/ingress.class: "{{ external_ingress_class }}"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
nginx.ingress.kubernetes.io/configuration-snippet: |-
limit_except GET {
deny all;
}
name: chartmuseum-public
namespace: "{{ chartmuseum_namespace | default(namespace) }}"
spec:
rules:
- host: "{{ chartmuseum_readonly_ingress }}"
http:
paths:
- backend:
serviceName: chartmuseum-chartmuseum
servicePort: 8080
path: /
tls:
- hosts:
- "{{ chartmuseum_readonly_ingress }}"
secretName: "{{ chartmuseum_readonly_ingress }}-tls"
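The read-only ingress above is rendered only when chartmuseum_readonly_ingress is defined (see the deploy task that follows); its limit_except GET snippet rejects uploads on the public hostname, while the basic-auth ingress keeps full API access. A hypothetical inventory entry enabling it:

chartmuseum_readonly_ingress: charts.example.com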

@@ -0,0 +1,20 @@
- set_fact:
chartmuseum_combined_values: "{{ chartmuseum_default_values | combine(chartmuseum_values, recursive=true) }}"
- name: Deploy ChartMuseum
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ chartmuseum_namespace | default(namespace) }}"
release_name: "{{ chartmuseum_name | default('chartmuseum') }}"
chart_ref: "{{ chartmuseum_chart | default('ghp/chartmuseum') }}"
chart_version: "{{ chartmuseum_version | default(omit) }}"
release_values: "{{ chartmuseum_combined_values | from_yaml }}"
wait: true
- name: Deploy readonly public ingress for ChartMuseum
when: chartmuseum_readonly_ingress is defined
k8s:
state: present
definition:
"{{ chartmuseum_readonly_ingress_definition }}"

@@ -0,0 +1,3 @@
dockerize: false
namespace: ddclient
ddclient_image_tag: v3.9.1-ls45

@@ -0,0 +1,27 @@
---
- name: start ddclient
systemd:
name: ddclient
state: started
enabled: yes
- name: restart ddclient
systemd:
name: ddclient
state: restarted
enabled: yes
- name: restart docker ddclient
community.general.docker_container:
name: "{{ namespace }}-ddclient"
image: "{{ docker_registry }}/ddclient:{{ ddclient_image_tag | default('v3.9.1-ls45') }}"
state: started
restart: yes
container_default_behavior: no_defaults
detach: true
restart_policy: unless-stopped
volumes:
- "/opt/{{ namespace }}/ddclient.conf:/config/ddclient.conf"
- "/opt/{{ namespace }}/Kvps.key:/config/Kvps.key"
- "/opt/{{ namespace }}/Kvps.private:/config/Kvps.private"

@@ -0,0 +1,18 @@
---
- block:
- name: copy public key for ddclient
copy:
dest: /etc/Kvps.key
src: files/Kvps.key
- name: copy private key for ddclient
copy:
dest: /etc/Kvps.private
src: files/Kvps.private
- name: configure ddclient.conf
copy:
content: "{{ ddclient_conf }}"
dest: "/etc/ddclient.conf"
notify: restart ddclient
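ddclient_conf itself is expected from the inventory; the Kvps.* key files suggest TSIG-signed dynamic DNS updates. A rough sketch of such a configuration as a YAML literal block, assuming ddclient's nsupdate protocol and hypothetical names (not taken from this repo; the non-dockerized variant would reference /etc/Kvps.private instead):

ddclient_conf: |
  daemon=300
  use=web
  protocol=nsupdate
  server=ns.example.com          # hypothetical master nameserver
  zone=example.com               # hypothetical zone
  password=/config/Kvps.private  # TSIG key file mounted above
  host.example.com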

@@ -0,0 +1,35 @@
---
- name: Create configuration dir for {{ namespace }}
file:
name: "/opt/{{ namespace }}"
state: directory
- name: Copy ddclient configuration for {{ namespace }}
copy:
dest: "/opt/{{ namespace }}/ddclient.conf"
content: "{{ ddclient_conf }}"
notify: restart docker ddclient
- name: Copy Kvps.key for {{ namespace }}
copy:
dest: "/opt/{{ namespace }}/Kvps.key"
content: "{{ ddclient_tsig_public_key_base64 | b64decode }}"
notify: restart docker ddclient
- name: Copy Kvps.private for {{ namespace }}
copy:
dest: "/opt/{{ namespace }}/Kvps.private"
content: "{{ ddclient_tsig_private_key_base64 | b64decode }}"
notify: restart docker ddclient
- name: Start ddclient in docker for {{ namespace }}
docker_container:
name: "{{ namespace }}-ddclient"
image: "{{ docker_registry }}/ddclient:{{ ddclient_image_tag }}"
state: started
container_default_behavior: no_defaults
restart_policy: unless-stopped
volumes:
- "/opt/{{ namespace }}/ddclient.conf:/config/ddclient.conf"
- "/opt/{{ namespace }}/Kvps.key:/config/Kvps.key"
- "/opt/{{ namespace }}/Kvps.private:/config/Kvps.private"

@@ -0,0 +1,11 @@
---
- block:
- name: installing ddclient
package:
name:
- ddclient
state: present
notify: start ddclient
register: install_ddclient_result
tags:
- ddclient-install

@@ -0,0 +1,12 @@
---
- block:
- import_tasks: install.yml
when: not dockerize
- import_tasks: configure.yml
when: not dockerize
become: true
- block:
- import_tasks: docker.yml
when: dockerize
become: true

@@ -0,0 +1 @@
install_docker_ce_repo: 'yes'

@@ -0,0 +1,54 @@
---
- block:
- name: Install packages for Docker
yum:
name:
- device-mapper-persistent-data
- lvm2
- libselinux-python
state: present
- name: add docker-ce repo
yum_repository:
name: docker-ce-stable
file: docker-ce
description: Docker CE Stable - $basearch
enabled: yes
baseurl: https://download.docker.com/linux/centos/7/$basearch/stable
gpgkey: https://download.docker.com/linux/centos/gpg
gpgcheck: yes
when: install_docker_ce_repo == 'yes'
become: yes
- name: Install Docker
package:
name: docker-ce
state: present
become: yes
- name: Create /etc/docker directory
file:
path: /etc/docker
state: directory
- name: Deploy Docker daemon.json
template:
src: daemon.json.j2
dest: /etc/docker/daemon.json
register: daemon_config_result
- name: Start Docker service
service:
name: docker
state: started
enabled: yes
become: yes
- name: Restart Docker
systemd:
state: restarted
name: docker
when: daemon_config_result.changed
tags:
- docker

@@ -0,0 +1,5 @@
---
- block:
- import_tasks: docker.yml
tags:
- docker

@@ -0,0 +1,18 @@
{% if docker is defined %}
{% if docker.insecure_registries is defined %}
{% set insecure_registries = docker.insecure_registries %}
{% endif %}
{% endif %}
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
{% if insecure_registries is defined %}
"insecure-registries" : ["{{ insecure_registries }}"],
{% endif %}
"log-opts": {
"max-size": "100m"
}
}
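With a hypothetical inventory value of docker.insecure_registries set to registry.local:5000, the template above renders to:

{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "insecure-registries" : ["registry.local:5000"],
  "log-opts": {
    "max-size": "100m"
  }
}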

@@ -0,0 +1,201 @@
dovecot_default_values:
replicaCount: 1
persistence:
enabled: true
existingClaim: mailboxes
tls:
enabled: true
existingSecret: mail.{{ domain }}-secret
dovecot:
image:
repository: "{{ docker_registry }}/dovecot"
tag: latest
pullPolicy: Always
configmaps:
dovecot:
dovecot: |
protocols = imap lmtp sieve
mail_max_userip_connections = 1000
mail_plugins = virtual
haproxy_trusted_networks = 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16
haproxy_timeout = 30s
dict {
#quota = mysql:/etc/dovecot/dovecot-dict-sql.conf.ext
#expire = sqlite:/etc/dovecot/dovecot-dict-sql.conf.ext
}
# Most of the actual configuration gets included below. The filenames are
# first sorted by their ASCII value and parsed in that order. The 00-prefixes
# in filenames are intended to make it easier to understand the ordering.
!include conf.d/*.conf
# A config file can also be included without giving an error if
# it's not found:
!include_try local.conf
ldap: |
uris = ldaps://openldap.{{ domain }}
dn = uid=ldapbind,ou=services,{{ openldap_domain }}
dnpass = {{ ldapbind_pass | default(ldapbind_password) }}
auth_bind = yes
auth_bind_userdn = uid=%n,ou=users,{{ openldap_domain }}
tls = no
ldap_version = 3
base = ou=users,{{ openldap_domain }}
deref = never
scope = subtree
user_filter = (&(objectClass=posixAccount)(mail=%u))
user_attrs = cn=home=/home/vmail/%$
pass_filter = (&(objectClass=posixAccount)(mail=%u))
pass_attrs = uid=user,userPassword=password
#default_pass_scheme = CRYPT
confd:
auth-ldap: |
passdb {
driver = ldap
# Path for LDAP configuration file, see example-config/dovecot-ldap.conf.ext
args = /etc/dovecot/ldap.conf
}
userdb {
driver = ldap
args = /etc/dovecot/ldap.conf
}
10-auth: |
auth_default_realm = {{ domain }}
auth_username_format = %Lu
auth_mechanisms = plain login
10-mail: |
mail_location = maildir:%h
namespace inbox {
inbox = yes
}
mail_uid = vmail
mail_gid = vmail
first_valid_uid = 1000
last_valid_uid = 1000
first_valid_gid = 1000
last_valid_gid = 1000
protocol !indexer-worker {
}
mbox_write_locks = fcntl
10-master: |
protocol imap {
mail_plugins = virtual
}
service imap-login {
inet_listener imap {
#port = 143
}
inet_listener imaps {
#port = 993
#ssl = yes
}
inet_listener imap_haproxy {
port = 1109
haproxy = yes
}
inet_listener imaps_haproxy {
port = 10993
ssl = yes
haproxy = yes
}
}
service pop3-login {
inet_listener pop3 {
#port = 110
}
inet_listener pop3s {
#port = 995
#ssl = yes
}
}
service lmtp {
inet_listener lmtp {
port = 24
}
unix_listener /var/spool/postfix/private/dovecot-lmtp {
mode = 0600
group = postfix
user = postfix
}
user = vmail
}
service imap {
}
service pop3 {
}
service auth {
inet_listener {
port = 12345
}
unix_listener auth-userdb {
mode = 0660
user = vmail
#group =
}
# Postfix smtp-auth
unix_listener /var/spool/postfix/private/auth {
mode = 0660
user = postfix
group = postfix
}
}
service auth-worker {
}
service dict {
unix_listener dict {
}
}
10-ssl: |
ssl = required
#verbose_ssl = yes
ssl_prefer_server_ciphers = yes
ssl_min_protocol = TLSv1.2
ssl_cert = </tls/tls.crt
ssl_key = </tls/tls.key
10-logging: |
log_path = /dev/stderr
info_log_path = /dev/stdout
debug_log_path = /dev/stdout
15-lda: |
postmaster_address = postmaster@{{ domain }}
hostname = {{ domain }}
rejection_reason = Your message to <%t> was automatically rejected:%n%r
protocol lda {
mail_plugins = virtual sieve
}
20-lmtp: |
protocol lmtp {
mail_plugins = virtual sieve
postmaster_address = postmaster@{{ domain }}
}
20-managesieve: |
service managesieve-login {
inet_listener sieve {
port = 4190
ssl = yes
}
service_count = 1
vsz_limit = 64M
}
service managesieve {
process_limit = 1024
}
service:
type: LoadBalancer
loadBalancerIP: "{{ dovecot_loadbalancer_ip | default(omit) }}"

@@ -0,0 +1,13 @@
- set_fact:
dovecot_combined_values: "{{ dovecot_default_values | combine(dovecot_values, recursive=true) }}"
- name: Deploy Dovecot
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ dovecot_namespace | default(mail_namespace) | default(namespace) }}"
release_name: "{{ dovecot_name | default('dovecot') }}"
chart_ref: "{{ dovecot_chart | default('ghp/dovecot') }}"
chart_version: "{{ dovecot_version | default(omit) }}"
release_values: "{{ dovecot_combined_values | from_yaml }}"
wait: true

@@ -0,0 +1,50 @@
drone_enabled: true
drone_publish: false
drone_use_external_db: true
drone_default_values:
service:
type: ClusterIP
port: 80
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "{{ external_ingress_class if drone_publish else internal_ingress_class }}"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
cert-manager.io/acme-dns01-provider: "rfc2136"
cert-manager.io/acme-challenge-type: "dns01"
hosts:
- host: "drone.{{ domain }}"
paths:
- "/"
tls:
- secretName: "drone.{{ domain }}-tls"
hosts:
- "drone.{{ domain }}"
persistentVolume:
enabled: true
accessModes:
- "{{ drone_storage_mode | default('ReadWriteMany') }}"
mountPath: /data
size: "{{ drone_size | default('8Gi') }}"
storageClass: "{{ drone_storage | default('nfs-ssd') }}"
env:
DRONE_SERVER_HOST: "drone.{{ domain }}"
DRONE_SERVER_PROTO: https
DRONE_RPC_SECRET: "{{ drone_rpc_secret | default(omit) }}"
DRONE_DATABASE_DRIVER: "postgres"
DRONE_DATABASE_DATASOURCE: "postgres://{{ drone_db_username }}:{{ drone_db_password }}@{{ postgres_db_team | default(namespace) }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local:5432/drone?sslmode=disable"
DRONE_DATABASE_SECRET: "{{ drone_database_secret | default(omit) }}"
DRONE_GITEA_CLIENT_ID: "{{ drone_gitea_client_id | default(omit) }}"
DRONE_GITEA_CLIENT_SECRET: "{{ drone_gitea_client_secret | default(omit) }}"
DRONE_GITEA_SERVER: "https://gitea.{{ domain }}"
drone_runner_kube_default_values:
rbac:
buildNamespaces:
- "{{ drone_namespace | default(namespace) }}"
env:
DRONE_RPC_SECRET: "{{ drone_rpc_secret }}"
DRONE_RPC_HOST: "drone.{{ domain }}"
DRONE_RPC_PROTO: https
DRONE_NAMESPACE_DEFAULT: "{{ drone_namespace | default(namespace) }}"
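The RPC secret is shared between server and runner, and the Gitea client pair comes from registering Drone as an OAuth2 application in Gitea. A hypothetical inventory sketch (all values are placeholders):

drone_rpc_secret: "{{ vault_drone_rpc_secret }}"    # e.g. generated with: openssl rand -hex 16
drone_gitea_client_id: "<oauth2 client id>"         # from the Gitea OAuth2 application registration
drone_gitea_client_secret: "{{ vault_drone_gitea_client_secret }}"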

@@ -0,0 +1,31 @@
- name: Import secret.yaml to obtain secrets
include_tasks: secrets.yaml
when:
- drone_use_external_db
- postgres_enable
- set_fact:
drone_combined_values: "{{ drone_default_values | combine(drone_values, recursive=true) }}"
- set_fact:
drone_runner_kube_combined_values: "{{ drone_runner_kube_default_values | combine(drone_runner_kube_values, recursive=true) }}"
- name: Deploy Drone Server
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ drone_namespace | default(namespace) }}"
release_name: "{{ drone_name | default('drone') }}"
chart_ref: "{{ drone_chart | default('drone/drone') }}"
chart_version: "{{ drone_version | default(omit) }}"
release_values: "{{ drone_combined_values | from_yaml }}"
wait: true
- name: Deploy Drone Runner Kube
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ drone_runner_kube_namespace | default(namespace) }}"
release_name: "{{ drone_runner_kube_name | default('drone-runner-kube') }}"
chart_ref: "{{ drone_runner_kube_chart | default('drone/drone-runner-kube') }}"
chart_version: "{{ drone_runner_kube_version | default(omit) }}"
release_values: "{{ drone_runner_kube_combined_values | from_yaml }}"
wait: true

@@ -0,0 +1,25 @@
- block:
- name: Set DB namespace for secret lookup
set_fact:
db_namespace: "{{ drone_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}"
- name: Set DB secret name for lookup
set_fact:
db_secret_name: "drone.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do"
- name: Lookup Drone DB secret
set_fact:
drone_db_secret: "{{ lookup('k8s', kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}"
- debug:
msg: "{{ drone_db_secret }}"
verbosity: 2
- name: Set Drone DB username
set_fact:
drone_db_username: "{{ drone_db_secret.data.username | b64decode }}"
- name: Set Drone DB password
set_fact:
drone_db_password: "{{ drone_db_secret.data.password | b64decode }}"

@@ -0,0 +1,15 @@
external_dns_default_values:
fullnameOverride: "{{ external_dns_name | default(namespace + '-external-dns') }}"
annotationFilter: "kubernetes.io/ingress.class={{ external_ingress_class }}"
domainFilters: ["{{ external_domain | default(domain) }}"]
provider: rfc2136
rfc2136:
host: "{{ external_dns_ip | default(dns_ip) }}"
port: 53
zone: "{{ external_domain | default(domain) }}"
tsigSecret: "{{ k8s_tsig }}"
tsigSecretAlg: "{{ external_dns_tsigSecretAlg | default('hmac-sha512') }}"
tsigKeyname: "{{ external_dns_tsigKeyname | default('k8s') }}"
tsigAxfr: true
## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
minTTL: "30s"
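With this annotationFilter, the controller manages records only for Ingresses carrying the external class; Ingresses on the internal class are ignored by this instance. A minimal sketch of an Ingress it would publish, with hypothetical names:

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example
  annotations:
    kubernetes.io/ingress.class: "{{ external_ingress_class }}"
spec:
  rules:
    - host: app.example.com   # a record for this host is created in the configured zone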

@@ -0,0 +1,12 @@
- set_fact:
external_dns_combined_values: "{{ external_dns_default_values | combine(external_dns_values, recursive=true) }}"
- name: Deploy external DNS
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ external_dns_namespace | default(dns_namespace) | default(namespace) }}"
release_name: "{{ external_dns_name | default(namespace + '-external-dns') }}"
chart_ref: "{{ external_dns_chart | default('bitnami/external-dns') }}"
chart_version: "{{ external_dns_version | default(omit) }}"
release_values: "{{ external_dns_combined_values | from_yaml }}"
wait: true

@@ -0,0 +1,14 @@
external_ingress_nginx_default_values:
controller:
config:
use-proxy-protocol: true
use-forwarded-headers: true
compute-full-forwarded-for: true
publishService:
enabled: true
scope:
enabled: false
service:
loadBalancerIP: "{{ external_loadbalancer_ip | default(omit) }}"
externalTrafficPolicy: Local
ingressClass: "{{ external_ingress_class }}"

@@ -0,0 +1,13 @@
- set_fact:
external_ingress_nginx_combined_values: "{{ external_ingress_nginx_default_values | combine(external_ingress_nginx_values, recursive=true) }}"
- name: Deploy external Nginx Ingress
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ external_ingress_nginx_namespace | default(ingress_namespace) | default(namespace) }}"
release_name: "{{ external_ingress_nginx_name | default(namespace + '-external-ingress-nginx') }}"
chart_ref: "{{ external_ingress_nginx_chart | default('ingress-nginx/ingress-nginx') }}"
chart_version: "{{ external_ingress_nginx_version | default(omit) }}"
release_values: "{{ external_ingress_nginx_combined_values | from_yaml }}"
wait: true
when: external_ingress_nginx_enabled | default(true)

@@ -0,0 +1,137 @@
gitea_enabled: true
gitea_publish_web: false
gitea_publish_ssh: false
gitea_use_external_db: true
gitea_ingress_class: "{{ gitea_namespace | default(namespace) }}-{{ 'public' if gitea_publish_web else 'private' }}-gitea-ingress-nginx"
gitea_default_values:
config:
disableInstaller: true
admin_user: "{{ gitea_admin_user | default('gitea') }}"
admin_pass: "{{ gitea_admin_pass | default(gitea_admin_password) }}"
mailer:
domain: "{{ mail_domain | default(domain) }}"
enabled: true
host: "mail.{{ mail_domain | default(domain) }}:465"
skip_verify: false
is_tls_enabled: true
from: "gitea@{{ mail_domain | default(domain) }}"
user: "{{ gitea_ldap_user | default('gitea') }}"
passwd: "{{ gitea_ldap_pass | default(gitea_ldap_password) }}"
ingress:
## Set to true to enable ingress record generation
enabled: true
## When the ingress is enabled, a host pointing to this will be created
hostname: "gitea.{{ domain }}"
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
cert-manager.io/acme-dns01-provider: "rfc2136"
cert-manager.io/acme-challenge-type: "dns01"
kubernetes.io/ingress.class: "{{ gitea_ingress_class }}"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
kubernetes.io/tls-acme: "true"
#
## The list of additional hostnames to be covered with this ingress record.
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
# hosts:
# - name: git.example.com
# path: /
tls:
- hosts:
- "gitea.{{ domain }}"
secretName: "gitea.{{ domain }}-tls"
service:
type: ClusterIP
## This can stay as ClusterIP as (by default) we use ingress
http:
port: 3000
## Make the external port available
# externalPort: 8082
# externalHost: gitea.local
## SSH is commonly on port 22
ssh:
port: 22
## If served on a different external port, set it here; used for determining the SSH URL shown in the GUI
# externalPort: 22
# externalHost: gitea.local
# externalIPs: []
persistence:
enabled: true
# existingGiteaClaim: gitea-gitea
accessMode: "{{ gitea_storage_mode | default('ReadWriteMany') }}"
size: "{{ gitea_size | default('20Gi') }}"
storageClass: "{{ gitea_storage | default('nfs-ssd') }}"
## additional annotations for PVCs; helm.sh/resource-policy: keep prevents the PVC from being deleted on helm uninstall
annotations:
"helm.sh/resource-policy": keep
lfs:
enabled: "{{ gitea_lfs | default(true) }}"
accessMode: "{{ gitea_lfs_storage_mode | default('ReadWriteMany') }}"
size: "{{ gitea_lfs_size | default('50Gi') }}"
storageClass: "{{ gitea_lfs_storage | default('nfs-hdd') }}"
annotations:
"helm.sh/resource-policy": keep
mariadb:
enabled: false
externalDB:
enabled: true
dbType: "postgres"
dbHost: "{{ postgres_db_team | default(namespace) }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local"
dbPort: "5432"
dbDatabase: "gitea"
dbUser: "{{ gitea_db_username | default(omit)}}"
dbPassword: "{{ gitea_db_password | default(omit) }}"
gitea_publish_ingress_nginx_values:
controller:
config:
use-proxy-protocol: true
use-forwarded-headers: true
compute-full-forwarded-for: true
service:
externalTrafficPolicy: Local
gitea_ingress_nginx_default_values:
controller:
containerPort:
ssh: 22
http: 80
https: 443
publishService:
enabled: true
scope:
enabled: true
extraArgs:
tcp-services-configmap: "{{ gitea_namespace | default(namespace) }}/{{ gitea_ingress_nginx_name | default(namespace + '-gitea-ingress-nginx') }}-tcp"
service:
enabled: true
type: LoadBalancer
loadBalancerIP: "{{ gitea_loadbalancer_ip | default(omit) }}"
ports:
ssh: 22
http: 80
https: 443
targetPorts:
ssh: ssh
http: http
https: https
ingressClass: "{{ gitea_ingress_class }}"
tcp:
22: "{{ gitea_namespace | default(namespace) }}/gitea-gitea-svc:22"
gitea_dns_default_values:
fullnameOverride: "{{ gitea_dns_name | default(namespace + '-gitea-internal-dns') }}"
annotationFilter: "kubernetes.io/ingress.class={{ gitea_ingress_class }}"
domainFilters: ["{{ domain }}"]
provider: rfc2136
rfc2136:
host: "{{ dns_ip }}"
port: 53
zone: "{{ domain }}"
tsigSecret: "{{ k8s_tsig }}"
tsigSecretAlg: "{{ gitea_dns_tsigSecretAlg | default('hmac-sha512') }}"
tsigKeyname: "{{ gitea_dns_tsigKeyname | default('k8s') }}"
tsigAxfr: true
## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
minTTL: "30s"
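The extraArgs/tcp pair above is how ingress-nginx proxies raw TCP: the chart renders the tcp: map into a ConfigMap and points the controller at it via --tcp-services-configmap, so port 22 on the LoadBalancer is forwarded straight to the Gitea SSH service. Roughly, the rendered ConfigMap looks like this (names assumed from the defaults above):

apiVersion: v1
kind: ConfigMap
metadata:
  name: <namespace>-gitea-ingress-nginx-tcp
data:
  "22": "<namespace>/gitea-gitea-svc:22"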

@@ -0,0 +1,50 @@
- name: Import secret.yaml to obtain secrets
include_tasks: secrets.yaml
when:
- gitea_use_external_db
- postgres_enable
- set_fact:
gitea_combined_values: "{{ gitea_default_values | combine(gitea_values, recursive=true) }}"
- set_fact:
gitea_dns_combined_values: "{{ gitea_dns_default_values | combine(gitea_dns_values, recursive=true) }}"
- set_fact:
gitea_ingress_nginx_combined_values: "{{ gitea_ingress_nginx_default_values | combine(gitea_ingress_nginx_values, recursive=true) }}"
- set_fact:
gitea_ingress_nginx_combined_values: "{{ gitea_ingress_nginx_combined_values | combine(gitea_publish_ingress_nginx_values, recursive=true) }}"
when: gitea_publish_web
- name: Deploy Nginx Ingress for Gitea
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ gitea_ingress_nginx_namespace | default(namespace) }}"
release_name: "{{ gitea_ingress_nginx_name | default(namespace + '-gitea-ingress-nginx') }}"
chart_ref: "{{ gitea_ingress_nginx_chart | default('ingress-nginx/ingress-nginx') }}"
chart_version: "{{ gitea_ingress_nginx_version | default(omit) }}"
release_values: "{{ gitea_ingress_nginx_combined_values | from_yaml }}"
wait: true
- name: Deploy DNS for Gitea
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ gitea_dns_namespace | default(namespace) }}"
release_name: "{{ gitea_dns_name | default('gitea-internal-dns') }}"
chart_ref: "{{ gitea_dns_chart | default('bitnami/external-dns') }}"
chart_version: "{{ gitea_dns_version | default(omit) }}"
release_values: "{{ gitea_dns_combined_values | from_yaml }}"
wait: true
when: not gitea_publish_web
- name: Deploy Gitea
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ gitea_namespace | default(namespace) }}"
release_name: "{{ gitea_name | default('gitea') }}"
chart_ref: "{{ gitea_chart | default('ghp/gitea') }}"
chart_version: "{{ gitea_version | default(omit) }}"
release_values: "{{ gitea_combined_values | from_yaml }}"
wait: true

@@ -0,0 +1,25 @@
- block:
- name: Set DB namespace for secret lookup
set_fact:
db_namespace: "{{ gitea_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}"
- name: Set DB secret name for lookup
set_fact:
db_secret_name: "gitea.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do"
- name: Lookup Gitea DB secret
set_fact:
gitea_db_secret: "{{ lookup('k8s', kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}"
- debug:
msg: "{{ gitea_db_secret }}"
verbosity: 2
- name: Set Gitea DB username
set_fact:
gitea_db_username: "{{ gitea_db_secret.data.username | b64decode }}"
- name: Set Gitea DB password
set_fact:
gitea_db_password: "{{ gitea_db_secret.data.password | b64decode }}"

@@ -0,0 +1,3 @@
dockerize: false
namespace: haproxy
haproxy_image_tag: 2.3.1

@@ -0,0 +1,22 @@
---
- name: restart haproxy
become: true
systemd:
name: haproxy
state: restarted
daemon_reload: yes
- name: restart docker haproxy
docker_container:
name: "haproxy"
image: "{{ docker_registry }}/haproxy:{{ haproxy_image_tag }}"
state: started
restart: yes
ports:
- "443:443"
container_default_behavior: no_defaults
detach: true
restart_policy: unless-stopped
volumes:
- "/opt/haproxy/conf.d:/usr/local/etc/haproxy/conf.d"

@@ -0,0 +1,26 @@
---
- name: Create configuration dir for {{ namespace }}
file:
name: "/opt/haproxy/conf.d"
state: directory
- name: Copy haproxy configuration files for {{ namespace }}
copy:
content: "{{ item.data }}"
dest: "/opt/haproxy/conf.d/{{ item.name }}"
loop: "{{ haproxy['conf.d'] }}"
when: haproxy['conf.d'] is defined
notify: restart docker haproxy
- name: Start haproxy in docker for {{ namespace }}
docker_container:
name: "haproxy"
image: "{{ docker_registry }}/haproxy:{{ haproxy_image_tag }}"
state: started
ports:
- "443:443"
container_default_behavior: no_defaults
detach: true
restart_policy: unless-stopped
volumes:
- "/opt/haproxy/conf.d:/usr/local/etc/haproxy/conf.d"

@@ -0,0 +1,10 @@
- block:
- import_tasks: package.yml
when: not dockerize
become: true
- block:
- import_tasks: docker.yml
when: dockerize
become: true

@@ -0,0 +1,22 @@
---
- block:
- name: install haproxy
package:
name: haproxy
state: present
- name: add haproxy config
copy:
content: "{{ haproxy_config }}"
dest: "/etc/haproxy/haproxy.cfg"
notify:
- restart haproxy
when: haproxy_config is defined
- name: start haproxy service
systemd:
name: haproxy
state: started
enabled: yes
daemon_reload: yes

@@ -0,0 +1,8 @@
helm_repos:
- { name: 'ghp', url: 'https://charts.ghp.0xace.cc' }
- { name: 'jetstack', url: 'https://charts.jetstack.io' }
- { name: 'bitnami', url: 'https://charts.bitnami.com/bitnami' }
- { name: 'drone', url: 'https://charts.drone.io' }
- { name: 'ingress-nginx', url: 'https://kubernetes.github.io/ingress-nginx' }
- { name: 'stable', url: 'https://charts.helm.sh/stable' }
- { name: 'nextcloud', url: 'https://nextcloud.github.io/helm' }

@@ -0,0 +1,6 @@
- name: Add Helm repositories
community.kubernetes.helm_repository:
name: "{{ item.name }}"
repo_url: "{{ item.url }}"
loop:
"{{ helm_repos }}"

@@ -0,0 +1,15 @@
internal_dns_default_values:
fullnameOverride: "{{ internal_dns_name | default(namespace + '-internal-dns') }}"
annotationFilter: "kubernetes.io/ingress.class={{ internal_ingress_class }}"
domainFilters: ["{{ internal_domain | default(domain) }}"]
provider: rfc2136
rfc2136:
host: "{{ internal_dns_ip | default(dns_ip) }}"
port: 53
zone: "{{ internal_domain | default(domain) }}"
tsigSecret: "{{ k8s_tsig }}"
tsigSecretAlg: "{{ internal_dns_tsigSecretAlg | default('hmac-sha512') }}"
tsigKeyname: "{{ internal_dns_tsigKeyname | default('k8s') }}"
tsigAxfr: true
## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
minTTL: "30s"

@@ -0,0 +1,12 @@
- set_fact:
internal_dns_combined_values: "{{ internal_dns_default_values | combine(internal_dns_values, recursive=true) }}"
- name: Deploy internal DNS
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ internal_dns_namespace | default(dns_namespace) | default(namespace) }}"
release_name: "{{ internal_dns_name | default(namespace + '-internal-dns') }}"
chart_ref: "{{ internal_dns_chart | default('bitnami/external-dns') }}"
chart_version: "{{ internal_dns_version | default(omit) }}"
release_values: "{{ internal_dns_combined_values | from_yaml }}"
wait: true

@@ -0,0 +1,9 @@
internal_ingress_nginx_default_values:
controller:
publishService:
enabled: true
scope:
enabled: false
service:
loadBalancerIP: "{{ internal_loadbalancer_ip | default(omit) }}"
ingressClass: "{{ internal_ingress_class }}"

@@ -0,0 +1,13 @@
- set_fact:
internal_ingress_nginx_combined_values: "{{ internal_ingress_nginx_default_values | combine(internal_ingress_nginx_values, recursive=true) }}"
- name: Deploy internal Nginx Ingress
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ internal_ingress_nginx_namespace | default(ingress_namespace) | default(namespace) }}"
release_name: "{{ internal_ingress_nginx_name | default(namespace + '-internal-ingress-nginx') }}"
chart_ref: "{{ internal_ingress_nginx_chart | default('ingress-nginx/ingress-nginx') }}"
chart_version: "{{ internal_ingress_nginx_version | default(omit) }}"
release_values: "{{ internal_ingress_nginx_combined_values | from_yaml }}"
wait: true
when: internal_ingress_nginx_enabled | default(true)

@@ -0,0 +1,56 @@
---
knot_conf: |
# This is a sample of a minimal configuration file for Knot DNS.
# See knot.conf(5) or refer to the server documentation.
server:
rundir: "/run/knot"
user: knot:knot
listen: [ 0.0.0.0@53, ::@53 ]
udp-max-payload: 1232
log:
- target: syslog
any: debug
#key:
# - id: k8s
# algorithm: hmac-sha512
# secret: changeme
#remote:
# - id: dns_server
# address: 127.0.0.1@53
#
#submission:
# - id: dns_zone_sbm
# parent: [dns_server]
#acl:
# - id: deny_all
# deny: on # no action specified and deny on implies denial of all actions
#
# - id: key_rule
# key: [k8s] # Access based just on TSIG key
# address: 192.168.0.0/16
# action: [transfer, notify, update]
#policy:
# - id: rsa
# algorithm: RSASHA512
# ksk-size: 4096
# zsk-size: 2048
# nsec3: on
# ksk-submission: dns_zone_sbm
template:
- id: default
storage: "/var/lib/knot"
file: "%s.zone"
zone:
- domain: example.com
storage: "/var/lib/knot/zones/"
file: "example.com.zone"
#acl: [deny_all, key_rule]
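The commented key/acl sections sketch what the cluster integrations above need (external-dns and cert-manager's RFC2136 solver both sign with TSIG): a key named k8s and an ACL allowing transfer, notify and update. Uncommented and attached to the zone, that would look roughly like this (the secret is a placeholder that must match k8s_tsig in the inventory):

key:
  - id: k8s
    algorithm: hmac-sha512
    secret: "<base64 TSIG secret>"
acl:
  - id: key_rule
    key: [k8s]
    address: 192.168.0.0/16
    action: [transfer, notify, update]
zone:
  - domain: example.com
    acl: [key_rule]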

@@ -0,0 +1,4 @@
---
- name: restart knot
service:
name: knot
state: restarted
become: true

roles/knot/meta/main.yml
@@ -0,0 +1 @@
---

@@ -0,0 +1,7 @@
---
# RedHat Family (RedHat, Fedora, CentOS, Amazon, etc.)
- name: packages (RedHat)
yum:
name: knot
state: present
when: ansible_os_family == "RedHat"

roles/knot/tasks/main.yml
@@ -0,0 +1,24 @@
---
- name: install
become: true
import_tasks: install.yml
# Configuration
- name: configure knot
become: true
copy:
content: "{{ knot_conf }}"
dest: /etc/knot/knot.conf
mode: 0640
owner: "root"
group: "knot"
validate: "knotc -c %s conf-check"
notify: restart knot
- name: enable knot
become: true
systemd:
name: "knot"
enabled: yes
state: started
daemon_reload: yes

@@ -0,0 +1,15 @@
local_dns_default_values:
fullnameOverride: "{{ local_dns_name | default(namespace + '-local-dns') }}"
annotationFilter: "kubernetes.io/ingress.class={{ local_ingress_class }}"
domainFilters: ["{{ local_domain }}"]
provider: rfc2136
rfc2136:
host: "{{ local_dns_ip | default(dns_ip) }}"
port: 53
zone: "{{ local_domain }}"
tsigSecret: "{{ k8s_tsig }}"
tsigSecretAlg: "{{ local_dns_tsigSecretAlg | default('hmac-sha512') }}"
tsigKeyname: "{{ local_dns_tsigKeyname | default('k8s') }}"
tsigAxfr: true
## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
minTTL: "30s"

@@ -0,0 +1,12 @@
- set_fact:
local_dns_combined_values: "{{ local_dns_default_values | combine(local_dns_values, recursive=true) }}"
- name: Deploy local DNS
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ local_dns_namespace | default(dns_namespace) | default(namespace) }}"
release_name: "{{ local_dns_name | default(namespace + '-local-dns') }}"
chart_ref: "{{ local_dns_chart | default('bitnami/external-dns') }}"
chart_version: "{{ local_dns_version | default(omit) }}"
release_values: "{{ local_dns_combined_values | from_yaml }}"
wait: true

@@ -0,0 +1,11 @@
local_ingress_nginx_default_values:
controller:
publishService:
enabled: true
scope:
enabled: false
service:
loadBalancerIP: "{{ local_loadbalancer_ip | default(omit) }}"
ingressClass: "{{ local_ingress_class }}"

@@ -0,0 +1,13 @@
- set_fact:
local_ingress_nginx_combined_values: "{{ local_ingress_nginx_default_values | combine(local_ingress_nginx_values, recursive=true) }}"
- name: Deploy local Nginx Ingress
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ local_ingress_nginx_namespace | default(ingress_namespace) | default(namespace) }}"
release_name: "{{ local_ingress_nginx_name | default(namespace + '-local-ingress-nginx') }}"
chart_ref: "{{ local_ingress_nginx_chart | default('ingress-nginx/ingress-nginx') }}"
chart_version: "{{ local_ingress_nginx_version | default(omit) }}"
release_values: "{{ local_ingress_nginx_combined_values | from_yaml }}"
wait: true
when: local_ingress_nginx_enabled | default(true)

@@ -0,0 +1,69 @@
- name: Issue Certificate for Postfix and Dovecot
k8s:
wait: true
state: present
definition:
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: "mail.{{ domain }}-crt"
namespace: "{{ mail_namespace | default(namespace) }}"
spec:
secretName: "mail.{{ domain }}-secret"
dnsNames:
- "mail.{{ domain }}"
issuerRef:
name: letsencrypt-prod
# We can reference ClusterIssuers by changing the kind here.
# The default value is Issuer (i.e. a locally namespaced Issuer)
kind: ClusterIssuer
group: cert-manager.io
- name: Create MailBox PV
k8s:
state: present
definition:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mailboxes
namespace: "{{ mail_namespace | default(namespace) }}"
spec:
accessModes:
- "{{ mailbox_storage_mode | default('ReadWriteMany') }}"
resources:
requests:
storage: "{{ mailbox_size | default('50Gi') }}"
storageClassName: "{{ mailbox_storage | default('nfs-hdd') }}"
- name: Deploy Postfix
import_role:
name: postfix
tags: postfix
- name: Deploy Dovecot
import_role:
name: dovecot
tags: dovecot
- name: Deploy OpenDKIM
import_role:
name: opendkim
tags: opendkim
- name: Deploy OpenDMARC
import_role:
name: opendmarc
tags: opendmarc
- name: Deploy Rspamd
import_role:
name: rspamd
when: rspamd_enabled | default(false)
tags: rspamd
- name: Deploy Roundcube
import_role:
name: roundcube
when: roundcube_enabled | default(true)
tags: roundcube

@@ -0,0 +1,2 @@
strict_arp_for_metallb: true
metallb_default_values: {}
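metallb_default_values is intentionally empty, so the address pools are expected from metallb_values in the inventory. A hypothetical layer-2 pool using the Bitnami chart's configInline value; the pool name matches the metallb.universe.tf/address-pool annotations used elsewhere in this repo, and the address range is a placeholder:

metallb_values:
  configInline:
    address-pools:
      - name: network-services
        protocol: layer2
        addresses:
          - 192.168.1.240-192.168.1.250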

@@ -0,0 +1,32 @@
- set_fact:
metallb_combined_values: "{{ metallb_default_values | combine(metallb_values, recursive=true) }}"
- name: Deploy MetalLB
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ metallb_namespace | default('metallb-system') }}"
release_name: "{{ metallb_name | default('metallb') }}"
chart_ref: "{{ metallb_chart | default('bitnami/metallb') }}"
chart_version: "{{ metallb_version | default(omit) }}"
release_values: "{{ metallb_combined_values | from_yaml }}"
wait: true
- name: Check for strict arp
check_mode: false
shell: |
kubectl get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
kubectl diff -f - -n kube-system
register: check_strict_arp
when: strict_arp_for_metallb
changed_when: check_strict_arp.rc != 0
- name: Apply strict arp
shell: |
kubectl get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
kubectl apply -f - -n kube-system \
&& kubectl -n kube-system delete pods --selector=k8s-app=kube-proxy
when:
- strict_arp_for_metallb
- check_strict_arp.changed

@@ -0,0 +1,6 @@
metrics_server_enabled: true
metrics_server_default_values:
apiService:
create: true
extraArgs:
kubelet-insecure-tls: true

@@ -0,0 +1,11 @@
- set_fact:
metrics_server_combined_values: "{{ metrics_server_default_values | combine(metrics_server_values, recursive=true) }}"
- name: Deploy Metrics server
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ metrics_server_namespace | default('metrics-server') }}"
release_name: "{{ metrics_server_name | default('metrics-server') }}"
chart_ref: "{{ metrics_server_chart | default('bitnami/metrics-server') }}"
chart_version: "{{ metrics_server_version | default(omit) }}"
release_values: "{{ metrics_server_combined_values | from_yaml }}"

@@ -0,0 +1,167 @@
nextcloud_enabled: true
nextcloud_publish: false
nextcloud_use_external_db: true
nextcloud_default_values:
image:
repository: nextcloud
tag: 19.0-apache
pullPolicy: Always
replicaCount: 1
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
cert-manager.io/acme-dns01-provider: "rfc2136"
cert-manager.io/acme-challenge-type: "dns01"
kubernetes.io/ingress.class: "{{ external_ingress_class if nextcloud_publish else internal_ingress_class }}"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
kubernetes.io/tls-acme: "true"
nginx.ingress.kubernetes.io/server-snippet: |-
server_tokens off;
proxy_hide_header X-Powered-By;
rewrite ^/.well-known/webfinger /public.php?service=webfinger last;
rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json;
location = /.well-known/carddav {
return 301 $scheme://$host/remote.php/dav;
}
location = /.well-known/caldav {
return 301 $scheme://$host/remote.php/dav;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ {
deny all;
}
location ~ ^/(?:autotest|occ|issue|indie|db_|console) {
deny all;
}
tls:
- secretName: "nextcloud.{{ domain }}-tls"
hosts:
- "nextcloud.{{ domain }}"
nextcloud:
host: "nextcloud.{{ domain }}"
username: admin
password: "{{ nextcloud_pass | default(nextcloud_password) }}"
update: 0
datadir: /var/www/html/data
tableprefix:
mail:
enabled: true
fromAddress: nextcloud
domain: "{{ mail_domain | default(domain) }}"
smtp:
host: "mail.{{ mail_domain | default(domain) }}"
secure: ssl
port: 465
authtype: LOGIN
name: "{{ nextcloud_mail_user | default('nextcloud') }}"
password: "{{ nextcloud_mail_pass | default(nextcloud_mail_password) }}"
# PHP Configuration files
# Will be injected in /usr/local/etc/php/conf.d
phpConfigs: {}
# Default config files
# IMPORTANT: used only if you add extra configs; otherwise the defaults come from the Nextcloud image itself
# Default configurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config
defaultConfigs:
# To protect /var/www/html/config
.htaccess: true
# Redis default configuration
redis.config.php: true
# Apache configuration for rewrite urls
apache-pretty-urls.config.php: true
# Define APCu as local cache
apcu.config.php: true
# Apps directory configs
apps.config.php: true
# Used for auto configure database
autoconfig.php: true
# SMTP default configuration
smtp.config.php: true
# Extra config files created in /var/www/html/config/
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
configs:
mail.fix.config.php: |-
<?php
$CONFIG = array (
"mail_smtptimeout" => 60,
);
fix.config.php: |-
<?php
$CONFIG = array (
'trusted_proxies' => ['{{ web_proxy_internal_ip }}'],
'overwriteprotocol' => 'https',
'overwrite.cli.url' => 'https://nextcloud.{{ domain }}',
'mail_smtpstreamoptions' =>
array (
'ssl' =>
array (
'allow_self_signed' => true,
'verify_peer' => false,
'verify_peer_name' => false,
),
),
);
strategy:
type: RollingUpdate
internalDatabase:
enabled: false
name: nextcloud
# Disable Mariadb setup
mariadb:
enabled: false
# Enable Redis
redis:
enabled: true
usePassword: false
cluster:
enabled: false
## External database configuration
externalDatabase:
enabled: true
## Supported database engines: mysql or postgresql
type: postgresql
## Database host
host: "{{ namespace }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local"
## Database name
database: nextcloud
user: "{{ nextcloud_db_username }}"
password: "{{ nextcloud_db_password }}"
## Cronjob to execute Nextcloud background tasks
## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#cron-jobs
##
cronjob:
enabled: true
# The Nextcloud image is used by default, but only curl is needed
image:
repository: nextcloud
tag: 19.0-apache
schedule: "*/5 * * * *"
annotations: {}
# Set curl's insecure option if you use e.g. self-signed certificates
curlInsecure: false
failedJobsHistoryLimit: 1
successfulJobsHistoryLimit: 1
service:
type: ClusterIP
port: 8080
loadBalancerIP: nil
persistence:
# Nextcloud Data (/var/www/html)
enabled: true
storageClass: "{{ nextcloud_storage | default('nfs-ssd') }}"
accessMode: "{{ nextcloud_storage_mode | default('ReadWriteMany') }}"
size: "{{ nextcloud_size | default('20Gi') }}"
livenessProbe:
enabled: false
readinessProbe:
enabled: false

@@ -0,0 +1,19 @@
- name: Import secret.yaml to obtain secrets
include_tasks: secrets.yaml
when:
- nextcloud_use_external_db
- postgres_enable
- set_fact:
nextcloud_combined_values: "{{ nextcloud_default_values | combine(nextcloud_values, recursive=true) }}"
- name: Deploy Nextcloud
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ nextcloud_namespace | default(namespace) }}"
release_name: "{{ nextcloud_name | default('nextcloud') }}"
chart_ref: "{{ nextcloud_chart | default('nextcloud/nextcloud') }}"
chart_version: "{{ nextcloud_version | default(omit) }}"
release_values: "{{ nextcloud_combined_values | from_yaml }}"
wait: false

@@ -0,0 +1,25 @@
- block:
- name: Set DB namespace for secret lookup
set_fact:
db_namespace: "{{ nextcloud_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}"
- name: Set DB secret name for lookup
set_fact:
db_secret_name: "nextcloud.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do"
- name: Lookup Nextcloud DB secret
set_fact:
nextcloud_db_secret: "{{ lookup('k8s', kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}"
- debug:
msg: "{{ nextcloud_db_secret }}"
verbosity: 2
- name: Set Nextcloud DB username
set_fact:
nextcloud_db_username: "{{ nextcloud_db_secret.data.username | b64decode }}"
- name: Set Nextcloud DB password
set_fact:
nextcloud_db_password: "{{ nextcloud_db_secret.data.password | b64decode }}"

@@ -0,0 +1,70 @@
nfs_client_provisioner_namespace: nfs-client-provisioner
nfs_client_provisioner_hdd_default_values:
replicaCount: 1
strategyType: Recreate
nfs:
server:
path:
# For creating the StorageClass automatically:
storageClass:
create: true
# Set a provisioner name. If unset, a name will be generated.
# provisionerName:
# Set StorageClass as the default StorageClass
# Ignored if storageClass.create is false
defaultClass: false
# Set a StorageClass name
# Ignored if storageClass.create is false
name: nfs-hdd
# Allow volume to be expanded dynamically
allowVolumeExpansion: true
# Method used to reclaim an obsoleted volume
reclaimPolicy: Delete
# When set to false your PVs will not be archived by the provisioner upon deletion of the PVC.
archiveOnDelete: false
# Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
accessModes: ReadWriteMany
nfs_client_provisioner_ssd_default_values:
replicaCount: 1
strategyType: Recreate
nfs:
server:
path:
# For creating the StorageClass automatically:
storageClass:
create: true
# Set a provisioner name. If unset, a name will be generated.
# provisionerName:
# Set StorageClass as the default StorageClass
# Ignored if storageClass.create is false
defaultClass: true
# Set a StorageClass name
# Ignored if storageClass.create is false
name: nfs-ssd
# Allow volume to be expanded dynamically
allowVolumeExpansion: true
# Method used to reclaim an obsoleted volume
reclaimPolicy: Delete
# When set to false your PVs will not be archived by the provisioner upon deletion of the PVC.
archiveOnDelete: false
# Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
accessModes: ReadWriteMany
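# A claim bound against one of these classes looks roughly like the sketch
# below (claim name and size are illustrative):
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: example-data
#   spec:
#     storageClassName: nfs-ssd
#     accessModes: ["ReadWriteMany"]
#     resources:
#       requests:
#         storage: 1Gi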

View File

@ -0,0 +1,25 @@
- set_fact:
nfs_client_provisioner_hdd_combined_values: "{{ nfs_client_provisioner_hdd_default_values | combine(nfs_client_provisioner_hdd_values, recursive=true) }}"
- name: Deploy NFS client provisioner for HDD storage
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ nfs_client_provisioner_hdd_namespace | default(nfs_client_provisioner_namespace) | default(namespace) }}"
release_name: "{{ nfs_client_provisioner_hdd_name | default('nfs-client-provisioner-hdd') }}"
chart_ref: "{{ nfs_client_provisioner_hdd_chart | default('stable/nfs-client-provisioner') }}"
chart_version: "{{ nfs_client_provisioner_hdd_version | default(omit) }}"
release_values: "{{ nfs_client_provisioner_hdd_combined_values | from_yaml }}"
wait: true
- set_fact:
nfs_client_provisioner_ssd_combined_values: "{{ nfs_client_provisioner_ssd_default_values | combine(nfs_client_provisioner_ssd_values, recursive=true) }}"
- name: Deploy NFS client provisioner for SSD storage
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ nfs_client_provisioner_ssd_namespace | default(nfs_client_provisioner_namespace) | default(namespace) }}"
release_name: "{{ nfs_client_provisioner_ssd_name | default('nfs-client-provisioner-ssd') }}"
chart_ref: "{{ nfs_client_provisioner_ssd_chart | default('stable/nfs-client-provisioner') }}"
chart_version: "{{ nfs_client_provisioner_ssd_version | default(omit) }}"
release_values: "{{ nfs_client_provisioner_ssd_combined_values | from_yaml }}"
wait: true

View File

@ -0,0 +1,3 @@
registry_readonly_ingress: false
wikijs_readonly_ingress: false
chartmuseum_readonly_ingress: false

View File

@ -0,0 +1,13 @@
---
- name: start nginx
systemd:
name: nginx
state: started
enabled: yes
- name: reload nginx
systemd:
name: nginx
state: reloaded
enabled: yes

View File

@ -0,0 +1,39 @@
---
- block:
- name: configure nginx.conf
copy:
content: "{{ nginx['nginx.conf'] }}"
dest: "/etc/nginx/nginx.conf"
notify: reload nginx
when: nginx['nginx.conf'] is defined
- name: add nginx configs to conf.d
copy:
content: "{{ item.data }}"
dest: "/etc/nginx/conf.d/{{ item.name }}"
loop: "{{ nginx['conf.d'] }}"
notify: reload nginx
when: nginx['conf.d'] is defined
- name: add nginx configs to stream.d
copy:
content: "{{ item.data }}"
dest: "/etc/nginx/stream.d/{{ item.name }}"
loop: "{{ nginx['stream.d'] }}"
notify: reload nginx
when: nginx['stream.d'] is defined
- name: ensure ssl dir exists
file:
path: /etc/nginx/ssl
state: directory
when: nginx.ssl is defined
- name: add ssl certs and keys
copy:
content: "{{ item.data }}"
dest: "/etc/nginx/ssl/{{ item.name }}"
loop: "{{ nginx.ssl }}"
notify: reload nginx
when: nginx.ssl is defined
tags:
- nginx-configure
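# These tasks consume an "nginx" inventory variable shaped roughly like the
# sketch below (file name and contents are illustrative):
#   nginx:
#     conf.d:
#       - name: example.conf
#         data: |
#           server {
#             listen 80;
#             return 301 https://$host$request_uri;
#           }
#     ssl:
#       - name: example.crt
#         data: "-----BEGIN CERTIFICATE-----..."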

View File

@ -0,0 +1,11 @@
---
- block:
- name: install nginx
package:
name:
- nginx
state: present
notify: start nginx
register: install_nginx_result
tags:
- nginx-install

View File

@ -0,0 +1,8 @@
---
- block:
- import_tasks: install.yml
- import_tasks: configure.yml
become: true
tags:
- nginx

View File

@ -0,0 +1,45 @@
opendkim_default_values:
replicaCount: 1
persistence:
enabled: false
existingClaim: mailboxes
opendkim:
image:
repository: "{{ docker_registry }}/opendkim"
tag: latest
pullPolicy: Always
configmaps:
opendkim: |
PidFile /var/run/opendkim/opendkim.pid
Mode sv
Syslog yes
SyslogSuccess yes
LogWhy yes
UserID opendkim:opendkim
Socket inet:8891
Umask 002
SendReports yes
SoftwareHeader yes
Canonicalization relaxed/relaxed
Domain {{ domain }}
Selector default
MinimumKeyBits 1024
KeyTable refile:/etc/opendkim/KeyTable
SigningTable refile:/etc/opendkim/SigningTable
ExternalIgnoreList refile:/etc/opendkim/TrustedHosts
InternalHosts refile:/etc/opendkim/TrustedHosts
OversignHeaders From
keytable: |
default._domainkey.{{ domain }} {{ domain }}:default:/etc/opendkim/keys/default.private
signingtable: |
*@{{ domain }} default._domainkey.{{ domain }}
trustedhosts: |
127.0.0.1
::1
*.{{ domain }}
default-private: |
{{ dkim_private_key_base64 | b64decode }}
default-public: |
{{ dkim_public_key_base64 | b64decode }}
service:
type: ClusterIP
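# The key pair referenced by KeyTable above can be produced with
# opendkim-genkey; a sketch (selector and domain are illustrative):
#   opendkim-genkey -b 2048 -s default -d example.com
#   # yields default.private (for the KeyTable) and default.txt (DNS TXT record)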

View File

@ -0,0 +1,13 @@
- set_fact:
opendkim_combined_values: "{{ opendkim_default_values | combine(opendkim_values, recursive=true) }}"
- name: Deploy OpenDKIM
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ opendkim_namespace | default(mail_namespace) | default(namespace) }}"
release_name: "{{ opendkim_name | default('opendkim') }}"
chart_ref: "{{ opendkim_chart | default('ghp/opendkim') }}"
chart_version: "{{ opendkim_version | default(omit) }}"
release_values: "{{ opendkim_combined_values | from_yaml }}"
wait: true

View File

@ -0,0 +1,24 @@
opendmarc_default_values:
replicaCount: 1
persistence:
enabled: false
existingClaim: mailboxes
opendmarc:
image:
repository: "{{ docker_registry }}/opendmarc"
tag: latest
pullPolicy: Always
configmaps:
opendmarc: |
AuthservID mail.{{ domain }}
Socket inet:8893
SoftwareHeader true
SPFIgnoreResults true
SPFSelfValidate true
RequiredHeaders true
Syslog true
UserID opendmarc:mail
service:
type: ClusterIP
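# OpenDMARC only enforces what the sender domain publishes in DNS; a typical
# policy record looks like this (domain and mailbox are illustrative):
#   _dmarc.example.com. IN TXT "v=DMARC1; p=none; rua=mailto:postmaster@example.com"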

View File

@ -0,0 +1,13 @@
- set_fact:
opendmarc_combined_values: "{{ opendmarc_default_values | combine(opendmarc_values, recursive=true) }}"
- name: Deploy OpenDMARC
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ opendmarc_namespace | default(mail_namespace) | default(namespace) }}"
release_name: "{{ opendmarc_name | default('opendmarc') }}"
chart_ref: "{{ opendmarc_chart | default('ghp/opendmarc') }}"
chart_version: "{{ opendmarc_version | default(omit) }}"
release_values: "{{ opendmarc_combined_values | from_yaml }}"
wait: true

View File

@ -0,0 +1,279 @@
openldap_default_values:
replicaCount: 1
# Define deployment strategy - IMPORTANT: use rollingUpdate: null when using the Recreate strategy.
# This prevents the new strategy from being merged with leftover rollingUpdate map keys, which is forbidden.
strategy:
type: RollingUpdate
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
#
# or
#
# type: Recreate
# rollingUpdate: null
image:
# From repository https://github.com/osixia/docker-openldap
repository: osixia/openldap
tag: 1.4.0
pullPolicy: Always
# Specifies an existing secret to be used for admin and config user passwords
existingSecret: ""
# settings for enabling TLS
tls:
enabled: true
secret: "openldap.{{ domain }}-secret" # The name of a kubernetes.io/tls type secret to use for TLS
CA:
enabled: true
secret: "openldap.{{ domain }}-ca" # The name of a generic secret to use for custom CA certificate (ca.crt)
## Add additional labels to all resources
extraLabels: {}
## Add additional annotations to pods
podAnnotations: {}
service:
annotations:
external-dns.alpha.kubernetes.io/hostname: openldap.{{ domain }}
clusterIP: ""
ldapPort: 389
sslLdapPort: 636 # Only used if tls.enabled is true
## List of IP addresses at which the service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: "{{ openldap_loadbalancer_ip | default(omit) }}"
loadBalancerSourceRanges: []
type: LoadBalancer
# Default configuration for openldap as environment variables. These get injected directly in the container.
# Use the env variables from https://github.com/osixia/docker-openldap#beginner-guide
env:
LDAP_ORGANISATION: "{{ ldap_org | default('GHP') }}"
LDAP_DOMAIN: "{{ ldap_domain | default(domain) }}"
LDAP_BACKEND: "mdb"
LDAP_TLS: "true"
LDAP_TLS_ENFORCE: "false"
LDAP_RFC2307BIS_SCHEMA: "true"
LDAP_TLS_VERIFY_CLIENT: "try"
# Default Passwords to use, stored as a secret. If unset, passwords are auto-generated.
# You can override these at install time with
# helm install openldap --set openldap.adminPassword=<passwd>,openldap.configPassword=<passwd>
adminPassword: "{{ openldap_admin_pass | default(openldap_admin_password) }}"
configPassword: "{{ openldap_config_pass | default(openldap_config_password) }}"
# Custom openldap configuration files used to override default settings
customLdifFiles:
01-pw-pbkdf2.ldif: |-
dn: cn=module{0},cn=config
changetype: modify
add: olcModuleLoad
olcModuleLoad: pw-pbkdf2
02-acl.ldif: |-
dn: olcDatabase={1}mdb,cn=config
changetype: modify
add: olcAccess
olcAccess: {1}to * by users read by anonymous auth by * none
03-default-users.ldif: |-
dn: ou=groups,{{ openldap_domain }}
changetype: add
objectClass: organizationalUnit
objectClass: top
ou: groups
dn: ou=users,{{ openldap_domain }}
changetype: add
objectClass: organizationalUnit
objectClass: top
ou: users
dn: ou=services,{{ openldap_domain }}
changetype: add
objectClass: organizationalUnit
objectClass: top
ou: services
dn: uid=admin,ou=users,{{ openldap_domain }}
changetype: add
uid: admin
cn: admin
sn: 3
objectClass: top
objectClass: posixAccount
objectClass: inetOrgPerson
loginShell: /bin/bash
homeDirectory: /home/admin
uidNumber: 14583103
gidNumber: 14564103
userPassword: {{ openldap_admin_pbkdf2_sha512_hash }}
gecos: Admin user
dn: uid=systemuser,ou=services,{{ openldap_domain }}
changetype: add
uid: systemuser
cn: systemuser
sn: 4
objectClass: top
objectClass: posixAccount
objectClass: inetOrgPerson
loginShell: /bin/bash
homeDirectory: /home/systemuser
uidNumber: 14583104
gidNumber: 14564104
userPassword: {{ systemuser_pbkdf2_sha512_hash }}
mail: systemuser@{{ domain }}
gecos: System user
dn: uid=nextcloud,ou=users,{{ openldap_domain }}
changetype: add
uid: nextcloud
cn: nextcloud
sn: 6
objectClass: top
objectClass: posixAccount
objectClass: inetOrgPerson
loginShell: /bin/bash
homeDirectory: /home/nextcloud
uidNumber: 14583106
gidNumber: 14564106
userPassword: {{ nextcloud_ldap_pbkdf2_sha512_hash }}
mail: nextcloud@{{ domain }}
gecos: Nextcloud user
dn: uid=ldapbind,ou=services,{{ openldap_domain }}
changetype: add
uid: ldapbind
cn: ldapbind
sn: 7
objectClass: top
objectClass: posixAccount
objectClass: inetOrgPerson
loginShell: /sbin/nologin
homeDirectory: /home/ldapbind
uidNumber: 14583107
gidNumber: 14564107
userPassword: {{ ldapbind_pbkdf2_sha512_hash }}
gecos: LDAP bind user
dn: uid=bitwarden,ou=users,{{ openldap_domain }}
changetype: add
uid: bitwarden
cn: bitwarden
sn: 8
objectClass: top
objectClass: posixAccount
objectClass: inetOrgPerson
loginShell: /bin/bash
homeDirectory: /home/bitwarden
uidNumber: 14583108
gidNumber: 14564108
userPassword: {{ bitwarden_ldap_pbkdf2_sha512_hash }}
mail: bitwarden@{{ domain }}
gecos: Bitwarden user
dn: uid=gitea,ou=users,{{ openldap_domain }}
changetype: add
uid: gitea
cn: gitea
sn: 9
objectClass: top
objectClass: posixAccount
objectClass: inetOrgPerson
loginShell: /bin/bash
homeDirectory: /home/gitea
uidNumber: 14583109
gidNumber: 14564109
userPassword: {{ gitea_ldap_pbkdf2_sha512_hash }}
mail: gitea@{{ domain }}
gecos: Gitea user
dn: uid=wikijs,ou=users,{{ openldap_domain }}
changetype: add
uid: wikijs
cn: wikijs
sn: 10
objectClass: top
objectClass: posixAccount
objectClass: inetOrgPerson
loginShell: /bin/bash
homeDirectory: /home/wikijs
uidNumber: 14583110
gidNumber: 14564110
userPassword: {{ wikijs_ldap_pbkdf2_sha512_hash }}
mail: wikijs@{{ domain }}
gecos: WikiJS user
dn: uid=peertube,ou=users,{{ openldap_domain }}
changetype: add
uid: peertube
cn: peertube
sn: 11
objectClass: top
objectClass: posixAccount
objectClass: inetOrgPerson
loginShell: /bin/bash
homeDirectory: /home/peertube
uidNumber: 14583111
gidNumber: 14564111
userPassword: {{ peertube_ldap_pbkdf2_sha512_hash }}
mail: peertube@{{ domain }}
gecos: PeerTube user
dn: cn=admin,ou=groups,{{ openldap_domain }}
changetype: add
objectClass: groupOfUniqueNames
cn: admin
description: Admin users
uniqueMember: cn=admin,{{ openldap_domain }}
06-memberof.ldif: |-
dn: cn=services,ou=groups,{{ openldap_domain }}
changetype: add
objectClass: groupOfUniqueNames
cn: services
description: System users
uniqueMember: uid=systemuser,ou=services,{{ openldap_domain }}
uniqueMember: uid=ldapbind,ou=services,{{ openldap_domain }}
uniqueMember: uid=nextcloud,ou=users,{{ openldap_domain }}
uniqueMember: uid=bitwarden,ou=users,{{ openldap_domain }}
uniqueMember: uid=gitea,ou=users,{{ openldap_domain }}
uniqueMember: uid=wikijs,ou=users,{{ openldap_domain }}
uniqueMember: uid=peertube,ou=users,{{ openldap_domain }}
dn: cn=users,ou=groups,{{ openldap_domain }}
changetype: add
objectClass: groupOfUniqueNames
cn: users
description: Simple users
{% for user in openldap_simple_users %}
uniqueMember: uid={{ user.name }},ou=users,{{ openldap_domain }}
{% endfor %}
{% for user in openldap_custom_users %}
uniqueMember: uid={{ user.name }},ou=users,{{ openldap_domain }}
{% endfor %}
## Persist data to a persistent volume
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: "{{ openldap_storage | default('nfs-ssd') }}"
accessMode: "{{ openldap_storage_mode | default('ReadWriteMany') }}"
size: "{{ openldap_size | default('8Gi') }}"
# existingClaim: ""
## test container details
test:
enabled: false
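# A quick smoke test once the release is up, assuming ldaps is reachable and
# the ldapbind password is at hand (host and base DN are illustrative):
#   ldapsearch -H ldaps://openldap.example.com \
#     -D "uid=ldapbind,ou=services,dc=example,dc=com" -w "$LDAPBIND_PASSWORD" \
#     -b "ou=users,dc=example,dc=com" "(uid=admin)" cn mail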

View File

@ -0,0 +1,44 @@
- name: Create Let's Encrypt ISRG Root X1 CA secret for OpenLDAP
k8s:
state: present
definition:
apiVersion: v1
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZhekNDQTFPZ0F3SUJBZ0lSQUlJUXo3RFNRT05aUkdQZ3UyT0Npd0F3RFFZSktvWklodmNOQVFFTEJRQXcKVHpFTE1Ba0dBMVVFQmhNQ1ZWTXhLVEFuQmdOVkJBb1RJRWx1ZEdWeWJtVjBJRk5sWTNWeWFYUjVJRkpsYzJWaApjbU5vSUVkeWIzVndNUlV3RXdZRFZRUURFd3hKVTFKSElGSnZiM1FnV0RFd0hoY05NVFV3TmpBME1URXdORE00CldoY05NelV3TmpBME1URXdORE00V2pCUE1Rc3dDUVlEVlFRR0V3SlZVekVwTUNjR0ExVUVDaE1nU1c1MFpYSnUKWlhRZ1UyVmpkWEpwZEhrZ1VtVnpaV0Z5WTJnZ1IzSnZkWEF4RlRBVEJnTlZCQU1UREVsVFVrY2dVbTl2ZENCWQpNVENDQWlJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dJUEFEQ0NBZ29DZ2dJQkFLM29KSFAwRkRmem01NHJWeWdjCmg3N2N0OTg0a0l4dVBPWlhvSGozZGNLaS92VnFidllBVHlqYjNtaUdiRVNUdHJGai9SUVNhNzhmMHVveG15RisKMFRNOHVrajEzWG5mczdqL0V2RWhta3ZCaW9aeGFVcG1abXlQZmp4d3Y2MHBJZ2J6NU1EbWdLN2lTNCszbVg2VQpBNS9UUjVkOG1VZ2pVK2c0cms4S2I0TXUwVWxYaklCMHR0b3YwRGlOZXdOd0lSdDE4akE4K28rdTNkcGpxK3NXClQ4S09FVXQrend2by83VjNMdlN5ZTByZ1RCSWxESENOQXltZzRWTWs3QlBaN2htL0VMTktqRCtKbzJGUjNxeUgKQjVUMFkzSHNMdUp2VzVpQjRZbGNOSGxzZHU4N2tHSjU1dHVrbWk4bXhkQVE0UTdlMlJDT0Z2dTM5NmozeCtVQwpCNWlQTmdpVjUrSTNsZzAyZFo3N0RuS3hIWnU4QS9sSkJkaUIzUVcwS3RaQjZhd0JkcFVLRDlqZjFiMFNIelV2CktCZHMwcGpCcUFsa2QyNUhON3JPckZsZWFKMS9jdGFKeFFaQktUNVpQdDBtOVNUSkVhZGFvMHhBSDBhaG1iV24KT2xGdWhqdWVmWEtuRWdWNFdlMCtVWGdWQ3dPUGpkQXZCYkkrZTBvY1MzTUZFdnpHNnVCUUUzeERrM1N6eW5UbgpqaDhCQ05BdzFGdHhOclFIdXNFd01GeEl0NEk3bUtaOVlJcWlveW1DekxxOWd3UWJvb01EUWFIV0JmRWJ3cmJ3CnFIeUdPMGFvU0NxSTNIYWFkcjhmYXFVOUdZL3JPUE5rM3NnckRRb28vL2ZiNGhWQzFDTFFKMTNoZWY0WTUzQ0kKclU3bTJZczZ4dDBuVVc3L3ZHVDFNME5QQWdNQkFBR2pRakJBTUE0R0ExVWREd0VCL3dRRUF3SUJCakFQQmdOVgpIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJSNXRGbm1lN2JsNUFGemdBaUl5QnBZOXVtYmJqQU5CZ2txCmhraUc5dzBCQVFzRkFBT0NBZ0VBVlI5WXFieXlxRkRRRExIWUdta2dKeWtJckdGMVhJcHUrSUxsYVMvVjlsWkwKdWJoekVGblRJWmQrNTB4eCs3TFNZSzA1cUF2cUZ5RldoZkZRRGxucnp1Qlo2YnJKRmUrR25ZK0VnUGJrNlpHUQozQmViWWh0RjhHYVYwbnh2d3VvNzd4L1B5OWF1Si9HcHNNaXUvWDErbXZvaUJPdi8yWC9xa1NzaXNSY09qL0tLCk5GdFkyUHdCeVZTNXVDYk1pb2d6aVV3dGhEeUMzKzZXVndXNkxMdjN4TGZIVGp1Q3ZqSElJbk56a3RIQ2dLUTUKT1JBekk0Sk1QSitHc2xXWUhiNHBob3dpbTU3aWF6dFhPb0p3VGR3Sng0bkxDZ2ROYk9oZGpzbnZ6cXZIdTdVcgpUa1hXU3RBbXpPVnl5Z2hxcFpYakZhSDNwTzNKTEYrbCsvK3NLQUl1dnRkN3UrTnhlNUFXMHdkZVJsTjhOd2RDCmpOUEVscHpWbWJVcTRKVWFnRWl1VERrSHpzeEhwRktWSzdxNCs2M1NNMU45NVIxTmJkV2hzY2RDYitaQUp6VmMKb3lpM0I0M25qVE9RNXlPZisxQ2NlV3hHMWJRVnM1WnVmcHNNbGpxNFVpMC8xbHZoK3dqQ2hQNGtxS09KMnF4cQo0Umdxc2FoRFlWdlRIOXc3alhieUxlaU5kZDhYTTJ3OVUvdDd5MEZmLzl5aTBHRTQ0WmE0ckYyTE45ZDExVFBBCm1SR3VuVUhCY25XRXZnSkJRbDluSkVpVTBac252Z2MvdWJoUGdYUlI0WHEzN1owajRyN2cxU2dFRXp3eEE1N2QKZW15UHhnY1l4bi9lUjQ0L0tKNEVCcytsVkRSM3ZleUptK2tYUTk5YjIxLytqaDVYb3MxQW5YNWlJdHJlR0NjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
kind: Secret
metadata:
name: "openldap.{{ domain }}-ca"
namespace: "{{ openldap_namespace | default(namespace) }}"
- name: Request cert for OpenLDAP
k8s:
state: present
definition:
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: "openldap.{{ domain }}-crt"
namespace: "{{ openldap_namespace | default(namespace) }}"
spec:
secretName: "openldap.{{ domain }}-secret"
dnsNames:
- "openldap.{{ domain }}"
issuerRef:
name: letsencrypt-prod
# We can reference ClusterIssuers by changing the kind here.
# The default value is Issuer (i.e. a locally namespaced Issuer)
kind: ClusterIssuer
group: cert-manager.io
- set_fact:
openldap_combined_values: "{{ openldap_default_values | combine(openldap_values, recursive=true) }}"
- name: Deploy OpenLDAP
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ openldap_namespace | default(namespace) }}"
release_name: "{{ openldap_name | default('openldap') }}"
chart_ref: "{{ openldap_chart | default('ghp/openldap') }}"
chart_version: "{{ openldap_version | default(omit) }}"
release_values: "{{ openldap_combined_values | from_yaml }}"
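# The *_pbkdf2_sha512_hash values consumed by the LDIFs above can be generated
# with slappasswd once the pw-pbkdf2 module is loaded; a sketch (the module
# file name may differ per platform):
#   slappasswd -o module-load=pw-pbkdf2.la -h "{PBKDF2-SHA512}" -s 'secret'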

View File

@ -0,0 +1,453 @@
peertube_enabled: false
peertube_publish: false
peertube_use_external_db: true
peertube_default_values:
replicaCount: 1
image:
repository: chocobozzz/peertube
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "v3.0.0-buster"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
configAsCode:
enabled: true
config:
listen:
hostname: '0.0.0.0'
port: 9000
# Correspond to your reverse proxy server_name/listen configuration
webserver:
https: true
hostname: 'peertube.{{ domain }}'
port: 443
rates_limit:
api:
# 50 attempts in 10 seconds
window: 10 seconds
max: 50
login:
# 15 attempts in 5 min
window: 5 minutes
max: 15
signup:
# 2 attempts in 5 min (only succeeded attempts are taken into account)
window: 5 minutes
max: 2
ask_send_email:
# 3 attempts in 5 min
window: 5 minutes
max: 3
# Proxies to trust to get real client IP
# If you run PeerTube just behind a local proxy (nginx), keep 'loopback'
# If you run PeerTube behind a remote proxy, add the proxy IP address (or subnet)
trust_proxy:
- 'loopback'
- 'linklocal'
- 'uniquelocal'
- '10.0.0.0/8'
- '172.16.0.0/12'
- '192.168.0.0/16'
# Your database name will be database.name OR "peertube"+database.suffix
database:
hostname: '{{ namespace }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local'
port: 5432
ssl: require
suffix: ''
username: '{{ peertube_db_username }}'
password: '{{ peertube_db_password }}'
pool:
max: 5
# Redis server for short time storage
# You can also specify a 'socket' path to a unix socket but first need to
# comment out hostname and port
redis:
hostname: 'peertube-redis-master'
port: 6379
auth: null
db: 0
# SMTP server to send emails
smtp:
# smtp or sendmail
transport: smtp
# Path to sendmail command. Required if you use sendmail transport
sendmail: null
hostname: "mail.{{ domain }}"
port: 465 # If you use StartTLS: 587
username: peertube
password: "{{ peertube_ldap_password }}"
tls: true # If you use StartTLS: false
disable_starttls: true
ca_file: null # Used for self signed certificates
from_address: 'peertube@{{ domain }}'
email:
body:
signature: "PeerTube"
subject:
prefix: "[PeerTube]"
# From the project root directory
storage:
tmp: '/var/www/peertube/storage/tmp/' # Used to download data (imports etc.) and store uploaded files before processing...
avatars: '/var/www/peertube/storage/avatars/'
videos: '/var/www/peertube/storage/videos/'
streaming_playlists: '/var/www/peertube/storage/streaming-playlists/'
redundancy: '/var/www/peertube/storage/redundancy/'
logs: '/var/www/peertube/storage/logs/'
previews: '/var/www/peertube/storage/previews/'
thumbnails: '/var/www/peertube/storage/thumbnails/'
torrents: '/var/www/peertube/storage/torrents/'
captions: '/var/www/peertube/storage/captions/'
cache: '/var/www/peertube/storage/cache/'
plugins: '/var/www/peertube/storage/plugins/'
# Overridable client files : logo.svg, favicon.png and icons/*.png (PWA) in client/dist/assets/images
# Could contain for example assets/images/favicon.png
# If the file exists, peertube will serve it
# If not, peertube will fall back to the default file
client_overrides: '/var/www/peertube/storage/client-overrides/'
log:
level: 'info' # debug/info/warning/error
rotation:
enabled: true # Enabled by default; if disabled, make sure that 'storage.logs' points to a folder handled by logrotate
maxFileSize: 12MB
maxFiles: 20
anonymizeIP: false
trending:
videos:
interval_days: 7 # Compute trending videos for the last x days
# Cache remote videos on your server, to help other instances to broadcast the video
# You can define multiple caches using different sizes/strategies
# Once you have defined your strategies, choose which instances you want to cache in admin -> manage follows -> following
redundancy:
videos:
check_interval: '1 hour' # How often you want to check new videos to cache
strategies: # Just uncomment strategies you want
# -
# size: '10GB'
# # Minimum time the video must remain in the cache. Only accept values > 10 hours (to not overload remote instances)
# min_lifetime: '48 hours'
# strategy: 'most-views' # Cache videos that have the most views
# -
# size: '10GB'
# # Minimum time the video must remain in the cache. Only accept values > 10 hours (to not overload remote instances)
# min_lifetime: '48 hours'
# strategy: 'trending' # Cache trending videos
# -
# size: '10GB'
# # Minimum time the video must remain in the cache. Only accept values > 10 hours (to not overload remote instances)
# min_lifetime: '48 hours'
# strategy: 'recently-added' # Cache recently added videos
# min_views: 10 # Having at least x views
# Other instances that duplicate your content
remote_redundancy:
videos:
# 'nobody': Do not accept remote redundancies
# 'anybody': Accept remote redundancies from anybody
# 'followings': Accept redundancies from instance followings
accept_from: 'followings'
csp:
enabled: false
report_only: true # CSP directives are still being tested, so disable the report only mode at your own risk!
report_uri:
tracker:
# If you disable the tracker, you disable the P2P aspect of PeerTube
enabled: true
# Only handle requests on your videos.
# If you set this to false it means you have a public tracker.
# Then, it is possible that clients overload your instance with external torrents
private: true
# Reject peers that do a lot of announces (could improve privacy of TCP/UDP peers)
reject_too_many_announces: false
history:
videos:
# If you want to limit users videos history
# -1 means there is no limitations
# Other values could be '6 months' or '30 days' etc (PeerTube will periodically delete old entries from database)
max_age: -1
views:
videos:
# PeerTube creates a database entry every hour for each video to track views over a period of time
# This is used in particular by the Trending page
# PeerTube could remove old remote video views if you want to reduce your database size (video view counter will not be altered)
# -1 means no cleanup
# Other values could be '6 months' or '30 days' etc (PeerTube will periodically delete old entries from database)
remote:
max_age: '30 days'
plugins:
# The website PeerTube will ask for available PeerTube plugins and themes
# This is an unmoderated plugin index, so only install plugins/themes you trust
index:
enabled: true
check_latest_versions_interval: '12 hours' # How often you want to check new plugins/themes versions
url: 'https://packages.joinpeertube.org'
federation:
videos:
federate_unlisted: false
###############################################################################
#
# From this point, all the following keys can be overridden by the web interface
# (local-production.json file). If you need to change some values, prefer to
# use the web interface because the configuration will be automatically
# reloaded without any need to restart PeerTube.
#
# /!\ If you already have a local-production.json file, the modification of the
# following keys will have no effect /!\.
#
###############################################################################
cache:
previews:
size: 500 # Max number of previews you want to cache
captions:
size: 500 # Max number of video captions/subtitles you want to cache
admin:
# Used to generate the root user at first startup
# And to receive emails from the contact form
email: 'peertube@{{ domain }}'
contact_form:
enabled: true
signup:
enabled: false
limit: 10 # When the limit is reached, registrations are disabled. -1 == unlimited
requires_email_verification: false
filters:
cidr: # You can specify CIDR ranges to whitelist (empty = no filtering) or blacklist
whitelist: []
blacklist: []
user:
# Default value of maximum video BYTES the user can upload (does not take into account transcoded files).
# -1 == unlimited
video_quota: -1
video_quota_daily: -1
# If enabled, the video will be transcoded to mp4 (x264) with "faststart" flag
# In addition, if some resolutions are enabled the mp4 video file will be transcoded to these new resolutions.
# Please, do not disable transcoding since many uploaded videos will not work
transcoding:
enabled: true
# Allow your users to upload .mkv, .mov, .avi, .wmv, .flv, .f4v, .3g2, .3gp, .mts, .m2ts, .mxf, .nut videos
allow_additional_extensions: true
# If a user uploads an audio file, PeerTube will create a video by merging the preview file and the audio file
allow_audio_files: true
threads: 2
resolutions: # Only created if the original video has a higher resolution, uses more storage!
0p: true # audio-only (creates mp4 without video stream, always created when enabled)
240p: true
360p: true
480p: true
720p: true
1080p: true
2160p: true
# Generate videos in a WebTorrent format (what we do since the first PeerTube release)
# If you also enabled the hls format, it will multiply videos storage by 2
# If disabled, breaks federation with PeerTube instances < 2.1
webtorrent:
enabled: true
# /!\ Requires ffmpeg >= 4.1
# Generate HLS playlists and fragmented MP4 files. Better playback than with WebTorrent:
# * Resolution change is smoother
# * Faster playback in particular with long videos
# * More stable playback (less bugs/infinite loading)
# If you also enabled the webtorrent format, it will multiply videos storage by 2
hls:
enabled: true
live:
enabled: true
# Limit lives duration
# Set null to disable duration limit
max_duration: -1 # For example: '5 hours'
# Limit max number of live videos created on your instance
# -1 == unlimited
max_instance_lives: 10
# Limit max number of live videos created by a user on your instance
# -1 == unlimited
max_user_lives: 2
# Allow your users to save a replay of their live
# PeerTube will transcode segments in a video file
# If the user daily/total quota is reached, PeerTube will stop the live
# /!\ transcoding.enabled (and not live.transcoding.enabled) has to be true to create a replay
allow_replay: true
rtmp:
port: 1935
# Allow to transcode the live streaming in multiple live resolutions
transcoding:
enabled: true
threads: 2
resolutions:
240p: true
360p: true
480p: true
720p: true
1080p: true
2160p: true
import:
# Add ability for your users to import remote videos (from YouTube, torrent...)
videos:
http: # Classic HTTP or all sites supported by youtube-dl https://rg3.github.io/youtube-dl/supportedsites.html
enabled: true
# You can use an HTTP/HTTPS/SOCKS proxy with youtube-dl
proxy:
enabled: false
url: ""
torrent: # Magnet URI or torrent file (use classic TCP/UDP/WebSeed to download the file)
enabled: true
auto_blacklist:
# New videos automatically blacklisted so moderators can review before publishing
videos:
of_users:
enabled: false
# Instance settings
instance:
name: 'GHP PeerTube'
short_description: 'PeerTube, a federated (ActivityPub) video streaming platform using P2P (BitTorrent) directly in the web browser with WebTorrent and Angular.'
description: 'Welcome to GHP PeerTube instance!' # Support markdown
terms: 'No terms for now.' # Support markdown
code_of_conduct: '' # Supports markdown
# Who moderates the instance? What is the policy regarding NSFW videos? Political videos? etc
moderation_information: '' # Supports markdown
# Why did you create this instance?
creation_reason: ''
# Who is behind the instance? A single person? A non profit?
administrator: ''
# How long do you plan to maintain this instance?
maintenance_lifetime: ''
# How will you pay the PeerTube instance server? With your own funds? With users donations? Advertising?
business_model: ''
# If you want to explain on what type of hardware your PeerTube instance runs
# Example: "2 vCore, 2GB RAM..."
hardware_information: '' # Supports Markdown
# What are the main languages of your instance? To interact with your users for example
# Uncomment or add the languages you want
# List of supported languages: https://peertube.cpy.re/api/v1/videos/languages
languages:
# - en
# - es
# - fr
# You can specify the main categories of your instance (dedicated to music, gaming or politics etc)
# Uncomment or add the category ids you want
# List of supported categories: https://peertube.cpy.re/api/v1/videos/categories
categories:
# - 1 # Music
# - 2 # Films
# - 3 # Vehicles
# - 4 # Art
# - 5 # Sports
# - 6 # Travels
# - 7 # Gaming
# - 8 # People
# - 9 # Comedy
# - 10 # Entertainment
# - 11 # News & Politics
# - 12 # How To
# - 13 # Education
# - 14 # Activism
# - 15 # Science & Technology
# - 16 # Animals
# - 17 # Kids
# - 18 # Food
default_client_route: '/videos/recently-added'
# Whether or not the instance is dedicated to NSFW content
# Enabling it will allow other administrators to know that you are mainly federating sensitive content
# Moreover, the NSFW checkbox on video upload will be automatically checked by default
is_nsfw: false
# By default, "do_not_list" or "blur" or "display" NSFW videos
# Could be overridden per user with a setting
default_nsfw_policy: 'display'
customizations:
javascript: '' # Directly your JavaScript code (without <script> tags). Will be eval at runtime
css: '' # Directly your CSS code (without <style> tags). Will be injected at runtime
# robots.txt rules. To disallow robots from crawling your instance and prevent indexation of your site, add '/' to "Disallow:"
robots: |
User-agent: *
Disallow:
# Security.txt rules. To discourage researchers from testing your instance and disable security.txt integration, set this to an empty string.
securitytxt:
"# If you would like to report a security issue\n# you may report it to:\nContact: https://github.com/Chocobozzz/PeerTube/blob/develop/SECURITY.md\nContact: mailto:"
services:
# Cards configuration to format video in Twitter
twitter:
username: '@Chocobozzz' # Indicates the Twitter account for the website or platform on which the content was published
# If true, a video player will be embedded in the Twitter feed on PeerTube video share
# If false, we use an image link card that will redirect on your PeerTube instance
# Change it to "true", and then test on https://cards-dev.twitter.com/validator to see if you are whitelisted
whitelisted: false
followers:
instance:
# Allow or not other instances to follow yours
enabled: true
# Whether or not an administrator must manually validate a new follower
manual_approval: false
followings:
instance:
# If you want to automatically follow back new instance followers
# If this option is enabled, use the mute feature instead of deleting followings
# /!\ Don't enable this if you don't have a reactive moderation team /!\
auto_follow_back:
enabled: false
# If you want to automatically follow instances of the public index
# If this option is enabled, use the mute feature instead of deleting followings
# /!\ Don't enable this if you don't have a reactive moderation team /!\
auto_follow_index:
enabled: false
# Host your own using https://framagit.org/framasoft/peertube/instances-peertube#peertube-auto-follow
index_url: ''
theme:
default: 'default'
broadcast_message:
enabled: false
message: '' # Support markdown
level: 'info' # 'info' | 'warning' | 'error'
dismissable: false
search:
# Add ability to fetch remote videos/actors by their URI, that may not be federated with your instance
# If enabled, the associated group will be able to "escape" from the instance follows
# That means they will be able to follow channels, watch videos, list videos of non followed instances
remote_uri:
users: true
anonymous: false
# Use a third party index instead of your local index, only for search results
# Useful to discover content outside of your instance
# If you enable search_index, you must enable remote_uri search for users
# If you do not enable remote_uri search for anonymous user, your instance will redirect the user on the origin instance
# instead of loading the video locally
search_index:
enabled: false
# URL of the search index, that should use the same search API and routes
# than PeerTube: https://docs.joinpeertube.org/api-rest-reference.html
# You should deploy your own with https://framagit.org/framasoft/peertube/search-index,
# and can use https://search.joinpeertube.org/ for tests, but keep in mind the latter is an unmoderated search index
url: ''
# You can disable local search, so users only use the search index
disable_local_search: false
# If you did not disable local search, you can decide to use the search index by default
is_default_search: false
env:
- name: PT_INITIAL_ROOT_PASSWORD
value: "{{ peertube_admin_password }}"
ingress:
enabled: true
annotations:
cert-manager.io/acme-challenge-type: dns01
cert-manager.io/acme-dns01-provider: rfc2136
cert-manager.io/cluster-issuer: letsencrypt-prod
kubernetes.io/ingress.class: "{{ external_ingress_class if peertube_publish else internal_ingress_class }}"
kubernetes.io/tls-acme: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
hosts:
- host: peertube.{{ domain }}
paths: ["/"]
tls:
- secretName: peertube.{{ domain }}-tls
hosts:
- peertube.{{ domain }}
persistence:
enabled: true
accessMode: "{{ peertube_storage_mode | default('ReadWriteMany') }}"
size: "{{ peertube_size | default('100Gi') }}"
storageClass: "{{ peertube_storage | default('nfs-hdd') }}"
redis:
cluster:
enabled: false
postgresql:
enabled: false
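# With live.enabled above, clients publish over RTMP on port 1935; a sketch of
# an ffmpeg invocation (host is illustrative, the stream key comes from the
# PeerTube UI):
#   ffmpeg -re -i input.mp4 -c copy -f flv \
#     rtmp://peertube.example.com:1935/live/<stream-key>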

View File

@ -0,0 +1,17 @@
- name: Import secret.yaml to obtain secrets
include_tasks: secrets.yaml
when:
- peertube_use_external_db
- postgres_enable
- set_fact:
peertube_combined_values: "{{ peertube_default_values | combine(peertube_values, recursive=true) }}"
- name: Deploy PeerTube
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ peertube_namespace | default(namespace) }}"
release_name: "{{ peertube_name | default('peertube') }}"
chart_ref: "{{ peertube_chart | default('ghp/peertube') }}"
chart_version: "{{ peertube_version | default(omit) }}"
release_values: "{{ peertube_combined_values | from_yaml }}"

View File

@ -0,0 +1,25 @@
- block:
- name: Set DB namespace for secret lookup
set_fact:
db_namespace: "{{ peertube_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}"
- name: Set DB secret name for lookup
set_fact:
db_secret_name: "peertube-owner-user.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do"
- name: Lookup PeerTube DB secret
set_fact:
peertube_db_secret: "{{ lookup('k8s', kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}"
- debug:
msg: "{{ peertube_db_secret }}"
verbosity: 2
- name: Set PeerTube DB username
set_fact:
peertube_db_username: "{{ peertube_db_secret.data.username | b64decode }}"
- name: Set PeerTube DB password
set_fact:
peertube_db_password: "{{ peertube_db_secret.data.password | b64decode }}"

View File

@ -0,0 +1,76 @@
playmaker_enabled: true
playmaker_publish: false
playmaker_default_values:
replicaCount: 1
image:
repository: "{{ docker_registry }}/playmaker"
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "0.6.4"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: "playmaker"
#env:
# - name: LANG_LOCALE
# value: "en_US"
# - name: LANG_TIMEZONE
# value: "Europe/Moscow"
# - name: DEVICE_CODE
# value: "gemini"
# - name: CRONTAB_STRING
# value: "0 2 * * *"
#credentials: |
# [google]
# email = YOUR_GOOGLE_EMAIL
# password = YOUR_GOOGLE_EMAIL_PASSWORD
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext:
fsGroup: 999
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "{{ external_ingress_class if playmaker_publish else internal_ingress_class }}"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
cert-manager.io/acme-dns01-provider: "rfc2136"
cert-manager.io/acme-challenge-type: "dns01"
kubernetes.io/tls-acme: "true"
hosts:
- host: playmaker.{{ domain }}
paths: ["/"]
tls:
- secretName: playmaker.{{ domain }}-tls
hosts:
- playmaker.{{ domain }}
persistence:
enabled: true
storageClass: "{{ playmaker_storage | default('nfs-hdd') }}"
size: "{{ playmaker_size | default('20Gi') }}"
accessMode: "{{ playmaker_storage_mode | default('ReadWriteMany') }}"

View File

@ -0,0 +1,12 @@
- set_fact:
playmaker_combined_values: "{{ playmaker_default_values | combine(playmaker_values, recursive=true) }}"
- name: Deploy Docker playmaker
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ playmaker_namespace | default(namespace) }}"
release_name: "{{ playmaker_name | default('playmaker') }}"
chart_ref: "{{ playmaker_chart | default('ghp/playmaker') }}"
chart_version: "{{ playmaker_version | default(omit) }}"
release_values: "{{ playmaker_combined_values | from_yaml }}"
wait: true

View File

@ -0,0 +1,292 @@
postfix_default_values:
replicaCount: 1
persistence:
enabled: true
existingClaim: mailboxes
tls:
enabled: true
existingSecret: mail.{{ domain }}-secret
postfix:
image:
repository: "{{ docker_registry }}/postfix"
tag: 3.5.8
pullPolicy: Always
configmaps:
main: |
#smtp_host_lookup = native
compatibility_level = 2
maillog_file = /dev/stdout
# Use ipv4 and listen on all interfaces
inet_protocols = ipv4
inet_interfaces = all
queue_directory = /var/spool/postfix
command_directory = /usr/sbin
daemon_directory = /usr/libexec/postfix
data_directory = /var/lib/postfix
mail_owner = postfix
# Postfix full server name for mail send/receive
myhostname = mail.{{ domain }}
# Set domain name
mydomain = {{ domain }}
# Local name for mail send
myorigin = $mydomain
# Local mail delivery
mydestination = $myhostname, localhost.$mydomain, localhost
# Transport type
local_transport = virtual
# Local users map
local_recipient_maps = $virtual_mailbox_maps
# Reject code
unknown_local_recipient_reject_code = 550
# Virtual domain list
virtual_mailbox_domains = {{ domain }}
virtual_mailbox_base = /var/mail/vhosts
# Allowed users map
virtual_mailbox_maps = ldap:/etc/postfix/ldap-local-recipients.cf
# Dovecot socket for mail delivery
#virtual_transport = lmtp:unix:private/dovecot-lmtp
virtual_transport = lmtp:inet:dovecot.{{ namespace }}.svc.cluster.local:24
# Certs and TLS options
smtpd_tls_cert_file = /tls/tls.crt
smtpd_tls_key_file = /tls/tls.key
smtpd_use_tls = yes
smtpd_tls_auth_only = yes
smtpd_tls_security_level = may
smtp_tls_loglevel = 1
smtpd_tls_loglevel = 1
smtpd_tls_received_header = yes
smtpd_tls_session_cache_timeout = 3600s
smtp_tls_note_starttls_offer = yes
tls_random_source = dev:/dev/urandom
smtp_tls_security_level = may
# DANE-Settings
#smtp_dns_support_level=dnssec
#smtp_host_lookup=dns
#smtp_tls_security_level = dane
#smtp_tls_loglevel=1
# Filters for mail
smtpd_helo_required = yes
#smtpd_recipient_restrictions = permit_sasl_authenticated, permit_mynetworks, reject_unauth_destination, reject_unknown_sender_domain, reject_invalid_helo_hostname, reject_unauth_destination
smtpd_recipient_restrictions = permit_sasl_authenticated, permit_mynetworks, reject_unauth_destination, reject_non_fqdn_sender, reject_unknown_sender_domain, reject_invalid_helo_hostname, reject_non_fqdn_helo_hostname, reject_unauth_destination, check_policy_service unix:private/policyd-spf
# SASL auth with dovecot options
smtpd_sasl_auth_enable = yes
smtpd_sasl_security_options = noanonymous
broken_sasl_auth_clients = yes
smtpd_sasl_type = dovecot
smtpd_sasl_path = inet:dovecot.{{ namespace }}.svc.cluster.local:12345
smtpd_sasl_local_domain = $myorigin
milter_protocol = 6
smtpd_milters = inet:opendkim.{{ namespace }}.svc.cluster.local:8891, inet:opendmarc.{{ namespace }}.svc.cluster.local:8893
non_smtpd_milters = $smtpd_milters
milter_default_action = accept
smtpd_tls_CAfile = /etc/ssl/certs/ca-bundle.crt
smtp_tls_CAfile = /etc/ssl/certs/ca-bundle.crt
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtpd_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtp_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtp_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtp_tls_exclude_ciphers = EXP, MEDIUM, LOW, DES, 3DES, SSLv2
smtpd_tls_exclude_ciphers = EXP, MEDIUM, LOW, DES, 3DES, SSLv2
tls_high_cipherlist = kEECDH:+kEECDH+SHA:kEDH:+kEDH+SHA:+kEDH+CAMELLIA:kECDH:+kECDH+SHA:kRSA:+kRSA+SHA:+kRSA+CAMELLIA:!aNULL:!eNULL:!SSLv2:!RC4:!MD5:!DES:!EXP:!SEED:!IDEA:!3DES
tls_medium_cipherlist = kEECDH:+kEECDH+SHA:kEDH:+kEDH+SHA:+kEDH+CAMELLIA:kECDH:+kECDH+SHA:kRSA:+kRSA+SHA:+kRSA+CAMELLIA:!aNULL:!eNULL:!SSLv2:!MD5:!DES:!EXP:!SEED:!IDEA:!3DES
smtp_tls_ciphers = high
smtpd_tls_ciphers = high
sendmail_path = /usr/sbin/sendmail
html_directory = no
setgid_group = postdrop
manpage_directory = /usr/share/man
newaliases_path = /usr/bin/newaliases
mailq_path = /usr/bin/mailq
master: |
#
# Postfix master process configuration file. For details on the format
# of the file, see the master(5) manual page (command: "man 5 master").
#
# Do not forget to execute "postfix reload" after editing this file.
#
# ==========================================================================
# service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (yes) (never) (100)
# ==========================================================================
smtp inet n - n - - smtpd
#smtp inet n - n - 1 postscreen
smtpd pass - - n - - smtpd
dnsblog unix - - n - 0 dnsblog
tlsproxy unix - - n - 0 tlsproxy
submission inet n - n - - smtpd
# -o syslog_name=postfix/submission
# -o smtpd_tls_security_level=encrypt
# -o smtpd_sasl_auth_enable=yes
# -o smtpd_reject_unlisted_recipient=no
# -o smtpd_client_restrictions=$mua_client_restrictions
# -o smtpd_helo_restrictions=$mua_helo_restrictions
# -o smtpd_sender_restrictions=$mua_sender_restrictions
# -o smtpd_recipient_restrictions=permit_sasl_authenticated,reject
# -o milter_macro_daemon_name=ORIGINATING
smtps inet n - n - - smtpd
# -o syslog_name=postfix/smtps
-o smtpd_tls_wrappermode=yes
-o smtpd_sasl_auth_enable=yes
# -o smtpd_reject_unlisted_recipient=no
# -o smtpd_client_restrictions=$mua_client_restrictions
# -o smtpd_helo_restrictions=$mua_helo_restrictions
# -o smtpd_sender_restrictions=$mua_sender_restrictions
-o smtpd_recipient_restrictions=permit_sasl_authenticated,reject
# -o milter_macro_daemon_name=ORIGINATING
#628 inet n - n - - qmqpd
pickup unix n - n 60 1 pickup
cleanup unix n - n - 0 cleanup
qmgr unix n - n 300 1 qmgr
#qmgr unix n - n 300 1 oqmgr
tlsmgr unix - - n 1000? 1 tlsmgr
rewrite unix - - n - - trivial-rewrite
bounce unix - - n - 0 bounce
defer unix - - n - 0 bounce
trace unix - - n - 0 bounce
verify unix - - n - 1 verify
flush unix n - n 1000? 0 flush
proxymap unix - - n - - proxymap
proxywrite unix - - n - 1 proxymap
smtp unix - - n - - smtp
relay unix - - n - - smtp
# -o smtp_helo_timeout=5 -o smtp_connect_timeout=5
showq unix n - n - - showq
error unix - - n - - error
retry unix - - n - - error
discard unix - - n - - discard
local unix - n n - - local
virtual unix - n n - - virtual
lmtp unix - - n - - lmtp
anvil unix - - n - 1 anvil
scache unix - - n - 1 scache
postlog unix-dgram n - n - 1 postlogd
2525 inet n - n - 1 postscreen
-o postscreen_upstream_proxy_protocol=haproxy
-o postscreen_cache_map=btree:$data_directory/postscreen_2525_cache
-o syslog_name=postfix/2525
10587 inet n - n - - smtpd
-o syslog_name=postfix/10587
-o smtpd_tls_security_level=encrypt
-o smtpd_tls_wrappermode=no
-o smtpd_sasl_auth_enable=yes
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
-o smtpd_recipient_restrictions=permit_mynetworks,permit_sasl_authenticated,reject
-o smtpd_sasl_type=dovecot
-o smtpd_sasl_path=inet:dovecot.{{ namespace }}.svc.cluster.local:12345
-o smtpd_upstream_proxy_protocol=haproxy
10465 inet n - n - - smtpd
-o syslog_name=postfix/10465
-o smtpd_tls_wrappermode=yes
-o smtpd_sasl_auth_enable=yes
-o smtpd_recipient_restrictions=permit_mynetworks,permit_sasl_authenticated,reject
-o smtpd_sasl_type=dovecot
-o smtpd_sasl_path=inet:dovecot.{{ namespace }}.svc.cluster.local:12345
-o smtpd_upstream_proxy_protocol=haproxy
#
# ====================================================================
# Interfaces to non-Postfix software. Be sure to examine the manual
# pages of the non-Postfix software to find out what options it wants.
#
# Many of the following services use the Postfix pipe(8) delivery
# agent. See the pipe(8) man page for information about ${recipient}
# and other message envelope options.
# ====================================================================
#
# maildrop. See the Postfix MAILDROP_README file for details.
# Also specify in main.cf: maildrop_destination_recipient_limit=1
#
#maildrop unix - n n - - pipe
# flags=DRhu user=vmail argv=/usr/local/bin/maildrop -d ${recipient}
#
# ====================================================================
#
# Recent Cyrus versions can use the existing "lmtp" master.cf entry.
#
# Specify in cyrus.conf:
# lmtp cmd="lmtpd -a" listen="localhost:lmtp" proto=tcp4
#
# Specify in main.cf one or more of the following:
# mailbox_transport = lmtp:inet:localhost
# virtual_transport = lmtp:inet:localhost
#
# ====================================================================
#
# Cyrus 2.1.5 (Amos Gouaux)
# Also specify in main.cf: cyrus_destination_recipient_limit=1
#
#cyrus unix - n n - - pipe
# user=cyrus argv=/usr/lib/cyrus-imapd/deliver -e -r ${sender} -m ${extension} ${user}
#
# ====================================================================
#
# Old example of delivery via Cyrus.
#
#old-cyrus unix - n n - - pipe
# flags=R user=cyrus argv=/usr/lib/cyrus-imapd/deliver -e -m ${extension} ${user}
#
# ====================================================================
#
# See the Postfix UUCP_README file for configuration details.
#
#uucp unix - n n - - pipe
# flags=Fqhu user=uucp argv=uux -r -n -z -a$sender - $nexthop!rmail ($recipient)
#
# ====================================================================
#
# Other external delivery methods.
#
#ifmail unix - n n - - pipe
# flags=F user=ftn argv=/usr/lib/ifmail/ifmail -r $nexthop ($recipient)
#
#bsmtp unix - n n - - pipe
# flags=Fq. user=bsmtp argv=/usr/local/sbin/bsmtp -f $sender $nexthop $recipient
#
#scalemail-backend unix - n n - 2 pipe
# flags=R user=scalemail argv=/usr/lib/scalemail/bin/scalemail-store
# ${nexthop} ${user} ${extension}
#
#mailman unix - n n - - pipe
# flags=FR user=list argv=/usr/lib/mailman/bin/postfix-to-mailman.py
# ${nexthop} ${user}
#dane unix - - n - - smtp
# -o smtp_dns_support_level=dnssec
# -o smtp_tls_security_level=dane
policyd-spf unix - n n - 0 spawn user=nobody argv=/usr/libexec/postfix/policyd-spf
ldap-local-recipients: |
debuglevel = 0
version = 3
server_host = ldaps://openldap.{{ domain }}
server_port = 636
bind_dn = uid=ldapbind,ou=services,{{ openldap_domain }}
bind_pw = {{ ldapbind_pass | default(ldapbind_password) }}
search_base = ou=users,{{ openldap_domain }}
#search_scope = subtree
query_filter = mail=%s
result_attribute = cn
cache = no
service:
type: LoadBalancer
loadBalancerIP: "{{ postfix_loadbalancer_ip | default(omit) }}"
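# Once deployed, TLS on the exposed SMTP endpoints can be sanity-checked from
# any client (hostname is illustrative):
#   openssl s_client -connect mail.example.com:465 -quiet          # implicit TLS
#   openssl s_client -starttls smtp -connect mail.example.com:587  # STARTTLS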

View File

@ -0,0 +1,13 @@
- set_fact:
postfix_combined_values: "{{ postfix_default_values | combine(postfix_values, recursive=true) }}"
- name: Deploy Postfix
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ postfix_namespace | default(mail_namespace) | default(namespace) }}"
release_name: "{{ postfix_name | default('postfix') }}"
chart_ref: "{{ postfix_chart | default('ghp/postfix') }}"
chart_version: "{{ postfix_version | default(omit) }}"
release_values: "{{ postfix_combined_values | from_yaml }}"
wait: true

View File

@ -0,0 +1,89 @@
postgres_operator_default_values:
image:
registry: "{{ docker_registry }}"
repository: postgres-operator
tag: v1.5.0-72-g49158ecb
pullPolicy: "IfNotPresent"
configKubernetes:
pod_environment_configmap: "{{ postgres_db_namespace | default(namespace) }}/postgresql-pod-environment"
storage_resize_mode: pvc
watched_namespace: "{{ postgres_operator_watch_namespace | default(namespace) }}"
postgres_operator_ui_default_values:
replicaCount: 1
image:
registry: "{{ docker_registry }}"
repository: postgres-operator-ui
tag: v1.5.0-72-g49158ecb
pullPolicy: "IfNotPresent"
envs:
# IMPORTANT: While the operator chart and the UI chart are independent, this is the interface between
# UI and operator API. Insert the service name of the operator API here!
operatorApiUrl: "http://postgres-operator:8080"
operatorClusterNameLabel: "cluster-name"
resourcesVisible: "False"
targetNamespace: "{{ namespace }}"
# Configure UI ingress; disable with "enabled: false" if not needed.
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
cert-manager.io/acme-dns01-provider: "rfc2136"
cert-manager.io/acme-challenge-type: "dns01"
kubernetes.io/ingress.class: "{{ postgres_operator_ui_ingress_class | default(internal_ingress_class) }}"
hosts:
- host: postgres-operator-ui.{{ domain }}
paths: [""]
tls:
- secretName: postgres-operator-ui.{{ domain }}-tls
hosts:
- postgres-operator-ui.{{ domain }}
postgres_db_definitions:
ghp-postgres: |
kind: "postgresql"
apiVersion: "acid.zalan.do/v1"
metadata:
name: "{{ postgres_db_team | default(namespace) }}-postgres"
namespace: "{{ postgres_db_namespace | default(namespace) }}"
labels:
team: "{{ postgres_db_team | default(namespace) }}"
spec:
teamId: "{{ postgres_db_team | default(namespace) }}"
postgresql:
version: "12"
numberOfInstances: 2
volume:
size: "{{ postgres_size | default('10Gi') }}"
users:
gitea: []
drone: []
bitwarden: []
wikijs: []
nextcloud: []
roundcube: []
databases:
gitea: gitea
drone: drone
bitwarden: bitwarden
wikijs: wikijs
nextcloud: nextcloud
roundcube: roundcube
preparedDatabases:
peertube:
defaultUsers: true
extensions:
pg_trgm: pg_catalog
unaccent: public
allowedSourceRanges: []
resources:
requests:
cpu: 100m
memory: 100Mi
limits:
cpu: 500m
memory: 500Mi
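# After the operator reconciles the definition above, cluster state can be
# checked through the CRD it manages (namespace is illustrative):
#   kubectl -n ghp get postgresql
#   # NAME           TEAM   VERSION   PODS   VOLUME   ...   STATUS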

View File

@ -0,0 +1,78 @@
- name: Create Let's Encrypt ISRG Root X1 CA secret
k8s:
state: present
definition:
apiVersion: v1
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZhekNDQTFPZ0F3SUJBZ0lSQUlJUXo3RFNRT05aUkdQZ3UyT0Npd0F3RFFZSktvWklodmNOQVFFTEJRQXcKVHpFTE1Ba0dBMVVFQmhNQ1ZWTXhLVEFuQmdOVkJBb1RJRWx1ZEdWeWJtVjBJRk5sWTNWeWFYUjVJRkpsYzJWaApjbU5vSUVkeWIzVndNUlV3RXdZRFZRUURFd3hKVTFKSElGSnZiM1FnV0RFd0hoY05NVFV3TmpBME1URXdORE00CldoY05NelV3TmpBME1URXdORE00V2pCUE1Rc3dDUVlEVlFRR0V3SlZVekVwTUNjR0ExVUVDaE1nU1c1MFpYSnUKWlhRZ1UyVmpkWEpwZEhrZ1VtVnpaV0Z5WTJnZ1IzSnZkWEF4RlRBVEJnTlZCQU1UREVsVFVrY2dVbTl2ZENCWQpNVENDQWlJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dJUEFEQ0NBZ29DZ2dJQkFLM29KSFAwRkRmem01NHJWeWdjCmg3N2N0OTg0a0l4dVBPWlhvSGozZGNLaS92VnFidllBVHlqYjNtaUdiRVNUdHJGai9SUVNhNzhmMHVveG15RisKMFRNOHVrajEzWG5mczdqL0V2RWhta3ZCaW9aeGFVcG1abXlQZmp4d3Y2MHBJZ2J6NU1EbWdLN2lTNCszbVg2VQpBNS9UUjVkOG1VZ2pVK2c0cms4S2I0TXUwVWxYaklCMHR0b3YwRGlOZXdOd0lSdDE4akE4K28rdTNkcGpxK3NXClQ4S09FVXQrend2by83VjNMdlN5ZTByZ1RCSWxESENOQXltZzRWTWs3QlBaN2htL0VMTktqRCtKbzJGUjNxeUgKQjVUMFkzSHNMdUp2VzVpQjRZbGNOSGxzZHU4N2tHSjU1dHVrbWk4bXhkQVE0UTdlMlJDT0Z2dTM5NmozeCtVQwpCNWlQTmdpVjUrSTNsZzAyZFo3N0RuS3hIWnU4QS9sSkJkaUIzUVcwS3RaQjZhd0JkcFVLRDlqZjFiMFNIelV2CktCZHMwcGpCcUFsa2QyNUhON3JPckZsZWFKMS9jdGFKeFFaQktUNVpQdDBtOVNUSkVhZGFvMHhBSDBhaG1iV24KT2xGdWhqdWVmWEtuRWdWNFdlMCtVWGdWQ3dPUGpkQXZCYkkrZTBvY1MzTUZFdnpHNnVCUUUzeERrM1N6eW5UbgpqaDhCQ05BdzFGdHhOclFIdXNFd01GeEl0NEk3bUtaOVlJcWlveW1DekxxOWd3UWJvb01EUWFIV0JmRWJ3cmJ3CnFIeUdPMGFvU0NxSTNIYWFkcjhmYXFVOUdZL3JPUE5rM3NnckRRb28vL2ZiNGhWQzFDTFFKMTNoZWY0WTUzQ0kKclU3bTJZczZ4dDBuVVc3L3ZHVDFNME5QQWdNQkFBR2pRakJBTUE0R0ExVWREd0VCL3dRRUF3SUJCakFQQmdOVgpIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJSNXRGbm1lN2JsNUFGemdBaUl5QnBZOXVtYmJqQU5CZ2txCmhraUc5dzBCQVFzRkFBT0NBZ0VBVlI5WXFieXlxRkRRRExIWUdta2dKeWtJckdGMVhJcHUrSUxsYVMvVjlsWkwKdWJoekVGblRJWmQrNTB4eCs3TFNZSzA1cUF2cUZ5RldoZkZRRGxucnp1Qlo2YnJKRmUrR25ZK0VnUGJrNlpHUQozQmViWWh0RjhHYVYwbnh2d3VvNzd4L1B5OWF1Si9HcHNNaXUvWDErbXZvaUJPdi8yWC9xa1NzaXNSY09qL0tLCk5GdFkyUHdCeVZTNXVDYk1pb2d6aVV3dGhEeUMzKzZXVndXNkxMdjN4TGZIVGp1Q3ZqSElJbk56a3RIQ2dLUTUKT1JBekk0Sk1QSitHc2xXWUhiNHBob3dpbTU3aWF6dFhPb0p3VGR3Sng0bkxDZ2ROYk9oZGpzbnZ6cXZIdTdVcgpUa1hXU3RBbXpPVnl5Z2hxcFpYakZhSDNwTzNKTEYrbCsvK3NLQUl1dnRkN3UrTnhlNUFXMHdkZVJsTjhOd2RDCmpOUEVscHpWbWJVcTRKVWFnRWl1VERrSHpzeEhwRktWSzdxNCs2M1NNMU45NVIxTmJkV2hzY2RDYitaQUp6VmMKb3lpM0I0M25qVE9RNXlPZisxQ2NlV3hHMWJRVnM1WnVmcHNNbGpxNFVpMC8xbHZoK3dqQ2hQNGtxS09KMnF4cQo0Umdxc2FoRFlWdlRIOXc3alhieUxlaU5kZDhYTTJ3OVUvdDd5MEZmLzl5aTBHRTQ0WmE0ckYyTE45ZDExVFBBCm1SR3VuVUhCY25XRXZnSkJRbDluSkVpVTBac252Z2MvdWJoUGdYUlI0WHEzN1owajRyN2cxU2dFRXp3eEE1N2QKZW15UHhnY1l4bi9lUjQ0L0tKNEVCcytsVkRSM3ZleUptK2tYUTk5YjIxLytqaDVYb3MxQW5YNWlJdHJlR0NjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
kind: Secret
metadata:
name: "postgres.{{ domain }}-ca"
namespace: "{{ postgres_db_namespace | default(namespace) }}"
- name: Request cert for Postgres
k8s:
state: present
definition:
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: "postgres.{{ domain }}-crt"
namespace: "{{ postgres_db_namespace | default(namespace) }}"
spec:
secretName: "postgres.{{ domain }}-secret"
dnsNames:
- "postgres.{{ domain }}"
issuerRef:
name: letsencrypt-prod
# We can reference ClusterIssuers by changing the kind here.
# The default value is Issuer (i.e. a locally namespaced Issuer)
kind: ClusterIssuer
group: cert-manager.io
- name: Allow Non SSL connections
k8s:
state: present
definition:
apiVersion: v1
kind: ConfigMap
metadata:
name: postgresql-pod-environment
namespace: "{{ postgres_db_namespace | default(namespace) }}"
data:
ALLOW_NOSSL: "true"
- set_fact:
postgres_operator_combined_values: "{{ postgres_operator_default_values | combine(postgres_operator_values, recursive=true) }}"
- name: Deploy Postgres-operator
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ postgres_operator_namespace | default(namespace) }}"
release_name: "{{ postgres_operator_name | default('postgres-operator') }}"
chart_ref: "{{ postgres_operator_chart | default('ghp/postgres-operator') }}"
chart_version: "{{ postgres_operator_version | default(omit) }}"
release_values: "{{ postgres_operator_combined_values | from_yaml }}"
wait: true
- set_fact:
postgres_operator_ui_combined_values: "{{ postgres_operator_ui_default_values | combine(postgres_operator_ui_values, recursive=true) }}"
- name: Deploy Postgres-operator UI
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ postgres_operator_ui_namespace | default(postgres_operator_namespace) | default(namespace) }}"
release_name: "{{ postgres_operator_ui_name | default('postgres-operator-ui') }}"
chart_ref: "{{ postgres_operator_ui_chart | default('ghp/postgres-operator-ui') }}"
chart_version: "{{ postgres_operator_ui_version | default(omit) }}"
release_values: "{{ postgres_operator_ui_combined_values | from_yaml }}"
wait: true
- name: Create Postgres databases
k8s:
state: present
definition:
"{{ item.value }}"
wait: true
loop: "{{ postgres_db_definitions | dict2items }}"

View File

@ -0,0 +1,19 @@
default_accounts:
- { name: openldap_admin }
- { name: openldap_config }
- { name: ldapbind }
- { name: nextcloud_admin }
- { name: nextcloud_ldap }
- { name: bitwarden_ldap }
- { name: gitea_admin }
- { name: gitea_ldap }
- { name: wikijs_ldap }
- { name: drone_admin }
- { name: chartmuseum_admin }
- { name: peertube_ldap }
- { name: peertube_admin }
- { name: systemuser }
htpasswd_accounts:
- { name: pypiserver_admin }
- { name: adguard_admin }

View File

@ -0,0 +1,47 @@
- name: Test if DKIM private key exists
shell: grep -c "dkim_private_key_base64" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: dkim_private_key_test_grep
- name: Test if DKIM public key exists
shell: grep -c "dkim_public_key_base64" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: dkim_public_key_test_grep
- name: Create DKIM keys
docker_container:
name: ddclient
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "sh dkim-key.sh {{ mail_domain | default(domain) }}"
register: dkim_container_output
when: dkim_private_key_test_grep.stdout == '0' or dkim_public_key_test_grep.stdout == '0'
- name: Set dkim_keys
set_fact:
dkim_keys: "{{ dkim_container_output.ansible_facts.docker_container.Output | from_yaml }}"
when: dkim_private_key_test_grep.stdout == '0' or dkim_public_key_test_grep.stdout == '0'
- name: Show DKIM private key
debug:
msg: "ddclient private key: {{ dkim_keys['dkim'][0]['default.private'] | b64decode }}"
verbosity: 2
when: dkim_private_key_test_grep.stdout == '0'
- name: Show DKIM public key
debug:
msg: "ddclient public key: {{ dkim_keys['dkim'][0]['default.txt'] | b64decode }}"
verbosity: 2
when: dkim_public_key_test_grep.stdout == '0'
- name: Write DKIM private key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "dkim_private_key_base64: \"{{ dkim_keys['dkim'][0]['default.private'] }}\""
when: dkim_private_key_test_grep.stdout == '0'
- name: Write DKIM public key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "dkim_public_key_base64: \"{{ dkim_keys['dkim'][0]['default.txt'] }}\""
when: dkim_public_key_test_grep.stdout == '0'
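# Sketch (assumption, not part of the role): on a later run, the stored public
# key decodes back to the BIND-style TXT record that must be published for
# default._domainkey in the mail domain's zone:
# - name: Show DKIM TXT record for the zone file
#   debug:
#     msg: "{{ dkim_public_key_base64 | b64decode }}"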

View File

@ -0,0 +1,46 @@
- name: Test if password exists in file for {{ item.name }}
shell: grep -c "^{{ item.name }}_password" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: password_test_grep
- name: Test if password htpasswd hash exists in file for {{ item.name }}
shell: grep -c "^{{ item.name }}_htpasswd_hash" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: htpasswd_hash_test_grep
- name: Create password for {{ item.name }}
shell: "< /dev/urandom tr -dc A-Za-z0-9 | head -c${1:-64};echo;"
register: password
when: password_test_grep.stdout == '0'
- name: Show password json for {{ item.name }}
debug:
msg: "{{ password }}"
verbosity: 2
when: password_test_grep.stdout == '0'
- name: Create bcrypt hash from password for {{ item.name }}
docker_container:
name: slappasswd
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "htpasswd -B -n -i -b -C 16 {{ item.name }} {{ password.stdout | default(item.name + '_password') }}"
register: docker_container_output
when: htpasswd_hash_test_grep.stdout == '0'
- name: Show docker_container_output for {{ item.name }}
debug:
msg: "{{ docker_container_output }}"
verbosity: 2
- name: Write password for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_password: \"{{ password.stdout }}\""
when: password_test_grep.stdout == '0'
- name: Write htpasswd hash for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_htpasswd_hash: \"{{ docker_container_output.ansible_facts.docker_container.Output.split('\n')[0].split(':')[1] }}\""
when: htpasswd_hash_test_grep.stdout == '0'
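# After a run, passwords.yaml gains two lines per account, roughly of this
# shape (illustrative values only):
# adguard_admin_password: "x7Kp...64 random alphanumeric characters"
# adguard_admin_htpasswd_hash: "$2y$16$..."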

View File

@ -0,0 +1,57 @@
- name: Create passwords.yaml file
file:
name: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
state: touch
- name: Create files directory for ddclient tsig
file:
name: "{{ playbook_dir }}/files/{{ namespace }}"
state: directory
- include_tasks: passwords.yaml
loop: "{{ default_accounts }}"
- include_tasks: htpasswd.yaml
loop: "{{ htpasswd_accounts }}"
- include_tasks: passwords.yaml
loop: "{{ openldap_custom_users }}"
when: openldap_custom_users is defined
- include_tasks: passwords.yaml
loop: "{{ openldap_simple_users }}"
when: openldap_simple_users is defined
- name: Test if Drone RPC secret exists in file
shell: grep -c "drone_rpc_secret" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: rpc_secret_test_grep
- name: Test if Drone database secret exists in file
shell: grep -c "drone_database_secret" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: database_secret_test_grep
- name: Create Drone RPC secret
shell: "< /dev/urandom tr -dc a-f0-9 | head -c${1:-128};echo;"
register: rpc_secret
when: rpc_secret_test_grep.stdout == '0'
- name: Create Drone database secret
shell: "< /dev/urandom tr -dc a-f0-9 | head -c${1:-32};echo;"
register: db_secret
when: database_secret_test_grep.stdout == '0'
- name: Write Drone RPC secret
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "drone_rpc_secret: \"{{ rpc_secret.stdout }}\""
when: rpc_secret_test_grep.stdout == '0'
- name: Write Drone database secret
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "drone_database_secret: \"{{ db_secret.stdout }}\""
when: database_secret_test_grep.stdout == '0'
- include_tasks: tsig.yaml
- include_tasks: dkim.yaml
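# openldap_custom_users and openldap_simple_users are expected to follow the
# same shape as default_accounts, e.g. (hypothetical):
# openldap_custom_users:
#   - { name: alice }
#   - { name: bob }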

View File

@ -0,0 +1,46 @@
- name: Test if password exists in file for {{ item.name }}
shell: grep -c "^{{ item.name }}_password" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: password_test_grep
- name: Test if password pbkdf2-sha512 hash exists in file for {{ item.name }}
shell: grep -c "^{{ item.name }}_pbkdf2_sha512_hash" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: pbkdf2_sha512_hash_test_grep
- name: Create password for {{ item.name }}
shell: "< /dev/urandom tr -dc A-Za-z0-9 | head -c${1:-64};echo;"
register: password
when: password_test_grep.stdout == '0'
- name: Show password json for {{ item.name }}
debug:
msg: "{{ password }}"
verbosity: 2
when: password_test_grep.stdout == '0'
- name: Create PBKDF2-SHA512 hash from password for {{ item.name }}
docker_container:
name: slappasswd
image: "{{ docker_registry }}/pwgen"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "slappasswd -o module-load=pw-pbkdf2 -h {PBKDF2-SHA512} -s {{ password.stdout | default(item.name + '_password') }}"
register: docker_container_output
when: pbkdf2_sha512_hash_test_grep.stdout == '0'
- name: Show docker_container_output for {{ item.name }}
debug:
msg: "{{ docker_container_output }}"
verbosity: 2
- name: Write password for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_password: \"{{ password.stdout }}\""
when: password_test_grep.stdout == '0'
- name: Write PBKDF2-SHA512 hash for {{ item.name }}
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "{{ item.name }}_pbkdf2_sha512_hash: \"{{ docker_container_output.ansible_facts.docker_container.Output.split('\n')[0] }}\""
when: pbkdf2_sha512_hash_test_grep.stdout == '0'
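# The written value is an RFC 2307 style hash suitable for an LDAP
# userPassword attribute, e.g. (illustrative):
# gitea_ldap_pbkdf2_sha512_hash: "{PBKDF2-SHA512}10000$c2FsdA$aGFzaA"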

View File

@ -0,0 +1,99 @@
- name: Test if k8s TSIG key exists
shell: grep -c "k8s_tsig" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: k8s_tsig_test_grep
- name: Test if ddclient TSIG key exists
  shell: grep -c "^ddclient_tsig:" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: ddclient_tsig_test_grep
- name: Test if ddclient TSIG public key exists
shell: grep -c "ddclient_tsig_public_key_base64" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: ddclient_tsig_public_key_test_grep
- name: Test if ddclient TSIG private key exists
shell: grep -c "ddclient_tsig_private_key_base64" "{{ inventory_dir }}/group_vars/all/passwords.yaml" || true
register: ddclient_tsig_private_key_test_grep
- name: Generate k8s TSIG key for Knot DNS
docker_container:
name: keymgr
image: "{{ docker_registry }}/tsig"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "keymgr -t k8s hmac-sha512"
register: knot_container_output
when: k8s_tsig_test_grep.stdout == '0'
- name: Set k8s_key
set_fact:
k8s_key: "{{ knot_container_output.ansible_facts.docker_container.Output | from_yaml }}"
when: k8s_tsig_test_grep.stdout == '0'
- name: Show k8s TSIG key
  debug:
    msg: "Knot k8s key: {{ k8s_key['key'][0]['secret'] }}"
    verbosity: 2
  when: k8s_tsig_test_grep.stdout == '0'
- name: Write TSIG for Kubernetes
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "k8s_tsig: \"{{ k8s_key['key'][0]['secret'] }}\""
when: k8s_tsig_test_grep.stdout == '0'
- name: Generate TSIG key for ddclient
docker_container:
name: ddclient
image: "{{ docker_registry }}/tsig"
cleanup: true
detach: false
container_default_behavior: no_defaults
command: "bash tsig-key.sh {{ namespace }}"
register: ddclient_container_output
when: ddclient_tsig_public_key_test_grep.stdout == '0' or ddclient_tsig_private_key_test_grep.stdout == '0'
- name: Set ddclient_key
set_fact:
ddclient_key: "{{ ddclient_container_output.ansible_facts.docker_container.Output | from_yaml }}"
when: ddclient_tsig_public_key_test_grep.stdout == '0' or ddclient_tsig_private_key_test_grep.stdout == '0'
- name: Show ddclient TSIG public key file
debug:
msg: "ddclient key: {{ ddclient_key['tsig'][0]['key'] | b64decode }}"
verbosity: 2
when: ddclient_tsig_public_key_test_grep.stdout == '0'
- name: Show ddclient TSIG private key file
debug:
msg: "ddclient key: {{ ddclient_key['tsig'][0]['private'] | b64decode }}"
verbosity: 2
when: ddclient_tsig_private_key_test_grep.stdout == '0'
- name: Write ddclient TSIG public key file in base64
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "ddclient_tsig_public_key_base64: \"{{ ddclient_key['tsig'][0]['key'] }}\""
when: ddclient_tsig_public_key_test_grep.stdout == '0'
- name: Write ddclient TSIG private key file in base64
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "ddclient_tsig_private_key_base64: \"{{ ddclient_key['tsig'][0]['private'] }}\""
when: ddclient_tsig_private_key_test_grep.stdout == '0'
- name: Set ddclient TSIG key
set_fact:
ddclient_tsig_key: "{{ ddclient_key['tsig'][0]['private'] | b64decode | from_yaml }}"
when: ddclient_tsig_test_grep.stdout == '0'
- name: Show ddclient TSIG key
debug:
msg: "{{ ddclient_tsig_key }}"
verbosity: 2
when: ddclient_tsig_test_grep.stdout == '0'
- name: Write ddclient TSIG key
lineinfile:
path: "{{ inventory_dir }}/group_vars/all/passwords.yaml"
line: "ddclient_tsig: \"{{ ddclient_tsig_key['Key'] }}\""
when: ddclient_tsig_test_grep.stdout == '0'
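# For reference, keymgr -t emits Knot-style YAML, which from_yaml parses into
# the structure used above, e.g. (illustrative secret):
# key:
#   - id: k8s
#     algorithm: hmac-sha512
#     secret: c2VjcmV0c2VjcmV0Cg==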

View File

@ -0,0 +1,53 @@
pypiserver_enabled: true
pypiserver_publish: false
pypiserver_default_values:
## If you want more than 1 replica you will have to use a ReadWriteMany volume
replicaCount: 1
image:
repository: pypiserver/pypiserver
tag: v1.3.2
pullPolicy: IfNotPresent
pullSecrets: []
pypiserver:
# for a list of options see: https://github.com/pypiserver/pypiserver
extraArgs: []
# - --disable-fallback
# - --log-conf=/path/to/file
auth:
## comma-separated list of (case-insensitive) actions to authenticate
      ## Use '.' or '' for empty. Requires the credentials below to be set.
## Available actions are update, download and list
actions: update
## Map of username / encoded passwords that will be put to the htpasswd file
## use `htpasswd -n -b username password` to generate them
credentials:
pypiserver_admin: "{{ pypiserver_admin_htpasswd_hash }}"
ingress:
enabled: true
labels: {}
annotations:
      kubernetes.io/ingress.class: "{{ external_ingress_class if pypiserver_publish else internal_ingress_class }}"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
cert-manager.io/acme-dns01-provider: "rfc2136"
cert-manager.io/acme-challenge-type: "dns01"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
path: "/"
hosts:
- pip.{{ domain }}
tls:
- secretName: pip.{{ domain }}-tls
hosts:
- pip.{{ domain }}
persistence:
enabled: true
storageClass: "{{ pypiserver_storage | default('nfs-hdd') }}"
size: "{{ pypiserver_size | default('20Gi') }}"
accessMode: "{{ pypiserver_storage_mode | default('ReadWriteMany') }}"
mountPropagation: None
securityContext:
enabled: true
runAsUser: 0
runAsGroup: 0
fsGroup: 1000
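## With "actions: update", only uploads require authentication; downloads stay
## anonymous. Hypothetical client-side usage:
## twine upload --repository-url https://pip.{{ domain }}/ -u pypiserver_admin -p <password> dist/*
## pip install --extra-index-url https://pip.{{ domain }}/simple/ <package>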

View File

@ -0,0 +1,12 @@
- set_fact:
pypiserver_combined_values: "{{ pypiserver_default_values | combine(pypiserver_values, recursive=true) }}"
- name: Deploy pypiserver
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ pypiserver_namespace | default(namespace) }}"
release_name: "{{ pypiserver_name | default('pypiserver') }}"
chart_ref: "{{ pypiserver_chart | default('owkin/pypiserver') }}"
chart_version: "{{ pypiserver_version | default(omit) }}"
release_values: "{{ pypiserver_combined_values | from_yaml }}"
wait: true

View File

@ -0,0 +1,59 @@
registry_enabled: true
registry_publish: false
registry_default_values:
service:
type: ClusterIP
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "{{ external_ingress_class if registry_publish else internal_ingress_class }}"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
cert-manager.io/acme-dns01-provider: "rfc2136"
cert-manager.io/acme-challenge-type: "dns01"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
hosts:
- registry.{{ domain }}
tls:
- secretName: registry.{{ domain }}-tls
hosts:
- registry.{{ domain }}
persistence:
enabled: true
storageClass: "{{ registry_storage | default('nfs-hdd') }}"
size: "{{ registry_size | default('15Gi') }}"
accessMode: "{{ registry_storage_mode | default('ReadWriteMany') }}"
registry_readonly_ingress_definition: |
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
cert-manager.io/acme-challenge-type: dns01
cert-manager.io/acme-dns01-provider: rfc2136
cert-manager.io/cluster-issuer: letsencrypt-prod
kubernetes.io/ingress.class: "{{ external_ingress_class }}"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
nginx.ingress.kubernetes.io/configuration-snippet: |-
limit_except GET {
deny all;
}
name: docker-registry-public
namespace: "{{ registry_namespace | default(namespace) }}"
spec:
rules:
- host: "{{ registry_readonly_ingress }}"
http:
paths:
- backend:
serviceName: docker-registry
servicePort: 5000
path: /
tls:
- hosts:
- "{{ registry_readonly_ingress }}"
secretName: "{{ registry_readonly_ingress }}-tls"

View File

@ -0,0 +1,19 @@
- set_fact:
registry_combined_values: "{{ registry_default_values | combine(registry_values, recursive=true) }}"
- name: Deploy Docker registry
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ registry_namespace | default(namespace) }}"
release_name: "{{ registry_name | default('docker-registry') }}"
chart_ref: "{{ registry_chart | default('ghp/docker-registry') }}"
chart_version: "{{ registry_version | default(omit) }}"
release_values: "{{ registry_combined_values | from_yaml }}"
wait: true
- name: Deploy readonly public ingress for Docker registry
when: registry_readonly_ingress is defined
k8s:
state: present
definition:
"{{ registry_readonly_ingress_definition }}"

View File

@ -0,0 +1,83 @@
roundcube_enabled: true
roundcube_publish: false
roundcube_use_external_db: true
roundcube_default_values:
env:
- name: ROUNDCUBEMAIL_DB_TYPE
value: "pgsql"
- name: ROUNDCUBEMAIL_DB_HOST
value: "{{ postgres_db_team | default(namespace) }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local"
- name: ROUNDCUBEMAIL_DB_USER
value: "{{ roundcube_db_username }}"
- name: ROUNDCUBEMAIL_DB_PASSWORD
value: "{{ roundcube_db_password }}"
- name: ROUNDCUBEMAIL_DB_NAME
value: roundcube
- name: ROUNDCUBEMAIL_DEFAULT_HOST
value: "ssl://mail.{{ domain }}"
- name: ROUNDCUBEMAIL_DEFAULT_PORT
value: "993"
- name: ROUNDCUBEMAIL_SMTP_SERVER
value: "ssl://mail.{{ domain }}"
- name: ROUNDCUBEMAIL_SMTP_PORT
value: "465"
- name: ROUNDCUBEMAIL_SKIN
value: elastic
- name: ROUNDCUBEMAIL_UPLOAD_MAX_FILESIZE
value: "25M"
- name: ROUNDCUBEMAIL_PLUGINS
value: "archive,zipdownload,managesieve"
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
cert-manager.io/acme-dns01-provider: "rfc2136"
cert-manager.io/acme-challenge-type: "dns01"
kubernetes.io/ingress.class: "{{ external_ingress_class if roundcube_publish else internal_ingress_class }}"
kubernetes.io/tls-acme: "true"
path: /
hosts:
- webmail.{{ domain }}
tls:
- secretName: webmail.{{ domain }}-tls
hosts:
- webmail.{{ domain }}
persistence:
enabled: true
storageClass: "{{ roundcube_storage | default('nfs-hdd') }}"
accessMode: "{{ roundcube_storage_mode | default('ReadWriteMany') }}"
size: "{{ roundcube_size | default('8Gi') }}"
configs:
myconfig.inc.php: |
<?php
$config['mail_domain'] = '%d';
$config['username_domain'] = '%d';
$config['managesieve_port'] = '4190';
$config['managesieve_host'] = 'ssl://dovecot.{{ namespace }}.svc.cluster.local';
$config['managesieve_usetls'] = false;
$config['managesieve_debug'] = true;
$config['managesieve_conn_options'] = array(
'ssl' => array('verify_peer' => false, 'verify_peer_name' => false, 'allow_self_signed' => true)
);
// Enables separate management interface for vacation responses (out-of-office)
// 0 - no separate section (default),
// 1 - add Vacation section,
// 2 - add Vacation section, but hide Filters section
$config['managesieve_vacation'] = 1;
$config['imap_conn_options'] = array(
'ssl' => array(
'verify_peer' => false,
'allow_self_signed' => true,
          'ciphers' => 'TLSv1.2+HIGH:!aNULL:@STRENGTH',
),
);
// For STARTTLS SMTP
$config['smtp_conn_options'] = array(
'ssl' => array(
'verify_peer' => false,
'allow_self_signed' => true,
          'ciphers' => 'TLSv1.2+HIGH:!aNULL:@STRENGTH',
),
);
?>

View File

@ -0,0 +1,19 @@
- name: Import secrets.yaml to obtain secrets
include_tasks: secrets.yaml
when:
- roundcube_use_external_db
    - postgres_enabled
- set_fact:
roundcube_combined_values: "{{ roundcube_default_values | combine(roundcube_values, recursive=true) }}"
- name: Deploy RoundCube
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ roundcube_namespace | default(mail_namespace) | default(namespace) }}"
release_name: "{{ roundcube_name | default('roundcube') }}"
chart_ref: "{{ roundcube_chart | default('ghp/roundcube') }}"
chart_version: "{{ roundcube_version | default(omit) }}"
release_values: "{{ roundcube_combined_values | from_yaml }}"
wait: true

View File

@ -0,0 +1,25 @@
- block:
- name: Set DB namespace for secret lookup
set_fact:
db_namespace: "{{ roundcube_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}"
- name: Set DB secret name for lookup
set_fact:
db_secret_name: "roundcube.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do"
- name: Lookup Roundcube DB secret
set_fact:
roundcube_db_secret: "{{ lookup('k8s', kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}"
- debug:
msg: "{{ roundcube_db_secret }}"
verbosity: 2
- name: Set Roundcube DB username
set_fact:
roundcube_db_username: "{{ roundcube_db_secret.data.username | b64decode }}"
- name: Set Roundcube DB password
set_fact:
roundcube_db_password: "{{ roundcube_db_secret.data.password | b64decode }}"
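# The secret name above follows the Zalando operator convention
# <username>.<clustername>.credentials.postgresql.acid.zalan.do, so this lookup
# assumes the cluster was created with a "roundcube" user (see the
# postgres_db_definitions example in the postgres role).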

View File

@ -0,0 +1,15 @@
rspamd_default_values:
replicaCount: 1
persistence:
enabled: false
existingClaim: mailboxes
rspamd:
image:
repository: "{{ docker_registry }}/rspamd"
tag: latest
pullPolicy: Always
service:
type: ClusterIP

View File

@ -0,0 +1,13 @@
- set_fact:
rspamd_combined_values: "{{ rspamd_default_values | combine(rspamd_values, recursive=true) }}"
- name: Deploy Rspamd
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ rspamd_namespace | default(mail_namespace) | default(namespace) }}"
release_name: "{{ rspamd_name | default('rspamd') }}"
chart_ref: "{{ rspamd_chart | default('ghp/rspamd') }}"
chart_version: "{{ rspamd_version | default(omit) }}"
release_values: "{{ rspamd_combined_values | from_yaml }}"
wait: true

View File

@ -0,0 +1,15 @@
service_dns_default_values:
fullnameOverride: "{{ service_dns_name | default(namespace + '-service-dns') }}"
domainFilters: ["{{ service_domain | default(domain) }}"]
sources: ['service']
provider: rfc2136
rfc2136:
host: "{{ service_dns_ip | default(dns_ip) }}"
port: 53
zone: "{{ service_domain | default(domain) }}"
tsigSecret: "{{ k8s_tsig }}"
tsigSecretAlg: "{{ service_dns_tsigSecretAlg | default('hmac-sha512') }}"
tsigKeyname: "{{ service_dns_tsigKeyname | default('k8s') }}"
tsigAxfr: true
  ## Possible units: ns, us, ms, s, m, h; see https://golang.org/pkg/time/#ParseDuration
minTTL: "30s"
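# A hypothetical Service this external-dns instance would publish, using the
# standard hostname annotation (names and ports are examples):
# apiVersion: v1
# kind: Service
# metadata:
#   name: postfix
#   annotations:
#     external-dns.alpha.kubernetes.io/hostname: "mail.{{ service_domain | default(domain) }}"
# spec:
#   type: LoadBalancer
#   ports:
#     - port: 25
#       targetPort: 25
#   selector:
#     app: postfix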

View File

@ -0,0 +1,12 @@
- set_fact:
service_dns_combined_values: "{{ service_dns_default_values | combine(service_dns_values, recursive=true) }}"
- name: Deploy service DNS
community.kubernetes.helm:
create_namespace: true
release_namespace: "{{ service_dns_namespace | default(dns_namespace) | default(namespace) }}"
release_name: "{{ service_dns_name | default(namespace + '-service-dns') }}"
chart_ref: "{{ service_dns_chart | default('bitnami/external-dns') }}"
chart_version: "{{ service_dns_version | default(omit) }}"
release_values: "{{ service_dns_combined_values | from_yaml }}"
wait: true

Some files were not shown because too many files have changed in this diff.