GHP publish

This commit is contained in:
ace 2021-01-09 20:54:42 +03:00
commit b4b740a239
No known key found for this signature in database
GPG Key ID: 32989872B72276A0
173 changed files with 5392 additions and 0 deletions

10
ansible.cfg Normal file
View File

@ -0,0 +1,10 @@
; Ansible project defaults for the GHP playbooks.
[defaults]
; Don't prompt/abort on unknown SSH host keys (lab-style environment).
host_key_checking = False
pipelining = True
; Print per-task and total timing after every run.
callback_whitelist = timer, profile_tasks
forks = 50
; Roles are vendored in ./roles next to this file.
roles_path = roles

[ssh_connection]
; Pipelining here is what actually affects SSH transport; requires
; `requiretty` to be disabled in sudoers on managed hosts.
pipelining = True
; NOTE(review): UserKnownHostsFile=/dev/null disables host-key persistence --
; fine for disposable VPSes, not for long-lived production bastions.
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null

View File

@ -0,0 +1,128 @@
# Common #
namespace: ghp
docker_registry: registry.ghp.0xace.cc
domain: example.com
mail_domain: "{{ domain }}"
local_domain: lan
# RFC2136 (nsupdate) DNS server used by cert-manager, external-dns and ddclient.
dns_ip: YOUR_RFC2136_DNS_IP
mail_proxy_public_ip: PUBLIC_VPS_IP
mail_proxy_private_ip: "{{ dns_ip }}"
web_proxy_internal_ip: INTERNAL_VPS_IP

# Core infrastructure #
## Nginx Ingress ##
### Internal ###
internal_ingress_class: "ghp-internal-nginx"
# NOTE(review): .0 is the network address of the 192.168.250.0/24 MetalLB
# pool — confirm it is intentional as a LoadBalancer IP.
internal_loadbalancer_ip: "192.168.250.0"
### External ###
# FIX: was a duplicate `internal_ingress_class` key (YAML last-one-wins);
# ingress annotations elsewhere reference `external_ingress_class`.
external_ingress_class: "ghp-external-nginx"
external_loadbalancer_ip: "192.168.250.10"
### Local ###
# FIX: was a third duplicate `internal_ingress_class` key.
local_ingress_class: "ghp-local-nginx"
local_loadbalancer_ip: "192.168.250.20"

## External-dns ##
dns_namespace: dns

# Shared infrastructure #
## PostgreSQL ##
postgres_enable: true
postgres_db_namespace: "{{ namespace }}"

## OpenLDAP ##
openldap_enabled: true
#openldap_size: "10Gi"
#openldap_storage: "nfs-ssd"
openldap_loadbalancer_ip: "192.168.250.2"
openldap_domain: "dc=example,dc=com"
openldap_custom_users:
  - { name: myuser1 }
  - { name: myuser2 }
openldap_simple_users:
  - { name: testuser1, sn: 6001, uid: 6001, gid: 6001 }
  - { name: testuser2, sn: 6002, uid: 6002, gid: 6002 }

## Docker-registry ##
registry_enabled: true
#registry_size: "100Gi"
#registry_storage: "nfs-hdd"
registry_publish: false

## ChartMuseum ##
chartmuseum_enabled: true
#chartmuseum_size: "10Gi"
#chartmuseum_storage: "nfs-hdd"
#chartmuseum_publish: false
#chartmuseum_login: admin
#chartmuseum_pass:

# End User Applications #
## Email ##
mail_enabled: true
#mailbox_size: "50Gi"
#mailbox_storage: "nfs-hdd"
roundcube_enabled: true
roundcube_publish: false
postfix_loadbalancer_ip: "192.168.250.3"
dovecot_loadbalancer_ip: "192.168.250.4"

## Nextcloud ##
nextcloud_enabled: true
#nextcloud_size: "20Gi"
#nextcloud_storage: "nfs-ssd"
#nextcloud_pass:
#nextcloud_mail_pass:
nextcloud_publish: true

## Bitwarden Password Manager ##
bitwarden_enabled: true
#bitwarden_size: "8Gi"
#bitwarden_storage: "nfs-ssd"
#bitwarden_smtp_pass:
bitwarden_publish: false

## Gitea ##
gitea_enabled: true
#gitea_size: "20Gi"
#gitea_storage: "nfs-ssd"
#gitea_lfs: true
#gitea_lfs_size: "50Gi"
#gitea_lfs_storage: "nfs-hdd"
# Gitea exposure is split: web (HTTPS via SNI proxy) and ssh (TCP 22).
gitea_publish_web: false
gitea_publish_ssh: false
gitea_loadbalancer_ip: "192.168.250.5"

## Drone ##
drone_enabled: true
#drone_size: "10Gi"
#drone_storage: "nfs-ssd"
#drone_gitea_client_id:
#drone_gitea_client_secret:
drone_publish: false

### WikiJS ###
wikijs_enabled: true
wikijs_publish: false

### Playmaker ###
playmaker_enabled: false
playmaker_publish: false

### Pypiserver ###
pypiserver_enabled: false
pypiserver_publish: false

### PeerTube ###
peertube_enabled: false
peertube_publish: false
#peertube_size: "100Gi"
#peertube_storage: "nfs-hdd"

### Adguard Home ###
adguard_enabled: false
adguard_publish: false
adguard_loadbalancer_ip: "192.168.250.6"
#adguard_config_size: "20Mi"
#adguard_config_storage: "nfs-ssd"
#adguard_work_size: "10Gi"
#adguard_work_storage: "nfs-ssd"

View File

@ -0,0 +1,36 @@
# ddclient daemon configuration (rendered into the ddclient container).
# Each published hostname gets one nsupdate (RFC2136) stanza; entries that
# render to the sentinel 'omitme' are skipped.
ddclient_conf: |
  daemon=300
  syslog=yes
  mail-failure=root
  pid=/var/run/ddclient/ddclient.pid
  ssl=yes
  debug=yes
  verbose=yes
  {% for host in ddclient_hosts %}
  {% if host != 'omitme' %}
  use=web
  web=checkip.dyndns.org
  protocol=nsupdate
  server={{ external_dns_ip | default(dns_ip) }}
  login=/usr/bin/nsupdate
  password=/config/Kvps.key
  zone={{ domain }}
  ttl=60
  {{ host }}
  {% endif %}
  {% endfor %}
# Hostnames to keep updated with the current public IP; 'omitme' disables
# an entry without changing list positions.
ddclient_hosts:
  - "{% if nextcloud_publish | default(false) %}nextcloud.{{ domain }}{% else %}omitme{% endif %}"
  - "{% if drone_publish | default(false) %}drone.{{ domain }}{% else %}omitme{% endif %}"
  # FIX: was `gitea_publish`, a variable that is never defined anywhere
  # (group_vars define only gitea_publish_web / gitea_publish_ssh), so the
  # gitea record was silently never published.
  - "{% if gitea_publish_web | default(false) or gitea_publish_ssh | default(false) %}gitea.{{ domain }}{% else %}omitme{% endif %}"
  - "{% if bitwarden_publish | default(false) %}bitwarden.{{ domain }}{% else %}omitme{% endif %}"
  - "{% if wikijs_publish | default(false) %}wikijs.{{ domain }}{% else %}omitme{% endif %}"
  - "{% if chartmuseum_publish | default(false) %}charts.{{ domain }}{% else %}omitme{% endif %}"
  - "{% if registry_publish | default(false) %}registry.{{ domain }}{% else %}omitme{% endif %}"
  - "{% if peertube_publish | default(false) %}peertube.{{ domain }}{% else %}omitme{% endif %}"
  - "{{ registry_readonly_ingress | default('omitme') }}"
  - "{{ chartmuseum_readonly_ingress | default('omitme') }}"
  - "{{ wikijs_readonly_ingress | default('omitme') }}"

View File

@ -0,0 +1 @@
adguard_values: {}

View File

@ -0,0 +1 @@
bitwarden_values: {}

View File

@ -0,0 +1 @@
cert_manager_values: {}

View File

@ -0,0 +1 @@
chartmuseum_values: {}

View File

@ -0,0 +1 @@
dovecot_values: {}

View File

@ -0,0 +1,2 @@
drone_values: {}
drone_runner_kube_values: {}

View File

@ -0,0 +1 @@
external_dns_values: {}

View File

@ -0,0 +1 @@
external_ingress_nginx_values: {}

View File

@ -0,0 +1,3 @@
gitea_values: {}
gitea_ingress_nginx_values: {}
gitea_dns_values: {}

View File

@ -0,0 +1 @@
internal_dns_values: {}

View File

@ -0,0 +1 @@
internal_ingress_nginx_values: {}

View File

@ -0,0 +1 @@
local_dns_values: {}

View File

@ -0,0 +1 @@
local_ingress_nginx_values: {}

View File

@ -0,0 +1,13 @@
# MetalLB: BGP peering with the upstream router and a single /24 pool from
# which every LoadBalancer IP in group_vars is carved.
metallb_values:
  configInline:
    peers:
      - peer-address: 192.168.5.1
        peer-asn: 64601
        my-asn: 65500
    address-pools:
      - name: default
        protocol: bgp
        bgp-advertisements:
          # Advertise the whole /24 as one aggregate route.
          - aggregation-length: 24
        addresses:
          - 192.168.250.0/24

View File

@ -0,0 +1 @@
metrics_server_values: {}

View File

@ -0,0 +1,43 @@
# Nextcloud chart values: extra config.php snippets injected via the chart's
# `configs` mechanism.
nextcloud_values:
  nextcloud:
    configs:
      # Longer SMTP timeout for slow mail relays.
      mail.fix.config.php: |-
        <?php
        $CONFIG = array (
          "mail_smtptimeout" => 60,
        );
      # Reverse-proxy awareness plus relaxed TLS verification toward the
      # internal mail service (self-signed certificates).
      fix.config.php: |-
        <?php
        $CONFIG = array (
          'trusted_proxies' => ['{{ web_proxy_internal_ip }}'],
          'overwriteprotocol' => 'https',
          'overwrite.cli.url' => 'https://nextcloud.{{ domain }}',
          'mail_smtpstreamoptions' =>
            array (
              'ssl' =>
                array (
                  'allow_self_signed' => true,
                  'verify_peer' => false,
                  'verify_peer_name' => false,
                ),
            ),
        );
      # Ceph RGW (S3-compatible) primary storage.
      # SECURITY(review): live-looking S3 access/secret keys are committed in
      # plain text here — rotate them and move them into a vaulted variable.
      rgw.config.php: |-
        <?php
        $CONFIG = array (
          'objectstore_multibucket' => array(
            'class' => '\\OC\\Files\\ObjectStore\\S3',
            'arguments' => array(
              'bucket' => 'nextcloud',
              'autocreate' => true,
              'key' => 'K4PNZLSTLIDQJMZUV27P',
              'secret' => 'iPScsni8RS2aT9MFymfQYLPD7W8dVrRqFpafBKDc',
              'hostname' => 'sds1-osd1.lan',
              'port' => 8080,
              'use_ssl' => false,
              'num_buckets' => 16,
              'region' => 'us-east-1',
              'use_path_style' => true
            ),
          ),
        );

View File

@ -0,0 +1,15 @@
# Two nfs-client-provisioner instances: one HDD-backed, one SSD-backed.
# The `<...>` values are placeholders that must be filled per environment.
nfs_client_provisioner_hdd_values:
  replicaCount: 1
  # Recreate avoids two provisioner pods racing on the same export during
  # an upgrade.
  strategyType: Recreate
  nfs:
    server: <nfs server dns or ip>
    path: <full path from exportfs>
  defaultClass: false
nfs_client_provisioner_ssd_values:
  replicaCount: 1
  strategyType: Recreate
  nfs:
    server: <nfs server dns or ip>
    path: <full path from exportfs>
  # The SSD-backed class is the cluster's default StorageClass.
  defaultClass: true

View File

@ -0,0 +1 @@
opendkim_values: {}

View File

@ -0,0 +1 @@
opendmarc_values: {}

View File

@ -0,0 +1,58 @@
# OpenLDAP chart values: two hand-written user entries plus one generated
# entry per openldap_simple_users item (see group_vars).
# NOTE(review): LDIF requires a blank line between consecutive `dn:` records;
# none is visible between the entries below (a separator may have been lost
# in transit) — verify the rendered files, or slapadd will reject the
# second and subsequent entries.
openldap_values:
  customLdifFiles:
    04-custom-users.ldif: |-
      dn: uid=myuser1,ou=users,{{ openldap_domain }}
      changetype: add
      uid: myuser1
      cn: myuser1
      sn: 5001
      objectClass: top
      objectClass: posixAccount
      objectClass: inetOrgPerson
      loginShell: /bin/bash
      homeDirectory: /home/myuser1
      uidNumber: 5001
      gidNumber: 5001
      userPassword: {{ myuser1_pbkdf2_sha512_hash }}
      mail: myuser1@{{ domain }}
      mail: myuser1_second_mail@{{ domain }}
      gecos: myuser1 description
      dn: uid=myuser2,ou=users,{{ openldap_domain }}
      changetype: add
      uid: myuser2
      cn: myuser2
      sn: 5002
      objectClass: top
      objectClass: posixAccount
      objectClass: inetOrgPerson
      loginShell: /bin/bash
      homeDirectory: /home/myuser2
      uidNumber: 5002
      gidNumber: 5002
      userPassword: {{ myuser2_pbkdf2_sha512_hash }}
      mail: myuser2@{{ domain }}
      mail: myuser2_second_mail@{{ domain }}
      gecos: myuser2 description
    # Password hash is looked up as "<name>_pbkdf2_sha512_hash" from host
    # vars; falls back to the literal string 'nopass' when absent.
    05-autogen-simple-users.ldif: |-
      {% for user in openldap_simple_users %}
      dn: uid={{ user.name }},ou=users,{{ openldap_domain }}
      changetype: add
      uid: {{ user.name }}
      cn: {{ user.name }}
      sn: {{ user.sn }}
      objectClass: top
      objectClass: posixAccount
      objectClass: inetOrgPerson
      loginShell: /bin/bash
      homeDirectory: /home/{{ user.name }}
      uidNumber: {{ user.uid }}
      gidNumber: {{ user.gid }}
      userPassword: {{ hostvars[inventory_hostname][user.name + '_pbkdf2_sha512_hash'] | default('nopass') }}
      mail: {{ user.name }}@{{ domain }}
      gecos: {{ user.name }} user
      {% endfor %}

View File

@ -0,0 +1 @@
peertube_values: {}

View File

@ -0,0 +1 @@
postfix_values: {}

View File

@ -0,0 +1,2 @@
postgres_operator_values: {}
postgres_operator_ui_values: {}

View File

@ -0,0 +1 @@
registry_values: {}

View File

@ -0,0 +1 @@
roundcube_values: {}

View File

@ -0,0 +1 @@
rspamd_values: {}

View File

@ -0,0 +1 @@
service_dns_values: {}

View File

@ -0,0 +1 @@
wikijs_values: {}

View File

@ -0,0 +1,87 @@
# knot.conf rendered for the authoritative DNS VPS. TSIG keys `k8s` (used by
# cert-manager / external-dns in the cluster) and `vps` (used by ddclient)
# are both allowed to transfer/notify/update the main zone.
# FIX: the original declared the `remote:` section twice (once holding only
# commented-out examples, once holding dns_server); the two are merged into
# a single section below.
knot_conf: |
  # This is a sample of a minimal configuration file for Knot DNS.
  # See knot.conf(5) or refer to the server documentation.
  server:
    rundir: "/run/knot"
    user: knot:knot
    listen: [ 0.0.0.0@53, ::@53 ]
    udp-max-payload: 1232
  log:
    - target: syslog
      any: debug
  key:
    - id: k8s
      algorithm: hmac-sha512
      secret: {{ k8s_tsig }}
    - id: vps
      algorithm: hmac-sha512
      secret: {{ ddclient_tsig }}
  remote:
  #  - id: slave
  #    address: 192.168.1.1@53
  #
  #  - id: master
  #    address: 192.168.2.1@53
    - id: dns_server
      address: 127.0.0.1@53
  submission:
    - id: dns_zone_sbm
      parent: [dns_server]
  acl:
    - id: deny_all
      deny: on # no action specified and deny on implies denial of all actions
    - id: key_rule
      key: [vps, k8s] # Access based just on TSIG key
      address: 192.168.0.0/16
      action: [transfer, notify, update]
  #  - id: acl_slave
  #    address: 192.168.1.1
  #    action: transfer
  #  - id: acl_master
  #    address: 192.168.2.1
  #    action: notify
  template:
    - id: default
      storage: "/var/lib/knot"
      file: "%s.zone"
  policy:
    - id: rsa
      algorithm: RSASHA512
      ksk-size: 4096
      zsk-size: 2048
      nsec3: on
      ksk-submission: dns_zone_sbm
  zone:
    - domain: "{{ domain }}"
      storage: "/var/lib/knot/zones/"
      file: "{{ domain }}.zone"
      acl: [deny_all, key_rule]
      dnssec-signing: on
      dnssec-policy: rsa
      zonefile-load: difference
  #  # Master zone
  #  - domain: example.com
  #    notify: slave
  #    acl: acl_slave
  #  # Slave zone
  #  - domain: example.net
  #    master: master
  #    acl: acl_master

View File

@ -0,0 +1,102 @@
# HAProxy mail front end on the VPS: pure TCP forwarding of SMTP(S)/IMAP(S)
# to the in-cluster postfix/dovecot LoadBalancer services, passing the real
# client address via the PROXY protocol (send-proxy / send-proxy-v2).
# NOTE(review): `nbproc` was removed in HAProxy 2.5+ — confirm the target
# HAProxy version, or this config will fail to load there.
haproxy_config: |
  global
      chroot /var/lib/haproxy
      daemon
      group haproxy
      maxconn 200000
      nbproc {{ ansible_processor_count }}
      pidfile /var/run/haproxy.pid
      user haproxy
      stats socket /var/run/haproxy.stat
      stats bind-process 1
      log 127.0.0.1 local0
  defaults
      log global
      maxconn 200000
      option redispatch
      retries 3
      timeout http-request 10s
      timeout queue 1m
      timeout connect 10s
      timeout client 10m
      timeout server 10m
      timeout check 10s
  frontend ft_smtp
      bind {{ mail_proxy_public_ip }}:25
      bind {{ mail_proxy_private_ip }}:25
      mode tcp
      timeout client 1m
      log global
      option tcplog
      default_backend bk_smtp
  backend bk_smtp
      mode tcp
      log global
      option tcplog
      timeout server 1m
      timeout connect 7s
      server postfix {{ postfix_loadbalancer_ip }}:2525 send-proxy
  frontend ft_submission
      bind {{ mail_proxy_public_ip }}:587
      bind {{ mail_proxy_private_ip }}:587
      mode tcp
      timeout client 1m
      log global
      option tcplog
      default_backend bk_submission
  backend bk_submission
      mode tcp
      log global
      option tcplog
      timeout server 1m
      timeout connect 7s
      server postfix {{ postfix_loadbalancer_ip }}:10587 send-proxy
  frontend ft_submissions
      bind {{ mail_proxy_public_ip }}:465
      bind {{ mail_proxy_private_ip }}:465
      mode tcp
      timeout client 1m
      log global
      option tcplog
      default_backend bk_submissions
  backend bk_submissions
      mode tcp
      log global
      option tcplog
      timeout server 1m
      timeout connect 7s
      server postfix {{ postfix_loadbalancer_ip }}:10465 send-proxy
  frontend ft_imap
      bind {{ mail_proxy_public_ip }}:143
      bind {{ mail_proxy_private_ip }}:143
      mode tcp
      default_backend bk_imap
  backend bk_imap
      mode tcp
      balance leastconn
      stick store-request src
      stick-table type ip size 200k expire 30m
      server imap1 {{ dovecot_loadbalancer_ip }}:1109 send-proxy-v2
  frontend ft_imaps
      bind {{ mail_proxy_public_ip }}:993
      bind {{ mail_proxy_private_ip }}:993
      mode tcp
      default_backend bk_imaps
  backend bk_imaps
      mode tcp
      balance leastconn
      stick store-request src
      stick-table type ip size 200k expire 30m
      server imaps1 {{ dovecot_loadbalancer_ip }}:10993 send-proxy-v2

View File

@ -0,0 +1,97 @@
# Nginx on the web-proxy VPS: an SNI-based TCP (stream) proxy that routes
# port 443 to the cluster's external ingress (or gitea) without terminating
# TLS, using ssl_preread + a map of server names to upstreams.
nginx:
  nginx.conf: |
    user nginx;
    worker_processes {{ ansible_processor_count }};
    error_log /var/log/nginx/error.log debug;
    pid /var/run/nginx.pid;
    events {
        worker_connections 4096;
    }
    http {
        include /etc/nginx/mime.types;
        default_type application/octet-stream;
        log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for"';
        access_log /var/log/nginx/access.log main;
        sendfile on;
        #tcp_nopush on;
        keepalive_timeout 65;
        #gzip on;
        include /etc/nginx/conf.d/*.conf;
    }
    stream {
        server {
            listen 443;
            proxy_pass $upstream;
            ssl_preread on;
            proxy_protocol on;
        }
        map $ssl_preread_server_name $upstream {
            include /etc/nginx/stream.d/*.map;
        }
        include /etc/nginx/stream.d/*.conf;
    }
stream.d:
  # SNI -> upstream map.
  # FIX: the three `*_readonly_ingress` conditionals were bare `{% if var %}`
  # on variables that group_vars never define, which aborts templating with
  # AnsibleUndefinedVariable; they now default to false, consistent with how
  # the ddclient host list treats the very same variables.
  - name: "k8s-ghp-{{ namespace }}.map"
    data: |
      {% if gitea_publish_ssh %}
      default gitea_ssh_{{ namespace }};
      {% endif %}
      {% if gitea_publish_web %}
      gitea.{{ domain }} gitea_web_{{ namespace }};
      {% endif %}
      {% if bitwarden_publish %}
      bitwarden.{{ domain }} https_{{ namespace }};
      {% endif %}
      {% if wikijs_publish %}
      wikijs.{{ domain }} https_{{ namespace }};
      {% endif %}
      {% if drone_publish %}
      drone.{{ domain }} https_{{ namespace }};
      {% endif %}
      {% if nextcloud_publish %}
      nextcloud.{{ domain }} https_{{ namespace }};
      {% endif %}
      {% if registry_publish %}
      registry.{{ domain }} https_{{ namespace }};
      {% endif %}
      {% if registry_readonly_ingress | default(false) %}
      {{ registry_readonly_ingress }} https_{{ namespace }};
      {% endif %}
      {% if chartmuseum_publish %}
      charts.{{ domain }} https_{{ namespace }};
      {% endif %}
      {% if chartmuseum_readonly_ingress | default(false) %}
      {{ chartmuseum_readonly_ingress }} https_{{ namespace }};
      {% endif %}
      {% if wikijs_readonly_ingress | default(false) %}
      {{ wikijs_readonly_ingress }} https_{{ namespace }};
      {% endif %}
      {% if peertube_publish %}
      peertube.{{ domain }} https_{{ namespace }};
      {% endif %}
  # Upstream definitions referenced by the map above.
  - name: "k8s-ghp-{{ namespace }}.conf"
    data: |-
      {% if gitea_publish_ssh %}
      upstream gitea_ssh_{{ namespace }} {
          server {{ gitea_loadbalancer_ip }}:22;
      }
      {% endif %}
      {% if gitea_publish_web %}
      upstream gitea_web_{{ namespace }} {
          server {{ gitea_loadbalancer_ip }}:443;
      }
      {% endif %}
      upstream https_{{ namespace }} {
          server {{ external_loadbalancer_ip }}:443;
      }

View File

@ -0,0 +1,16 @@
; Ansible inventory for the GHP project.
; `vps` aggregates all internet-facing hosts; the child groups below are
; intentionally empty placeholders to be filled per deployment.
[vps:children]
knot_dns
web_proxy
mail_proxy
ddclient

[ddclient]

[web_proxy]

[mail_proxy]

[knot_dns]

; Kubernetes work runs from the control node itself (kubectl/helm, see
; `connection: local` in the playbooks).
[k8s]
; NOTE(review): interpreter is pinned to "python" — on python3-only systems
; this should likely be "python3"; confirm.
localhost ansible_python_interpreter="python"

View File

@ -0,0 +1,27 @@
# Preflight checks for the local toolchain (docker, helm, kubectl), then
# helm repository setup and password generation.
- hosts: localhost
  connection: local
  pre_tasks:
    # `command` suffices (no shell features used); a non-zero rc fails the
    # task by default. FIX: the original `changed_when: rc != 0` could only
    # report "changed" on a task that had already failed — pure checks
    # should never report changed, so it is now `changed_when: false`.
    - name: Check docker is working
      command: docker info
      register: docker_info
      changed_when: false
    - name: Check Helm installed
      command: helm version
      register: helm_version
      changed_when: false
    - name: Helm version
      debug:
        msg: "{{ helm_version.stdout }}"
    - name: Check kubectl installed and have access to cluster
      command: kubectl get nodes
      register: kubectl_cluster_nodes
      changed_when: false
    - name: Kubectl nodes output
      debug:
        # stdout_lines is the idiomatic equivalent of stdout.split('\n').
        msg: "{{ kubectl_cluster_nodes.stdout_lines }}"
  roles:
    - helm-repos
    - pwgen

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- cert-manager

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- chartmuseum

View File

@ -0,0 +1,75 @@
---
# Core cluster infrastructure: load balancing, ingress, DNS, certificates,
# storage provisioning and metrics. Every component can be toggled with its
# *_enabled variable and targeted with its tag.
- hosts: k8s
  connection: local
  tasks:
    - name: Deploy MetalLB
      import_role:
        name: metallb
      when: metallb_enabled | default(true)
      tags: metallb
    - name: Deploy External Ingress Nginx
      import_role:
        name: external-ingress-nginx
      when: external_ingress_nginx_enabled | default(true)
      tags:
        - external-ingress-nginx
        - ingress-nginx
    - name: Deploy Internal Ingress Nginx
      import_role:
        name: internal-ingress-nginx
      when: internal_ingress_nginx_enabled | default(true)
      tags:
        - internal-ingress-nginx
        - ingress-nginx
    - name: Deploy Local Ingress Nginx
      import_role:
        # FIX: was `internal-ingress-nginx` (copy/paste slip) — the local
        # ingress has its own role, as used by
        # playbooks/ghp/local-ingress-nginx.yaml.
        name: local-ingress-nginx
      when: local_ingress_nginx_enabled | default(true)
      tags:
        - local-ingress-nginx
        - ingress-nginx
    - name: Deploy Internal DNS
      import_role:
        name: internal-dns
      when: internal_dns_enabled | default(true)
      tags:
        - internal-dns
        - dns
    - name: Deploy Local DNS
      import_role:
        name: local-dns
      when: local_dns_enabled | default(true)
      tags:
        - local-dns
        - dns
    - name: Deploy Service DNS
      import_role:
        name: service-dns
      when: service_dns_enabled | default(true)
      tags:
        - service-dns
        - dns
    - name: Deploy Cert-manager
      import_role:
        name: cert-manager
      when: cert_manager_enabled | default(true)
      tags: cert-manager
    - name: Deploy NFS-client-provisioner
      import_role:
        name: nfs-client-provisioner
      when: nfs_client_provisioner_enabled | default(true)
      tags: nfs-client-provisioner
    - name: Deploy Metrics-server
      import_role:
        name: metrics-server
      when: metrics_server_enabled | default(true)
      tags: metrics-server

4
playbooks/ghp/dns.yaml Normal file
View File

@ -0,0 +1,4 @@
---
- hosts: knot_dns
roles:
- knot

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- dovecot

5
playbooks/ghp/drone.yaml Normal file
View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- drone

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- external-ingress-nginx

5
playbooks/ghp/gitea.yaml Normal file
View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- gitea

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- internal-dns

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- internal-ingress-nginx

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- local-dns

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- local-ingress-nginx

5
playbooks/ghp/mail.yaml Normal file
View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- mail

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- metallb

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- metrics-server

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- nextcloud

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- nfs-client-provisioner

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- opendkim

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- opendmarc

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- openldap

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- playmaker

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- postfix

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- postgres

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- pypiserver

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- registry

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- roundcube

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- rspamd

View File

@ -0,0 +1,27 @@
---
# Shared infrastructure consumed by the end-user applications: database,
# directory service, image registry and chart repository.
- hosts: k8s
  connection: local
  tasks:
    - name: Deploy PostgreSQL
      import_role:
        name: postgres
      # FIX: was `postgres_enabled`, but the variable is `postgres_enable`
      # (group_vars and the bitwarden secrets tasks both use that spelling),
      # so the check always fell through to the default and postgres could
      # never be disabled.
      when: postgres_enable | default(true)
      tags: postgres
    - name: Deploy OpenLDAP
      import_role:
        name: openldap
      when: openldap_enabled | default(true)
      tags: openldap
    - name: Deploy Docker registry
      import_role:
        name: registry
      when: registry_enabled | default(true)
      tags: registry
    - name: Deploy ChartMuseum
      import_role:
        name: chartmuseum
      when: chartmuseum_enabled | default(true)
      tags: chartmuseum

12
playbooks/ghp/site.yaml Normal file
View File

@ -0,0 +1,12 @@
---
# Entry point: deploys the whole stack in dependency order. Each layer is
# tagged so it can be run in isolation with --tags.
- name: Deploy Core Infrastructure
  import_playbook: core-infra.yaml
  tags: core-infra
- name: Deploy Shared Infrastructure
  import_playbook: shared-infra.yaml
  tags: shared-infra
- name: Deploy End User Applications
  import_playbook: user-apps.yaml
  tags: user-apps

View File

@ -0,0 +1,63 @@
---
# End-user applications. Each app is toggled via its *_enabled variable
# (note the differing defaults: core apps default on, optional apps off)
# and can be targeted with its tag.
- hosts: k8s
  connection: local
  tasks:
    - name: Deploy Mail
      import_role:
        name: mail
      when: mail_enabled | default(true)
      tags: mail
    - name: Deploy Nextcloud
      import_role:
        name: nextcloud
      when: nextcloud_enabled | default(true)
      tags: nextcloud
    - name: Deploy Bitwarden
      import_role:
        name: bitwarden
      when: bitwarden_enabled | default(true)
      tags: bitwarden
    - name: Deploy Gitea
      import_role:
        name: gitea
      when: gitea_enabled | default(true)
      tags: gitea
    - name: Deploy Drone
      import_role:
        name: drone
      when: drone_enabled | default(true)
      tags: drone
    - name: Deploy WikiJS
      import_role:
        name: wikijs
      when: wikijs_enabled | default(true)
      tags: wikijs
    - name: Deploy Playmaker
      import_role:
        name: playmaker
      when: playmaker_enabled | default(false)
      tags: playmaker
    - name: Deploy Pypiserver
      import_role:
        name: pypiserver
      when: pypiserver_enabled | default(false)
      tags: pypiserver
    - name: Deploy PeerTube
      import_role:
        name: peertube
      when: peertube_enabled | default(false)
      tags: peertube
    - name: Deploy Adguard Home
      import_role:
        name: adguard-home
      when: adguard_enabled | default(false)
      tags: adguard

17
playbooks/ghp/vps.yaml Normal file
View File

@ -0,0 +1,17 @@
---
# Configure the internet-facing VPS hosts: nginx SNI stream proxy, haproxy
# mail proxy, and a dockerized ddclient for dynamic DNS updates.
- hosts: web_proxy
  roles:
    - nginx
  tags: web-proxy
- hosts: mail_proxy
  roles:
    - haproxy
  tags: mail-proxy
- hosts: ddclient
  roles:
    # The docker role provides the engine; ddclient then runs as a container
    # because of dockerize=true.
    - docker
    - role: ddclient
      dockerize: true
  tags: ddclient

View File

@ -0,0 +1,5 @@
---
- hosts: k8s
connection: local
roles:
- wikijs

37
requirements.txt Normal file
View File

@ -0,0 +1,37 @@
ansible==2.10.3
ansible-base==2.10.3
attrs==20.3.0
cachetools==4.1.1
certifi==2020.6.20
cffi==1.14.3
chardet==3.0.4
cryptography==3.2.1
docker==4.3.1
google-auth==1.23.0
idna==2.10
importlib-metadata==2.0.0
Jinja2==2.11.2
jsonschema==3.2.0
kubernetes==11.0.0
kubernetes-validate==1.18.0
MarkupSafe==1.1.1
oauthlib==3.1.0
openshift==0.11.2
packaging==20.4
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.20
pyparsing==2.4.7
pyrsistent==0.17.3
python-dateutil==2.8.1
python-string-utils==1.0.0
PyYAML==5.3.1
requests==2.24.0
requests-oauthlib==1.3.0
rsa==4.6
ruamel.yaml==0.16.12
ruamel.yaml.clib==0.2.2
six==1.15.0
urllib3==1.25.11
websocket-client==0.57.0
zipp==3.4.0

View File

@ -0,0 +1,276 @@
adguard_enabled: false
adguard_publish: false
adguard_default_values:
# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: RollingUpdate
configAsCode:
enabled: true
resources: {}
# requests:
# memory: 128Mi
# cpu: 100m
image:
repository: busybox
tag: latest
pullPolicy: Always
config:
bind_host: 0.0.0.0
bind_port: 3000
users:
- name: admin
password: "{{ adguard_admin_htpasswd_hash }}"
http_proxy: ""
language: "en"
rlimit_nofile: 0
debug_pprof: false
web_session_ttl: 720
dns:
bind_host: 0.0.0.0
port: 53
statistics_interval: 1
querylog_enabled: true
querylog_interval: 90
querylog_size_memory: 1000
anonymize_client_ip: false
protection_enabled: true
blocking_mode: default
blocking_ipv4: ""
blocking_ipv6: ""
blocked_response_ttl: 10
parental_block_host: family-block.dns.adguard.com
safebrowsing_block_host: standard-block.dns.adguard.com
ratelimit: 0
ratelimit_whitelist: []
refuse_any: true
upstream_dns:
- https://dns10.quad9.net/dns-query
bootstrap_dns:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
all_servers: false
fastest_addr: false
allowed_clients: []
# - 10.0.0.1
# - 10.0.1.1/24
disallowed_clients: []
# - 10.0.1.1
# - 10.0.11.1/24
blocked_hosts: []
# - example.org
# - '*.example.org'
# - '||example.org^'
cache_size: 4194304
cache_ttl_min: 0
cache_ttl_max: 0
bogus_nxdomain: []
aaaa_disabled: false
enable_dnssec: false
edns_client_subnet: false
filtering_enabled: true
filters_update_interval: 8
parental_enabled: false
safesearch_enabled: false
safebrowsing_enabled: false
safebrowsing_cache_size: 1048576
safesearch_cache_size: 1048576
parental_cache_size: 1048576
cache_time: 30
rewrites: []
# - domain: example.org
# answer: 127.0.0.1
# - domain: '*.example.org'
# answer: 127.0.0.1
blocked_services:
- facebook
- origin
- twitter
- snapchat
- skype
- whatsapp
- instagram
- youtube
- netflix
- twitch
- discord
- amazon
- ebay
- cloudflare
- steam
- epic_games
- reddit
- ok
- vk
- mail_ru
- tiktok
tls:
enabled: true
server_name: "{{ adguard_dns_name | default('dns.' + domain) }}"
force_https: false
port_https: 443
port_dns_over_tls: 853
allow_unencrypted_doh: false
strict_sni_check: false
certificate_chain: ""
private_key: ""
certificate_path: "/certs/tls.crt"
private_key_path: "/certs/tls.key"
filters:
- enabled: true
url: https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt
name: AdGuard DNS filter
id: 1
- enabled: false
url: https://adaway.org/hosts.txt
name: AdAway
id: 2
- enabled: false
url: https://www.malwaredomainlist.com/hostslist/hosts.txt
name: MalwareDomainList.com Hosts List
id: 4
whitelist_filters: []
# - enabled: true
# url: https://easylist-downloads.adblockplus.org/exceptionrules.txt
# name: Allow nonintrusive advertising
# id: 1595760241
user_rules: []
# - '||example.org^'
# - '@@||example.org^'
# - 127.0.0.1 example.org
# - '! Here goes a comment'
# - '# Also a comment'
dhcp:
enabled: false
interface_name: ""
gateway_ip: ""
subnet_mask: ""
range_start: ""
range_end: ""
lease_duration: 86400
icmp_timeout_msec: 1000
clients: []
# - name: myuser
# tags:
# - user_admin
# ids:
# - 192.168.91.1
# use_global_settings: true
# filtering_enabled: false
# parental_enabled: false
# safesearch_enabled: false
# safebrowsing_enabled: false
# use_global_blocked_services: true
# blocked_services: []
# upstreams: []
log_file: ""
verbose: false
schema_version: 6
tlsSecretName: "{{ adguard_dns_name | default('dns.' + domain) }}-secret"
timezone: "UTC"
ingress:
enabled: true
annotations:
cert-manager.io/acme-challenge-type: dns01
cert-manager.io/acme-dns01-provider: rfc2136
cert-manager.io/cluster-issuer: letsencrypt-prod
kubernetes.io/ingress.class: "{{ external_ingress_class if adguard_publish else internal_ingress_class }}"
kubernetes.io/tls-acme: "true"
path: /
hosts:
- adguard.{{ domain }}
tls:
- secretName: adguard.{{ domain }}-tls
hosts:
- adguard.{{ domain }}
service:
type: ClusterIP
# externalTrafficPolicy: Local
# externalIPs: []
# loadBalancerIP: ""
# a fixed LoadBalancer IP
# loadBalancerSourceRanges: []
annotations:
# metallb.universe.tf/address-pool: network-services
# metallb.universe.tf/allow-shared-ip: adguard-home-svc
serviceTCP:
enabled: true
type: LoadBalancer
# externalTrafficPolicy: Local
# externalIPs: []
loadBalancerIP: "{{ adguard_loadbalancer_ip }}"
# a fixed LoadBalancer IP
# loadBalancerSourceRanges: []
annotations:
# metallb.universe.tf/address-pool: network-services
metallb.universe.tf/allow-shared-ip: adguard-home-svc
serviceUDP:
enabled: true
type: LoadBalancer
# externalTrafficPolicy: Local
# externalIPs: []
loadBalancerIP: "{{ adguard_loadbalancer_ip }}"
# a fixed LoadBalancer IP
# loadBalancerSourceRanges: []
annotations:
# metallb.universe.tf/address-pool: network-services
metallb.universe.tf/allow-shared-ip: adguard-home-svc
serviceDNSOverTLS:
enabled: true
## Enable if you use AdGuard as a DNS over TLS/HTTPS server
type: LoadBalancer
# externalTrafficPolicy: Local
# externalIPs: []
loadBalancerIP: "{{ adguard_loadbalancer_ip }}"
# a fixed LoadBalancer IP
# loadBalancerSourceRanges: []
annotations:
# metallb.universe.tf/address-pool: network-services
metallb.universe.tf/allow-shared-ip: adguard-home-svc
serviceDNSOverHTTPS:
enabled: true
## Enable if you use AdGuard as a DNS over TLS/HTTPS server
type: LoadBalancer
# externalTrafficPolicy: Local
# externalIPs: []
loadBalancerIP: "{{ adguard_loadbalancer_ip }}"
# a fixed LoadBalancer IP
# loadBalancerSourceRanges: []
annotations:
# metallb.universe.tf/address-pool: network-services
metallb.universe.tf/allow-shared-ip: adguard-home-svc
external-dns.alpha.kubernetes.io/hostname: "{{ adguard_dns_name | default('dns.' + domain) }}"
serviceDHCP:
enabled: false
## Enable if you use AdGuard as a DHCP Server
type: NodePort
# externalTrafficPolicy: Local
# externalIPs: []
loadBalancerIP: ""
# a fixed LoadBalancer IP
annotations: {}
# metallb.universe.tf/address-pool: network-services
# metallb.universe.tf/allow-shared-ip: adguard-home-svc
persistence:
config:
enabled: true
accessMode: "{{ adguard_config_storage_mode | default('ReadWriteMany') }}"
size: "{{ adguard_config_size | default('20Mi') }}"
storageClass: "{{ adguard_config_storage | default('nfs-ssd') }}"
## Do not delete the pvc upon helm uninstall
skipuninstall: false
work:
enabled: true
accessMode: "{{ adguard_work_storage_mode | default('ReadWriteMany') }}"
size: "{{ adguard_work_size | default('10Gi') }}"
storageClass: "{{ adguard_work_storage | default('nfs-ssd') }}"
## Do not delete the pvc upon helm uninstall
skipuninstall: false

View File

@ -0,0 +1,32 @@
# Request the DoT/DoH certificate up front, then deploy Adguard Home via Helm.
- name: Request cert for Adguard Home
  k8s:
    state: present
    definition:
      apiVersion: cert-manager.io/v1
      kind: Certificate
      metadata:
        name: "{{ adguard_dns_name | default('dns.' + domain) }}-crt"
        namespace: "{{ adguard_namespace | default(namespace) }}"
      spec:
        # Consumed by the chart via tlsSecretName in the role defaults.
        secretName: "{{ adguard_dns_name | default('dns.' + domain) }}-secret"
        dnsNames:
          - "{{ adguard_dns_name | default('dns.' + domain) }}"
        issuerRef:
          name: letsencrypt-prod
          # We can reference ClusterIssuers by changing the kind here.
          # The default value is Issuer (i.e. a locally namespaced Issuer)
          kind: ClusterIssuer
          group: cert-manager.io
# User-supplied adguard_values deep-merge over the role defaults.
- set_fact:
    adguard_combined_values: "{{ adguard_default_values | combine(adguard_values, recursive=true) }}"
- name: Deploy Adguard Home
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ adguard_namespace | default(namespace) }}"
    release_name: "{{ adguard_name | default('adguard') }}"
    chart_ref: "{{ adguard_chart | default('ghp/adguard-home') }}"
    chart_version: "{{ adguard_version | default(omit) }}"
    release_values: "{{ adguard_combined_values | from_yaml }}"

View File

@ -0,0 +1,40 @@
bitwarden_enabled: true
bitwarden_publish: false
# Use the shared Zalando-operator PostgreSQL instead of a chart-local DB.
bitwarden_use_external_db: true
bitwarden_default_values:
  env:
    # NOTE(review): Kubernetes env values must be strings; the other values
    # here are quoted but SIGNUPS_ALLOWED / INVITATIONS_ALLOWED are bare
    # booleans — confirm the chart stringifies them before rendering.
    SIGNUPS_ALLOWED: true
    INVITATIONS_ALLOWED: true
    # DB credentials come from the operator-generated secret (secrets.yaml).
    DATABASE_URL: "postgresql://{{ bitwarden_db_username }}:{{ bitwarden_db_password }}@{{ postgres_db_team | default(namespace) }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local:5432/bitwarden?sslmode=require"
    DOMAIN: "https://bitwarden.{{ domain }}"
    SMTP_FROM: "bitwarden@{{ domain }}"
    SMTP_HOST: "mail.{{ domain }}"
    # NOTE(review): the SMTP password falls back to LDAP password variables,
    # while group_vars sketch a `bitwarden_smtp_pass` placeholder — confirm
    # which variable is actually intended here.
    SMTP_PASSWORD: "{{ bitwarden_ldap_pass | default(bitwarden_ldap_password) }}"
    SMTP_SSL: "true"
    SMTP_EXPLICIT_TLS: "true"
    SMTP_PORT: "465"
    SMTP_USERNAME: "bitwarden@{{ domain }}"
    SMTP_TIMEOUT: "120"
    LOG_LEVEL: "debug"
    EXTENDED_LOGGING: "true"
  ingress:
    enabled: true
    annotations:
      # External class only when the service is published to the internet.
      kubernetes.io/ingress.class: "{{ external_ingress_class if bitwarden_publish else internal_ingress_class }}"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
      cert-manager.io/acme-dns01-provider: "rfc2136"
      cert-manager.io/acme-challenge-type: "dns01"
      kubernetes.io/tls-acme: "true"
    path: /
    hosts:
      - "bitwarden.{{ domain }}"
    tls:
      - secretName: "bitwarden.{{ domain }}-tls"
        hosts:
          - "bitwarden.{{ domain }}"
  persistence:
    enabled: true
    accessMode: "{{ bitwarden_storage_mode | default('ReadWriteMany') }}"
    size: "{{ bitwarden_size | default('8Gi') }}"
    storageClass: "{{ bitwarden_storage | default('nfs-ssd') }}"

View File

@ -0,0 +1,19 @@
# Deploy Bitwarden via Helm; DB credentials are fetched first when the
# external (operator-managed) database is in use.
- name: Import secret.yaml to obtain secrets
  include_tasks: secrets.yaml
  when:
    - bitwarden_use_external_db
    - postgres_enable
- set_fact:
    # User-supplied bitwarden_values deep-merge over the role defaults.
    bitwarden_combined_values: "{{ bitwarden_default_values | combine(bitwarden_values, recursive=true) }}"
- name: Deploy Bitwarden
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ bitwarden_namespace | default(namespace) }}"
    release_name: "{{ bitwarden_name | default('bitwarden') }}"
    chart_ref: "{{ bitwarden_chart | default('ghp/bitwarden') }}"
    chart_version: "{{ bitwarden_version | default(omit) }}"
    release_values: "{{ bitwarden_combined_values | from_yaml }}"
    wait: true

View File

@ -0,0 +1,25 @@
# Looks up the DB credentials generated by the Zalando postgres-operator for
# the "bitwarden" role and exposes them as bitwarden_db_username /
# bitwarden_db_password (consumed by DATABASE_URL in defaults/main.yaml).
- block:
    - name: Set DB namespace for secret lookup
      # First defined *_namespace variable wins; falls back to the global
      # namespace.
      set_fact:
        db_namespace: "{{ bitwarden_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}"
    - name: Set DB secret name for lookup
      # Secret name format is fixed by the Zalando postgres-operator:
      # <role>.<cluster>.credentials.postgresql.acid.zalan.do
      set_fact:
        db_secret_name: "bitwarden.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do"
    - name: Lookup Bitwarden DB secret
      set_fact:
        bitwarden_db_secret: "{{ lookup('k8s', kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}"
    # Only visible with -vv; dumps the raw Secret for troubleshooting.
    - debug:
        msg: "{{ bitwarden_db_secret }}"
        verbosity: 2
    - name: Set Bitwarden DB username
      set_fact:
        bitwarden_db_username: "{{ bitwarden_db_secret.data.username | b64decode }}"
    - name: Set Bitwarden DB password
      set_fact:
        bitwarden_db_password: "{{ bitwarden_db_secret.data.password | b64decode }}"

View File

@ -0,0 +1,6 @@
# Cert-manager release to install (also passed as the Helm chart_version).
cert_manager_version: v1.1.0
cert_manager_namespace: cert-manager
# Contact mailbox registered with the Let's Encrypt ACME account.
lets_encrypt_mailbox: "admin@{{ domain }}"
# k8s_tsig (plaintext RFC2136 TSIG key from inventory) is stored
# base64-encoded in the tsig-secret Secret used by the DNS-01 solver.
cert_manager_base64_tsig_key: "{{ k8s_tsig | b64encode }}"
cert_manager_default_values:
  installCRDs: true

View File

@ -0,0 +1,88 @@
- name: Combine default and user-supplied chart values (user values win)
  set_fact:
    cert_manager_combined_values: "{{ cert_manager_default_values | combine(cert_manager_values, recursive=true) }}"

- name: Deploy Cert-manager {{ cert_manager_version }}
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ cert_manager_namespace | default('cert-manager') }}"
    release_name: "{{ cert_manager_name | default('cert-manager') }}"
    chart_ref: "{{ cert_manager_chart | default('jetstack/cert-manager') }}"
    chart_version: "{{ cert_manager_version }}"
    release_values: "{{ cert_manager_combined_values | from_yaml | default(omit) }}"
    wait: true

- name: Create secret for DNS RFC2136 (NSUPDATE)
  k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Secret
      metadata:
        name: tsig-secret
        # Follow the (possibly overridden) cert-manager namespace instead of
        # hard-coding "cert-manager", so the ClusterIssuer solver can resolve
        # the secret in its cluster-resource-namespace.
        namespace: "{{ cert_manager_namespace | default('cert-manager') }}"
      type: Opaque
      data:
        tsig-secret-key: "{{ cert_manager_base64_tsig_key }}"

- name: Create Production ClusterIssuer for Let's Encrypt
  k8s:
    state: present
    definition:
      # v1alpha2 is deprecated but still served by cert-manager v1.1.
      apiVersion: cert-manager.io/v1alpha2
      kind: ClusterIssuer
      metadata:
        name: letsencrypt-prod
      spec:
        acme:
          # The ACME server URL
          server: https://acme-v02.api.letsencrypt.org/directory
          # Email address used for ACME registration
          email: "{{ lets_encrypt_mailbox }}"
          # Name of a secret used to store the ACME account private key
          privateKeySecretRef:
            name: letsencrypt-prod
          # DNS-01 via RFC2136 dynamic updates (HTTP-01 left disabled below)
          solvers:
            #- http01:
            #    ingress:
            #      class: nginx
            - dns01:
                rfc2136:
                  nameserver: "{{ external_dns_ip | default(dns_ip) }}:53"
                  tsigAlgorithm: HMACSHA512
                  tsigKeyName: k8s
                  tsigSecretSecretRef:
                    key: tsig-secret-key
                    name: tsig-secret

- name: Create Staging ClusterIssuer for Let's Encrypt
  k8s:
    state: present
    definition:
      apiVersion: cert-manager.io/v1alpha2
      kind: ClusterIssuer
      metadata:
        name: letsencrypt-staging
      spec:
        acme:
          # The ACME server URL
          server: https://acme-staging-v02.api.letsencrypt.org/directory
          # Email address used for ACME registration
          email: "{{ lets_encrypt_mailbox }}"
          # Name of a secret used to store the ACME account private key
          privateKeySecretRef:
            name: letsencrypt-staging
          solvers:
            #- http01:
            #    ingress:
            #      class: nginx
            - dns01:
                rfc2136:
                  nameserver: "{{ external_dns_ip | default(dns_ip) }}:53"
                  tsigAlgorithm: HMACSHA512
                  tsigKeyName: k8s
                  tsigSecretSecretRef:
                    key: tsig-secret-key
                    name: tsig-secret

View File

@ -0,0 +1,86 @@
chartmuseum_enabled: true
# When true the ingress is exposed on the external (public) ingress class.
chartmuseum_publish: false
# Default Helm values for ghp/chartmuseum; merged with chartmuseum_values in
# tasks/main.yaml (user values win).
chartmuseum_default_values:
  env:
    open:
      # storage backend, can be one of: local, alibaba, amazon, google, microsoft, oracle
      STORAGE: local
      # levels of nested repos for multitenancy. The default depth is 0 (singletenant server)
      DEPTH: 0
      # sets the base context path
      CONTEXT_PATH: /
      # show debug messages
      DEBUG: false
      # output structured logs as json
      LOG_JSON: true
      # disable use of index-cache.yaml
      DISABLE_STATEFILES: false
      # disable Prometheus metrics
      DISABLE_METRICS: true
      # disable all routes prefixed with /api
      DISABLE_API: false
      # allow chart versions to be re-uploaded
      ALLOW_OVERWRITE: true
      # allow anonymous GET operations when auth is used
      AUTH_ANONYMOUS_GET: true
    secret:
      # username for basic http authentication
      BASIC_AUTH_USER: "{{ chartmuseum_admin_login | default('admin') }}"
      # password for basic http authentication
      BASIC_AUTH_PASS: "{{ chartmuseum_admin_pass | default(chartmuseum_admin_password) }}"
  persistence:
    enabled: true
    accessMode: "{{ chartmuseum_storage_mode | default('ReadWriteMany') }}"
    size: "{{ chartmuseum_size | default('10Gi') }}"
    labels: {}
    path: /storage
    storageClass: "{{ chartmuseum_storage | default('nfs-hdd') }}"
  ## Ingress for load balancer
  ingress:
    enabled: true
    annotations:
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
      cert-manager.io/acme-dns01-provider: "rfc2136"
      cert-manager.io/acme-challenge-type: "dns01"
      kubernetes.io/ingress.class: "{{ external_ingress_class if chartmuseum_publish else internal_ingress_class }}"
      kubernetes.io/tls-acme: "true"
    hosts:
      - name: charts.{{ domain }}
        path: /
        tls: true
        tlsSecret: charts.{{ domain }}-tls
# Applied verbatim by tasks/main.yaml when chartmuseum_readonly_ingress (the
# public read-only hostname) is defined; GETs only, everything else denied.
# NOTE(review): extensions/v1beta1 Ingress is deprecated (removed in k8s
# 1.22) -- era-appropriate here, but revisit on cluster upgrade.
chartmuseum_readonly_ingress_definition: |
  apiVersion: extensions/v1beta1
  kind: Ingress
  metadata:
    annotations:
      cert-manager.io/acme-challenge-type: dns01
      cert-manager.io/acme-dns01-provider: rfc2136
      cert-manager.io/cluster-issuer: letsencrypt-prod
      kubernetes.io/ingress.class: "{{ external_ingress_class }}"
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
      nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
      nginx.ingress.kubernetes.io/configuration-snippet: |-
        limit_except GET {
          deny all;
        }
    name: chartmuseum-public
    namespace: "{{ chartmuseum_namespace | default(namespace) }}"
  spec:
    rules:
      - host: "{{ chartmuseum_readonly_ingress }}"
        http:
          paths:
            - backend:
                serviceName: chartmuseum-chartmuseum
                servicePort: 8080
              path: /
    tls:
      - hosts:
          - "{{ chartmuseum_readonly_ingress }}"
        secretName: "{{ chartmuseum_readonly_ingress }}-tls"

View File

@ -0,0 +1,20 @@
- name: Combine default and user-supplied chart values (user values win)
  set_fact:
    chartmuseum_combined_values: "{{ chartmuseum_default_values | combine(chartmuseum_values, recursive=true) }}"

- name: Deploy ChartMuseum
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ chartmuseum_namespace | default(namespace) }}"
    release_name: "{{ chartmuseum_name | default('chartmuseum') }}"
    chart_ref: "{{ chartmuseum_chart | default('ghp/chartmuseum') }}"
    chart_version: "{{ chartmuseum_version | default(omit) }}"
    release_values: "{{ chartmuseum_combined_values | from_yaml }}"
    wait: true

# Extra read-only ingress, only when the public hostname is configured.
- name: Deploy readonly public ingress for ChartMuseum
  k8s:
    state: present
    definition: "{{ chartmuseum_readonly_ingress_definition }}"
  when: chartmuseum_readonly_ingress is defined

View File

@ -0,0 +1,3 @@
# Run ddclient in a Docker container (docker.yml) instead of installing the
# distro package (install.yml/configure.yml).
dockerize: false
# NOTE(review): this role default is shadowed by the group_vars
# `namespace: ghp`; it only applies when the inventory does not set one.
namespace: ddclient
# linuxserver.io ddclient image tag (see handlers and docker.yml).
ddclient_image_tag: v3.9.1-ls45

View File

@ -0,0 +1,27 @@
---
# Handlers for both deployment flavors: systemd service (package install)
# and Docker container (dockerize: true).
- name: start ddclient
  systemd:
    name: ddclient
    state: started
    enabled: true

- name: restart ddclient
  systemd:
    name: ddclient
    state: restarted
    enabled: true

# Recreates/restarts the container so a changed ddclient.conf or TSIG key
# is picked up; mirrors the container definition in tasks/docker.yml.
- name: restart docker ddclient
  community.general.docker_container:
    name: "{{ namespace }}-ddclient"
    image: "{{ docker_registry }}/ddclient:{{ ddclient_image_tag | default('v3.9.1-ls45') }}"
    state: started
    restart: true
    container_default_behavior: no_defaults
    detach: true
    restart_policy: unless-stopped
    volumes:
      - "/opt/{{ namespace }}/ddclient.conf:/config/ddclient.conf"
      - "/opt/{{ namespace }}/Kvps.key:/config/Kvps.key"
      - "/opt/{{ namespace }}/Kvps.private:/config/Kvps.private"

View File

@ -0,0 +1,18 @@
---
# Package-based configuration: TSIG key pair plus ddclient.conf on the host.
- block:
    - name: copy public key for ddclient
      copy:
        dest: /etc/Kvps.key
        src: files/Kvps.key
      # Restart so a rotated TSIG key is actually picked up (the original
      # only restarted on config changes).
      notify: restart ddclient
    - name: copy private key for ddclient
      copy:
        dest: /etc/Kvps.private
        src: files/Kvps.private
      notify: restart ddclient
    # Fixed task-name typo ("ddlient").
    - name: configure ddclient.conf
      copy:
        content: "{{ ddclient_conf }}"
        dest: "/etc/ddclient.conf"
      notify: restart ddclient

View File

@ -0,0 +1,35 @@
---
# Container-based deployment: config + TSIG keys under /opt/<namespace>,
# bind-mounted into the linuxserver.io ddclient image.
- name: Create configuration dir for {{ namespace }}
  file:
    name: "/opt/{{ namespace }}"
    state: directory

- name: Copy ddclient configuration for {{ namespace }}
  copy:
    dest: "/opt/{{ namespace }}/ddclient.conf"
    content: "{{ ddclient_conf }}"
  notify: restart docker ddclient

# NOTE(review): key material is written with default permissions; consider
# tightening modes after confirming the container UID can still read them.
- name: Copy Kvps.key for {{ namespace }}
  copy:
    dest: "/opt/{{ namespace }}/Kvps.key"
    content: "{{ ddclient_tsig_public_key_base64 | b64decode }}"
  notify: restart docker ddclient

- name: Copy Kvps.private for {{ namespace }}
  copy:
    dest: "/opt/{{ namespace }}/Kvps.private"
    content: "{{ ddclient_tsig_private_key_base64 | b64decode }}"
  notify: restart docker ddclient

- name: Start ddclient in docker for {{ namespace }}
  # FQCN for consistency with the handler (community.general collection).
  community.general.docker_container:
    name: "{{ namespace }}-ddclient"
    image: "{{ docker_registry }}/ddclient:{{ ddclient_image_tag }}"
    state: started
    container_default_behavior: no_defaults
    restart_policy: unless-stopped
    volumes:
      - "/opt/{{ namespace }}/ddclient.conf:/config/ddclient.conf"
      - "/opt/{{ namespace }}/Kvps.key:/config/Kvps.key"
      - "/opt/{{ namespace }}/Kvps.private:/config/Kvps.private"

View File

@ -0,0 +1,11 @@
---
# Installs the distro ddclient package (non-dockerized path).
- block:
    - name: installing ddclient
      package:
        name:
          - ddclient
        state: present
      # Handler in handlers/main.yml ensures the service is started after
      # the first install.
      notify: start ddclient
      # NOTE(review): variable name carries a typo ("ddlient") and appears
      # unused in this role's visible tasks -- left unchanged in case other
      # plays reference it.
      register: install_ddlient_result
  tags:
    - ddclient-install

View File

@ -0,0 +1,12 @@
---
# Entry point: package-based path when dockerize is false, container path
# otherwise. Conditions moved to block level instead of repeating them on
# every imported task (import_tasks inherits the block's `when`).
- block:
    - import_tasks: install.yml
    - import_tasks: configure.yml
  when: not dockerize
  become: true

- block:
    - import_tasks: docker.yml
  when: dockerize
  become: true

View File

@ -0,0 +1 @@
# Deliberately a quoted string: tasks compare against the literal 'yes'
# (see docker.yml: `when: install_docker_ce_repo == 'yes'`).
install_docker_ce_repo: 'yes'

View File

@ -0,0 +1,54 @@
---
# Installs Docker CE from the upstream CentOS 7 repo, deploys daemon.json
# and (re)starts the service.
- block:
    - name: Install packages for Docker
      yum:
        name:
          - device-mapper-persistent-data
          - lvm2
          - libselinux-python
        state: present

    - name: add docker-ce repo
      yum_repository:
        name: docker-ce-stable
        file: docker-ce
        description: Docker CE Stable - $basearch
        enabled: true
        baseurl: https://download.docker.com/linux/centos/7/$basearch/stable
        gpgkey: https://download.docker.com/linux/centos/gpg
        gpgcheck: true
      # Skippable for hosts that get the repo from elsewhere (mirror, image).
      when: install_docker_ce_repo == 'yes'

    - name: Install Docker
      package:
        name: docker-ce
        state: present

    - name: Create /etc/docker directory
      file:
        path: /etc/docker
        state: directory

    - name: Deploy Docker daemon.json
      template:
        src: daemon.json.j2
        dest: /etc/docker/daemon.json
      register: daemon_config_result

    - name: Start Docker service
      service:
        name: docker
        state: started
        enabled: true

    # Explicit restart because a daemon.json change does not restart an
    # already-running docker.service.
    - name: Restart Docker
      systemd:
        state: restarted
        name: docker
      when: daemon_config_result is changed
  # Every task here needs root; grant it once at block level (the original
  # left the /etc/docker dir and daemon.json tasks without become).
  become: true
  tags:
    - docker

View File

@ -0,0 +1,5 @@
---
# Entry point: all work lives in docker.yml; the block exists to attach the
# shared "docker" tag.
- block:
    - import_tasks: docker.yml
  tags:
    - docker

View File

@ -0,0 +1,18 @@
{# Renders /etc/docker/daemon.json. Optional inventory input:
   docker.insecure_registries -- a single registry string or a list. #}
{% if docker is defined and docker.insecure_registries is defined %}
{# Normalize to a list so to_json always emits a valid JSON array; the old
   ["{{ var }}"] form produced broken output for list inputs. #}
{% set insecure_registries = [docker.insecure_registries] if docker.insecure_registries is string else docker.insecure_registries %}
{% endif %}
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
{% if insecure_registries is defined %}
  "insecure-registries": {{ insecure_registries | to_json }},
{% endif %}
  "log-opts": {
    "max-size": "100m"
  }
}

View File

@ -0,0 +1,201 @@
# Default Helm values for the ghp/dovecot chart (merged with dovecot_values
# in tasks/main.yaml). Everything under `configmaps` is rendered verbatim as
# dovecot configuration files -- those strings are runtime content, so
# commentary lives only at the YAML level here.
dovecot_default_values:
  replicaCount: 1
  persistence:
    enabled: true
    # Pre-existing PVC holding the user mailboxes.
    existingClaim: mailboxes
  tls:
    enabled: true
    # Certificate/key pair mounted at /tls (referenced by 10-ssl below).
    existingSecret: mail.{{ domain }}-secret
  dovecot:
    image:
      repository: "{{ docker_registry }}/dovecot"
      tag: latest
      pullPolicy: Always
  configmaps:
    # Main dovecot.conf plus the LDAP connection config.
    dovecot:
      dovecot: |
        protocols = imap lmtp sieve
        mail_max_userip_connections = 1000
        mail_plugins = virtual
        haproxy_trusted_networks = 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16
        haproxy_timeout = 30s
        dict {
          #quota = mysql:/etc/dovecot/dovecot-dict-sql.conf.ext
          #expire = sqlite:/etc/dovecot/dovecot-dict-sql.conf.ext
        }
        # Most of the actual configuration gets included below. The filenames are
        # first sorted by their ASCII value and parsed in that order. The 00-prefixes
        # in filenames are intended to make it easier to understand the ordering.
        !include conf.d/*.conf
        # A config file can also tried to be included without giving an error if
        # it's not found:
        !include_try local.conf
      # NOTE(review): uri is ldaps:// (implicit TLS); `tls = no` below only
      # disables STARTTLS -- confirm this combination is intended.
      ldap: |
        uris = ldaps://openldap.{{ domain }}
        dn = uid=ldapbind,ou=services,{{ openldap_domain }}
        dnpass = {{ ldapbind_pass | default(ldapbind_password) }}
        auth_bind = yes
        auth_bind_userdn = uid=%n,ou=users,{{ openldap_domain }}
        tls = no
        ldap_version = 3
        base = ou=users,{{ openldap_domain }}
        deref = never
        scope = subtree
        user_filter = (&(objectClass=posixAccount)(mail=%u))
        user_attrs = cn=home=/home/vmail/%$
        pass_filter = (&(objectClass=posixAccount)(mail=%u))
        pass_attrs = uid=user,userPassword=password
        #default_pass_scheme = CRYPT
    # Files dropped into conf.d/ (picked up by the !include above).
    confd:
      auth-ldap: |
        passdb {
          driver = ldap
          # Path for LDAP configuration file, see example-config/dovecot-ldap.conf.ext
          args = /etc/dovecot/ldap.conf
        }
        userdb {
          driver = ldap
          args = /etc/dovecot/ldap.conf
        }
      10-auth: |
        auth_default_realm = {{ domain }}
        auth_username_format = %Lu
        auth_mechanisms = plain login
      10-mail: |
        mail_location = maildir:%h
        namespace inbox {
          inbox = yes
        }
        mail_uid = vmail
        mail_gid = vmail
        first_valid_uid = 1000
        last_valid_uid = 1000
        first_valid_gid = 1000
        last_valid_gid = 1000
        protocol !indexer-worker {
        }
        mbox_write_locks = fcntl
      10-master: |
        protocol imap {
          mail_plugins = virtual
        }
        service imap-login {
          inet_listener imap {
            #port = 143
          }
          inet_listener imaps {
            #port = 993
            #ssl = yes
          }
          inet_listener imap_haproxy {
            port = 1109
            haproxy = yes
          }
          inet_listener imaps_haproxy {
            port = 10993
            ssl = yes
            haproxy = yes
          }
        }
        service pop3-login {
          inet_listener pop3 {
            #port = 110
          }
          inet_listener pop3s {
            #port = 995
            #ssl = yes
          }
        }
        service lmtp {
          inet_listener lmtp {
            port = 24
          }
          unix_listener /var/spool/postfix/private/dovecot-lmtp {
            mode = 0600
            group = postfix
            user = postfix
          }
          user = vmail
        }
        service imap {
        }
        service pop3 {
        }
        service auth {
          inet_listener {
            port = 12345
          }
          unix_listener auth-userdb {
            mode = 0660
            user = vmail
            #group =
          }
          # Postfix smtp-auth
          unix_listener /var/spool/postfix/private/auth {
            mode = 0660
            user = postfix
            group = postfix
          }
        }
        service auth-worker {
        }
        service dict {
          unix_listener dict {
          }
        }
      10-ssl: |
        ssl = required
        #verbose_ssl = yes
        ssl_prefer_server_ciphers = yes
        ssl_min_protocol = TLSv1.2
        ssl_cert = </tls/tls.crt
        ssl_key = </tls/tls.key
      10-logging: |
        log_path = /dev/stderr
        info_log_path = /dev/stdout
        debug_log_path = /dev/stdout
      15-lda: |
        postmaster_address = postmaster@{{ domain }}
        hostname = {{ domain }}
        rejection_reason = Your message to was automatically rejected:%n%r
        protocol lda {
          mail_plugins = virtual sieve
        }
      20-lmtp: |
        protocol lmtp {
          mail_plugins = virtual sieve
          postmaster_address = postmaster@{{ domain }}
        }
      20-managesieve: |
        service managesieve-login {
          inet_listener sieve {
            port = 4190
            ssl = yes
          }
          service_count = 1
          vsz_limit = 64M
        }
        service managesieve {
          process_limit = 1024
        }
  # Mail ports are exposed directly (MetalLB-style LB), not via ingress.
  service:
    type: LoadBalancer
    loadBalancerIP: "{{ dovecot_loadbalancer_ip | default(omit) }}"

View File

@ -0,0 +1,13 @@
- name: Combine default and user-supplied chart values (user values win)
  set_fact:
    dovecot_combined_values: "{{ dovecot_default_values | combine(dovecot_values, recursive=true) }}"

- name: Deploy Dovecot
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ dovecot_namespace | default(mail_namespace) | default(namespace) }}"
    release_name: "{{ dovecot_name | default('dovecot') }}"
    chart_ref: "{{ dovecot_chart | default('ghp/dovecot') }}"
    chart_version: "{{ dovecot_version | default(omit) }}"
    release_values: "{{ dovecot_combined_values | from_yaml }}"
    wait: true

View File

@ -0,0 +1,50 @@
drone_enabled: true
# When true the UI is published on the external ingress class.
drone_publish: false
drone_use_external_db: true
# Default Helm values for drone/drone; merged with drone_values in
# tasks/main.yaml (user values win).
drone_default_values:
  service:
    type: ClusterIP
    port: 80
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: "{{ external_ingress_class if drone_publish else internal_ingress_class }}"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
      cert-manager.io/acme-dns01-provider: "rfc2136"
      cert-manager.io/acme-challenge-type: "dns01"
    # NOTE(review): unlike the sibling roles there is no
    # kubernetes.io/tls-acme annotation here -- confirm that is intentional.
    hosts:
      - host: "drone.{{ domain }}"
        paths:
          - "/"
    tls:
      - secretName: "drone.{{ domain }}-tls"
        hosts:
          - "drone.{{ domain }}"
  persistentVolume:
    enabled: true
    accessModes:
      - "{{ drone_storage_mode | default('ReadWriteMany') }}"
    mountPath: /data
    size: "{{ drone_size | default('8Gi') }}"
    storageClass: "{{ drone_storage | default('nfs-ssd') }}"
  env:
    DRONE_SERVER_HOST: "drone.{{ domain }}"
    DRONE_SERVER_PROTO: https
    # Shared RPC secret between server and runners.
    DRONE_RPC_SECRET: "{{ drone_rpc_secret | default(omit) }}"
    DRONE_DATABASE_DRIVER: "postgres"
    # NOTE(review): sslmode=disable here while Bitwarden uses sslmode=require
    # against the same PostgreSQL cluster -- verify which is intended.
    DRONE_DATABASE_DATASOURCE: "postgres://{{ drone_db_username }}:{{ drone_db_password }}@{{ postgres_db_team | default(namespace) }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local:5432/drone?sslmode=disable"
    DRONE_DATABASE_SECRET: "{{ drone_database_secret | default(omit) }}"
    DRONE_GITEA_CLIENT_ID: "{{ drone_gitea_client_id | default(omit) }}"
    DRONE_GITEA_CLIENT_SECRET: "{{ drone_gitea_client_secret | default(omit) }}"
    DRONE_GITEA_SERVER: "https://gitea.{{ domain }}"
# Values for the Kubernetes runner (drone/drone-runner-kube chart).
drone_runner_kube_default_values:
  rbac:
    buildNamespaces:
      - "{{ drone_namespace | default(namespace) }}"
  env:
    DRONE_RPC_SECRET: "{{ drone_rpc_secret }}"
    DRONE_RPC_HOST: "drone.{{ domain }}"
    DRONE_RPC_PROTO: https
    DRONE_NAMESPACE_DEFAULT: "{{ drone_namespace | default(namespace) }}"

View File

@ -0,0 +1,31 @@
- name: Import secret.yaml to obtain secrets
  include_tasks: secrets.yaml
  when:
    - drone_use_external_db
    - postgres_enable

- name: Combine default and user-supplied server chart values
  set_fact:
    drone_combined_values: "{{ drone_default_values | combine(drone_values, recursive=true) }}"

- name: Combine default and user-supplied runner chart values
  set_fact:
    drone_runner_kube_combined_values: "{{ drone_runner_kube_default_values | combine(drone_runner_kube_values, recursive=true) }}"

- name: Deploy Drone Server
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ drone_namespace | default(namespace) }}"
    release_name: "{{ drone_name | default('drone') }}"
    chart_ref: "{{ drone_chart | default('drone/drone') }}"
    chart_version: "{{ drone_version | default(omit) }}"
    release_values: "{{ drone_combined_values | from_yaml }}"
    wait: true

- name: Deploy Drone Runner Kube
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ drone_runner_kube_namespace | default(namespace) }}"
    release_name: "{{ drone_runner_kube_name | default('drone-runner-kube') }}"
    chart_ref: "{{ drone_runner_kube_chart | default('drone/drone-runner-kube') }}"
    chart_version: "{{ drone_runner_kube_version | default(omit) }}"
    release_values: "{{ drone_runner_kube_combined_values | from_yaml }}"
    wait: true

View File

@ -0,0 +1,25 @@
# Looks up the DB credentials generated by the Zalando postgres-operator for
# the "drone" role and exposes them as drone_db_username / drone_db_password
# (consumed by DRONE_DATABASE_DATASOURCE in defaults/main.yaml).
- block:
    - name: Set DB namespace for secret lookup
      # First defined *_namespace variable wins; falls back to the global
      # namespace.
      set_fact:
        db_namespace: "{{ drone_db_namespace | default(postgres_db_namespace) | default(postgres_namespace) | default(postgres_operator_namespace) | default(namespace) }}"
    - name: Set DB secret name for lookup
      # Secret name format is fixed by the Zalando postgres-operator:
      # <role>.<cluster>.credentials.postgresql.acid.zalan.do
      set_fact:
        db_secret_name: "drone.{{ postgres_db_team | default(namespace) }}-postgres.credentials.postgresql.acid.zalan.do"
    - name: Lookup Drone DB secret
      set_fact:
        drone_db_secret: "{{ lookup('k8s', kind='Secret', namespace=db_namespace, resource_name=db_secret_name) }}"
    # Only visible with -vv; dumps the raw Secret for troubleshooting.
    - debug:
        msg: "{{ drone_db_secret }}"
        verbosity: 2
    - name: Set Drone DB username
      set_fact:
        drone_db_username: "{{ drone_db_secret.data.username | b64decode }}"
    - name: Set Drone DB password
      set_fact:
        drone_db_password: "{{ drone_db_secret.data.password | b64decode }}"

View File

@ -0,0 +1,15 @@
# Default values for bitnami/external-dns: publishes DNS records for
# ingresses on the external ingress class via RFC2136 dynamic updates.
external_dns_default_values:
  fullnameOverride: "{{ external_dns_name | default(namespace + '-external-dns') }}"
  # Only ingresses carrying this class annotation are published.
  annotationFilter: "kubernetes.io/ingress.class={{ external_ingress_class }}"
  domainFilters: ["{{ external_domain | default(domain) }}"]
  provider: rfc2136
  rfc2136:
    host: "{{ external_dns_ip | default(dns_ip) }}"
    port: 53
    zone: "{{ external_domain | default(domain) }}"
    # Plaintext TSIG key from inventory (k8s_tsig); the chart wraps it in a
    # Secret itself.
    tsigSecret: "{{ k8s_tsig }}"
    tsigSecretAlg: "{{ external_dns_tsigSecretAlg | default('hmac-sha512') }}"
    tsigKeyname: "{{ external_dns_tsigKeyname | default('k8s') }}"
    tsigAxfr: true
    ## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
    minTTL: "30s"

View File

@ -0,0 +1,12 @@
- name: Combine default and user-supplied chart values (user values win)
  set_fact:
    external_dns_combined_values: "{{ external_dns_default_values | combine(external_dns_values, recursive=true) }}"

- name: Deploy external DNS
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ external_dns_namespace | default(dns_namespace) | default(namespace) }}"
    release_name: "{{ external_dns_name | default(namespace + '-external-dns') }}"
    chart_ref: "{{ external_dns_chart | default('bitnami/external-dns') }}"
    chart_version: "{{ external_dns_version | default(omit) }}"
    release_values: "{{ external_dns_combined_values | from_yaml }}"
    wait: true

View File

@ -0,0 +1,14 @@
# Default values for the public ingress-nginx release. Runs behind a proxy
# that speaks PROXY protocol, so client IPs are recovered from it.
external_ingress_nginx_default_values:
  controller:
    config:
      # ConfigMap entries -- quoted because ConfigMap values must be strings.
      use-proxy-protocol: "true"
      # Fixed option names: ingress-nginx expects "use-forwarded-headers" /
      # "compute-full-forwarded-for"; the previous "use-forward-headers" /
      # "compute-full-forward-for" were unknown keys and silently ignored.
      use-forwarded-headers: "true"
      compute-full-forwarded-for: "true"
    publishService:
      enabled: true
    scope:
      enabled: false
    service:
      loadBalancerIP: "{{ external_loadbalancer_ip | default(omit) }}"
      # Preserve client source IPs at the LB.
      externalTrafficPolicy: Local
    ingressClass: "{{ external_ingress_class }}"

View File

@ -0,0 +1,13 @@
- name: Combine default and user-supplied chart values (user values win)
  set_fact:
    external_ingress_nginx_combined_values: "{{ external_ingress_nginx_default_values | combine(external_ingress_nginx_values, recursive=true) }}"

- name: Deploy external Nginx Ingress
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ external_ingress_nginx_namespace | default(ingress_namespace) | default(namespace) }}"
    release_name: "{{ external_ingress_nginx_name | default(namespace + '-external-ingress-nginx') }}"
    chart_ref: "{{ external_ingress_nginx_chart | default('ingress-nginx/ingress-nginx') }}"
    chart_version: "{{ external_ingress_nginx_version | default(omit) }}"
    release_values: "{{ external_ingress_nginx_combined_values | from_yaml }}"
    wait: true
  when: external_ingress_nginx_enabled | default(true)

View File

@ -0,0 +1,137 @@
gitea_enabled: true
# Publish the web UI / SSH endpoint externally; also drives which ingress
# class name is generated below and whether the internal DNS release runs.
gitea_publish_web: false
gitea_publish_ssh: false
gitea_use_external_db: true
# Gitea gets its own dedicated ingress-nginx release (it needs raw TCP/22);
# the class name encodes namespace and public/private flavor.
gitea_ingress_class: "{{ gitea_namespace | default(namespace) }}-{{ 'public' if gitea_publish_web else 'private' }}-gitea-ingress-nginx"
# Default Helm values for the ghp/gitea chart; merged with gitea_values in
# tasks/main.yaml (user values win).
gitea_default_values:
  config:
    disableInstaller: true
    admin_user: "{{ gitea_admin_user | default('gitea') }}"
    admin_pass: "{{ gitea_admin_pass | default(gitea_admin_password) }}"
    # Outgoing mail via the stack's own mail server (implicit TLS on 465).
    mailer:
      domain: "{{ mail_domain | default(domain) }}"
      enabled: true
      host: "mail.{{ mail_domain | default(domain) }}:465"
      skip_verify: false
      is_tls_enabled: true
      from: "gitea@{{ mail_domain | default(domain) }}"
      # NOTE(review): *_ldap_* variables are used as the SMTP account
      # credentials here -- confirm they hold the mail login.
      user: "{{ gitea_ldap_user | default('gitea') }}"
      passwd: "{{ gitea_ldap_pass | default(gitea_ldap_password) }}"
  ingress:
    ## Set to true to enable ingress record generation
    enabled: true
    ## When the ingress is enabled, a host pointing to this will be created
    hostname: "gitea.{{ domain }}"
    annotations:
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
      cert-manager.io/acme-dns01-provider: "rfc2136"
      cert-manager.io/acme-challenge-type: "dns01"
      kubernetes.io/ingress.class: "{{ gitea_ingress_class }}"
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
      nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
      kubernetes.io/tls-acme: "true"
    #
    ## The list of additional hostnames to be covered with this ingress record.
    ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
    # hosts:
    #   - name: git.example.com
    #     path: /
    tls:
      - hosts:
          - "gitea.{{ domain }}"
        secretName: "gitea.{{ domain }}-tls"
  service:
    type: ClusterIP
    ## This can stay as ClusterIP as (by default) we use ingress
    http:
      port: 3000
      ## Make the external port available
      # externalPort: 8082
      # externalHost: gitea.local
    ## SSH is commonly on port 22
    ssh:
      port: 22
      ## If serving on a different external port used for determining the ssh url in the gui
      # externalPort: 22
      # externalHost: gitea.local
      # externalIPs: []
  persistence:
    enabled: true
    # existingGiteaClaim: gitea-gitea
    accessMode: "{{ gitea_storage_mode | default('ReadWriteMany') }}"
    size: "{{ gitea_size | default('20Gi') }}"
    storageClass: "{{ gitea_storage | default('nfs-ssd') }}"
    ## addtional annotations for PVCs. Uncommenting will prevent the PVC from being deleted.
    annotations:
      "helm.sh/resource-policy": keep
    # Git LFS objects get their own (larger, slower) volume.
    lfs:
      enabled: "{{ gitea_lfs | default(true) }}"
      accessMode: "{{ gitea_lfs_storage_mode | default('ReadWriteMany') }}"
      size: "{{ gitea_lfs_size | default('50Gi') }}"
      storageClass: "{{ gitea_lfs_storage | default('nfs-hdd') }}"
      annotations:
        "helm.sh/resource-policy": keep
  # Bundled MariaDB disabled; the shared PostgreSQL cluster is used instead
  # (credentials resolved in tasks/secrets.yaml).
  mariadb:
    enabled: false
  externalDB:
    enabled: true
    dbType: "postgres"
    dbHost: "{{ postgres_db_team | default(namespace) }}-postgres.{{ postgres_db_namespace | default(namespace) }}.svc.cluster.local"
    dbPort: "5432"
    dbDatabase: "gitea"
    dbUser: "{{ gitea_db_username | default(omit)}}"
    dbPassword: "{{ gitea_db_password | default(omit) }}"
# Overlay merged on top of the ingress values when gitea_publish_web is true
# (LB sits behind a PROXY-protocol-speaking frontend).
# NOTE(review): the key keeps the historical "publush" typo because
# tasks/main.yaml references it verbatim -- rename both together.
gitea_publush_ingress_nginx_values:
  controller:
    config:
      # Quoted: ConfigMap values must be strings.
      use-proxy-protocol: "true"
      # Fixed option names: ingress-nginx expects "use-forwarded-headers" /
      # "compute-full-forwarded-for"; the previous "use-forward-headers" /
      # "compute-full-forward-for" were unknown keys and silently ignored.
      use-forwarded-headers: "true"
      compute-full-forwarded-for: "true"
    service:
      externalTrafficPolicy: Local
# Values for the dedicated Gitea ingress-nginx release: exposes SSH (22)
# alongside HTTP/HTTPS using the tcp-services ConfigMap mechanism.
gitea_ingress_nginx_default_values:
  controller:
    containerPort:
      ssh: 22
      http: 80
      https: 443
    publishService:
      enabled: true
    scope:
      enabled: true
    extraArgs:
      # ConfigMap that maps external TCP ports to services (SSH pass-through).
      tcp-services-configmap: "{{ gitea_namespace | default(namespace) }}/{{ gitea_ingress_nginx_name | default(namespace + '-gitea-ingress-nginx') }}-tcp"
    service:
      enabled: true
      type: LoadBalancer
      loadBalancerIP: "{{ gitea_loadbalancer_ip | default(omit) }}"
      ports:
        ssh: 22
        http: 80
        https: 443
      targetPorts:
        ssh: ssh
        http: http
        https: https
    ingressClass: "{{ gitea_ingress_class }}"
  # Port 22 on the LB is forwarded straight to the Gitea SSH service.
  tcp:
    22: "{{ gitea_namespace | default(namespace) }}/gitea-gitea-svc:22"
# Dedicated external-dns release publishing Gitea's hostname to the internal
# DNS; only deployed when gitea_publish_web is false (see tasks/main.yaml).
gitea_dns_default_values:
  fullnameOverride: "{{ gitea_dns_name | default(namespace + '-gitea-internal-dns') }}"
  # Only the Gitea ingress class is watched.
  annotationFilter: "kubernetes.io/ingress.class={{ gitea_ingress_class }}"
  domainFilters: ["{{ domain }}"]
  provider: rfc2136
  rfc2136:
    host: "{{ dns_ip }}"
    port: 53
    zone: "{{ domain }}"
    # Plaintext TSIG key from inventory (k8s_tsig).
    tsigSecret: "{{ k8s_tsig }}"
    tsigSecretAlg: "{{ gitea_dns_tsigSecretAlg | default('hmac-sha512') }}"
    tsigKeyname: "{{ gitea_dns_tsigKeyname | default('k8s') }}"
    tsigAxfr: true
    ## Possible units [ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration
    minTTL: "30s"

View File

@ -0,0 +1,50 @@
- name: Import secret.yaml to obtain secrets
  include_tasks: secrets.yaml
  when:
    - gitea_use_external_db
    - postgres_enable

- name: Combine default and user-supplied Gitea chart values
  set_fact:
    gitea_combined_values: "{{ gitea_default_values | combine(gitea_values, recursive=true) }}"

- name: Combine default and user-supplied DNS chart values
  set_fact:
    gitea_dns_combined_values: "{{ gitea_dns_default_values | combine(gitea_dns_values, recursive=true) }}"

- name: Combine default and user-supplied ingress chart values
  set_fact:
    gitea_ingress_nginx_combined_values: "{{ gitea_ingress_nginx_default_values | combine(gitea_ingress_nginx_values, recursive=true) }}"

- name: Overlay public-ingress settings when publishing Gitea externally
  set_fact:
    gitea_ingress_nginx_combined_values: "{{ gitea_ingress_nginx_combined_values | combine(gitea_publush_ingress_nginx_values, recursive=true) }}"
  when: gitea_publish_web

- name: Deploy Nginx Ingress for Gitea
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ gitea_ingress_nginx_namespace | default(namespace) }}"
    release_name: "{{ gitea_ingress_nginx_name | default(namespace + '-gitea-ingress-nginx') }}"
    chart_ref: "{{ gitea_ingress_nginx_chart | default('ingress-nginx/ingress-nginx') }}"
    chart_version: "{{ gitea_ingress_nginx_version | default(omit) }}"
    release_values: "{{ gitea_ingress_nginx_combined_values | from_yaml }}"
    wait: true

# Internal DNS records are only needed when Gitea is not published.
- name: Deploy DNS for Gitea
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ gitea_dns_namespace | default(namespace) }}"
    release_name: "{{ gitea_dns_name | default('gitea-internal-dns') }}"
    chart_ref: "{{ gitea_dns_chart | default('bitnami/external-dns') }}"
    chart_version: "{{ gitea_dns_version | default(omit) }}"
    release_values: "{{ gitea_dns_combined_values | from_yaml }}"
    wait: true
  when: not gitea_publish_web

- name: Deploy Gitea
  community.kubernetes.helm:
    create_namespace: true
    release_namespace: "{{ gitea_namespace | default(namespace) }}"
    release_name: "{{ gitea_name | default('gitea') }}"
    chart_ref: "{{ gitea_chart | default('ghp/gitea') }}"
    chart_version: "{{ gitea_version | default(omit) }}"
    release_values: "{{ gitea_combined_values | from_yaml }}"
    wait: true

Some files were not shown because too many files have changed in this diff Show More