Compare commits

...

133 Commits

Author SHA1 Message Date
uumas
33791031e2 prometheus: Update config option name 2025-09-14 03:11:23 +03:00
uumas
e0047b73f4 jitsi: Use firewalld 2025-09-14 03:11:11 +03:00
uumas
8fdb8eaf80 authentik: Support for additional mounts 2025-05-26 00:00:27 +03:00
uumas
bc2220e1ed service: fix published ports 2025-05-25 23:59:57 +03:00
uumas
4d4ade6ae2 Fix docker_published_ports not working when set to db_published_ports 2025-04-24 00:53:39 +03:00
uumas
3e9d83457d docker: use deb822 repo 2025-04-04 19:22:20 +03:00
uumas
5b4bea1b09 service: set reasonable defaults for copypath modes 2025-03-27 22:20:07 +02:00
uumas
53af0b80f6 gitea: update config option name 2025-03-27 22:19:48 +02:00
uumas
8f777f7ed7 dokuwiki: use official image instead of bitnami 2025-03-27 22:19:23 +02:00
uumas
f2bd55941a container -> service migration 2025-03-27 22:19:03 +02:00
uumas
2fef2c6ad8 fix some roles 2025-02-15 11:43:16 +02:00
uumas
599332b1d9 Merge branch 'master' of git.uumas.fi:uumas/ansible-docker 2024-11-13 04:45:19 +02:00
uumas
ff77e75d1d service: only use user as owner if docker_host_user is true 2024-11-13 04:26:51 +02:00
uumas
006a87f678 service: Initialize db_published_ports 2024-11-13 04:25:58 +02:00
uumas
ae7558a470 service: Add default for docker_vhost_additional_locations 2024-11-13 03:22:09 +02:00
uumas
340d870049 Merge branch 'master' of git.uumas.fi:uumas/ansible-docker 2024-11-09 13:49:59 +02:00
uumas
abcaf22958 service: Add docker_vhost_additional_locations 2024-10-17 17:18:13 +03:00
uumas
fd75af01af Delete uisp 2024-10-16 14:54:02 +03:00
uumas
d6c316f7f2 that hasn't been needed for a while 2024-09-29 23:27:06 +03:00
uumas
88abb5b243 Add authentik ldap outpost role 2024-09-28 16:23:06 +03:00
uumas
ec58a89dff service: Make postgres version configurable 2024-09-28 15:54:05 +03:00
uumas
5692cadea5 authentik: Make tag configurable 2024-09-28 03:31:45 +03:00
uumas
2cef674456 element, jitsi: fix vhost_proxy_target 2024-08-02 05:45:02 +03:00
uumas
fbbcb365ba gitea: fix volume name 2024-07-28 17:44:16 +03:00
uumas
75d9a054f1 container/service: update http port 2024-07-28 17:43:51 +03:00
uumas
c7b14f8dfa uumas.general.vhost update 2024-07-28 01:23:43 +03:00
uumas
e4222635d9 Merge branch 'master' of git.uumas.fi:uumas/ansible-docker 2024-07-28 01:20:34 +03:00
uumas
a773666715 Update wordpress to use service role 2024-07-28 01:20:10 +03:00
uumas
5921c7d5db jitsi: adjustements etc 2024-07-28 01:14:45 +03:00
uumas
79ba8658a0 Migrate docker_volumes -> docker_mounts 2024-07-28 01:14:14 +03:00
uumas
fa1cdcc348 authentik: migrate to service role 2024-07-28 01:12:43 +03:00
uumas
87bb985211 Add service role 2024-07-28 01:12:26 +03:00
uumas
64d074ea4b alpine: import_role -> include_role 2024-07-28 01:10:49 +03:00
uumas
2363902890 fix keycloak entrypoint 2024-05-19 17:02:59 +03:00
uumas
7c36ef086b v0.9.0 2023-12-21 01:48:50 +02:00
uumas
16e0d6eadb update smtp docs 2023-12-21 01:36:43 +02:00
uumas
298f053835 authentik: add mounts 2023-12-21 01:36:13 +02:00
uumas
6acb2d17dd grafana: add oauth support 2023-12-21 01:35:34 +02:00
uumas
b6e379a3f2 add alertmanager role 2023-12-21 01:33:54 +02:00
uumas
c5a54827d4 prometheus: add blackbox exporter 2023-12-21 01:32:53 +02:00
uumas
6f1bcecf25 container: formatting 2023-12-21 01:31:34 +02:00
uumas
5bf47c73a7 container: add switch for hcloud 2023-12-21 01:31:05 +02:00
uumas
681b788ac4 prometheus: add support for installing webhook server 2023-12-21 01:22:53 +02:00
uumas
0eeeecb549 prometheus: config cleanup, actually use recording and alerting rules 2023-12-21 01:19:23 +02:00
uumas
44665bae12 prometheus: install alertmanager 2023-12-21 01:18:27 +02:00
uumas
8dc0ec798f promehteus: add alerting and recording rules 2023-12-21 01:14:31 +02:00
uumas
e9d1eed01b container: flush handler in the end 2023-12-21 01:12:24 +02:00
uumas
8d2999fe87 container: fix check mode 2023-12-21 01:12:15 +02:00
uumas
d80641623e container: switch to fqcns 2023-12-21 01:12:02 +02:00
uumas
22227d9ffc container: small fixes 2023-12-21 01:10:56 +02:00
uumas
3e9ea95ad7 container: add copypath mounts for copying whole directories to mount in container 2023-12-21 01:08:02 +02:00
uumas
d76dbf6e3c container: add restart container handler for changed template 2023-12-21 01:03:52 +02:00
uumas
ef5d83b188 prometheus: use docker_mounts instead of volumes 2023-12-21 01:00:39 +02:00
uumas
74e9eb8dcb prometheus: include instead of import 2023-12-21 01:00:04 +02:00
uumas
9a4c7c9440 container: add molecule tests 2023-07-19 10:11:49 +03:00
uumas
f99fbc0483 authentik: add smtp 2023-07-19 09:56:04 +03:00
uumas
c35c3f73a0 docker: remove molecule example comment 2023-07-19 09:55:18 +03:00
uumas
9787fbf0db docker: add buildx 2023-07-14 15:44:35 +03:00
uumas
79b8e4fa27 docker: ansible-lint 2023-07-14 15:34:29 +03:00
uumas
1596b2bfc5 docker: use uumas.general.apt_repository for apt repo 2023-07-14 15:25:26 +03:00
uumas
2fae11dd33 add authentik role 2023-07-05 16:18:18 +03:00
uumas
d32706bae6 grafana: fix argspec descriptions 2023-07-05 16:10:31 +03:00
uumas
195210346b container: add support for redis 2023-07-05 16:10:31 +03:00
uumas
3f5b6c0558 container: Initialize container_networks to an empty list 2023-07-05 16:10:31 +03:00
uumas
4b7a359663 container: make additional containers use only first docker network 2023-07-05 16:10:30 +03:00
uumas
4d9edf1532 container: make dockerfile variable officially supported and more modular 2023-07-05 16:10:07 +03:00
uumas
8812459beb alpine: add dockerfile to argspec 2023-07-05 15:33:54 +03:00
uumas
011b2036d6 v0.8.7 2023-06-27 17:35:09 +03:00
uumas
7f6ae18c5c Merge branch 'master' of git.uumas.fi:uumas/ansible-docker 2023-06-27 17:32:56 +03:00
uumas
0b76fd58c7 v0.8.6 2023-06-27 17:22:58 +03:00
uumas
851ff2aaf8 wordpress: support configuring php timeouts 2023-06-27 17:22:39 +03:00
uumas
901a2f7450 prometheus: add customizable storage retention, default to 10 years 2023-06-26 13:12:09 +03:00
uumas
64aa8c574c container: support setting docker command 2023-06-26 13:11:19 +03:00
uumas
bbe86222da wordpress: add libssl-dev as a requirement for memcached (https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1038149 2023-06-20 15:57:43 +03:00
uumas
68e4bcdbbd container: move additional services to separate tasks file 2023-06-06 21:17:50 +03:00
uumas
c7c437373f fix legacy docker_volumes configuration support 2023-05-16 07:17:59 +03:00
uumas
234bb70d73 v0.8.5 2023-05-09 21:47:41 +03:00
uumas
7af0e08dfc container: basic auth for phpmyadmin 2023-05-09 21:46:21 +03:00
uumas
09daba7fd7 container: support phpmyadmin for mariadb 2023-05-09 21:22:28 +03:00
uumas
a2225cea5f container: use uumas.general.vhost instead of reverse_proxy 2023-05-09 20:17:42 +03:00
uumas
e51052c34b v0.8.4 2023-04-21 08:21:37 +03:00
uumas
2893a99036 container: build docker image in separate directory 2023-04-21 08:13:01 +03:00
uumas
9ca48d376a container: fix db volume 2023-04-21 08:12:31 +03:00
uumas
079b008111 container: support bind mounted db data directory and mysql config 2023-04-21 07:44:23 +03:00
uumas
20e38332e6 container: reorganize volume directory creation 2023-04-21 05:09:14 +03:00
uumas
f845962ed9 container: use mount instead of volume for db 2023-04-21 04:08:15 +03:00
uumas
165a04fa51 wordpress: add customizable php.ini 2023-04-21 03:24:24 +03:00
uumas
309bd41836 wordpress: move wordpress_docker_volumes to vars as it should generally not be overridden 2023-04-21 03:24:10 +03:00
uumas
64491514a9 v0.8.3 2023-04-18 19:38:57 +03:00
uumas
7504ba96d0 add uisp role 2023-04-18 19:38:43 +03:00
uumas
851504030a container: add proxy target protocol and network mode vars to argspec 2023-04-18 19:37:57 +03:00
uumas
6fb2e23151 container: set TZ variable 2023-04-18 19:37:29 +03:00
uumas
7369fe30db v0.8.2 2023-03-24 21:07:05 +02:00
uumas
db49a87e29 jitsi: Set default reverse_proxy_type 2023-03-24 20:43:41 +02:00
uumas
d53f3b0d7d v0.8.1 2023-03-24 20:05:58 +02:00
uumas
df0005d2f4 Make wordpress role pass container argument spec validation when docker_service_suffix is defined 2023-03-24 20:03:40 +02:00
uumas
472ad4286f Add role readmes 2023-03-24 18:41:26 +02:00
uumas
9103bafb8d v0.8.0 2023-03-24 18:31:44 +02:00
uumas
aa7216d6e2 Add Prometheus role 2023-03-21 01:33:57 +02:00
uumas
c569974ba7 Fix argument specs typo 2023-03-21 01:13:31 +02:00
uumas
d5f2bb84c6 Add Grafana role 2023-03-21 01:13:09 +02:00
uumas
265270eed3 Add DokuWiki role 2023-03-21 00:51:34 +02:00
uumas
93e76562f4 Generalize alpine argument_specs 2023-03-21 00:44:36 +02:00
uumas
0541367cb3 Update container argument_specs, add alpine role as a template 2023-03-21 00:32:01 +02:00
uumas
9d7d11c384 v0.7.2 2023-03-15 03:32:07 +02:00
uumas
70bc3ce15b container: fix ansible check mode when image not present on host 2023-03-15 03:31:24 +02:00
uumas
2b43003564 gitea: fix mailer, add require signin view option 2023-03-15 00:16:57 +02:00
uumas
513392180a wordpress: fix dockerfile default 2023-03-15 00:08:54 +02:00
uumas
a1c23a5f1f jitsi: support muc_census 2023-03-15 00:08:20 +02:00
uumas
202046f442 container: fix traefik support 2023-03-15 00:05:41 +02:00
uumas
13f81e0c3e container: work in check mode 2023-03-15 00:05:24 +02:00
uumas
8a3d588a66 release 0.7.0 2023-02-07 18:29:40 +02:00
uumas
5991385def container: fix legacy docker_volumes compatibility, add missing arguments to argument specs 2023-02-07 18:00:06 +02:00
uumas
5a20226105 set volume types correctly 2023-02-07 16:14:52 +02:00
uumas
df996e5ef7 container: fix specifying docker_networks 2023-02-03 06:50:42 +02:00
uumas
5204ee38c2 container: fix template mounts 2023-02-03 06:50:22 +02:00
uumas
88e32f216a move container docs to arguments specs 2023-02-03 05:26:53 +02:00
uumas
3db21c6a05 container: add argument specs 2023-02-03 05:24:00 +02:00
uumas
9551382693 container: cleanups, fixes 2023-02-03 05:22:57 +02:00
uumas
f45f7d25e0 container: rework bind mount directory permissions, move from volume syntax to mount syntax, add single file template volumes 2023-02-03 05:15:39 +02:00
uumas
3c670d5832 container: run proxy tasks only when proxy will be installed, proxy cleanup 2023-02-03 05:06:58 +02:00
uumas
5a9c982f69 container: always pull image in a separate task before container creation 2023-02-03 05:04:30 +02:00
uumas
4c6363cab0 container: reset variables in role init, define more defaults outside tasks 2023-02-03 05:01:42 +02:00
uumas
a58c5bd47b move vars from defaults to vars 2023-02-03 04:54:38 +02:00
uumas
d77111e2fd split container tasks to multiple files for readability 2023-02-03 04:35:11 +02:00
uumas
d92d72f18f add restart docker handler 2023-02-03 03:16:22 +02:00
uumas
3150bf5c2c Update galaxy.yml, make ansible-lint happier 2022-11-25 20:00:47 +02:00
uumas
c74a56a2e2 add wordpress role with lots of bells and whistles 2022-11-25 18:58:20 +02:00
uumas
9001420597 container: support custom built images, mariadb, bind mounts, custom user 2022-11-25 18:54:56 +02:00
uumas
ab357620f7 update jitsi 2022-10-14 00:28:01 +03:00
uumas
568c40740e add container docs 2022-05-15 00:44:29 +03:00
uumas
166cc09277 jitsi support turn 2022-05-14 00:42:18 +03:00
uumas
11991867c6 unifi docs 2022-05-12 00:52:16 +03:00
107 changed files with 3588 additions and 150 deletions

View File

@@ -7,3 +7,6 @@ To add a new role:
1. Usually meta/main.yml with a dependency on the container role (see the sketch below)
1. README.md
1. Add docs to docs/
The following roles have default versions which should probably be overridden:
- wordpress: `wordpress_tag`
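As an illustration of the meta-dependency convention mentioned above, a minimal meta/main.yml sketch for a hypothetical new role (the service name and image are made up; newer roles in this diff, such as alpine and authentik, call the container or service role via include_role from tasks/main.yml instead):
```
---
dependencies:
  - role: container
    docker_service: myservice              # hypothetical service name
    docker_image: myorg/myservice:latest   # hypothetical image
    docker_image_http_port: 8080           # port the image listens on for HTTP
```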

View File

@@ -5,8 +5,8 @@ These variables are required by multiple roles. Example values included.
timezone: 'Europe/Helsinki'
admin_email: 'admin@domain.tld'
smtp_server: smtp.domain.tld
smtp_from: sender@domain.tld
smtp_server: smtp.domain.tld # Smtp server, must be reachable on port 587 with tls
smtp_from: sender@domain.tld # Address to send mail from
```
# Optional variables
@@ -15,7 +15,7 @@ These variables are used by multiple roles and have the following default values
```
reverse_proxy_type: caddy # Allowed values: caddy, traefik, none
smtp_from: # not defined, no smtp login by default
smtp_user: # not defined, no smtp login by default
smtp_pw: # not defined, see above
```

docs/container.md Normal file
View File

@@ -0,0 +1 @@
Please see `roles/container/meta/argument_specs.yml` for all possible variables.
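For orientation, a minimal invocation sketch based on that argument spec; the host group, service name, image, port and domain below are hypothetical, and admin_email is one of the general variables documented earlier:
```
- hosts: docker_hosts
  tasks:
    - name: Example service in a container
      ansible.builtin.include_role:
        name: uumas.docker.container
      vars:
        docker_service: example
        docker_image: nginx:alpine
        docker_image_http_port: 80      # port inside the container
        docker_database: postgres       # optional; needs database_passwords
        database_passwords:
          example: changeme
        ports:
          example:
            http: 28080                 # host port the reverse proxy targets
        docker_vhost_domains:
          example:
            - example.domain.tld
        docker_mounts:
          - name: data
            path: /data
        admin_email: admin@domain.tld
```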

View File

@@ -7,3 +7,11 @@ ports:
jitsi_domain: 'jitsi.domain.tld'
```
These vars are optional:
```
# for TURN, no turn server is used if not defined
turn_domain: turn.domain.tld
turn_secret: secret
```

docs/unifi.md Normal file
View File

@@ -0,0 +1,14 @@
Unifi is installed with host network mode.
# Required variables
These variables are required. Example values included. Some general variables might also be required for this role.
```
ports:
unifi:
https: 8443
docker_vhost_domains:
unifi:
- unifi.domain.tld
```

docs/wordpress.md Normal file
View File

@@ -0,0 +1,21 @@
# Required variables
These variables are required. Example values included.
```
ports:
wordpress_http: 8080
docker_vhost_domains:
wordpress:
- wordpress.domain.tld
database_passwords:
wordpress: secret
```
# Optional variables
These variables have default values listed below
```
wordpress_tag: php8.1
```

View File

@@ -2,11 +2,12 @@
namespace: uumas
name: docker
version: 0.5.3
description: Roles for installing services in docker containers
version: 0.9.1
readme: README.md
repository: https://git.uumas.fi/uumas/ansible-docker
license_file: LICENSE
dependencies:
uumas.general: '>=0.5.0'
uumas.general: '>=0.5.13'
authors:
- uumas

View File

@@ -0,0 +1 @@
Sets up a prometheus alertmanager docker container.

View File

@@ -0,0 +1,8 @@
---
alertmanager_storage_retention: "{{ prometheus_storage_retention | default('3650d') }}"
alertmanager_smtp_server: "{{ smtp_server | default('') }}"
alertmanager_smtp_from: "{{ smtp_from | default('') }}"
alertmanager_smtp_user: "{{ smtp_user | default('') }}"
alertmanager_smtp_pw: "{{ smtp_pw | default('') }}"

View File

@@ -0,0 +1,82 @@
---
argument_specs:
main:
short_description: Prometheus alertmanager docker container
options:
alertmanager_storage_retention:
description: Period of time for which alertmanager data is stored. A number followed by a unit (s, m, h, d, w, y).
type: str
required: false
default: "{{ prometheus_storage_retention | default('3650d') }}"
alertmanager_smtp_server:
description: SMTP server to use for sending mail. Must be reachable on port 587. Emails are not sent if it is not defined.
type: str
required: false
default: "{{ smtp_server | default('') }}"
alertmanager_smtp_from:
description: Address to send mail from. Required if sending emails.
type: str
required: false
default: "{{ smtp_from | default('') }}"
alertmanager_smtp_user:
description: User to login to smtp server with. No authentication if not defined.
type: str
required: false
default: "{{ smtp_user | default('') }}"
alertmanager_smtp_pw:
description: Password for the smtp user
type: str
required: false
default: "{{ smtp_pw | default('') }}"
smtp_server:
description: Global smtp server value, default for alertmanager_smtp_server
type: str
required: false
smtp_from:
description: Global smtp from value, default for alertmanager_smtp_from
type: str
required: "{{ alertmanager_smtp_server | length > 0 and alertmanager_smtp_from | length == 0 }}"
smtp_user:
description: Global smtp user value, default for alertmanager_smtp_user
type: str
required: false
smtp_pw:
description: Global smtp password value, default for alertmanager_smtp_pw
type: str
required: "{{ alertmanager_smtp_server | length > 0 and alertmanager_smtp_user | length > 0 and alertmanager_smtp_pw | length == 0 }}"
# All options after this will be passed directly to the container role
docker_service_suffix:
description: "Passed to container role"
required: false
docker_host_user:
description: "Passed to container role"
required: false
database_passwords:
description: "Passed to container role"
required: false
docker_additional_services:
description: "Passed to container role"
required: false
docker_volume_type:
description: "Passed to container role"
required: false
reverse_proxy_type:
description: "Passed to container role"
required: false
ports:
description: "Passed to container role"
required: false
docker_vhost_domains:
description: "Passed to container role"
required: false
docker_vhost_additional_locations:
description: "Passed to container role"
required: false
docker_entrypoint:
description: "Passed to container role"
required: false

View File

@@ -0,0 +1,18 @@
---
- name: Prometheus alertmanager container
include_role:
name: service
vars:
docker_service: alertmanager
docker_image: prom/alertmanager
reverse_proxy_type: none
docker_command:
- "--config.file=/etc/alertmanager/alertmanager.yml"
- "--storage.path=/alertmanager"
- "--data.retention={{ alertmanager_storage_retention }}"
docker_mounts:
- name: data
path: /alertmanager
- template: alertmanager.yml
path: /etc/alertmanager/alertmanager.yml

View File

@@ -0,0 +1,68 @@
---
# {{ ansible_managed }}
global:
# The smarthost and SMTP sender used for mail notifications.
{% if alertmanager_smtp_server | length > 0 %}
smtp_smarthost: '{{ alertmanager_smtp_server }}:587'
smtp_from: '{{ alertmanager_smtp_from }}'
{% if alertmanager_smtp_user | length > 0 %}
smtp_auth_username: '{{ alertmanager_smtp_user }}'
smtp_auth_password: '{{ alertmanager_smtp_pw }}'
{% endif %}
{% endif %}
# The directory from which notification templates are read.
templates:
- '/etc/alertmanager/template/*.tmpl'
# The root route on which each incoming alert enters.
route:
# The labels by which incoming alerts are grouped together. For example,
# multiple alerts coming in for cluster=A and alertname=LatencyHigh would
# be batched into a single group.
#
# To aggregate by all possible labels use '...' as the sole label name.
# This effectively disables aggregation entirely, passing through all
# alerts as-is. This is unlikely to be what you want, unless you have
# a very low alert volume or your upstream notification system performs
# its own grouping. Example: group_by: [...]
group_by: ['alertname', 'cluster', 'service']
# When a new group of alerts is created by an incoming alert, wait at
# least 'group_wait' to send the initial notification.
# This way ensures that you get multiple alerts for the same group that start
# firing shortly after another are batched together on the first
# notification.
group_wait: 30s
# When the first notification was sent, wait 'group_interval' to send a batch
# of new alerts that started firing for that group.
group_interval: 5m
# If an alert has successfully been sent, wait 'repeat_interval' to
# resend them.
repeat_interval: 3h
# A default receiver
receiver: uumas_email
# All the above attributes are inherited by all child routes and can
# be overwritten on each.
# The child route trees.
routes: {{ alertmanager_routes }}
inhibit_rules:
- source_matchers: [severity="critical"]
target_matchers: [severity="warning"]
# Apply inhibition if the alertname is the same.
# CAUTION:
# If all label names listed in `equal` are missing
# from both the source and target alerts,
# the inhibition rule will apply!
equal: [alertname, cluster, service]
receivers: {{ alertmanager_receivers }}
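The template above consumes alertmanager_routes and alertmanager_receivers, which have no defaults in this diff; a hedged sketch of possible values in standard Alertmanager syntax (the address and matcher are made up, and the receiver name matches the default receiver used above):
```
alertmanager_receivers:
  - name: uumas_email
    email_configs:
      - to: admin@domain.tld
alertmanager_routes:
  - matchers:
      - severity="warning"
    repeat_interval: 12h
```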

roles/alpine/README.md Normal file
View File

@@ -0,0 +1 @@
Sets up an alpine docker container. To be used as a template for other roles.

View File

@@ -0,0 +1,44 @@
---
argument_specs:
main:
short_description: Alpine container
description: "Sets up an alpine docker container. This role can be used as a template for other roles using the container role."
options:
# All options after this will be passed directly to the container role
docker_service_suffix:
description: "Passed to container role"
required: false
docker_host_user:
description: "Passed to container role"
required: false
database_passwords:
description: "Passed to container role"
required: false
docker_additional_services:
description: "Passed to container role"
required: false
docker_volume_type:
description: "Passed to container role"
required: false
reverse_proxy_type:
description: "Passed to container role"
required: false
ports:
description: "Passed to container role"
required: false
docker_vhost_domains:
description: "Passed to container role"
required: false
docker_vhost_additional_locations:
description: "Passed to container role"
required: false
docker_entrypoint:
description: "Passed to container role"
required: false
dockerfile:
description: "Passed to container role"
required: false

View File

@@ -0,0 +1,15 @@
---
- name: Alpine container
include_role:
name: container
vars:
docker_service: alpine
docker_image: alpine
docker_image_http_port: 8080
docker_database: postgres
docker_mounts:
- name: data
path: /data
docker_env:
TZ: "{{ timezone }}"

View File

@@ -0,0 +1 @@
Sets up an authentik docker container.

View File

@@ -0,0 +1,7 @@
---
authentik_tag: latest
authentik_env: {}
docker_networks:
- name: authentik
authentik_additional_mounts: []

View File

@@ -0,0 +1,93 @@
---
argument_specs:
main:
short_description: Authentik container
description: "Sets up an authentik docker container."
options:
authentik_tag:
description: 'Authentik version to use. Can be minor (2024.8) or patch (2024.8.1) or "latest".'
type: str
required: false
default: latest
authentik_env:
description: "dict of custom environment variables for authentik container"
type: dict
required: false
default: {}
authentik_additional_mounts:
description: "List of bind mounts or volumes to be mounted inside the container. Each element is a dict with path and exactly one of name, src or template"
type: list
required: false
default: []
elements: dict
options:
path:
description: "The path inside the container to mount at"
type: str
required: true
readonly:
description: "If true, volume will be mounted as read only inside the container. Only applies for named and src mounts."
type: bool
required: false
default: false
mode:
description: "Permissions for the created/templated directory. Defaults to '0644' for files, '0755' for directories. Doesn't apply for named volumes."
type: str
required: false
name:
description: "If docker_volume_type is named, the name of the named volume to be mounted at path. If docker_volume_type is bind, the name of the folder to create under /opt/<service>[/suffix]/mounts/ and mount at path."
type: str
required: false
src:
description: "Host path to bind mount inside the container."
type: str
required: false
template:
description: "Name of template without .j2 extension. Will be templated at /opt/<service>[/suffix]/mounts/<template> and mounted read only inside the container."
type: str
required: false
copypath:
description: "Name of file or directory to copy. Will be deployed from files/<copypath> to /opt/<service>[/suffix]/mounts/<copypath> and mounted read only inside the container."
type: str
required: false
# All options after this will be passed directly to the container role
docker_service_suffix:
description: "Passed to container role"
required: false
docker_host_user:
description: "Passed to container role"
required: false
database_passwords:
description: "Passed to container role"
required: false
docker_additional_services:
description: "Passed to container role"
required: false
docker_volume_type:
description: "Passed to container role"
required: false
reverse_proxy_type:
description: "Passed to container role"
required: false
ports:
description: "Passed to container role"
required: false
docker_vhost_domains:
description: "Passed to container role"
required: false
docker_vhost_additional_locations:
description: "Passed to container role"
required: false
docker_entrypoint:
description: "Passed to container role"
required: false
dockerfile:
description: "Passed to container role"
required: false
docker_networks:
description: "Passed to container role"
required: false

View File

@@ -0,0 +1,28 @@
---
- name: Authentik container
ansible.builtin.include_role:
name: service
vars:
docker_service: authentik
docker_image: "beryju/authentik:{{ authentik_tag }}"
docker_command:
- server
docker_image_http_port: 9000
docker_database: postgres
docker_additional_services:
- redis
docker_env: "{{ authentik_common_env | combine(authentik_env) }}"
docker_mounts: "{{ _authentik_mounts }}"
- name: Authentik worker container
ansible.builtin.include_role:
name: service
vars:
docker_namespace: authentik
docker_service: worker
docker_image: "beryju/authentik:{{ authentik_tag }}"
docker_command:
- worker
reverse_proxy_type: none
docker_env: "{{ authentik_common_env | combine(authentik_env) }}"
docker_mounts: "{{ _authentik_mounts }}"

View File

@@ -0,0 +1,23 @@
---
_authentik_default_mounts:
- path: /media
name: "media"
- path: /templates
name: "templates"
- path: /certs
name: "certs"
_authentik_mounts: "{{ _authentik_default_mounts + authentik_additional_mounts }}"
authentik_common_env:
AUTHENTIK_REDIS__HOST: authentik-redis
AUTHENTIK_POSTGRESQL__HOST: authentik-db
AUTHENTIK_POSTGRESQL__USER: authentik
AUTHENTIK_POSTGRESQL__NAME: authentik
AUTHENTIK_POSTGRESQL__PASSWORD: "{{ database_passwords.authentik }}"
AUTHENTIK_SECRET_KEY: "{{ authentik_secret_key }}"
AUTHENTIK_EMAIL__HOST: "{{ smtp_server }}"
AUTHENTIK_EMAIL__PORT: '587'
AUTHENTIK_EMAIL__USE_TLS: 'true'
AUTHENTIK_EMAIL__USERNAME: "{{ smtp_user | default(omit) }}"
AUTHENTIK_EMAIL__PASSWORD: "{{ smtp_pw | default(omit) }}"
AUTHENTIK_EMAIL__FROM: "{{ smtp_from }}"

View File

@@ -0,0 +1 @@
Sets up an authentik ldap outpost container.

View File

@@ -0,0 +1,2 @@
---
authentik_ldap_tag: latest

View File

@@ -0,0 +1,47 @@
---
argument_specs:
main:
short_description: Authentik LDAP outpost container
description: "Sets up an authentik LDAP outpost docker container."
options:
authentik_ldap_tag:
description: 'Authentik ldap outpost version to use. Can be minor (2024.8) or patch (2024.8.1) or "latest". Should probably be the same as authentik.'
type: str
required: false
default: latest
authentik_ldap_authentik_address:
description: "Address starting with https where authentik is reachable"
type: str
required: true
authentik_ldap_outpost_token:
description: Outpost token generated by authentik
type: str
required: true
# All options after this will be passed directly to the container role
docker_service_suffix:
description: "Passed to container role"
required: false
docker_host_user:
description: "Passed to container role"
required: false
database_passwords:
description: "Passed to container role"
required: false
docker_additional_services:
description: "Passed to container role"
required: false
docker_volume_type:
description: "Passed to container role"
required: false
ports:
description: "Passed to container role"
required: false
docker_entrypoint:
description: "Passed to container role"
required: false
dockerfile:
description: "Passed to container role"
required: false

View File

@@ -0,0 +1,12 @@
---
- name: Authentik ldap container
ansible.builtin.include_role:
name: service
vars:
docker_service: authentik-ldap
docker_image: beryju/authentik-ldap:{{ authentik_ldap_tag }}
reverse_proxy_type: none
docker_env:
AUTHENTIK_HOST: "{{ authentik_ldap_authentik_address }}"
AUTHENTIK_INSECURE: 'false'
AUTHENTIK_TOKEN: "{{ authentik_ldap_outpost_token }}"

View File

@@ -1,5 +1,12 @@
---
reverse_proxy_type: caddy
docker_additional_env: {}
docker_published_ports: []
docker_host_user: false
docker_volume_type: named
docker_mariadb_config: {}
docker_redis_persistence: false
dockerfile: []
docker_phpmyadmin_basicauth: true
docker_phpmyadmin_basicauth_users: {}
timezone: Etc/UTC

View File

@@ -0,0 +1,7 @@
---
- name: Restart container {{ docker_service_name }}
community.docker.docker_container:
name: "{{ docker_service_name }}"
restart: true
when: not container_out.changed

View File

@@ -0,0 +1,180 @@
---
argument_specs:
main:
short_description: Docker container
description: "Sets up a docker container. Supports defining networks, building a custom image, setting up memcached, databases and a reverse proxy, creating a user on the host to run the container as, named volumes, bind mounts (either auto-created or existing)."
options:
docker_service:
description: "The name of the docker service (example: gitea)"
type: str
required: true
docker_service_suffix:
description: "A suffix used to allow running multiple instances of the same service on a host. If docker_service is gitea and docker_service_suffix production, the container will be gitea_production"
type: str
required: false
docker_image:
description: "Docker image to use for the container. If dockerfile is defined, it will be used as base for locally built image (example: gitea/gitea:latest)"
type: str
required: true
dockerfile:
description: "A list of dockerfile instructions to add to the base image"
type: list
elements: str
required: false
default: []
docker_host_user:
description: "If true, creates a user on the host for this service. The container will run as this user's uid/gid. Bind mount volumes will be owned by this user."
type: bool
required: false
default: true
docker_database:
description: "Database type to set up. It will be run in a docker container accessible to the service at host <service name (with _suffix if suffix is defined)>_db on default port."
type: str
required: false
choices:
- postgres
- mariadb
- mongo
- none
default: none
database_passwords:
description: "database_passwords[docker_service] is a string with the password used for communication between the service and database. Required if docker_database is postgres or mariadb"
type: dict
required: false
docker_mariadb_config:
description: |
A dict specifying config for mariadb. For example
mysqld:
innodb_buffer_pool_size: '4G'
aria_pagecache_buffer_size: '4G'
would set
[mysqld]
innodb_buffer_pool_size = 4G
aria_pagecache_buffer_size = 4G
type: dict
required: false
default: {}
docker_additional_services:
description: "List of additional services to configure (in separate containers). These will be accessible with hostname <docker_service_name>[_suffix]_<additional_service>"
type: list
required: false
elements: str
choices:
- memcached
- redis
default: []
docker_redis_persistence:
description: Whether to save redis data to persistent storage
type: bool
required: false
default: false
docker_phpmyadmin_basicauth:
description: Whether to enable basicauth for phpmyadmin
type: bool
required: false
default: true
docker_phpmyadmin_basicauth_users:
description: Dict of users and their password hashes for phpmyadmin basic auth. Required if docker_phpmyadmin_basicauth is true and phpmyadmin is used.
type: dict
required: false
default: {}
docker_volume_type:
description: "Defines whether to use named volumes or bind mounts for mounts with name"
type: str
required: false
choices:
- named
- bind
default: named
docker_volumes:
description: "DEPRECATED List of docker volumes to mount inside the container. Use docker_mounts instead. DEPRECATED"
type: list
required: false
default: []
elements: str
docker_mounts:
description: "List of bind mounts or volumes to be mounted inside the container. Each element is a dict with path and exactly one of name, src or template"
type: list
required: false
default: []
elements: dict
options:
path:
description: "The path inside the container to mount at"
type: str
required: true
name:
description: "If docker_volume_type is named, the name of the named volume to be mounted at path. If docker_volume_type is bind, the name of the folder to create under /opt/<service>[/suffix]/mounts/ and mount at path."
type: str
required: false
src:
description: "Host path to bind mount inside the container."
type: str
required: false
template:
description: "Name of template without .j2 extension. Will be templated at /opt/<service>[/suffix]/mounts/<template> and mounted inside the container."
type: str
required: false
copypath:
description: "Name of file or directory to copy. Will be deployed from files/<copypath> to /opt/<service>[/suffix]/mounts/<copypath> and mounted inside the container."
type: str
required: false
reverse_proxy_type:
description: "Defines which kind of reverse proxy to configure for the container. Traefik support is experimental."
type: str
required: false
choices:
- caddy
- traefik
- none
default: caddy
docker_image_http_port:
description: "The port for http listener inside the container. Will be mapped to the host port defined in ports. Required if reverse_proxy_type is not none."
type: int
required: false
docker_proxy_target_protocol:
description: "Protocol to use for proxy connection"
type: str
required: false
choices:
- http
- https
default: http
ports:
description: "ports[docker_service]['http'] or ports[docker_service]['https'] defines the port on which the container will listen on for reverse proxy connections. Required if reverse_proxy_type is caddy. If ports[docker_service]['phpmyadmin'] is defined and docker_database is mariadb, phpmyadmin will is installed."
type: dict
docker_vhost_domains:
description: "docker_vhost_domains[docker_service] is a list which defines which domains should be proxied to the container. Required if reverse_proxy_type is not none. docker_vhost_domains[docker_service + '_phpmyadmin'] is used for phpmyadmin"
type: dict
docker_published_ports:
description: "A list of published ports in docker format (<host listen address>:<host port>:<container port>)"
type: list
required: false
default: []
docker_env:
description: "A dict of environment variables for the container"
type: dict
required: false
default: {}
docker_entrypoint:
description: "Docker entrypoint as list of command and arguments"
type: list
required: false
elements: str
docker_command:
description: "Docker command as list of arguments or command and arguments"
type: list
required: false
elements: str
docker_network_mode:
description: "Docker container network mode"
type: str
required: false
timezone:
description: "Timezone set in the TZ env var"
type: str
required: false
default: Etc/UTC
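For illustration, a hedged docker_mounts example combining the four variants described above (the paths, names and files are made up):
```
docker_mounts:
  - name: data                      # named volume, or bind dir under mounts/ if docker_volume_type is bind
    path: /var/lib/example
  - src: /srv/example/config        # existing host path, bind mounted
    path: /etc/example
  - template: example.conf          # rendered from templates/example.conf.j2
    path: /etc/example/example.conf
  - copypath: static                # copied from files/static
    path: /usr/share/example/static
```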

View File

@@ -1,10 +1,4 @@
---
dependencies:
- docker
- role: uumas.general.reverse_proxy
vhost_id: "{{ docker_service }}"
vhost_domains: "{{ docker_vhost_domains[docker_service] }}"
proxy_target_protocol: "{{ docker_proxy_target_protocol | default('http') }}"
proxy_target_port: "{{ ports[docker_service][proxy_target_protocol] }}"
when: reverse_proxy_type != 'none' and reverse_proxy_type != 'traefik'
- role: docker

View File

@@ -0,0 +1,18 @@
---
- name: Converge
hosts: all
tasks:
- name: Nginx container
import_role:
name: container
vars:
docker_service: nginx
docker_image: nginx
docker_image_http_port: 80
admin_email: test@example.com
ports:
nginx:
http: 28001
docker_vhost_domains:
nginx:
- localhost

View File

@@ -0,0 +1,15 @@
---
dependency:
name: galaxy
driver:
name: podman
platforms:
- name: bullseye
image: git.uumas.fi/uumas/molecule-testbed:bullseye-docker
command: /lib/systemd/systemd
pre_build_image: true
privileged: true
provisioner:
name: ansible
verifier:
name: ansible

View File

@@ -0,0 +1,83 @@
---
- name: Verify
hosts: all
gather_facts: false
tasks:
- name: Ensure https://localhost returns 200
ansible.builtin.uri:
url: https://localhost
validate_certs: false
return_content: true
register: get_localhost
- name: Assert caddy responded on https://localhost
ansible.builtin.assert:
that: "(get_localhost.server | split(', '))[0] == 'Caddy'"
- name: Assert nginx responded on https://localhost
ansible.builtin.assert:
that: "(get_localhost.server | split(', '))[1].startswith('nginx')"
- name: Get /opt/nginx directory info
ansible.builtin.stat:
path: /opt/nginx
register: opt_nginx_stat
- name: Assert /opt/nginx doesn't exist
ansible.builtin.assert:
that: not opt_nginx_stat.stat.exists
msg: /opt/nginx should not have been created but it was
- name: Get host passwd nginx user
ansible.builtin.getent:
database: passwd
key: nginx
fail_key: false
- name: Assert nginx user does not exist
ansible.builtin.assert:
that: getent_passwd.nginx == None
msg: "nginx user should not exist but it does ({{ getent_passwd }})"
- name: Get nginx container info
community.docker.docker_container_info:
name: nginx
register: container_out
- name: Assert container port 80 forwarded to host 28001
ansible.builtin.assert:
that:
- "container_out.container.HostConfig.PortBindings['80/tcp'] is defined"
- "container_out.container.HostConfig.PortBindings['80/tcp'][0].HostPort == '28001'"
msg: "Container port 80 not correctly forwarded to host port. Port bindings output was {{ container_out.container.HostConfig.PortBindings }}"
- name: Assert container user not set
ansible.builtin.assert:
that: container_out.container.Config.User == ""
- name: Get container image info
community.docker.docker_image_info:
name: "{{ container_out.container.Image }}"
register: container_image_out
- name: Assert nginx image not built locally
assert:
that: container_image_out.images[0].RepoTags[0] == 'nginx:latest'
msg: "Nginx image tag incorrect. It should have been nginx:latest but it was {{ container_image_out.images[0].RepoTags }}"
- name: Get docker host info
community.docker.docker_host_info:
volumes: true
register: docker_host_out
- name: Assert all containers are running
ansible.builtin.assert:
that: docker_host_out.host_info.Containers == docker_host_out.host_info.ContainersRunning
msg: There should have been {{ docker_host_out.host_info.Containers }} containers running but there were {{ docker_host_out.host_info.ContainersRunning }}
- name: Assert no extra containers were created
ansible.builtin.assert:
that: docker_host_out.host_info.Containers == 1
msg: There should have been 1 container created but there were {{ docker_host_out.host_info.Containers }}
- name: Assert no extra images were pulled
ansible.builtin.assert:
that: docker_host_out.host_info.Images == 1
msg: There should have been 1 image present but there were {{ docker_host_out.host_info.Images }}
- name: Assert no volumes were created
ansible.builtin.assert:
that: docker_host_out.volumes | length == 0
msg: There should have been no volumes present but there were {{ docker_host_out.volumes | length }}

View File

@@ -0,0 +1,54 @@
---
- name: "Memcached container for {{ docker_service_name }}"
docker_container:
name: "{{ docker_service_name }}_memcached"
image: memcached:alpine
pull: true
restart_policy: always
networks:
- "{{ container_networks[0] }}"
log_driver: local
when: "'memcached' in docker_additional_services"
- name: Redis
when: "'redis' in docker_additional_services"
block:
- name: Reset redis container mount definition var
set_fact:
redis_container_mount_definition: []
- name: Set up redis container data bind mount
when: docker_volume_type == 'bind'
block:
- name: Create redis data bind mount directory for {{ docker_service_name }}
file:
path: "{{ docker_mounts_dir }}/redis"
state: directory
- name: Set redis_container_mount_definition for redis data bind mount
set_fact:
redis_container_mount_definition:
- source: "{{ docker_mounts_dir + '/redis' }}"
target: /data
type: bind
- name: Set redis_container_mount_definition for redis data named volume
set_fact:
redis_container_mount_definition:
source: "{{ docker_service_name + '_redis' }}"
target: /data
type: volume
when: docker_volume_type == 'named'
- name: "Redis container for {{ docker_service_name }}"
docker_container:
name: "{{ docker_service_name }}_redis"
image: redis:alpine
pull: true
restart_policy: always
mounts: "{{ redis_container_mount_definition }}"
command: "{{ '--save 60 1' if docker_redis_persistence else omit }}"
networks:
- "{{ container_networks[0] }}"
log_driver: local

View File

@@ -0,0 +1,96 @@
---
- name: Set postgres container vars
set_fact:
db_container_image: 'postgres:14-alpine'
db_container_env:
POSTGRES_USER: "{{ docker_service_name }}"
POSTGRES_PASSWORD: "{{ database_passwords[docker_service_name] }}"
db_container_data: /var/lib/postgresql/data
when: docker_database == 'postgres'
- name: Set mariadb container vars
set_fact:
db_container_image: mariadb:10
db_container_env:
MARIADB_USER: "{{ docker_service_name }}"
MARIADB_DATABASE: "{{ docker_service_name }}"
MARIADB_PASSWORD: "{{ database_passwords[docker_service_name] }}"
MARIADB_RANDOM_ROOT_PASSWORD: "{{ database_passwords[docker_service_name + '_root'] is not defined | string }}"
MARIADB_ROOT_PASSWORD: "{{ database_passwords[docker_service_name + '_root'] | default(omit) }}"
db_container_data: /var/lib/mysql
db_image_port: 3306
when: docker_database == 'mariadb'
- name: Set mongo container vars
set_fact:
db_container_image: 'mongo:latest'
db_container_data: /data/db
when: docker_database == 'mongo'
- name: Reset db container mount definition var
set_fact:
db_container_mount_definition: []
- name: Set up database container data bind mount
when: docker_volume_type == 'bind'
block:
- name: Create db data bind mount directory for {{ docker_service_name }}
file:
path: "{{ docker_mounts_dir }}/db"
state: directory
- name: Set db_container_mount_definition for db data bind mount
set_fact:
db_container_mount_definition: "{{ db_container_mount_definition + [{'source': docker_mounts_dir + '/db', 'target': db_container_data, 'type': 'bind'}] }}"
- name: Set up database container config bind mount
when: db_config_mounts_needed
block:
- name: Put mariadb config in place
template:
src: mariadb.cnf.j2
dest: "{{ docker_mounts_dir }}/mariadb.cnf"
- name: Set db_container_mount_definition for db config bind mount
set_fact:
db_container_mount_definition: "{{ db_container_mount_definition + [{'source': docker_mounts_dir + '/mariadb.cnf', 'target': '/etc/mysql/conf.d/custom.cnf', 'type': 'bind', 'read_only': true}] }}"
- name: Set db_container_mount_definition for db data named volume
set_fact:
db_container_mount_definition: "{{ db_container_mount_definition + [{'source': docker_service_name + '_db', 'target': db_container_data, 'type': 'volume'}] }}"
when: docker_volume_type == 'named'
- name: Set db published ports var
set_fact:
db_published_ports: ["127.0.0.1:{{ ports[docker_service_name].db }}:{{ db_image_port }}"]
when: ports[docker_service_name].db is defined
- name: Database container for {{ docker_service_name + ' (' + docker_database + ')' }}
docker_container:
name: "{{ docker_service_name }}_db"
image: "{{ db_container_image }}"
pull: true
env: "{{ db_container_env | default(omit) }}"
published_ports: "{{ db_published_ports | default(omit) }}"
restart_policy: always
mounts: "{{ db_container_mount_definition }}"
networks:
- "{{ container_networks[0] }}"
log_driver: local
- name: phpMyAdmin container for {{ docker_service_name }}
docker_container:
name: "{{ docker_service_name }}_phpmyadmin"
image: phpmyadmin
pull: true
env:
PMA_ABSOLUTE_URI: "https://{{ docker_vhost_domains[docker_service_name + '_phpmyadmin'][0] }}"
PMA_HOST: "{{ docker_service_name }}_db"
published_ports:
- "127.0.0.1:{{ ports[docker_service_name]['phpmyadmin'] }}:80"
restart_policy: always
networks:
- "{{ container_networks[0] }}"
log_driver: local
when: docker_database == 'mariadb' and ports[docker_service_name]['phpmyadmin'] is defined

View File

@@ -0,0 +1,14 @@
---
- name: "Create user for {{ docker_service_name }}"
user:
name: "{{ docker_service_name }}"
home: "/opt/{{ docker_service }}/{{ docker_service_suffix | default('') }}"
create_home: false
system: true
shell: /bin/bash
register: user
- name: Set docker container user
set_fact:
docker_user: "{{ user.uid }}:{{ user.group }}"

View File

@@ -0,0 +1,65 @@
---
- name: Image build
when: dockerfile_needed
block:
- name: Set docker_build_directory variable
set_fact:
docker_build_directory: /opt/{{ docker_service }}/build
- name: Create container build directory
file:
path: "{{ docker_build_directory }}"
state: directory
- name: Put dockerfile in place
template:
src: Dockerfile.j2
dest: "{{ docker_build_directory }}/Dockerfile"
mode: 0644
- name: Build docker image for {{ docker_service }}
docker_image:
name: "local_{{ docker_service }}"
source: build
force_source: true
build:
pull: true
path: "{{ docker_build_directory }}"
register: built_image
- name: Pull container image for {{ docker_service }}
docker_image:
name: "{{ docker_image }}"
source: pull
force_source: true
register: pulled_image
when: not dockerfile_needed
- name: Set container_image variable
set_fact:
container_image: "{{ item.image }}"
when: item.skipped is not defined or not item.skipped
loop:
- "{{ built_image }}"
- "{{ pulled_image }}"
- name: Check mode image info
when: ansible_check_mode
block:
- name: Get docker image info for check mode
docker_image_info:
name: "{{ ('local_' + docker_service) if dockerfile is defined and dockerfile | length > 0 else docker_image }}"
register: existing_image
- name: Set check mode container_image variable
set_fact:
container_image: "{{ existing_image.images[0] }}"
when: existing_image.images | length > 0
- name: Set image user variable
set_fact:
image_user: "{{ container_image.Config.User }}"
when:
- not ansible_check_mode
- container_image.Config.User | length > 0

View File

@@ -0,0 +1,58 @@
---
- name: Reset variables
set_fact:
docker_volume_definition: []
container_published_ports: []
docker_volumes_new: []
final_docker_volumes: "{{ docker_volumes }}"
container_image: ''
container_networks: []
- name: Warn about docker_volumes legacy format
debug:
msg: "docker_volumes is deprecated. This support may be removed after december 2022. Use docker_mounts instead!"
when: docker_volumes | length > 0
- name: Convert docker_volumes from legacy format (DEPRECATED)
when: docker_volumes | length > 0 and docker_volumes[0] is not mapping
block:
- name: Add legacy docker volumes to docker_volumes_new using the new format
set_fact:
docker_volumes_new: "{{ docker_volumes_new | default([]) + [{'name': item.split(':')[0] | regex_replace('^' + docker_service_name + '_', ''), 'path': item.split(':')[1]}] }}"
when: "'/' not in item.split(':')[0]"
loop: "{{ docker_volumes }}"
- name: Add legacy docker src bind mounts to docker_volumes_new using the new format
set_fact:
docker_volumes_new: "{{ docker_volumes_new | default([]) + [{'src': item.split(':')[0], 'path': item.split(':')[1]}] }}"
when: "'/' in item.split(':')[0]"
loop: "{{ docker_volumes }}"
- name: Set final_docker_volumes variable
set_fact:
final_docker_volumes: "{{ docker_volumes_new }}"
- name: Convert final_docker_volumes to docker_mounts (DEPRECATED)
set_fact:
docker_mounts: "{{ final_docker_volumes }}"
when: docker_mounts | length == 0 and final_docker_volumes | length > 0
- name: Set assistive variables
set_fact:
template_mounts_needed: "{{ docker_mounts | selectattr('template', 'defined') | list | length > 0 }}"
copypath_mounts_needed: "{{ docker_mounts | selectattr('copypath', 'defined') | list | length > 0 }}"
volumes_needed: "{{ docker_mounts | selectattr('name', 'defined') | list | length > 0 or docker_database != 'none' }}"
dockerfile_needed: "{{ dockerfile | length > 0 }}"
db_config_mounts_needed: "{{ docker_mariadb_config | length > 0 }}"
- name: Set more assistive variables
set_fact:
bind_volumes_needed: "{{ volumes_needed and docker_volume_type == 'bind' }}"
named_volumes_needed: "{{ volumes_needed and docker_volume_type == 'named' }}"
- name: Set even more assistive variables
set_fact:
create_opt_directory: "{{ dockerfile_needed or docker_host_user or bind_volumes_needed or template_mounts_needed or copypath_mounts_needed or db_config_mounts_needed }}"
create_mounts_directory: "{{ bind_volumes_needed or template_mounts_needed or copypath_mounts_needed or db_config_mounts_needed }}"
- name: Set docker service full name
set_fact:
docker_service_name: "{{ docker_service }}_{{ docker_service_suffix }}"
when: docker_service_suffix is defined
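For illustration, a hedged example of the legacy-to-new conversion performed by the tasks above (the service is assumed to be gitea; the paths are made up):
```
# Legacy format (deprecated):
docker_volumes:
  - gitea_data:/data            # named volume -> name: data
  - /srv/gitea/custom:/custom   # host path    -> src: /srv/gitea/custom
# Equivalent docker_mounts produced by the conversion:
docker_mounts:
  - name: data
    path: /data
  - src: /srv/gitea/custom
    path: /custom
```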

View File

@@ -1,65 +1,124 @@
---
- name: "{{ docker_service }} docker network"
docker_network:
name: "{{ docker_service }}"
when: docker_network_mode is not defined or docker_network_mode != 'host'
- name: Deprecation warning
debug:
msg: >
The role uumas.docker.container has been deprecated. You should switch to
uumas.docker.service instead. It has the following syntax changes:\n
* container name suffixes are separated by - instead of _\n
* May require updating hostnames in configs or database\n
* May require fixing any ports, database_passwords or
docker_vhost_domains variables to new syntax\n
* May require renaming database user and database for existing database
* docker_volume_type defaults to bind. Set it to named if you want to
continue using named volumes.\n
uumas.docker.container doesn't receive any updates and may be removed
after May 2024
- name: Set published ports variable
set_fact:
container_published_ports: ["127.0.0.1:{{ ports[docker_service][proxy_target_protocol] }}:{{ docker_image_http_port }}"]
when: reverse_proxy_type != 'traefik' and (docker_network_mode is not defined or docker_network_mode != 'host')
- name: Container role initialization
import_tasks: init.yml
- name: Set networks variable
set_fact:
container_networks:
- name: "{{ docker_service }}"
when: docker_network_mode is not defined or docker_network_mode != 'host'
- name: Docker network
when: docker_network_mode is not defined or docker_network_mode != 'host' or docker_networks | length > 0
block:
- name: Set networks variable to {{ docker_service_name }}
ansible.builtin.set_fact:
container_networks:
- name: "{{ docker_service_name }}"
when: docker_networks | length == 0
- name: Set networks variable to {{ docker_networks }}
ansible.builtin.set_fact:
container_networks: "{{ docker_networks }}"
when: docker_networks | length > 0
- name: Create docker networks
community.docker.docker_network:
name: "{{ item.name }}"
loop: "{{ container_networks }}"
- name: Include traefik vars
include_vars: traefik.yml
when: reverse_proxy_type == 'traefik'
- name: Reverse proxy for container
include_tasks: proxy.yml
when: reverse_proxy_type != 'none'
- name: Set postgres container env
set_fact:
db_container_image: 'postgres:14-alpine'
db_container_env:
POSTGRES_USER: "{{ docker_service }}"
POSTGRES_PASSWORD: "{{ database_passwords[docker_service] }}"
db_container_data: /var/lib/postgresql/data
when: docker_database is defined and docker_database == 'postgres'
- name: Set mongo container env
set_fact:
db_container_image: 'mongo:latest'
db_container_data: /data/db
when: docker_database is defined and docker_database == 'mongo'
- name: Create directory /opt/{{ docker_service }}
ansible.builtin.file:
path: "/opt/{{ docker_service }}"
state: directory
mode: 0755
when: create_opt_directory
- name: "{{ docker_database }} database container for {{ docker_service }}"
docker_container:
name: "{{ docker_service }}_db"
image: "{{ db_container_image }}"
pull: yes
container_default_behavior: no_defaults
env: "{{ db_container_env | default(omit) }}"
restart_policy: always
volumes:
- "{{ docker_service }}_db:{{ db_container_data }}"
networks: "{{ container_networks | default(omit) }}"
when: docker_database is defined
- name: Container image
import_tasks: image.yml
- name: "Container for {{ docker_service }}"
docker_container:
name: "{{ docker_service }}"
image: "{{ docker_image }}"
pull: true
container_default_behavior: no_defaults
volumes: "{{ docker_volumes | default(omit) }}"
published_ports: "{{ container_published_ports | default([]) + docker_published_ports | default(omit) }}"
- name: Container user
include_tasks: host_user.yml
when: docker_host_user
- name: Create suffix directory
when: create_opt_directory and docker_service_suffix is defined
block:
- name: Create directory /opt/{{ docker_service + '/' + docker_service_suffix }}
ansible.builtin.file:
path: "/opt/{{ docker_service }}/{{ docker_service_suffix }}"
state: directory
owner: "{{ user.uid | default(omit) }}"
group: "{{ user.group | default(omit) }}"
mode: 0755
- name: Set container_workdir variable
ansible.builtin.set_fact:
container_workdir: /opt/{{ docker_service }}/{{ docker_service_suffix }}
- name: Set container_workdir variable
ansible.builtin.set_fact:
container_workdir: /opt/{{ docker_service }}
when: docker_service_suffix is not defined
- name: Create mounts directory
when: create_mounts_directory
block:
- name: Set docker_mounts_dir
ansible.builtin.set_fact:
docker_mounts_dir: "{{ container_workdir }}/mounts"
- name: Create directory {{ docker_mounts_dir }}
ansible.builtin.file:
path: "{{ docker_mounts_dir }}"
state: directory
owner: "{{ user.uid | default(omit) }}"
group: "{{ user.group | default(omit) }}"
mode: 0700
- name: Database container
include_tasks: database.yml
when: docker_database != 'none'
- name: Additional services
include_tasks: additional.yml
when: docker_additional_services is defined
- name: Container volumes
import_tasks: volumes.yml
- name: "Container for {{ docker_service_name }}"
community.docker.docker_container:
name: "{{ docker_service_name }}"
image: "{{ container_image.Id if (not ansible_check_mode) or (container_image | length > 0) else docker_image }}"
user: "{{ docker_user if docker_host_user else omit }}"
mounts: "{{ docker_volume_definition }}"
published_ports: "{{ container_published_ports + docker_published_ports }}"
labels: "{{ traefik_labels | default(omit) }}"
env: "{{ docker_env | combine(docker_additional_env) }}"
env: "{{ docker_env | combine(docker_additional_env) | combine({'TZ': timezone}) }}"
entrypoint: "{{ docker_entrypoint | default(omit) }}"
command: "{{ docker_command | default(omit) }}"
restart_policy: always
network_mode: "{{ docker_network_mode | default(omit) }}"
networks: "{{ container_networks | default(omit) }}"
networks: "{{ container_networks }}"
log_driver: local
register: container_out
- name: Reset docker_mounts if converted from docker_volumes
ansible.builtin.set_fact:
docker_mounts: []
when: final_docker_volumes | length > 0
- name: Flush handlers to trigger container restart
ansible.builtin.meta: flush_handlers

View File

@@ -0,0 +1,34 @@
---
- name: Reverse proxy
include_role:
name: uumas.general.vhost
vars:
vhost_type: reverse_proxy
vhost_id: "{{ docker_service_name }}"
vhost_proxy_target_protocol: "{{ docker_proxy_target_protocol }}"
vhost_domains: "{{ docker_vhost_domains[docker_service_name] }}"
vhost_proxy_target_port: "{{ ports[docker_service_name][vhost_proxy_target_protocol] }}"
when: reverse_proxy_type != 'traefik'
- name: Set published ports variable to http port
set_fact:
container_published_ports: ["127.0.0.1:{{ ports[docker_service_name][docker_proxy_target_protocol] }}:{{ docker_image_http_port }}"]
when:
- docker_network_mode is not defined or docker_network_mode != 'host'
- reverse_proxy_type != 'traefik'
- name: Include traefik vars
include_vars: traefik.yml
when: reverse_proxy_type == 'traefik'
- name: Reverse proxy for phpmyadmin
include_role:
name: uumas.general.vhost
vars:
vhost_type: reverse_proxy
vhost_id: "{{ docker_service_name }}_phpmyadmin"
vhost_domains: "{{ docker_vhost_domains[docker_service_name + '_phpmyadmin'] }}"
vhost_proxy_target_port: "{{ ports[docker_service_name]['phpmyadmin'] }}"
vhost_basicauth: "{{ docker_phpmyadmin_basicauth }}"
vhost_basicauth_users: "{{ docker_phpmyadmin_basicauth_users }}"
when: docker_database == 'mariadb' and ports[docker_service_name]['phpmyadmin'] is defined

View File

@@ -0,0 +1,61 @@
---
- name: Create directories and put files in them
when: create_mounts_directory
block:
- name: Define mount directory owner
set_fact:
mount_owner: "{{ user.uid if docker_host_user else image_user | default('') }}"
mount_group: "{{ user.group if docker_host_user else '' }}"
- name: "Create docker bind mount directories for {{ docker_service_name }}"
file:
path: "{{ docker_mounts_dir }}/{{ item.name }}"
state: directory
owner: "{{ mount_owner if (item.set_owner is not defined or item.set_owner) and mount_owner | length > 0 else omit }}"
group: "{{ mount_group if (item.set_group is not defined or item.set_group) and mount_group | length > 0 else omit }}"
when: item.name is defined and docker_volume_type == 'bind'
loop: "{{ docker_mounts }}"
- name: Set docker_volume_definition for named binds
set_fact:
docker_volume_definition: "{{ docker_volume_definition + [{'source': docker_mounts_dir + '/' + item.name, 'target': item.path, 'type': 'bind'}] }}"
when: item.name is defined and docker_volume_type == 'bind'
loop: "{{ docker_mounts }}"
- name: Template docker template mounts for {{ docker_service_name }}
template:
src: "{{ item.template }}.j2"
dest: "{{ docker_mounts_dir }}/{{ item.template }}"
when: item.template is defined
loop: "{{ docker_mounts }}"
notify: Restart container {{ docker_service_name }}
- name: Set docker_volume_definition for template mounts
set_fact:
docker_volume_definition: "{{ docker_volume_definition + [{'source': docker_mounts_dir + '/' + item.template, 'target': item.path, 'type': 'bind', 'read_only': true}] }}"
when: item.template is defined
loop: "{{ docker_mounts }}"
- name: Copy docker copypath mounts for {{ docker_service_name }}
copy:
src: "files/{{ item.copypath }}"
dest: "{{ docker_mounts_dir }}/"
when: item.copypath is defined
loop: "{{ docker_mounts }}"
notify: Restart container {{ docker_service_name }}
- name: Set docker_volume_definition for copypath mounts
set_fact:
docker_volume_definition: "{{ docker_volume_definition + [{'source': docker_mounts_dir + '/' + item.copypath, 'target': item.path, 'type': 'bind', 'read_only': true}] }}"
when: item.copypath is defined
loop: "{{ docker_mounts }}"
- name: Set docker_volume_definition for named volumes
set_fact:
docker_volume_definition: "{{ docker_volume_definition + [{'source': docker_service_name + '_' + item.name, 'target': item.path, 'type': 'volume'}] }}"
when: docker_volume_type == 'named' and item.name is defined
loop: "{{ docker_mounts }}"
- name: Set docker_volume_definition for src binds
set_fact:
docker_volume_definition: "{{ docker_volume_definition + [{'source': item.src, 'target': item.path, 'type': 'bind'}] }}"
when: item.src is defined
loop: "{{ docker_mounts }}"

View File

@@ -0,0 +1,6 @@
# {{ ansible_managed }}
FROM {{ docker_image }}
{% for item in dockerfile %}
{{ item }}
{% endfor %}
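As a rough illustration of how the dockerfile list feeds this template (the base image and instructions below are hypothetical, not taken from this repo), setting:

dockerfile:
  - RUN apk add --no-cache curl
  - ENV EXAMPLE_MODE=1

with docker_image: nginx:alpine would render approximately:

# Ansible managed
FROM nginx:alpine
RUN apk add --no-cache curl
ENV EXAMPLE_MODE=1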

View File

@@ -0,0 +1,8 @@
# {{ ansible_managed }}
{% for section in docker_mariadb_config | dict2items %}
[{{ section.key }}]
{% for item in section.value | dict2items %}
{{ item.key }} = {{ item.value }}
{% endfor %}
{% endfor %}

View File

@@ -0,0 +1,15 @@
---
docker_service_name: "{{ docker_service }}"
reverse_proxy_type: caddy
docker_proxy_target_protocol: http
docker_additional_env: {}
docker_database: none
docker_volumes: [] # DEPRECATED
docker_mounts: []
docker_networks: []
docker_env: {}
docker_published_ports: []

View File

@@ -2,5 +2,5 @@
traefik_labels:
traefik.enable: 'true'
"traefik.http.routers.{{ docker_service }}.rule": "Host(`{{ vhost_domains[docker_service] | join('`) || Host(`') }}`)"
"traefik.http.routers.{{ docker_service }}.tls.certresolver": 'le'
"traefik.http.routers.{{ docker_service_name }}.rule": "Host(`{{ vhost_domains[docker_service_name] | join('`) || Host(`') }}`)"
"traefik.http.routers.{{ docker_service_name }}.tls.certresolver": 'le'

View File

@@ -0,0 +1,6 @@
---
- name: Restart docker
ansible.builtin.systemd:
name: docker.service
state: restarted

View File

@@ -0,0 +1,5 @@
---
- name: Converge
hosts: all
roles:
- docker

View File

@@ -0,0 +1,15 @@
---
dependency:
name: galaxy
driver:
name: podman
platforms:
- name: bullseye
image: git.uumas.fi/uumas/molecule-testbed:bullseye
command: /lib/systemd/systemd
pre_build_image: true
privileged: true
provisioner:
name: ansible
verifier:
name: ansible

View File

@@ -0,0 +1,12 @@
---
- name: Verify
hosts: all
gather_facts: false
tasks:
- name: Docker hello world container
community.docker.docker_container:
name: hello-world
image: hello-world
detach: false
cleanup: true
register: docker_hello_world

View File

@@ -1,38 +1,30 @@
---
- name: Ensure legacy apt repository not present
ansible.builtin.file:
path: /etc/apt/sources.list.d/docker.list
state: absent
- name: Install dependencies
apt:
name:
- apt-transport-https
- ca-certificates
- curl
- gnupg
- lsb-release
update_cache: true
- name: Set dpkg arch (amd64)
set_fact:
dpkg_arch: amd64
when: ansible_architecture == 'x86_64'
- name: Set dpkg arch (arm64)
set_fact:
dpkg_arch: arm64
when: ansible_architecture == 'aarch64'
- name: Add docker repo signing key
apt_key:
id: '9DC858229FC7DD38854AE2D88D81803C0EBFCD88'
url: 'https://download.docker.com/linux/debian/gpg'
- name: Add docker repo
apt_repository:
repo: "deb [arch={{ dpkg_arch }}] https://download.docker.com/linux/{{ ansible_distribution|lower }} {{ ansible_distribution_release }} stable"
filename: 'docker'
mode: '644'
- name: Add docker apt repository
ansible.builtin.deb822_repository:
name: docker
uris: https://download.docker.com/linux/{{ ansible_distribution | lower }}
signed_by: https://download.docker.com/linux/debian/gpg
suites: "{{ ansible_distribution_release }}"
# architectures: "{{ apt_arch }}"
components:
- stable
- name: Install docker
apt:
ansible.builtin.apt:
name:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- python3-docker
- name: Ensure docker is started and enabled
ansible.builtin.systemd:
name: docker.service
state: started
enabled: true

roles/dokuwiki/README.md Normal file
View File

@@ -0,0 +1 @@
Sets up a dokuwiki docker container.

View File

@@ -0,0 +1,3 @@
---
dokuwiki_wiki_name: DokuWiki
dokuwiki_admin_email: "{{ admin_email }}"

View File

@@ -0,0 +1,48 @@
---
argument_specs:
main:
short_description: DokuWiki container
description: "Sets up a DokuWiki docker container."
options:
dokuwiki_wiki_name:
description: Name of the DokuWiki site
type: str
required: false
default: DokuWiki
dokuwiki_admin_password:
description: Password of dokuwiki admin user
type: str
required: true
dokuwiki_admin_email:
description: Email address of dokuwiki admin user
type: str
required: false
default: "{{ admin_email }}"
# All options after this will be passed directly to the container role
docker_service_suffix:
description: "Passed to container role"
required: false
docker_host_user:
description: "Passed to container role"
required: false
docker_volume_type:
description: "Passed to container role"
required: false
reverse_proxy_type:
description: "Passed to container role"
required: false
ports:
description: "Passed to container role"
required: false
docker_vhost_domains:
description: "Passed to container role"
required: false
docker_vhost_additional_locations:
description: "Passed to container role"
required: false
docker_entrypoint:
description: "Passed to container role"
required: false

View File

@@ -0,0 +1,11 @@
---
- name: Dokuwiki container
import_role:
name: service
vars:
docker_service: dokuwiki
docker_image: dokuwiki/dokuwiki:stable
docker_image_http_port: 8080
docker_mounts:
- name: data
path: /storage

View File

@@ -1,9 +1,10 @@
---
dependencies:
- docker
- role: uumas.general.reverse_proxy
- role: docker
- role: uumas.general.vhost
vhost_type: reverse_proxy
vhost_id: element
vhost_domains: "{{ element_domains }}"
proxy_target: http://127.0.0.1:{{ ports.element_http }}
vhost_proxy_target_port: "{{ ports.element_http }}"
when: reverse_proxy_type != 'none' and reverse_proxy_type != 'traefik'

View File

@@ -1,7 +1,7 @@
---
- name: Set element published ports variable
set_fact:
element_published_ports: ["127.0.0.1:{{ ports.element_http }}:80"]
when: reverse_proxy_type != 'traefik'
@@ -33,4 +33,3 @@
content: "{{ element_config }}"
dest: "{{ element_config_volume.Source }}/config.json"
mode: '644'

View File

@@ -6,9 +6,11 @@ dependencies:
docker_image: gitea/gitea:latest
docker_image_http_port: 3000
docker_database: postgres
docker_volumes:
- gitea_data:/data
- /var/lib/gitea/.ssh/:/data/git/.ssh
docker_mounts:
- name: data
path: /data
- src: /var/lib/gitea/.ssh/
path: /data/git/.ssh
docker_published_ports:
- "127.0.0.1:{{ ports.gitea.ssh }}:22"
docker_env:
@@ -24,6 +26,7 @@ dependencies:
GITEA__mailer__ENABLED: "true"
GITEA__mailer__HOST: "{{ smtp_server }}:587"
GITEA__mailer__FROM: "{{ smtp_from }}"
GITEA__mailer__MAILER_TYPE: smtp
GITERA__mailer__USER: "{{ smtp_user | default(omit) }}"
GITERA__mailer__PASSWD: "{{ smtp_pw | default(omit) }}"
GITEA__mailer__PROTOCOL: smtp
GITEA__mailer__USER: "{{ smtp_user | default(omit) }}"
GITEA__mailer__PASSWD: "{{ smtp_pw | default(omit) }}"
GITEA__service__REQUIRE_SIGNIN_VIEW: "{{ gitea_require_signin_view | default(omit) }}"

View File

@@ -4,16 +4,16 @@
group:
name: git
gid: 2132
system: yes
system: true
- name: Create git user on host for gitea ssh
user:
name: git
uid: 2132
group: git
system: yes
system: true
home: /var/lib/gitea
generate_ssh_key: yes
generate_ssh_key: true
register: git_user
- name: Add git user's own ssh key to its authorized keys

roles/grafana/README.md Normal file
View File

@@ -0,0 +1 @@
Sets up a grafana docker container.

View File

@@ -0,0 +1,9 @@
---
grafana_oauth_enabled: false
grafana_oauth_scopes:
- openid
- profile
- email
grafana_oauth_allow_sign_up: true
grafana_oauth_auto_login: false

View File

@@ -0,0 +1,93 @@
---
argument_specs:
main:
short_description: Grafana
description: "Sets up a grafana docker container"
options:
grafana_oauth_enabled:
description: Enables generic OAuth2 authentication.
type: bool
required: false
default: false
grafana_oauth_name:
description: Display name for the generic OAuth2 provider shown in the Grafana user interface.
type: str
required: false
grafana_oauth_client_id:
description: Client ID provided by your OAuth2 app.
type: str
required: "{{ grafana_oauth_enabled }}"
grafana_oauth_client_secret:
description: Client secret provided by your OAuth2 app.
type: str
required: "{{ grafana_oauth_enabled }}"
grafana_oauth_auth_url:
description: Authorization endpoint of your OAuth2 provider.
type: str
required: "{{ grafana_oauth_enabled }}"
grafana_oauth_token_url:
description: Endpoint used to obtain the OAuth2 access token.
type: str
required: "{{ grafana_oauth_enabled }}"
grafana_oauth_api_url:
description: Endpoint used to obtain user information compatible with OpenID UserInfo.
type: str
required: "{{ grafana_oauth_enabled }}"
grafana_oauth_scopes:
description: List of OAuth2 scopes.
type: list
required: false
items: str
default:
- openid
- profile
- email
grafana_oauth_role_attribute_path:
description: JMESPath expression to use for Grafana role lookup. Grafana will first evaluate the expression using the OAuth2 ID token. If no role is found, the expression will be evaluated using the user information obtained from the UserInfo endpoint. The result of the evaluation should be a valid Grafana role (Viewer, Editor, Admin or GrafanaAdmin).
type: str
required: false
grafana_oauth_allow_sign_up:
description: Controls Grafana user creation through the generic OAuth2 login. Only existing Grafana users can log in with generic OAuth if set to false.
type: bool
required: false
default: true
grafana_oauth_auto_login:
description: Set to true to enable users to bypass the login screen and automatically log in. This setting is ignored if you configure multiple auth providers to use auto-login.
type: bool
required: false
default: false
database_passwords:
description: "Passed to container role"
required: true
type: dict
docker_vhost_domains:
description: "Passed to container role"
required: true
type: dict
# All options after this will be passed directly to the container role
docker_host_user:
description: "Passed to container role"
required: false
docker_additional_services:
description: "Passed to container role"
required: false
docker_volume_type:
description: "Passed to container role"
required: false
reverse_proxy_type:
description: "Passed to container role"
required: false
ports:
description: "Passed to container role"
required: false
docker_entrypoint:
description: "Passed to container role"
required: false
docker_vhost_additional_locations:
description: "Passed to container role"
required: false

View File

@@ -0,0 +1,32 @@
---
- name: Grafana container
include_role:
name: service
vars:
docker_service: grafana
docker_image: grafana/grafana
docker_image_http_port: 3000
docker_mounts:
- name: data
path: /var/lib/grafana
docker_database: postgres
docker_env:
GF_DATABASE_TYPE: postgres
GF_DATABASE_HOST: grafana_db:5432
GF_DATABASE_NAME: grafana
GF_DATABASE_USER: grafana
GF_DATABASE_PASSWORD: "{{ database_passwords.grafana }}"
GF_SERVER_DOMAIN: "{{ docker_vhost_domains.grafana[0] }}"
GF_SERVER_ROOT_URL: "https://{{ docker_vhost_domains.grafana[0] }}"
GF_AUTH_GENERIC_OAUTH_ENABLED: "{{ 'true' if grafana_oauth_enabled else 'false' }}"
GF_AUTH_GENERIC_OAUTH_NAME: "{{ grafana_oauth_name | default(omit) }}"
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: "{{ grafana_oauth_client_id }}"
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: "{{ grafana_oauth_client_secret }}"
GF_AUTH_GENERIC_OAUTH_AUTH_URL: "{{ grafana_oauth_auth_url }}"
GF_AUTH_GENERIC_OAUTH_TOKEN_URL: "{{ grafana_oauth_token_url }}"
GF_AUTH_GENERIC_OAUTH_API_URL: "{{ grafana_oauth_api_url }}"
GF_AUTH_GENERIC_OAUTH_SCOPES: "{{ grafana_oauth_scopes | join(' ') }}"
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: "{{ 'true' if grafana_oauth_allow_sign_up else 'false' }}"
GF_AUTH_GENERIC_OAUTH_AUTO_LOGIN: "{{ 'true' if grafana_oauth_auto_login else 'false' }}"
GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH: "{{ grafana_oauth_role_attribute_path | default(omit) }}"

View File

@@ -6,8 +6,9 @@ dependencies:
docker_image: quay.io/hedgedoc/hedgedoc:latest
docker_image_http_port: 3000
docker_database: postgres
docker_volumes:
- hedgedoc_uploads:/hedgedoc/public/uploads
docker_mounts:
- name: uploads
path: /hedgedoc/public/uploads
docker_env:
CMD_DB_URL: postgres://hedgedoc:{{ database_passwords.hedgedoc }}@hedgedoc_db:5432/hedgedoc
CMD_DOMAIN: "{{ docker_vhost_domains.hedgedoc[0] }}"
@@ -15,4 +16,3 @@ dependencies:
CMD_ALLOW_ANONYMOUS: "false"
CMD_ALLOW_ANONYMOUS_EDITS: "true"
CMD_ALLOW_FREEURL: "true"

View File

@@ -0,0 +1,4 @@
---
jitsi_docker_tag: stable
reverse_proxy_type: caddy

View File

@@ -1,10 +1,11 @@
---
dependencies:
- docker
- role: uumas.general.reverse_proxy
- role: docker
- role: uumas.general.vhost
vhost_type: reverse_proxy
vhost_id: jitsi
vhost_domains:
- "{{ jitsi_domain }}"
proxy_target: http://127.0.0.1:{{ ports.jitsi_http }}
vhost_proxy_target_port: "{{ ports.jitsi_http }}"
when: reverse_proxy_type != 'traefik'

View File

@@ -5,10 +5,18 @@
name: meet.jitsi
- name: Set jitsi meet web published ports variable
set_fact:
jitsi_web_published_ports: ["127.0.0.1:{{ ports.jitsi_http }}:80"]
when: reverse_proxy_type != 'traefik'
- name: Reset jitsi meet prosody published ports variable
set_fact:
jitsi_prosody_published_ports: []
- name: Set jitsi meet prosody published ports variable
set_fact:
jitsi_prosody_published_ports: ["127.0.0.1:{{ ports.jitsi_prosody_http }}:5280"]
when: ports.jitsi_prosody_http is defined
- name: Include traefik vars
include_vars: traefik.yml
when: reverse_proxy_type == 'traefik'
@@ -16,7 +24,7 @@
- name: Jitsi meet web
docker_container:
name: 'jitsi_meet_web'
image: 'jitsi/web:latest'
image: "jitsi/web:{{ jitsi_docker_tag }}"
pull: true
container_default_behavior: no_defaults
published_ports: "{{ jitsi_web_published_ports | default(omit) }}"
@@ -29,6 +37,7 @@
ENABLE_NOISY_MIC_DETECTION: '0'
ENABLE_BREAKOUT_ROOMS: '1'
JICOFO_AUTH_USER: focus
COLIBRI_WEBSOCKET_JVB_LOOKUP_NAME: jvb.meet.jitsi
XMPP_BOSH_URL_BASE: 'http://xmpp.meet.jitsi:5280'
XMPP_DOMAIN: meet.jitsi
XMPP_AUTH_DOMAIN: auth.meet.jitsi
@@ -37,29 +46,15 @@
restart_policy: always
networks:
- name: meet.jitsi
aliases:
- meet.jitsi
register: jitsi_meet_web_out
- set_fact:
jitsi_meet_web_config_volume: "{{ jitsi_meet_web_out.container.Mounts | selectattr('Destination', 'equalto', '/config') | join }}"
- name: Disable recording and livestreaming
lineinfile:
path: "{{ jitsi_meet_web_config_volume.Source }}/config.js"
regexp: "^ (\/\/ )?{{ item }}: .*,$"
line: " {{ item }}: false,"
state: present
loop:
- fileRecordingsEnabled
- liveStreamingEnabled
- name: Jitsi meet prosody
docker_container:
name: 'jitsi_meet_prosody'
image: 'jitsi/prosody:latest'
image: "jitsi/prosody:{{ jitsi_docker_tag }}"
pull: true
container_default_behavior: no_defaults
published_ports: "{{ jitsi_prosody_published_ports }}"
env:
PUBLIC_URL: "https://{{ jitsi_domain }}"
TZ: "{{ timezone }}"
@@ -69,10 +64,16 @@
JICOFO_AUTH_PASSWORD: "{{ jitsi_pw.jicofo_auth }}"
JVB_AUTH_USER: jvb
JVB_AUTH_PASSWORD: "{{ jitsi_pw.jvb_auth }}"
TURN_CREDENTIALS: "{{ turn_secret | default(omit) }}"
TURN_HOST: "{{ turn_domain | default(omit) }}"
TURN_PORT: "{{ '443' if turn_domain is defined else omit }}"
TURNS_HOST: "{{ turn_domain | default(omit) }}"
TURNS_PORT: "{{ '443' if turn_domain is defined else omit }}"
XMPP_DOMAIN: meet.jitsi
XMPP_AUTH_DOMAIN: auth.meet.jitsi
XMPP_INTERNAL_MUC_DOMAIN: internal-muc.meet.jitsi
XMPP_MUC_DOMAIN: muc.meet.jitsi
XMPP_MODULES: 'muc_census'
restart_policy: always
exposed_ports:
- '5222'
@@ -85,7 +86,7 @@
- name: Jitsi meet jicofo
docker_container:
name: 'jitsi_meet_jicofo'
image: 'jitsi/jicofo:latest'
image: "jitsi/jicofo:{{ jitsi_docker_tag }}"
pull: true
container_default_behavior: no_defaults
env:
@@ -103,12 +104,12 @@
networks:
- name: meet.jitsi
aliases:
- meet.jitsi
- jicofo.meet.jitsi
- name: Jitsi meet video bridge
docker_container:
name: 'jitsi_meet_jvb'
image: 'jitsi/jvb:latest'
image: "jitsi/jvb:{{ jitsi_docker_tag }}"
pull: true
container_default_behavior: no_defaults
published_ports:
@@ -131,5 +132,14 @@
networks:
- name: meet.jitsi
aliases:
- meet.jitsi
- jvb.meet.jitsi
- name: Open ports for jitsi
ansible.posix.firewalld:
port: "{{ item }}"
state: enabled
permanent: true
immediate: true
loop:
- 4443/tcp
- 10000/udp

View File

@@ -6,18 +6,13 @@ dependencies:
docker_image: quay.io/keycloak/keycloak:latest
docker_image_http_port: 8080
docker_database: postgres
docker_entrypoint:
- /opt/keycloak/bin/kc.sh
- start
- --auto-build
docker_command: start --proxy-headers xforwarded
docker_env:
KEYCLOAK_ADMIN: admin
KEYCLOAK_ADMIN_PASSWORD: "{{ keycloak_admin_pw }}"
KC_HOSTNAME: "{{ docker_vhost_domains.keycloak[0] }}"
KC_HTTP_ENABLED: "true"
KC_PROXY: edge
KC_DB: postgres
KC_DB_URL: jdbc:postgresql://keycloak_db/keycloak
KC_DB_USERNAME: keycloak
KC_DB_PASSWORD: "{{ database_passwords.keycloak }}"

View File

@@ -0,0 +1 @@
Sets up a prometheus docker container.

View File

@@ -0,0 +1,16 @@
---
prometheus_scrape_interval: 5s
prometheus_evaluation_interval: 15s
prometheus_storage_retention: 3650d
prometheus_hcloud_enabled: false
prometheus_hcloud_relabel_configs: []
prometheus_install_alertmanager: true
prometheus_install_webhook: false
prometheus_install_grafana: false
alertmanager_storage_retention: "{{ prometheus_storage_retention }}"
prometheus_ping_hosts: []

View File

@@ -0,0 +1,95 @@
---
groups:
- name: BlackboxExporter
rules:
- alert: BlackboxAllWanProbesFailed
expr: 'sum by (host_type) (probe_success{host_type="wan"})==0'
for: 5s
labels:
severity: critical
annotations:
summary: Lost internet access
description: Failed to contact any wan probes
- alert: BlackboxProbeFailed
expr: 'probe_success == 0'
for: 0m
labels:
severity: error
annotations:
summary: Unable to reach (instance {{ $labels.instance }})
description: "Probe failed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxConfigurationReloadFailure
expr: 'blackbox_exporter_config_last_reload_successful != 1'
for: 0m
labels:
severity: warning
annotations:
summary: Blackbox configuration reload failure (instance {{ $labels.instance }})
description: "Blackbox configuration reload failure\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSlowProbe
expr: 'avg_over_time(probe_duration_seconds[1m]) > 1'
for: 1m
labels:
severity: warning
annotations:
summary: Getting slow responses from (instance {{ $labels.instance }})
description: "Blackbox probe took more than 1s to complete\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxProbeHttpFailure
expr: 'probe_http_status_code <= 199 OR probe_http_status_code >= 400'
for: 0m
labels:
severity: error
annotations:
summary: HTTP failure (instance {{ $labels.instance }})
description: "HTTP status code is not 200-399\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSslCertificateWillExpireSoon
expr: '3 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 20'
for: 0m
labels:
severity: warning
annotations:
summary: SSL certificate will expire soon (instance {{ $labels.instance }})
description: "SSL certificate expires in less than 20 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSslCertificateWillExpireSoon
expr: '0 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 3'
for: 0m
labels:
severity: error
annotations:
summary: SSL certificate expiry imminent (instance {{ $labels.instance }})
description: "SSL certificate expires in less than 3 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSslCertificateExpired
expr: 'round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 0'
for: 0m
labels:
severity: critical
annotations:
summary: SSL certificate expired (instance {{ $labels.instance }})
description: "SSL certificate has expired already\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxProbeSlowHttp
expr: 'avg_over_time(probe_http_duration_seconds[1m]) > 1'
for: 1m
labels:
severity: warning
annotations:
summary: Slow HTTP responses from (instance {{ $labels.instance }})
description: "HTTP request took more than 1s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxProbeSlowPing
expr: 'avg_over_time(probe_icmp_duration_seconds[1m]) > 1'
for: 1m
labels:
severity: warning
annotations:
summary: Slow ping responses from (instance {{ $labels.instance }})
description: "Blackbox ping took more than 1s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

View File

@@ -0,0 +1,349 @@
---
groups:
- name: NodeExporter
rules:
- alert: HostOutOfMemory
expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host out of memory (instance {{ $labels.instance }})
description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostMemoryUnderMemoryPressure
expr: '(rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host memory under memory pressure (instance {{ $labels.instance }})
description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostMemoryIsUnderutilized
expr: '(100 - (rate(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 1w
labels:
severity: info
annotations:
summary: Host Memory is underutilized (instance {{ $labels.instance }})
description: "Node memory is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualNetworkThroughputIn
expr: '(sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual network throughput in (instance {{ $labels.instance }})
description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualNetworkThroughputOut
expr: '(sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual network throughput out (instance {{ $labels.instance }})
description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskReadRate
expr: '(sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual disk read rate (instance {{ $labels.instance }})
description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskWriteRate
expr: '(sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk write rate (instance {{ $labels.instance }})
description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostOutOfDiskSpace
expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host out of disk space (instance {{ $labels.instance }})
description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostDiskWillFillIn24Hours
expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostOutOfInodes
expr: '(node_filesystem_files_free / node_filesystem_files * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host out of inodes (instance {{ $labels.instance }})
description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostFilesystemDeviceError
expr: 'node_filesystem_device_error == 1'
for: 0m
labels:
severity: critical
annotations:
summary: Host filesystem device error (instance {{ $labels.instance }})
description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostInodesWillFillIn24Hours
expr: '(node_filesystem_files_free / node_filesystem_files * 100 < 10 and predict_linear(node_filesystem_files_free[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskReadLatency
expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk read latency (instance {{ $labels.instance }})
description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskWriteLatency
expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk write latency (instance {{ $labels.instance }})
description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostHighCpuLoad
expr: '(sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 10m
labels:
severity: warning
annotations:
summary: Host high CPU load (instance {{ $labels.instance }})
description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuIsUnderutilized
expr: '(100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 1w
labels:
severity: info
annotations:
summary: Host CPU is underutilized (instance {{ $labels.instance }})
description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuStealNoisyNeighbor
expr: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuHighIowait
expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU high iowait (instance {{ $labels.instance }})
description: "CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskIo
expr: '(rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual disk IO (instance {{ $labels.instance }})
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostContextSwitching
expr: '((rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host context switching (instance {{ $labels.instance }})
description: "Context switching is growing on the node (> 10000 / s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSwapIsFillingUp
expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host swap is filling up (instance {{ $labels.instance }})
description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSystemdServiceCrashed
expr: '(node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host systemd service crashed (instance {{ $labels.instance }})
description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostPhysicalComponentTooHot
expr: '((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host physical component too hot (instance {{ $labels.instance }})
description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNodeOvertemperatureAlarm
expr: '(node_hwmon_temp_crit_alarm_celsius == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: critical
annotations:
summary: Host node overtemperature alarm (instance {{ $labels.instance }})
description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostRaidArrayGotInactive
expr: '(node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: critical
annotations:
summary: Host RAID array got inactive (instance {{ $labels.instance }})
description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostRaidDiskFailure
expr: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host RAID disk failure (instance {{ $labels.instance }})
description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostKernelVersionDeviations
expr: '(count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 6h
labels:
severity: warning
annotations:
summary: Host kernel version deviations (instance {{ $labels.instance }})
description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostOomKillDetected
expr: '(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host OOM kill detected (instance {{ $labels.instance }})
description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostEdacCorrectableErrorsDetected
expr: '(increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: info
annotations:
summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostEdacUncorrectableErrorsDetected
expr: '(node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkReceiveErrors
expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Receive Errors (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkTransmitErrors
expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Transmit Errors (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkInterfaceSaturated
expr: '((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 1m
labels:
severity: warning
annotations:
summary: Host Network Interface Saturated (instance {{ $labels.instance }})
description: "The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkBondDegraded
expr: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Bond Degraded (instance {{ $labels.instance }})
description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostConntrackLimit
expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host conntrack limit (instance {{ $labels.instance }})
description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostClockSkew
expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 10m
labels:
severity: warning
annotations:
summary: Host clock skew (instance {{ $labels.instance }})
description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostClockNotSynchronising
expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host clock not synchronising (instance {{ $labels.instance }})
description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostRequiresReboot
expr: '(node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 4h
labels:
severity: info
annotations:
summary: Host requires reboot (instance {{ $labels.instance }})
description: "{{ $labels.instance }} requires a reboot.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

View File

@@ -0,0 +1,58 @@
"groups":
- "name": "node-exporter.rules"
"rules":
- "expr": |
count without (cpu) (
count without (mode) (
node_cpu_seconds_total{job="node"}
)
)
"record": "instance:node_num_cpu:sum"
- "expr": |
1 - avg without (cpu, mode) (
rate(node_cpu_seconds_total{job="node", mode="idle"}[1m])
)
"record": "instance:node_cpu_utilisation:rate1m"
- "expr": |
(
node_load1{job="node"}
/
instance:node_num_cpu:sum{job="node"}
)
"record": "instance:node_load1_per_cpu:ratio"
- "expr": |
1 - (
node_memory_MemAvailable_bytes{job="node"}
/
node_memory_MemTotal_bytes{job="node"}
)
"record": "instance:node_memory_utilisation:ratio"
- "expr": |
rate(node_vmstat_pgmajfault{job="node"}[1m])
"record": "instance:node_vmstat_pgmajfault:rate1m"
- "expr": |
rate(node_disk_io_time_seconds_total{job="node", device!=""}[1m])
"record": "instance_device:node_disk_io_time_seconds:rate1m"
- "expr": |
rate(node_disk_io_time_weighted_seconds_total{job="node", device!=""}[1m])
"record": "instance_device:node_disk_io_time_weighted_seconds:rate1m"
- "expr": |
sum without (device) (
rate(node_network_receive_bytes_total{job="node", device!="lo"}[1m])
)
"record": "instance:node_network_receive_bytes_excluding_lo:rate1m"
- "expr": |
sum without (device) (
rate(node_network_transmit_bytes_total{job="node", device!="lo"}[1m])
)
"record": "instance:node_network_transmit_bytes_excluding_lo:rate1m"
- "expr": |
sum without (device) (
rate(node_network_receive_drop_total{job="node", device!="lo"}[1m])
)
"record": "instance:node_network_receive_drop_excluding_lo:rate1m"
- "expr": |
sum without (device) (
rate(node_network_transmit_drop_total{job="node", device!="lo"}[1m])
)
"record": "instance:node_network_transmit_drop_excluding_lo:rate1m"

View File

@@ -0,0 +1,152 @@
---
argument_specs:
main:
short_description: Prometheus docker container
options:
prometheus_scrape_interval:
description: Interval at which Prometheus scrapes the monitoring targets
type: str
required: false
default: 5s
prometheus_evaluation_interval:
description: Interval at which Prometheus evaluates the scraped metrics against the defined rules
type: str
required: false
default: 15s
prometheus_storage_retention:
description: How long Prometheus metrics are retained. A number followed by a unit (s, m, h, d, w, y)
type: str
required: false
default: 3650d
prometheus_hcloud_enabled:
description: Whether to use hcloud discovery
type: bool
required: false
default: false
prometheus_hcloud_token:
description: Access token for hetzner cloud service discovery.
type: str
required: "{{ prometheus_hcloud_enabled }}"
prometheus_hcloud_relabel_configs:
description: Relabel configs for hcloud
type: list
required: false
default: []
elements: dict
options:
source_labels:
type: list
required: true
elements: str
target_label:
type: str
required: true
replacement:
type: str
required: false
prometheus_install_grafana:
description: If true, installs grafana in the same docker network as prometheus
type: bool
required: false
default: false
prometheus_install_alertmanager:
description: If true, installs alertmanager in the same docker network as prometheus and configures it
type: bool
required: false
default: true
prometheus_install_webhook:
description: If true, installs webhook server in the same docker network as prometheus and configures it
type: bool
required: false
default: false
alertmanager_storage_retention:
description: How long alertmanager data is retained. A number followed by a unit (s, m, h, d, w, y). Passed directly to the alertmanager role
type: str
required: false
default: "{{ prometheus_storage_retention | default('3650d') }}"
prometheus_webhook_handlers:
description: List of webhook server handlers
type: list
required: "{{ prometheus_install_webhook }}"
elements: dict
options:
id:
description: specifies the ID of your hook. This value is used to create the HTTP endpoint
type: str
required: true
execute-command:
description: specifies the command that should be executed when the hook is triggered
type: str
required: true
command-working-directory:
description: specifies the working directory that will be used for the script when it's executed
type: str
required: false
pass-arguments-to-command:
description: >
specifies the list of arguments that will be passed to the
command. See for more info:
https://github.com/adnanh/webhook/blob/master/docs/Referencing-Request-Values.md
type: list
required: false
elements: dict
options:
source:
description: Source of the argument. Use `string` to pass the argument value directly via name.
type: str
required: true
choices:
- string
- header
- url
- request
- payload
name:
description: Argument if source is string, otherwise the source attribute name.
type: str
required: true
prometheus_ping_hosts:
description: List of hosts to gather ping metrics for using prometheus blackbox exporter
type: list
required: false
default: []
elements: str
# All options after this will be passed directly to the container role
docker_service_suffix:
description: "Passed to container role"
required: false
docker_host_user:
description: "Passed to container role"
required: false
database_passwords:
description: "Passed to container role"
required: false
docker_additional_services:
description: "Passed to container role"
required: false
docker_volume_type:
description: "Passed to container role"
required: false
reverse_proxy_type:
description: "Passed to container role"
required: false
ports:
description: "Passed to container role"
required: false
docker_vhost_domains:
description: "Passed to container role"
required: false
docker_vhost_additional_locations:
description: "Passed to container role"
required: false
docker_entrypoint:
description: "Passed to container role"
required: false
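A sketch of a single webhook handler definition, assuming a hypothetical script on the docker host; the id, command and payload field below are illustrative only:

prometheus_install_webhook: true
prometheus_webhook_handlers:
  - id: alert-hook                              # creates the /hooks/alert-hook endpoint
    execute-command: /usr/local/bin/on-alert.sh # assumed script; must be visible inside the webhook container
    command-working-directory: /tmp
    pass-arguments-to-command:
      - source: payload
        name: commonLabels.alertname            # field from the alertmanager webhook payload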

View File

@@ -0,0 +1,66 @@
---
- name: Prometheus container
include_role:
name: service
vars:
docker_service: prometheus
docker_image: prom/prometheus
reverse_proxy_type: none
docker_command:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--storage.tsdb.retention.time={{ prometheus_storage_retention }}"
- "--web.console.libraries=/usr/share/prometheus/console_libraries"
- "--web.console.templates=/usr/share/prometheus/consoles"
docker_mounts:
- name: data
path: /prometheus
- template: prometheus.yml
path: /etc/prometheus/prometheus.yml
- copypath: recording
path: /etc/prometheus/recording
- copypath: alerting
path: /etc/prometheus/alerting
- name: Alertmanager container for prometheus
include_role:
name: alertmanager
vars:
docker_networks:
- name: prometheus
when: prometheus_install_alertmanager
- name: Webhook container for prometheus
include_role:
name: service
vars:
docker_service: prometheus_webhook
docker_image: thecatlady/webhook
reverse_proxy_type: none
docker_mounts:
- template: webhooks.yaml
path: /config/hooks.yml
docker_networks:
- name: prometheus
when: prometheus_install_webhook
- name: Blackbox exporter for prometheus
include_role:
name: service
vars:
docker_service: blackbox_exporter
docker_image: prom/blackbox-exporter
reverse_proxy_type: none
docker_mounts:
- template: blackbox_exporter.yml
path: /etc/blackbox_exporter/config.yml
docker_networks:
- name: prometheus
- name: Grafana container for prometheus
include_role:
name: grafana
vars:
docker_networks:
- name: prometheus
when: prometheus_install_grafana

View File

@@ -0,0 +1,8 @@
---
modules:
icmp:
prober: icmp
timeout: 5s
icmp:
preferred_ip_protocol: "ip4"

View File

@@ -0,0 +1,65 @@
---
# {{ ansible_managed }}
# my global config
global:
scrape_interval: {{ prometheus_scrape_interval }}
evaluation_interval: {{ prometheus_evaluation_interval }}
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
{% if prometheus_install_alertmanager %}
- alertmanager:9093
{%- endif %}
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
- "/etc/prometheus/recording/*.yaml"
- "/etc/prometheus/alerting/*.yaml"
scrape_configs:
- job_name: "prometheus"
static_configs:
- targets: ["localhost:9090"]
- job_name: "blackbox"
static_configs:
- targets: ["blackbox_exporter:9115"]
{% if prometheus_ping_hosts | length > 0 %}
- job_name: "icmp"
metrics_path: "/probe"
params:
module: ["icmp"]
static_configs:
- targets:
{% for host in prometheus_ping_hosts %}
- "{{ host.name }}::{{ host.type | default('monitored') }}"
{% endfor %}
relabel_configs:
- source_labels: [__address__]
regex: '(.+)::(.+)'
target_label: __param_target
replacement: '${1}'
- source_labels: [__address__]
regex: '(.+)::(.+)'
target_label: host_type
replacement: '${2}'
- source_labels: [__param_target]
target_label: instance
- target_label: __address__
replacement: blackbox_exporter:9115
{%- endif %}
{% if prometheus_hcloud_enabled %}
- job_name: hcloud
hetzner_sd_configs:
- role: hcloud
authorization:
credentials: {{ prometheus_hcloud_token }}
relabel_configs: {{ prometheus_hcloud_relabel_configs }}
{%- endif %}
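The icmp job above renders each entry as name::type, so prometheus_ping_hosts presumably takes dicts with those keys (addresses below are placeholders; type defaults to monitored, and wan entries feed the BlackboxAllWanProbesFailed alert):

prometheus_ping_hosts:
  - name: 1.1.1.1          # placeholder public address probed as wan
    type: wan
  - name: nas.example.lan  # placeholder internal host, gets the default type monitored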

View File

@@ -0,0 +1,5 @@
---
{% for item in prometheus_webhook_handlers %}
- {{ item }}
{% endfor %}

roles/service/README.md Normal file
View File

@@ -0,0 +1 @@
Sets up a docker service.

View File

@@ -0,0 +1,30 @@
---
docker_namespace: "{{ docker_service }}"
docker_service_suffix: ""
docker_volume_type: bind
docker_restart_policy: always
reverse_proxy_type: caddy
docker_proxy_target_protocol: http
docker_vhost_additional_locations: []
docker_additional_env: {}
docker_additional_services: []
docker_database: none
docker_postgres_tag: 14-alpine
docker_mounts: []
docker_network_mode: ""
docker_networks: []
docker_env: {}
docker_published_ports: []
docker_host_user: false
dockerfile: []
docker_mariadb_config: {}
docker_redis_persistence: false
docker_phpmyadmin_basicauth: true
docker_phpmyadmin_basicauth_users: {}
timezone: Etc/UTC

View File

@@ -0,0 +1,13 @@
---
- name: Restart container
community.docker.docker_container:
name: "{{ docker_service_name }}"
restart: true
when: not container_out.changed
- name: Ensure container running
community.docker.docker_container:
name: "{{ docker_service_name }}"
state: started
when: not ansible_check_mode

View File

@@ -0,0 +1,225 @@
---
argument_specs:
main:
short_description: Docker service
description: "Sets up a docker service. Supports defining networks, building a custom image, setting up memcached, databases and a reverse proxy, creating a user on the host to run the container as, named volumes, bind mounts (either auto-created or existing)."
options:
docker_service:
description: "The name of the docker service (example: gitea)"
type: str
required: true
docker_namespace:
description: The namespace of the service. This can be used when multiple containers should be part of the service. Used as the default network, the name of the directory where bind mounts are created and the prefix for volume names.
type: str
required: false
default: "{{ docker_service }}"
docker_service_suffix:
description: "A suffix used to allow running multiple instances of the same service on a host. If docker_service is gitea and docker_service_suffix production, the service will be gitea-production"
type: str
required: false
default: ""
docker_image:
description: "Docker image to use for the container. If dockerfile is defined, it will be used as base for locally built image (example: gitea/gitea:latest)"
type: str
required: true
dockerfile:
description: "A list of dockerfile instructions to add to the base image"
type: list
elements: str
required: false
default: []
docker_host_user:
description: "If true, creates a user on the host for this service. The container will run as this user's uid/gid. Bind mount volumes will be owned by this user."
type: bool
required: false
default: true
docker_database:
description: "Database type to set up. It will be run in a docker container accessible to the service at host <service name (with -suffix if suffix is defined)>-db on default port."
type: str
required: false
choices:
- postgres
- mariadb
- mongo
- none
default: none
database_passwords:
description: >
database_passwords[docker_service] is a string with the password used
for communication between the service and database. Required if
docker_database is postgres or mariadb. For mariadb, you can also
define database_passwords[docker_service + '_root'] if you want to
set a password for the mariadb root user
type: dict
required: false
docker_postgres_tag:
description: 'Postgresql version to use. Can be debian (n) or alpine-based (n-alpine), where n can be major version like 14 or minor like 14.13.'
type: str
required: false
default: 14-alpine
docker_mariadb_config:
description: |
A dict specifying config for mariadb. For example
mysqld:
innodb_buffer_pool_size: '4G'
aria_pagecache_buffer_size': '4G'
would set
[mysqld]
innodb_buffer_pool_size = 4G
aria_pagecache_buffer_size = 4G
type: dict
required: false
default: {}
docker_additional_services:
description: "List of additional services to configure (in separate containers). These will be accessible with hostname <docker_service_name>[_suffix]_<additional_service>"
type: list
required: false
elements: str
choices:
- memcached
- redis
default: []
docker_redis_persistence:
description: Whether to save redis data to persistent storage
type: bool
required: false
default: false
docker_phpmyadmin_basicauth:
description: Whether to enable basicauth for phpmyadmin
type: bool
required: false
default: true
docker_phpmyadmin_basicauth_users:
description: Dict of users and their password hashes for phpmyadmin basic auth. Required if docker_phpmyadmin_basicauth is true and phpmyadmin is used.
type: dict
required: false
default: {}
docker_volume_type:
description: "Defines whether to use named volumes or bind mounts for mounts with name"
type: str
required: false
choices:
- named
- bind
default: named
docker_mounts:
description: "List of bind mounts or volumes to be mounted inside the container. Each element is a dict with path and exactly one of name, src or template"
type: list
required: false
default: []
elements: dict
options:
path:
description: "The path inside the container to mount at"
type: str
required: true
readonly:
description: "If true, volume will be mounted as read only inside the container. Only applies for named and src mounts."
type: bool
required: false
default: false
mode:
description: "Permissions for the created/templated directory. Defaults to '0644' for files, '0755' for directories. Doesn't apply for named volumes."
type: str
required: false
name:
description: "If docker_volume_type is named, the name of the named volume to be mounted at path. If docker_volume_type is bind, the name of the folder to create under /opt/<service>[/suffix]/mounts/ and mount at path."
type: str
required: false
src:
description: "Host path to bind mount inside the container."
type: str
required: false
template:
description: "Name of template without .j2 extension. Will be templated at /opt/<service>[/suffix]/mounts/<template> and mounted read only inside the container."
type: str
required: false
copypath:
description: "Name of file or directory to copy. Will be deployed from files/<copypath> to /opt/<service>[/suffix]/mounts/<copypath> and mounted read only inside the container."
type: str
required: false
reverse_proxy_type:
description: "Defines which kind of reverse proxy to configure for the container. Traefik support is experimental."
type: str
required: false
choices:
- caddy
- traefik
- none
default: caddy
docker_image_http_port:
description: "The port for http listener inside the container. Will be mapped to the host port defined in ports. Required if reverse_proxy_type is not none."
type: int
required: false
docker_proxy_target_protocol:
description: "Protocol to use for proxy connection"
type: str
required: false
choices:
- http
- https
default: http
ports:
description: "ports[docker_service]['http'] or ports[docker_service]['https'] defines the port on which the container will listen on for reverse proxy connections. Required if reverse_proxy_type is caddy. If ports[docker_service]['phpmyadmin'] is defined and docker_database is mariadb, phpmyadmin will is installed."
type: dict
docker_vhost_domains:
description: "docker_vhost_domains[docker_service] is a list which defines which domains should be proxied to the container. Required if reverse_proxy_type is not none. docker_vhost_domains[docker_service + '-phpmyadmin'] is used for phpmyadmin"
type: dict
docker_vhost_additional_locations:
description: "Passed to vhost role as vhost_locations variable"
required: false
default: []
docker_published_ports:
description: "A list of published ports in docker format (<host listen address>:<host port>:<container port>)"
type: list
required: false
default: []
docker_networks:
description: A list of docker networks for the docker service. Defaults to a single network named after docker_namespace
type: list
required: false
default:
- name: "{{ docker_namespace }}"
elements: dict
options:
name:
description: Name of the docker network
type: str
required: true
docker_env:
description: "A dict of environment variables for the container"
type: dict
required: false
default: {}
docker_entrypoint:
description: "Docker entrypoint as list of command and arguments"
type: list
required: false
elements: str
docker_command:
description: "Docker command as list of arguments or command and arguments"
type: list
required: false
elements: str
docker_memory:
description: Memory limit for the container
type: str
required: false
docker_restart_policy:
description: Restart policy of the service
type: str
required: false
default: always
docker_network_mode:
description: "Docker container network mode"
type: str
required: false
default: ""
timezone:
description: "Timezone set in the TZ env var"
type: str
required: false
default: Etc/UTC
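To illustrate the four mount variants described above, a hedged example; the paths and names are hypothetical:

docker_mounts:
  - name: data              # named volume or auto-created bind directory, depending on docker_volume_type
    path: /var/lib/app
  - src: /srv/app/media     # existing host path, bind mounted as-is
    path: /app/media
  - template: app.conf      # rendered from the calling role's app.conf.j2, mounted read only
    path: /etc/app/app.conf
  - copypath: static        # copied from the calling role's files/static, mounted read only
    path: /app/static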

View File

@@ -0,0 +1,4 @@
---
dependencies:
- role: docker

View File

@@ -0,0 +1,18 @@
---
- name: Converge
hosts: all
tasks:
- name: Nginx
include_role:
name: service
vars:
docker_service: nginx
docker_image: nginx
docker_image_http_port: 80
admin_email: test@example.com
ports:
nginx:
http: 28001
docker_vhost_domains:
nginx:
- localhost

View File

@@ -0,0 +1,15 @@
---
dependency:
name: galaxy
driver:
name: podman
platforms:
- name: bullseye
image: git.uumas.fi/uumas/molecule-testbed:bullseye-docker
command: /lib/systemd/systemd
pre_build_image: true
privileged: true
provisioner:
name: ansible
verifier:
name: ansible

View File

@@ -0,0 +1,83 @@
---
- name: Verify
hosts: all
gather_facts: false
tasks:
- name: Ensure https://localhost returns 200
ansible.builtin.uri:
url: https://localhost
validate_certs: false
return_content: true
register: get_localhost
- name: Assert caddy responded on https://localhost
ansible.builtin.assert:
that: "(get_localhost.server | split(', '))[0] == 'Caddy'"
- name: Assert nginx responded on https://localhost
ansible.builtin.assert:
that: "(get_localhost.server | split(', '))[1].startswith('nginx')"
- name: Get /opt/nginx directory info
ansible.builtin.stat:
path: /opt/nginx
register: opt_nginx_stat
- name: Assert /opt/nginx doesn't exist
ansible.builtin.assert:
that: not opt_nginx_stat.stat.exists
msg: /opt/nginx should not have been created but it was
- name: Get host passwd nginx user
ansible.builtin.getent:
database: passwd
key: nginx
fail_key: false
- name: Assert nginx user does not exist
ansible.builtin.assert:
that: getent_passwd.nginx == None
msg: "nginx user should not exist but it does ({{ getent_passwd }})"
- name: Get nginx container info
community.docker.docker_container_info:
name: nginx
register: container_out
- name: Assert container port 80 forwarded to host 28001
ansible.builtin.assert:
that:
- "container_out.container.HostConfig.PortBindings['80/tcp'] is defined"
- "container_out.container.HostConfig.PortBindings['80/tcp'][0].HostPort == '28001'"
msg: "Container port 80 not correctly forwarded to host port. Port bindings output was {{ container_out.container.HostConfig.PortBindings }}"
- name: Assert container user not set
ansible.builtin.assert:
that: container_out.container.Config.User == ""
- name: Get container image info
community.docker.docker_image_info:
name: "{{ container_out.container.Image }}"
register: container_image_out
- name: Assert nginx image not built locally
assert:
that: container_image_out.images[0].RepoTags[0] == 'nginx:latest'
msg: "Nginx image tag incorrect. It should have been nginx:latest but it was {{ container_image_out.images[0].RepoTags }}"
- name: Get docker host info
community.docker.docker_host_info:
volumes: true
register: docker_host_out
- name: Assert all containers are running
ansible.builtin.assert:
that: docker_host_out.host_info.Containers == docker_host_out.host_info.ContainersRunning
msg: There should have been {{ docker_host_out.host_info.Containers }} containers running but there were {{ docker_host_out.host_info.ContainersRunning }}
- name: Assert no extra containers were created
ansible.builtin.assert:
that: docker_host_out.host_info.Containers == 1
msg: There should have been 1 container created but there were {{ docker_host_out.host_info.Containers }}
- name: Assert no extra images were pulled
ansible.builtin.assert:
that: docker_host_out.host_info.Images == 1
msg: There should have been 1 image present but there were {{ docker_host_out.host_info.Images }}
- name: Assert no volumes were created
ansible.builtin.assert:
that: docker_host_out.volumes | length == 0
msg: There should have been no volumes present but there were {{ docker_host_out.volumes | length }}


@@ -0,0 +1,51 @@
---
- name: Store docker_additional_services so it doesn't get reset
ansible.builtin.set_fact:
_docker_additional_services: "{{ docker_additional_services }}"
- name: "Memcached container for {{ docker_service_name }}"
ansible.builtin.include_role:
name: service
vars:
docker_namespace: "{{ _docker_namespace }}"
docker_service: memcached
docker_image: memcached:alpine
reverse_proxy_type: none
docker_mounts: []
docker_published_ports: []
docker_env: {}
docker_additional_env: {}
docker_networks: []
docker_database: none
docker_additional_services: []
docker_host_user: false
dockerfile: []
docker_command: "{{ omit }}"
docker_entrypoint: "{{ omit }}"
docker_memory: "{{ omit }}"
when: "'memcached' in _docker_additional_services"
- name: "Redis container for {{ docker_service_name }}"
ansible.builtin.include_role:
name: service
vars:
docker_namespace: "{{ _docker_namespace }}"
docker_service: redis
docker_image: redis:alpine
reverse_proxy_type: none
docker_mounts:
- name: redis
path: /data
docker_command: "{{ '--save 60 1' if docker_redis_persistence else omit }}"
docker_published_ports: []
docker_env: {}
docker_additional_env: {}
docker_networks: []
docker_database: none
docker_additional_services: []
docker_host_user: false
dockerfile: []
docker_entrypoint: "{{ omit }}"
docker_memory: "{{ omit }}"
when: "'redis' in _docker_additional_services"


@@ -0,0 +1,94 @@
---
- name: Set postgres container vars
ansible.builtin.set_fact:
db_container_image: 'postgres:{{ docker_postgres_tag }}'
db_container_env:
POSTGRES_USER: "{{ docker_service_underscore_name }}"
POSTGRES_PASSWORD: "{{ database_passwords[docker_service_name] }}"
db_container_data: /var/lib/postgresql/data
when: docker_database == 'postgres'
- name: Set mariadb container vars
ansible.builtin.set_fact:
db_container_image: mariadb:10
db_container_env:
MARIADB_USER: "{{ docker_service_underscore_name }}"
MARIADB_DATABASE: "{{ docker_service_underscore_name }}"
MARIADB_PASSWORD: "{{ database_passwords[docker_service_name] }}"
MARIADB_RANDOM_ROOT_PASSWORD: "{{ database_passwords[docker_service_name + '_root'] is not defined | string }}"
MARIADB_ROOT_PASSWORD: "{{ database_passwords[docker_service_name + '_root'] | default(omit) }}"
db_container_data: /var/lib/mysql
db_image_port: 3306
when: docker_database == 'mariadb'
- name: Set mongo container vars
ansible.builtin.set_fact:
db_container_image: 'mongo:latest'
db_container_data: /data/db
when: docker_database == 'mongo'
- name: Define db container data mount
set_fact:
db_container_mounts:
- name: db
path: "{{ db_container_data }}"
mode: '0700'
- name: Define db container config mount
set_fact:
db_container_mounts: "{{ db_container_mounts + [{'template': 'mariadb.cnf', 'path': '/etc/mysql/conf.d/custom.cnf'}] }}"
when: db_config_mounts_needed
- name: Set db published ports var
set_fact:
db_published_ports: ["127.0.0.1:{{ ports[docker_service_name].db }}:{{ db_image_port }}"]
when: ports[docker_service_name].db is defined
- name: Database container for {{ docker_service_name }}
ansible.builtin.include_role:
name: service
vars:
docker_namespace: "{{ _docker_namespace }}"
docker_service: db
docker_image: "{{ db_container_image }}"
reverse_proxy_type: none
docker_mounts: "{{ db_container_mounts }}"
docker_published_ports: "{{ db_published_ports }}"
docker_env: "{{ db_container_env | default({}) }}"
docker_additional_env: {}
docker_networks: []
docker_database: none
docker_additional_services: []
docker_host_user: false
dockerfile: []
docker_command: "{{ omit }}"
docker_entrypoint: "{{ omit }}"
docker_memory: "{{ omit }}"
- name: phpMyAdmin container for {{ docker_service_name }}
ansible.builtin.include_role:
name: service
vars:
docker_namespace: "{{ _docker_namespace }}"
docker_service: phpmyadmin
docker_image: phpmyadmin
docker_mounts: []
docker_published_ports:
- "127.0.0.1:{{ ports[docker_service_name]['phpmyadmin'] }}:80"
docker_env:
PMA_ABSOLUTE_URI: "https://{{ docker_vhost_domains[docker_service_name + '_phpmyadmin'][0] }}"
PMA_HOST: "{{ docker_service_name }}-db"
docker_additional_env: {}
docker_networks: []
docker_database: none
docker_additional_services: []
docker_host_user: false
dockerfile: []
docker_command: "{{ omit }}"
docker_entrypoint: "{{ omit }}"
docker_memory: "{{ omit }}"
docker_proxy_target_protocol: http
vhost_basicauth: "{{ docker_phpmyadmin_basicauth }}"
vhost_basicauth_users: "{{ docker_phpmyadmin_basicauth_users }}"
when: docker_database == 'mariadb' and ports[docker_service_name]['phpmyadmin'] is defined
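
For reference, a hypothetical set of host variables that would drive these tasks to create a MariaDB container with a published database port plus a phpMyAdmin container; "wiki" stands in for docker_service_name and every value is made up:

docker_database: mariadb
database_passwords:
  wiki: changeme                    # becomes MARIADB_PASSWORD
  wiki_root: also-changeme          # optional; sets MARIADB_ROOT_PASSWORD
ports:
  wiki:
    db: 23306                       # published as 127.0.0.1:23306 -> 3306
    phpmyadmin: 28080               # presence of this key enables phpMyAdmin
docker_vhost_domains:
  wiki_phpmyadmin:
    - pma.example.org               # used for PMA_ABSOLUTE_URI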


@@ -0,0 +1,14 @@
---
- name: "Create user for {{ docker_service_name }}"
user:
name: "{{ docker_service_underscore_name }}"
home: "/opt/{{ docker_namespace }}/{{ docker_service_suffix }}"
create_home: false
system: true
shell: /bin/bash
register: user
- name: Set docker container user
set_fact:
docker_user: "{{ user.uid }}:{{ user.group }}"


@@ -0,0 +1,71 @@
---
- name: Image build
when: dockerfile_needed
block:
- name: Set docker_build_directory variable
set_fact:
docker_build_directory: /opt/{{ docker_namespace }}/build
- name: Create container build directory
file:
path: "{{ docker_build_directory }}"
state: directory
- name: Put dockerfile in place
template:
src: Dockerfile.j2
dest: "{{ docker_build_directory }}/Dockerfile"
mode: 0644
- name: Build docker image for {{ docker_service }}
docker_image:
name: "local_{{ docker_service }}"
source: build
force_source: true
build:
pull: true
path: "{{ docker_build_directory }}"
register: built_image
changed_when:
- not ansible_check_mode
- built_image.changed
- name: Pull container image for {{ docker_service_name }}
docker_image:
name: "{{ docker_image }}"
source: pull
force_source: true
register: pulled_image
when: not dockerfile_needed
changed_when:
- not ansible_check_mode
- pulled_image.changed
- name: Set container_image variable
set_fact:
container_image: "{{ item.image }}"
when: item.skipped is not defined or not item.skipped
loop:
- "{{ built_image }}"
- "{{ pulled_image }}"
- name: Check mode image info
when: ansible_check_mode
block:
- name: Get docker image info for check mode
docker_image_info:
name: "{{ ('local_' + docker_service) if dockerfile | length > 0 else docker_image }}"
register: existing_image
- name: Set check mode container_image variable
set_fact:
container_image: "{{ existing_image.images[0] }}"
when: existing_image.images | length > 0
- name: Set image user variable
set_fact:
image_user: "{{ container_image.Config.User }}"
when:
- not ansible_check_mode
- container_image.Config.User | int
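
When the dockerfile list is non-empty, these tasks render the Dockerfile.j2 template (shown further down in this diff) and build a local image named local_<docker_service> instead of pulling docker_image directly. A hypothetical example of the driving variables:

docker_image: php:8.2-apache        # used for the FROM line of the generated Dockerfile
dockerfile:
  - RUN apt-get update && apt-get install -y --no-install-recommends libzip-dev
  - RUN docker-php-ext-install zip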


@@ -0,0 +1,65 @@
---
- name: Fail if docker_volumes defined
ansible.builtin.fail:
msg: "docker_volumes is not supported anymore. Use docker_mounts instead!"
when: docker_volumes is defined
- name: Store variables to be reset in the end
ansible.builtin.set_fact:
_docker_service_name: "{{ docker_service_name }}"
_docker_mount_definition: "{{ docker_mount_definition }}"
_container_published_ports: "{{ container_published_ports }}"
_docker_published_ports: "{{ docker_published_ports }}"
_container_image: "{{ container_image }}"
_container_networks: "{{ container_networks }}"
_template_mounts_needed: "{{ template_mounts_needed }}"
_copypath_mounts_needed: "{{ copypath_mounts_needed }}"
_volumes_needed: "{{ volumes_needed }}"
_dockerfile_needed: "{{ dockerfile_needed }}"
_db_config_mounts_needed: "{{ db_config_mounts_needed }}"
_bind_volumes_needed: "{{ bind_volumes_needed }}"
_named_volumes_needed: "{{ named_volumes_needed }}"
_create_opt_directory: "{{ create_opt_directory }}"
_create_mounts_directory: "{{ create_mounts_directory }}"
_docker_service_underscore_name: "{{ docker_service_underscore_name }}"
_image_user: "{{ image_user }}"
when: create_mounts_directory is defined
- name: Initialize variables
ansible.builtin.set_fact:
_docker_namespace: "{{ docker_namespace }}"
docker_service_name: "{{ docker_namespace }}"
docker_mount_definition: []
container_published_ports: []
container_image: ''
image_user: ''
container_networks: []
db_published_ports: []
- name: Add suffix to docker_service_name
ansible.builtin.set_fact:
docker_service_name: "{{ docker_service_name }}-{{ docker_service_suffix }}"
when: docker_service_suffix | length > 0
- name: Add docker_service to docker_service_name
ansible.builtin.set_fact:
docker_service_name: "{{ docker_service_name }}-{{ docker_service }}"
when: docker_namespace != docker_service
- name: Set assistive variables
set_fact:
docker_service_underscore_name: "{{ docker_service_name | replace('-', '_') }}"
template_mounts_needed: "{{ docker_mounts | selectattr('template', 'defined') | list | length > 0 }}"
copypath_mounts_needed: "{{ docker_mounts | selectattr('copypath', 'defined') | list | length > 0 }}"
volumes_needed: "{{ docker_mounts | selectattr('name', 'defined') | list | length > 0 or docker_database != 'none' }}"
dockerfile_needed: "{{ dockerfile | length > 0 }}"
db_config_mounts_needed: "{{ docker_mariadb_config | length > 0 }}"
- name: Set more assistive variables
set_fact:
bind_volumes_needed: "{{ volumes_needed and docker_volume_type == 'bind' }}"
named_volumes_needed: "{{ volumes_needed and docker_volume_type == 'named' }}"
- name: Set even more assistive variables
set_fact:
create_opt_directory: "{{ dockerfile_needed or docker_host_user or bind_volumes_needed or template_mounts_needed or copypath_mounts_needed or db_config_mounts_needed }}"
create_mounts_directory: "{{ bind_volumes_needed or template_mounts_needed or copypath_mounts_needed or db_config_mounts_needed }}"


@@ -0,0 +1,140 @@
---
- name: Role initialization
import_tasks: init.yml
- name: Docker network
when: docker_network_mode != 'host' or docker_networks | length > 0
block:
- name: Set networks variable to {{ docker_namespace + ('-' + docker_service_suffix if docker_service_suffix | length > 0 else '') }}
ansible.builtin.set_fact:
container_networks:
- name: "{{ docker_namespace + ('-' + docker_service_suffix if docker_service_suffix | length > 0 else '') }}"
when: docker_networks | length == 0
- name: Set networks variable to {{ docker_networks }}
ansible.builtin.set_fact:
container_networks: "{{ docker_networks }}"
when: docker_networks | length > 0
- name: Create docker networks
community.docker.docker_network:
name: "{{ item.name }}"
loop: "{{ container_networks }}"
- name: Reverse proxy for container
include_tasks: proxy.yml
when: reverse_proxy_type != 'none'
- name: Create directory /opt/{{ docker_namespace }}
ansible.builtin.file:
path: "/opt/{{ docker_namespace }}"
state: directory
mode: 0755
when: create_opt_directory
- name: Container image
import_tasks: image.yml
- name: Container user
include_tasks: host_user.yml
when: docker_host_user
- name: Create suffix directory
when: create_opt_directory and docker_service_suffix | length > 0
block:
- name: Create directory /opt/{{ docker_namespace + '/' + docker_service_suffix }}
ansible.builtin.file:
path: "/opt/{{ docker_namespace }}/{{ docker_service_suffix }}"
state: directory
owner: "{{ user.uid | default(omit) }}"
group: "{{ user.group | default(omit) }}"
mode: 0755
- name: Set container_workdir variable
ansible.builtin.set_fact:
container_workdir: /opt/{{ docker_namespace }}/{{ docker_service_suffix }}
- name: Set container_workdir variable
ansible.builtin.set_fact:
container_workdir: /opt/{{ docker_namespace }}
when: docker_service_suffix | length == 0
- name: Create mounts directory
when: create_mounts_directory
block:
- name: Set docker_mounts_dir
ansible.builtin.set_fact:
docker_mounts_dir: "{{ container_workdir }}/mounts"
- name: Create directory {{ docker_mounts_dir }}
ansible.builtin.file:
path: "{{ docker_mounts_dir }}"
state: directory
owner: "{{ user.uid if docker_host_user else omit }}"
group: "{{ user.group if docker_host_user else omit }}"
mode: 0700
- name: Database container
include_tasks: database.yml
when: docker_database != 'none'
- name: Additional services
include_tasks: additional.yml
when: docker_additional_services | length > 0
- name: Container mounts
import_tasks: mounts.yml
- name: "Ensure container with legacy name doesn't exist for {{ docker_service_underscore_name }}"
community.docker.docker_container:
name: "{{ docker_service_underscore_name }}"
state: absent
when: docker_service_underscore_name != docker_service_name
- name: Ensure network with legacy name doesn't exist
community.docker.docker_network:
name: "{{ docker_service_underscore_name }}"
state: absent
when: docker_service_underscore_name != docker_service_name
- name: "Container for {{ docker_service_name }}"
community.docker.docker_container:
name: "{{ docker_service_name }}"
image: "{{ container_image.Id if (not ansible_check_mode) or (container_image | length > 0) else docker_image }}"
user: "{{ docker_user if docker_host_user else omit }}"
mounts: "{{ docker_mount_definition }}"
published_ports: "{{ container_published_ports + docker_published_ports }}"
labels: "{{ traefik_labels | default(omit) }}"
env: "{{ docker_env | combine(docker_additional_env) | combine({'TZ': timezone}) }}"
entrypoint: "{{ docker_entrypoint | default(omit) }}"
command: "{{ docker_command | default(omit) }}"
memory: "{{ docker_memory | default(omit) }}"
restart_policy: "{{ docker_restart_policy }}"
network_mode: "{{ docker_network_mode if docker_network_mode | length > 0 else omit }}"
networks: "{{ container_networks }}"
log_driver: local
state: "{{ 'started' if docker_restart_policy == 'always' else 'present' }}"
register: container_out
notify: Ensure container running
- name: Flush handlers to trigger container restart
ansible.builtin.meta: flush_handlers
- name: Reset variables to their original values
ansible.builtin.set_fact:
docker_service_name: "{{ _docker_service_name }}"
docker_mount_definition: "{{ _docker_mount_definition }}"
container_published_ports: "{{ _container_published_ports }}"
docker_published_ports: "{{ _docker_published_ports }}"
container_image: "{{ _container_image }}"
container_networks: "{{ _container_networks }}"
template_mounts_needed: "{{ _template_mounts_needed }}"
copypath_mounts_needed: "{{ _copypath_mounts_needed }}"
volumes_needed: "{{ _volumes_needed }}"
dockerfile_needed: "{{ _dockerfile_needed }}"
db_config_mounts_needed: "{{ _db_config_mounts_needed }}"
bind_volumes_needed: "{{ _bind_volumes_needed }}"
named_volumes_needed: "{{ _named_volumes_needed }}"
create_opt_directory: "{{ _create_opt_directory }}"
create_mounts_directory: "{{ _create_mounts_directory }}"
docker_service_underscore_name: "{{ _docker_service_underscore_name }}"
image_user: "{{ _image_user }}"
when: _docker_service_name is defined


@@ -0,0 +1,65 @@
---
- name: Create directories and put files in them
when: create_mounts_directory
block:
- name: Define mount directory owner
set_fact:
mount_owner: "{{ user.uid if docker_host_user else image_user }}"
mount_group: "{{ user.group if docker_host_user else '' }}"
- name: "Create docker bind mount directories for {{ docker_service_name }}"
file:
path: "{{ docker_mounts_dir }}/{{ item.name }}"
state: directory
owner: "{{ mount_owner if (item.set_owner is not defined or item.set_owner) and mount_owner | length > 0 else omit }}"
group: "{{ mount_group if (item.set_group is not defined or item.set_group) and mount_group | length > 0 else omit }}"
mode: "{{ item.mode | default('0755') }}"
when: item.name is defined and docker_volume_type == 'bind'
loop: "{{ docker_mounts }}"
- name: Set docker_mount_definition for named binds
set_fact:
docker_mount_definition: "{{ docker_mount_definition + [{'source': docker_mounts_dir + '/' + item.name, 'target': item.path, 'type': 'bind', 'read_only': item.readonly | default(false)}] }}"
when: item.name is defined and docker_volume_type == 'bind'
loop: "{{ docker_mounts }}"
- name: Template docker template mounts for {{ docker_service_name }}
template:
src: "{{ item.template }}.j2"
dest: "{{ docker_mounts_dir }}/{{ item.template }}"
mode: "{{ item.mode | default('0644') }}"
when: item.template is defined
loop: "{{ docker_mounts }}"
notify: Restart container
- name: Set docker_mount_definition for template mounts
set_fact:
docker_mount_definition: "{{ docker_mount_definition + [{'source': docker_mounts_dir + '/' + item.template, 'target': item.path, 'type': 'bind', 'read_only': true}] }}"
when: item.template is defined
loop: "{{ docker_mounts }}"
- name: Copy docker copypath mounts for {{ docker_service_name }}
copy:
src: "files/{{ item.copypath }}"
dest: "{{ docker_mounts_dir }}/"
directory_mode: "{{ item.mode | default('0755') }}"
mode: "{{ item.mode | default('0644') }}"
when: item.copypath is defined
loop: "{{ docker_mounts }}"
notify: Restart container
- name: Set docker_mount_definition for copypath mounts
set_fact:
docker_mount_definition: "{{ docker_mount_definition + [{'source': docker_mounts_dir + '/' + item.copypath, 'target': item.path, 'type': 'bind', 'read_only': true}] }}"
when: item.copypath is defined
loop: "{{ docker_mounts }}"
- name: Set docker_mount_definition for named volumes
set_fact:
docker_mount_definition: "{{ docker_mount_definition + [{'source': docker_namespace + ('-' + docker_service_suffix if docker_service_suffix | length > 0 else '') + '-' + item.name, 'target': item.path, 'type': 'volume', 'read_only': item.readonly | default(false)}] }}"
when: docker_volume_type == 'named' and item.name is defined
loop: "{{ docker_mounts }}"
- name: Set docker_mount_definition for src binds
set_fact:
docker_mount_definition: "{{ docker_mount_definition + [{'source': item.src, 'target': item.path, 'type': 'bind', 'read_only': item.readonly | default(false)}] }}"
when: item.src is defined
loop: "{{ docker_mounts }}"


@@ -0,0 +1,32 @@
---
- name: "Ensure reverse proxy with legacy name doesn't exist"
include_role:
name: uumas.general.vhost
vars:
vhost_id: "{{ docker_service_underscore_name }}"
vhost_state: absent
when: docker_service_underscore_name != docker_service_name
- name: Reverse proxy
include_role:
name: uumas.general.vhost
vars:
vhost_type: reverse_proxy
vhost_id: "{{ docker_service_name }}"
vhost_proxy_target_protocol: "{{ docker_proxy_target_protocol }}"
vhost_domains: "{{ docker_vhost_domains[docker_service_name] }}"
vhost_proxy_target_port: "{{ ports[docker_service_name][vhost_proxy_target_protocol] }}"
vhost_locations: "{{ docker_vhost_additional_locations }}"
when: reverse_proxy_type != 'traefik'
- name: Set published ports variable to http port
set_fact:
container_published_ports: ["127.0.0.1:{{ ports[docker_service_name][docker_proxy_target_protocol] }}:{{ docker_image_http_port }}"]
when:
- docker_network_mode != 'host'
- reverse_proxy_type != 'traefik'
- name: Include traefik vars
include_vars: traefik.yml
when: reverse_proxy_type == 'traefik'


@@ -0,0 +1,6 @@
# {{ ansible_managed }}
FROM {{ docker_image }}
{% for item in dockerfile %}
{{ item }}
{% endfor %}


@@ -0,0 +1,8 @@
# {{ ansible_managed }}
{% for section in docker_mariadb_config | dict2items %}
[{{ section.key }}]
{% for item in section.value | dict2items %}
{{ item.key }} = {{ item.value }}
{% endfor %}
{% endfor %}
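
As a sketch, a docker_mariadb_config value like the following (the option names are invented for illustration):

docker_mariadb_config:
  mysqld:
    max_connections: 200
    innodb_buffer_pool_size: 256M

would be rendered by this template as:

[mysqld]
max_connections = 200
innodb_buffer_pool_size = 256M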

roles/service/todo Normal file

@@ -0,0 +1,3 @@
Reverse proxy for phpmyadmin
Make docker_additional_services a loop of include_tasks


@@ -11,4 +11,3 @@ dependencies:
docker_env:
UNIFI_HTTPS_PORT: "{{ ports.unifi.https }}"
PORTAL_HTTP_PORT: "8808"


@@ -6,10 +6,10 @@ dependencies:
docker_image: quay.io/wekan/wekan
docker_image_http_port: 8080
docker_database: mongo
docker_volumes:
- wekan_data:/data
docker_mounts:
- name: data
path: /data
docker_env:
MONGO_URL: mongodb://wekan_db:27017/wekan
ROOT_URL: "https://{{ docker_vhost_domains.wekan[0] }}"
WRITABLE_PATH: /data


@@ -1,5 +1,4 @@
---
- name: Get wekan data volume path from container creation output
set_fact:
wekan_data_volume: "{{ container_out.container.Mounts | selectattr('Destination', 'equalto', '/data') | join }}"
@@ -10,3 +9,4 @@
state: directory
owner: 999
group: 999
mode: 0755


@@ -0,0 +1 @@
Installs WordPress in Docker and configures cron


@@ -0,0 +1,16 @@
---
docker_service_name: wordpress
wordpress_tag: php8.1
docker_additional_services: []
docker_host_user: false
wordpress_symlinks: false
wordpress_php_memory_limit: 256M
wordpress_php_post_max_size: 128M
wordpress_php_upload_max_filesize: 32M
wordpress_php_max_execution_time: 30
wordpress_php_max_input_time: 60


@@ -0,0 +1,25 @@
---
- name: Converge
hosts: all
tasks:
- name: "Include wordpress"
ansible.builtin.include_role:
name: "wordpress"
vars:
docker_additional_services:
- memcached
- redis
wordpress_php_extensions:
- pdo_mysql
wordpress_additional_volumes:
- www
- log
database_passwords:
wordpress: molecule
docker_vhost_domains:
wordpress:
- localhost
admin_email: molecule@example.com
ports:
wordpress:
http: 28001

Some files were not shown because too many files have changed in this diff.