Compare commits

...

102 Commits

Author SHA1 Message Date
uumas
db651723b2 Add pinp support and make windmill use it 2026-03-26 03:04:44 +02:00
uumas
1d180106d6 service: Use saner defaults for additional containers 2026-03-25 19:29:30 +02:00
uumas
9eaa306aa4 lint 2026-03-25 19:28:40 +02:00
uumas
f8e67b12d7 windmill: fix entrypoint script 2026-03-24 19:56:09 +02:00
uumas
5814267d66 Add windmill 2026-03-18 00:31:50 +02:00
uumas
defd2517ea service: Add postgres url to secrets 2026-03-18 00:30:25 +02:00
uumas
615c4013c1 Use caddy instead of socat for http proxying 2026-03-15 22:30:36 +02:00
uumas
77768e5483 small fixes 2026-03-15 22:30:16 +02:00
uumas
162972810f example: fix postgres examples 2026-03-15 21:56:16 +02:00
uumas
8595e261c9 nextcloud: Make HArP optional and opt-in 2026-03-15 00:43:24 +02:00
uumas
70c5ed7ea0 service: Make oauth2-proxy depend on its socket 2026-03-14 23:50:01 +02:00
uumas
3554de82c0 service: Make oauth2-proxy aware it's running behind reverse proxy 2026-03-14 23:33:34 +02:00
uumas
f64ea2cbe3 container: Allow custom ip addresses for more than one network 2026-03-14 23:33:04 +02:00
uumas
ca29ffb271 network: Delete network on stop 2026-03-14 22:11:42 +02:00
uumas
dac44638e6 service: Don't use different networks for additional containers 2026-03-12 03:08:42 +02:00
uumas
489b8eaade service: Use native socket for oauth2 proxy 2026-03-12 03:08:36 +02:00
uumas
956f8ed6ce Use uumas.general.systemd_socket role 2026-03-12 01:54:43 +02:00
uumas
63e6f938bb Add vscode configuration 2026-03-12 00:45:48 +02:00
uumas
61c0724801 Add nextcloud role 2026-03-12 00:45:25 +02:00
uumas
ea2a2c3652 Add forgejo role 2026-03-12 00:42:54 +02:00
uumas
31cf49b004 service: Improve additional container support 2026-03-12 00:42:00 +02:00
uumas
9e3e1496f0 service: Split container network namespaces 2026-03-12 00:40:54 +02:00
uumas
190527e877 naming and documentation fixes 2026-03-12 00:38:42 +02:00
uumas
fb39f1bfc8 service: Don't require postgres tag specified 2026-03-12 00:36:11 +02:00
uumas
69ae1687b7 service: Add support for mongodb 2026-03-12 00:35:42 +02:00
uumas
efc7bf5434 service: Improve native sockets 2026-03-12 00:34:25 +02:00
uumas
294b931d19 service: Support publishing arbitrary ports through sockets 2026-03-12 00:32:10 +02:00
uumas
470b60f988 service: Support postgres >= 18 and postgres upgrades 2026-03-12 00:26:35 +02:00
uumas
c673aae8dc synapse: Use simple database hostname 2026-03-12 00:23:22 +02:00
uumas
4a68ab25e1 .yml -> .yaml 2026-03-12 00:22:52 +02:00
uumas
bf4ced4a9b service: Support cap_add 2026-03-12 00:20:19 +02:00
uumas
5a3bb96fc2 container: Support cap_add 2026-03-12 00:17:18 +02:00
uumas
447d4e59ad container: Stop container if process is oomkilled 2026-03-12 00:16:56 +02:00
uumas
f6af1d3472 service: Make supporting containers accessible using simple hostnames 2026-03-12 00:16:11 +02:00
uumas
ff1badbf03 container: Support setting container hostname 2026-03-12 00:12:03 +02:00
uumas
f721641fc6 Use systemd handlers from uumas.general.systemd 2026-03-12 00:10:31 +02:00
uumas
fce8804653 Make socat its own role 2026-03-12 00:06:37 +02:00
uumas
accd5ece14 small improvements 2026-02-13 02:00:55 +02:00
uumas
58ff2f6217 service: Add support for native sockets for http 2026-02-13 02:00:44 +02:00
uumas
4079b69338 Add gitignore for release archives 2026-02-12 20:05:09 +02:00
uumas
c7e26555b7 v0.1.1 2026-02-12 20:04:38 +02:00
uumas
07925caa95 Rename runtime.yaml to runtime.yml 2026-02-12 20:02:12 +02:00
uumas
de707b4e71 service: Add support for mariadb 2026-02-12 20:01:52 +02:00
uumas
0ee8e9b254 Add support for container devices 2026-01-24 17:12:57 +02:00
uumas
b030d671b5 service: Add support for mounting entire copied directory 2026-01-09 17:24:01 +02:00
uumas
b2540e2bd3 service: Validation, set default for mounts in additional containers 2025-11-26 22:26:12 +02:00
uumas
47088fd1a0 Allow setting container entrypoint 2025-11-26 22:25:54 +02:00
uumas
abf3859af7 container: rename task 2025-10-06 16:18:16 +03:00
uumas
bdec55ffc7 Use auth file instead of creds in quadlet files 2025-10-06 16:17:27 +03:00
uumas
2712cf2865 service: Support setting templated file mode 2025-09-16 12:37:20 +03:00
uumas
c5fb7f599c podman: Login to registries 2025-09-16 12:36:58 +03:00
uumas
597faa3fd5 service: Set static ips for other containers too 2025-09-15 12:34:57 +03:00
uumas
16babfd5ed service: Have service depend on oauth2-proxy socat socket if set 2025-09-15 12:34:21 +03:00
uumas
d3542993dd container: Set network ip range when using a static container ip 2025-09-15 12:32:22 +03:00
uumas
a93c26864d network: Support setting ip range 2025-09-15 12:31:40 +03:00
uumas
b333bbebbd Add prometheus role 2025-09-14 03:21:33 +03:00
uumas
fea49be8d1 Use service-specific oauth2-proxy instances 2025-09-14 03:10:20 +03:00
uumas
aaca377811 service: Support static ip for service container 2025-09-14 03:09:28 +03:00
uumas
0b73582f36 container: Support static ip for container 2025-09-14 03:08:24 +03:00
uumas
ad50e05ee9 network: Support static subnet 2025-09-14 03:07:28 +03:00
uumas
12f32f5824 network: Support macvlan driver 2025-09-14 03:07:03 +03:00
uumas
586f98bc9f synapse: Use federation port 8448 2025-09-14 03:05:06 +03:00
uumas
a29908b507 podman: Ensure auto update timer is enabled 2025-09-14 03:04:22 +03:00
uumas
c96997a4ec lint 2025-09-13 17:36:05 +03:00
uumas
014edb08ac service: fix template mounts for additional containers 2025-08-28 11:02:35 +03:00
uumas
d260e28625 synapse: Use regex for mas-proxied paths 2025-07-19 20:03:12 +03:00
uumas
39b35b30a9 grafana: Support additional networks 2025-07-13 19:09:44 +03:00
uumas
6baab11851 service: Support proxy forward auth using OAuth2 Proxy 2025-07-13 19:09:08 +03:00
uumas
543a34f60d Add oauth2_proxy role 2025-07-13 19:08:10 +03:00
uumas
4e4f824958 service: Support additional networks for database 2025-07-13 19:05:37 +03:00
uumas
a8a7dfc688 container: Restart container when secret changed 2025-07-11 20:44:39 +03:00
uumas
f52ba4eced service: Allow socat without reverse proxy config 2025-07-11 20:41:04 +03:00
uumas
303d3a384a Add grafana role 2025-07-10 00:56:23 +03:00
uumas
d6083ec2be image: Add readme 2025-07-10 00:54:18 +03:00
uumas
4d3a5933c0 service: Better organize vars 2025-07-05 16:52:10 +03:00
uumas
8b55af2d06 synapse: Fix signing key path 2025-07-05 16:37:00 +03:00
uumas
e38c283825 service: Fix additional container mounts to use service prefix 2025-07-05 16:36:33 +03:00
uumas
2182b821f4 service: Use properly prefixed loop var for additional containers 2025-07-05 16:35:57 +03:00
uumas
6437c78a94 service: Move mount variable definitions form set_fact to vars 2025-07-05 16:33:29 +03:00
uumas
c31fbf0833 service: Support specifying container command 2025-07-05 16:24:26 +03:00
uumas
84036653fe container: Set exit code 143 as success
Some applications return 143 when exiting due to SIGTERM
2025-07-05 16:20:34 +03:00
uumas
a862606df2 volume: Support device-based volumes
Also support those options in container and service roles
2025-07-05 16:19:14 +03:00
uumas
faa68bfe83 service: Support setting type and target for db password secret 2025-06-28 13:46:07 +03:00
uumas
a10bf366e6 service: Allow custom postgres image 2025-06-27 00:06:28 +03:00
uumas
78860da6a4 service: Add support for redis 2025-06-27 00:06:28 +03:00
uumas
aa9eabf19c service: Actually do something with service_additional_containers 2025-06-27 00:06:23 +03:00
uumas
2e14434c9f service: Set secret target to secret name
This is a breaking change as it was previously prefixed with service
name
2025-06-27 00:05:51 +03:00
uumas
8f29c2815e container: Allow setting secret target 2025-06-26 00:51:06 +03:00
uumas
60529c18cd Revert "container: shell quote environment variables"
This reverts commit 61aa99bcd1.
2025-06-20 13:13:55 +03:00
uumas
7d8b1cb258 container: Add support for specifying secret type (untested) 2025-06-17 09:08:59 +03:00
uumas
3ac6b98a30 Add image role, support logging in to registries 2025-04-10 19:27:16 +03:00
uumas
093e7846ad service: Option to not pass host header to container service 2025-04-05 04:38:57 +03:00
uumas
6acdcd6dac Make compatcheck deduplicatable 2025-04-05 00:35:22 +03:00
uumas
bdac6ee513 network: Avoid duplicate runs 2025-04-05 00:25:11 +03:00
uumas
56d86d964c podman: remove tags 2025-04-05 00:24:49 +03:00
uumas
5a154d3f17 Move service_container_requires from set_fact to vars 2025-04-04 22:16:51 +03:00
uumas
3b354ef3b8 service: reformat postgres_tag description in argspec 2025-04-04 22:16:21 +03:00
uumas
7b46279c63 service: Better use podman secrets for database passwords 2025-04-04 22:16:08 +03:00
uumas
68b3dcb49c service: rename secrets return variable 2025-04-04 22:13:07 +03:00
uumas
9bb2b24948 Make randomly generated secret length configurable 2025-04-04 21:21:42 +03:00
uumas
335656a166 Use import_role where possible 2025-04-04 21:20:21 +03:00
uumas
4c44845a8b galaxy.yaml -> galaxy.yml 2025-03-31 03:16:12 +03:00
99 changed files with 2969 additions and 264 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
uumas-podman-*.tar.gz

5
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,5 @@
{
"files.associations": {
"*.yaml": "ansible"
}
}

View File

@@ -3,7 +3,7 @@ namespace: uumas
name: podman
description: Roles for installing services in podman containers
readme: README.md
version: 0.1.0
version: 0.1.1
repository: "https://git.uumas.fi/uumas/ansible-podman"
license_file: LICENSE
authors:

View File

@@ -0,0 +1,4 @@
---
caddy_socket_proxy_target_container: "{{ caddy_socket_proxy_service_name }}"
caddy_socket_proxy_container_ip: ""
caddy_socket_proxy_auto_update: true

View File

@@ -0,0 +1,30 @@
---
argument_specs:
main:
description: >-
Sets up a caddy container and a systemd socket unit, forwarding traffic from it to
target container
options:
caddy_socket_proxy_service_name:
description: Name of the caddy service, used for systemd unit and container naming
type: str
required: true
caddy_socket_proxy_target_container:
description: Name of the container to forward traffic to
type: str
required: false
default: "{{ caddy_socket_proxy_service_name }}"
caddy_socket_proxy_target_http_port:
description: Port on the target container to forward traffic to
type: int
required: true
caddy_socket_proxy_container_ip:
description: IP address to assign to the caddy container.
type: str
required: false
default: ""
caddy_socket_proxy_auto_update:
description: Whether to automatically update the caddy container
type: bool
required: false
default: true

View File

@@ -0,0 +1,45 @@
---
- name: Create caddy socket proxy mount directories for {{ caddy_socket_proxy_service_name }}
ansible.builtin.file:
path: "{{ item.key }}"
state: directory
mode: "{{ item.value }}"
with_dict:
"/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/": "0755"
"/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts": "0700"
"/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy": "0755"
- name: Configure caddy socket proxy for {{ caddy_socket_proxy_service_name }}
ansible.builtin.template:
src: Caddyfile.j2
dest: "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy/Caddyfile"
mode: "0644"
notify: Restart container service {{ caddy_socket_proxy_service_name }}-caddy-socket-proxy
- name: Caddy socket proxy socket for {{ caddy_socket_proxy_service_name }}
ansible.builtin.import_role:
name: uumas.general.systemd_socket
vars:
systemd_socket_name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
systemd_socket_requires:
- "{{ caddy_socket_proxy_target_container }}.service"
- name: Caddy container for {{ caddy_socket_proxy_service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
container_image: "docker.io/library/caddy:2-alpine"
container_mounts:
- type: bind
source: "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy"
destination: /etc/caddy
readonly: true
container_networks:
- name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
ip: "{{ caddy_socket_proxy_container_ip }}"
container_requires:
- "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy.socket"
- "{{ caddy_socket_proxy_target_container }}.service"
container_auto_start: false
container_auto_update: "{{ caddy_socket_proxy_auto_update }}"

View File

@@ -0,0 +1,12 @@
# {{ ansible_managed }}
{
servers {
trusted_proxies_unix
}
}
http:// {
bind fd/3
reverse_proxy {{ caddy_socket_proxy_service_name }}:{{ service_container_http_port }}
}
}

View File

@@ -1,12 +1,16 @@
---
container_command: []
container_entrypoint: ""
container_user: ""
container_mounts: []
container_devices: []
container_publish_ports: []
container_networks: []
container_hostname: ""
container_secrets: []
container_env: {}
container_auto_start: true
container_auto_update: true
container_requires: []
container_wants: []
container_add_capabilities: []

View File

@@ -1,7 +1,6 @@
---
- name: "Restart container service {{ container_name }}"
ansible.builtin.systemd_service:
name: "{{ container_name }}.service"
state: restarted
daemon_reload: true
ignore_errors: '{{ ansible_check_mode }}'
- name: Restart container service {{ container_name }}
ansible.builtin.set_fact:
systemd_restart_units: "{{ systemd_restart_units + [container_name ~ '.service'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -7,22 +7,28 @@ argument_specs:
description: Name of the container. Must be unique within a host.
type: str
required: true
container_image:
description: "The image to run in the container, in FQIN format (registry/imagename:tag)"
type: str
required: true
container_command:
description: Command to start the container with.
type: list
required: false
default: []
elements: str
container_entrypoint:
description: Entrypoint to use for the container
type: str
required: false
default: ""
container_user:
description: The UID to run as inside the container
type: str
required: false
default: ""
container_image:
description: "The image to run in the container, in FQIN format (registry/image_name:tag)"
type: str
required: true
container_mounts:
description: List of bind mounts or volumes to be mounted inside the container.
type: list
@@ -31,7 +37,7 @@ argument_specs:
elements: dict
options:
type:
description: Type of volume
description: Type of mount
type: str
required: true
choices:
@@ -63,7 +69,44 @@ argument_specs:
type: str
required: false
default: ""
volume_device:
description: >-
The path of a device which is mounted for the volume.
Only applicable if mount type is volume.
type: str
required: false
default: ""
volume_type:
description: >-
The filesystem type of device as used by the mount commands -t option
Only applicable if mount type is volume.
type: str
required: false
default: ""
volume_mount_options:
description: >-
The mount options to use for a filesystem as used by the mount command -o option
Only applicable if mount type is volume.
type: list
elements: str
required: false
default: []
container_devices:
description: List of devices to be added inside the container.
type: list
required: false
default: []
elements: dict
options:
source:
description: Device path on host
type: str
required: true
destination:
description: Device path inside the container. Defaults to same as host.
type: str
required: false
container_publish_ports:
description: "A list of published ports in docker format (<host listen address>:<host port>:<container port>)"
type: list
@@ -75,9 +118,25 @@ argument_specs:
type: list
required: false
default: []
elements: str
elements: dict
options:
name:
description: Network name
type: str
required: true
ip:
description: Container IPv4 address in the network
type: str
required: false
default: ""
container_hostname:
description: Hostname to set inside the container. Available to other containers on the same network.
type: str
required: false
default: ""
container_secrets:
description: A list of secrets available to the container in /run/secrets/<secret name>
description: A list of secrets available to the container as file or environment variable
type: list
required: false
default: []
@@ -93,6 +152,25 @@ argument_specs:
- If the value is not explicitly set, it will not be changed if the secret already exists.
type: str
required: false
length:
description: Length of randomly generated string
type: int
required: false
default: 128
type:
description: How the secret will be exposed to the container
type: str
choices:
- mount
- env
default: mount
target:
description: >
Where the secret will be available inside the container. If type is mount, this is
either a full file path or a filename under /run/secrets. If type is env, this is
the name of the environment variable. Defaults to secret name.
type: str
required: false
container_env:
description: A dict of environment variables for the container
@@ -100,6 +178,13 @@ argument_specs:
required: false
default: {}
container_add_capabilities:
description: List of capabilities to add to the container
type: list
required: false
default: []
elements: str
container_requires:
description: >
List of systemd units (like other containers) this one depends on.

View File

@@ -2,11 +2,21 @@
- name: Validate inputs
ansible.builtin.import_tasks: validation.yaml
- name: Create image for container {{ container_name }}
ansible.builtin.include_role:
name: image
vars:
image_name: "{{ container_image }}"
when: image_created_images is not defined or container_image not in image_created_images
- name: Create networks for container {{ container_name }}
ansible.builtin.include_role:
name: network
vars:
network_name: "{{ network }}"
network_name: "{{ network.name }}"
network_subnet: "{{ _container_network_subnet }}"
network_range: "{{ _container_network_range }}"
when: network_created_networks is not defined or network.name not in network_created_networks
loop: "{{ container_networks }}"
loop_control:
loop_var: network
@@ -18,6 +28,9 @@
volume_name: "{{ volume.source }}"
volume_uid: "{{ volume.user | default('') }}"
volume_gid: "{{ volume.group | default('') }}"
volume_type: "{{ volume.volume_type | default('') }}"
volume_device: "{{ volume.volume_device | default('') }}"
volume_mount_options: "{{ volume.volume_mount_options | default([]) }}"
loop: "{{ _container_volumes }}"
loop_control:
loop_var: volume
@@ -26,18 +39,25 @@
ansible.builtin.include_tasks: secrets.yaml
when: container_secrets | length > 0
- name: Create container service {{ container_name }}
- name: Create container {{ container_name }}
containers.podman.podman_container:
image: "{{ container_image }}"
image: "{{ _container_image }}"
name: "{{ container_name }}"
command: "{{ container_command or omit }}"
entrypoint: "{{ container_entrypoint or omit }}"
user: "{{ container_user or omit }}"
mount: "{{ _container_mounts | map('items') | map('map', 'join', '=') | map('join', ',') }}"
network: "{{ container_networks | map('regex_replace', '$', '.network') }}"
device: "{{ _container_devices }}"
network: "{{ _container_networks }}"
hostname: "{{ container_hostname or omit }}"
publish: "{{ container_publish_ports }}"
secrets: "{{ container_secrets | map(attribute='name') }}"
env: "{{ container_env.keys() | zip(container_env.values() | map('quote')) | community.general.dict }}"
secrets: "{{ _container_secrets }}"
env: "{{ container_env }}"
cap_add: "{{ container_add_capabilities }}"
label: "{{ _container_labels if _container_labels | length > 0 else omit }}"
state: quadlet
quadlet_file_mode: "0600"
quadlet_options: "{{ _container_quadlet_options }}"
notify: "Restart container service {{ container_name }}"
notify:
- Reload systemd daemon
- Restart container service {{ container_name }}

View File

@@ -2,7 +2,8 @@
- name: Create secrets for container {{ container_name }}
containers.podman.podman_secret:
name: "{{ item.name }}"
data: "{{ item.value | default(lookup('community.general.random_string', special=false, length=128)) }}"
data: "{{ item.value | default(lookup('community.general.random_string', special=false, length=item.length | default(128))) }}"
skip_existing: "{{ item.value is not defined }}"
no_log: true
loop: "{{ container_secrets }}"
notify: Restart container service {{ container_name }}

View File

@@ -1,4 +1,6 @@
---
_container_image: "{{ container_image | replace('/', '_') ~ '.image' }}"
_container_volumes: "{{ container_mounts | selectattr('type', '==', 'volume') }}"
_container_mount_sources: "{{ container_mounts | map(attribute='source') }}"
@@ -16,10 +18,57 @@ _container_mounts: >-
{{
container_mounts | selectattr('type', '!=', 'volume') +
container_mounts | selectattr('type', '==', 'volume')
| community.general.remove_keys(['user', 'group'])
| community.general.keep_keys(['type', 'source', 'destination', 'readonly'])
| zip(_container_volume_mount_sources) | map('combine')
}}
_container_devices_withdefaults: >-
{{
container_devices
| map(attribute='source')
| map('community.general.dict_kv', 'destination')
| zip(container_devices)
| map('combine')
}}
_container_devices: >-
{{
_container_devices_withdefaults
| map(attribute='source')
| zip(
_container_devices_withdefaults
| map(attribute='destination')
)
| map('join', ':')
}}
_container_secrets: >-
{{
container_secrets
| map(attribute='name')
| zip(
container_secrets
| map(attribute='type', default='mount')
| map('regex_replace', '^', 'type='),
container_secrets
| map(attribute='name')
| map('community.general.dict_kv', 'target')
| zip(container_secrets)
| map('combine')
| map(attribute='target')
| map('regex_replace', '^', 'target=')
)
| map('join', ',')
}}
_container_labels: >-
{{
{'io.containers.autoupdate.authfile': '/etc/containers/auth.json'}
if container_auto_update and
container_image.split('/')[0] in
podman_registry_accounts | map(attribute='registry')
else {}
}}
_container_quadlet_unit_options: |
[Unit]
Description=Container {{ container_name }}
@@ -32,6 +81,9 @@ _container_quadlet_unit_options: |
{% for want in container_wants %}
Wants={{ want }}
{% endfor %}
[Service]
SuccessExitStatus=0 143
OOMPolicy=stop
_container_quadlet_auto_start_options: |
[Service]
Restart=always

View File

@@ -0,0 +1,27 @@
---
_container_networks: >-
{{
container_networks
| map(attribute='name')
| map('regex_replace', '$', '.network')
| zip(container_networks | map(attribute='ip', default=''))
| map('reject', 'equalto', '')
| map('join', ':ip=')
}}
_container_network_subnet: >-
{{ network.ip | ansible.utils.ipsubnet(24) if network.ip | default('') | length > 0 else '' }}
_container_network_subnet_ranges: >-
{{
[
_container_network_subnet | ansible.utils.ipsubnet(25, 0),
_container_network_subnet | ansible.utils.ipsubnet(25, 1)
] if network.ip | default('') | length > 0 else []
}}
_container_network_range: >-
{{
_container_network_subnet_ranges |
reject('ansible.utils.supernet_of', network.ip) |
first
if network.ip | default('') | length > 0 else ''
}}

View File

@@ -1,6 +1,6 @@
---
- name: Hello world container
ansible.builtin.include_role:
- name: Hello world service
ansible.builtin.import_role:
name: service
vars:
service_name: hello-world
@@ -19,7 +19,6 @@
service_container_http_port: 8080
service_domains: "{{ example_domains }}"
service_database_type: postgres
service_postgres_tag: 16-alpine
service_container_publish_ports:
- "127.0.0.1:8080:8080"
- "0.0.0.0:4443:8043"
@@ -27,6 +26,9 @@
- network-online.target
service_container_env:
TZ: "Etc/UTC"
DB_HOST: postgres
DB_USER: hello-world
DB_PASSWORD__FILE: /run/secrets/postgres
service_additional_containers:
- name: worker
# image: "docker.io/library/hello-world:latest"

1
roles/forgejo/README.md Normal file
View File

@@ -0,0 +1 @@
Installs and configures forgejo inside podman

View File

@@ -0,0 +1,6 @@
---
forgejo_require_signin_view: false
forgejo_enable_internal_signin: true
forgejo_smtp_user: ""
forgejo_smtp_password: ""

View File

@@ -0,0 +1,45 @@
---
argument_specs:
main:
description: "Installs and configures forgejo inside podman"
options:
forgejo_tag:
description: Forgejo version to use. Can be major (x), minor (x.y) or patch (x.y.z). Major version recommended.
type: str
required: true
forgejo_domain:
description: Domain forgejo should listen on
type: str
required: true
forgejo_secret_key:
description: A long secret key for forgejo to encrypt secrets with. Must never change.
type: str
required: true
forgejo_smtp_server:
description: Smtp server for forgejo
type: str
required: true
forgejo_smtp_from:
description: Address to send email from
type: str
required: true
forgejo_smtp_user:
description: Smtp user to authenticate as
type: str
required: false
default: ""
forgejo_smtp_password:
description: Smtp password to authenticate with
type: str
required: false
default: ""
forgejo_require_signin_view:
description: Whether to require signing in to view public repositories
type: bool
required: false
default: false
forgejo_enable_internal_signin:
description: Whether to enable signing in using local username/password
type: bool
required: false
default: true

View File

@@ -0,0 +1,81 @@
---
- name: Ensure netcat-openbsd is installed for ssh shell
ansible.builtin.apt:
name: netcat-openbsd
- name: Create git system user on host for forgejo ssh
ansible.builtin.user:
name: git
group: git
system: true
home: /srv/forgejo/git
generate_ssh_key: true
ssh_key_type: ed25519
shell: /srv/forgejo/git/ssh-shell
register: _forgejo_git_user
- name: Add git user's own ssh key to its authorized keys
ansible.posix.authorized_key:
user: git
key: "{{ _forgejo_git_user.ssh_public_key }}"
- name: Install ssh forwarding shell for forgejo
ansible.builtin.template:
src: ssh-shell.j2
dest: /srv/forgejo/git/ssh-shell
mode: "0755"
- name: Forgejo service
ansible.builtin.import_role:
name: service
vars:
service_name: forgejo
service_container_image: codeberg.org/forgejo/forgejo:{{ forgejo_tag }}
service_container_mounts:
- type: volume
source: data
destination: /data
- type: bind
source: /etc/localtime
destination: /etc/localtime
readonly: true
- type: bind
source: /srv/forgejo/git/.ssh
destination: /data/git/.ssh
service_container_secrets:
- name: secret-key
value: "{{ forgejo_secret_key }}"
service_domains:
- "{{ forgejo_domain }}"
service_database_type: postgres
service_postgres_tag: 18-alpine
service_container_publish_ports:
- name: ssh
type: socket
container_port: 22
service_container_env:
USER_UID: "{{ _forgejo_git_user.uid }}"
USER_GID: "{{ _forgejo_git_user.group }}"
FORGEJO__security__SECRET_KEY_URI: file:/run/secrets/secret-key
FORGEJO__database__DB_TYPE: postgres
FORGEJO__database__USER: forgejo
FORGEJO__database__NAME: forgejo
FORGEJO__database__HOST: postgres
FORGEJO__database__PASSWD__FILE: /run/secrets/postgres
FORGEJO__server__PROTOCOL: http+unix
FORGEJO__server__HTTP_ADDR: /run/forgejo.sock
FORGEJO__server__DOMAIN: "{{ forgejo_domain }}"
FORGEJO__server__ROOT_URL: https://{{ forgejo_domain }}
FORGEJO__server__SSH_ALLOW_UNEXPECTED_AUTHORIZED_KEYS: "true"
FORGEJO__mailer__ENABLED: "true"
FORGEJO__mailer__PROTOCOL: smtp
FORGEJO__mailer__SMTP_ADDR: "{{ forgejo_smtp_server }}"
FORGEJO__mailer__SMTP_PORT: "587"
FORGEJO__mailer__FROM: "{{ forgejo_smtp_from }}"
FORGEJO__mailer__USER: "{{ forgejo_smtp_user }}"
FORGEJO__mailer__PASSWD: "{{ forgejo_smtp_password }}"
FORGEJO__service__DISABLE_REGISTRATION: "true"
FORGEJO__service__REQUIRE_SIGNIN_VIEW: "{{ 'true' if forgejo_require_signin_view else 'false' }}"
FORGEJO__service__ENABLE_INTERNAL_SIGNIN: "{{ 'true' if forgejo_enable_internal_signin else 'false' }}"
FORGEJO__oauth2_client__ENABLE_AUTO_REGISTRATION: "true"
FORGEJO__openid__ENABLE_OPENID_SIGNIN: "false"

View File

@@ -0,0 +1,4 @@
#!/bin/bash
# {{ ansible_managed }}
shift
SHELL=/bin/bash ssh -o "ProxyCommand nc -U /run/forgejo-ssh-socat.sock" -o StrictHostKeyChecking=no git@forgejo "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $@"

1
roles/grafana/README.md Normal file
View File

@@ -0,0 +1 @@
Installs and configures grafana

View File

@@ -0,0 +1,15 @@
---
grafana_additional_networks: []
grafana_oauth_name: ""
grafana_oauth_client_id: ""
grafana_oauth_auth_url: ""
grafana_oauth_token_url: ""
grafana_oauth_api_url: ""
grafana_oauth_scopes:
- openid
- profile
- email
grafana_oauth_role_attribute_path: ""
grafana_oauth_allow_sign_up: true
grafana_oauth_auto_login: true

View File

@@ -0,0 +1,80 @@
---
argument_specs:
main:
description: Installs and configures grafana
options:
grafana_domain:
description: The domain grafana should be available on
type: str
required: true
grafana_additional_networks:
description: >-
A list of additional podman networks for the grafana container (in
addition to grafana network).
type: list
required: false
default: []
elements: str
grafana_oauth_name:
description: >-
Name that refers to the generic OAuth2 authentication from the Grafana
user interface. Required to enable OAuth authentication.
type: str
required: false
default: ""
grafana_oauth_client_id:
description: >-
Client ID provided by your OAuth2 app. Required if OAuth is enabled.
type: str
required: false
default: ""
grafana_oauth_auth_url:
description: Authorization endpoint of your OAuth2 provider. Required if OAuth is enabled.
type: str
required: false
default: ""
grafana_oauth_token_url:
description: Endpoint used to obtain the OAuth2 access token.
type: str
required: false
default: ""
grafana_oauth_api_url:
description: Endpoint used to obtain user information compatible with OpenID UserInfo.
type: str
required: false
default: ""
grafana_oauth_scopes:
description: List of OAuth2 scopes.
type: list
required: false
elements: str
default:
- openid
- profile
- email
grafana_oauth_role_attribute_path:
description: >-
JMESPath expression to use for Grafana role lookup. Grafana will first
evaluate the expression using the OAuth2 ID token. If no role is found,
the expression will be evaluated using the user information obtained
from the UserInfo endpoint. The result of the evaluation should be
a valid Grafana role (Viewer, Editor, Admin or GrafanaAdmin).
type: str
required: false
default: ""
grafana_oauth_allow_sign_up:
description: >-
Controls Grafana user creation through the generic OAuth2 login. Only
existing Grafana users can log in with generic OAuth if set to false.
type: bool
required: false
default: true
grafana_oauth_auto_login:
description: >-
Whether to enable users to bypass the login screen and automatically
log in. This setting is ignored if you configure multiple auth
providers to use auto-login.
type: bool
required: false
default: true

View File

@@ -0,0 +1,40 @@
---
- name: Grafana
ansible.builtin.import_role:
name: service
vars:
service_name: grafana
service_container_image: "docker.io/grafana/grafana:latest"
service_container_mounts:
- type: volume
source: data
destination: /var/lib/grafana
service_container_http_port: 3000
service_domains:
- "{{ grafana_domain }}"
service_database_type: postgres
service_postgres_tag: 17-alpine
service_container_additional_networks: "{{ grafana_additional_networks }}"
service_container_env:
GF_DATABASE_TYPE: postgres
GF_DATABASE_HOST: postgres:5432
GF_DATABASE_NAME: grafana
GF_DATABASE_USER: grafana
GF_DATABASE_PASSWORD__FILE: /run/secrets/postgres
GF_SERVER_DOMAIN: "{{ grafana_domain }}"
GF_SERVER_ROOT_URL: "https://{{ grafana_domain }}"
GF_AUTH_GENERIC_OAUTH_ENABLED: "{{ 'true' if grafana_oauth_name | length > 0 else 'false' }}"
GF_AUTH_GENERIC_OAUTH_NAME: "{{ grafana_oauth_name }}"
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: "{{ grafana_oauth_client_id }}"
GF_AUTH_GENERIC_OAUTH_AUTH_URL: "{{ grafana_oauth_auth_url }}"
GF_AUTH_GENERIC_OAUTH_TOKEN_URL: "{{ grafana_oauth_token_url }}"
GF_AUTH_GENERIC_OAUTH_API_URL: "{{ grafana_oauth_userinfo_url }}"
GF_AUTH_GENERIC_OAUTH_SCOPES: "{{ grafana_oauth_scopes | join(' ') }}"
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: "{{ 'true' if grafana_oauth_allow_sign_up else 'false' }}"
GF_AUTH_GENERIC_OAUTH_AUTO_LOGIN: "{{ 'true' if grafana_oauth_auto_login else 'false' }}"
GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH: "{{ grafana_oauth_role_attribute_path }}"
GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_STRICT: "true"
GF_AUTH_GENERIC_OAUTH_ALLOW_ASSIGN_GRAFANA_ADMIN: "true"
GF_AUTH_GENERIC_OAUTH_USE_PKCE: "true"
GF_AUTH_GENERIC_OAUTH_LOGIN_ATTRIBUTE_PATH: preferred_username

2
roles/image/README.md Normal file
View File

@@ -0,0 +1,2 @@
Sets up podman image with systemd unit (quadlet)
The image unit filename is `image_name` with / replaced by _

View File

@@ -0,0 +1,11 @@
---
argument_specs:
main:
description:
- Sets up podman image with systemd unit (quadlet)
- The image unit filename is `image_name` with / replaced by _
options:
image_name:
description: "The image FQIN (format registry/imagename:tag)"
type: str
required: true

View File

@@ -0,0 +1,3 @@
---
dependencies:
- role: podman

View File

@@ -0,0 +1,19 @@
---
# Records every image this role created so other roles (e.g. service) can
# reference the list of quadlet-managed images on the host.
- name: Set variables for use by other roles
ansible.builtin.set_fact:
image_created_images: "{{ image_created_images | default([]) + [image_name] }}"
# Generates a quadlet .image unit; "/" in the FQIN is replaced by "_" in the
# unit filename (the ":tag" part is kept as-is).
- name: Create container image service {{ image_name }}
containers.podman.podman_image:
name: "{{ image_name }}"
state: quadlet
quadlet_filename: "{{ image_name | replace('/', '_') }}"
quadlet_file_mode: "0600"
# Attach the shared auth file only when the image's registry (first FQIN
# segment) is one of the registries the podman role logs in to.
quadlet_options: >-
{{
['AuthFile=/etc/containers/auth.json']
if image_name.split('/')[0] in
podman_registry_accounts | map(attribute='registry')
else []
}}
notify: Reload systemd daemon

View File

@@ -33,7 +33,7 @@ http:
database:
host: matrix-authentication-service-postgres
username: matrix_authentication_service
password: "{{ _service_database_password }}"
password: "{{ service_podman_secrets['matrix-authentication-service-postgres'] }}"
database: matrix_authentication_service
secrets:

View File

@@ -0,0 +1,4 @@
---
network_driver: bridge
network_subnet: ""
network_range: ""

View File

@@ -0,0 +1,6 @@
---
- name: Restart network service {{ network_name }}
ansible.builtin.set_fact:
systemd_restart_units: "{{ systemd_restart_units + [network_name ~ '-network.service'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -7,3 +7,21 @@ argument_specs:
description: Name of the network. Must be unique within a host.
type: str
required: true
network_driver:
description: Driver to manage the network
type: str
required: false
default: bridge
choices:
- bridge
- macvlan
network_subnet:
description: Subnet for the network
type: str
required: false
default: ""
network_range:
description: Range to allocate ip addresses from
type: str
required: false
default: ""

View File

@@ -1,7 +1,22 @@
---
- name: "Create container network service {{ network_name }}"
- name: Create container network service {{ network_name }}
  containers.podman.podman_network:
    name: "{{ network_name }}"
    state: quadlet
    quadlet_file_mode: "0644"
    driver: "{{ network_driver }}"
    # Empty string means "not configured"; omit lets podman pick defaults.
    subnet: "{{ network_subnet if network_subnet | length > 0 else omit }}"
    ip_range: "{{ network_range if network_range | length > 0 else omit }}"
    # "omit" only takes effect on whole module parameters, not on values
    # nested inside a dict — a nested omit would pass the placeholder string
    # through as the option value. Build the opt dict conditionally instead:
    # macvlan needs a parent interface, bridge needs no opts at all.
    opt: "{{ {'parent': ansible_facts.default_ipv4.interface} if network_driver == 'macvlan' else omit }}"
    quadlet_options:
      - |-
        [Service]
        ExecStopPost=/usr/bin/podman network rm {{ network_name }}
  notify:
    - Reload systemd daemon
    - Restart network service {{ network_name }}
- name: Add network to created networks variable
  ansible.builtin.set_fact:
    network_created_networks: "{{ network_created_networks | default([]) + [network_name] }}"

View File

@@ -0,0 +1 @@
Sets up a nextcloud podman container, including HaRP using podman in podman

View File

@@ -0,0 +1,3 @@
---
nextcloud_tag: stable
nextcloud_install_harp: false

View File

@@ -0,0 +1,29 @@
---
# Handlers for the optional HaRP sidecar: whenever the harp container is
# restarted, re-register it as the AppAPI daemon inside nextcloud.
- name: Unregister AppAPI daemon
containers.podman.podman_container_exec:
name: nextcloud
argv:
- /var/www/html/occ
- app_api:daemon:unregister
- harp
register: _nextcloud_appapi_unregister
# rc 0 = daemon existed and was removed; rc 1 (not registered yet, e.g. on
# first deploy) is treated as "nothing to do" rather than a failure.
changed_when: _nextcloud_appapi_unregister.rc == 0
failed_when: _nextcloud_appapi_unregister.rc not in [0, 1]
listen: Restart container service nextcloud-harp
# Runs via /bin/sh -c so the shared key can be read from the secret file by
# command substitution inside the container.
- name: Register AppAPI daemon
containers.podman.podman_container_exec:
name: nextcloud
argv:
- /bin/sh
- -c
# NOTE(review): "--harp_shared_key <value>" uses a space while the other
# harp options use "=" — confirm occ accepts both spellings for this option.
- >-
/var/www/html/occ app_api:daemon:register
--harp
--harp_frp_address=harp:8782
--harp_shared_key "$(cat /run/secrets/harp-shared-key)"
--net host
--set-default
--
harp HaRP docker-install http harp:8780 https://{{ nextcloud_domains[0] }}
listen: Restart container service nextcloud-harp

View File

@@ -0,0 +1,24 @@
---
argument_specs:
main:
description: "Sets up a nextcloud podman container, including HaRP using podman in podman"
options:
nextcloud_domains:
description: A list of domains nextcloud should listen on
type: list
required: true
elements: str
nextcloud_admin_password:
description: Password of the initial admin user
type: str
required: true
nextcloud_tag:
description: Nextcloud version to use
type: str
required: false
default: stable
nextcloud_install_harp:
description: Whether to install HaRP for nextcloud
type: bool
required: false
default: false

View File

@@ -0,0 +1,31 @@
---
- name: Nextcloud service
ansible.builtin.import_role:
name: service
vars:
service_name: nextcloud
service_container_image: docker.io/library/nextcloud:{{ nextcloud_tag }}
service_container_http_port: 80
service_domains: "{{ nextcloud_domains }}"
service_database_type: postgres
service_redis: true
service_container_mounts:
- type: volume
source: data
destination: /var/www/html
service_container_secrets:
- name: admin-password
value: "{{ nextcloud_admin_password }}"
- name: harp-shared-key
service_container_env:
POSTGRES_HOST: postgres
POSTGRES_DB: nextcloud
POSTGRES_USER: nextcloud
POSTGRES_PASSWORD_FILE: /run/secrets/postgres
REDIS_HOST: redis
TRUSTED_PROXIES: 10.0.0.0/8
NEXTCLOUD_TRUSTED_DOMAINS: "{{ nextcloud_domains | join(' ') }}"
NEXTCLOUD_ADMIN_USER: admin
NEXTCLOUD_ADMIN_PASSWORD_FILE: /run/secrets/admin-password
service_additional_containers: "{{ _nextcloud_additional_containers }}"
service_vhost_locations: "{{ _nextcloud_vhost_locations }}"

View File

@@ -0,0 +1,9 @@
[containers]
ipcns = "host"
cgroupns = "host"
cgroups = "disabled"
log_driver = "k8s-file"
[engine]
cgroup_manager = "cgroupfs"
events_logger = "file"
runtime = "crun"

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# {{ ansible_managed }}
# Entrypoint for the podman-in-podman HaRP sidecar: starts an inner podman API
# service, then runs the HaRP image through that inner podman in this script's
# foreground so the systemd unit tracks it.
_term() {
echo "Received SIGTERM, stopping all containers"
# $child holds the PID of the backgrounded "podman run" below.
kill "$child"
}
# Inner podman API listener ("-t 0" = never time out); HaRP reaches it via the
# docker-compatible socket mounted into the container below.
# NOTE(review): the socket path below hardcodes UID 1000 ("storage-run-1000")
# — confirm this matches the user the sidecar actually runs as.
podman system service -t 0 &
podman run \
--rm \
-v /run/secrets/harp-shared-key:/run/secrets/harp-shared-key:ro \
-e HP_SHARED_KEY_FILE=/run/secrets/harp-shared-key \
-e NC_INSTANCE_URL="https://{{ nextcloud_domains[0] }}" \
-e HP_TRUSTED_PROXY_IPS="10.0.0.0/8" \
-v /tmp/storage-run-1000/podman/podman.sock:/var/run/docker.sock \
-v /certs:/certs \
--name harp \
--network host \
ghcr.io/nextcloud/nextcloud-appapi-harp:release &
child=$!
# NOTE(review): a SIGTERM arriving before this trap is installed is missed —
# consider installing the trap before starting the child.
trap _term SIGTERM
# Wait for all background jobs (the API service and the HaRP container).
wait

View File

@@ -0,0 +1,45 @@
---
_nextcloud_cron_container:
name: cron
entrypoint: /cron.sh
_nextcloud_harp_container:
name: harp
add_capabilities:
- CAP_SYS_ADMIN
image: quay.io/podman/stable:latest
user: podman
entrypoint: /entrypoint.sh
devices:
- source: /dev/fuse
mounts:
- type: template
source: containers.conf.j2
destination: /etc/containers/containers.conf
- type: template
source: harp_entrypoint.sh.j2
destination: /entrypoint.sh
mode: "0755"
- type: volume
source: harp-certs
destination: /certs
- type: volume
source: harp-containers
destination: /home/podman/.local/share/containers
env: {}
secrets:
- name: harp-shared-key
publish_ports:
- name: harp
type: socket
container_port: 8780
_nextcloud_additional_containers: >-
{{
[_nextcloud_cron_container]
+ ([_nextcloud_harp_container] if nextcloud_install_harp else [])
}}
_nextcloud_harp_vhost_locations:
- path: /exapps/*
proxy_target_socket: /run/nextcloud-harp-socat.sock
_nextcloud_vhost_locations: >-
{{ _nextcloud_harp_vhost_locations if nextcloud_install_harp else [] }}

View File

@@ -0,0 +1,2 @@
---
podman_registry_accounts: []

View File

@@ -1,4 +0,0 @@
---
- name: Reload systemd daemon
ansible.builtin.systemd_service:
daemon_reload: true

View File

@@ -2,4 +2,23 @@
argument_specs:
main:
description: Installs podman
options: {}
options:
  podman_registry_accounts:
    # Fixed description: the spec declares a list of dicts, not a dict.
    description: List of accounts for container registries
    type: list
    required: false
    default: []
    elements: dict
    options:
      registry:
        description: Registry server to login to
        type: str
        required: true
      username:
        description: Username for the registry account
        type: str
        required: true
      password:
        description: Password / token for the registry account
        type: str
        required: true

View File

@@ -0,0 +1,10 @@
---
dependencies:
- role: uumas.general.systemd
- role: uumas.general.compatcheck
vars:
compatcheck_supported_distributions:
- name: debian
version_min: 13
- name: ubuntu
version_min: 24

View File

@@ -1,18 +1,22 @@
---
- name: Ensure host distribution is supported
ansible.builtin.import_role:
name: uumas.general.compatcheck
vars:
compatcheck_supported_distributions:
- name: debian
version_min: 13
- name: ubuntu
version_min: 24
tags: podman
- name: Install podman
ansible.builtin.apt:
name:
- podman
- aardvark-dns
tags: podman
- name: Ensure podman auto update timer is enabled
ansible.builtin.systemd_service:
name: podman-auto-update.timer
state: started
enabled: true
ignore_errors: "{{ ansible_check_mode }}"
- name: Login to registries
containers.podman.podman_login:
registry: "{{ item.registry }}"
username: "{{ item.username }}"
password: "{{ item.password }}"
authfile: /etc/containers/auth.json
loop: "{{ podman_registry_accounts }}"
no_log: true

View File

@@ -0,0 +1 @@
Installs and configures prometheus

View File

@@ -0,0 +1,4 @@
---
prometheus_additional_networks: []
prometheus_ping_hosts: []

View File

@@ -0,0 +1,35 @@
---
argument_specs:
main:
description: Installs and configures prometheus
options:
prometheus_additional_networks:
description: >-
A list of additional podman networks for the prometheus container (in
addition to prometheus network).
type: list
required: false
default: []
elements: str
prometheus_ping_hosts:
description: List of hosts to ping
type: list
required: false
default: []
elements: dict
options:
name:
description: Hostname to ping
type: str
required: true
type:
description: >-
Type of host. Monitored hosts are pinged to check if they are up.
Wan hosts are pinged to check if prometheus has internet access.
type: str
required: false
default: monitored
choices:
- monitored
- wan

View File

@@ -0,0 +1,28 @@
---
- name: Prometheus
ansible.builtin.import_role:
name: service
vars:
service_name: prometheus
service_container_image: "docker.io/prom/prometheus:latest"
service_container_mounts:
- type: template
source: prometheus.yml.j2
destination: /etc/prometheus/prometheus.yml
- type: volume
source: data
destination: /prometheus
- type: template
source: alerting/node-exporter.yaml.j2
destination: /etc/prometheus/alerting/node-exporter.yaml
- type: template
source: alerting/blackbox-exporter.yaml.j2
destination: /etc/prometheus/alerting/blackbox-exporter.yaml
service_container_additional_networks: "{{ prometheus_additional_networks }}"
service_additional_containers:
- name: blackbox-exporter
image: docker.io/prom/blackbox-exporter:latest
mounts:
- type: template
source: blackbox_exporter.yml.j2
destination: /etc/blackbox_exporter/config.yml

View File

@@ -0,0 +1,97 @@
{% raw %}
groups:
- name: BlackboxExporter
rules:
- alert: BlackboxAllWanProbesFailed
  expr: 'sum by (host_type) (probe_success{host_type="wan"})==0'
  for: 5s
  labels:
    severity: critical
  annotations:
    summary: Lost internet access
    # Fixed typo: was "descrtiption" — notification templates and receivers
    # read the "description" annotation, so the misspelled key was dropped.
    description: Failed to contact any wan probes
- alert: BlackboxProbeFailed
expr: 'probe_success == 0'
for: 0m
labels:
severity: critical
annotations:
summary: Blackbox probe failed (instance {{ $labels.instance }})
description: "Probe failed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxConfigurationReloadFailure
expr: 'blackbox_exporter_config_last_reload_successful != 1'
for: 0m
labels:
severity: warning
annotations:
summary: Blackbox configuration reload failure (instance {{ $labels.instance }})
description: "Blackbox configuration reload failure\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSlowProbe
expr: 'avg_over_time(probe_duration_seconds[1m]) > 1'
for: 1m
labels:
severity: warning
annotations:
summary: Blackbox slow probe (instance {{ $labels.instance }})
description: "Blackbox probe took more than 1s to complete\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxProbeHttpFailure
expr: 'probe_http_status_code <= 199 OR probe_http_status_code >= 400'
for: 0m
labels:
severity: critical
annotations:
summary: Blackbox probe HTTP failure (instance {{ $labels.instance }})
description: "HTTP status code is not 200-399\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSslCertificateWillExpireSoon
expr: '3 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 20'
for: 0m
labels:
severity: warning
annotations:
summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
description: "SSL certificate expires in less than 20 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSslCertificateWillExpireSoon
expr: '0 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 3'
for: 0m
labels:
severity: critical
annotations:
summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
description: "SSL certificate expires in less than 3 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSslCertificateExpired
expr: 'round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 0'
for: 0m
labels:
severity: critical
annotations:
summary: Blackbox SSL certificate expired (instance {{ $labels.instance }})
description: "SSL certificate has expired already\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxProbeSlowHttp
expr: 'avg_over_time(probe_http_duration_seconds[1m]) > 1'
for: 1m
labels:
severity: warning
annotations:
summary: Blackbox probe slow HTTP (instance {{ $labels.instance }})
description: "HTTP request took more than 1s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxProbeSlowPing
expr: 'avg_over_time(probe_icmp_duration_seconds[1m]) > 1'
for: 1m
labels:
severity: warning
annotations:
summary: Blackbox probe slow ping (instance {{ $labels.instance }})
description: "Blackbox ping took more than 1s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
{% endraw %}

View File

@@ -0,0 +1,322 @@
{% raw %}
groups:
- name: NodeExporter
rules:
- alert: HostOutOfMemory
expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)'
for: 2m
labels:
severity: warning
annotations:
summary: Host out of memory (instance {{ $labels.instance }})
description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostMemoryUnderMemoryPressure
expr: '(rate(node_vmstat_pgmajfault[5m]) > 1000)'
for: 0m
labels:
severity: warning
annotations:
summary: Host memory under memory pressure (instance {{ $labels.instance }})
description: "The node is under heavy memory pressure. High rate of loading memory pages from disk.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostMemoryIsUnderutilized
expr: 'min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8'
for: 0m
labels:
severity: info
annotations:
summary: Host Memory is underutilized (instance {{ $labels.instance }})
description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualNetworkThroughputIn
expr: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)'
for: 0m
labels:
severity: warning
annotations:
summary: Host unusual network throughput in (instance {{ $labels.instance }})
description: "Host receive bandwidth is high (>80%).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualNetworkThroughputOut
expr: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)'
for: 0m
labels:
severity: warning
annotations:
summary: Host unusual network throughput out (instance {{ $labels.instance }})
description: "Host transmit bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskReadRate
expr: '(rate(node_disk_io_time_seconds_total[5m]) > .80)'
for: 0m
labels:
severity: warning
annotations:
summary: Host unusual disk read rate (instance {{ $labels.instance }})
description: "Disk is too busy (IO wait > 80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostOutOfDiskSpace
expr: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0)'
for: 2m
labels:
severity: critical
annotations:
summary: Host out of disk space (instance {{ $labels.instance }})
description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostDiskMayFillIn24Hours
expr: 'predict_linear(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[3h], 86400) <= 0 and node_filesystem_avail_bytes > 0'
for: 2m
labels:
severity: warning
annotations:
summary: Host disk may fill in 24 hours (instance {{ $labels.instance }})
description: "Filesystem will likely run out of space within the next 24 hours.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostOutOfInodes
expr: '(node_filesystem_files_free / node_filesystem_files < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0)'
for: 2m
labels:
severity: critical
annotations:
summary: Host out of inodes (instance {{ $labels.instance }})
description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostFilesystemDeviceError
expr: 'node_filesystem_device_error{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} == 1'
for: 2m
labels:
severity: critical
annotations:
summary: Host filesystem device error (instance {{ $labels.instance }})
description: "Error stat-ing the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostInodesMayFillIn24Hours
expr: 'predict_linear(node_filesystem_files_free{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_files_free > 0'
for: 2m
labels:
severity: warning
annotations:
summary: Host inodes may fill in 24 hours (instance {{ $labels.instance }})
description: "Filesystem will likely run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskReadLatency
expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0)'
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk read latency (instance {{ $labels.instance }})
description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskWriteLatency
expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0)'
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk write latency (instance {{ $labels.instance }})
description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostHighCpuLoad
expr: '1 - (avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m]))) > .80'
for: 10m
labels:
severity: warning
annotations:
summary: Host high CPU load (instance {{ $labels.instance }})
description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuIsUnderutilized
expr: '(min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8'
for: 1w
labels:
severity: info
annotations:
summary: Host CPU is underutilized (instance {{ $labels.instance }})
description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuStealNoisyNeighbor
expr: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10'
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuHighIowait
expr: 'avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10'
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU high iowait (instance {{ $labels.instance }})
description: "CPU iowait > 10%. Your CPU is idling waiting for storage to respond.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskIo
expr: 'rate(node_disk_io_time_seconds_total[5m]) > 0.8'
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual disk IO (instance {{ $labels.instance }})
description: "Disk usage >80%. Check storage for issues or increase IOPS capabilities. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostContextSwitchingHigh
expr: '(rate(node_context_switches_total[15m])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) / (rate(node_context_switches_total[1d])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) > 2'
for: 0m
labels:
severity: warning
annotations:
summary: Host context switching high (instance {{ $labels.instance }})
description: "Context switching is growing on the node (twice the daily average during the last 15m)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSwapIsFillingUp
expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80)'
for: 2m
labels:
severity: warning
annotations:
summary: Host swap is filling up (instance {{ $labels.instance }})
description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSystemdServiceCrashed
expr: '(node_systemd_unit_state{state="failed"} == 1)'
for: 0m
labels:
severity: warning
annotations:
summary: Host systemd service crashed (instance {{ $labels.instance }})
description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostPhysicalComponentTooHot
expr: 'node_hwmon_temp_celsius > node_hwmon_temp_max_celsius'
for: 5m
labels:
severity: warning
annotations:
summary: Host physical component too hot (instance {{ $labels.instance }})
description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNodeOvertemperatureAlarm
expr: '((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1))'
for: 0m
labels:
severity: critical
annotations:
summary: Host node overtemperature alarm (instance {{ $labels.instance }})
description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSoftwareRaidInsufficientDrives
expr: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0)'
for: 0m
labels:
severity: critical
annotations:
summary: Host software RAID insufficient drives (instance {{ $labels.instance }})
description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSoftwareRaidDiskFailure
expr: '(node_md_disks{state="failed"} > 0)'
for: 2m
labels:
severity: warning
annotations:
summary: Host software RAID disk failure (instance {{ $labels.instance }})
description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostKernelVersionDeviations
expr: 'changes(node_uname_info[1h]) > 0'
for: 0m
labels:
severity: info
annotations:
summary: Host kernel version deviations (instance {{ $labels.instance }})
description: "Kernel version for {{ $labels.instance }} has changed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostOomKillDetected
expr: '(increase(node_vmstat_oom_kill[1m]) > 0)'
for: 0m
labels:
severity: warning
annotations:
summary: Host OOM kill detected (instance {{ $labels.instance }})
description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostEdacCorrectableErrorsDetected
expr: '(increase(node_edac_correctable_errors_total[1m]) > 0)'
for: 0m
labels:
severity: info
annotations:
summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostEdacUncorrectableErrorsDetected
expr: '(node_edac_uncorrectable_errors_total > 0)'
for: 0m
labels:
severity: warning
annotations:
summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkReceiveErrors
expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01)'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Receive Errors (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkTransmitErrors
expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01)'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Transmit Errors (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkBondDegraded
expr: '((node_bonding_active - node_bonding_slaves) != 0)'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Bond Degraded (instance {{ $labels.instance }})
description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostConntrackLimit
expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8)'
for: 5m
labels:
severity: warning
annotations:
summary: Host conntrack limit (instance {{ $labels.instance }})
description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostClockSkew
expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0))'
for: 10m
labels:
severity: warning
annotations:
summary: Host clock skew (instance {{ $labels.instance }})
description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostClockNotSynchronising
expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16)'
for: 2m
labels:
severity: warning
annotations:
summary: Host clock not synchronising (instance {{ $labels.instance }})
description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
{% endraw %}

View File

@@ -0,0 +1,5 @@
---
modules:
icmp:
prober: icmp
timeout: 5s

View File

@@ -0,0 +1,55 @@
---
# {{ ansible_managed }}
global:
scrape_interval: 10s
evaluation_interval: 10s
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
- "/etc/prometheus/recording/*.yaml"
- "/etc/prometheus/alerting/*.yaml"
scrape_configs:
- job_name: prometheus
static_configs:
- targets:
- localhost:9090
- job_name: blackbox
static_configs:
- targets:
- blackbox-exporter:9115
- job_name: node
static_configs:
- targets:
- host.containers.internal:9100
{% if prometheus_ping_hosts | length > 0 %}
- job_name: "icmp"
metrics_path: "/probe"
params:
module: ["icmp"]
static_configs:
- targets:
{% for host in prometheus_ping_hosts %}
- "{{ host.name }}::{{ host.type | default('monitored') }}"
{% endfor %}
relabel_configs:
  # Targets are written as "<host>::<type>"; split that into the actual probe
  # target and a host_type label ("monitored" or "wan").
  - source_labels:
      - __address__
    regex: '(.+)::(.+)'
    target_label: __param_target
    replacement: '${1}'
  - source_labels:
      - __address__
    regex: '(.+)::(.+)'
    target_label: host_type
    replacement: '${2}'
  - source_labels:
      - __param_target
    target_label: instance
  # Send the scrape to the blackbox exporter sidecar. Fixed hostname: the
  # container/job is named "blackbox-exporter" (hyphen), so the previous
  # "blackbox_exporter" would not resolve on the service network.
  - target_label: __address__
    replacement: blackbox-exporter:9115
{%- endif %}

View File

@@ -1,15 +1,31 @@
---
service_domains: []
service_vhost_locations: []
service_container_command: []
service_container_entrypoint: ""
service_domains: []
service_container_http_port: 0
service_vhost_locations: []
service_proxy_pass_host_header: true
service_proxy_auth_type: none
service_container_ip: ""
service_container_additional_networks: []
service_container_user: ""
service_container_publish_ports: []
service_container_mounts: []
service_container_devices: []
service_container_secrets: []
service_container_env: {}
service_container_add_capabilities: []
service_container_pinp: false
service_database_type: none
service_database_additional_networks: []
service_database_secret_type: mount
service_database_secret_target: "{{ service_database_type }}"
service_postgres_image: docker.io/pgautoupgrade/pgautoupgrade
service_postgres_tag: alpine
service_redis: false
service_additional_containers: []

View File

@@ -1,7 +1,6 @@
---
- name: "Restart socat socket for {{ service_name }}"
ansible.builtin.systemd_service:
name: "{{ service_name }}-socat.socket"
state: restarted
daemon_reload: true
ignore_errors: '{{ ansible_check_mode }}'
# Queues the service's systemd socket unit for restart via the shared
# systemd_restart_units mechanism (applied by the systemd role's handler).
- name: Restart socket for {{ service_name }}
  ansible.builtin.set_fact:
    # Fixed typo: "servive_name" was undefined and made this handler fail.
    systemd_restart_units: "{{ systemd_restart_units + [service_name ~ '.socket'] }}" # noqa: var-naming[no-role-prefix]
  changed_when: true
  notify: Apply systemd unit restarts

View File

@@ -8,6 +8,18 @@ argument_specs:
type: str
required: true
service_container_command:
description: Command to start the service container with.
type: list
required: false
default: []
elements: str
service_container_entrypoint:
description: Entrypoint to use in the service container
type: str
required: false
default: ""
service_domains:
description: A list of domains which should be proxied to the main service container
type: list
@@ -16,36 +28,107 @@ argument_specs:
elements: str
service_container_http_port:
description:
- Port inside the container where http requests will be proxied to.
- Required if service_domains is not empty.
- Port inside the container where http requests are proxied to.
- If set to 0, http requests are proxied to /run/<service name>.sock inside the container
type: int
required: false
default: 0
service_proxy_pass_host_header:
description: Passed to vhost role as vhost_proxy_pass_header
type: bool
required: false
default: true
service_proxy_auth_type:
description: >-
Set to oauth2-proxy to use OAuth2 Proxy for vhost authentication.
type: str
required: false
default: none
choices:
- none
- oauth2-proxy
service_vhost_locations:
description: Passed to vhost role as vhost_locations
type: list
required: false
default: []
service_oauth2_proxy_issuer_url:
description: >-
OpenID Connect issuer URL. Required if service_proxy_auth_type is oauth2-proxy.
type: str
required: false
service_oauth2_proxy_client_id:
description: OAuth client ID. Required if service_proxy_auth_type is oauth2-proxy.
type: str
required: false
service_oauth2_proxy_client_secret:
description: OAuth client secret. Required if service_proxy_auth_type is oauth2-proxy.
type: str
required: false
service_container_image:
description: "The image to run in the service container(s), in FQIN format (registry/imagename:tag)."
description: "The image to run in the service container(s), in FQIN format (registry/image_name:tag)."
type: str
required: true
service_container_user:
description: The UID to run as inside the container
type: str
required: false
default: ""
service_container_ip:
description: Static ip for the container in it's network
type: str
required: false
default: ""
service_container_additional_networks:
description: A list of additional podman networks for the service container (in addition to service name network).
description: >-
A list of additional podman networks for the service container (in
addition to service name network).
type: list
required: false
default: []
elements: str
service_container_publish_ports:
description: "A list of published ports in docker format (<host listen address>:<host port>:<container port>)"
description: A list of ports to publish outside the container
type: list
required: false
default: []
elements: str
elements: dict
options:
name:
description:
- Name of the port.
- If type is socket, the socket will be created at /run/<service name>-<port name>.sock on the host.
- If type is not socket, this is just informative.
type: str
required: true
container_port:
description: Container port to publish
type: int
required: true
type:
description: Whether to publish as a port or socket
type: str
required: false
default: port
choices:
- socket
- port
host_address:
description:
- IP or hostname to listen on on the host
- Ignored if type is socket
type: str
required: false
default: 0.0.0.0
host_port:
description:
- Port to listen on on the host
- Required if type is port, ignored otherwise
type: int
required: false
service_container_mounts:
description: List of bind mounts or volumes to be mounted inside the service container(s).
type: list
@@ -54,19 +137,21 @@ argument_specs:
elements: dict
options:
type:
description: Type of volume
description: Type of mount
type: str
required: true
choices:
- volume
- bind
- template
- copy
source:
description:
- Mount source.
- If mount type is volume, name of the volume.
- If mount type is bind, host path to bind mount inside the container.
- If mount type is template, the name of the template file, must end in .j2
- If mount type is copy, name of the file or directory to copy. Directory name must end in /.
type: str
required: true
destination:
@@ -76,7 +161,7 @@ argument_specs:
readonly:
description:
- If true, volume will be mounted as read only inside the container.
- Defaults to false for volume and bind, true for template
- Defaults to false for volume and bind, true for template and copy
type: bool
required: false
user:
@@ -89,12 +174,57 @@ argument_specs:
type: str
required: false
default: ""
mode:
description:
- Templated file or copied directory/file permissions.
- Defaults to 0644 for files, 0755 for directories
type: str
required: false
volume_device:
description: >-
The path of a device which is mounted for the volume.
Only applicable if mount type is volume.
type: str
required: false
default: ""
volume_type:
description: >-
The filesystem type of device as used by the mount commands -t option
Only applicable if mount type is volume.
type: str
required: false
default: ""
volume_mount_options:
description: >-
The mount options to use for a filesystem as used by the mount command -o option
Only applicable if mount type is volume.
type: list
elements: str
required: false
default: []
service_container_devices:
description: List of devices to be added inside the service main container.
type: list
required: false
default: []
elements: dict
options:
source:
description: Device path on host
type: str
required: true
destination:
description: Device path inside the container. Defaults to same as host.
type: str
required: false
service_container_secrets:
description:
- A list of secrets available to the service container in /run/secrets/<service name>-<secret name>
- >
A dict of secrets and their values (including autogenerated values) is available as `_service_podman_secrets` for use
in tepmlates or environment variables. This should only be used if the container doesn't support reading the secret from file
A list of secrets available to the service container as file or environment variable
- >
A dict of secrets and their values (including autogenerated values) is available as
`service_podman_secrets` for use in templates. This should only be used if the
container doesn't support reading the secret from file or environment variable.
type: list
required: false
default: []
@@ -106,39 +236,130 @@ argument_specs:
required: true
value:
description:
- Value of the secret. Defaults to a 128-character random string containing alphanumeric characters.
- If the value is not explicitly set, it will not be changed if the secret already exists.
- >
Value of the secret. Defaults to a 128-character random string containing
alphanumeric characters.
- >
If the value is not explicitly set, it will not be changed if the secret
already exists.
type: str
required: false
length:
description: Length of randomly generated string
type: int
required: false
default: 128
type:
description: How the secret will be exposed to the container
type: str
choices:
- mount
- env
default: mount
target:
description: >
Where the secret will be available inside the container. If type is mount, this is
either a full file path or a filename under /run/secrets. If type is env, this is
the name of the environment variable. Defaults to secret name.
type: str
required: false
service_container_env:
description: A dict of environment variables for the service container(s)
type: dict
required: false
default: {}
service_database_type:
description: >
Database type to set up. It will be run in a docker container accessible to the service at host <service name>-{{ service_database_type }} on the
default port. The database user will be {{ service_name }} and password will be available as the _service_database_password variable.
type: str
service_container_add_capabilities:
description: List of capabilities to add to the service container
type: list
required: false
default: []
elements: str
service_container_pinp:
description:
- If true, runs the container with podman in podman
- This starts a podman service inside the outer container
- The podman socket is exposed to the inner container at /var/run/docker.sock
- >-
This allows the container to manage other containers, which are run inside the
same outer container
- >-
The inner containers use host networking, so they share the network namespace
with the outer container and each other.
- This support is experimental and may not work with all images or configurations.
type: bool
required: false
default: false
service_database_type:
description:
- Database type to set up.
- >
It will be run in a container accessible to the service at
host <service database type> on the default port.
- The database user will be <service name>
- The password will be accessible as secret at /run/secrets/<service database type>
- >
The password will also be available as the
service_podman_secrets['<service name>-<service database type>'] variable.
type: str
choices:
- postgres
- mariadb
- mongo
- none
required: false
default: none
service_postgres_tag:
description: >
Postgresql version to use. Can be debian (n) or alpine-based (n-alpine), where n can be major version like 14 or minor like 14.13.
Required if service_database_type is postgres.
service_database_additional_networks:
description: >-
A list of additional podman networks for the database container (in
addition to service name network).
type: list
required: false
default: []
elements: str
service_database_secret_type:
description: Secret type for database secret for service container
type: str
choices:
- mount
- env
required: false
default: mount
service_database_secret_target:
description: Secret target for database secret for service container.
type: str
required: false
default: "{{ service_database_type }}"
service_postgres_image:
description: Postgresql image to use.
type: str
required: false
default: docker.io/library/postgres
service_postgres_tag:
description:
- Postgresql version to use.
- Can be debian (n) or alpine-based (n-alpine), where n can be major version like 14 or minor like 14.13.
- Ignored if database type is not postgres.
- If a custom postgres image is specified, see that image documentation for supported tags.
type: str
required: false
service_redis:
description: >-
Whether to install redis in a container accessible to the service at host redis.
type: bool
required: false
default: false
service_additional_containers:
description:
- List of additional containers for the sercice.
- >
Will inherit most options from main service container, except for publish_ports.
All options can be overridden per-container.
- List of additional containers for the service.
- >-
If image is not specified, will use service container image and
inherit most options from main service container.
- All options can be overridden per-container.
type: list
required: false
default: []
@@ -158,15 +379,35 @@ argument_specs:
type: str
required: false
default: "{{ service_container_image }}"
mounts:
description: List of bind mounts or volumes to be mounted inside the main service container.
user:
description:
- The UID to run as inside the container.
- Defaults to <service_container_user> if same image, "" otherwise.
type: str
required: false
command:
description:
- Command to start the container with.
- Defaults to <service_container_command> if same image, [] otherwise.
type: list
required: false
elements: str
entrypoint:
description:
- Entrypoint to use in the container
- Defaults to <service_container_entrypoint> if same image, "" otherwise.
type: str
required: false
mounts:
description:
- List of bind mounts or volumes to be mounted inside the container.
- Defaults to <service_container_mounts> if same image, [] otherwise.
type: list
required: false
default: "{{ service_container_mounts }}"
elements: dict
options:
type:
description: Type of volume
description: Type of mount
type: str
required: true
choices:
@@ -191,17 +432,176 @@ argument_specs:
- Defaults to false for volume and bind, true for template
type: bool
required: false
user:
description: Volume owner uid. Only applicable if mount type is volume.
type: str
required: false
default: ""
group:
description: Volume owner gid. Only applicable if mount type is volume.
type: str
required: false
default: ""
mode:
description:
- Templated file or copied directory/file permissions.
- Defaults to 0644 for files, 0755 for directories
type: str
required: false
volume_device:
description: >-
The path of a device which is mounted for the volume.
Only applicable if mount type is volume.
type: str
required: false
default: ""
volume_type:
description: >-
The filesystem type of device as used by the mount commands -t option
Only applicable if mount type is volume.
type: str
required: false
default: ""
volume_mount_options:
description: >-
The mount options to use for a filesystem as used by the mount command -o option
Only applicable if mount type is volume.
type: list
elements: str
required: false
default: []
devices:
description:
- List of devices to be added inside the container.
- Defaults to <service_container_devices> if same image, [] otherwise.
type: list
required: false
elements: dict
options:
source:
description: Device path on host
type: str
required: true
destination:
description: Device path inside the container. Defaults to same as host.
type: str
required: false
publish_ports:
description: "A list of published ports in docker format (<host listen address>:<host port>:<container port>)"
description: A list of ports to publish outside the container
type: list
required: false
default: []
elements: str
elements: dict
options:
name:
description:
- Name of the port.
- >-
If type is socket, the socket will be created at
/run/<service name>-<additional container name>-<port name>.sock on the host.
- If type is not socket, this is just informative.
type: str
required: true
container_port:
description: Container port to publish
type: int
required: true
type:
description: Whether to publish as a port or socket
type: str
required: false
default: port
choices:
- socket
- port
host_address:
description:
- IP or hostname to listen on on the host
- Ignored if type is socket
type: str
required: false
default: 0.0.0.0
host_port:
description:
- Port to listen on on the host
- Required if type is port, ignored otherwise
type: int
required: false
env:
description: A dict of environment variables for the container
description:
- A dict of environment variables for the container
- Defaults to <service_container_env> if same image, {} otherwise.
type: dict
required: false
default: {}
add_capabilities:
description:
- List of capabilities to add to the container
- Defaults to <service_container_add_capabilities> if same image, [] otherwise.
type: list
required: false
elements: str
secrets:
description:
- >
A list of secrets available to the service container as file or environment
variable
- >
A dict of secrets and their values (including autogenerated values) is available as
`service_podman_secrets` for use in templates. This should only be used if the
container doesn't support reading the secret from file or environment variable.
- Defaults to <service_container_secrets> if same image, [] otherwise.
type: list
required: false
elements: dict
options:
name:
description: Name of the secret
type: str
required: true
value:
description:
- >
Value of the secret. Defaults to a 128-character random string containing
alphanumeric characters.
- >
If the value is not explicitly set, it will not be changed if the secret
already exists.
type: str
required: false
length:
description: Length of randomly generated string
type: int
required: false
default: 128
type:
description: How the secret will be exposed to the container
type: str
choices:
- mount
- env
default: mount
target:
description: >
Where the secret will be available inside the container. If type is mount, this is
either a full file path or a filename under /run/secrets. If type is env, this is
the name of the environment variable. Defaults to secret name.
type: str
required: false
pinp:
description:
- If true, runs the container with podman in podman
- This starts a podman service inside the outer container
- The podman socket is exposed to the inner container at /var/run/docker.sock
- >-
This allows the container to manage other containers, which are run inside the
same outer container
- >-
The inner containers use host networking, so they share the network namespace
with the outer container and each other.
- This support is experimental and may not work with all images or configurations.
type: bool
required: false
default: false
service_requires:
description: List of systemd units this service container depends on.

View File

@@ -0,0 +1,31 @@
---
# Runs each additional container of the service through the shared
# container role, once per entry in _service_additional_containers.
- name: Additional containers for {{ service_name }}
ansible.builtin.include_role:
name: container
vars:
container_name: "{{ _service_additional_container.name }}"
container_image: "{{ _service_additional_container_final.image }}"
container_entrypoint: "{{ _service_additional_container_final.entrypoint }}"
container_command: "{{ _service_additional_container_final.command }}"
container_user: "{{ _service_additional_container_final.user }}"
container_mounts: "{{ _service_additional_container_final.mounts }}"
# NOTE(review): devices (and add_capabilities below) read the raw loop
# item while most other fields use the merged *_final variable — confirm
# the per-container defaults documented in the argument spec apply here.
container_devices: "{{ _service_additional_container.devices }}"
container_publish_ports: "{{ _service_additional_container_publish_ports }}"
container_networks: "{{ _service_additional_container_networks }}"
# Hostname is the container name with the "<service name>-" prefix stripped,
# so siblings can reach it by its short name on the service network.
container_hostname: "{{ _service_additional_container.name | regex_replace('^' ~ service_name ~ '-', '') }}"
container_secrets: "{{ _service_additional_container_secrets }}"
container_env: "{{ _service_additional_container_final.env }}"
container_add_capabilities: "{{ _service_additional_container.add_capabilities }}"
container_requires: "{{ _service_container_requires }}"
container_wants: "{{ _service_additional_container_wants }}"
container_auto_update: "{{ service_auto_update }}"
loop: "{{ _service_additional_containers }}"
loop_control:
loop_var: _service_additional_container
index_var: _service_additional_container_index
- name: Socat sockets for additional containers of {{ service_name }}
ansible.builtin.include_tasks: additional_socat.yaml
loop: "{{ _service_additional_containers }}"
loop_control:
loop_var: _service_additional_container

View File

@@ -0,0 +1,12 @@
---
# Creates a socat-backed host socket for every published port of type
# "socket" belonging to one additional container (the loop item
# _service_additional_container is set by the including task file).
- name: Socat for socket published ports of {{ service_name }}
ansible.builtin.include_role:
name: socat
loop: "{{ _service_additional_container_publish_socket_ports }}"
loop_control:
loop_var: publish_port
vars:
# NOTE(review): the socket name omits the additional container name,
# but the argument spec documents the path as
# /run/<service name>-<additional container name>-<port name>.sock —
# confirm which is intended.
socat_service_name: "{{ service_name }}-{{ publish_port.name }}"
socat_target_container: "{{ _service_additional_container.name }}"
socat_target_http_port: "{{ publish_port.container_port }}"
socat_auto_update: "{{ service_auto_update }}"

View File

@@ -1,35 +1,20 @@
---
- name: Include database variables
- name: Include variables for database {{ service_database_type }}
ansible.builtin.include_vars:
file: database.yaml
file: database/{{ service_database_type }}.yaml
- name: Database container for {{ service_name }}
ansible.builtin.include_role:
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ service_name }}-{{ service_database_type }}" # This doesn't use _service_database_name to allow container role handlers to work
container_image: "docker.io/library/postgres:{{ service_postgres_tag }}"
container_image: "{{ _service_database_image }}"
container_mounts:
- type: volume
source: "{{ _service_database_name }}"
destination: /var/lib/postgresql/data
container_networks:
- "{{ service_name }}"
container_secrets:
- name: "{{ _service_database_name }}"
container_env:
POSTGRES_USER: "{{ service_name | replace('-', '_') }}"
POSTGRES_PASSWORD_FILE: "/run/secrets/{{ _service_database_name }}"
POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
destination: "{{ _service_database_mount_destination }}"
container_networks: "{{ _service_database_networks }}"
container_hostname: "{{ service_database_type }}"
container_secrets: "{{ _service_database_secrets }}"
container_env: "{{ _service_database_env }}"
container_auto_update: "{{ service_auto_update }}"
- name: Get database secret info
containers.podman.podman_secret_info:
name: "{{ _service_database_name }}"
showsecret: true
register: _service_database_secret
- name: Set database-related variables
ansible.builtin.set_fact:
_service_database_password: "{{ _service_database_secret.secrets[0].SecretData }}"
_service_container_requires: "{{ _service_container_requires + [_service_database_name + '.service'] }}"

View File

@@ -0,0 +1,61 @@
---
- name: Create directory {{ _service_host_directory }}
ansible.builtin.file:
path: "{{ _service_host_directory }}"
state: directory
mode: "0755"
- name: Create directory {{ _service_host_directory + '/mounts' }}
ansible.builtin.file:
path: "{{ _service_host_directory }}/mounts"
state: directory
mode: "0700"
- name: Create service template mount directories
ansible.builtin.file:
path: "{{ _service_host_directory }}/mounts/{{ item }}"
state: directory
mode: "0700"
loop: "{{ _service_all_template_mount_directories }}"
- name: Template files for template mounts
ansible.builtin.template:
src: "{{ item[0].source }}"
dest: "{{ item[1] }}"
mode: "{{ item[0].mode | default('0644') }}"
notify: Restart container service {{ service_name }}
loop: "{{ _service_all_template_mounts | zip(_service_all_template_mount_host_files) }}"
- name: Copy files for copy mounts
ansible.builtin.copy:
src: "{{ item[0].source }}"
dest: "{{ item[1] }}"
mode: "{{ item[0].mode | default('0644') }}"
directory_mode: "0755"
notify: Restart container service {{ service_name }}
loop: "{{ _service_all_copy_mounts | zip(_service_all_copy_mount_host_files) }}"
- name: Template entrypoint for pinp
ansible.builtin.template:
src: "pinp-entrypoint.sh.j2"
dest: "{{ _service_host_directory }}/mounts/pinp-entrypoint.sh"
mode: "0755"
vars:
pinp_inner_name: "{{ service_name }}"
pinp_inner_image: "{{ service_container_image }}"
pinp_inner_mounts: "{{ _service_container_pinp_inner_mounts }}"
pinp_inner_env: "{{ service_container_env }}"
when: service_container_pinp
- name: Template entrypoint for pinp of additional containers
ansible.builtin.template:
src: "pinp-entrypoint.sh.j2"
dest: "{{ _service_host_directory }}/mounts/{{ _service_additional_container.name }}-pinp-entrypoint.sh"
mode: "0755"
loop: "{{ _service_additional_containers | selectattr('pinp') }}"
loop_control:
loop_var: _service_additional_container
vars:
pinp_inner_name: "{{ _service_additional_container.name }}"
pinp_inner_image: "{{ _service_additional_container.image }}"
pinp_inner_mounts: "{{ _service_additional_container_pinp_inner_mounts }}"
pinp_inner_env: "{{ _service_additional_container.env }}"

View File

@@ -2,39 +2,81 @@
- name: Validate inputs
ansible.builtin.import_tasks: validation.yaml
- name: Initialize variables
ansible.builtin.set_fact:
_service_container_mounts: []
_service_container_requires: "{{ service_requires }}"
- name: Databse for {{ service_name }}
- name: Database for {{ service_name }}
ansible.builtin.include_tasks: database.yaml
when: "service_database_type != 'none'"
when: _service_setup_database
- name: Redis for {{ service_name }}
ansible.builtin.include_tasks: redis.yaml
when: service_redis
- name: Secrets for {{ service_name }}
ansible.builtin.include_tasks: secrets.yaml
when: service_container_secrets | length > 0
when: _service_container_secrets | length > 0
- name: Mounts for {{ service_name }}
ansible.builtin.include_tasks: mounts.yaml
when: service_container_mounts | length > 0
- name: Host mounts for {{ service_name }}
ansible.builtin.include_tasks: host_mounts.yaml
when: >-
(_service_all_template_mounts + _service_all_copy_mounts) | length > 0
or service_container_pinp
or (_service_additional_containers | selectattr('pinp') | length > 0)
- name: Additional containers for {{ service_name }}
ansible.builtin.include_tasks: additional.yaml
when: _service_additional_containers | length > 0
- name: Native socket for {{ service_name }}
ansible.builtin.include_role:
name: uumas.general.systemd_socket
vars:
systemd_socket_name: "{{ service_name }}"
systemd_socket_requires:
- "{{ service_name }}.service"
when: _service_native_socket
- name: Main container for {{ service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ service_name }}"
container_image: "{{ service_container_image }}"
container_user: "{{ service_container_user }}"
container_mounts: "{{ _service_container_mounts }}"
container_publish_ports: "{{ service_container_publish_ports }}"
container_networks: "{{ [service_name] + service_container_additional_networks }}"
container_image: "{{ _service_container.image }}"
container_entrypoint: "{{ _service_container.entrypoint }}"
container_command: "{{ _service_container.command }}"
container_user: "{{ _service_container.user }}"
container_mounts: "{{ _service_container.mounts }}"
container_devices: "{{ service_container_devices }}"
container_publish_ports: "{{ _service_container_publish_ports }}"
container_networks: "{{ _service_container_networks }}"
container_secrets: "{{ _service_container_secrets }}"
container_env: "{{ service_container_env }}"
container_env: "{{ _service_container.env }}"
container_add_capabilities: "{{ service_container_add_capabilities }}"
container_requires: "{{ _service_container_requires }}"
container_wants: "{{ _service_container_wants }}"
container_auto_update: "{{ service_auto_update }}"
- name: Caddy socket proxy for http of {{ service_name }}
ansible.builtin.include_role:
name: caddy_socket_proxy
when: service_container_http_port > 0
vars:
caddy_socket_proxy_service_name: "{{ service_name }}"
caddy_socket_proxy_target_http_port: "{{ service_container_http_port }}"
caddy_socket_proxy_container_ip: >-
{{ service_container_ip | ansible.utils.ipmath(257) if _service_static_ip else '' }}
caddy_socket_proxy_auto_update: "{{ service_auto_update }}"
- name: Socat for socket published ports of {{ service_name }}
ansible.builtin.include_role:
name: socat
loop: "{{ _service_container_publish_socket_ports }}"
loop_control:
loop_var: publish_port
vars:
socat_service_name: "{{ service_name }}-{{ publish_port.name }}"
socat_target_container: "{{ service_name }}"
socat_target_http_port: "{{ publish_port.container_port }}"
socat_auto_update: "{{ service_auto_update }}"
- name: Reverse proxy for {{ service_name }}
ansible.builtin.include_tasks: proxy.yaml
when: service_domains | length > 0

View File

@@ -1,32 +0,0 @@
---
- name: Set container named mounts
ansible.builtin.set_fact:
_service_container_mounts: >
{{ _service_container_mounts +
[mount | combine({'source': service_name + '-' + mount.source})] }}
when: mount.type == 'volume'
- name: Set container named mounts
ansible.builtin.set_fact:
_service_container_mounts: "{{ _service_container_mounts + [mount] }}"
when: mount.type == 'bind'
- name: Template mounts
when: mount.type == 'template'
block:
- name: Set template host path
ansible.builtin.set_fact:
_service_template_host_path: "{{ _service_host_directory }}/mounts/{{ (mount.source | split('.'))[0:-1] | join('.') }}" # Strip .j2 extension
- name: Template files for template mounts
ansible.builtin.template:
src: "{{ mount.source }}"
dest: "{{ _service_template_host_path }}"
mode: "0644"
notify: "Restart container service {{ service_name }}"
- name: Set container template mounts
ansible.builtin.set_fact:
_service_container_mounts: >
{{ _service_container_mounts +
[{'readonly': true} | combine(mount) | combine({'type': 'bind', 'source': _service_template_host_path})] }}

View File

@@ -1,21 +0,0 @@
---
- name: Create template mount directories under /srv
when: _service_template_mounts | length > 0
block:
- name: Create directory {{ _service_host_directory }}
ansible.builtin.file:
path: "{{ _service_host_directory }}"
state: directory
mode: "0755"
- name: Create directory {{ _service_host_directory + '/mounts' }}
ansible.builtin.file:
path: "{{ _service_host_directory }}/mounts"
state: directory
mode: "0700"
- name: Set mount definitions for {{ service_name }}
ansible.builtin.include_tasks: mount.yaml
loop: "{{ service_container_mounts }}"
loop_control:
loop_var: mount

View File

@@ -0,0 +1,39 @@
---
- name: OAuth2 Proxy container for {{ service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ service_name }}-oauth2-proxy"
container_image: "quay.io/oauth2-proxy/oauth2-proxy:latest-alpine"
container_command:
- --client-secret-file
- /run/secrets/client-secret
- --cookie-secret-file
- /run/secrets/cookie-secret
container_networks:
- name: "{{ service_name }}-oauth2-proxy"
container_secrets:
- name: "{{ service_name }}-oauth2-proxy-cookie-secret"
length: 32
target: cookie-secret
- name: "{{ service_name }}-oauth2-proxy-client-secret"
value: "{{ service_oauth2_proxy_client_secret }}"
target: client-secret
container_env:
OAUTH2_PROXY_HTTP_ADDRESS: fd:3
OAUTH2_PROXY_PROVIDER: oidc
OAUTH2_PROXY_OIDC_ISSUER_URL: "{{ service_oauth2_proxy_issuer_url }}"
OAUTH2_PROXY_CLIENT_ID: "{{ service_oauth2_proxy_client_id }}"
OAUTH2_PROXY_CODE_CHALLENGE_METHOD: S256
OAUTH2_PROXY_SKIP_PROVIDER_BUTTON: "true"
OAUTH2_PROXY_EMAIL_DOMAINS: "*"
OAUTH2_PROXY_REVERSE_PROXY: "true"
container_requires:
- "{{ service_name }}-oauth2-proxy.socket"
container_auto_update: "{{ service_auto_update }}"
- name: Socket for OAuth2 Proxy for {{ service_name }}
ansible.builtin.import_role:
name: uumas.general.systemd_socket
vars:
systemd_socket_name: "{{ service_name }}-oauth2-proxy"

View File

@@ -1,28 +1,7 @@
---
- name: Socat socket for {{ service_name }}
ansible.builtin.template:
src: socat.socket.j2
dest: /etc/systemd/system/{{ service_name }}-socat.socket
mode: "0644"
notify: Restart socat socket for {{ service_name }}
- name: Socat container for {{ service_name }}
ansible.builtin.include_role:
name: container
vars:
container_name: "{{ service_name }}-socat"
container_image: "docker.io/alpine/socat:latest"
container_command:
- "ACCEPT-FD:3,fork"
- "TCP:{{ service_name }}:{{ service_container_http_port }}"
container_user: nobody
container_networks:
- "{{ service_name }}"
container_requires:
- "{{ service_name }}-socat.socket"
- "{{ service_name }}.service"
container_auto_start: false
container_auto_update: "{{ service_auto_update }}"
- name: OAuth2 proxy for {{ service_name }}
ansible.builtin.include_tasks: oauth2_proxy.yaml
when: _service_oauth2_proxy
- name: Reverse proxy for {{ service_name }}
ansible.builtin.import_role:
@@ -32,5 +11,9 @@
vhost_id: "{{ service_name }}"
vhost_domains: "{{ service_domains }}"
vhost_proxy_target_netproto: unix
vhost_proxy_target_socket: "/run/{{ service_name }}-socat.sock"
vhost_locations: "{{ service_vhost_locations }}"
vhost_proxy_target_socket: "{{ _service_socket_path }}"
vhost_proxy_headers: "{{ _service_proxy_headers }}"
vhost_proxy_auth_socket: "{{ _service_oauth2_socket }}"
vhost_proxy_auth_uri: /oauth2/auth
vhost_proxy_auth_unauthorized_redir: "/oauth2/sign_in?rd={scheme}://{host}{uri}"
vhost_locations: "{{ _service_vhost_locations }}"

View File

@@ -0,0 +1,12 @@
---
# Runs a Valkey (Redis-compatible) container on the service network.
# The service can reach it at hostname "redis" (set via container_hostname).
- name: Redis container for {{ service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ service_name }}-redis"
container_image: docker.io/valkey/valkey:alpine
container_networks:
- name: "{{ service_name }}"
# Presumably offset 2 in the service subnet is reserved for redis when a
# static service IP is configured; empty string lets podman assign one.
# TODO(review): confirm the offset convention against the network role.
ip: "{{ service_container_ip | ansible.utils.ipmath(2) if _service_static_ip else '' }}"
container_hostname: redis
container_auto_update: "{{ service_auto_update }}"

View File

@@ -1,6 +1,6 @@
---
- name: Create secrets
ansible.builtin.include_role:
ansible.builtin.import_role:
name: container
tasks_from: secrets.yaml
rolespec_validate: false # FIXME make proper validation possible
@@ -16,7 +16,7 @@
- name: Store secrets in a variable for later
ansible.builtin.set_fact:
_service_podman_secrets: >
service_podman_secrets: >
{{ _service_podman_secret_info.secrets
| map(attribute='Spec.Name')
| zip(_service_podman_secret_info.secrets | map(attribute='SecretData'))

View File

@@ -1,16 +1,22 @@
---
# Input sanity checks for the service role. Each task fails fast with a
# descriptive message so misconfiguration is caught before any resources
# are created or changed.
- name: Fail if service_name is empty
  ansible.builtin.fail:
    msg: service_name must not be empty
  when: service_name | length == 0
- name: Fail if service_container_user is not string
  ansible.builtin.fail:
    msg: "service_container_user must be a string, not int."
  when: service_container_user is not string
- name: Fail if service_database_type is postgres but service_postgres_tag is not set
  ansible.builtin.fail:
    msg: "service_postgres_tag needs to be set when database type is postgres"
  when: "service_database_type == 'postgres' and service_postgres_tag is not defined"
- name: Fail if template mount source doesn't end in .j2
  ansible.builtin.fail:
    msg: "Template mount source file name needs to end in .j2. The file {{ item.source }} of {{ service_name }} doesn't."
  # endswith('.j2') also rejects a file literally named "j2" (no dot),
  # which the previous split('.')-based check accepted; it is also
  # consistent with the copy-mount check below.
  when: "not item.source.endswith('.j2')"
  loop: "{{ _service_template_mounts }}"
- name: Fail if copy mount source doesn't end with /
  ansible.builtin.fail:
    msg: "Copy mount source name must end with /. The file {{ item.source }} of {{ service_name }} doesn't."
  when: "not item.source.endswith('/')"
  loop: "{{ _service_copy_mounts }}"

View File

@@ -0,0 +1,26 @@
#!/bin/bash
# {{ ansible_managed }}
# Entrypoint for the podman-in-podman (pinp) wrapper container: starts an
# inner podman API service, then runs the actual workload container in the
# foreground job and forwards SIGTERM to it so systemd can stop the stack.
_term() {
  echo "Received SIGTERM, stopping all containers"
  kill "$child"
}
# Install the handler before launching anything so a SIGTERM arriving
# during startup is not lost to the default disposition.
trap _term SIGTERM
# Inner podman API socket; exposed to the workload as the docker socket
# below. Not tracked explicitly — it dies with this container.
podman system service -t 0 &
podman run \
  --rm \
  -v /run/secrets:/run/secrets:ro \
{% for key, value in pinp_inner_env.items() %}
  -e "{{ key }}={{ value }}" \
{% endfor %}
  -v /tmp/storage-run-1000/podman/podman.sock:/var/run/docker.sock \
{% for mount in pinp_inner_mounts %}
  --mount "type={{ mount.type }},source={{ mount.source }},destination={{ mount.destination }}{% if mount.readonly | default(false) %},readonly{% endif %}" \
{% endfor %}
  --name {{ pinp_inner_name }} \
  --network host \
  {{ pinp_inner_image }} &
child=$!
wait "$child"
# A trapped signal makes the first wait return early; wait again to reap
# the child's real exit status (returns immediately if already exited).
wait "$child"

View File

@@ -1,6 +0,0 @@
# {{ ansible_managed }}
[Unit]
Description={{ service_name }} socat socket
[Socket]
ListenStream=/run/{{ service_name }}-socat.sock

View File

@@ -1,2 +0,0 @@
---
_service_database_name: "{{ service_name }}-{{ service_database_type }}"

View File

@@ -0,0 +1,9 @@
---
# Database settings loaded when service_database_type is "mariadb".
_service_database_image: docker.io/library/mariadb:lts
# Data directory of the official mariadb image.
_service_database_mount_destination: /var/lib/mysql
# mariadb requires a password, so the role creates a podman secret for it.
_service_database_authenticated: true
_service_database_env:
# Root password is randomized and discarded; the app uses MARIADB_USER.
MARIADB_RANDOM_ROOT_PASSWORD: "1"
# '-' is replaced with '_' to keep user/database identifiers portable.
MARIADB_USER: "{{ service_name | replace('-', '_') }}"
MARIADB_DATABASE: "{{ service_name | replace('-', '_') }}"
# podman mounts the secret at /run/secrets/<target>, i.e. /run/secrets/mariadb.
MARIADB_PASSWORD_FILE: "/run/secrets/{{ service_database_type }}"

View File

@@ -0,0 +1,6 @@
---
# Database settings loaded when service_database_type is "mongo".
_service_database_image: docker.io/library/mongo:latest
# Data directory of the official mongo image.
_service_database_mount_destination: /data/db
# No root credentials are configured, so no password secret is created.
_service_database_authenticated: false
_service_database_env:
# '-' is replaced with '_' to keep the database name portable.
MONGO_INITDB_DATABASE: "{{ service_name | replace('-', '_') }}"

View File

@@ -0,0 +1,14 @@
---
# Database settings loaded when service_database_type is "postgres".
_service_database_image: "{{ service_postgres_image }}:{{ service_postgres_tag }}"
# postgres images before 18 keep data under /var/lib/postgresql/data;
# 18 and later moved it to /var/lib/postgresql. Only tags whose leading
# component is numeric (e.g. "17" or "17-alpine") can be version-compared;
# non-numeric tags like "latest" are assumed current, i.e. 18 or newer.
# (The previous check required a '-' in the tag, which sent a plain "17"
# to the 18+ path.)
_service_database_mount_destination: >-
  {{
    '/var/lib/postgresql/data'
    if (service_postgres_tag | split('-') | first) is match('^[0-9][0-9.]*$')
    and (service_postgres_tag | split('-') | first) is version('18', '<')
    else '/var/lib/postgresql'
  }}
_service_database_authenticated: true
_service_database_env:
  POSTGRES_USER: "{{ service_name | replace('-', '_') }}"
  POSTGRES_PASSWORD_FILE: "/run/secrets/{{ service_database_type }}"
  POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"

View File

@@ -1,16 +0,0 @@
---
_service_template_mounts: "{{ service_container_mounts | selectattr('type', '==', 'template') | list }}"
_service_host_directory: "/srv/{{ service_name }}"
_service_container_secrets: >
{{
service_container_secrets
| zip(service_container_secrets
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('community.general.dict_kv', 'name')
)
| map('combine')
}}
_service_container_wants: "{{ service_wants + ([service_name + '-socat.socket'] if service_domains | length > 0 else []) }}"

View File

@@ -0,0 +1,264 @@
---
_service_additional_containers_with_default_image: >-
{{
([{ 'image': service_container_image }] * service_additional_containers | length)
| zip(service_additional_containers)
| map('combine')
}}
_service_additional_container_same_image_defaults:
user: "{{ service_container_user }}"
command: "{{ service_container_command }}"
entrypoint: "{{ service_container_entrypoint }}"
devices: "{{ service_container_devices }}"
env: "{{ service_container_env }}"
add_capabilities: "{{ service_container_add_capabilities }}"
pinp: false
_service_additional_container_different_image_defaults:
user: ""
command: []
entrypoint: ""
mounts: []
devices: []
publish_ports: []
env: {}
add_capabilities: []
secrets: []
pinp: false
_service_additional_same_image_containers: >-
{{
_service_additional_containers_with_default_image
| selectattr('image', '==', service_container_image)
}}
_service_additional_different_image_containers: >-
{{
_service_additional_containers_with_default_image
| selectattr('image', '!=', service_container_image)
}}
_service_additional_containers: >-
{{
(
(
(
[_service_additional_container_same_image_defaults] *
(_service_additional_same_image_containers | length)
)
| zip(_service_additional_same_image_containers)
| map('combine')
) +
(
(
[_service_additional_container_different_image_defaults] *
(_service_additional_different_image_containers | length)
)
| zip(_service_additional_different_image_containers)
| map('combine')
)
)
| zip(
(
_service_additional_same_image_containers +
_service_additional_different_image_containers
)
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('community.general.dict_kv', 'name')
)
| map('combine')
}}
_service_additional_container_wants: >-
{{
service_wants
+ _service_additional_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat.socket')
}}
_service_additional_container_networks: >-
{{
[{
'name': service_name,
'ip':
service_container_ip | ansible.utils.ipmath(20 + _service_additional_container_index)
if _service_static_ip else ''
}]
+ (
service_container_additional_networks
+ (
_service_additional_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat')
)
) | map('community.general.dict_kv', 'name')
}}
_service_additional_container_secrets: >-
{{
(
_service_additional_container.secrets
| map(attribute='name')
| map('community.general.dict_kv', 'target')
| zip(
_service_additional_container.secrets,
_service_additional_container.secrets
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('community.general.dict_kv', 'name')
)
| map('combine')
) if _service_additional_container.secrets is defined
else _service_container_secrets
}}
_service_additional_container_publish_ports_with_defaults: >-
{{
([{ 'type': 'port', 'host_address': '0.0.0.0' }] * _service_additional_container.publish_ports | length)
| zip(_service_additional_container.publish_ports)
| map('combine')
}}
_service_additional_container_publish_socket_ports: >-
{{
_service_additional_container_publish_ports_with_defaults | selectattr('type', '==', 'socket')
if _service_additional_container.publish_ports is defined
else
[]
}}
_service_additional_container_publish_port_ports: >-
{{
_service_additional_container_publish_ports_with_defaults | selectattr('type', '==', 'port')
if _service_additional_container.publish_ports is defined
else
[]
}}
_service_additional_container_publish_ports: >-
{{
_service_additional_container_publish_port_ports | map(attribute='host_address') |
zip(
_service_additional_container_publish_port_ports | map(attribute='host_port'),
_service_additional_container_publish_port_ports | map(attribute='container_port')
) | map('join', ':')
}}
_service_additional_volume_mounts: "{{ _service_additional_container.mounts | selectattr('type', '==', 'volume') }}"
_service_additional_template_mounts: "{{ _service_additional_container.mounts | selectattr('type', '==', 'template') }}"
_service_additional_copy_mounts: "{{ _service_additional_container.mounts | selectattr('type', '==', 'copy') }}"
_service_additional_host_directory: "/srv/{{ service_name }}"
_service_additional_container_volume_mounts: >-
{{
_service_additional_volume_mounts |
zip(
_service_additional_volume_mounts |
map(attribute='source') |
map('regex_replace', '^', service_name ~ '-') |
map('community.general.dict_kv', 'source')
) |
map('combine')
}}
_service_additional_container_bind_mounts: "{{ _service_additional_container.mounts | selectattr('type', '==', 'bind') }}"
_service_additional_container_template_mounts: >-
{{
([{'readonly': true}] * _service_additional_template_mounts | length) |
zip(
_service_additional_template_mounts |
community.general.remove_keys(['mode']),
_service_additional_template_mounts |
map(attribute='source') |
map('regex_replace', '\.j2$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/') |
map('community.general.dict_kv', 'source'),
([{'type': 'bind'}] * _service_additional_template_mounts | length)
) |
map('combine')
}}
_service_additional_container_copy_mounts: >-
{{
([{'readonly': true}] * _service_additional_copy_mounts | length) |
zip(
_service_additional_copy_mounts |
community.general.remove_keys(['mode']),
_service_additional_copy_mounts |
map(attribute='source') |
map('regex_replace', '\/$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/') |
map('community.general.dict_kv', 'source'),
([{'type': 'bind'}] * _service_additional_copy_mounts | length)
) |
map('combine')
}}
_service_additional_container_mounts: >-
{{
_service_additional_container_volume_mounts +
_service_additional_container_bind_mounts +
_service_additional_container_template_mounts +
_service_additional_container_copy_mounts
if _service_additional_container.mounts is defined
else
_service_container_mounts
}}
_service_additional_plain_container:
image: "{{ _service_additional_container.image }}"
entrypoint: "{{ _service_additional_container.entrypoint }}"
command: "{{ _service_additional_container.command }}"
user: "{{ _service_additional_container.user }}"
env: "{{ _service_additional_container.env }}"
mounts: "{{ _service_additional_container_mounts }}"
_service_additional_pinp_container_mounts:
- type: bind
source: "{{ _service_host_directory }}/mounts/{{ _service_additional_container.name }}-entrypoint.sh"
destination: /entrypoint.sh
readonly: true
- type: volume
source: "{{ _service_additional_container.name }}-containers"
destination: /home/podman/.local/share/containers
_service_additional_pinp_container:
image: quay.io/podman/stable:latest
entrypoint: /entrypoint.sh
command: []
user: podman
env: {}
mounts: >-
{{
_service_additional_pinp_container_mounts
+ (
_service_additional_container_mounts
| zip(
_service_additional_container_mounts
| map(attribute='source')
| map('replace', '/', '_')
| map('regex_replace', '^', '/mounts/')
| map('community.general.dict_kv', 'destination')
)
| map('combine')
)
}}
_service_additional_container_final: >-
{{ _service_additional_pinp_container if _service_additional_container.pinp else _service_additional_plain_container }}
_service_additional_container_pinp_inner_mounts: >-
{{
_service_additional_container_mounts
| zip(
_service_additional_container_mounts
| map(attribute='source')
| map('replace', '/', '_')
| map('regex_replace', '^', '/mounts/')
| map('community.general.dict_kv', 'source')
)
| map('combine')
}}

View File

@@ -0,0 +1,16 @@
---
# Derived database facts shared by all database types.
_service_setup_database: "{{ service_database_type != 'none' }}"
# Container/unit name for the database, e.g. "myapp-postgres".
_service_database_name: "{{ service_name }}-{{ service_database_type }}"
# The database joins the service network at offset 1 from the service IP.
# Jinja filters bind tighter than '+', so dict_kv wraps only the additional
# network names. NOTE(review): map() yields a generator; confirm that
# concatenating it to a list renders as expected under Ansible's Jinja.
_service_database_networks: >-
{{
[{
'name': service_name,
'ip': service_container_ip | ansible.utils.ipmath(1) if _service_static_ip else ''
}]
+ service_database_additional_networks | map('community.general.dict_kv', 'name')
}}
# Secret holding the database password; target is the in-container name.
_service_database_secret:
name: "{{ _service_database_name }}"
target: "{{ service_database_type }}"
# Unauthenticated databases (e.g. mongo) get no secret at all.
_service_database_secrets: "{{ [_service_database_secret] if _service_database_authenticated else [] }}"

View File

@@ -0,0 +1,45 @@
---
# Networks for the main container: its own service network, an optional
# caddy-socket-proxy network (only when it serves HTTP), plus any additional
# networks and one "-socat" network per socket-published port. Jinja filters
# bind tighter than '+', so the trailing dict_kv applies only to the
# parenthesized sum of plain network names.
_service_container_networks: >-
{{
[{
'name': service_name,
'ip': service_container_ip
}]
+ (
[{
'name': service_name ~ '-caddy-socket-proxy',
'ip': service_container_ip | ansible.utils.ipmath(256) if _service_static_ip else ''
}] if service_container_http_port > 0 else []
)
+ (
service_container_additional_networks
+ (
_service_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat')
)
) | map('community.general.dict_kv', 'name')
}}
# A non-empty service_container_ip turns on static addressing everywhere.
_service_static_ip: "{{ service_container_ip | length > 0 }}"
# Hard dependencies: database, redis, and the native socket unit if enabled.
_service_container_requires: >-
{{
service_requires
+ ([_service_database_name ~ '.service'] if _service_setup_database else [])
+ ([service_name ~ '-redis.service'] if service_redis else [])
+ ([service_name ~ '.socket'] if _service_native_socket else [])
}}
# Soft dependencies. NOTE(review): '|' binds tighter than '+', so each map
# chain applies only to the list immediately before it, and map() yields
# generators — confirm list+generator concatenation renders as intended
# under Ansible's Jinja.
_service_container_wants: >-
{{
service_wants
+ ([service_name + '-caddy-socket-proxy.socket'] if service_container_http_port > 0 else [])
+ ([service_name + '-oauth2-proxy.socket'] if _service_oauth2_proxy else [])
+ _service_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat.socket')
+ _service_additional_containers
| map(attribute='name')
| map('regex_replace', '$', '.service')
}}

View File

@@ -0,0 +1,115 @@
---
_service_container_socket_mount:
type: bind
source: /run/{{ service_name }}.sock
destination: /run/{{ service_name }}.sock
_service_volume_mounts: "{{ service_container_mounts | selectattr('type', '==', 'volume') }}"
_service_template_mounts: "{{ service_container_mounts | selectattr('type', '==', 'template') }}"
_service_copy_mounts: "{{ service_container_mounts | selectattr('type', '==', 'copy') }}"
_service_host_directory: "/srv/{{ service_name }}"
_service_container_volume_mounts: >-
{{
_service_volume_mounts |
zip(
_service_volume_mounts |
map(attribute='source') |
map('regex_replace', '^', service_name ~ '-') |
map('community.general.dict_kv', 'source')
) |
map('combine')
}}
_service_container_bind_mounts: >-
{{
service_container_mounts | selectattr('type', '==', 'bind') +
([ _service_container_socket_mount ] if _service_native_socket else [])
}}
_service_container_template_mounts: >-
{{
([{'readonly': true}] * _service_template_mounts | length) |
zip(
_service_template_mounts |
community.general.remove_keys(['mode']),
_service_template_mounts |
map(attribute='source') |
map('regex_replace', '\.j2$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/') |
map('community.general.dict_kv', 'source'),
([{'type': 'bind'}] * _service_template_mounts | length)
) |
map('combine')
}}
_service_container_copy_mounts: >-
{{
([{'readonly': true}] * _service_copy_mounts | length) |
zip(
_service_copy_mounts |
community.general.remove_keys(['mode']),
_service_copy_mounts |
map(attribute='source') |
map('regex_replace', '\/$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/') |
map('community.general.dict_kv', 'source'),
([{'type': 'bind'}] * _service_copy_mounts | length)
) |
map('combine')
}}
_service_container_mounts: >-
{{
_service_container_volume_mounts +
_service_container_bind_mounts +
_service_container_template_mounts +
_service_container_copy_mounts
}}
_service_all_template_mounts: >-
{{
(
_service_template_mounts +
(
_service_additional_containers |
map(attribute='mounts', default=[]) |
flatten
)
) |
selectattr('type', '==', 'template') |
unique
}}
_service_all_template_mount_directories: >-
{{
_service_all_template_mounts |
map(attribute='source') |
map('dirname') |
unique |
select('!=', '')
}}
_service_all_template_mount_host_files: >-
{{
_service_all_template_mounts |
map(attribute='source') |
map('regex_replace', '\.j2$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/')
}}
_service_all_copy_mounts: >-
{{
(
_service_copy_mounts +
(
_service_additional_containers |
map(attribute='mounts', default=[]) |
flatten
)
) |
selectattr('type', '==', 'copy') |
unique
}}
_service_all_copy_mount_host_files: >-
{{
_service_all_copy_mounts |
map(attribute='source') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/')
}}

View File

@@ -0,0 +1,55 @@
---
_service_plain_container:
image: "{{ service_container_image }}"
entrypoint: "{{ service_container_entrypoint }}"
command: "{{ service_container_command }}"
user: "{{ service_container_user }}"
env: "{{ service_container_env }}"
mounts: "{{ _service_container_mounts }}"
_service_pinp_container_mounts:
- type: bind
source: "{{ _service_host_directory }}/mounts/entrypoint.sh"
destination: /entrypoint.sh
readonly: true
- type: volume
source: "containers"
destination: /home/podman/.local/share/containers
_service_pinp_container:
image: quay.io/podman/stable:latest
entrypoint: /entrypoint.sh
command: []
user: podman
env: {}
mounts: >-
{{
_service_pinp_container_mounts
+ (
_service_container_mounts
| zip(
_service_container_mounts
| map(attribute='source')
| map('replace', '/', '_')
| map('regex_replace', '^', '/mounts/')
| map('community.general.dict_kv', 'destination')
)
| map('combine')
)
}}
_service_container: >-
{{ _service_pinp_container if service_container_pinp else _service_plain_container }}
_service_container_pinp_inner_mounts: >-
{{
_service_container_mounts
| zip(
_service_container_mounts
| map(attribute='source')
| map('replace', '/', '_')
| map('regex_replace', '^', '/mounts/')
| map('community.general.dict_kv', 'source')
)
| map('combine')
}}

View File

@@ -0,0 +1,23 @@
---
# The service exposes a native unix socket when it has domains but no HTTP
# port; otherwise caddy-socket-proxy bridges the TCP port to a socket.
_service_native_socket: "{{ service_domains | length > 0 and service_container_http_port == 0 }}"
# Socket the vhost proxies to; the path depends on who provides it.
_service_socket_path: >-
/run/{{ service_name ~ ('-caddy-socket-proxy' if not _service_native_socket else '' ) }}.sock
# Replacement Host header pointing at the in-network container name.
_service_replacement_host_header:
Host: "{{ service_name }}:{{ service_container_http_port }}"
# Pass the original Host header through only when explicitly requested.
_service_proxy_headers: "{{ _service_replacement_host_header if not service_proxy_pass_host_header else {} }}"
_service_oauth2_proxy: "{{ service_proxy_auth_type == 'oauth2-proxy' }}"
# Unix socket oauth2-proxy listens on (empty when auth is disabled).
_service_oauth2_socket: >-
{{ '/run/' ~ service_name ~ '-oauth2-proxy.sock' if _service_oauth2_proxy else '' }}
# /oauth2/* must reach oauth2-proxy itself without an auth subrequest,
# hence the empty proxy_auth_socket override for this location.
_service_oauth2_proxy_location:
path: /oauth2/*
proxy_target_socket: "{{ _service_oauth2_socket }}"
proxy_auth_socket: ""
_service_vhost_locations: >-
{{
service_vhost_locations +
([_service_oauth2_proxy_location] if _service_oauth2_proxy else [])
}}

View File

@@ -0,0 +1,21 @@
---
# Normalize service_container_publish_ports: every entry defaults to a plain
# port published on all host addresses. Filters bind tighter than '*', so
# "| length" is evaluated before the list multiplication.
_service_container_publish_ports_with_defaults: >-
{{
([{ 'type': 'port', 'host_address': '0.0.0.0' }] * service_container_publish_ports | length)
| zip(service_container_publish_ports)
| map('combine')
}}
# Socket-type entries get a systemd socket + socat forwarder, no host port.
_service_container_publish_socket_ports: >-
{{ _service_container_publish_ports_with_defaults | selectattr('type', '==', 'socket') }}
_service_container_publish_port_ports: >-
{{ _service_container_publish_ports_with_defaults | selectattr('type', '==', 'port') }}
# Rendered as "address:host_port:container_port" strings for podman.
_service_container_publish_ports: >-
{{
_service_container_publish_port_ports | map(attribute='host_address') |
zip(
_service_container_publish_port_ports | map(attribute='host_port'),
_service_container_publish_port_ports | map(attribute='container_port')
) | map('join', ':')
}}

View File

@@ -0,0 +1,35 @@
---
_service_container_secrets: >-
{{
service_container_secrets
| map(attribute='name')
| map('community.general.dict_kv', 'target')
| zip(
service_container_secrets,
service_container_secrets
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('community.general.dict_kv', 'name')
)
| map('combine')
+ (
[{
'name': _service_database_name,
'type': service_database_secret_type,
'target': service_database_secret_target
}] if _service_setup_database else []
)
+ (
[{
'name': _service_database_name ~ '-url',
'value':
'postgres://'
~ service_name | replace('-', '_')
~ ':' ~ service_podman_secrets[service_name ~ '-postgres']
~ '@postgres/' ~ service_name | replace('-', '_')
~ '?sslmode=disable',
'type': service_database_secret_type,
'target': service_database_secret_target ~ '-url'
}] if service_podman_secrets[service_name ~ '-postgres'] is defined else []
)
}}

1
roles/socat/README.md Normal file
View File

@@ -0,0 +1 @@
Sets up a socat container along with a systemd socket unit to forward traffic to it

View File

@@ -0,0 +1,4 @@
---
socat_target_container: "{{ socat_service_name }}"
socat_container_ip: ""
socat_auto_update: true

View File

@@ -0,0 +1,28 @@
---
argument_specs:
main:
description: Sets up a socat container along with a systemd socket unit to forward traffic to it
options:
socat_service_name:
description: Name of the socat service, used for systemd unit and container naming
type: str
required: true
socat_target_container:
description: Name of the container to forward traffic to
type: str
required: false
default: "{{ socat_service_name }}"
socat_target_http_port:
description: Port on the target container to forward traffic to
type: int
required: true
socat_container_ip:
description: IP address to assign to the socat container.
type: str
required: false
default: ""
socat_auto_update:
description: Whether to automatically update the socat container
type: bool
required: false
default: true

View File

@@ -0,0 +1,27 @@
---
# Socket-activated socat forwarder: systemd listens on a unix socket and
# hands accepted connections to a socat container, which relays them to a
# TCP port on the target container.
- name: Socat socket for {{ socat_service_name }}
ansible.builtin.import_role:
name: uumas.general.systemd_socket
vars:
systemd_socket_name: "{{ socat_service_name }}-socat"
systemd_socket_requires:
- "{{ socat_target_container }}.service"
- name: Socat container for {{ socat_service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ socat_service_name }}-socat"
container_image: "docker.io/alpine/socat:latest"
container_command:
# fd 3 is the listening socket passed in by systemd; fork handles one
# process per accepted connection.
- "ACCEPT-FD:3,fork"
- "TCP:{{ socat_target_container }}:{{ socat_target_http_port }}"
container_user: nobody
container_networks:
- name: "{{ socat_service_name }}-socat"
ip: "{{ socat_container_ip }}"
container_requires:
- "{{ socat_service_name }}-socat.socket"
- "{{ socat_target_container }}.service"
# Started on demand by the socket unit, not at boot.
container_auto_start: false
container_auto_update: "{{ socat_auto_update }}"

View File

@@ -11,7 +11,7 @@
quiet: true
- name: Synapse container
ansible.builtin.include_role:
ansible.builtin.import_role:
name: service
vars:
service_name: synapse
@@ -43,23 +43,13 @@
service_container_http_port: 8008
service_domains: "{{ [synapse_external_domain] }}"
service_vhost_locations:
- path: /_matrix/client/*/login
proxy_target_socket: /run/matrix-authentication-service-socat.sock
- path: /_matrix/client/*/logout
proxy_target_socket: /run/matrix-authentication-service-socat.sock
- path: /_matrix/client/*/refresh
proxy_target_socket: /run/matrix-authentication-service-socat.sock
- path: /_matrix/client/*/login/*
proxy_target_socket: /run/matrix-authentication-service-socat.sock
- path: /_matrix/client/*/logout/*
proxy_target_socket: /run/matrix-authentication-service-socat.sock
- path: /_matrix/client/*/refresh/*
- path: ^/_matrix/client/.*/(login|logout|refresh).*$
proxy_target_socket: /run/matrix-authentication-service-socat.sock
service_wants:
- matrix-authentication-service.service
- name: Matrix authentication service for synapse
ansible.builtin.include_role:
ansible.builtin.import_role:
name: matrix_authentication_service
vars:
matrix_authentication_service_additional_networks:
@@ -68,8 +58,8 @@
matrix_authentication_service_domain: "{{ synapse_mas_domain }}"
matrix_authentication_service_homeserver_name: "{{ synapse_server_name }}"
matrix_authentication_service_homeserver_address: http://synapse:8009
matrix_authentication_service_client_secret: "{{ _service_podman_secrets['synapse-mas-client-secret'] }}"
matrix_authentication_service_homeserver_secret: "{{ _service_podman_secrets['synapse-mas-homeserver-secret'] }}"
matrix_authentication_service_client_secret: "{{ service_podman_secrets['synapse-mas-client-secret'] }}"
matrix_authentication_service_homeserver_secret: "{{ service_podman_secrets['synapse-mas-homeserver-secret'] }}"
matrix_authentication_service_email_smtp_server: "{{ synapse_email_smtp_server }}"
matrix_authentication_service_email_smtp_user: "{{ synapse_email_smtp_user }}"
@@ -82,3 +72,21 @@
matrix_authentication_service_upstream_oauth2_scope: "{{ synapse_oidc_provider_scopes | join(' ') }}"
matrix_authentication_service_upstream_oauth2_claims_imports: "{{ synapse_oidc_provider_mas_claims_imports }}"
matrix_authentication_service_upstream_oauth2_human_name: "{{ synapse_oidc_provider_name }}"
- name: Reverse proxy synapse federation
ansible.builtin.import_role:
name: uumas.general.vhost
vars:
vhost_type: reverse_proxy
vhost_id: synapse-federation
vhost_domains:
- "{{ synapse_external_domain }}:8448"
vhost_proxy_target_netproto: unix
vhost_proxy_target_socket: "/run/synapse-caddy-socket-proxy.sock"
- name: Open port for synapse federation
ansible.posix.firewalld:
service: matrix
state: enabled
permanent: true
immediate: true

View File

@@ -2,7 +2,7 @@
# vim:ft=yaml
# {{ ansible_managed }}
signing_key_path: /run/secrets/synapse-signing-key
signing_key_path: /run/secrets/signing-key
media_store_path: /data/media
log_config: /data/log.yaml
@@ -27,9 +27,9 @@ listeners:
database:
name: psycopg2
args:
host: synapse-postgres
host: postgres
user: synapse
password: "{{ _service_database_password }}"
password: "{{ service_podman_secrets['synapse-postgres'] }}"
dbname: synapse
caches:
@@ -112,6 +112,6 @@ experimental_features:
issuer: http://matrix-authentication-service:8080/
client_id: 0000000000000000000SYNAPSE
client_auth_method: client_secret_basic
client_secret: "{{ _service_podman_secrets['synapse-mas-client-secret'] }}"
admin_token: "{{ _service_podman_secrets['synapse-mas-homeserver-secret'] }}"
client_secret: "{{ service_podman_secrets['synapse-mas-client-secret'] }}"
admin_token: "{{ service_podman_secrets['synapse-mas-homeserver-secret'] }}"
account_management_url: "https://{{ synapse_mas_domain }}/account"

View File

@@ -1,3 +1,6 @@
---
volume_uid: ""
volume_gid: ""
volume_type: ""
volume_device: ""
volume_mount_options: []

View File

@@ -0,0 +1,6 @@
---
- name: "Restart volume service {{ volume_name }}"
ansible.builtin.set_fact:
systemd_restart_units: "{{ systemd_restart_units + [volume_name ~ '-volume.service'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -17,3 +17,19 @@ argument_specs:
type: str
required: false
default: ""
volume_device:
description: The path of a device which is mounted for the volume.
type: str
required: false
default: ""
volume_type:
description: The filesystem type of device as used by the mount commands -t option
type: str
required: false
default: ""
volume_mount_options:
description: The mount options to use for a filesystem as used by the mount command -o option
type: list
elements: str
required: false
default: []

View File

@@ -5,7 +5,10 @@
- name: Create container volume service {{ volume_name }}
containers.podman.podman_volume:
name: "{{ volume_name }}"
options: "{{ volume_options }}"
options: "{{ _volume_options }}"
state: quadlet
quadlet_file_mode: "0644"
notify: Reload systemd daemon
quadlet_options: "{{ _volume_quadlet_options }}"
notify:
- Reload systemd daemon
- Restart volume service {{ volume_name }}

View File

@@ -1,6 +1,24 @@
---
volume_mount_options_incl_empty:
_volume_mount_options_incl_empty:
- "{{ 'uid=' ~ volume_uid if volume_uid | length > 0 else '' }}"
- "{{ 'gid=' ~ volume_gid if volume_gid | length > 0 else '' }}"
volume_mount_options: "{{ volume_mount_options_incl_empty | select('!=', '') | list }}"
volume_options: "{{ ['o=' ~ volume_mount_options | join(',')] if volume_mount_options | length > 0 else [] }}"
_volume_mount_options: >-
{{
_volume_mount_options_incl_empty
| select('!=', '')
+ volume_mount_options
}}
_volume_options: >-
{{
(['o=' ~ _volume_mount_options | join(',')] if _volume_mount_options | length > 0 else [])
+ (['type=' ~ volume_type] if volume_type | length > 0 else [])
+ (['device=' ~ volume_device] if volume_device | length > 0 else [])
}}
_volume_device_quadlet_options: |
[Service]
ExecStartPost=/usr/bin/podman volume mount {{ volume_name }}
ExecStop=/usr/bin/podman volume unmount {{ volume_name }}
ExecStop=/usr/bin/podman volume rm {{ volume_name }}
_volume_quadlet_options: >-
{{ [_volume_device_quadlet_options] if volume_device | length > 0 else [] }}

View File

@@ -0,0 +1,9 @@
---
argument_specs:
main:
description: Installs windmill with worker in podman in podman
options:
windmill_domain:
description: The domain to use for windmill
type: str
required: true

View File

@@ -0,0 +1,62 @@
---
- name: Windmill service
ansible.builtin.import_role:
name: service
vars:
service_name: windmill
service_container_image: ghcr.io/windmill-labs/windmill:main
service_container_mounts:
- type: volume
source: worker-logs
destination: /tmp/windmill/logs
service_container_http_port: 8000
service_domains:
- "{{ windmill_domain }}"
service_database_type: postgres
service_container_env:
DATABASE_URL_FILE: /run/secrets/postgres-url
MODE: server
service_additional_containers:
- name: worker
pinp: true
mounts:
- type: volume
source: worker-logs
destination: /tmp/windmill/logs
- type: volume
source: worker-dependency-cache
destination: /tmp/windmill/cache
env:
DATABASE_URL_FILE: /run/secrets/postgres-url
MODE: worker
WORKER_GROUP: default
ENABLE_UNSHARE_PID: "true"
UNSHARE_ISOLATION_FLAGS: "--user --map-root-user --pid --fork"
- name: worker-native
env:
DATABASE_URL_FILE: /run/secrets/postgres-url
MODE: worker
WORKER_TYPE: native
NATIVE_MODE: "true"
NUM_WORKERS: "8"
SLEEP_QUEUE: "200"
- name: lsp
image: ghcr.io/windmill-labs/windmill-extra:latest
secrets: []
mounts:
- type: volume
source: lsp-cache
destination: /pyls/.cache
publish_ports:
- name: lsp
type: socket
container_port: 3001
env:
ENABLE_LSP: "true"
ENABLE_MULTIPLAYER: "false"
ENABLE_DEBUGGER: "false"
WINDMILL_BASE_URL: http://windmill:8000
service_vhost_locations:
- path: /ws/*
proxy_target_socket: /run/windmill-lsp-socat.sock