Compare commits

...

66 Commits

Author SHA1 Message Date
uumas
db651723b2 Add pinp support and make windmill use it 2026-03-26 03:04:44 +02:00
uumas
1d180106d6 service: Use saner defaults for additional containers 2026-03-25 19:29:30 +02:00
uumas
9eaa306aa4 lint 2026-03-25 19:28:40 +02:00
uumas
f8e67b12d7 windmill: fix entrypoint script 2026-03-24 19:56:09 +02:00
uumas
5814267d66 Add windmill 2026-03-18 00:31:50 +02:00
uumas
defd2517ea service: Add postgres url to secrets 2026-03-18 00:30:25 +02:00
uumas
615c4013c1 Use caddy instead of socat for http proxying 2026-03-15 22:30:36 +02:00
uumas
77768e5483 small fixes 2026-03-15 22:30:16 +02:00
uumas
162972810f example: fix postgres examples 2026-03-15 21:56:16 +02:00
uumas
8595e261c9 nextcloud: Make HArP optional and opt-in 2026-03-15 00:43:24 +02:00
uumas
70c5ed7ea0 service: Make oauth2-proxy depend on its socket 2026-03-14 23:50:01 +02:00
uumas
3554de82c0 service: Make oauth2-proxy aware it's running behind reverse proxy 2026-03-14 23:33:34 +02:00
uumas
f64ea2cbe3 container: Allow custom ip addresses for more than one network 2026-03-14 23:33:04 +02:00
uumas
ca29ffb271 network: Delete network on stop 2026-03-14 22:11:42 +02:00
uumas
dac44638e6 service: Don't use different networks for additional containers 2026-03-12 03:08:42 +02:00
uumas
489b8eaade service: Use native socket for oauth2 proxy 2026-03-12 03:08:36 +02:00
uumas
956f8ed6ce Use uumas.general.systemd_socket role 2026-03-12 01:54:43 +02:00
uumas
63e6f938bb Add vscode configuration 2026-03-12 00:45:48 +02:00
uumas
61c0724801 Add nextcloud role 2026-03-12 00:45:25 +02:00
uumas
ea2a2c3652 Add forgejo role 2026-03-12 00:42:54 +02:00
uumas
31cf49b004 service: Improve additional container support 2026-03-12 00:42:00 +02:00
uumas
9e3e1496f0 service: Split container network namespaces 2026-03-12 00:40:54 +02:00
uumas
190527e877 naming and documentation fixes 2026-03-12 00:38:42 +02:00
uumas
fb39f1bfc8 service: Don't require postgres tag specified 2026-03-12 00:36:11 +02:00
uumas
69ae1687b7 service: Add support for mongodb 2026-03-12 00:35:42 +02:00
uumas
efc7bf5434 service: Improve native sockets 2026-03-12 00:34:25 +02:00
uumas
294b931d19 service: Support publishing arbitrary ports through sockets 2026-03-12 00:32:10 +02:00
uumas
470b60f988 service: Support postgres >= 18 and postgres upgrades 2026-03-12 00:26:35 +02:00
uumas
c673aae8dc synapse: Use simple database hostname 2026-03-12 00:23:22 +02:00
uumas
4a68ab25e1 .yml -> .yaml 2026-03-12 00:22:52 +02:00
uumas
bf4ced4a9b service: Support cap_add 2026-03-12 00:20:19 +02:00
uumas
5a3bb96fc2 container: Support cap_add 2026-03-12 00:17:18 +02:00
uumas
447d4e59ad container: Stop container if process is oomkilled 2026-03-12 00:16:56 +02:00
uumas
f6af1d3472 service: Make supporting containers accessible using simple hostnames 2026-03-12 00:16:11 +02:00
uumas
ff1badbf03 container: Support setting container hostname 2026-03-12 00:12:03 +02:00
uumas
f721641fc6 Use systemd handlers from uumas.general.systemd 2026-03-12 00:10:31 +02:00
uumas
fce8804653 Make socat its own role 2026-03-12 00:06:37 +02:00
uumas
accd5ece14 small improvements 2026-02-13 02:00:55 +02:00
uumas
58ff2f6217 service: Add support for native sockets for http 2026-02-13 02:00:44 +02:00
uumas
4079b69338 Add gitignore for release archives 2026-02-12 20:05:09 +02:00
uumas
c7e26555b7 v0.1.1 2026-02-12 20:04:38 +02:00
uumas
07925caa95 Rename runtime.yaml to runtime.yml 2026-02-12 20:02:12 +02:00
uumas
de707b4e71 service: Add support for mariadb 2026-02-12 20:01:52 +02:00
uumas
0ee8e9b254 Add support for container devices 2026-01-24 17:12:57 +02:00
uumas
b030d671b5 service: Add support for mounting entire copied directory 2026-01-09 17:24:01 +02:00
uumas
b2540e2bd3 service: Validation, set default for mounts in additional containers 2025-11-26 22:26:12 +02:00
uumas
47088fd1a0 Allow setting container entrypoint 2025-11-26 22:25:54 +02:00
uumas
abf3859af7 container: rename task 2025-10-06 16:18:16 +03:00
uumas
bdec55ffc7 Use auth file instead of creds in quadlet files 2025-10-06 16:17:27 +03:00
uumas
2712cf2865 service: Support setting templated file mode 2025-09-16 12:37:20 +03:00
uumas
c5fb7f599c podman: Login to registries 2025-09-16 12:36:58 +03:00
uumas
597faa3fd5 service: Set static ips for other containers too 2025-09-15 12:34:57 +03:00
uumas
16babfd5ed service: Have service depend on oauth2-proxy socat socket if set 2025-09-15 12:34:21 +03:00
uumas
d3542993dd container: Set network ip range when using a static container ip 2025-09-15 12:32:22 +03:00
uumas
a93c26864d network: Support setting ip range 2025-09-15 12:31:40 +03:00
uumas
b333bbebbd Add prometheus role 2025-09-14 03:21:33 +03:00
uumas
fea49be8d1 Use service-specific oauth2-proxy instances 2025-09-14 03:10:20 +03:00
uumas
aaca377811 service: Support static ip for service container 2025-09-14 03:09:28 +03:00
uumas
0b73582f36 container: Support static ip for container 2025-09-14 03:08:24 +03:00
uumas
ad50e05ee9 network: Support static subnet 2025-09-14 03:07:28 +03:00
uumas
12f32f5824 network: Support macvlan driver 2025-09-14 03:07:03 +03:00
uumas
586f98bc9f synapse: Use federation port 8448 2025-09-14 03:05:06 +03:00
uumas
a29908b507 podman: Ensure auto update timer is enabled 2025-09-14 03:04:22 +03:00
uumas
c96997a4ec lint 2025-09-13 17:36:05 +03:00
uumas
014edb08ac service: fix template mounts for additional containers 2025-08-28 11:02:35 +03:00
uumas
d260e28625 synapse: Use regex for mas-proxied paths 2025-07-19 20:03:12 +03:00
92 changed files with 2273 additions and 308 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
uumas-podman-*.tar.gz

5
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,5 @@
{
"files.associations": {
"*.yaml": "ansible"
}
}

View File

@@ -3,7 +3,7 @@ namespace: uumas
name: podman
description: Roles for installing services in podman containers
readme: README.md
version: 0.1.0
version: 0.1.1
repository: "https://git.uumas.fi/uumas/ansible-podman"
license_file: LICENSE
authors:

View File

@@ -0,0 +1,4 @@
---
caddy_socket_proxy_target_container: "{{ caddy_socket_proxy_service_name }}"
caddy_socket_proxy_container_ip: ""
caddy_socket_proxy_auto_update: true

View File

@@ -0,0 +1,30 @@
---
argument_specs:
main:
description: >-
Sets up a caddy container and a systemd socket unit, forwarding traffic from it to
target container
options:
caddy_socket_proxy_service_name:
description: Name of the caddy service, used for systemd unit and container naming
type: str
required: true
caddy_socket_proxy_target_container:
description: Name of the container to forward traffic to
type: str
required: false
default: "{{ caddy_socket_proxy_service_name }}"
caddy_socket_proxy_target_http_port:
description: Port on the target container to forward traffic to
type: int
required: true
caddy_socket_proxy_container_ip:
description: IP address to assign to the caddy container.
type: str
required: false
default: ""
caddy_socket_proxy_auto_update:
description: Whether to automatically update the caddy container
type: bool
required: false
default: true

View File

@@ -0,0 +1,45 @@
---
- name: Create caddy socket proxy mount directories for {{ caddy_socket_proxy_service_name }}
ansible.builtin.file:
path: "{{ item.key }}"
state: directory
mode: "{{ item.value }}"
with_dict:
"/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/": "0755"
"/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts": "0700"
"/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy": "0755"
- name: Configure caddy socket proxy for {{ caddy_socket_proxy_service_name }}
ansible.builtin.template:
src: Caddyfile.j2
dest: "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy/Caddyfile"
mode: "0644"
notify: Restart container service {{ caddy_socket_proxy_service_name }}-caddy-socket-proxy
- name: Caddy socket proxy socket for {{ caddy_socket_proxy_service_name }}
ansible.builtin.import_role:
name: uumas.general.systemd_socket
vars:
systemd_socket_name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
systemd_socket_requires:
- "{{ caddy_socket_proxy_target_container }}.service"
- name: Caddy container for {{ caddy_socket_proxy_service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
container_image: "docker.io/library/caddy:2-alpine"
container_mounts:
- type: bind
source: "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy"
destination: /etc/caddy
readonly: true
container_networks:
- name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
ip: "{{ caddy_socket_proxy_container_ip }}"
container_requires:
- "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy.socket"
- "{{ caddy_socket_proxy_target_container }}.service"
container_auto_start: false
container_auto_update: "{{ caddy_socket_proxy_auto_update }}"

View File

@@ -0,0 +1,12 @@
# {{ ansible_managed }}
{
servers {
trusted_proxies_unix
}
}
http:// {
bind fd/3
reverse_proxy {{ caddy_socket_proxy_target_container }}:{{ caddy_socket_proxy_target_http_port }}
}

View File

@@ -1,15 +1,16 @@
---
container_command: []
container_entrypoint: ""
container_user: ""
container_mounts: []
container_devices: []
container_publish_ports: []
container_networks: []
container_hostname: ""
container_secrets: []
container_env: {}
container_auto_start: true
container_auto_update: true
container_requires: []
container_wants: []
container_image_creds:
username: ""
password: ""
container_add_capabilities: []

View File

@@ -1,7 +1,6 @@
---
- name: "Restart container service {{ container_name }}"
ansible.builtin.systemd_service:
name: "{{ container_name }}.service"
state: restarted
daemon_reload: true
ignore_errors: '{{ ansible_check_mode }}'
- name: Restart container service {{ container_name }}
ansible.builtin.set_fact:
systemd_restart_units: "{{ systemd_restart_units + [container_name ~ '.service'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -13,6 +13,11 @@ argument_specs:
required: false
default: []
elements: str
container_entrypoint:
description: Entrypoint to use for the container
type: str
required: false
default: ""
container_user:
description: The UID to run as inside the container
type: str
@@ -20,25 +25,9 @@ argument_specs:
default: ""
container_image:
description: "The image to run in the container, in FQIN format (registry/imagename:tag)"
description: "The image to run in the container, in FQIN format (registry/image_name:tag)"
type: str
required: true
container_image_creds:
description: Credentials used to authenticate with the registry
type: dict
required: false
default:
username: ""
password: ""
options:
username:
description: Username
type: str
required: true
password:
description: Password
type: str
required: true
container_mounts:
description: List of bind mounts or volumes to be mounted inside the container.
@@ -103,6 +92,21 @@ argument_specs:
required: false
default: []
container_devices:
description: List of devices to be added inside the container.
type: list
required: false
default: []
elements: dict
options:
source:
description: Device path on host
type: str
required: true
destination:
description: Device path inside the container. Defaults to same as host.
type: str
required: false
container_publish_ports:
description: "A list of published ports in docker format (<host listen address>:<host port>:<container port>)"
type: list
@@ -114,7 +118,23 @@ argument_specs:
type: list
required: false
default: []
elements: str
elements: dict
options:
name:
description: Network name
type: str
required: true
ip:
description: Container IPv4 address in the network
type: str
required: false
default: ""
container_hostname:
description: Hostname to set inside the container. Available to other containers on the same network.
type: str
required: false
default: ""
container_secrets:
description: A list of secrets available to the container as file or environment variable
type: list
@@ -158,6 +178,13 @@ argument_specs:
required: false
default: {}
container_add_capabilities:
description: List of capabilities to add to the container
type: list
required: false
default: []
elements: str
container_requires:
description: >
List of systemd units (like other containers) this one depends on.

View File

@@ -7,15 +7,16 @@
name: image
vars:
image_name: "{{ container_image }}"
image_creds: "{{ container_image_creds }}"
when: image_created_images is not defined or container_image not in image_created_images
- name: Create networks for container {{ container_name }}
ansible.builtin.include_role:
name: network
vars:
network_name: "{{ network }}"
when: network_created_networks is not defined or network not in network_created_networks
network_name: "{{ network.name }}"
network_subnet: "{{ _container_network_subnet }}"
network_range: "{{ _container_network_range }}"
when: network_created_networks is not defined or network.name not in network_created_networks
loop: "{{ container_networks }}"
loop_control:
loop_var: network
@@ -38,18 +39,25 @@
ansible.builtin.include_tasks: secrets.yaml
when: container_secrets | length > 0
- name: Create container service {{ container_name }}
- name: Create container {{ container_name }}
containers.podman.podman_container:
image: "{{ _container_image }}"
name: "{{ container_name }}"
command: "{{ container_command or omit }}"
entrypoint: "{{ container_entrypoint or omit }}"
user: "{{ container_user or omit }}"
mount: "{{ _container_mounts | map('items') | map('map', 'join', '=') | map('join', ',') }}"
network: "{{ container_networks | map('regex_replace', '$', '.network') }}"
device: "{{ _container_devices }}"
network: "{{ _container_networks }}"
hostname: "{{ container_hostname or omit }}"
publish: "{{ container_publish_ports }}"
secrets: "{{ _container_secrets }}"
env: "{{ container_env }}"
cap_add: "{{ container_add_capabilities }}"
label: "{{ _container_labels if _container_labels | length > 0 else omit }}"
state: quadlet
quadlet_file_mode: "0600"
quadlet_options: "{{ _container_quadlet_options }}"
notify: Restart container service {{ container_name }}
notify:
- Reload systemd daemon
- Restart container service {{ container_name }}

View File

@@ -22,6 +22,25 @@ _container_mounts: >-
| zip(_container_volume_mount_sources) | map('combine')
}}
_container_devices_withdefaults: >-
{{
container_devices
| map(attribute='source')
| map('community.general.dict_kv', 'destination')
| zip(container_devices)
| map('combine')
}}
_container_devices: >-
{{
_container_devices_withdefaults
| map(attribute='source')
| zip(
_container_devices_withdefaults
| map(attribute='destination')
)
| map('join', ':')
}}
_container_secrets: >-
{{
container_secrets
@@ -41,6 +60,15 @@ _container_secrets: >-
| map('join', ',')
}}
_container_labels: >-
{{
{'io.containers.autoupdate.authfile': '/etc/containers/auth.json'}
if container_auto_update and
container_image.split('/')[0] in
podman_registry_accounts | map(attribute='registry')
else {}
}}
_container_quadlet_unit_options: |
[Unit]
Description=Container {{ container_name }}
@@ -55,6 +83,7 @@ _container_quadlet_unit_options: |
{% endfor %}
[Service]
SuccessExitStatus=0 143
OOMPolicy=stop
_container_quadlet_auto_start_options: |
[Service]
Restart=always

View File

@@ -0,0 +1,27 @@
---
_container_networks: >-
{{
container_networks
| map(attribute='name')
| map('regex_replace', '$', '.network')
| zip(container_networks | map(attribute='ip', default=''))
| map('reject', 'equalto', '')
| map('join', ':ip=')
}}
_container_network_subnet: >-
{{ network.ip | ansible.utils.ipsubnet(24) if network.ip | default('') | length > 0 else '' }}
_container_network_subnet_ranges: >-
{{
[
_container_network_subnet | ansible.utils.ipsubnet(25, 0),
_container_network_subnet | ansible.utils.ipsubnet(25, 1)
] if network.ip | default('') | length > 0 else []
}}
_container_network_range: >-
{{
_container_network_subnet_ranges |
reject('ansible.utils.supernet_of', network.ip) |
first
if network.ip | default('') | length > 0 else ''
}}

View File

@@ -1,5 +1,5 @@
---
- name: Hello world container
- name: Hello world service
ansible.builtin.import_role:
name: service
vars:
@@ -19,7 +19,6 @@
service_container_http_port: 8080
service_domains: "{{ example_domains }}"
service_database_type: postgres
service_postgres_tag: 16-alpine
service_container_publish_ports:
- "127.0.0.1:8080:8080"
- "0.0.0.0:4443:8043"
@@ -27,6 +26,9 @@
- network-online.target
service_container_env:
TZ: "Etc/UTC"
DB_HOST: postgres
DB_USER: hello-world
DB_PASSWORD__FILE: /run/secrets/postgres
service_additional_containers:
- name: worker
# image: "docker.io/library/hello-world:latest"

1
roles/forgejo/README.md Normal file
View File

@@ -0,0 +1 @@
Installs and configures forgejo inside podman

View File

@@ -0,0 +1,6 @@
---
forgejo_require_signin_view: false
forgejo_enable_internal_signin: true
forgejo_smtp_user: ""
forgejo_smtp_password: ""

View File

@@ -0,0 +1,45 @@
---
argument_specs:
main:
description: "Installs and configures forgejo inside podman"
options:
forgejo_tag:
description: Forgejo version to use. Can be major (x), minor (x.y) or patch (x.y.z). Major version recommended.
type: str
required: true
forgejo_domain:
description: Domain forgejo should listen on
type: str
required: true
forgejo_secret_key:
description: A long secret key for forgejo to encrypt secrets with. Must never change.
type: str
required: true
forgejo_smtp_server:
description: Smtp server for forgejo
type: str
required: true
forgejo_smtp_from:
description: Address to send email from
type: str
required: true
forgejo_smtp_user:
description: Smtp user to authenticate as
type: str
required: false
default: ""
forgejo_smtp_password:
description: Smtp password to authenticate with
type: str
required: false
default: ""
forgejo_require_signin_view:
description: Whether to require signing in to view public repositories
type: bool
required: false
default: false
forgejo_enable_internal_signin:
description: Whether to enable signing in using local username/password
type: bool
required: false
default: true

View File

@@ -0,0 +1,81 @@
---
- name: Ensure netcat-openbsd is installed for ssh shell
ansible.builtin.apt:
name: netcat-openbsd
- name: Create git system user on host for forgejo ssh
ansible.builtin.user:
name: git
group: git
system: true
home: /srv/forgejo/git
generate_ssh_key: true
ssh_key_type: ed25519
shell: /srv/forgejo/git/ssh-shell
register: _forgejo_git_user
- name: Add git user's own ssh key to its authorized keys
ansible.posix.authorized_key:
user: git
key: "{{ _forgejo_git_user.ssh_public_key }}"
- name: Install ssh forwarding shell for forgejo
ansible.builtin.template:
src: ssh-shell.j2
dest: /srv/forgejo/git/ssh-shell
mode: "0755"
- name: Forgejo service
ansible.builtin.import_role:
name: service
vars:
service_name: forgejo
service_container_image: codeberg.org/forgejo/forgejo:{{ forgejo_tag }}
service_container_mounts:
- type: volume
source: data
destination: /data
- type: bind
source: /etc/localtime
destination: /etc/localtime
readonly: true
- type: bind
source: /srv/forgejo/git/.ssh
destination: /data/git/.ssh
service_container_secrets:
- name: secret-key
value: "{{ forgejo_secret_key }}"
service_domains:
- "{{ forgejo_domain }}"
service_database_type: postgres
service_postgres_tag: 18-alpine
service_container_publish_ports:
- name: ssh
type: socket
container_port: 22
service_container_env:
USER_UID: "{{ _forgejo_git_user.uid }}"
USER_GID: "{{ _forgejo_git_user.group }}"
FORGEJO__security__SECRET_KEY_URI: file:/run/secrets/secret-key
FORGEJO__database__DB_TYPE: postgres
FORGEJO__database__USER: forgejo
FORGEJO__database__NAME: forgejo
FORGEJO__database__HOST: postgres
FORGEJO__database__PASSWD__FILE: /run/secrets/postgres
FORGEJO__server__PROTOCOL: http+unix
FORGEJO__server__HTTP_ADDR: /run/forgejo.sock
FORGEJO__server__DOMAIN: "{{ forgejo_domain }}"
FORGEJO__server__ROOT_URL: https://{{ forgejo_domain }}
FORGEJO__server__SSH_ALLOW_UNEXPECTED_AUTHORIZED_KEYS: "true"
FORGEJO__mailer__ENABLED: "true"
FORGEJO__mailer__PROTOCOL: smtp
FORGEJO__mailer__SMTP_ADDR: "{{ forgejo_smtp_server }}"
FORGEJO__mailer__SMTP_PORT: "587"
FORGEJO__mailer__FROM: "{{ forgejo_smtp_from }}"
FORGEJO__mailer__USER: "{{ forgejo_smtp_user }}"
FORGEJO__mailer__PASSWD: "{{ forgejo_smtp_password }}"
FORGEJO__service__DISABLE_REGISTRATION: "true"
FORGEJO__service__REQUIRE_SIGNIN_VIEW: "{{ 'true' if forgejo_require_signin_view else 'false' }}"
FORGEJO__service__ENABLE_INTERNAL_SIGNIN: "{{ 'true' if forgejo_enable_internal_signin else 'false' }}"
FORGEJO__oauth2_client__ENABLE_AUTO_REGISTRATION: "true"
FORGEJO__openid__ENABLE_OPENID_SIGNIN: "false"

View File

@@ -0,0 +1,4 @@
#!/bin/bash
# {{ ansible_managed }}
shift
SHELL=/bin/bash ssh -o "ProxyCommand nc -U /run/forgejo-ssh-socat.sock" -o StrictHostKeyChecking=no git@forgejo "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $@"

View File

@@ -17,7 +17,7 @@
service_container_additional_networks: "{{ grafana_additional_networks }}"
service_container_env:
GF_DATABASE_TYPE: postgres
GF_DATABASE_HOST: grafana-postgres:5432
GF_DATABASE_HOST: postgres:5432
GF_DATABASE_NAME: grafana
GF_DATABASE_USER: grafana
GF_DATABASE_PASSWORD__FILE: /run/secrets/postgres

View File

@@ -1,4 +0,0 @@
---
image_creds:
username: ""
password: ""

View File

@@ -9,19 +9,3 @@ argument_specs:
description: "The image FQIN (format registry/imagename:tag)"
type: str
required: true
image_creds:
description: Credentials used to authenticate with the registry
type: dict
required: false
default:
username: ""
password: ""
options:
username:
description: Username
type: str
required: true
password:
description: Password
type: str
required: true

View File

@@ -6,9 +6,14 @@
- name: Create container image service {{ image_name }}
containers.podman.podman_image:
name: "{{ image_name }}"
username: "{{ image_creds.username if image_creds.username | length > 0 else omit }}"
password: "{{ image_creds.password if image_creds.password | length > 0 else omit }}"
state: quadlet
quadlet_filename: "{{ image_name | replace('/', '_') }}"
quadlet_file_mode: "0600"
quadlet_options: >-
{{
['AuthFile=/etc/containers/auth.json']
if image_name.split('/')[0] in
podman_registry_accounts | map(attribute='registry')
else []
}}
notify: Reload systemd daemon

View File

@@ -0,0 +1,4 @@
---
network_driver: bridge
network_subnet: ""
network_range: ""

View File

@@ -0,0 +1,6 @@
---
- name: Restart network service {{ network_name }}
ansible.builtin.set_fact:
systemd_restart_units: "{{ systemd_restart_units + [network_name ~ '-network.service'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -7,3 +7,21 @@ argument_specs:
description: Name of the network. Must be unique within a host.
type: str
required: true
network_driver:
description: Driver to manage the network
type: str
required: false
default: bridge
choices:
- bridge
- macvlan
network_subnet:
description: Subnet for the network
type: str
required: false
default: ""
network_range:
description: Range to allocate ip addresses from
type: str
required: false
default: ""

View File

@@ -1,10 +1,21 @@
---
- name: "Create container network service {{ network_name }}"
- name: Create container network service {{ network_name }}
containers.podman.podman_network:
name: "{{ network_name }}"
state: quadlet
quadlet_file_mode: "0644"
notify: Reload systemd daemon
driver: "{{ network_driver }}"
subnet: "{{ network_subnet if network_subnet | length > 0 else omit }}"
ip_range: "{{ network_range if network_range | length > 0 else omit }}"
opt:
parent: "{{ ansible_facts.default_ipv4.interface if network_driver == 'macvlan' else omit }}"
quadlet_options:
- |-
[Service]
ExecStopPost=/usr/bin/podman network rm {{ network_name }}
notify:
- Reload systemd daemon
- Restart network service {{ network_name }}
- name: Add network to created networks variable
ansible.builtin.set_fact:

View File

@@ -0,0 +1 @@
Sets up a nextcloud podman container, including HaRP using podman in podman

View File

@@ -0,0 +1,3 @@
---
nextcloud_tag: stable
nextcloud_install_harp: false

View File

@@ -0,0 +1,29 @@
---
- name: Unregister AppAPI daemon
containers.podman.podman_container_exec:
name: nextcloud
argv:
- /var/www/html/occ
- app_api:daemon:unregister
- harp
register: _nextcloud_appapi_unregister
changed_when: _nextcloud_appapi_unregister.rc == 0
failed_when: _nextcloud_appapi_unregister.rc not in [0, 1]
listen: Restart container service nextcloud-harp
- name: Register AppAPI daemon
containers.podman.podman_container_exec:
name: nextcloud
argv:
- /bin/sh
- -c
- >-
/var/www/html/occ app_api:daemon:register
--harp
--harp_frp_address=harp:8782
--harp_shared_key "$(cat /run/secrets/harp-shared-key)"
--net host
--set-default
--
harp HaRP docker-install http harp:8780 https://{{ nextcloud_domains[0] }}
listen: Restart container service nextcloud-harp

View File

@@ -0,0 +1,24 @@
---
argument_specs:
main:
description: "Sets up a nextcloud podman container, including HaRP using podman in podman"
options:
nextcloud_domains:
description: A list of domains nextcloud should listen on
type: list
required: true
elements: str
nextcloud_admin_password:
description: Password of the initial admin user
type: str
required: true
nextcloud_tag:
description: Nextcloud version to use
type: str
required: false
default: stable
nextcloud_install_harp:
description: Whether to install HaRP for nextcloud
type: bool
required: false
default: false

View File

@@ -0,0 +1,31 @@
---
- name: Nextcloud service
ansible.builtin.import_role:
name: service
vars:
service_name: nextcloud
service_container_image: docker.io/library/nextcloud:{{ nextcloud_tag }}
service_container_http_port: 80
service_domains: "{{ nextcloud_domains }}"
service_database_type: postgres
service_redis: true
service_container_mounts:
- type: volume
source: data
destination: /var/www/html
service_container_secrets:
- name: admin-password
value: "{{ nextcloud_admin_password }}"
- name: harp-shared-key
service_container_env:
POSTGRES_HOST: postgres
POSTGRES_DB: nextcloud
POSTGRES_USER: nextcloud
POSTGRES_PASSWORD_FILE: /run/secrets/postgres
REDIS_HOST: redis
TRUSTED_PROXIES: 10.0.0.0/8
NEXTCLOUD_TRUSTED_DOMAINS: "{{ nextcloud_domains | join(' ') }}"
NEXTCLOUD_ADMIN_USER: admin
NEXTCLOUD_ADMIN_PASSWORD_FILE: /run/secrets/admin-password
service_additional_containers: "{{ _nextcloud_additional_containers }}"
service_vhost_locations: "{{ _nextcloud_vhost_locations }}"

View File

@@ -0,0 +1,9 @@
[containers]
ipcns = "host"
cgroupns = "host"
cgroups = "disabled"
log_driver = "k8s-file"
[engine]
cgroup_manager = "cgroupfs"
events_logger = "file"
runtime = "crun"

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# {{ ansible_managed }}
_term() {
echo "Received SIGTERM, stopping all containers"
kill "$child"
}
podman system service -t 0 &
podman run \
--rm \
-v /run/secrets/harp-shared-key:/run/secrets/harp-shared-key:ro \
-e HP_SHARED_KEY_FILE=/run/secrets/harp-shared-key \
-e NC_INSTANCE_URL="https://{{ nextcloud_domains[0] }}" \
-e HP_TRUSTED_PROXY_IPS="10.0.0.0/8" \
-v /tmp/storage-run-1000/podman/podman.sock:/var/run/docker.sock \
-v /certs:/certs \
--name harp \
--network host \
ghcr.io/nextcloud/nextcloud-appapi-harp:release &
child=$!
trap _term SIGTERM
wait

View File

@@ -0,0 +1,45 @@
---
_nextcloud_cron_container:
name: cron
entrypoint: /cron.sh
_nextcloud_harp_container:
name: harp
add_capabilities:
- CAP_SYS_ADMIN
image: quay.io/podman/stable:latest
user: podman
entrypoint: /entrypoint.sh
devices:
- source: /dev/fuse
mounts:
- type: template
source: containers.conf.j2
destination: /etc/containers/containers.conf
- type: template
source: harp_entrypoint.sh.j2
destination: /entrypoint.sh
mode: "0755"
- type: volume
source: harp-certs
destination: /certs
- type: volume
source: harp-containers
destination: /home/podman/.local/share/containers
env: {}
secrets:
- name: harp-shared-key
publish_ports:
- name: harp
type: socket
container_port: 8780
_nextcloud_additional_containers: >-
{{
[_nextcloud_cron_container]
+ ([_nextcloud_harp_container] if nextcloud_install_harp else [])
}}
_nextcloud_harp_vhost_locations:
- path: /exapps/*
proxy_target_socket: /run/nextcloud-harp-socat.sock
_nextcloud_vhost_locations: >-
{{ _nextcloud_harp_vhost_locations if nextcloud_install_harp else [] }}

View File

@@ -1 +0,0 @@
Sets up a oauth2-proxy container

View File

@@ -1,17 +0,0 @@
---
argument_specs:
main:
description: "Sets up a oauth2-proxy container"
options:
oauth2_proxy_oidc_issuer_url:
description: the OpenID Connect issuer URL
type: str
required: true
oauth2_proxy_client_id:
description: the OAuth client ID
type: str
required: true
oauth2_proxy_client_secret:
description: the OAuth client secret
type: str
required: true

View File

@@ -1,24 +0,0 @@
---
- name: OAuth2 Proxy
ansible.builtin.import_role:
name: service
vars:
service_name: oauth2-proxy
service_container_image: "quay.io/oauth2-proxy/oauth2-proxy:latest-alpine"
service_container_http_port: 4180
service_container_command:
- --config
- /oauth2-proxy.cfg
- --client-secret-file
- /run/secrets/client_secret
service_container_mounts:
- type: template
source: oauth2-proxy.cfg.j2
destination: /oauth2-proxy.cfg
service_container_secrets:
- name: cookie_secret
length: 32
type: env
target: OAUTH2_PROXY_COOKIE_SECRET
- name: client_secret
value: "{{ oauth2_proxy_client_secret }}"

View File

@@ -1,11 +0,0 @@
# OAuth2 Proxy Configuration
http_address = "0.0.0.0:4180"
# OIDC Provider Configuration
provider = "oidc"
oidc_issuer_url = "{{ oauth2_proxy_oidc_issuer_url }}"
client_id = "{{ oauth2_proxy_client_id }}"
code_challenge_method = "S256"
skip_provider_button = "true"
email_domains = "*"

View File

@@ -0,0 +1,2 @@
---
podman_registry_accounts: []

View File

@@ -1,4 +0,0 @@
---
- name: Reload systemd daemon
ansible.builtin.systemd_service:
daemon_reload: true

View File

@@ -2,4 +2,23 @@
argument_specs:
main:
description: Installs podman
options: {}
options:
podman_registry_accounts:
description: List of accounts for container registries
type: list
required: false
default: []
elements: dict
options:
registry:
description: Registry server to login to
type: str
required: true
username:
description: Username
type: str
required: true
password:
description: Password / token
type: str
required: true

View File

@@ -1,5 +1,6 @@
---
dependencies:
- role: uumas.general.systemd
- role: uumas.general.compatcheck
vars:
compatcheck_supported_distributions:

View File

@@ -4,3 +4,19 @@
name:
- podman
- aardvark-dns
- name: Ensure podman auto update timer is enabled
ansible.builtin.systemd_service:
name: podman-auto-update.timer
state: started
enabled: true
ignore_errors: "{{ ansible_check_mode }}"
- name: Login to registries
containers.podman.podman_login:
registry: "{{ item.registry }}"
username: "{{ item.username }}"
password: "{{ item.password }}"
authfile: /etc/containers/auth.json
loop: "{{ podman_registry_accounts }}"
no_log: true

View File

@@ -0,0 +1 @@
Installs and configures prometheus

View File

@@ -0,0 +1,4 @@
---
prometheus_additional_networks: []
prometheus_ping_hosts: []

View File

@@ -0,0 +1,35 @@
---
argument_specs:
main:
description: Installs and configures prometheus
options:
prometheus_additional_networks:
description: >-
A list of additional podman networks for the prometheus container (in
addition to prometheus network).
type: list
required: false
default: []
elements: str
prometheus_ping_hosts:
description: List of hosts to ping
type: list
required: false
default: []
elements: dict
options:
name:
description: Hostname to ping
type: str
required: true
type:
description: >-
Type of host. Monitored hosts are pinged to check if they are up.
Wan hosts are pinged to check if prometheus has internet access.
type: str
required: false
default: monitored
choices:
- monitored
- wan

View File

@@ -0,0 +1,28 @@
---
- name: Prometheus
ansible.builtin.import_role:
name: service
vars:
service_name: prometheus
service_container_image: "docker.io/prom/prometheus:latest"
service_container_mounts:
- type: template
source: prometheus.yml.j2
destination: /etc/prometheus/prometheus.yml
- type: volume
source: data
destination: /prometheus
- type: template
source: alerting/node-exporter.yaml.j2
destination: /etc/prometheus/alerting/node-exporter.yaml
- type: template
source: alerting/blackbox-exporter.yaml.j2
destination: /etc/prometheus/alerting/blackbox-exporter.yaml
service_container_additional_networks: "{{ prometheus_additional_networks }}"
service_additional_containers:
- name: blackbox-exporter
image: docker.io/prom/blackbox-exporter:latest
mounts:
- type: template
source: blackbox_exporter.yml.j2
destination: /etc/blackbox_exporter/config.yml

View File

@@ -0,0 +1,97 @@
{% raw %}
groups:
- name: BlackboxExporter
rules:
- alert: BlackboxAllWanProbesFailed
expr: 'sum by (host_type) (probe_success{host_type="wan"})==0'
for: 5s
labels:
severity: critical
annotations:
summary: Lost internet access
        description: Failed to contact any wan probes
- alert: BlackboxProbeFailed
expr: 'probe_success == 0'
for: 0m
labels:
severity: critical
annotations:
summary: Blackbox probe failed (instance {{ $labels.instance }})
description: "Probe failed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxConfigurationReloadFailure
expr: 'blackbox_exporter_config_last_reload_successful != 1'
for: 0m
labels:
severity: warning
annotations:
summary: Blackbox configuration reload failure (instance {{ $labels.instance }})
description: "Blackbox configuration reload failure\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSlowProbe
expr: 'avg_over_time(probe_duration_seconds[1m]) > 1'
for: 1m
labels:
severity: warning
annotations:
summary: Blackbox slow probe (instance {{ $labels.instance }})
description: "Blackbox probe took more than 1s to complete\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxProbeHttpFailure
expr: 'probe_http_status_code <= 199 OR probe_http_status_code >= 400'
for: 0m
labels:
severity: critical
annotations:
summary: Blackbox probe HTTP failure (instance {{ $labels.instance }})
description: "HTTP status code is not 200-399\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSslCertificateWillExpireSoon
expr: '3 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 20'
for: 0m
labels:
severity: warning
annotations:
summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
description: "SSL certificate expires in less than 20 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSslCertificateWillExpireSoon
expr: '0 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 3'
for: 0m
labels:
severity: critical
annotations:
summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
description: "SSL certificate expires in less than 3 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxSslCertificateExpired
expr: 'round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 0'
for: 0m
labels:
severity: critical
annotations:
summary: Blackbox SSL certificate expired (instance {{ $labels.instance }})
description: "SSL certificate has expired already\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxProbeSlowHttp
expr: 'avg_over_time(probe_http_duration_seconds[1m]) > 1'
for: 1m
labels:
severity: warning
annotations:
summary: Blackbox probe slow HTTP (instance {{ $labels.instance }})
description: "HTTP request took more than 1s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: BlackboxProbeSlowPing
expr: 'avg_over_time(probe_icmp_duration_seconds[1m]) > 1'
for: 1m
labels:
severity: warning
annotations:
summary: Blackbox probe slow ping (instance {{ $labels.instance }})
description: "Blackbox ping took more than 1s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
{% endraw %}

View File

@@ -0,0 +1,322 @@
{% raw %}
groups:
- name: NodeExporter
rules:
- alert: HostOutOfMemory
expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)'
for: 2m
labels:
severity: warning
annotations:
summary: Host out of memory (instance {{ $labels.instance }})
description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostMemoryUnderMemoryPressure
expr: '(rate(node_vmstat_pgmajfault[5m]) > 1000)'
for: 0m
labels:
severity: warning
annotations:
summary: Host memory under memory pressure (instance {{ $labels.instance }})
description: "The node is under heavy memory pressure. High rate of loading memory pages from disk.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostMemoryIsUnderutilized
expr: 'min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8'
for: 0m
labels:
severity: info
annotations:
summary: Host Memory is underutilized (instance {{ $labels.instance }})
description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualNetworkThroughputIn
expr: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)'
for: 0m
labels:
severity: warning
annotations:
summary: Host unusual network throughput in (instance {{ $labels.instance }})
description: "Host receive bandwidth is high (>80%).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualNetworkThroughputOut
expr: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)'
for: 0m
labels:
severity: warning
annotations:
summary: Host unusual network throughput out (instance {{ $labels.instance }})
description: "Host transmit bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskReadRate
expr: '(rate(node_disk_io_time_seconds_total[5m]) > .80)'
for: 0m
labels:
severity: warning
annotations:
summary: Host unusual disk read rate (instance {{ $labels.instance }})
description: "Disk is too busy (IO wait > 80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostOutOfDiskSpace
expr: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0)'
for: 2m
labels:
severity: critical
annotations:
summary: Host out of disk space (instance {{ $labels.instance }})
description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostDiskMayFillIn24Hours
expr: 'predict_linear(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[3h], 86400) <= 0 and node_filesystem_avail_bytes > 0'
for: 2m
labels:
severity: warning
annotations:
summary: Host disk may fill in 24 hours (instance {{ $labels.instance }})
description: "Filesystem will likely run out of space within the next 24 hours.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostOutOfInodes
expr: '(node_filesystem_files_free / node_filesystem_files < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0)'
for: 2m
labels:
severity: critical
annotations:
summary: Host out of inodes (instance {{ $labels.instance }})
description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostFilesystemDeviceError
expr: 'node_filesystem_device_error{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} == 1'
for: 2m
labels:
severity: critical
annotations:
summary: Host filesystem device error (instance {{ $labels.instance }})
description: "Error stat-ing the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostInodesMayFillIn24Hours
expr: 'predict_linear(node_filesystem_files_free{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_files_free > 0'
for: 2m
labels:
severity: warning
annotations:
summary: Host inodes may fill in 24 hours (instance {{ $labels.instance }})
description: "Filesystem will likely run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskReadLatency
expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0)'
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk read latency (instance {{ $labels.instance }})
description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskWriteLatency
expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0)'
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk write latency (instance {{ $labels.instance }})
description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostHighCpuLoad
expr: '1 - (avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m]))) > .80'
for: 10m
labels:
severity: warning
annotations:
summary: Host high CPU load (instance {{ $labels.instance }})
description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuIsUnderutilized
expr: '(min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8'
for: 1w
labels:
severity: info
annotations:
summary: Host CPU is underutilized (instance {{ $labels.instance }})
description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuStealNoisyNeighbor
expr: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10'
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuHighIowait
expr: 'avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10'
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU high iowait (instance {{ $labels.instance }})
description: "CPU iowait > 10%. Your CPU is idling waiting for storage to respond.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskIo
expr: 'rate(node_disk_io_time_seconds_total[5m]) > 0.8'
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual disk IO (instance {{ $labels.instance }})
description: "Disk usage >80%. Check storage for issues or increase IOPS capabilities. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostContextSwitchingHigh
expr: '(rate(node_context_switches_total[15m])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) / (rate(node_context_switches_total[1d])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) > 2'
for: 0m
labels:
severity: warning
annotations:
summary: Host context switching high (instance {{ $labels.instance }})
description: "Context switching is growing on the node (twice the daily average during the last 15m)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSwapIsFillingUp
expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80)'
for: 2m
labels:
severity: warning
annotations:
summary: Host swap is filling up (instance {{ $labels.instance }})
description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSystemdServiceCrashed
expr: '(node_systemd_unit_state{state="failed"} == 1)'
for: 0m
labels:
severity: warning
annotations:
summary: Host systemd service crashed (instance {{ $labels.instance }})
description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostPhysicalComponentTooHot
expr: 'node_hwmon_temp_celsius > node_hwmon_temp_max_celsius'
for: 5m
labels:
severity: warning
annotations:
summary: Host physical component too hot (instance {{ $labels.instance }})
description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNodeOvertemperatureAlarm
expr: '((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1))'
for: 0m
labels:
severity: critical
annotations:
summary: Host node overtemperature alarm (instance {{ $labels.instance }})
description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSoftwareRaidInsufficientDrives
expr: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0)'
for: 0m
labels:
severity: critical
annotations:
summary: Host software RAID insufficient drives (instance {{ $labels.instance }})
description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSoftwareRaidDiskFailure
expr: '(node_md_disks{state="failed"} > 0)'
for: 2m
labels:
severity: warning
annotations:
summary: Host software RAID disk failure (instance {{ $labels.instance }})
description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostKernelVersionDeviations
expr: 'changes(node_uname_info[1h]) > 0'
for: 0m
labels:
severity: info
annotations:
summary: Host kernel version deviations (instance {{ $labels.instance }})
description: "Kernel version for {{ $labels.instance }} has changed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostOomKillDetected
expr: '(increase(node_vmstat_oom_kill[1m]) > 0)'
for: 0m
labels:
severity: warning
annotations:
summary: Host OOM kill detected (instance {{ $labels.instance }})
description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostEdacCorrectableErrorsDetected
expr: '(increase(node_edac_correctable_errors_total[1m]) > 0)'
for: 0m
labels:
severity: info
annotations:
summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostEdacUncorrectableErrorsDetected
expr: '(node_edac_uncorrectable_errors_total > 0)'
for: 0m
labels:
severity: warning
annotations:
summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkReceiveErrors
expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01)'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Receive Errors (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkTransmitErrors
expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01)'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Transmit Errors (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkBondDegraded
expr: '((node_bonding_active - node_bonding_slaves) != 0)'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Bond Degraded (instance {{ $labels.instance }})
description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostConntrackLimit
expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8)'
for: 5m
labels:
severity: warning
annotations:
summary: Host conntrack limit (instance {{ $labels.instance }})
description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostClockSkew
expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0))'
for: 10m
labels:
severity: warning
annotations:
summary: Host clock skew (instance {{ $labels.instance }})
description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostClockNotSynchronising
expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16)'
for: 2m
labels:
severity: warning
annotations:
summary: Host clock not synchronising (instance {{ $labels.instance }})
description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
{% endraw %}

View File

@@ -0,0 +1,5 @@
---
modules:
icmp:
prober: icmp
timeout: 5s

View File

@@ -0,0 +1,55 @@
---
# {{ ansible_managed }}
global:
scrape_interval: 10s
evaluation_interval: 10s
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
- "/etc/prometheus/recording/*.yaml"
- "/etc/prometheus/alerting/*.yaml"
scrape_configs:
- job_name: prometheus
static_configs:
- targets:
- localhost:9090
- job_name: blackbox
static_configs:
- targets:
- blackbox-exporter:9115
- job_name: node
static_configs:
- targets:
- host.containers.internal:9100
{% if prometheus_ping_hosts | length > 0 %}
- job_name: "icmp"
metrics_path: "/probe"
params:
module: ["icmp"]
static_configs:
- targets:
{% for host in prometheus_ping_hosts %}
- "{{ host.name }}::{{ host.type | default('monitored') }}"
{% endfor %}
relabel_configs:
- source_labels:
- __address__
regex: '(.+)::(.+)'
target_label: __param_target
replacement: '${1}'
- source_labels:
- __address__
regex: '(.+)::(.+)'
target_label: host_type
replacement: '${2}'
- source_labels:
- __param_target
target_label: instance
- target_label: __address__
        replacement: blackbox-exporter:9115
{%- endif %}

View File

@@ -1,5 +1,6 @@
---
service_container_command: []
service_container_entrypoint: ""
service_domains: []
service_container_http_port: 0
@@ -7,18 +8,23 @@ service_vhost_locations: []
service_proxy_pass_host_header: true
service_proxy_auth_type: none
service_container_ip: ""
service_container_additional_networks: []
service_container_user: ""
service_container_publish_ports: []
service_container_mounts: []
service_container_devices: []
service_container_secrets: []
service_container_env: {}
service_container_add_capabilities: []
service_container_pinp: false
service_database_type: none
service_database_additional_networks: []
service_database_secret_type: mount
service_database_secret_target: "{{ service_database_type }}"
service_postgres_image: docker.io/library/postgres
service_postgres_image: docker.io/pgautoupgrade/pgautoupgrade
service_postgres_tag: alpine
service_redis: false
service_additional_containers: []
@@ -26,7 +32,3 @@ service_additional_containers: []
service_requires: []
service_wants: []
service_auto_update: true
service_container_image_creds:
username: ""
password: ""

View File

@@ -1,7 +1,6 @@
---
- name: "Restart socat socket for {{ service_name }}"
ansible.builtin.systemd_service:
name: "{{ service_name }}-socat.socket"
state: restarted
daemon_reload: true
ignore_errors: '{{ ansible_check_mode }}'
- name: Restart socket for {{ service_name }}
ansible.builtin.set_fact:
    systemd_restart_units: "{{ systemd_restart_units + [service_name ~ '.socket'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -14,6 +14,11 @@ argument_specs:
required: false
default: []
elements: str
service_container_entrypoint:
description: Entrypoint to use in the service container
type: str
required: false
default: ""
service_domains:
description: A list of domains which should be proxied to the main service container
@@ -23,8 +28,8 @@ argument_specs:
elements: str
service_container_http_port:
description:
- Port inside the container where http requests will be proxied to.
- Required if service_domains is not empty.
- Port inside the container where http requests are proxied to.
- If set to 0, http requests are proxied to /run/<service name>.sock inside the container
type: int
required: false
default: 0
@@ -36,7 +41,6 @@ argument_specs:
service_proxy_auth_type:
description: >-
Set to oauth2-proxy to use OAuth2 Proxy for vhost authentication.
The oauth2-proxy role must be run separately.
type: str
required: false
default: none
@@ -49,32 +53,35 @@ argument_specs:
required: false
default: []
service_oauth2_proxy_issuer_url:
description: >-
OpenID Connect issuer URL. Required if service_proxy_auth_type is oauth2-proxy.
type: str
required: false
oauth2_proxy_client_id:
description: OAuth client ID. Required if service_proxy_auth_type is oauth2-proxy.
type: str
required: false
oauth2_proxy_client_secret:
description: OAuth client secret. Required if service_proxy_auth_type is oauth2-proxy.
type: str
required: false
service_container_image:
description: "The image to run in the service container(s), in FQIN format (registry/imagename:tag)."
description: "The image to run in the service container(s), in FQIN format (registry/image_name:tag)."
type: str
required: true
service_container_image_creds:
description: Credentials used to authenticate with the registry
type: dict
required: false
default:
username: ""
password: ""
options:
username:
description: Username
type: str
required: true
password:
description: Password
type: str
required: true
service_container_user:
description: The UID to run as inside the container
type: str
required: false
default: ""
service_container_ip:
      description: Static ip for the container in its network
type: str
required: false
default: ""
service_container_additional_networks:
description: >-
A list of additional podman networks for the service container (in
@@ -84,11 +91,44 @@ argument_specs:
default: []
elements: str
service_container_publish_ports:
description: "A list of published ports in docker format (<host listen address>:<host port>:<container port>)"
description: A list of ports to publish outside the container
type: list
required: false
default: []
elements: str
elements: dict
options:
name:
description:
- Name of the port.
- If type is socket, the socket will be created at /run/<service name>-<port name>.sock on the host.
- If type is not socket, this is just informative.
type: str
required: true
container_port:
description: Container port to publish
type: int
required: true
type:
description: Whether to publish as a port or socket
type: str
required: false
default: port
choices:
- socket
- port
host_address:
description:
- IP or hostname to listen on on the host
- Ignored if type is socket
type: str
required: false
default: 0.0.0.0
host_port:
description:
- Port to listen on on the host
- Required if type is port, ignored otherwise
type: int
required: false
service_container_mounts:
description: List of bind mounts or volumes to be mounted inside the service container(s).
type: list
@@ -104,12 +144,14 @@ argument_specs:
- volume
- bind
- template
- copy
source:
description:
- Mount source.
- If mount type is volume, name of the volume.
- If mount type is bind, host path to bind mount inside the container.
- If mount type is template, the name of the template file, must end in .j2
- If mount type is copy, name of the file or directory to copy. Directory name must end in /.
type: str
required: true
destination:
@@ -119,7 +161,7 @@ argument_specs:
readonly:
description:
- If true, volume will be mounted as read only inside the container.
- Defaults to false for volume and bind, true for template
- Defaults to false for volume and bind, true for template and copy
type: bool
required: false
user:
@@ -132,6 +174,12 @@ argument_specs:
type: str
required: false
default: ""
mode:
description:
- Templated file or copied directory/file permissions.
- Defaults to 0644 for files, 0755 for directories
type: str
required: false
volume_device:
description: >-
The path of a device which is mounted for the volume.
@@ -154,6 +202,21 @@ argument_specs:
elements: str
required: false
default: []
service_container_devices:
description: List of devices to be added inside the service main container.
type: list
required: false
default: []
elements: dict
options:
source:
description: Device path on host
type: str
required: true
destination:
description: Device path inside the container. Defaults to same as host.
type: str
required: false
service_container_secrets:
description:
- >
@@ -207,20 +270,45 @@ argument_specs:
required: false
default: {}
service_container_add_capabilities:
description: List of capabilities to add to the service container
type: list
required: false
default: []
elements: str
service_container_pinp:
description:
- If true, runs the container with podman in podman
- This starts a podman service inside the outer container
- The podman socket is exposed to the inner container at /var/run/docker.sock
- >-
This allows the container to manage other containers, which are run inside the
same outer container
- >-
The inner containers use host networking, so they share the network namespace
with the outer container and each other.
- This support is experimental and may not work with all images or configurations.
type: bool
required: false
default: false
service_database_type:
description:
- Database type to set up.
- >
It will be run in a container accessible to the service at
host {{ service_name }}-{{ service_database_type }} on the default port.
- The database user will be {{ service_name }}
- The password will be accessible as secret at /run/secrets/{{ service_database_type }}
host <service database type> on the default port.
- The database user will be <service name>
- The password will be accessible as secret at /run/secrets/<service database type>
- >
The password will also be available as the
service_podman_secrets['{{ service_name }}-{{ service_database_type }}'] variable.
service_podman_secrets['<service name>-<service database type>'] variable.
type: str
choices:
- postgres
- mariadb
- mongo
- none
required: false
default: none
@@ -254,14 +342,13 @@ argument_specs:
description:
- Postgresql version to use.
- Can be debian (n) or alpine-based (n-alpine), where n can be major version like 14 or minor like 14.13.
- Required if service_database_type is postgres, does nothing otherwise
- Ignored if database type is not postgres.
- If a custom postgres image is specified, see that image documentation for supported tags.
type: str
required: false
service_redis:
description: >-
Whether to install redis in a container accessible to the service at host
{{ service_name }}-redis.
Whether to install redis in a container accessible to the service at host redis.
type: bool
required: false
default: false
@@ -269,9 +356,10 @@ argument_specs:
service_additional_containers:
description:
- List of additional containers for the service.
- >
Will inherit most options from main service container. All options can be overridden
per-container.
- >-
If image is not specified, will use service container image and
inherit most options from main service container.
- All options can be overridden per-container.
type: list
required: false
default: []
@@ -291,17 +379,31 @@ argument_specs:
type: str
required: false
default: "{{ service_container_image }}"
user:
description:
- The UID to run as inside the container.
- Defaults to <service_container_user> if same image, "" otherwise.
type: str
required: false
command:
description: Command to start the container with.
description:
- Command to start the container with.
- Defaults to <service_container_command> if same image, [] otherwise.
type: list
required: false
default: "[]"
elements: str
entrypoint:
description:
- Entrypoint to use in the container
- Defaults to <service_container_entrypoint> if same image, "" otherwise.
type: str
required: false
mounts:
description: List of bind mounts or volumes to be mounted inside the main service container.
description:
- List of bind mounts or volumes to be mounted inside the container.
- Defaults to <service_container_mounts> if same image, [] otherwise.
type: list
required: false
default: "{{ service_container_mounts }}"
elements: dict
options:
type:
@@ -330,6 +432,22 @@ argument_specs:
- Defaults to false for volume and bind, true for template
type: bool
required: false
user:
description: Volume owner uid. Only applicable if mount type is volume.
type: str
required: false
default: ""
group:
description: Volume owner gid. Only applicable if mount type is volume.
type: str
required: false
default: ""
mode:
description:
- Templated file or copied directory/file permissions.
- Defaults to 0644 for files, 0755 for directories
type: str
required: false
volume_device:
description: >-
The path of a device which is mounted for the volume.
@@ -352,17 +470,76 @@ argument_specs:
elements: str
required: false
default: []
devices:
description:
- List of devices to be added inside the container.
- Defaults to <service_container_devices> if same image, [] otherwise.
type: list
required: false
elements: dict
options:
source:
description: Device path on host
type: str
required: true
destination:
description: Device path inside the container. Defaults to same as host.
type: str
required: false
publish_ports:
description: "A list of published ports in docker format (<host listen address>:<host port>:<container port>)"
description: A list of ports to publish outside the container
type: list
required: false
default: []
elements: str
elements: dict
options:
name:
description:
- Name of the port.
- >-
If type is socket, the socket will be created at
/run/<service name>-<additional container name>-<port name>.sock on the host.
- If type is not socket, this is just informative.
type: str
required: true
container_port:
description: Container port to publish
type: int
required: true
type:
description: Whether to publish as a port or socket
type: str
required: false
default: port
choices:
- socket
- port
host_address:
description:
- IP or hostname to listen on on the host
- Ignored if type is socket
type: str
required: false
default: 0.0.0.0
host_port:
description:
- Port to listen on on the host
- Required if type is port, ignored otherwise
type: int
required: false
env:
description: A dict of environment variables for the container
description:
- A dict of environment variables for the container
- Defaults to <service_container_env> if same image, {} otherwise.
type: dict
required: false
default: {}
add_capabilities:
description:
- List of capabilities to add to the container
- Defaults to <service_container_add_capabilities> if same image, [] otherwise.
type: list
required: false
elements: str
secrets:
description:
- >
@@ -372,9 +549,9 @@ argument_specs:
A dict of secrets and their values (including autogenerated values) is available as
`service_podman_secrets` for use in templates. This should only be used if the
container doesn't support reading the secret from file or environment variable.
- Defaults to <service_container_secrets> if same image, [] otherwise.
type: list
required: false
default: []
elements: dict
options:
name:
@@ -410,6 +587,21 @@ argument_specs:
the name of the environment variable. Defaults to secret name.
type: str
required: false
pinp:
description:
- If true, runs the container with podman in podman
- This starts a podman service inside the outer container
- The podman socket is exposed to the inner container at /var/run/docker.sock
- >-
This allows the container to manage other containers, which are run inside the
same outer container
- >-
The inner containers use host networking, so they share the network namespace
with the outer container and each other.
- This support is experimental and may not work with all images or configurations.
type: bool
required: false
default: false
service_requires:
description: List of systemd units this service container depends on.

View File

@@ -1,21 +1,31 @@
---
- name: Additional container {{ container ~ ' for ' ~ service_name }}
- name: Additional containers for {{ service_name }}
ansible.builtin.include_role:
name: container
vars:
container_name: "{{ _service_additional_container.name }}"
container_image: "{{ _service_additional_container.image | default(service_container_image) }}"
container_image_creds: "{{ service_container_image_creds }}"
container_command: "{{ _service_additional_container.command | default([]) }}"
container_user: "{{ service_container_user }}"
container_mounts: "{{ _service_additional_container_mounts }}"
container_publish_ports: "{{ _service_additional_container.publish_ports | default([]) }}"
container_networks: "{{ _service_container_networks }}"
container_secrets: "{{ _service_additional_container.secrets | default(_service_container_secrets) }}"
container_env: "{{ _service_additional_container.env | default(service_container_env) }}"
container_image: "{{ _service_additional_container_final.image }}"
container_entrypoint: "{{ _service_additional_container_final.entrypoint }}"
container_command: "{{ _service_additional_container_final.command }}"
container_user: "{{ _service_additional_container_final.user }}"
container_mounts: "{{ _service_additional_container_final.mounts }}"
container_devices: "{{ _service_additional_container.devices }}"
container_publish_ports: "{{ _service_additional_container_publish_ports }}"
container_networks: "{{ _service_additional_container_networks }}"
container_hostname: "{{ _service_additional_container.name | regex_replace('^' ~ service_name ~ '-', '') }}"
container_secrets: "{{ _service_additional_container_secrets }}"
container_env: "{{ _service_additional_container_final.env }}"
container_add_capabilities: "{{ _service_additional_container.add_capabilities }}"
container_requires: "{{ _service_container_requires }}"
container_wants: "{{ service_wants }}"
container_wants: "{{ _service_additional_container_wants }}"
container_auto_update: "{{ service_auto_update }}"
loop: "{{ _service_additional_containers }}"
loop_control:
loop_var: _service_additional_container
index_var: _service_additional_container_index
- name: Socat sockets for additional containers of {{ service_name }}
ansible.builtin.include_tasks: additional_socat.yaml
loop: "{{ _service_additional_containers }}"
loop_control:
loop_var: _service_additional_container

View File

@@ -0,0 +1,12 @@
---
- name: Socat for socket published ports of {{ service_name }}
ansible.builtin.include_role:
name: socat
loop: "{{ _service_additional_container_publish_socket_ports }}"
loop_control:
loop_var: publish_port
vars:
socat_service_name: "{{ service_name }}-{{ publish_port.name }}"
socat_target_container: "{{ _service_additional_container.name }}"
socat_target_http_port: "{{ publish_port.container_port }}"
socat_auto_update: "{{ service_auto_update }}"

View File

@@ -1,20 +1,20 @@
---
- name: Include variables for database {{ service_database_type }}
ansible.builtin.include_vars:
file: database/{{ service_database_type }}.yaml
- name: Database container for {{ service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ service_name }}-{{ service_database_type }}" # This doesn't use _service_database_name to allow container role handlers to work
container_image: "{{ service_postgres_image }}:{{ service_postgres_tag }}"
container_image: "{{ _service_database_image }}"
container_mounts:
- type: volume
source: "{{ _service_database_name }}"
destination: /var/lib/postgresql/data
destination: "{{ _service_database_mount_destination }}"
container_networks: "{{ _service_database_networks }}"
container_secrets:
- name: "{{ _service_database_name }}"
target: "{{ service_database_type }}"
container_env:
POSTGRES_USER: "{{ service_name | replace('-', '_') }}"
POSTGRES_PASSWORD_FILE: "/run/secrets/{{ service_database_type }}"
POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
container_hostname: "{{ service_database_type }}"
container_secrets: "{{ _service_database_secrets }}"
container_env: "{{ _service_database_env }}"
container_auto_update: "{{ service_auto_update }}"

View File

@@ -0,0 +1,61 @@
---
- name: Create directory {{ _service_host_directory }}
ansible.builtin.file:
path: "{{ _service_host_directory }}"
state: directory
mode: "0755"
- name: Create directory {{ _service_host_directory + '/mounts' }}
ansible.builtin.file:
path: "{{ _service_host_directory }}/mounts"
state: directory
mode: "0700"
- name: Create service template mount directories
ansible.builtin.file:
path: "{{ _service_host_directory }}/mounts/{{ item }}"
state: directory
mode: "0700"
loop: "{{ _service_all_template_mount_directories }}"
- name: Template files for template mounts
ansible.builtin.template:
src: "{{ item[0].source }}"
dest: "{{ item[1] }}"
mode: "{{ item[0].mode | default('0644') }}"
notify: Restart container service {{ service_name }}
loop: "{{ _service_all_template_mounts | zip(_service_all_template_mount_host_files) }}"
- name: Copy files for copy mounts
ansible.builtin.copy:
src: "{{ item[0].source }}"
dest: "{{ item[1] }}"
mode: "{{ item[0].mode | default('0644') }}"
directory_mode: "0755"
notify: Restart container service {{ service_name }}
loop: "{{ _service_all_copy_mounts | zip(_service_all_copy_mount_host_files) }}"
- name: Template entrypoint for pinp
ansible.builtin.template:
src: "pinp-entrypoint.sh.j2"
dest: "{{ _service_host_directory }}/mounts/pinp-entrypoint.sh"
mode: "0755"
vars:
pinp_inner_name: "{{ service_name }}"
pinp_inner_image: "{{ service_container_image }}"
pinp_inner_mounts: "{{ _service_container_pinp_inner_mounts }}"
pinp_inner_env: "{{ service_container_env }}"
when: service_container_pinp
- name: Template entrypoint for pinp of additional containers
ansible.builtin.template:
src: "pinp-entrypoint.sh.j2"
dest: "{{ _service_host_directory }}/mounts/{{ _service_additional_container.name }}-pinp-entrypoint.sh"
mode: "0755"
loop: "{{ _service_additional_containers | selectattr('pinp') }}"
loop_control:
loop_var: _service_additional_container
vars:
pinp_inner_name: "{{ _service_additional_container.name }}"
pinp_inner_image: "{{ _service_additional_container.image }}"
pinp_inner_mounts: "{{ _service_additional_container_pinp_inner_mounts }}"
pinp_inner_env: "{{ _service_additional_container.env }}"

View File

@@ -14,35 +14,68 @@
ansible.builtin.include_tasks: secrets.yaml
when: _service_container_secrets | length > 0
- name: Template mounts for {{ service_name }}
ansible.builtin.include_tasks: templates.yaml
when: _service_template_mounts | length > 0
- name: Host mounts for {{ service_name }}
ansible.builtin.include_tasks: host_mounts.yaml
when: >-
(_service_all_template_mounts + _service_all_copy_mounts) | length > 0
or service_container_pinp
or (_service_additional_containers | selectattr('pinp') | length > 0)
- name: Additional containers for {{ service_name }}
ansible.builtin.include_tasks: additional.yaml
when: _service_additional_containers | length > 0
- name: Native socket for {{ service_name }}
ansible.builtin.include_role:
name: uumas.general.systemd_socket
vars:
systemd_socket_name: "{{ service_name }}"
systemd_socket_requires:
- "{{ service_name }}.service"
when: _service_native_socket
- name: Main container for {{ service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ service_name }}"
container_image: "{{ service_container_image }}"
container_image_creds: "{{ service_container_image_creds }}"
container_command: "{{ service_container_command }}"
container_user: "{{ service_container_user }}"
container_mounts: "{{ _service_container_mounts }}"
container_publish_ports: "{{ service_container_publish_ports }}"
container_image: "{{ _service_container.image }}"
container_entrypoint: "{{ _service_container.entrypoint }}"
container_command: "{{ _service_container.command }}"
container_user: "{{ _service_container.user }}"
container_mounts: "{{ _service_container.mounts }}"
container_devices: "{{ service_container_devices }}"
container_publish_ports: "{{ _service_container_publish_ports }}"
container_networks: "{{ _service_container_networks }}"
container_secrets: "{{ _service_container_secrets }}"
container_env: "{{ service_container_env }}"
container_env: "{{ _service_container.env }}"
container_add_capabilities: "{{ service_container_add_capabilities }}"
container_requires: "{{ _service_container_requires }}"
container_wants: "{{ _service_container_wants }}"
container_auto_update: "{{ service_auto_update }}"
- name: Socat for {{ service_name }}
ansible.builtin.include_tasks: socat.yaml
- name: Caddy socket proxy for http of {{ service_name }}
ansible.builtin.include_role:
name: caddy_socket_proxy
when: service_container_http_port > 0
vars:
caddy_socket_proxy_service_name: "{{ service_name }}"
caddy_socket_proxy_target_http_port: "{{ service_container_http_port }}"
caddy_socket_proxy_container_ip: >-
{{ service_container_ip | ansible.utils.ipmath(257) if _service_static_ip else '' }}
caddy_socket_proxy_auto_update: "{{ service_auto_update }}"
- name: Socat for socket published ports of {{ service_name }}
ansible.builtin.include_role:
name: socat
loop: "{{ _service_container_publish_socket_ports }}"
loop_control:
loop_var: publish_port
vars:
socat_service_name: "{{ service_name }}-{{ publish_port.name }}"
socat_target_container: "{{ service_name }}"
socat_target_http_port: "{{ publish_port.container_port }}"
socat_auto_update: "{{ service_auto_update }}"
- name: Reverse proxy for {{ service_name }}
ansible.builtin.include_tasks: proxy.yaml

View File

@@ -0,0 +1,39 @@
---
- name: OAuth2 Proxy container for {{ service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ service_name }}-oauth2-proxy"
container_image: "quay.io/oauth2-proxy/oauth2-proxy:latest-alpine"
container_command:
- --client-secret-file
- /run/secrets/client-secret
- --cookie-secret-file
- /run/secrets/cookie-secret
container_networks:
- name: "{{ service_name }}-oauth2-proxy"
container_secrets:
- name: "{{ service_name }}-oauth2-proxy-cookie-secret"
length: 32
target: cookie-secret
- name: "{{ service_name }}-oauth2-proxy-client-secret"
value: "{{ service_oauth2_proxy_client_secret }}"
target: client-secret
container_env:
OAUTH2_PROXY_HTTP_ADDRESS: fd:3
OAUTH2_PROXY_PROVIDER: oidc
OAUTH2_PROXY_OIDC_ISSUER_URL: "{{ service_oauth2_proxy_issuer_url }}"
OAUTH2_PROXY_CLIENT_ID: "{{ service_oauth2_proxy_client_id }}"
OAUTH2_PROXY_CODE_CHALLENGE_METHOD: S256
OAUTH2_PROXY_SKIP_PROVIDER_BUTTON: "true"
OAUTH2_PROXY_EMAIL_DOMAINS: "*"
OAUTH2_PROXY_REVERSE_PROXY: "true"
container_requires:
- "{{ service_name }}-oauth2-proxy.socket"
container_auto_update: "{{ service_auto_update }}"
- name: Socket for OAuth2 Proxy for {{ service_name }}
ansible.builtin.import_role:
name: uumas.general.systemd_socket
vars:
systemd_socket_name: "{{ service_name }}-oauth2-proxy"

View File

@@ -1,4 +1,8 @@
---
- name: OAuth2 proxy for {{ service_name }}
ansible.builtin.include_tasks: oauth2_proxy.yaml
when: _service_oauth2_proxy
- name: Reverse proxy for {{ service_name }}
ansible.builtin.import_role:
name: uumas.general.vhost
@@ -7,7 +11,7 @@
vhost_id: "{{ service_name }}"
vhost_domains: "{{ service_domains }}"
vhost_proxy_target_netproto: unix
vhost_proxy_target_socket: "/run/{{ service_name }}-socat.sock"
vhost_proxy_target_socket: "{{ _service_socket_path }}"
vhost_proxy_headers: "{{ _service_proxy_headers }}"
vhost_proxy_auth_socket: "{{ _service_oauth2_socket }}"
vhost_proxy_auth_uri: /oauth2/auth

View File

@@ -6,5 +6,7 @@
container_name: "{{ service_name }}-redis"
container_image: docker.io/valkey/valkey:alpine
container_networks:
- "{{ service_name }}"
- name: "{{ service_name }}"
ip: "{{ service_container_ip | ansible.utils.ipmath(2) if _service_static_ip else '' }}"
container_hostname: redis
container_auto_update: "{{ service_auto_update }}"

View File

@@ -1,26 +0,0 @@
---
- name: Socat socket for {{ service_name }}
ansible.builtin.template:
src: socat.socket.j2
dest: /etc/systemd/system/{{ service_name }}-socat.socket
mode: "0644"
notify: Restart socat socket for {{ service_name }}
- name: Socat container for {{ service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ service_name }}-socat"
container_image: "docker.io/alpine/socat:latest"
container_command:
- "ACCEPT-FD:3,fork"
- "TCP:{{ service_name }}:{{ service_container_http_port }}"
container_user: nobody
container_networks:
- "{{ service_name }}"
container_requires:
- "{{ service_name }}-socat.socket"
- "{{ service_name }}.service"
container_auto_start: false
container_auto_update: "{{ service_auto_update }}"

View File

@@ -1,20 +0,0 @@
---
- name: Create directory {{ _service_host_directory }}
ansible.builtin.file:
path: "{{ _service_host_directory }}"
state: directory
mode: "0755"
- name: Create directory {{ _service_host_directory + '/mounts' }}
ansible.builtin.file:
path: "{{ _service_host_directory }}/mounts"
state: directory
mode: "0700"
- name: Template files for template mounts
ansible.builtin.template:
src: "{{ item[0].source }}"
dest: "{{ item[1].source }}"
mode: "0644"
notify: "Restart container service {{ service_name }}"
loop: "{{ _service_template_mounts | zip(_service_container_template_mounts) }}"

View File

@@ -1,16 +1,22 @@
---
- name: Fail if service_name is empty
ansible.builtin.fail:
msg: service_name must not be empty
when: service_name | length == 0
- name: Fail if service_container_user is not string
ansible.builtin.fail:
msg: "service_container_user must be a string, not int."
when: service_container_user is not string
- name: Fail if service_database_type is postgres but service_postgres_tag is not set
ansible.builtin.fail:
msg: "service_postgres_tag needs to be set when database type is postgres"
when: "service_database_type == 'postgres' and service_postgres_tag is not defined"
- name: Fail if template mount source doesn't end in .j2
ansible.builtin.fail:
msg: "Template mount source file name needs to end in .j2. The file {{ item.source }} of {{ service_name }} doesn't."
when: "item.source | split('.') | last != 'j2'"
loop: "{{ _service_template_mounts }}"
- name: Fail if copy mount source doesn't end with /
ansible.builtin.fail:
msg: "Copy mount source name must end with /. The file {{ item.source }} of {{ service_name }} doesn't"
when: "not item.source.endswith('/')"
loop: "{{ _service_copy_mounts }}"

View File

@@ -0,0 +1,26 @@
#!/bin/bash
# {{ ansible_managed }}
_term() {
echo "Received SIGTERM, stopping all containers"
kill "$child"
}
podman system service -t 0 &
podman run \
--rm \
-v /run/secrets:/run/secrets:ro \
{% for key, value in pinp_inner_env.items() %}
-e {{ key }}={{ value }} \
{% endfor %}
-v /tmp/storage-run-1000/podman/podman.sock:/var/run/docker.sock \
{% for mount in pinp_inner_mounts %}
--mount type={{ mount.type }},source={{ mount.source }},destination={{ mount.destination }}{% if mount.readonly | default(false) %},readonly{% endif %} \
{% endfor %}
--name {{ pinp_inner_name }} \
--network host \
{{ pinp_inner_image }} &
child=$!
trap _term SIGTERM
wait "$!"

View File

@@ -1,6 +0,0 @@
# {{ ansible_managed }}
[Unit]
Description={{ service_name }} socat socket
[Socket]
ListenStream=/run/{{ service_name }}-socat.sock

View File

@@ -0,0 +1,9 @@
---
_service_database_image: docker.io/library/mariadb:lts
_service_database_mount_destination: /var/lib/mysql
_service_database_authenticated: true
_service_database_env:
MARIADB_RANDOM_ROOT_PASSWORD: "1"
MARIADB_USER: "{{ service_name | replace('-', '_') }}"
MARIADB_DATABASE: "{{ service_name | replace('-', '_') }}"
MARIADB_PASSWORD_FILE: "/run/secrets/{{ service_database_type }}"

View File

@@ -0,0 +1,6 @@
---
_service_database_image: docker.io/library/mongo:latest
_service_database_mount_destination: /data/db
_service_database_authenticated: false
_service_database_env:
MONGO_INITDB_DATABASE: "{{ service_name | replace('-', '_') }}"

View File

@@ -0,0 +1,14 @@
---
_service_database_image: "{{ service_postgres_image }}:{{ service_postgres_tag }}"
_service_database_mount_destination: >-
{{
'/var/lib/postgresql/data'
if (service_postgres_tag | split('-') | length > 1)
and (service_postgres_tag | split('-') | first) is version('18', '<')
else '/var/lib/postgresql'
}}
_service_database_authenticated: true
_service_database_env:
POSTGRES_USER: "{{ service_name | replace('-', '_') }}"
POSTGRES_PASSWORD_FILE: "/run/secrets/{{ service_database_type }}"
POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"

View File

@@ -1,19 +1,156 @@
---
_service_additional_containers_with_default_image: >-
{{
([{ 'image': service_container_image }] * service_additional_containers | length)
| zip(service_additional_containers)
| map('combine')
}}
_service_additional_container_same_image_defaults:
user: "{{ service_container_user }}"
command: "{{ service_container_command }}"
entrypoint: "{{ service_container_entrypoint }}"
devices: "{{ service_container_devices }}"
env: "{{ service_container_env }}"
add_capabilities: "{{ service_container_add_capabilities }}"
pinp: false
_service_additional_container_different_image_defaults:
user: ""
command: []
entrypoint: ""
mounts: []
devices: []
publish_ports: []
env: {}
add_capabilities: []
secrets: []
pinp: false
_service_additional_same_image_containers: >-
{{
_service_additional_containers_with_default_image
| selectattr('image', '==', service_container_image)
}}
_service_additional_different_image_containers: >-
{{
_service_additional_containers_with_default_image
| selectattr('image', '!=', service_container_image)
}}
_service_additional_containers: >-
{{
service_additional_containers
| zip(
service_additional_containers
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('community.general.dict_kv', 'name')
(
(
(
[_service_additional_container_same_image_defaults] *
(_service_additional_same_image_containers | length)
)
| map('combine')
| zip(_service_additional_same_image_containers)
| map('combine')
) +
(
(
[_service_additional_container_different_image_defaults] *
(_service_additional_different_image_containers | length)
)
| zip(_service_additional_different_image_containers)
| map('combine')
)
)
| zip(
(
_service_additional_same_image_containers +
_service_additional_different_image_containers
)
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('community.general.dict_kv', 'name')
)
| map('combine')
}}
_service_additional_container_wants: >-
{{
service_wants
+ _service_additional_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat.socket')
}}
_service_additional_container_networks: >-
{{
[{
'name': service_name,
'ip':
service_container_ip | ansible.utils.ipmath(20 + _service_additional_container_index)
if _service_static_ip else ''
}]
+ (
service_container_additional_networks
+ (
_service_additional_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat')
)
) | map('community.general.dict_kv', 'name')
}}
_service_additional_container_secrets: >-
{{
(
_service_additional_container.secrets
| map(attribute='name')
| map('community.general.dict_kv', 'target')
| zip(
_service_additional_container.secrets,
_service_additional_container.secrets
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('community.general.dict_kv', 'name')
)
| map('combine')
) if _service_additional_container.secrets is defined
else _service_container_secrets
}}
_service_additional_container_publish_ports_with_defaults: >-
{{
([{ 'type': 'port', 'host_address': '0.0.0.0' }] * _service_additional_container.publish_ports | length)
| zip(_service_additional_container.publish_ports)
| map('combine')
}}
_service_additional_container_publish_socket_ports: >-
{{
_service_additional_container_publish_ports_with_defaults | selectattr('type', '==', 'socket')
if _service_additional_container.publish_ports is defined
else
[]
}}
_service_additional_container_publish_port_ports: >-
{{
_service_additional_container_publish_ports_with_defaults | selectattr('type', '==', 'port')
if _service_additional_container.publish_ports is defined
else
[]
}}
_service_additional_container_publish_ports: >-
{{
_service_additional_container_publish_port_ports | map(attribute='host_address') |
zip(
_service_additional_container_publish_port_ports | map(attribute='host_port'),
_service_additional_container_publish_port_ports | map(attribute='container_port')
) | map('join', ':')
}}
_service_additional_volume_mounts: "{{ _service_additional_container.mounts | selectattr('type', '==', 'volume') }}"
_service_additional_template_mounts: "{{ _service_additional_container.mounts | selectattr('type', '==', 'template') }}"
_service_additional_copy_mounts: "{{ _service_additional_container.mounts | selectattr('type', '==', 'copy') }}"
_service_additional_host_directory: "/srv/{{ service_name }}"
_service_additional_container_volume_mounts: >-
@@ -32,23 +169,96 @@ _service_additional_container_template_mounts: >-
{{
([{'readonly': true}] * _service_additional_template_mounts | length) |
zip(
_service_additional_template_mounts,
_service_additional_template_mounts |
map(attribute='source') |
map('regex_replace', '\.j2$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/') |
map('community.general.dict_kv', 'source'),
community.general.remove_keys(['mode']),
_service_additional_template_mounts |
map(attribute='source') |
map('regex_replace', '\.j2$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/') |
map('community.general.dict_kv', 'source'),
([{'type': 'bind'}] * _service_additional_template_mounts | length)
) |
map('combine')
}}
_service_additional_container_copy_mounts: >-
{{
([{'readonly': true}] * _service_additional_copy_mounts | length) |
zip(
_service_additional_copy_mounts |
community.general.remove_keys(['mode']),
_service_additional_copy_mounts |
map(attribute='source') |
map('regex_replace', '\/$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/') |
map('community.general.dict_kv', 'source'),
([{'type': 'bind'}] * _service_additional_copy_mounts | length)
) |
map('combine')
}}
_service_additional_container_mounts: >-
{{
_service_additional_container_volume_mounts +
_service_additional_container_bind_mounts +
_service_additional_container_template_mounts
_service_additional_container_template_mounts +
_service_additional_container_copy_mounts
if _service_additional_container.mounts is defined
else
_service_container_mounts
}}
_service_additional_plain_container:
image: "{{ _service_additional_container.image }}"
entrypoint: "{{ _service_additional_container.entrypoint }}"
command: "{{ _service_additional_container.command }}"
user: "{{ _service_additional_container.user }}"
env: "{{ _service_additional_container.env }}"
mounts: "{{ _service_additional_container_mounts }}"
_service_additional_pinp_container_mounts:
- type: bind
source: "{{ _service_host_directory }}/mounts/{{ _service_additional_container.name }}-entrypoint.sh"
destination: /entrypoint.sh
readonly: true
- type: volume
source: "{{ _service_additional_container.name }}-containers"
destination: /home/podman/.local/share/containers
_service_additional_pinp_container:
image: quay.io/podman/stable:latest
entrypoint: /entrypoint.sh
command: []
user: podman
env: {}
mounts: >-
{{
_service_additional_pinp_container_mounts
+ (
_service_additional_container_mounts
| zip(
_service_additional_container_mounts
| map(attribute='source')
| map('replace', '/', '_')
| map('regex_replace', '^', '/mounts/')
| map('community.general.dict_kv', 'destination')
)
| map('combine')
)
}}
_service_additional_container_final: >-
{{ _service_additional_pinp_container if _service_additional_container.pinp else _service_additional_plain_container }}
_service_additional_container_pinp_inner_mounts: >-
{{
_service_additional_container_mounts
| zip(
_service_additional_container_mounts
| map(attribute='source')
| map('replace', '/', '_')
| map('regex_replace', '^', '/mounts/')
| map('community.general.dict_kv', 'source')
)
| map('combine')
}}

View File

@@ -3,6 +3,14 @@ _service_setup_database: "{{ service_database_type != 'none' }}"
_service_database_name: "{{ service_name }}-{{ service_database_type }}"
_service_database_networks: >-
{{
[service_name] +
service_database_additional_networks
[{
'name': service_name,
'ip': service_container_ip | ansible.utils.ipmath(1) if _service_static_ip else ''
}]
+ service_database_additional_networks | map('community.general.dict_kv', 'name')
}}
_service_database_secret:
name: "{{ _service_database_name }}"
target: "{{ service_database_type }}"
_service_database_secrets: "{{ [_service_database_secret] if _service_database_authenticated else [] }}"

View File

@@ -1,16 +1,44 @@
---
_service_container_networks: "{{ [service_name] + service_container_additional_networks }}"
_service_container_networks: >-
{{
[{
'name': service_name,
'ip': service_container_ip
}]
+ (
[{
'name': service_name ~ '-caddy-socket-proxy',
'ip': service_container_ip | ansible.utils.ipmath(256) if _service_static_ip else ''
}] if service_container_http_port > 0 else []
)
+ (
service_container_additional_networks
+ (
_service_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat')
)
) | map('community.general.dict_kv', 'name')
}}
_service_static_ip: "{{ service_container_ip | length > 0 }}"
_service_container_requires: >-
{{
service_requires
+ ([_service_database_name + '.service'] if _service_setup_database else [])
+ ([service_name + '-redis.service'] if service_redis else [])
+ ([_service_database_name ~ '.service'] if _service_setup_database else [])
+ ([service_name ~ '-redis.service'] if service_redis else [])
+ ([service_name ~ '.socket'] if _service_native_socket else [])
}}
_service_container_wants: >-
{{
service_wants
+ ([service_name + '-socat.socket'] if service_domains | length > 0 else [])
+ ([service_name + '-caddy-socket-proxy.socket'] if service_container_http_port > 0 else [])
+ ([service_name + '-oauth2-proxy.socket'] if _service_oauth2_proxy else [])
+ _service_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat.socket')
+ _service_additional_containers
| map(attribute='name')
| map('regex_replace', '$', '.service')

View File

@@ -1,6 +1,12 @@
---
_service_container_socket_mount:
type: bind
source: /run/{{ service_name }}.sock
destination: /run/{{ service_name }}.sock
_service_volume_mounts: "{{ service_container_mounts | selectattr('type', '==', 'volume') }}"
_service_template_mounts: "{{ service_container_mounts | selectattr('type', '==', 'template') }}"
_service_copy_mounts: "{{ service_container_mounts | selectattr('type', '==', 'copy') }}"
_service_host_directory: "/srv/{{ service_name }}"
_service_container_volume_mounts: >-
@@ -14,25 +20,96 @@ _service_container_volume_mounts: >-
) |
map('combine')
}}
_service_container_bind_mounts: "{{ service_container_mounts | selectattr('type', '==', 'bind') }}"
_service_container_bind_mounts: >-
{{
service_container_mounts | selectattr('type', '==', 'bind') +
([ _service_container_socket_mount ] if _service_native_socket else [])
}}
_service_container_template_mounts: >-
{{
([{'readonly': true}] * _service_template_mounts | length) |
zip(
_service_template_mounts,
_service_template_mounts |
map(attribute='source') |
map('regex_replace', '\.j2$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/') |
map('community.general.dict_kv', 'source'),
community.general.remove_keys(['mode']),
_service_template_mounts |
map(attribute='source') |
map('regex_replace', '\.j2$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/') |
map('community.general.dict_kv', 'source'),
([{'type': 'bind'}] * _service_template_mounts | length)
) |
map('combine')
}}
_service_container_copy_mounts: >-
{{
([{'readonly': true}] * _service_copy_mounts | length) |
zip(
_service_copy_mounts |
community.general.remove_keys(['mode']),
_service_copy_mounts |
map(attribute='source') |
map('regex_replace', '\/$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/') |
map('community.general.dict_kv', 'source'),
([{'type': 'bind'}] * _service_copy_mounts | length)
) |
map('combine')
}}
_service_container_mounts: >-
{{
_service_container_volume_mounts +
_service_container_bind_mounts +
_service_container_template_mounts
_service_container_template_mounts +
_service_container_copy_mounts
}}
_service_all_template_mounts: >-
{{
(
_service_template_mounts +
(
_service_additional_containers |
map(attribute='mounts', default=[]) |
flatten
)
) |
selectattr('type', '==', 'template') |
unique
}}
_service_all_template_mount_directories: >-
{{
_service_all_template_mounts |
map(attribute='source') |
map('dirname') |
unique |
select('!=', '')
}}
_service_all_template_mount_host_files: >-
{{
_service_all_template_mounts |
map(attribute='source') |
map('regex_replace', '\.j2$', '') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/')
}}
_service_all_copy_mounts: >-
{{
(
_service_copy_mounts +
(
_service_additional_containers |
map(attribute='mounts', default=[]) |
flatten
)
) |
selectattr('type', '==', 'copy') |
unique
}}
_service_all_copy_mount_host_files: >-
{{
_service_all_copy_mounts |
map(attribute='source') |
map('regex_replace', '^', _service_host_directory ~ '/mounts/')
}}

View File

@@ -0,0 +1,55 @@
---
_service_plain_container:
image: "{{ service_container_image }}"
entrypoint: "{{ service_container_entrypoint }}"
command: "{{ service_container_command }}"
user: "{{ service_container_user }}"
env: "{{ service_container_env }}"
mounts: "{{ _service_container_mounts }}"
_service_pinp_container_mounts:
- type: bind
source: "{{ _service_host_directory }}/mounts/entrypoint.sh"
destination: /entrypoint.sh
readonly: true
- type: volume
source: "containers"
destination: /home/podman/.local/share/containers
_service_pinp_container:
image: quay.io/podman/stable:latest
entrypoint: /entrypoint.sh
command: []
user: podman
env: {}
mounts: >-
{{
_service_pinp_container_mounts
+ (
_service_container_mounts
| zip(
_service_container_mounts
| map(attribute='source')
| map('replace', '/', '_')
| map('regex_replace', '^', '/mounts/')
| map('community.general.dict_kv', 'destination')
)
| map('combine')
)
}}
_service_container: >-
{{ _service_pinp_container if service_container_pinp else _service_plain_container }}
_service_container_pinp_inner_mounts: >-
{{
_service_container_mounts
| zip(
_service_container_mounts
| map(attribute='source')
| map('replace', '/', '_')
| map('regex_replace', '^', '/mounts/')
| map('community.general.dict_kv', 'source')
)
| map('combine')
}}

View File

@@ -1,11 +1,16 @@
---
_service_native_socket: "{{ service_domains | length > 0 and service_container_http_port == 0 }}"
_service_socket_path: >-
/run/{{ service_name ~ ('-caddy-socket-proxy' if not _service_native_socket else '' ) }}.sock
_service_replacement_host_header:
Host: "{{ service_name }}:{{ service_container_http_port }}"
_service_proxy_headers: "{{ _service_replacement_host_header if not service_proxy_pass_host_header else {} }}"
_service_oauth2_proxy: "{{ service_proxy_auth_type == 'oauth2-proxy' }}"
_service_oauth2_socket: >-
{{ '/run/oauth2-proxy-socat.sock' if _service_oauth2_proxy else '' }}
{{ '/run/' ~ service_name ~ '-oauth2-proxy.sock' if _service_oauth2_proxy else '' }}
_service_oauth2_proxy_location:
path: /oauth2/*
proxy_target_socket: "{{ _service_oauth2_socket }}"

View File

@@ -0,0 +1,21 @@
---
# Publish entries with defaults overlaid: every entry defaults to a plain
# "port" publication bound to all interfaces unless it overrides those keys.
# (Jinja filter binds tighter than "*", so the singleton list is repeated
# len(service_container_publish_ports) times before zipping.)
_service_container_publish_ports_with_defaults: >-
  {{
    ([{ 'type': 'port', 'host_address': '0.0.0.0' }] * service_container_publish_ports | length)
    | zip(service_container_publish_ports)
    | map('combine')
  }}
# Entries published through a systemd socket unit.
_service_container_publish_socket_ports: >-
  {{ _service_container_publish_ports_with_defaults | selectattr('type', '==', 'socket') }}
# Entries published as plain TCP port mappings.
_service_container_publish_port_ports: >-
  {{ _service_container_publish_ports_with_defaults | selectattr('type', '==', 'port') }}
# Port mappings rendered as "host_address:host_port:container_port" strings.
_service_container_publish_ports: >-
  {{
    _service_container_publish_port_ports | map(attribute='host_address') |
    zip(
      _service_container_publish_port_ports | map(attribute='host_port'),
      _service_container_publish_port_ports | map(attribute='container_port')
    ) | map('join', ':')
  }}

View File

@@ -19,4 +19,17 @@ _service_container_secrets: >-
'target': service_database_secret_target
}] if _service_setup_database else []
)
+ (
[{
'name': _service_database_name ~ '-url',
'value':
'postgres://'
~ service_name | replace('-', '_')
~ ':' ~ service_podman_secrets[service_name ~ '-postgres']
~ '@postgres/' ~ service_name | replace('-', '_')
~ '?sslmode=disable',
'type': service_database_secret_type,
'target': service_database_secret_target ~ '-url'
}] if service_podman_secrets[service_name ~ '-postgres'] is defined else []
)
}}

1
roles/socat/README.md Normal file
View File

@@ -0,0 +1 @@
Sets up a socat container along with a systemd socket unit to forward traffic to it.

View File

@@ -0,0 +1,4 @@
---
# Container to forward traffic to; defaults to the service's own container.
socat_target_container: "{{ socat_service_name }}"
# Static IP for the socat container; empty string lets podman assign one.
socat_container_ip: ""
# Whether the socat container image is kept up to date automatically.
socat_auto_update: true

View File

@@ -0,0 +1,28 @@
---
# Argument validation for the socat role (meta/argument_specs format);
# defaults here mirror those in defaults/main.yml.
argument_specs:
  main:
    description: Sets up a socat container along with a systemd socket unit to forward traffic to it
    options:
      socat_service_name:
        description: Name of the socat service, used for systemd unit and container naming
        type: str
        required: true
      socat_target_container:
        description: Name of the container to forward traffic to
        type: str
        required: false
        default: "{{ socat_service_name }}"
      socat_target_http_port:
        description: Port on the target container to forward traffic to
        type: int
        required: true
      socat_container_ip:
        description: IP address to assign to the socat container.
        type: str
        required: false
        default: ""
      socat_auto_update:
        description: Whether to automatically update the socat container
        type: bool
        required: false
        default: true

View File

@@ -0,0 +1,27 @@
---
# Systemd socket unit "<name>-socat" that owns the listening socket and is
# tied to the lifetime of the proxied container's service.
- name: Socat socket for {{ socat_service_name }}
  ansible.builtin.import_role:
    name: uumas.general.systemd_socket
  vars:
    systemd_socket_name: "{{ socat_service_name }}-socat"
    systemd_socket_requires:
      - "{{ socat_target_container }}.service"
# The socat container itself: not auto-started (socket activation starts it),
# it forwards connections accepted on the inherited fd 3 (ACCEPT-FD:3) to the
# target container's HTTP port over the "<name>-socat" network.
- name: Socat container for {{ socat_service_name }}
  ansible.builtin.import_role:
    name: container
  vars:
    container_name: "{{ socat_service_name }}-socat"
    container_image: "docker.io/alpine/socat:latest"
    container_command:
      - "ACCEPT-FD:3,fork"
      - "TCP:{{ socat_target_container }}:{{ socat_target_http_port }}"
    # socat needs no privileges beyond the inherited fd and network access.
    container_user: nobody
    container_networks:
      - name: "{{ socat_service_name }}-socat"
        # Empty by default; set socat_container_ip for a static address.
        ip: "{{ socat_container_ip }}"
    container_requires:
      - "{{ socat_service_name }}-socat.socket"
      - "{{ socat_target_container }}.service"
    container_auto_start: false
    container_auto_update: "{{ socat_auto_update }}"

View File

@@ -43,17 +43,7 @@
service_container_http_port: 8008
service_domains: "{{ [synapse_external_domain] }}"
service_vhost_locations:
- path: /_matrix/client/*/login
proxy_target_socket: /run/matrix-authentication-service-socat.sock
- path: /_matrix/client/*/logout
proxy_target_socket: /run/matrix-authentication-service-socat.sock
- path: /_matrix/client/*/refresh
proxy_target_socket: /run/matrix-authentication-service-socat.sock
- path: /_matrix/client/*/login/*
proxy_target_socket: /run/matrix-authentication-service-socat.sock
- path: /_matrix/client/*/logout/*
proxy_target_socket: /run/matrix-authentication-service-socat.sock
- path: /_matrix/client/*/refresh/*
- path: ^/_matrix/client/.*/(login|logout|refresh).*$
proxy_target_socket: /run/matrix-authentication-service-socat.sock
service_wants:
- matrix-authentication-service.service
@@ -82,3 +72,21 @@
matrix_authentication_service_upstream_oauth2_scope: "{{ synapse_oidc_provider_scopes | join(' ') }}"
matrix_authentication_service_upstream_oauth2_claims_imports: "{{ synapse_oidc_provider_mas_claims_imports }}"
matrix_authentication_service_upstream_oauth2_human_name: "{{ synapse_oidc_provider_name }}"
- name: Reverse proxy synapse federation
ansible.builtin.import_role:
name: uumas.general.vhost
vars:
vhost_type: reverse_proxy
vhost_id: synapse-federation
vhost_domains:
- "{{ synapse_external_domain }}:8448"
vhost_proxy_target_netproto: unix
vhost_proxy_target_socket: "/run/synapse-caddy-socket-proxy.sock"
- name: Open port for synapse federation
ansible.posix.firewalld:
service: matrix
state: enabled
permanent: true
immediate: true

View File

@@ -27,7 +27,7 @@ listeners:
database:
name: psycopg2
args:
host: synapse-postgres
host: postgres
user: synapse
password: "{{ service_podman_secrets['synapse-postgres'] }}"
dbname: synapse

View File

@@ -1,7 +1,6 @@
---
- name: "Restart volume service {{ volume_name }}"
ansible.builtin.systemd_service:
name: "{{ volume_name }}-volume.service"
state: restarted
daemon_reload: true
ignore_errors: '{{ ansible_check_mode }}'
ansible.builtin.set_fact:
systemd_restart_units: "{{ systemd_restart_units + [volume_name ~ '-volume.service'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -9,4 +9,6 @@
state: quadlet
quadlet_file_mode: "0644"
quadlet_options: "{{ _volume_quadlet_options }}"
notify: Restart volume service {{ volume_name }}
notify:
- Reload systemd daemon
- Restart volume service {{ volume_name }}

View File

@@ -0,0 +1,9 @@
---
# Argument validation for the windmill role ("podman in podman" is the pinp
# isolation used by the worker container).
argument_specs:
  main:
    description: Installs windmill with worker in podman in podman
    options:
      windmill_domain:
        description: The domain to use for windmill
        type: str
        required: true

View File

@@ -0,0 +1,62 @@
---
# Deploys Windmill (server plus worker containers) through the generic
# service role; all containers read the postgres connection string from the
# podman secret exposed at /run/secrets/postgres-url.
- name: Windmill service
  ansible.builtin.import_role:
    name: service
  vars:
    service_name: windmill
    service_container_image: ghcr.io/windmill-labs/windmill:main
    service_container_mounts:
      # Shared with the worker so the server can serve job logs.
      - type: volume
        source: worker-logs
        destination: /tmp/windmill/logs
    service_container_http_port: 8000
    service_domains:
      - "{{ windmill_domain }}"
    service_database_type: postgres
    service_container_env:
      DATABASE_URL_FILE: /run/secrets/postgres-url
      MODE: server
    service_additional_containers:
      # Default worker group; pinp wraps it in podman-in-podman so jobs run
      # isolated from the host container.
      - name: worker
        pinp: true
        mounts:
          - type: volume
            source: worker-logs
            destination: /tmp/windmill/logs
          - type: volume
            source: worker-dependency-cache
            destination: /tmp/windmill/cache
        env:
          DATABASE_URL_FILE: /run/secrets/postgres-url
          MODE: worker
          WORKER_GROUP: default
          # Run each job in its own user/PID namespace inside the wrapper.
          ENABLE_UNSHARE_PID: "true"
          UNSHARE_ISOLATION_FLAGS: "--user --map-root-user --pid --fork"
      # Native workers: several lightweight workers in one container, no
      # sandboxing (WORKER_TYPE=native).
      - name: worker-native
        env:
          DATABASE_URL_FILE: /run/secrets/postgres-url
          MODE: worker
          WORKER_TYPE: native
          NATIVE_MODE: "true"
          NUM_WORKERS: "8"
          SLEEP_QUEUE: "200"
- name: lsp
image: ghcr.io/windmill-labs/windmill-extra:latest
secrets: []
mounts:
- type: volume
source: lsp-cache
destination: /puls/.cache
publish_ports:
- name: lsp
type: socket
container_port: 3001
env:
ENABLE_LSP: "true"
ENABLE_MULTIPLAYER: "false"
ENABLE_DEBUGGER: "false"
WINDMILL_BASE_URL: http://windmill:8000
service_vhost_locations:
- path: /ws/*
proxy_target_socket: /run/windmill-lsp-socat.sock