Compare commits

...

33 Commits

Author SHA1 Message Date
uumas
5814267d66 Add windmill 2026-03-18 00:31:50 +02:00
uumas
defd2517ea service: Add postgres url to secrets 2026-03-18 00:30:25 +02:00
uumas
615c4013c1 Use caddy instead of socat for http proxying 2026-03-15 22:30:36 +02:00
uumas
77768e5483 small fixes 2026-03-15 22:30:16 +02:00
uumas
162972810f example: fix postgres examples 2026-03-15 21:56:16 +02:00
uumas
8595e261c9 nextcloud: Make HArP optional and opt-in 2026-03-15 00:43:24 +02:00
uumas
70c5ed7ea0 service: Make oauth2-proxy depend on its socket 2026-03-14 23:50:01 +02:00
uumas
3554de82c0 service: Make oauth2-proxy aware it's running behing reverse proxy 2026-03-14 23:33:34 +02:00
uumas
f64ea2cbe3 container: Allow custom ip addresses for more than one network 2026-03-14 23:33:04 +02:00
uumas
ca29ffb271 network: Delete network on stop 2026-03-14 22:11:42 +02:00
uumas
dac44638e6 service: Don't use different networks for additional containers 2026-03-12 03:08:42 +02:00
uumas
489b8eaade service: Use native socket for oauth2 proxy 2026-03-12 03:08:36 +02:00
uumas
956f8ed6ce Use uumas.general.systemd_socket role 2026-03-12 01:54:43 +02:00
uumas
63e6f938bb Add vscode configuration 2026-03-12 00:45:48 +02:00
uumas
61c0724801 Add nextcloud role 2026-03-12 00:45:25 +02:00
uumas
ea2a2c3652 Add forgejo role 2026-03-12 00:42:54 +02:00
uumas
31cf49b004 service: Improve additional container support 2026-03-12 00:42:00 +02:00
uumas
9e3e1496f0 service: Split container network namespaces 2026-03-12 00:40:54 +02:00
uumas
190527e877 naming and documentation fixes 2026-03-12 00:38:42 +02:00
uumas
fb39f1bfc8 service: Don't require postgres tag specified 2026-03-12 00:36:11 +02:00
uumas
69ae1687b7 service: Add support for mongodb 2026-03-12 00:35:42 +02:00
uumas
efc7bf5434 service: Imrove native sockets 2026-03-12 00:34:25 +02:00
uumas
294b931d19 service: Support publishing arbitary ports through sockets 2026-03-12 00:32:10 +02:00
uumas
470b60f988 service: Support postgres >= 18 and postgres upgrades 2026-03-12 00:26:35 +02:00
uumas
c673aae8dc synapse: Use simple database hostname 2026-03-12 00:23:22 +02:00
uumas
4a68ab25e1 .yml -> .yaml 2026-03-12 00:22:52 +02:00
uumas
bf4ced4a9b service: Support cap_add 2026-03-12 00:20:19 +02:00
uumas
5a3bb96fc2 container: Support cap_add 2026-03-12 00:17:18 +02:00
uumas
447d4e59ad container: Stop container if process is oomkilled 2026-03-12 00:16:56 +02:00
uumas
f6af1d3472 service: Make supporting containers accessible using simple hostnames 2026-03-12 00:16:11 +02:00
uumas
ff1badbf03 container: Support setting container hostname 2026-03-12 00:12:03 +02:00
uumas
f721641fc6 Use systemd handlers from uumas.general.systemd 2026-03-12 00:10:31 +02:00
uumas
fce8804653 Make socat its own role 2026-03-12 00:06:37 +02:00
69 changed files with 1008 additions and 193 deletions

5
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,5 @@
{
"files.associations": {
"*.yaml": "ansible"
}
}

View File

@@ -0,0 +1,4 @@
---
caddy_socket_proxy_target_container: "{{ caddy_socket_proxy_service_name }}"
caddy_socket_proxy_container_ip: ""
caddy_socket_proxy_auto_update: true

View File

@@ -0,0 +1,30 @@
---
argument_specs:
main:
description: >-
Sets up a caddy container and a systemd socket unit, forwarding traffic from it to
target container
options:
caddy_socket_proxy_service_name:
description: Name of the caddy service, used for systemd unit and container naming
type: str
required: true
caddy_socket_proxy_target_container:
description: Name of the container to forward traffic to
type: str
required: false
default: "{{ caddy_socket_proxy_service_name }}"
caddy_socket_proxy_target_http_port:
description: Port on the target container to forward traffic to
type: int
required: true
caddy_socket_proxy_container_ip:
description: IP address to assign to the caddy container.
type: str
required: false
default: ""
caddy_socket_proxy_auto_update:
description: Whether to automatically update the caddy container
type: bool
required: false
default: true

View File

@@ -0,0 +1,45 @@
---
- name: Create caddy socket proxy mount directories for {{ caddy_socket_proxy_service_name }}
ansible.builtin.file:
path: "{{ item.key }}"
state: directory
mode: "{{ item.value }}"
with_dict:
"/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/": "0755"
"/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts": "0700"
"/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy": "0755"
- name: Configure caddy socket proxy for {{ caddy_socket_proxy_service_name }}
ansible.builtin.template:
src: Caddyfile.j2
dest: "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy/Caddyfile"
mode: "0644"
notify: Restart container service {{ caddy_socket_proxy_service_name }}-caddy-socket-proxy
- name: Caddy socket proxy socket for {{ caddy_socket_proxy_service_name }}
ansible.builtin.import_role:
name: uumas.general.systemd_socket
vars:
systemd_socket_name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
systemd_socket_requires:
- "{{ caddy_socket_proxy_target_container }}.service"
- name: Caddy container for {{ caddy_socket_proxy_service_name }}
ansible.builtin.import_role:
name: container
vars:
container_name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
container_image: "docker.io/library/caddy:2-alpine"
container_mounts:
- type: bind
source: "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy"
destination: /etc/caddy
readonly: true
container_networks:
- name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
ip: "{{ caddy_socket_proxy_container_ip }}"
container_requires:
- "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy.socket"
- "{{ caddy_socket_proxy_target_container }}.service"
container_auto_start: false
container_auto_update: "{{ caddy_socket_proxy_auto_update }}"

View File

@@ -0,0 +1,12 @@
# {{ ansible_managed }}
{
	servers {
		# Socket-activated listener is a unix-ish fd pass; trust proxy headers on it
		trusted_proxies_unix
	}
}
http:// {
	# fd/3 is the socket handed over by the systemd .socket unit
	bind fd/3
	# Forward to the target container (defaults to the service itself) on the
	# port declared by the role's required caddy_socket_proxy_target_http_port.
	# The previous version proxied to the caddy container's own name and used
	# service_container_http_port, which is another role's variable and is
	# undefined in this role's scope.
	reverse_proxy {{ caddy_socket_proxy_target_container }}:{{ caddy_socket_proxy_target_http_port }}
}

View File

@@ -6,10 +6,11 @@ container_mounts: []
container_devices: []
container_publish_ports: []
container_networks: []
container_ip: ""
container_hostname: ""
container_secrets: []
container_env: {}
container_auto_start: true
container_auto_update: true
container_requires: []
container_wants: []
container_add_capabilities: []

View File

@@ -1,7 +1,6 @@
---
- name: "Restart container service {{ container_name }}"
ansible.builtin.systemd_service:
name: "{{ container_name }}.service"
state: restarted
daemon_reload: true
ignore_errors: '{{ ansible_check_mode }}'
- name: Restart container service {{ container_name }}
ansible.builtin.set_fact:
systemd_restart_units: "{{ systemd_restart_units + [container_name ~ '.service'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -14,7 +14,7 @@ argument_specs:
default: []
elements: str
container_entrypoint:
description: Entrypoint to use for the continaer
description: Entrypoint to use for the container
type: str
required: false
default: ""
@@ -25,7 +25,7 @@ argument_specs:
default: ""
container_image:
description: "The image to run in the container, in FQIN format (registry/imagename:tag)"
description: "The image to run in the container, in FQIN format (registry/image_name:tag)"
type: str
required: true
@@ -118,12 +118,23 @@ argument_specs:
type: list
required: false
default: []
elements: str
container_ip:
description: IPv4 address for the container in the first network defined in container_networks
elements: dict
options:
name:
description: Network name
type: str
required: true
ip:
description: Container IPv4 address in the network
type: str
required: false
default: ""
container_hostname:
description: Hostname to set inside the container. Available to other containers on the same network.
type: str
required: false
default: ""
container_secrets:
description: A list of secrets available to the container as file or environment variable
type: list
@@ -167,6 +178,13 @@ argument_specs:
required: false
default: {}
container_add_capabilities:
description: List of capabilities to add to the container
type: list
required: false
default: []
elements: str
container_requires:
description: >
List of systemd units (like other containers) this one depends on.

View File

@@ -13,14 +13,13 @@
ansible.builtin.include_role:
name: network
vars:
network_name: "{{ network }}"
network_subnet: "{{ _container_network_subnet if network_index == 0 else '' }}"
network_range: "{{ _container_network_range if network_index == 0 else '' }}"
when: network_created_networks is not defined or network not in network_created_networks
network_name: "{{ network.name }}"
network_subnet: "{{ _container_network_subnet }}"
network_range: "{{ _container_network_range }}"
when: network_created_networks is not defined or network.name not in network_created_networks
loop: "{{ container_networks }}"
loop_control:
loop_var: network
index_var: network_index
- name: Create volumes for container {{ container_name }}
ansible.builtin.include_role:
@@ -49,12 +48,16 @@
user: "{{ container_user or omit }}"
mount: "{{ _container_mounts | map('items') | map('map', 'join', '=') | map('join', ',') }}"
device: "{{ _container_devices }}"
network: "{{ _container_networks_with_ip }}"
network: "{{ _container_networks }}"
hostname: "{{ container_hostname or omit }}"
publish: "{{ container_publish_ports }}"
secrets: "{{ _container_secrets }}"
env: "{{ container_env }}"
cap_add: "{{ container_add_capabilities }}"
label: "{{ _container_labels if _container_labels | length > 0 else omit }}"
state: quadlet
quadlet_file_mode: "0600"
quadlet_options: "{{ _container_quadlet_options }}"
notify: Restart container service {{ container_name }}
notify:
- Reload systemd daemon
- Restart container service {{ container_name }}

View File

@@ -1,33 +1,6 @@
---
_container_image: "{{ container_image | replace('/', '_') ~ '.image' }}"
_container_networks: "{{ container_networks | map('regex_replace', '$', '.network') }}"
_container_networks_with_ip: >-
{{
[
_container_networks[0] ~ (
':ip=' ~ container_ip if container_ip | length > 0 else ''
)
]
+ _container_networks[1:]
}}
_container_network_subnet: >-
{{ container_ip | ansible.utils.ipsubnet(24) if container_ip | length > 0 else '' }}
_container_network_subnet_ranges: >-
{{
[
_container_network_subnet | ansible.utils.ipsubnet(25, 0),
_container_network_subnet | ansible.utils.ipsubnet(25, 1)
] if container_ip | length > 0 else ''
}}
_container_network_range: >-
{{
_container_network_subnet_ranges |
reject('ansible.utils.supernet_of', container_ip) |
first
if container_ip | length > 0 else ''
}}
_container_volumes: "{{ container_mounts | selectattr('type', '==', 'volume') }}"
_container_mount_sources: "{{ container_mounts | map(attribute='source') }}"
@@ -110,6 +83,7 @@ _container_quadlet_unit_options: |
{% endfor %}
[Service]
SuccessExitStatus=0 143
OOMPolicy=stop
_container_quadlet_auto_start_options: |
[Service]
Restart=always

View File

@@ -0,0 +1,27 @@
---
_container_networks: >-
{{
container_networks
| map(attribute='name')
| map('regex_replace', '$', '.network')
| zip(container_networks | map(attribute='ip', default=''))
| map('reject', 'equalto', '')
| map('join', ':ip=')
}}
_container_network_subnet: >-
{{ network.ip | ansible.utils.ipsubnet(24) if network.ip | default('') | length > 0 else '' }}
_container_network_subnet_ranges: >-
{{
[
_container_network_subnet | ansible.utils.ipsubnet(25, 0),
_container_network_subnet | ansible.utils.ipsubnet(25, 1)
] if network.ip | default('') | length > 0 else []
}}
_container_network_range: >-
{{
_container_network_subnet_ranges |
reject('ansible.utils.supernet_of', network.ip) |
first
if network.ip | default('') | length > 0 else ''
}}

View File

@@ -19,7 +19,6 @@
service_container_http_port: 8080
service_domains: "{{ example_domains }}"
service_database_type: postgres
service_postgres_tag: 16-alpine
service_container_publish_ports:
- "127.0.0.1:8080:8080"
- "0.0.0.0:4443:8043"
@@ -27,7 +26,7 @@
- network-online.target
service_container_env:
TZ: "Etc/UTC"
DB_HOST: hello-world-db
DB_HOST: postgres
DB_USER: hello-world
DB_PASSWORD__FILE: /run/secrets/postgres
service_additional_containers:

1
roles/forgejo/README.md Normal file
View File

@@ -0,0 +1 @@
Installs and configures forgejo inside podman

View File

@@ -0,0 +1,6 @@
---
forgejo_require_signin_view: false
forgejo_enable_internal_signin: true
forgejo_smtp_user: ""
forgejo_smtp_password: ""

View File

@@ -0,0 +1,45 @@
---
argument_specs:
main:
description: "Installs and configures forgejo inside podman"
options:
forgejo_tag:
description: Forgejo version to use. Can be major (x), minor (x.y) or patch (x.y.z). Major version recommended.
type: str
required: true
forgejo_domain:
description: Domain forgejo should listen on
type: str
required: true
forgejo_secret_key:
description: A long secret key for forgejo to encrypt secrets with. Must never change.
type: str
required: true
forgejo_smtp_server:
description: Smtp server for forgejo
type: str
required: true
forgejo_smtp_from:
description: Address to send email from
type: str
required: true
forgejo_smtp_user:
description: Smtp user to authenticate as
type: str
required: false
default: ""
forgejo_smtp_password:
description: Smtp password to authenticate with
type: str
required: false
default: ""
forgejo_require_signin_view:
description: Whether to require signing in to view public repositories
type: bool
required: false
default: false
forgejo_enable_internal_signin:
description: Whether to enable signing in using local username/password
type: bool
required: false
default: true

View File

@@ -0,0 +1,81 @@
---
- name: Ensure netcat-openbsd is installed for ssh shell
ansible.builtin.apt:
name: netcat-openbsd
- name: Create git system user on host for forgejo ssh
ansible.builtin.user:
name: git
group: git
system: true
home: /srv/forgejo/git
generate_ssh_key: true
ssh_key_type: ed25519
shell: /srv/forgejo/git/ssh-shell
register: _forgejo_git_user
- name: Add git user's own ssh key to its authorized keys
ansible.posix.authorized_key:
user: git
key: "{{ _forgejo_git_user.ssh_public_key }}"
- name: Install ssh forwarding shell for forgejo
ansible.builtin.template:
src: ssh-shell.j2
dest: /srv/forgejo/git/ssh-shell
mode: "0755"
- name: Forgejo service
ansible.builtin.import_role:
name: service
vars:
service_name: forgejo
service_container_image: codeberg.org/forgejo/forgejo:{{ forgejo_tag }}
service_container_mounts:
- type: volume
source: data
destination: /data
- type: bind
source: /etc/localtime
destination: /etc/localtime
readonly: true
- type: bind
source: /srv/forgejo/git/.ssh
destination: /data/git/.ssh
service_container_secrets:
- name: secret-key
value: "{{ forgejo_secret_key }}"
service_domains:
- "{{ forgejo_domain }}"
service_database_type: postgres
service_postgres_tag: 18-alpine
service_container_publish_ports:
- name: ssh
type: socket
container_port: 22
service_container_env:
USER_UID: "{{ _forgejo_git_user.uid }}"
USER_GID: "{{ _forgejo_git_user.group }}"
FORGEJO__security__SECRET_KEY_URI: file:/run/secrets/secret-key
FORGEJO__database__DB_TYPE: postgres
FORGEJO__database__USER: forgejo
FORGEJO__database__NAME: forgejo
FORGEJO__database__HOST: postgres
FORGEJO__database__PASSWD__FILE: /run/secrets/postgres
FORGEJO__server__PROTOCOL: http+unix
FORGEJO__server__HTTP_ADDR: /run/forgejo.sock
FORGEJO__server__DOMAIN: "{{ forgejo_domain }}"
FORGEJO__server__ROOT_URL: https://{{ forgejo_domain }}
FORGEJO__server__SSH_ALLOW_UNEXPECTED_AUTHORIZED_KEYS: "true"
FORGEJO__mailer__ENABLED: "true"
FORGEJO__mailer__PROTOCOL: smtp
FORGEJO__mailer__SMTP_ADDR: "{{ forgejo_smtp_server }}"
FORGEJO__mailer__SMTP_PORT: "587"
FORGEJO__mailer__FROM: "{{ forgejo_smtp_from }}"
FORGEJO__mailer__USER: "{{ forgejo_smtp_user }}"
FORGEJO__mailer__PASSWD: "{{ forgejo_smtp_password }}"
FORGEJO__service__DISABLE_REGISTRATION: "true"
FORGEJO__service__REQUIRE_SIGNIN_VIEW: "{{ 'true' if forgejo_require_signin_view else 'false' }}"
FORGEJO__service__ENABLE_INTERNAL_SIGNIN: "{{ 'true' if forgejo_enable_internal_signin else 'false' }}"
FORGEJO__oauth2_client__ENABLE_AUTO_REGISTRATION: "true"
FORGEJO__openid__ENABLE_OPENID_SIGNIN: "false"

View File

@@ -0,0 +1,4 @@
#!/bin/bash
# {{ ansible_managed }}
shift
SHELL=/bin/bash ssh -o "ProxyCommand nc -U /run/forgejo-ssh-socat.sock" -o StrictHostKeyChecking=no git@forgejo "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $@"

View File

@@ -17,7 +17,7 @@
service_container_additional_networks: "{{ grafana_additional_networks }}"
service_container_env:
GF_DATABASE_TYPE: postgres
GF_DATABASE_HOST: grafana-postgres:5432
GF_DATABASE_HOST: postgres:5432
GF_DATABASE_NAME: grafana
GF_DATABASE_USER: grafana
GF_DATABASE_PASSWORD__FILE: /run/secrets/postgres

View File

@@ -1,7 +1,6 @@
---
- name: Restart network service {{ network_name }}
ansible.builtin.systemd_service:
name: "{{ network_name }}-network.service"
state: restarted
daemon_reload: true
ignore_errors: "{{ ansible_check_mode }}"
ansible.builtin.set_fact:
systemd_restart_units: "{{ systemd_restart_units + [network_name ~ '-network.service'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -7,12 +7,15 @@
driver: "{{ network_driver }}"
subnet: "{{ network_subnet if network_subnet | length > 0 else omit }}"
ip_range: "{{ network_range if network_range | length > 0 else omit }}"
quadlet_options: >-
{{
['Options=parent=' ~ ansible_facts.default_ipv4.interface]
if network_driver == 'macvlan' else []
}}
notify: Restart network service {{ network_name }}
opt:
parent: "{{ ansible_facts.default_ipv4.interface if network_driver == 'macvlan' else omit }}"
quadlet_options:
- |-
[Service]
ExecStopPost=/usr/bin/podman network rm {{ network_name }}
notify:
- Reload systemd daemon
- Restart network service {{ network_name }}
- name: Add network to created networks variable
ansible.builtin.set_fact:

View File

@@ -0,0 +1 @@
Sets up a nextcloud podman container, including HaRP using podman in podman

View File

@@ -0,0 +1,3 @@
---
nextcloud_tag: stable
nextcloud_install_harp: false

View File

@@ -0,0 +1,29 @@
---
- name: Unregister AppAPI daemon
containers.podman.podman_container_exec:
name: nextcloud
argv:
- /var/www/html/occ
- app_api:daemon:unregister
- harp
register: _nextcloud_appapi_unregister
changed_when: _nextcloud_appapi_unregister.rc == 0
failed_when: _nextcloud_appapi_unregister.rc not in [0, 1]
listen: Restart container service nextcloud-harp
- name: Register AppAPI daemon
containers.podman.podman_container_exec:
name: nextcloud
argv:
- /bin/sh
- -c
- >-
/var/www/html/occ app_api:daemon:register
--harp
--harp_frp_address=harp:8782
--harp_shared_key "$(cat /run/secrets/harp-shared-key)"
--net host
--set-default
--
harp HaRP docker-install http harp:8780 https://{{ nextcloud_domains[0] }}
listen: Restart container service nextcloud-harp

View File

@@ -0,0 +1,24 @@
---
argument_specs:
main:
description: "Sets up a nextcloud podman container, including HaRP using podman in podman"
options:
nextcloud_domains:
description: A list of domains nextcloud should listen on
type: list
required: true
elements: str
nextcloud_admin_password:
description: Password of the initial admin user
type: str
required: true
nextcloud_tag:
description: Nextcloud version to use
type: str
required: false
default: stable
nextcloud_install_harp:
description: Whether to install HaRP for nextcloud
type: bool
required: false
default: false

View File

@@ -0,0 +1,31 @@
---
- name: Nextcloud service
ansible.builtin.import_role:
name: service
vars:
service_name: nextcloud
service_container_image: docker.io/library/nextcloud:{{ nextcloud_tag }}
service_container_http_port: 80
service_domains: "{{ nextcloud_domains }}"
service_database_type: postgres
service_redis: true
service_container_mounts:
- type: volume
source: data
destination: /var/www/html
service_container_secrets:
- name: admin-password
value: "{{ nextcloud_admin_password }}"
- name: harp-shared-key
service_container_env:
POSTGRES_HOST: postgres
POSTGRES_DB: nextcloud
POSTGRES_USER: nextcloud
POSTGRES_PASSWORD_FILE: /run/secrets/postgres
REDIS_HOST: redis
TRUSTED_PROXIES: 10.0.0.0/8
NEXTCLOUD_TRUSTED_DOMAINS: "{{ nextcloud_domains | join(' ') }}"
NEXTCLOUD_ADMIN_USER: admin
NEXTCLOUD_ADMIN_PASSWORD_FILE: /run/secrets/admin-password
service_additional_containers: "{{ _nextcloud_additional_containers }}"
service_vhost_locations: "{{ _nextcloud_vhost_locations }}"

View File

@@ -0,0 +1,9 @@
[containers]
ipcns = "host"
cgroupns = "host"
cgroups = "disabled"
log_driver = "k8s-file"
[engine]
cgroup_manager = "cgroupfs"
events_logger = "file"
runtime = "crun"

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# {{ ansible_managed }}
_term() {
echo "Received SIGTERM, stopping all containers"
kill "$child"
}
podman system service -t 0 &
podman run \
--rm \
-v /run/secrets/harp-shared-key:/run/secrets/harp-shared-key:ro \
-e HP_SHARED_KEY_FILE=/run/secrets/harp-shared-key \
-e NC_INSTANCE_URL="https://{{ nextcloud_domains[0] }}" \
-e HP_TRUSTED_PROXY_IPS="10.0.0.0/8" \
-v /tmp/storage-run-1000/podman/podman.sock:/var/run/docker.sock \
-v /certs:/certs \
--name harp \
--network host \
ghcr.io/nextcloud/nextcloud-appapi-harp:release &
child=$!
trap _term SIGTERM
wait

View File

@@ -0,0 +1,45 @@
---
_nextcloud_cron_container:
name: cron
entrypoint: /cron.sh
_nextcloud_harp_container:
name: harp
add_capabilities:
- CAP_SYS_ADMIN
image: quay.io/podman/stable:latest
user: podman
entrypoint: /entrypoint.sh
devices:
- source: /dev/fuse
mounts:
- type: template
source: containers.conf.j2
destination: /etc/containers/containers.conf
- type: template
source: harp_entrypoint.sh.j2
destination: /entrypoint.sh
mode: "0755"
- type: volume
source: harp-certs
destination: /certs
- type: volume
source: harp-containers
destination: /home/podman/.local/share/containers
env: {}
secrets:
- name: harp-shared-key
publish_ports:
- name: harp
type: socket
container_port: 8780
_nextcloud_additional_containers: >-
{{
[_nextcloud_cron_container]
+ ([_nextcloud_harp_container] if nextcloud_install_harp else [])
}}
_nextcloud_harp_vhost_locations:
- path: /exapps/*
proxy_target_socket: /run/nextcloud-harp-socat.sock
_nextcloud_vhost_locations: >-
{{ _nextcloud_harp_vhost_locations if nextcloud_install_harp else [] }}

View File

@@ -1,4 +0,0 @@
---
- name: Reload systemd daemon
ansible.builtin.systemd_service:
daemon_reload: true

View File

@@ -1,5 +1,6 @@
---
dependencies:
- role: uumas.general.systemd
- role: uumas.general.compatcheck
vars:
compatcheck_supported_distributions:

View File

@@ -16,12 +16,14 @@ service_container_mounts: []
service_container_devices: []
service_container_secrets: []
service_container_env: {}
service_container_add_capabilities: []
service_database_type: none
service_database_additional_networks: []
service_database_secret_type: mount
service_database_secret_target: "{{ service_database_type }}"
service_postgres_image: docker.io/library/postgres
service_postgres_image: docker.io/pgautoupgrade/pgautoupgrade
service_postgres_tag: alpine
service_redis: false
service_additional_containers: []

View File

@@ -1,21 +1,6 @@
---
- name: Restart socket for {{ service_name }}
ansible.builtin.systemd_service:
name: "{{ service_name }}.socket"
state: restarted
daemon_reload: true
ignore_errors: '{{ ansible_check_mode }}'
- name: Restart socat socket for {{ service_name }}
ansible.builtin.systemd_service:
name: "{{ service_name }}-socat.socket"
state: restarted
daemon_reload: true
ignore_errors: '{{ ansible_check_mode }}'
- name: Restart socat socket for {{ service_name ~ '-oauth2-proxy' }}
ansible.builtin.systemd_service:
name: "{{ service_name }}-oauth2-proxy-socat.socket"
state: restarted
daemon_reload: true
ignore_errors: '{{ ansible_check_mode }}'
ansible.builtin.set_fact:
systemd_restart_units: "{{ systemd_restart_units + [service_name ~ '.socket'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -29,9 +29,7 @@ argument_specs:
service_container_http_port:
description:
- Port inside the container where http requests are proxied to.
- >-
If set to 0, /run/{{ service_name }}.sock on the host is bind mounted to /run/{{ service_name }}.sock inside the container
and http requests are proxied to it.
- If set to 0, http requests are proxied to /run/<service name>.sock inside the container
type: int
required: false
default: 0
@@ -70,7 +68,7 @@ argument_specs:
required: false
service_container_image:
description: "The image to run in the service container(s), in FQIN format (registry/imagename:tag)."
description: "The image to run in the service container(s), in FQIN format (registry/image_name:tag)."
type: str
required: true
@@ -93,11 +91,44 @@ argument_specs:
default: []
elements: str
service_container_publish_ports:
description: "A list of published ports in docker format (<host listen address>:<host port>:<container port>)"
description: A list of ports to publish outside the container
type: list
required: false
default: []
elements: str
elements: dict
options:
name:
description:
- Name of the port.
- If type is socket, the socket will be created at /run/<service name>-<port name>.sock on the host.
- If type is not socket, this is just informative.
type: str
required: true
container_port:
description: Container port to publish
type: int
required: true
type:
description: Whether to publish as a port or socket
type: str
required: false
default: port
choices:
- socket
- port
host_address:
description:
- IP or hostname to listen on on the host
- Ignored if type is socket
type: str
required: false
default: 0.0.0.0
host_port:
description:
- Port to listen on on the host
- Required if type is port, ignored otherwise
type: int
required: false
service_container_mounts:
description: List of bind mounts or volumes to be mounted inside the service container(s).
type: list
@@ -239,21 +270,29 @@ argument_specs:
required: false
default: {}
service_container_add_capabilities:
description: List of capabilities to add to the service container
type: list
required: false
default: []
elements: str
service_database_type:
description:
- Database type to set up.
- >
It will be run in a container accessible to the service at
host {{ service_name }}-{{ service_database_type }} on the default port.
- The database user will be {{ service_name }}
- The password will be accessible as secret at /run/secrets/{{ service_database_type }}
host <service database type> on the default port.
- The database user will be <service name>
- The password will be accessible as secret at /run/secrets/<service database type>
- >
The password will also be available as the
service_podman_secrets['{{ service_name }}-{{ service_database_type }}'] variable.
service_podman_secrets['<service name>-<service database type>'] variable.
type: str
choices:
- postgres
- mariadb
- mongo
- none
required: false
default: none
@@ -287,14 +326,13 @@ argument_specs:
description:
- Postgresql version to use.
- Can be debian (n) or alpine-based (n-alpine), where n can be major version like 14 or minor like 14.13.
- Required if service_database_type is postgres, does nothing otherwise
- Ignored if database type is not postgres.
- If a custom postgres image is specified, see that image documentation for supported tags.
type: str
required: false
service_redis:
description: >-
Whether to install redis in a container accessible to the service at host
{{ service_name }}-redis.
Whether to install redis in a container accessible to the service at host redis.
type: bool
required: false
default: false
@@ -324,11 +362,16 @@ argument_specs:
type: str
required: false
default: "{{ service_container_image }}"
user:
description: The UID to run as inside the container
type: str
required: false
default: "{{ service_container_user }}"
command:
description: Command to start the container with.
type: list
required: false
default: "[]"
default: []
elements: str
entrypoint:
description: Entrypoint to use in the container
@@ -336,7 +379,7 @@ argument_specs:
required: false
default: ""
mounts:
description: List of bind mounts or volumes to be mounted inside the main service container.
description: List of bind mounts or volumes to be mounted inside the container.
type: list
required: false
default: "{{ service_container_mounts }}"
@@ -368,6 +411,22 @@ argument_specs:
- Defaults to false for volume and bind, true for template
type: bool
required: false
user:
description: Volume owner uid. Only applicable if mount type is volume.
type: str
required: false
default: ""
group:
description: Volume owner gid. Only applicable if mount type is volume.
type: str
required: false
default: ""
mode:
description:
- Templated file or copied directory/file permissions.
- Defaults to 0644 for files, 0755 for directories
type: str
required: false
volume_device:
description: >-
The path of a device which is mounted for the volume.
@@ -390,17 +449,73 @@ argument_specs:
elements: str
required: false
default: []
devices:
description: List of devices to be added inside the container.
type: list
required: false
default: "{{ service_container_devices }}"
elements: dict
options:
source:
description: Device path on host
type: str
required: true
destination:
description: Device path inside the container. Defaults to same as host.
type: str
required: false
publish_ports:
description: "A list of published ports in docker format (<host listen address>:<host port>:<container port>)"
description: A list of ports to publish outside the container
type: list
required: false
default: []
elements: str
elements: dict
options:
name:
description:
- Name of the port.
- >-
If type is socket, the socket will be created at
/run/<service name>-<additional container name>-<port name>.sock on the host.
- If type is not socket, this is just informative.
type: str
required: true
container_port:
description: Container port to publish
type: int
required: true
type:
description: Whether to publish as a port or socket
type: str
required: false
default: port
choices:
- socket
- port
host_address:
description:
- IP or hostname to listen on on the host
- Ignored if type is socket
type: str
required: false
default: 0.0.0.0
host_port:
description:
- Port to listen on on the host
- Required if type is port, ignored otherwise
type: int
required: false
env:
description: A dict of environment variables for the container
type: dict
required: false
default: {}
default: "{{ service_container_env }}"
add_capabilities:
description: List of capabilities to add to the container
type: list
required: false
default: "{{ service_container_add_capabilities }}"
elements: str
secrets:
description:
- >
@@ -412,7 +527,7 @@ argument_specs:
container doesn't support reading the secret from file or environment variable.
type: list
required: false
default: []
default: "{{ service_container_secrets }}"
elements: dict
options:
name:

View File

@@ -1,5 +1,5 @@
---
- name: Additional container {{ container ~ ' for ' ~ service_name }}
- name: Additional containers for {{ service_name }}
ansible.builtin.include_role:
name: container
vars:
@@ -7,17 +7,25 @@
container_image: "{{ _service_additional_container.image | default(service_container_image) }}"
container_command: "{{ _service_additional_container.command | default([]) }}"
container_entrypoint: "{{ _service_additional_container.entrypoint | default('') }}"
container_user: "{{ service_container_user }}"
container_user: "{{ _service_additional_container.user | default(service_container_user) }}"
container_mounts: "{{ _service_additional_container_mounts }}"
container_publish_ports: "{{ _service_additional_container.publish_ports | default([]) }}"
container_networks: "{{ _service_container_networks }}"
container_ip: "{{ _service_additional_container_ip }}"
container_secrets: "{{ _service_additional_container.secrets | default(_service_container_secrets) }}"
container_devices: "{{ _service_additional_container.devices | default(service_container_devices) }}"
container_publish_ports: "{{ _service_additional_container_publish_ports }}"
container_networks: "{{ _service_additional_container_networks }}"
container_hostname: "{{ _service_additional_container.name | regex_replace('^' ~ service_name ~ '-', '') }}"
container_secrets: "{{ _service_additional_container_secrets }}"
container_env: "{{ _service_additional_container.env | default(service_container_env) }}"
container_add_capabilities: "{{ _service_additional_container.add_capabilities | default(service_container_add_capabilities) }}"
container_requires: "{{ _service_container_requires }}"
container_wants: "{{ service_wants }}"
container_wants: "{{ _service_additional_container_wants }}"
container_auto_update: "{{ service_auto_update }}"
loop: "{{ _service_additional_containers }}"
loop_control:
loop_var: _service_additional_container
index_var: _service_additional_container_index
- name: Socat sockets for additional containers of {{ service_name }}
ansible.builtin.include_tasks: additional_socat.yaml
loop: "{{ _service_additional_containers }}"
loop_control:
loop_var: _service_additional_container

View File

@@ -0,0 +1,12 @@
---
- name: Socat for socket published ports of {{ service_name }}
ansible.builtin.include_role:
name: socat
loop: "{{ _service_additional_container_publish_socket_ports }}"
loop_control:
loop_var: publish_port
vars:
socat_service_name: "{{ service_name }}-{{ publish_port.name }}"
socat_target_container: "{{ _service_additional_container.name }}"
socat_target_http_port: "{{ publish_port.container_port }}"
socat_auto_update: "{{ service_auto_update }}"

View File

@@ -1,5 +1,5 @@
---
- name: Incude variables for database {{ service_database_type }}
- name: Include variables for database {{ service_database_type }}
ansible.builtin.include_vars:
file: database/{{ service_database_type }}.yaml
@@ -14,10 +14,7 @@
source: "{{ _service_database_name }}"
destination: "{{ _service_database_mount_destination }}"
container_networks: "{{ _service_database_networks }}"
container_ip: >-
{{ service_container_ip | ansible.utils.ipmath(1) if _service_static_ip else '' }}
container_secrets:
- name: "{{ _service_database_name }}"
target: "{{ service_database_type }}"
container_hostname: "{{ service_database_type }}"
container_secrets: "{{ _service_database_secrets }}"
container_env: "{{ _service_database_env }}"
container_auto_update: "{{ service_auto_update }}"

View File

@@ -15,15 +15,20 @@
when: _service_container_secrets | length > 0
- name: Template mounts for {{ service_name }}
ansible.builtin.include_tasks: hostmounts.yaml
when: (_service_template_mounts + _service_copy_mounts) | length > 0
ansible.builtin.include_tasks: host_mounts.yaml
when: (_service_all_template_mounts + _service_all_copy_mounts) | length > 0
- name: Additional containers for {{ service_name }}
ansible.builtin.include_tasks: additional.yaml
when: _service_additional_containers | length > 0
- name: Native socket for {{ service_name }}
ansible.builtin.include_tasks: native_socket.yaml
ansible.builtin.include_role:
name: uumas.general.systemd_socket
vars:
systemd_socket_name: "{{ service_name }}"
systemd_socket_requires:
- "{{ service_name }}.service"
when: _service_native_socket
- name: Main container for {{ service_name }}
@@ -37,23 +42,37 @@
container_user: "{{ service_container_user }}"
container_mounts: "{{ _service_container_mounts }}"
container_devices: "{{ service_container_devices }}"
container_publish_ports: "{{ service_container_publish_ports }}"
container_publish_ports: "{{ _service_container_publish_ports }}"
container_networks: "{{ _service_container_networks }}"
container_ip: "{{ service_container_ip }}"
container_secrets: "{{ _service_container_secrets }}"
container_env: "{{ service_container_env }}"
container_add_capabilities: "{{ service_container_add_capabilities }}"
container_requires: "{{ _service_container_requires }}"
container_wants: "{{ _service_container_wants }}"
container_auto_update: "{{ service_auto_update }}"
- name: Socat for {{ service_name }}
ansible.builtin.include_tasks: socat.yaml
- name: Caddy socket proxy for http of {{ service_name }}
ansible.builtin.include_role:
name: caddy_socket_proxy
when: service_container_http_port > 0
vars:
socat_service_name: "{{ service_name }}"
socat_target_http_port: "{{ service_container_http_port }}"
socat_container_ip: >-
{{ service_container_ip | ansible.utils.ipmath(3) if _service_static_ip else '' }}
caddy_socket_proxy_service_name: "{{ service_name }}"
caddy_socket_proxy_target_http_port: "{{ service_container_http_port }}"
caddy_socket_proxy_container_ip: >-
{{ service_container_ip | ansible.utils.ipmath(257) if _service_static_ip else '' }}
caddy_socket_proxy_auto_update: "{{ service_auto_update }}"
- name: Socat for socket published ports of {{ service_name }}
ansible.builtin.include_role:
name: socat
loop: "{{ _service_container_publish_socket_ports }}"
loop_control:
loop_var: publish_port
vars:
socat_service_name: "{{ service_name }}-{{ publish_port.name }}"
socat_target_container: "{{ service_name }}"
socat_target_http_port: "{{ publish_port.container_port }}"
socat_auto_update: "{{ service_auto_update }}"
- name: Reverse proxy for {{ service_name }}
ansible.builtin.include_tasks: proxy.yaml

View File

@@ -1,7 +0,0 @@
---
- name: Socket for {{ service_name }}
ansible.builtin.template:
src: service.socket.j2
dest: /etc/systemd/system/{{ service_name }}.socket
mode: "0644"
notify: Restart socket for {{ service_name }}

View File

@@ -11,7 +11,7 @@
- --cookie-secret-file
- /run/secrets/cookie-secret
container_networks:
- "{{ service_name }}-oauth2-proxy"
- name: "{{ service_name }}-oauth2-proxy"
container_secrets:
- name: "{{ service_name }}-oauth2-proxy-cookie-secret"
length: 32
@@ -20,18 +20,20 @@
value: "{{ service_oauth2_proxy_client_secret }}"
target: client-secret
container_env:
OAUTH2_PROXY_HTTP_ADDRESS: 0.0.0.0:4180
OAUTH2_PROXY_HTTP_ADDRESS: fd:3
OAUTH2_PROXY_PROVIDER: oidc
OAUTH2_PROXY_OIDC_ISSUER_URL: "{{ service_oauth2_proxy_issuer_url }}"
OAUTH2_PROXY_CLIENT_ID: "{{ service_oauth2_proxy_client_id }}"
OAUTH2_PROXY_CODE_CHALLENGE_METHOD: S256
OAUTH2_PROXY_SKIP_PROVIDER_BUTTON: "true"
OAUTH2_PROXY_EMAIL_DOMAINS: "*"
OAUTH2_PROXY_REVERSE_PROXY: "true"
container_requires:
- "{{ service_name }}-oauth2-proxy.socket"
container_auto_update: "{{ service_auto_update }}"
- name: Socat for OAuth2 Proxy for {{ service_name }}
ansible.builtin.import_tasks: socat.yaml
- name: Socket for OAuth2 Proxy for {{ service_name }}
ansible.builtin.import_role:
name: uumas.general.systemd_socket
vars:
socat_service_name: "{{ service_name }}-oauth2-proxy"
socat_target_http_port: 4180
socat_container_ip: ""
systemd_socket_name: "{{ service_name }}-oauth2-proxy"

View File

@@ -6,7 +6,7 @@
container_name: "{{ service_name }}-redis"
container_image: docker.io/valkey/valkey:alpine
container_networks:
- "{{ service_name }}"
container_ip: >-
{{ service_container_ip | ansible.utils.ipmath(2) if _service_static_ip else '' }}
- name: "{{ service_name }}"
ip: "{{ service_container_ip | ansible.utils.ipmath(2) if _service_static_ip else '' }}"
container_hostname: redis
container_auto_update: "{{ service_auto_update }}"

View File

@@ -9,11 +9,6 @@
msg: "service_container_user must be a string, not int."
when: service_container_user is not string
- name: Fail if service_database_type is postgres but service_postgres_tag is not set
ansible.builtin.fail:
msg: "service_postgres_tag needs to be set when database type is postgres"
when: "service_database_type == 'postgres' and service_postgres_tag is not defined"
- name: Fail if template mount source doesn't end in .j2
ansible.builtin.fail:
msg: "Template mount source file name needs to end in .j2. The file {{ item.source }} of {{ service_name }} doesn't."

View File

@@ -1,6 +0,0 @@
# {{ ansible_managed }}
[Unit]
Description={{ service_name }} socket
[Socket]
ListenStream=/run/{{ service_name }}.sock

View File

@@ -1,6 +0,0 @@
# {{ ansible_managed }}
[Unit]
Description={{ socat_service_name }} socat socket
[Socket]
ListenStream=/run/{{ socat_service_name }}-socat.sock

View File

@@ -1,6 +1,7 @@
---
_service_database_image: docker.io/library/mariadb:lts
_service_database_mount_destination: /var/lib/mysql
_service_database_authenticated: true
_service_database_env:
MARIADB_RANDOM_ROOT_PASSWORD: "1"
MARIADB_USER: "{{ service_name | replace('-', '_') }}"

View File

@@ -0,0 +1,6 @@
---
_service_database_image: docker.io/library/mongo:latest
_service_database_mount_destination: /data/db
_service_database_authenticated: false
_service_database_env:
MONGO_INITDB_DATABASE: "{{ service_name | replace('-', '_') }}"

View File

@@ -1,6 +1,13 @@
---
_service_database_image: "{{ service_postgres_image }}:{{ service_postgres_tag }}"
_service_database_mount_destination: /var/lib/postgresql/data
_service_database_mount_destination: >-
{{
'/var/lib/postgresql/data'
if (service_postgres_tag | split('-') | length > 1)
and (service_postgres_tag | split('-') | first) is version('18', '<')
else '/var/lib/postgresql'
}}
_service_database_authenticated: true
_service_database_env:
POSTGRES_USER: "{{ service_name | replace('-', '_') }}"
POSTGRES_PASSWORD_FILE: "/run/secrets/{{ service_database_type }}"

View File

@@ -11,13 +11,81 @@ _service_additional_containers: >-
| map('combine')
}}
_service_additional_container_ip: >-
_service_additional_container_wants: >-
{{
service_container_ip |
ansible.utils.ipmath(20 + _service_additional_container_index)
if _service_static_ip else ''
service_wants
+ _service_additional_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat.socket')
}}
_service_additional_container_networks: >-
{{
[{
'name': service_name,
'ip':
service_container_ip | ansible.utils.ipmath(20 + _service_additional_container_index)
if _service_static_ip else ''
}]
+ (
service_container_additional_networks
+ (
_service_additional_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat')
)
) | map('community.general.dict_kv', 'name')
}}
_service_additional_container_secrets: >-
{{
(
_service_additional_container.secrets
| map(attribute='name')
| map('community.general.dict_kv', 'target')
| zip(
_service_additional_container.secrets,
_service_additional_container.secrets
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('community.general.dict_kv', 'name')
)
| map('combine')
) if _service_additional_container.secrets is defined
else _service_container_secrets
}}
_service_additional_container_publish_ports_with_defaults: >-
{{
([{ 'type': 'port', 'host_address': '0.0.0.0' }] * _service_additional_container.publish_ports | length)
| zip(_service_additional_container.publish_ports)
| map('combine')
}}
_service_additional_container_publish_socket_ports: >-
{{
_service_additional_container_publish_ports_with_defaults | selectattr('type', '==', 'socket')
if _service_additional_container.publish_ports is defined
else
[]
}}
_service_additional_container_publish_port_ports: >-
{{
_service_additional_container_publish_ports_with_defaults | selectattr('type', '==', 'port')
if _service_additional_container.publish_ports is defined
else
[]
}}
_service_additional_container_publish_ports: >-
{{
_service_additional_container_publish_port_ports | map(attribute='host_address') |
zip(
_service_additional_container_publish_port_ports | map(attribute='host_port'),
_service_additional_container_publish_port_ports | map(attribute='container_port')
) | map('join', ':')
}}
_service_additional_volume_mounts: "{{ _service_additional_container.mounts | selectattr('type', '==', 'volume') }}"
_service_additional_template_mounts: "{{ _service_additional_container.mounts | selectattr('type', '==', 'template') }}"

View File

@@ -3,6 +3,14 @@ _service_setup_database: "{{ service_database_type != 'none' }}"
_service_database_name: "{{ service_name }}-{{ service_database_type }}"
_service_database_networks: >-
{{
[service_name] +
service_database_additional_networks
[{
'name': service_name,
'ip': service_container_ip | ansible.utils.ipmath(1) if _service_static_ip else ''
}]
+ service_database_additional_networks | map('community.general.dict_kv', 'name')
}}
_service_database_secret:
name: "{{ _service_database_name }}"
target: "{{ service_database_type }}"
_service_database_secrets: "{{ [_service_database_secret] if _service_database_authenticated else [] }}"

View File

@@ -1,5 +1,26 @@
---
_service_container_networks: "{{ [service_name] + service_container_additional_networks }}"
_service_container_networks: >-
{{
[{
'name': service_name,
'ip': service_container_ip
}]
+ (
[{
'name': service_name ~ '-caddy-socket-proxy',
'ip': service_container_ip | ansible.utils.ipmath(256) if _service_static_ip else ''
}] if service_container_http_port > 0 else []
)
+ (
service_container_additional_networks
+ (
_service_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat')
)
) | map('community.general.dict_kv', 'name')
}}
_service_static_ip: "{{ service_container_ip | length > 0 }}"
_service_container_requires: >-
@@ -12,8 +33,12 @@ _service_container_requires: >-
_service_container_wants: >-
{{
service_wants
+ ([service_name + '-socat.socket'] if service_container_http_port > 0 else [])
+ ([service_name + '-oauth2-proxy-socat.socket'] if _service_oauth2_proxy else [])
+ ([service_name + '-caddy-socket-proxy.socket'] if service_container_http_port > 0 else [])
+ ([service_name + '-oauth2-proxy.socket'] if _service_oauth2_proxy else [])
+ _service_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat.socket')
+ _service_additional_containers
| map(attribute='name')
| map('regex_replace', '$', '.service')

View File

@@ -1,8 +1,8 @@
---
_service_container_socket_mount:
type: bind
source: /run/{{ service_name }}
destination: /run/{{ service_name }}
source: /run/{{ service_name }}.sock
destination: /run/{{ service_name }}.sock
_service_volume_mounts: "{{ service_container_mounts | selectattr('type', '==', 'volume') }}"
_service_template_mounts: "{{ service_container_mounts | selectattr('type', '==', 'template') }}"

View File

@@ -2,7 +2,7 @@
_service_native_socket: "{{ service_domains | length > 0 and service_container_http_port == 0 }}"
_service_socket_path: >-
/run/{{ service_name ~ ('-socat' if not _service_native_socket else '' ) }}.sock
/run/{{ service_name ~ ('-caddy-socket-proxy' if not _service_native_socket else '' ) }}.sock
_service_replacement_host_header:
Host: "{{ service_name }}:{{ service_container_http_port }}"
@@ -10,7 +10,7 @@ _service_proxy_headers: "{{ _service_replacement_host_header if not service_prox
_service_oauth2_proxy: "{{ service_proxy_auth_type == 'oauth2-proxy' }}"
_service_oauth2_socket: >-
{{ '/run/' ~ service_name ~ '-oauth2-proxy-socat.sock' if _service_oauth2_proxy else '' }}
{{ '/run/' ~ service_name ~ '-oauth2-proxy.sock' if _service_oauth2_proxy else '' }}
_service_oauth2_proxy_location:
path: /oauth2/*
proxy_target_socket: "{{ _service_oauth2_socket }}"

View File

@@ -0,0 +1,21 @@
---
_service_container_publish_ports_with_defaults: >-
{{
([{ 'type': 'port', 'host_address': '0.0.0.0' }] * service_container_publish_ports | length)
| zip(service_container_publish_ports)
| map('combine')
}}
_service_container_publish_socket_ports: >-
{{ _service_container_publish_ports_with_defaults | selectattr('type', '==', 'socket') }}
_service_container_publish_port_ports: >-
{{ _service_container_publish_ports_with_defaults | selectattr('type', '==', 'port') }}
_service_container_publish_ports: >-
{{
_service_container_publish_port_ports | map(attribute='host_address') |
zip(
_service_container_publish_port_ports | map(attribute='host_port'),
_service_container_publish_port_ports | map(attribute='container_port')
) | map('join', ':')
}}

View File

@@ -19,4 +19,12 @@ _service_container_secrets: >-
'target': service_database_secret_target
}] if _service_setup_database else []
)
+ (
[{
'name': _service_database_name ~ '-url',
'value': 'postgres://' ~ service_name | replace('-', '_') ~ ':' ~ service_podman_secrets[service_name ~ '-postgres'] ~ '@postgres/' ~ service_name | replace('-', '_') ~ '?sslmode=disable',
'type': service_database_secret_type,
'target': service_database_secret_target ~ '-url'
}] if service_podman_secrets[service_name ~ '-postgres'] is defined else []
)
}}

1
roles/socat/README.md Normal file
View File

@@ -0,0 +1 @@
Sets up a socat container along with a systemd socket unit to forward traffic to it

View File

@@ -0,0 +1,4 @@
---
socat_target_container: "{{ socat_service_name }}"
socat_container_ip: ""
socat_auto_update: true

View File

@@ -0,0 +1,28 @@
---
argument_specs:
main:
description: Sets up a socat container along with a systemd socket unit to forward traffic to it
options:
socat_service_name:
description: Name of the socat service, used for systemd unit and container naming
type: str
required: true
socat_target_container:
description: Name of the container to forward traffic to
type: str
required: false
default: "{{ socat_service_name }}"
socat_target_http_port:
description: Port on the target container to forward traffic to
type: int
required: true
socat_container_ip:
description: IP address to assign to the socat container.
type: str
required: false
default: ""
socat_auto_update:
description: Whether to automatically update the socat container
type: bool
required: false
default: true

View File

@@ -1,10 +1,11 @@
---
- name: Socat socket for {{ socat_service_name }}
ansible.builtin.template:
src: socat.socket.j2
dest: /etc/systemd/system/{{ socat_service_name }}-socat.socket
mode: "0644"
notify: Restart socat socket for {{ socat_service_name }}
ansible.builtin.import_role:
name: uumas.general.systemd_socket
vars:
systemd_socket_name: "{{ socat_service_name }}-socat"
systemd_socket_requires:
- "{{ socat_target_container }}.service"
- name: Socat container for {{ socat_service_name }}
ansible.builtin.import_role:
@@ -14,13 +15,13 @@
container_image: "docker.io/alpine/socat:latest"
container_command:
- "ACCEPT-FD:3,fork"
- "TCP:{{ socat_service_name }}:{{ socat_target_http_port }}"
- "TCP:{{ socat_target_container }}:{{ socat_target_http_port }}"
container_user: nobody
container_networks:
- "{{ socat_service_name }}"
container_ip: "{{ socat_container_ip }}"
- name: "{{ socat_service_name }}-socat"
ip: "{{ socat_container_ip }}"
container_requires:
- "{{ socat_service_name }}-socat.socket"
- "{{ socat_service_name }}.service"
- "{{ socat_target_container }}.service"
container_auto_start: false
container_auto_update: "{{ service_auto_update }}"
container_auto_update: "{{ socat_auto_update }}"

View File

@@ -82,7 +82,7 @@
vhost_domains:
- "{{ synapse_external_domain }}:8448"
vhost_proxy_target_netproto: unix
vhost_proxy_target_socket: "/run/synapse-socat.sock"
vhost_proxy_target_socket: "/run/synapse-caddy-socket-proxy.sock"
- name: Open port for synapse federation
ansible.posix.firewalld:

View File

@@ -27,7 +27,7 @@ listeners:
database:
name: psycopg2
args:
host: synapse-postgres
host: postgres
user: synapse
password: "{{ service_podman_secrets['synapse-postgres'] }}"
dbname: synapse

View File

@@ -1,7 +1,6 @@
---
- name: "Restart volume service {{ volume_name }}"
ansible.builtin.systemd_service:
name: "{{ volume_name }}-volume.service"
state: restarted
daemon_reload: true
ignore_errors: "{{ ansible_check_mode }}"
ansible.builtin.set_fact:
systemd_restart_units: "{{ systemd_restart_units + [volume_name ~ '-volume.service'] }}" # noqa: var-naming[no-role-prefix]
changed_when: true
notify: Apply systemd unit restarts

View File

@@ -9,4 +9,6 @@
state: quadlet
quadlet_file_mode: "0644"
quadlet_options: "{{ _volume_quadlet_options }}"
notify: Restart volume service {{ volume_name }}
notify:
- Reload systemd daemon
- Restart volume service {{ volume_name }}

View File

@@ -0,0 +1,9 @@
---
argument_specs:
main:
description: Installs windmill with worker in podman in podman
options:
windmill_domain:
description: The domain to use for windmill
type: str
required: true

View File

@@ -0,0 +1,66 @@
---
- name: Windmill service
ansible.builtin.import_role:
name: service
vars:
service_name: windmill
service_container_image: ghcr.io/windmill-labs/windmill:main
service_container_mounts:
- type: volume
source: worker-logs
destination: /tmp/windmill/logs
service_container_http_port: 8000
service_domains:
- "{{ windmill_domain }}"
service_database_type: postgres
service_container_env:
DATABASE_URL_FILE: /run/secrets/postgres-url
MODE: server
service_additional_containers:
- name: worker
image: quay.io/podman/stable:latest
user: podman
entrypoint: /entrypoint.sh
mounts:
- type: volume
source: worker-logs
destination: /worker-logs
- type: volume
source: worker-dependency-cache
destination: /worker-dependency-cache
- type: template
source: worker_entrypoint.sh.j2
destination: /entrypoint.sh
mode: "0755"
- type: volume
source: worker-containers
destination: /home/podman/.local/share/containers
publish_ports: []
env: {}
- name: worker-native
env:
DATABASE_URL_FILE: /run/secrets/postgres-url
MODE: worker
WORKER_TYPE: native
NATIVE_MODE: "true"
NUM_WORKERS: "8"
SLEEP_QUEUE: "200"
- name: lsp
image: ghcr.io/windmill-labs/windmill-extra:latest
secrets: []
mounts:
- type: volume
source: lsp-cache
destination: /puls/.cache
publish_ports:
- name: lsp
type: socket
container_port: 3001
env:
ENABLE_LSP: "true"
ENABLE_MULTIPLAYER: "false"
ENABLE_DEBUGGER: "false"
WINDMILL_BASE_URL: http://windmill:8000
service_vhost_locations:
- path: /ws/*
proxy_target_socket: /run/windmill-lsp-socat.sock

View File

@@ -0,0 +1,26 @@
#!/bin/bash
# {{ ansible_managed }}
_term() {
echo "Received SIGTERM, stopping all containers"
kill "$child"
}
podman system service -t 0 &
podman run \
--rm \
-v /run/secrets/postgres-url:/run/secrets/postgres-url:ro \
-e DATABASE_URL_FILE=/run/secrets/postgres-url \
-e MODE=worker \
-e WORKER_GROUP=default \
-e ENABLE_UNSHARE_PID="true" \
-v /tmp/storage-run-1000/podman/podman.sock:/var/run/docker.sock \
-v /worker-logs:/tmp/windmill/logs \
-v /worker-dependency-cache:/tmp/windmill/cache \
--name worker \
--network host \
ghcr.io/windmill-labs/windmill:main &
child=$!
trap _term SIGTERM
wait $!