Compare commits


7 Commits

Author  SHA1        Message                                           Date
uumas   5814267d66  Add windmill                                      2026-03-18 00:31:50 +02:00
uumas   defd2517ea  service: Add postgres url to secrets              2026-03-18 00:30:25 +02:00
uumas   615c4013c1  Use caddy instead of socat for http proxying      2026-03-15 22:30:36 +02:00
uumas   77768e5483  small fixes                                       2026-03-15 22:30:16 +02:00
uumas   162972810f  example: fix postgres examples                    2026-03-15 21:56:16 +02:00
uumas   8595e261c9  nextcloud: Make HArP optional and opt-in          2026-03-15 00:43:24 +02:00
uumas   70c5ed7ea0  service: Make oauth2-proxy depend on its socket   2026-03-14 23:50:01 +02:00
19 changed files with 278 additions and 54 deletions

View File

@@ -0,0 +1,4 @@
---
caddy_socket_proxy_target_container: "{{ caddy_socket_proxy_service_name }}"
caddy_socket_proxy_container_ip: ""
caddy_socket_proxy_auto_update: true

View File

@@ -0,0 +1,30 @@
---
argument_specs:
  main:
    description: >-
      Sets up a Caddy container and a systemd socket unit, forwarding traffic from the
      socket to the target container
    options:
      caddy_socket_proxy_service_name:
        description: Name of the Caddy service, used for systemd unit and container naming
        type: str
        required: true
      caddy_socket_proxy_target_container:
        description: Name of the container to forward traffic to
        type: str
        required: false
        default: "{{ caddy_socket_proxy_service_name }}"
      caddy_socket_proxy_target_http_port:
        description: Port on the target container to forward traffic to
        type: int
        required: true
      caddy_socket_proxy_container_ip:
        description: IP address to assign to the Caddy container
        type: str
        required: false
        default: ""
      caddy_socket_proxy_auto_update:
        description: Whether to automatically update the Caddy container
        type: bool
        required: false
        default: true
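
For reference, a minimal sketch of invoking the role; the service name, port, and empty IP below are illustrative assumptions, not values taken from this change set:

- name: Caddy socket proxy for example
  ansible.builtin.include_role:
    name: caddy_socket_proxy
  vars:
    caddy_socket_proxy_service_name: example    # also used as the default target container name
    caddy_socket_proxy_target_http_port: 8080   # assumed HTTP port inside the target container
    caddy_socket_proxy_container_ip: ""         # empty lets the container network assign an address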

View File

@@ -0,0 +1,45 @@
---
- name: Create caddy socket proxy mount directories for {{ caddy_socket_proxy_service_name }}
  ansible.builtin.file:
    path: "{{ item.key }}"
    state: directory
    mode: "{{ item.value }}"
  with_dict:
    "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/": "0755"
    "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts": "0700"
    "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy": "0755"
- name: Configure caddy socket proxy for {{ caddy_socket_proxy_service_name }}
  ansible.builtin.template:
    src: Caddyfile.j2
    dest: "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy/Caddyfile"
    mode: "0644"
  notify: Restart container service {{ caddy_socket_proxy_service_name }}-caddy-socket-proxy
- name: Caddy socket proxy socket for {{ caddy_socket_proxy_service_name }}
  ansible.builtin.import_role:
    name: uumas.general.systemd_socket
  vars:
    systemd_socket_name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
    systemd_socket_requires:
      - "{{ caddy_socket_proxy_target_container }}.service"
- name: Caddy container for {{ caddy_socket_proxy_service_name }}
  ansible.builtin.import_role:
    name: container
  vars:
    container_name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
    container_image: "docker.io/library/caddy:2-alpine"
    container_mounts:
      - type: bind
        source: "/srv/{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy/mounts/caddy"
        destination: /etc/caddy
        readonly: true
    container_networks:
      - name: "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy"
        ip: "{{ caddy_socket_proxy_container_ip }}"
    container_requires:
      - "{{ caddy_socket_proxy_service_name }}-caddy-socket-proxy.socket"
      - "{{ caddy_socket_proxy_target_container }}.service"
    container_auto_start: false
    container_auto_update: "{{ caddy_socket_proxy_auto_update }}"

View File

@@ -0,0 +1,12 @@
# {{ ansible_managed }}
{
    servers {
        trusted_proxies_unix
    }
}
http:// {
    bind fd/3
    reverse_proxy {{ caddy_socket_proxy_target_container }}:{{ caddy_socket_proxy_target_http_port }}
}

View File

@@ -19,7 +19,6 @@
service_container_http_port: 8080
service_domains: "{{ example_domains }}"
service_database_type: postgres
service_postgres_tag: 16-alpine
service_container_publish_ports:
- "127.0.0.1:8080:8080"
- "0.0.0.0:4443:8043"
@@ -27,7 +26,7 @@
- network-online.target
service_container_env:
TZ: "Etc/UTC"
DB_HOST: hello-world-db
DB_HOST: postgres
DB_USER: hello-world
DB_PASSWORD__FILE: /run/secrets/postgres
service_additional_containers:

View File

@@ -10,7 +10,7 @@
opt:
parent: "{{ ansible_facts.default_ipv4.interface if network_driver == 'macvlan' else omit }}"
quadlet_options:
- |
- |-
[Service]
ExecStopPost=/usr/bin/podman network rm {{ network_name }}
notify:

View File

@@ -1,2 +1,3 @@
---
nextcloud_tag: stable
nextcloud_install_harp: false

View File

@@ -8,7 +8,7 @@ argument_specs:
type: list
required: true
elements: str
nextcloud_admin_pw:
nextcloud_admin_password:
description: Password of the initial admin user
type: str
required: true
@@ -17,3 +17,8 @@ argument_specs:
type: str
required: false
default: stable
nextcloud_install_harp:
description: Whether to install HaRP for nextcloud
type: bool
required: false
default: false
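
As a hedged sketch of the new opt-in flag (the role name and variable values here are assumptions for illustration only), enabling HaRP just means setting nextcloud_install_harp alongside the existing required arguments:

- name: Nextcloud with HaRP enabled
  ansible.builtin.include_role:
    name: nextcloud                      # assumed role name
  vars:
    nextcloud_domains:
      - cloud.example.org                # placeholder domain
    nextcloud_admin_password: "{{ vault_nextcloud_admin_password }}"  # placeholder secret lookup
    nextcloud_install_harp: true         # opt in to the HaRP container; defaults to false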

View File

@@ -14,8 +14,8 @@
source: data
destination: /var/www/html
service_container_secrets:
- name: adminpw
value: "{{ nextcloud_admin_pw }}"
- name: admin-password
value: "{{ nextcloud_admin_password }}"
- name: harp-shared-key
service_container_env:
POSTGRES_HOST: postgres
@@ -26,39 +26,6 @@
TRUSTED_PROXIES: 10.0.0.0/8
NEXTCLOUD_TRUSTED_DOMAINS: "{{ nextcloud_domains | join(' ') }}"
NEXTCLOUD_ADMIN_USER: admin
NEXTCLOUD_ADMIN_PASSWORD_FILE: /run/secrets/adminpw
service_additional_containers:
- name: cron
entrypoint: /cron.sh
- name: harp
add_capabilities:
- CAP_SYS_ADMIN
image: quay.io/podman/stable:latest
user: podman
entrypoint: /entrypoint.sh
devices:
- source: /dev/fuse
mounts:
- type: template
source: containers.conf.j2
destination: /etc/containers/containers.conf
- type: template
source: harp_entrypoint.sh.j2
destination: /entrypoint.sh
mode: "0755"
- type: volume
source: harp-certs
destination: /certs
# - type: volume
# source: harp-containers
# destination: /home/podman/.local/share/containers
env: {}
secrets:
- name: harp-shared-key
publish_ports:
- name: harp
type: socket
container_port: 8780
service_vhost_locations:
- path: /exapps/*
proxy_target_socket: /run/nextcloud-harp-socat.sock
NEXTCLOUD_ADMIN_PASSWORD_FILE: /run/secrets/admin-password
service_additional_containers: "{{ _nextcloud_additional_containers }}"
service_vhost_locations: "{{ _nextcloud_vhost_locations }}"

View File

@@ -0,0 +1,45 @@
---
_nextcloud_cron_container:
  name: cron
  entrypoint: /cron.sh
_nextcloud_harp_container:
  name: harp
  add_capabilities:
    - CAP_SYS_ADMIN
  image: quay.io/podman/stable:latest
  user: podman
  entrypoint: /entrypoint.sh
  devices:
    - source: /dev/fuse
  mounts:
    - type: template
      source: containers.conf.j2
      destination: /etc/containers/containers.conf
    - type: template
      source: harp_entrypoint.sh.j2
      destination: /entrypoint.sh
      mode: "0755"
    - type: volume
      source: harp-certs
      destination: /certs
    - type: volume
      source: harp-containers
      destination: /home/podman/.local/share/containers
  env: {}
  secrets:
    - name: harp-shared-key
  publish_ports:
    - name: harp
      type: socket
      container_port: 8780
_nextcloud_additional_containers: >-
  {{
    [_nextcloud_cron_container]
    + ([_nextcloud_harp_container] if nextcloud_install_harp else [])
  }}
_nextcloud_harp_vhost_locations:
  - path: /exapps/*
    proxy_target_socket: /run/nextcloud-harp-socat.sock
_nextcloud_vhost_locations: >-
  {{ _nextcloud_harp_vhost_locations if nextcloud_install_harp else [] }}

View File

@@ -51,16 +51,16 @@
container_wants: "{{ _service_container_wants }}"
container_auto_update: "{{ service_auto_update }}"
- name: Socat for http of {{ service_name }}
- name: Caddy socket proxy for http of {{ service_name }}
ansible.builtin.include_role:
name: socat
name: caddy_socket_proxy
when: service_container_http_port > 0
vars:
socat_service_name: "{{ service_name }}"
socat_target_http_port: "{{ service_container_http_port }}"
socat_container_ip: >-
caddy_socket_proxy_service_name: "{{ service_name }}"
caddy_socket_proxy_target_http_port: "{{ service_container_http_port }}"
caddy_socket_proxy_container_ip: >-
{{ service_container_ip | ansible.utils.ipmath(257) if _service_static_ip else '' }}
socat_auto_update: "{{ service_auto_update }}"
caddy_socket_proxy_auto_update: "{{ service_auto_update }}"
- name: Socat for socket published ports of {{ service_name }}
ansible.builtin.include_role:

View File

@@ -28,6 +28,8 @@
OAUTH2_PROXY_SKIP_PROVIDER_BUTTON: "true"
OAUTH2_PROXY_EMAIL_DOMAINS: "*"
OAUTH2_PROXY_REVERSE_PROXY: "true"
container_requires:
- "{{ service_name }}-oauth2-proxy.socket"
container_auto_update: "{{ service_auto_update }}"
- name: Socket for OAuth2 Proxy for {{ service_name }}

View File

@@ -7,13 +7,18 @@ _service_container_networks: >-
}]
+ (
[{
'name': service_name ~ '-socat',
'name': service_name ~ '-caddy-socket-proxy',
'ip': service_container_ip | ansible.utils.ipmath(256) if _service_static_ip else ''
}] if service_container_http_port > 0 else []
)
+ (
service_container_additional_networks
+ (_service_container_publish_socket_ports | map(attribute='name') | map('regex_replace', '^', service_name ~ '-'))
+ (
_service_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')
| map('regex_replace', '$', '-socat')
)
) | map('community.general.dict_kv', 'name')
}}
_service_static_ip: "{{ service_container_ip | length > 0 }}"
@@ -28,8 +33,8 @@ _service_container_requires: >-
_service_container_wants: >-
{{
service_wants
+ ([service_name + '-socat.socket'] if service_container_http_port > 0 else [])
+ ([service_name + '-oauth2-proxy-socat.socket'] if _service_oauth2_proxy else [])
+ ([service_name + '-caddy-socket-proxy.socket'] if service_container_http_port > 0 else [])
+ ([service_name + '-oauth2-proxy.socket'] if _service_oauth2_proxy else [])
+ _service_container_publish_socket_ports
| map(attribute='name')
| map('regex_replace', '^', service_name ~ '-')

View File

@@ -2,7 +2,7 @@
_service_native_socket: "{{ service_domains | length > 0 and service_container_http_port == 0 }}"
_service_socket_path: >-
/run/{{ service_name ~ ('-socat' if not _service_native_socket else '' ) }}.sock
/run/{{ service_name ~ ('-caddy-socket-proxy' if not _service_native_socket else '' ) }}.sock
_service_replacement_host_header:
Host: "{{ service_name }}:{{ service_container_http_port }}"

View File

@@ -19,4 +19,12 @@ _service_container_secrets: >-
'target': service_database_secret_target
}] if _service_setup_database else []
)
+ (
[{
'name': _service_database_name ~ '-url',
'value': 'postgres://' ~ service_name | replace('-', '_') ~ ':' ~ service_podman_secrets[service_name ~ '-postgres'] ~ '@postgres/' ~ service_name | replace('-', '_') ~ '?sslmode=disable',
'type': service_database_secret_type,
'target': service_database_secret_target ~ '-url'
}] if service_podman_secrets[service_name ~ '-postgres'] is defined else []
)
}}

View File

@@ -82,7 +82,7 @@
vhost_domains:
- "{{ synapse_external_domain }}:8448"
vhost_proxy_target_netproto: unix
vhost_proxy_target_socket: "/run/synapse-socat.sock"
vhost_proxy_target_socket: "/run/synapse-caddy-socket-proxy.sock"
- name: Open port for synapse federation
ansible.posix.firewalld:

View File

@@ -0,0 +1,9 @@
---
argument_specs:
  main:
    description: Installs Windmill, with the worker running Podman in Podman
    options:
      windmill_domain:
        description: The domain to use for Windmill
        type: str
        required: true
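
A minimal play using the role might look like the sketch below; the host group and domain are placeholders rather than values from this change set:

- name: Install windmill
  hosts: windmill_hosts                        # placeholder inventory group
  roles:
    - role: windmill
      vars:
        windmill_domain: windmill.example.org  # placeholder domain passed to the role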

View File

@@ -0,0 +1,66 @@
---
- name: Windmill service
  ansible.builtin.import_role:
    name: service
  vars:
    service_name: windmill
    service_container_image: ghcr.io/windmill-labs/windmill:main
    service_container_mounts:
      - type: volume
        source: worker-logs
        destination: /tmp/windmill/logs
    service_container_http_port: 8000
    service_domains:
      - "{{ windmill_domain }}"
    service_database_type: postgres
    service_container_env:
      DATABASE_URL_FILE: /run/secrets/postgres-url
      MODE: server
    service_additional_containers:
      - name: worker
        image: quay.io/podman/stable:latest
        user: podman
        entrypoint: /entrypoint.sh
        mounts:
          - type: volume
            source: worker-logs
            destination: /worker-logs
          - type: volume
            source: worker-dependency-cache
            destination: /worker-dependency-cache
          - type: template
            source: worker_entrypoint.sh.j2
            destination: /entrypoint.sh
            mode: "0755"
          - type: volume
            source: worker-containers
            destination: /home/podman/.local/share/containers
        publish_ports: []
        env: {}
      - name: worker-native
        env:
          DATABASE_URL_FILE: /run/secrets/postgres-url
          MODE: worker
          WORKER_TYPE: native
          NATIVE_MODE: "true"
          NUM_WORKERS: "8"
          SLEEP_QUEUE: "200"
      - name: lsp
        image: ghcr.io/windmill-labs/windmill-extra:latest
        secrets: []
        mounts:
          - type: volume
            source: lsp-cache
            destination: /puls/.cache
        publish_ports:
          - name: lsp
            type: socket
            container_port: 3001
        env:
          ENABLE_LSP: "true"
          ENABLE_MULTIPLAYER: "false"
          ENABLE_DEBUGGER: "false"
          WINDMILL_BASE_URL: http://windmill:8000
    service_vhost_locations:
      - path: /ws/*
        proxy_target_socket: /run/windmill-lsp-socat.sock

View File

@@ -0,0 +1,26 @@
#!/bin/bash
# {{ ansible_managed }}
_term() {
  echo "Received SIGTERM, stopping all containers"
  kill "$child"
}
podman system service -t 0 &
podman run \
  --rm \
  -v /run/secrets/postgres-url:/run/secrets/postgres-url:ro \
  -e DATABASE_URL_FILE=/run/secrets/postgres-url \
  -e MODE=worker \
  -e WORKER_GROUP=default \
  -e ENABLE_UNSHARE_PID="true" \
  -v /tmp/storage-run-1000/podman/podman.sock:/var/run/docker.sock \
  -v /worker-logs:/tmp/windmill/logs \
  -v /worker-dependency-cache:/tmp/windmill/cache \
  --name worker \
  --network host \
  ghcr.io/windmill-labs/windmill:main &
child=$!
trap _term SIGTERM
wait $!