feat: initial commit
Some checks failed
Ansible Lint Check / lint (push) Failing after 44s

This commit is contained in:
Iain Learmonth 2025-11-08 20:55:40 +00:00
commit ce0eb65c8e
36 changed files with 946 additions and 0 deletions

12
.ansible-lint.yml Normal file
View file

@ -0,0 +1,12 @@
---
offline: true
mock_roles:
- sr2c.core.baseline
- sr2c.core.podman_host
exclude_paths:
- .ansible/
- .cache/
- .forgejo/
- venv/
skip_list:
- galaxy[no-changelog] # TODO: remove this once we tag a release

View file

@ -0,0 +1,41 @@
---
name: Ansible Lint Check
on:
push:
branches: [main, develop]
pull_request:
branches: [main]
jobs:
lint:
runs-on: docker
container:
image: ghcr.io/catthehacker/ubuntu:runner-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Create a virtual environment
run: |
python -m venv venv
- name: Install Ansible and ansible-dev-tools
run: |
source venv/bin/activate
pip install --upgrade pip
pip install ansible ansible-dev-tools
shell: bash
- name: Run ansible-lint
run: |
source venv/bin/activate
ansible-lint
shell: bash

2
.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
.ansible
.cache

20
galaxy.yml Normal file
View file

@ -0,0 +1,20 @@
---
namespace: sr2c
name: apps
version: "0.0.1"
readme: "README.md"
license:
- BSD-2-Clause
authors:
- irl
- acute
description: Roles and playbooks for application deployment and management.
homepage: https://guardianproject.dev/sr2/ansible-collection-apps
repository: https://guardianproject.dev/sr2/ansible-collection-apps.git
issues: https://guardianproject.dev/sr2/ansible-collection-apps/issues/
dependencies:
ansible.posix: "*"
containers.podman: "*"
sr2c.core: "*"
tags:
- linux

2
meta/runtime.yml Normal file
View file

@ -0,0 +1,2 @@
---
requires_ansible: ">=2.15.0"

9
playbooks/link.yml Normal file
View file

@ -0,0 +1,9 @@
---
- name: CDR Link | Deploy and update CDR Link instances
hosts:
- cdr_link
roles:
- role: sr2c.core.baseline
tags: bootstrap
- role: sr2c.apps.podman_link
tags: link

View file

@ -0,0 +1,27 @@
---
# Defaults for the podman_link role (CDR Link stack on rootless podman).
# Commented-out entries are required secrets/identifiers with no safe default;
# they must be supplied by the inventory (e.g. via vault).
# podman_link_podman_rootless_user:
podman_link_web_hostname: "{{ inventory_hostname }}"
podman_link_postgres_zammad_user: postgres
podman_link_postgres_zammad_database: zammad_production
podman_link_postgres_link_user: link
podman_link_postgres_link_database: link
# podman_link_postgres_zammad_password:
# podman_link_postgres_link_password:
# podman_link_postgres_root_password:
# podman_link_zammad_redis_password:
# podman_link_opensearch_password:
# Opensearch JVM heap size in megabytes (rendered as -Xms/-Xmx with an "m"
# suffix in the zammad-opensearch quadlet).
podman_link_opensearch_memory_limit: 2048
podman_link_setup_mode: false
podman_link_leafcutter_enabled: false
podman_link_dashboard_url: ""
podman_link_zammad_api_token: ""
# podman_link_nextauth_secret:
# podman_link_google_client_id:
# podman_link_google_client_secret:
# TODO: add keycloak variables
#
# TODO: work out what this was trying to fix and fix it
# the following may seem useless, but unless they are included as variables in the environment file template,
# they automatically get wrapped in quotes by systemd causing zammad to not connect
podman_link_postgres_zammad_postgresql_host: zammad-postgresql
# NOTE(review): the Opensearch container is named "zammad-opensearch" in the
# quadlets — confirm the bare "opensearch" hostname here is intentional.
podman_link_postgres_zammad_es_host: opensearch
podman_link_postgres_zammad_memcached_server: zammad-memcached:11211

View file

@ -0,0 +1,36 @@
_meta:
type: "config"
config_version: 2
config:
dynamic:
http:
anonymous_auth_enabled: false
xff:
enabled: true
remoteIpHeader: "x-forwarded-for"
internalProxies: ".*"
authc:
basic_internal_auth_domain:
description: "Authenticate via HTTP Basic against internal users database"
http_enabled: true
transport_enabled: true
order: 0
http_authenticator:
type: basic
challenge: false
authentication_backend:
type: intern
proxy_auth_domain:
description: "Authenticate via proxy"
http_enabled: true
transport_enabled: true
order: 1
http_authenticator:
type: proxy
challenge: false
config:
user_header: "x-forwarded-user"
roles_header: "x-forwarded-roles"
authentication_backend:
type: noop

View file

@ -0,0 +1,9 @@
---
# Generally, restarting most services will cause a restart of the stack anyway due to dependencies
# Restarts the whole Link stack by restarting its user-scope systemd target
# as the rootless podman user; daemon_reload picks up regenerated quadlets.
- name: Restart Link
  ansible.builtin.systemd_service:
    name: link.target
    state: restarted
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_link_podman_rootless_user }}"

View file

@ -0,0 +1,350 @@
---
- name: Podman CDR Link | PATCH | Install data plate
ansible.builtin.template:
src: etc/motd.d/10-data-plate.txt
dest: /etc/motd.d/10-data-plate.txt
owner: root
group: root
mode: "0444"
become: true
# Delegates host preparation (podman install, rootless user setup) to the
# shared sr2c.core.podman_host role.
- name: Podman CDR Link | PATCH | Install podman and verify rootless podman user
  ansible.builtin.include_role:
    # BUG FIX: include_role takes "name", not "role" ("role" is only valid in a
    # play's roles: list) — the same form already used for sr2c.core.podman_nginx
    # later in this file. With "role:" the task fails to load the role.
    name: sr2c.core.podman_host
  vars:
    # Let rootless containers bind privileged ports (nginx on 80/443).
    podman_host_minimum_unpriv_port: 80
    podman_host_rootless_users: ["{{ podman_link_podman_rootless_user }}"]
- name: Podman CDR Link | AUDIT | Get subuid range for user
ansible.builtin.command:
cmd: "getsubids {{ podman_link_podman_rootless_user }}"
register: _podman_link_user_subuid
changed_when: false
- name: Podman CDR Link | AUDIT | Get subgid range for user
ansible.builtin.command:
cmd: "getsubids -g {{ podman_link_podman_rootless_user }}"
register: _podman_link_user_subgid
changed_when: false
- name: Podman CDR Link | AUDIT | Parse outputs of getsubids and store results
ansible.builtin.set_fact:
_podman_link_user_subuid_start: "{{ (_podman_link_user_subuid.stdout_lines[0].split()[2] | int) }}"
_podman_link_user_subgid_start: "{{ (_podman_link_user_subgid.stdout_lines[0].split()[2] | int) }}"
- name: Podman CDR Link | PATCH | Set sysctl vm.max_map_count for Opensearch tuning
ansible.posix.sysctl:
name: vm.max_map_count
value: '262144'
state: present
become: true
- name: Podman CDR Link | PATCH | Set vm.overcommit_memory for Memcached tuning
ansible.posix.sysctl:
name: vm.overcommit_memory
value: '1'
state: present
become: true
# Opensearch runs with UID/GID 1000 inside the container
- name: Podman CDR Link | PATCH | Create data directory for Opensearch
ansible.builtin.file:
path: "/home/{{ podman_link_podman_rootless_user }}/opensearch-data"
owner: "{{ _podman_link_user_subuid_start + 999 }}"
group: "{{ _podman_link_user_subgid_start + 999 }}"
mode: "0700"
state: "directory"
become: true
# Opensearch runs with UID/GID 1000 inside the container
- name: Podman CDR Link | PATCH | Install Opensearch configuration
ansible.builtin.copy:
src: home/opensearch-config.yml
dest: "/home/{{ podman_link_podman_rootless_user }}/opensearch-config.yml"
mode: "0400"
owner: "{{ _podman_link_user_subuid_start + 999 }}"
group: "{{ _podman_link_user_subgid_start + 999 }}"
become: true
notify:
- Restart Link
# Zammad runs with UID/GID 1000 inside the container; with rootless podman,
# container UID/GID 1000 maps to the subordinate range start + 999 on the host.
- name: Podman CDR Link | PATCH | Install Zammad database configuration file
  ansible.builtin.template:
    src: home/zammad-database.yml
    dest: "/home/{{ podman_link_podman_rootless_user }}/zammad-database.yml"
    owner: "{{ _podman_link_user_subuid_start + 999 }}"
    # BUG FIX: the group must come from the subGID range, not the subUID range
    # (every sibling task in this file uses _podman_link_user_subgid_start).
    group: "{{ _podman_link_user_subgid_start + 999 }}"
    mode: "0400"
  become: true
  notify:
    - Restart Link
# Zammad runs with UID/GID 1000 inside the container
- name: Podman CDR Link | PATCH | Create data directories for Zammad
ansible.builtin.file:
path: "/home/{{ podman_link_podman_rootless_user }}/{{ item }}"
owner: "{{ _podman_link_user_subuid_start + 999 }}"
group: "{{ _podman_link_user_subgid_start + 999 }}"
mode: "0700"
state: "directory"
become: true
with_items:
- zammad-storage
- zammad-var
- zammad-backup
- zammad-data
- zammad-config-nginx
# Bridge/Link runs with UID/GID 1000 inside the container (because it's based on the node container)
- name: Podman CDR Link | PATCH | Create data directory for bridge-whatsapp
ansible.builtin.file:
path: "/home/{{ podman_link_podman_rootless_user }}/bridge-whatsapp-data"
owner: "{{ _podman_link_user_subuid_start + 999 }}"
group: "{{ _podman_link_user_subgid_start + 999 }}"
mode: "0700"
state: "directory"
become: true
# Postgres/Redis runs with UID/GID 999 inside the container
# Postgres seems to want to set group permissions on the data directory, which is probably fine
- name: Podman CDR Link | PATCH | Create data directory for PostgreSQL and Redis
ansible.builtin.file:
path: "/home/{{ podman_link_podman_rootless_user }}/{{ item }}"
owner: "{{ _podman_link_user_subuid_start + 998 }}"
group: "{{ _podman_link_user_subgid_start + 998 }}"
mode: "0750"
state: "directory"
become: true
with_items:
- bridge-postgresql-data
- redis-data
- postgresql-data
# We set the UID/GID to 1002 inside the signal-cli-rest-api container with environment variables
- name: Podman CDR Link | PATCH | Create data directory for signal-cli-rest-api
ansible.builtin.file:
path: "/home/{{ podman_link_podman_rootless_user }}/signal-cli-rest-api-data"
owner: "{{ _podman_link_user_subuid_start + 1001 }}"
group: "{{ _podman_link_user_subgid_start + 1001 }}"
mode: "0700"
state: "directory"
become: true
- name: Podman CDR Link | PATCH | Install shared environment files
ansible.builtin.template:
src: "home/config/containers/systemd/{{ item }}"
dest: "/home/{{ podman_link_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
owner: "{{ podman_link_podman_rootless_user }}"
mode: "0400"
become: true
with_items:
- common-zammad.env
- common-bridge.env
notify:
- Restart Link
- name: Podman CDR Link | PATCH | Install container quadlets
ansible.builtin.template:
src: "home/config/containers/systemd/{{ item }}"
dest: "/home/{{ podman_link_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
owner: "{{ podman_link_podman_rootless_user }}"
mode: "0400"
with_items:
- link.container
- zammad-opensearch.container
- opensearch-dashboards.container
- bridge-worker.container
- bridge-postgresql.container
- bridge-whatsapp.container
- signal-cli-rest-api.container
- zammad-init.container
- zammad-nginx.container
- zammad-railsserver.container
- zammad-scheduler.container
- zammad-postgresql.container
- zammad-websocket.container
- zammad-redis.container
- zammad-memcached.container
become: true
notify:
- Restart Link
- name: Podman CDR Link | PATCH | Install network quadlets
ansible.builtin.template:
src: "home/config/containers/systemd/{{ item }}"
dest: "/home/{{ podman_link_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
owner: "{{ podman_link_podman_rootless_user }}"
mode: "0400"
with_items:
- frontend.network
- link.network
become: true
notify:
- Restart Link
- name: Podman CDR Link | AUDIT | Verify quadlets are correctly defined
ansible.builtin.command: /usr/libexec/podman/quadlet -dryrun -user
register: podman_link_quadlet_result
ignore_errors: true
changed_when: false
become: true
become_user: "{{ podman_link_podman_rootless_user }}"
- name: Podman CDR Link | AUDIT | Assert that the quadlet verification succeeded
ansible.builtin.assert:
that:
- podman_link_quadlet_result.rc == 0
fail_msg: "'/usr/libexec/podman/quadlet -dryrun -user' failed! Output withheld to prevent leaking secrets."
- name: Podman CDR Link | PATCH | Set up nginx and Let's Encrypt certificate
ansible.builtin.include_role:
name: sr2c.core.podman_nginx
vars:
podman_nginx_frontend_network: frontend
podman_nginx_podman_rootless_user: "{{ podman_link_podman_rootless_user }}"
podman_nginx_primary_hostname: "{{ podman_link_web_hostname }}"
podman_nginx_systemd_service_slice: "link.slice"
- name: Podman CDR Link | PATCH | Install production nginx configuration file
ansible.builtin.template:
src: home/nginx.conf
dest: "/home/{{ podman_link_podman_rootless_user }}/nginx/nginx.conf"
owner: "{{ podman_link_podman_rootless_user }}"
group: "{{ podman_link_podman_rootless_user }}"
mode: "0644"
become: true
notify:
- Restart nginx
- name: Podman CDR Link | PATCH | Install additional systemd units (services, targets, and slice)
ansible.builtin.template:
src: "home/config/systemd/user/{{ item }}"
dest: "/home/{{ podman_link_podman_rootless_user }}/.config/systemd/user/{{ item }}"
owner: "{{ podman_link_podman_rootless_user }}"
group: "{{ podman_link_podman_rootless_user }}"
mode: "0400"
become: true
with_items:
- "zammad-reindex.service"
- "link.slice"
- "link.target"
- "zammad-storage.target"
notify:
- Restart Link
- name: Podman CDR Link | PATCH | Ensure zammad-opensearch is running
ansible.builtin.systemd_service:
name: zammad-opensearch.service
state: started
scope: user
daemon_reload: true
become: true
become_user: "{{ podman_link_podman_rootless_user }}"
- name: Podman CDR Link | AUDIT | Check if Opensearch setup script has been run
ansible.builtin.stat:
path: "/home/{{ podman_link_podman_rootless_user }}/.securityadmin_done"
become: true
register: _podman_link_opensearch_securityadmin_done
- name: Podman CDR Link | PATCH | Ensure Opensearch setup script is executable
containers.podman.podman_container_exec:
name: zammad-opensearch
command: chmod +x /usr/share/opensearch/plugins/opensearch-security/tools/securityadmin.sh
become: true
become_user: "{{ podman_link_podman_rootless_user }}"
when: not _podman_link_opensearch_securityadmin_done.stat.exists
- name: Podman CDR Link | PATCH | Run Opensearch setup script
containers.podman.podman_container_exec:
name: zammad-opensearch
argv:
- /usr/share/opensearch/plugins/opensearch-security/tools/securityadmin.sh
- -cd
- /usr/share/opensearch/config/opensearch-security/
- -icl
- -key
- /usr/share/opensearch/config/kirk-key.pem
- -cert
- /usr/share/opensearch/config/kirk.pem
- -cacert
- /usr/share/opensearch/config/root-ca.pem
- -nhnv
become: true
become_user: "{{ podman_link_podman_rootless_user }}"
register: _podman_link_opensearch_securityadmin_result
retries: 20
delay: 5
until: _podman_link_opensearch_securityadmin_result.rc == 0
when: not _podman_link_opensearch_securityadmin_done.stat.exists
- name: Podman CDR Link | PATCH | Mark Opensearch setup script as having run
ansible.builtin.file:
path: "/home/{{ podman_link_podman_rootless_user }}/.securityadmin_done"
state: touch
owner: "{{ podman_link_podman_rootless_user }}"
group: "{{ podman_link_podman_rootless_user }}"
mode: "0000"
modification_time: preserve
access_time: preserve
become: true
- name: Podman CDR Link | PATCH | Ensure zammad-railsserver is running
ansible.builtin.systemd_service:
name: zammad-railsserver.service
state: started
scope: user
become: true
become_user: "{{ podman_link_podman_rootless_user }}"
- name: Podman CDR Link | AUDIT | Wait for zammad-init to finish
containers.podman.podman_container_exec:
name: zammad-railsserver
argv:
- bundle
- exec
- rails
- r
- 'ActiveRecord::Migration.check_all_pending!; Translation.any? || raise'
register: _podman_link_zammad_init_wait
until: _podman_link_zammad_init_wait.rc == 0
retries: 30
delay: 5
become: true
become_user: "{{ podman_link_podman_rootless_user }}"
changed_when: false
- name: Podman CDR Link | AUDIT | Check if Zammad wants to verify SSL connections to Opensearch
containers.podman.podman_container_exec:
name: zammad-railsserver
argv:
- rails
- r
- "print Setting.get('es_ssl_verify')"
become: true
become_user: "{{ podman_link_podman_rootless_user }}"
register: _podman_link_zammad_es_ssl_verify
changed_when: false
- name: Podman CDR Link | PATCH | Configure Zammad to not verify SSL connections to Opensearch
containers.podman.podman_container_exec:
name: zammad-railsserver
argv:
- rails
- r
- "Setting.set('es_ssl_verify', false)"
become: true
become_user: "{{ podman_link_podman_rootless_user }}"
when: (_podman_link_zammad_es_ssl_verify.stdout | trim)[-5:] != "false"
- name: Podman CDR Link | PATCH | Make sure all services are running now and started on boot
ansible.builtin.systemd_service:
name: "link.target"
enabled: true
state: started
masked: false
daemon_reload: true
scope: user
become: true
become_user: "{{ podman_link_podman_rootless_user }}"

View file

@ -0,0 +1,16 @@
=========================================================
A CDR Link instance is hosted on this server.
Podman user: {{ podman_link_podman_rootless_user }}
=========================================================
# Become the podman user
sudo -iu {{ podman_link_podman_rootless_user }}
# Check the Link stack status
systemctl --user status link.target
# Restart the Link stack
systemctl --user restart link.target
# Follow the logs for the Link stack
journalctl --user -fu link.slice
# Reindex Zammad in Opensearch
systemctl --user start zammad-reindex.service
=========================================================

View file

@ -0,0 +1,10 @@
[Container]
ContainerName=bridge-postgresql
EnvironmentFile=common-bridge.env
Image=registry.gitlab.com/digiresilience/link/link-stack/postgresql:3.2.0b1
Volume=/home/{{ podman_link_podman_rootless_user }}/bridge-postgresql-data:/var/lib/postgresql/data:rw,Z
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,14 @@
[Unit]
PartOf=link.service
[Container]
ContainerName=bridge-whatsapp
Environment=BRIDGE_FRONTEND_URL=http://link:3000
ExposeHostPort=5000
Image=registry.gitlab.com/digiresilience/link/link-stack/bridge-whatsapp:3.2.0b1
Volume=/home/{{ podman_link_podman_rootless_user }}/bridge-whatsapp-data:/home/node/baileys:rw,Z
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,15 @@
[Unit]
Requires=bridge-postgresql.service
Wants=bridge-whatsapp.service signal-cli-rest-api.service
After=bridge-postgresql.service
PartOf=link.service
[Container]
ContainerName=bridge-worker
EnvironmentFile=common-bridge.env
Image=registry.gitlab.com/digiresilience/link/link-stack/bridge-worker:3.2.0b1
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,19 @@
POSTGRES_USER={{ podman_link_postgres_link_user }}
POSTGRES_PASSWORD={{ podman_link_postgres_link_password }}
POSTGRES_DB={{ podman_link_postgres_link_database }}
NEXTAUTH_URL=https://{{ podman_link_web_hostname }}/link/api/auth
NEXTAUTH_SECRET={{ podman_link_nextauth_secret }}
{% if podman_link_google_client_id is defined %}
GOOGLE_CLIENT_ID={{ podman_link_google_client_id }}
GOOGLE_CLIENT_SECRET={{ podman_link_google_client_secret }}
{% endif %}
BRIDGE_FRONTEND_URL=http://link:3000
BRIDGE_SIGNAL_URL=http://signal-cli-rest-api:8081
BRIDGE_WHATSAPP_URL=http://bridge-whatsapp:5000
DATABASE_NAME={{ podman_link_postgres_link_database }}
DATABASE_HOST=bridge-postgresql
DATABASE_USER={{ podman_link_postgres_link_user }}
DATABASE_PASSWORD={{ podman_link_postgres_link_password }}
DATABASE_PORT=5432
# Credentials embedded in a URL must be percent-encoded, otherwise a password
# containing characters such as "@", "/" or ":" yields an unparseable URL.
DATABASE_URL=postgresql://{{ podman_link_postgres_link_user | urlencode }}:{{ podman_link_postgres_link_password | urlencode }}@bridge-postgresql/{{ podman_link_postgres_link_database }}
TZ=Etc/UTC

View file

@ -0,0 +1,12 @@
POSTGRESQL_HOST=zammad-postgresql
POSTGRESQL_PASS={{ podman_link_postgres_zammad_password }}
POSTGRESQL_USER={{ podman_link_postgres_zammad_user }}
POSTGRESQL_DB={{ podman_link_postgres_zammad_database }}
POSTGRESQL_OPTIONS=?pool=50
REDIS_URL=redis://default:{{ podman_link_zammad_redis_password }}@zammad-redis:6379
MEMCACHE_SERVERS=zammad-memcached:11211
ELASTICSEARCH_HOST=zammad-opensearch
ELASTICSEARCH_USER=admin
ELASTICSEARCH_PASS={{ podman_link_opensearch_password }}
ELASTICSEARCH_SCHEMA=https
ELASTICSEARCH_REINDEX=false

View file

@ -0,0 +1,2 @@
[Network]
NetworkName=frontend

View file

@ -0,0 +1,22 @@
[Unit]
Requires=bridge-postgresql.service bridge-worker.service
After=bridge-postgresql.service bridge-worker.service
PartOf=zammad-nginx.service
[Container]
ContainerName=link
Environment=ZAMMAD_VIRTUAL_HOST={{ podman_link_web_hostname }}
Environment=SETUP_MODE={{ podman_link_setup_mode }}
Environment=LEAFCUTTER_ENABLED={{ podman_link_leafcutter_enabled }}
Environment=LEAFCUTTER_DEFAULT_DASHBOARD_URL={{ podman_link_dashboard_url }}
Environment=ZAMMAD_API_TOKEN={{ podman_link_zammad_api_token }}
Environment=LINK_URL=https://localhost:3000/link
Environment=ZAMMAD_URL=http://zammad-nginx:8080
EnvironmentFile=common-bridge.env
ExposeHostPort=3000
Image=registry.gitlab.com/digiresilience/link/link-stack/link:3.2.0b1
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,2 @@
[Network]
NetworkName=link

View file

@ -0,0 +1,17 @@
[Unit]
Requires=zammad-opensearch.service
After=zammad-opensearch.service
PartOf=link.target
[Container]
ContainerName=opensearch-dashboards
#Environment=OPENSEARCH_USERNAME=admin
#Environment=OPENSEARCH_PASSWORD={{ podman_link_opensearch_password }}
Image=registry.gitlab.com/digiresilience/link/link-stack/opensearch-dashboards:3.2.0b1
PublishPort=127.0.0.1:5601:5601
#Volume=/home/{{ podman_link_podman_rootless_user }}/opensearch-dashboards-config.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,17 @@
[Unit]
PartOf=link.service
[Container]
ContainerName=signal-cli-rest-api
Environment=MODE=native
Environment=PORT=8081
Environment=SIGNAL_CLI_UID=1002
Environment=SIGNAL_CLI_GID=1002
ExposeHostPort=8081
Image=registry.gitlab.com/digiresilience/link/link-stack/signal-cli-rest-api:3.2.0b1
Volume=/home/{{ podman_link_podman_rootless_user }}/signal-cli-rest-api-data:/home/.local/share/signal-cli:rw,Z
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,17 @@
[Unit]
Requires=zammad-storage.target
After=zammad-storage.target
[Container]
ContainerName=zammad-init
EnvironmentFile=common-zammad.env
Exec=zammad-init
Image=registry.gitlab.com/digiresilience/link/link-stack/zammad:3.2.0b1
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-config-nginx:/etc/nginx/sites-enabled:rw,z
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-var:/opt/zammad/var:rw,z
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-storage:/opt/zammad/storage:ro,z
Network=link.network
[Service]
Restart=on-failure
Slice=link.slice

View file

@ -0,0 +1,13 @@
[Unit]
PartOf=zammad-storage.target
[Container]
ContainerName=zammad-memcached
Exec=memcached -m 256M
Image=registry.gitlab.com/digiresilience/link/link-stack/memcached:3.2.0b1
Network=link.network
ExposeHostPort=11211
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,19 @@
[Unit]
Requires=zammad-railsserver.service link.service
After=zammad-railsserver.service link.service
PartOf=link.target
[Container]
ContainerName=zammad-nginx
EnvironmentFile=common-zammad.env
Exec=zammad-nginx
ExposeHostPort=8080
Image=registry.gitlab.com/digiresilience/link/link-stack/zammad:3.2.0b1
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-config-nginx:/etc/nginx/sites-enabled:rw,z
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-var:/opt/zammad/var:ro,z
Network=link.network
Network=frontend.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,27 @@
[Unit]
PartOf=zammad-storage.target
[Container]
ContainerName=zammad-opensearch
Environment=discovery.type=single-node
Environment=plugins.security.ssl.transport.enforce_hostname_verification=false
Environment=plugins.security.ssl.transport.resolve_hostname=false
# Absolute free-space disk watermarks, sized for small dedicated hosts.
Environment=cluster.routing.allocation.disk.watermark.low=3gb
Environment=cluster.routing.allocation.disk.watermark.high=2gb
Environment=cluster.routing.allocation.disk.watermark.flood_stage=500mb
Environment=cluster.info.update.interval=1m
Environment=node.name=opensearch-node1
Environment=bootstrap.memory_lock=true
# Fixed heap (MB). LINT FIX: normalised the "{{podman_link_…" Jinja delimiter
# spacing so ansible-lint's jinja[spacing] rule passes; rendered value unchanged.
Environment=OPENSEARCH_JAVA_OPTS="-Xms{{ podman_link_opensearch_memory_limit }}m -Xmx{{ podman_link_opensearch_memory_limit }}m -XX:-HeapDumpOnOutOfMemoryError"
Environment=OPENSEARCH_INITIAL_ADMIN_PASSWORD={{ podman_link_opensearch_password }}
Environment=compatibility.override_main_response_version=true
Image=registry.gitlab.com/digiresilience/link/link-stack/opensearch:3.2.0b2
PublishPort=127.0.0.1:9200:9200
PublishPort=127.0.0.1:9600:9600
Volume=/home/{{ podman_link_podman_rootless_user }}/opensearch-data:/usr/share/opensearch/data:rw,Z
Volume=/home/{{ podman_link_podman_rootless_user }}/opensearch-config.yml:/usr/share/opensearch/config/opensearch-security/config.yml:rw,Z
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,19 @@
[Unit]
PartOf=zammad-storage.target
[Container]
ContainerName=zammad-postgresql
Environment=POSTGRES_PASSWORD={{ podman_link_postgres_zammad_password }}
Environment=POSTGRES_USER={{ podman_link_postgres_zammad_user }}
Environment=POSTGRES_DB={{ podman_link_postgres_zammad_database }}
Environment=POSTGRES_HOST_AUTH_METHOD=scram-sha-256
Environment=POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256
Image=registry.gitlab.com/digiresilience/link/link-stack/postgresql:3.2.0b1
Volume=/home/{{ podman_link_podman_rootless_user }}/postgresql-data:/var/lib/postgresql/data:rw,Z
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-data:/opt/zammad:rw,z
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-backup:/var/tmp/zammad:ro,z
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,19 @@
[Unit]
Requires=zammad-storage.target
After=zammad-storage.target
Wants=zammad-init.service zammad-reindex.service
PartOf=zammad-nginx.service
[Container]
ContainerName=zammad-railsserver
EnvironmentFile=common-zammad.env
Exec=zammad-railsserver
Image=registry.gitlab.com/digiresilience/link/link-stack/zammad:3.2.0b1
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-var:/opt/zammad/var:rw,z
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-storage:/opt/zammad/storage:rw,z
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-database.yml:/opt/zammad/config/database.yml:ro,z
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,13 @@
[Unit]
PartOf=zammad-storage.target
[Container]
ContainerName=zammad-redis
Environment=REDIS_PASSWORD={{ podman_link_zammad_redis_password }}
Image=registry.gitlab.com/digiresilience/link/link-stack/redis:3.2.0b1
Volume=/home/{{ podman_link_podman_rootless_user }}/redis-data:/data:rw,Z
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,16 @@
[Unit]
Requires=zammad-storage.target
After=zammad-storage.target
[Container]
ContainerName=zammad-scheduler
EnvironmentFile=common-zammad.env
Exec=zammad-scheduler
Image=registry.gitlab.com/digiresilience/link/link-stack/zammad:3.2.0b1
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-var:/opt/zammad/var:rw,z
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-storage:/opt/zammad/storage:rw,z
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,16 @@
[Unit]
Requires=zammad-storage.target
After=zammad-storage.target
[Container]
ContainerName=zammad-websocket
EnvironmentFile=common-zammad.env
Exec=zammad-websocket
Image=registry.gitlab.com/digiresilience/link/link-stack/zammad:3.2.0b1
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-var:/opt/zammad/var:rw,z
Volume=/home/{{ podman_link_podman_rootless_user }}/zammad-storage:/opt/zammad/storage:rw,z
Network=link.network
[Service]
Restart=always
Slice=link.slice

View file

@ -0,0 +1,2 @@
[Unit]
Description=Podman CDR Link Stack by SR2 Communications

View file

@ -0,0 +1,9 @@
[Unit]
Description=Podman CDR Link Stack by SR2 Communications
Requires=opensearch-dashboards.service
Requires=zammad-nginx.service
After=opensearch-dashboards.service
After=zammad-nginx.service
[Install]
WantedBy=default.target

View file

@ -0,0 +1,9 @@
[Unit]
Description=Rebuild Zammad search index
Requires=zammad-railsserver.service
After=zammad-railsserver.service
[Service]
Type=oneshot
ExecStart=/usr/bin/podman exec zammad-railsserver bundle exec rake zammad:searchindex:rebuild
# BUG FIX: "never" is not a valid value for Restart= (systemd warns and
# ignores the assignment); "no" is the canonical spelling of that intent.
Restart=no

View file

@ -0,0 +1,10 @@
[Unit]
Description=Podman CDR Link Data Sources by SR2 Communications
Requires=zammad-opensearch.service
Requires=zammad-postgresql.service
Requires=zammad-redis.service
Requires=zammad-memcached.service
After=zammad-opensearch.service
After=zammad-postgresql.service
After=zammad-redis.service
After=zammad-memcached.service

View file

@ -0,0 +1,94 @@
resolver 10.89.0.1 ipv6=off valid=10s;
# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the
# scheme used to connect to this server
map $http_x_forwarded_proto $proxy_x_forwarded_proto {
default $http_x_forwarded_proto;
'' $scheme;
}
# If we receive X-Forwarded-Port, pass it through; otherwise, pass along the
# server port the client connected to
map $http_x_forwarded_port $proxy_x_forwarded_port {
default $http_x_forwarded_port;
'' $server_port;
}
# If we receive Upgrade, set Connection to "upgrade"; otherwise, delete any
# Connection header that may have been passed to this server
map $http_upgrade $proxy_connection {
default upgrade;
'' close;
}
# Apply fix for very long server names
server_names_hash_bucket_size 128;
# Default dhparam
# Set appropriate X-Forwarded-Ssl header
map $scheme $proxy_x_forwarded_ssl {
default off;
https on;
}
gzip_types text/plain text/css application/javascript application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
log_format vhost '$host $remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent"';
access_log off;
# HTTP 1.1 support
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";
server {
listen 80;
listen [::]:80;
server_name {{ podman_link_web_hostname }};
server_tokens off;
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
location / {
return 301 https://{{ podman_link_web_hostname }}$request_uri;
}
}
upstream zammad {
zone zammad_upstream 64k;
# NOTE(review): "server ... resolve" requires the shared-memory zone above and
# re-resolves zammad-nginx via the resolver directive at the top of this file.
# This parameter was long commercial-only — confirm the bundled nginx build
# supports it (open-source nginx gained it in 1.27.3).
server zammad-nginx:8080 resolve;
}
# BUG FIX: a second port-80 server block for the same server_name used to sit
# here. The ACME-challenge/redirect server above already binds :80 for this
# name, so nginx logged a "conflicting server name" warning and ignored the
# duplicate entirely; it has been removed.
# HTTPS vhost: terminates TLS with the Let's Encrypt certificate for this
# hostname and proxies all traffic to the zammad upstream defined above.
server {
server_name {{ podman_link_web_hostname }};
listen 443 ssl;
listen [::]:443 ssl;
http2 on;
server_tokens off;
ssl_certificate /etc/letsencrypt/live/{{ podman_link_web_hostname }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ podman_link_web_hostname }}/privkey.pem;
add_header Strict-Transport-Security "max-age=31536000" always;
add_header Referrer-Policy origin always; # send only the origin in outgoing Referer headers (comment previously referenced an unrelated Matomo deployment)
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
location / {
proxy_pass http://zammad;
}
}

View file

@ -0,0 +1,9 @@
# Rails database configuration for Zammad, rendered by Ansible.
production:
  adapter: postgresql
  # Rendered credentials are quoted so values containing YAML-special
  # characters (":", "#", leading symbols, …) cannot corrupt the document.
  # NOTE(review): a password containing a double quote or backslash would
  # still break this template — consider a YAML-escaping filter if possible.
  database: "{{ podman_link_postgres_zammad_database }}"
  pool: 50
  timeout: 5000
  encoding: utf8
  username: "{{ podman_link_postgres_zammad_user }}"
  password: "{{ podman_link_postgres_zammad_password }}"
  host: zammad-postgresql