Initial import; migrate some roles from irl.wip

This commit is contained in:
Iain Learmonth 2025-10-31 22:36:32 +00:00
commit 2ba6c6691b
44 changed files with 1573 additions and 0 deletions

18
roles/baseline/README.md Normal file
View file

@ -0,0 +1,18 @@
# sr2c.core.baseline
Configure an SR2 virtual machine.
## Disk Partitions and Encryption
Creates a new LVM volume group on `baseline_second_disk_device` with logical volumes for:
| Mountpoint | Default Size | Encrypted |
|----------------|-----------------|-----------|
| /var | 5GiB | No |
| /var/log | 5GiB | No |
| /var/log/audit | 5GiB | No |
| /var/tmp | 5GiB | No |
| /home | Remaining space | Yes |
It is assumed that `/home` is empty and that no migration of its data needs to occur. Data under `/var` will be
migrated for each of the new `/var` partitions.

View file

@ -0,0 +1,27 @@
---
# Defaults for the sr2c.core.baseline role.

# Location of the host (generic, sr2_de_fsn); selects a vars file under vars/.
baseline_location: generic
# Enable running the Ansible Lockdown CIS role
baseline_lockdown: true
# UK Ministry of Justice Login Banner (seems as good as any)
# https://security-guidance.service.justice.gov.uk/system-lockdown-and-hardening-standard/#appendix-a-login-banner
baseline_warning_banner: |
  THIS SYSTEM IS FOR AUTHORISED USERS ONLY.
  This is a private system; only use this system if you have specific authority to do so.
  Otherwise you are liable to prosecution under the Computer Misuse Act 1990. If you do
  not have the express permission of the operator or owner of this system, switch off or
  disconnect now to avoid prosecution.
# Local NTP servers if available
baseline_ntp_servers:
  - 0.pool.ntp.org
  - 1.pool.ntp.org
  - 2.pool.ntp.org
  - 3.pool.ntp.org
# Second disk to hold the LVM volume group (e.g. /dev/vdb). Intentionally
# undefined by default; disk partitioning is skipped when unset.
# baseline_second_disk_device:
baseline_second_disk_vg_name: "datavg"
# Logical volume sizes; remaining space goes to the encrypted /home volume.
baseline_second_disk_var_size: "5G"
baseline_second_disk_var_log_size: "5G"
baseline_second_disk_var_log_audit_size: "5G"
baseline_second_disk_var_tmp_size: "5G"

View file

@ -0,0 +1,4 @@
# systemd-resolved configuration installed by the baseline role
# (see tasks/dns_resolver.yml and resolved.conf(5)).
[Resolve]
# Require DNSSEC validation for lookups.
DNSSEC=true
# Disable link-local multicast name resolution protocols.
LLMNR=no
MulticastDNS=no

View file

@ -0,0 +1,2 @@
# Drop-in for systemd-resolved: do not synthesise A/AAAA records for the
# local hostname; the host must resolve via real DNS.
[Service]
Environment="SYSTEMD_RESOLVED_SYNTHESIZE_HOSTNAME=0"

View file

@ -0,0 +1,9 @@
---
# Handlers for the baseline role.

# Rebuild the grub configuration after bootloader settings change
# (notified from solusvm.yml).
- name: Regenerate grub config
  ansible.builtin.command:
    cmd: grub2-mkconfig -o /boot/grub2/grub.cfg

# FQCN used (the original used the bare "service" module). daemon_reload is
# required because one of the notifying tasks installs a unit drop-in under
# /etc/systemd/system/systemd-resolved.service.d/, which systemd only picks
# up after a daemon reload.
- name: Restart systemd-resolved
  ansible.builtin.systemd_service:
    name: systemd-resolved
    state: restarted
    daemon_reload: true

View file

@ -0,0 +1,160 @@
---
# Set up LVM, LUKS and filesystems on the role's second disk.

# Fail fast when required variables are missing: the passphrase protects the
# encrypted /home volume and the device is the disk to be partitioned.
- name: Disk Partitions | PRELIM | Ensure baseline_home_luks_passphrase is defined
  ansible.builtin.assert:
    that:
      - baseline_home_luks_passphrase is defined
    msg: "Variable 'baseline_home_luks_passphrase' must be defined."

- name: Disk Partitions | PRELIM | Ensure baseline_second_disk_device is defined
  ansible.builtin.assert:
    that:
      - baseline_second_disk_device is defined
    msg: "Variable 'baseline_second_disk_device' must be defined."

- name: Disk Partitions | PATCH | Ensure lvm2 is installed
  ansible.builtin.package:
    name: lvm2
    state: present
# One partition covering the whole disk, flagged for LVM.
- name: Disk Partitions | PATCH | Create LVM partition spanning entire disk
  community.general.parted:
    device: "{{ baseline_second_disk_device }}"
    number: 1
    flags: [ lvm ]
    state: present
    part_start: "0%"
    part_end: "100%"

# NOTE(review): appending "1" assumes a /dev/sdX- or /dev/vdX-style device
# name; an NVMe device would need a "p1" suffix — confirm before use on NVMe.
- name: Disk Partitions | PATCH | Create volume group
  community.general.lvg:
    vg: "{{ baseline_second_disk_vg_name }}"
    pvs: "{{ baseline_second_disk_device }}1"

- name: Disk Partitions | PATCH | Create /var logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var
    size: "{{ baseline_second_disk_var_size }}"

- name: Disk Partitions | PATCH | Create /var/log logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var_log
    size: "{{ baseline_second_disk_var_log_size }}"

- name: Disk Partitions | PATCH | Create /var/log/audit logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var_log_audit
    size: "{{ baseline_second_disk_var_log_audit_size }}"

- name: Disk Partitions | PATCH | Create /var/tmp logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var_tmp
    size: "{{ baseline_second_disk_var_tmp_size }}"

# /home takes whatever space remains after the fixed-size volumes above.
- name: Disk Partitions | PATCH | Create /home logical volume with remaining space
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: home
    shrink: false  # make idempotent
    size: "100%FREE"
- name: Disk Partitions | PATCH | Ensure cryptsetup is installed
  ansible.builtin.package:
    name: cryptsetup
    state: present

# Create (if needed) the LUKS2 container holding /home, then open it as
# /dev/mapper/home_crypt.
- name: Disk Partitions | PATCH | Encrypt /home with LUKS2 and provided passphrase
  community.crypto.luks_device:
    device: "/dev/{{ baseline_second_disk_vg_name }}/home"
    state: present
    passphrase: "{{ baseline_home_luks_passphrase }}"
    type: luks2

- name: Disk Partitions | PATCH | Open LUKS device
  community.crypto.luks_device:
    device: "/dev/{{ baseline_second_disk_vg_name }}/home"
    name: home_crypt
    state: opened
    passphrase: "{{ baseline_home_luks_passphrase }}"

# The backing device is templated from the configured VG name rather than
# the hard-coded "datavg" the original used, so the role honours
# baseline_second_disk_vg_name (the device-mapper node for LV "home" in VG
# "<vg>" is /dev/mapper/<vg>-home).
- name: Disk Partitions | PATCH | Add /home logical volume to crypttab
  community.general.crypttab:
    backing_device: "/dev/mapper/{{ baseline_second_disk_vg_name }}-home"
    name: home_crypt
    opts: discard
    state: present
# Create filesystems on the new logical volumes. Device paths are derived
# from the configured VG name instead of the hard-coded "datavg" the
# original used; /home gets its filesystem on the opened LUKS mapping.
- name: Disk Partitions | PATCH | Create xfs filesystems on new partitions
  community.general.filesystem:
    dev: "{{ item }}"
    fstype: xfs
  with_items:
    - "/dev/mapper/{{ baseline_second_disk_vg_name }}-var"
    - "/dev/mapper/{{ baseline_second_disk_vg_name }}-var_log"
    - "/dev/mapper/{{ baseline_second_disk_vg_name }}-var_log_audit"
    - "/dev/mapper/{{ baseline_second_disk_vg_name }}-var_tmp"
    - /dev/mapper/home_crypt
# /home is only checked for emptiness when it is not already mounted;
# mountpoint -q exits non-zero for a non-mountpoint.
- name: Disk Partitions | AUDIT | Check if /home is mounted
  ansible.builtin.command:
    cmd: mountpoint -q /home
  register: baseline_second_disk_home_mounted
  changed_when: false
  failed_when: false

- name: Disk Partitions | AUDIT | Check if /home is empty
  ansible.builtin.command:
    cmd: ls -A /home
  register: baseline_second_disk_home_files
  when: baseline_second_disk_home_mounted.rc != 0
  changed_when: false

# Passes when the listing above was skipped (/home already mounted) or when
# the directory is empty — /home data is never migrated (see README).
- name: Disk Partitions | AUDIT | Fail if /home is not mounted and not empty
  ansible.builtin.assert:
    that:
      - ((baseline_second_disk_home_files.skipped is defined) and baseline_second_disk_home_files.skipped) or (baseline_second_disk_home_files.stdout == "")

- name: Disk Partitions | PATCH | Ensure /home is mounted
  ansible.posix.mount:
    src: "/dev/mapper/home_crypt"
    path: '/home'
    fstype: 'xfs'
    opts: 'rw,nosuid,nodev'
    state: mounted

- name: Disk Partitions | AUDIT | Check if /var is mounted
  ansible.builtin.command:
    cmd: mountpoint -q /var
  register: baseline_second_disk_var_mounted
  changed_when: false
  failed_when: false

# One-time migration: drop to emergency mode so nothing writes to /var while
# its contents are copied to the new logical volumes, then return to the
# default target. Only runs while /var is not yet a mountpoint.
- name: Disk Partitions | PATCH | Migrate content if /var is not mounted
  when: baseline_second_disk_var_mounted.rc != 0
  block:
    - name: Disk Partitions | PATCH | Enter emergency mode
      ansible.builtin.command:
        cmd: systemctl isolate emergency.target
    # rpc_pipefs lives under /var/lib/nfs and would block moving /var aside.
    - name: Disk Partitions | PATCH | Unmount /var/lib/nfs/rpc_pipefs if mounted
      ansible.posix.mount:
        path: /var/lib/nfs/rpc_pipefs
        state: unmounted
    # Order matters: parents before children so each new mount is layered
    # over the previous one.
    - name: Disk Partitions | PATCH | Migrate data to new partitions
      ansible.builtin.include_tasks:
        file: disk_partitions_migrate.yml
      vars:
        baseline_second_disk_migrate_path: "{{ item }}"
      with_items:
        - "/var"
        - "/var/log"
        - "/var/log/audit"
        - "/var/tmp"
    - name: Disk Partitions | PATCH | Restore default mode
      ansible.builtin.command:
        cmd: systemctl isolate default.target

View file

@ -0,0 +1,31 @@
---
# Migrate one mountpoint's data onto its new logical volume. Included from
# disk_partitions.yml with baseline_second_disk_migrate_path set (e.g.
# /var). The system is in emergency mode while this runs.
- name: 'Disk Partitions | PATCH | Rename {{ baseline_second_disk_migrate_path }} to {{ baseline_second_disk_migrate_path }}.old'
  ansible.builtin.command:
    cmd: 'mv {{ baseline_second_disk_migrate_path }} {{ baseline_second_disk_migrate_path }}.old'

# The LV name is derived from the path (/var/log/audit -> var_log_audit).
# The VG name comes from baseline_second_disk_vg_name rather than the
# hard-coded "datavg" the original used, matching the volume group created
# in disk_partitions.yml.
- name: 'Disk Partitions | PATCH | Mount {{ baseline_second_disk_migrate_path }}'
  ansible.posix.mount:
    src: "/dev/mapper/{{ baseline_second_disk_vg_name }}-{{ baseline_second_disk_migrate_path | replace('/', '', 1) | replace('/', '_') }}"
    path: '{{ baseline_second_disk_migrate_path }}'
    fstype: 'xfs'
    opts: 'rw,{{ "noexec," if baseline_second_disk_migrate_path != "/var" else "" }}nosuid,nodev'
    state: mounted
# TODO: systemctl daemon-reload after modifying /etc/fstab

- name: 'Disk Partitions | PATCH | Set {{ baseline_second_disk_migrate_path }} permissions'
  ansible.builtin.file:
    path: '{{ baseline_second_disk_migrate_path }}'
    owner: root
    group: root
    mode: '0755'
    state: directory

# Copy from "." rather than "*" so dotfiles are not silently skipped by
# shell globbing; -a preserves ownership/permissions/timestamps, -x stays
# on one filesystem.
- name: 'Disk Partitions | PATCH | Move {{ baseline_second_disk_migrate_path }} content'
  ansible.builtin.shell:
    cmd: 'cp -ax . {{ baseline_second_disk_migrate_path }}/'
    chdir: '{{ baseline_second_disk_migrate_path }}.old'

- name: 'Disk Partitions | PATCH | Delete {{ baseline_second_disk_migrate_path }}.old'
  ansible.builtin.file:
    path: '{{ baseline_second_disk_migrate_path }}.old'
    state: absent

View file

@ -0,0 +1,46 @@
---
# Configure systemd-resolved as the system resolver with DNSSEC enabled and
# LLMNR/mDNS/hostname-synthesis disabled.
- name: DNS Resolver | PATCH | Install systemd-resolved
  ansible.builtin.dnf:
    name: systemd-resolved
    state: latest

- name: DNS Resolver | PATCH | Ensure systemd-resolved is in use
  ansible.builtin.systemd_service:
    name: systemd-resolved
    state: started
    enabled: true
    masked: false

# With hostname synthesis disabled below, a loopback alias in /etc/hosts
# would shadow the host's real DNS record.
- name: DNS Resolver | PATCH | Remove loopback address entries containing the hostname from /etc/hosts
  ansible.builtin.lineinfile:
    path: /etc/hosts
    regexp: '^(127\.0\.0\.1|::1)\s.*{{ inventory_hostname }}'
    state: absent

# Installs files/resolved.conf (DNSSEC=true, LLMNR/MulticastDNS off).
- name: DNS Resolver | PATCH | Enable DNSSEC and disable unwanted resolved features
  ansible.builtin.copy:
    src: resolved.conf
    dest: /etc/systemd/resolved.conf
    owner: root
    group: root
    mode: "0644"
  notify: "Restart systemd-resolved"
  become: true

- name: DNS Resolver | PATCH | Ensure /etc/systemd/system/systemd-resolved.service.d exists
  ansible.builtin.file:
    path: /etc/systemd/system/systemd-resolved.service.d
    state: directory
    owner: root
    group: root
    mode: "0755"

# Drop-in sets SYSTEMD_RESOLVED_SYNTHESIZE_HOSTNAME=0 (see files/).
- name: DNS Resolver | PATCH | Disable resolved record synthesising
  ansible.builtin.copy:
    src: systemd-resolved-override.conf
    dest: /etc/systemd/system/systemd-resolved.service.d/override.conf
    owner: root
    group: root
    mode: "0644"
  notify: "Restart systemd-resolved"
  become: true

View file

@ -0,0 +1,25 @@
---
# Enrol the host as a FreeIPA client and configure local authentication.

# include_role takes the role under "name"; the original used "role:", which
# is not a valid option of ansible.builtin.include_role and fails at runtime.
- name: FreeIPA Client | PATCH | Join IPA domain
  ansible.builtin.include_role:
    name: freeipa.ansible_freeipa.ipaclient
  vars:
    ipaclient_hostname: "{{ inventory_hostname }}"

- name: FreeIPA Client | AUDIT | Check current authselect configuration
  ansible.builtin.command: authselect current
  register: freeipa_authselect_status
  changed_when: false

# --force is required because ipa-client-install has already modified the
# PAM stack; authselect refuses to overwrite without it.
- name: FreeIPA Client | PATCH | Apply authselect profile with sssd, sudo, and mkhomedir if not set
  ansible.builtin.command: authselect select sssd with-sudo with-mkhomedir --force
  when: >
    'Profile ID: sssd' not in freeipa_authselect_status.stdout or
    'with-sudo' not in freeipa_authselect_status.stdout or
    'with-mkhomedir' not in freeipa_authselect_status.stdout

# oddjobd creates home directories on first login (with-mkhomedir).
- name: FreeIPA Client | PATCH | Enable oddjobd.service (for with-mkhomedir feature)
  ansible.builtin.systemd_service:
    name: oddjobd.service
    state: started
    enabled: true
    masked: false

View file

@ -0,0 +1,29 @@
---
# Apply the Ansible Lockdown RHEL9-CIS hardening role with site overrides.

# FQCN used for the command module, consistent with the rest of the role.
- name: Lockdown | AUDIT | Check current authselect configuration
  ansible.builtin.command: authselect current
  register: baseline_lockdown_authselect_status
  failed_when: false  # Exit code is 2 when not configured
  changed_when: false

# Rule 5.1.20 depends on an authselect profile being in place; skip it when
# none is configured.
- name: Lockdown | AUDIT | Do not disable root login if no authselect profile configured
  ansible.builtin.set_fact:
    rhel9cis_rule_5_1_20: false
  when: baseline_lockdown_authselect_status.rc == 2

- name: Lockdown | PATCH | Run Ansible Lockdown (RHEL9-CIS)
  ansible.builtin.include_role:
    name: RHEL9-CIS
  vars:
    # Ensure message of the day is configured properly - we have our own MOTD to apply
    rhel9cis_rule_1_7_1: false
    rhel9cis_rule_1_7_4: false
    # Don't restrict user SSH access in sshd_config - this is managed by FreeIPA
    rhel9cis_rule_5_1_7: false
    # TODO: figure out boot password
    rhel9cis_set_boot_pass: false
    # TODO: We intend to later deploy a remote rsyslog sink
    rhel9cis_syslog: rsyslog
    rhel9cis_time_synchronization_servers: "{{ baseline_ntp_servers }}"
    rhel9cis_warning_banner: "{{ baseline_warning_banner }}"
    rhel9cis_sshd_denyusers: "admin nobody"
  when: (ansible_distribution == "Rocky") and (ansible_distribution_major_version == "9")

View file

@ -0,0 +1,103 @@
---
# Entry point for the baseline role: validates the platform, applies disk,
# repository, hardening, firewall and DNS configuration, then joins the
# FreeIPA domain.
- name: Baseline | PRELIM | Check for supported operating system
  ansible.builtin.assert:
    that:
      - ansible_distribution == "Rocky"
      - ansible_distribution_major_version == "9"

# Loads vars/<baseline_location>.yml (host type, local NTP servers, ...).
- name: Baseline | PRELIM | Include location specific variables
  ansible.builtin.include_vars:
    file: "{{ baseline_location }}.yml"

- name: Baseline | PATCH | Configure virtual machine for optimal operation as a SolusVM guest
  ansible.builtin.include_tasks:
    file: "solusvm.yml"
  when: baseline_host_type == "solusvm"

- name: Baseline | PATCH | Setup second disk for additional partitions
  ansible.builtin.include_tasks:
    file: disk_partitions.yml
  when: baseline_second_disk_device is defined

# EPEL is only kept when an explicit package allow-list is provided;
# otherwise it is removed entirely (see the mirrored condition below).
- name: Baseline | PATCH | Enable EPEL repository
  block:
    - name: Baseline | PATCH | Install epel-release
      ansible.builtin.dnf:
        name: epel-release
        state: present
    - name: Baseline | PATCH | Restrict packages to be installed from EPEL
      community.general.ini_file:
        path: /etc/yum.repos.d/epel.repo
        section: epel
        option: includepkgs
        value: "{{ baseline_epel_packages_allowed | join(',') }}"
    - name: Baseline | PATCH | Disable EPEL openh264 repository
      community.general.ini_file:
        path: /etc/yum.repos.d/epel-cisco-openh264.repo
        section: epel-cisco-openh264
        option: enabled
        value: 0
  when: (baseline_epel_packages_allowed is defined) and (baseline_epel_packages_allowed | length > 0)

- name: Baseline | PATCH | Remove EPEL repository
  ansible.builtin.dnf:
    name: epel-release
    state: absent
  when: (baseline_epel_packages_allowed is not defined) or (baseline_epel_packages_allowed | length == 0)

- name: Baseline | PATCH | Remove cockpit-ws
  ansible.builtin.dnf:
    name: cockpit-ws
    state: absent

# Run any pending handlers before hardening so earlier changes take effect.
- name: Baseline | PATCH | Flush handlers
  ansible.builtin.meta: flush_handlers

- name: Baseline | PATCH | Run Ansible Lockdown role
  ansible.builtin.include_tasks:
    file: "lockdown.yml"
  when: baseline_lockdown

# NOTE(review): motd.j2 references template_run_date — confirm this variable
# is defined by the play or inventory.
- name: Baseline | PATCH | Ensure message of the day is configured properly (CIS 1.7.1, 1.7.4)
  ansible.builtin.template:
    src: motd.j2
    dest: /etc/motd
    owner: root
    group: root
    mode: 'u-x,go-wx'

- name: Baseline | PATCH | Remove dhcpv6-client service from firewalld
  ansible.posix.firewalld:
    service: dhcpv6-client
    state: disabled
    immediate: true
    permanent: true
    zone: public

- name: Baseline | PATCH | Remove mdns service from firewalld
  ansible.posix.firewalld:
    service: mdns
    state: disabled
    immediate: true
    permanent: true
    zone: public

- name: Baseline | PATCH | Remove cockpit service from firewalld
  ansible.posix.firewalld:
    service: cockpit
    state: disabled
    immediate: true
    permanent: true
    zone: public

- name: Baseline | PATCH | Configure DNS resolver
  ansible.builtin.include_tasks:
    file: dns_resolver.yml

- name: Baseline | PATCH | Flush handlers
  ansible.builtin.meta: flush_handlers

# IPA servers enrol during server deployment; only non-servers join here.
- name: Baseline | PATCH | Join IPA Domain
  ansible.builtin.include_tasks:
    file: ipaclient.yml
  when: "'ipaservers' not in group_names"

View file

@ -0,0 +1,52 @@
---
# https://support.solusvm.com/hc/en-us/articles/21334950006807-How-to-install-Guest-Tools-manually-inside-a-VM-in-SolusVM-2
# Tune a VM for operation as a SolusVM guest: guest tools, tuned profile,
# and removal of the serial console from the bootloader configuration.
- name: SolusVM Guest | PATCH | Install required packages
  ansible.builtin.dnf:
    name:
      - qemu-guest-agent
      - cloud-init
      - tuned
    state: latest
    update_cache: true
  become: true

- name: SolusVM Guest | PATCH | Enable and start tuned
  ansible.builtin.systemd_service:
    name: tuned
    enabled: true
    state: started
  become: true

- name: SolusVM Guest | AUDIT | Check for tuned profile
  ansible.builtin.command: tuned-adm active
  register: vps_tuned_profile
  become: true
  changed_when: false

# command instead of shell: no shell features are needed here.
- name: SolusVM Guest | PATCH | Start tuned profile (virtual-guest)
  ansible.builtin.command: tuned-adm profile virtual-guest
  become: true
  when: "'virtual-guest' not in vps_tuned_profile.stdout"

# NOTE(review): the replace module fails when a listed path is absent —
# confirm /etc/sysconfig/bootloader exists on the target Rocky images.
- name: SolusVM Guest | PATCH | Remove console=ttyS0,115200n8 from bootloader configurations
  ansible.builtin.replace:
    path: "{{ item }}"
    regexp: 'console=ttyS0,115200n8'
    replace: ''
  with_items:
    - /etc/default/grub
    - /etc/sysconfig/bootloader
  when: ansible_distribution == 'Rocky'
  notify:
    - Regenerate grub config

- name: SolusVM Guest | AUDIT | Find all vmlinuz-* files in /boot
  ansible.builtin.find:
    paths: /boot
    patterns: 'vmlinuz-*'
  register: baseline_solusvm_kernels

# NOTE(review): grubby always reports changed here; consider a changed_when
# guard once the desired idempotency check is decided.
- name: SolusVM Guest | PATCH | Remove console=ttyS0,115200n8 from existing kernel bootloader entries
  ansible.builtin.command:
    cmd: "grubby --update-kernel={{ item.path }} --remove-args='console=ttyS0,115200n8'"
  with_items: "{{ baseline_solusvm_kernels.files }}"

View file

@ -0,0 +1,14 @@
##### ###### #####
# # # # # #
# # # #
##### ###### #####
# # # #
# # # # #
##### # # #######
* Hostname: {{ inventory_hostname }}
* Last Ansible run: {{ template_run_date }}
* Audit logging is active.
* Don't mess up.

View file

@ -0,0 +1,2 @@
---
# Location vars: generic hosts get no host-type specific handling.
baseline_host_type: generic

View file

@ -0,0 +1,6 @@
---
# Location vars for SolusVM guests at Hetzner; use Hetzner's NTP servers.
baseline_host_type: solusvm
baseline_ntp_servers:
  - ntp1.hetzner.de
  - ntp2.hetzner.com
  - ntp3.hetzner.net

View file

@ -0,0 +1,99 @@
---
# Obtain and renew Let's Encrypt certificates for the FreeIPA services
# using certbot in standalone mode.
- name: "FreeIPA Certificates | PATCH | Install latest certbot"
  ansible.builtin.dnf:
    name: certbot
    state: latest
    update_cache: true

# The lookup fails when no certificate exists yet; that failure is expected
# and drives the request-vs-renew decision below.
- name: "FreeIPA Certificates | AUDIT | Check for existing certificate expiry"
  community.crypto.x509_certificate_info:
    path: "/etc/letsencrypt/live/{{ inventory_hostname }}/cert.pem"
  register: freeipa_certs_existing_cert
  ignore_errors: true

- name: "FreeIPA Certificates | AUDIT | Calculate days until expiry"
  ansible.builtin.set_fact:
    freeipa_certs_days_until_expiry: "{{ ((freeipa_certs_existing_cert.not_after | to_datetime('%Y%m%d%H%M%SZ')) - now()).days }}"
  when: freeipa_certs_existing_cert.not_after is defined

# FQCN used for debug, consistent with the rest of the role.
- name: "FreeIPA Certificates | AUDIT | Print days until expiry"
  ansible.builtin.debug:
    msg: "{{ freeipa_certs_days_until_expiry }}"
  when: freeipa_certs_existing_cert.not_after is defined
# Request or renew only when there is no usable certificate or it expires
# within 30 days; the "or" short-circuits so days_until_expiry is only
# evaluated when the existing-cert lookup succeeded.
- name: "FreeIPA Certificates | PATCH | Request a new or renewed certificate"
  when: (freeipa_certs_existing_cert.failed) or (freeipa_certs_days_until_expiry | int < 30)
  block:
    - name: "FreeIPA Certificates | PATCH | Download Let's Encrypt Root"
      ansible.builtin.get_url:
        url: "https://letsencrypt.org/certs/{{ item }}.pem"
        dest: /root/{{ item }}.pem
        owner: root
        group: root
        mode: "0600"
      with_items:
        - isrgrootx1
        - isrg-root-x2
    - name: "FreeIPA Certificates | PATCH | Download Let's Encrypt Intermediates"
      ansible.builtin.get_url:
        url: "https://letsencrypt.org/certs/2024/{{ item }}.pem"
        dest: "/root/{{ item }}.pem"
        owner: root
        group: root
        mode: "0600"
      with_items:
        - e7-cross
        - e8-cross
        - r12
        - r13
    # Querying systemd_service with only "name" records current state
    # without changing it; used to restore httpd afterwards.
    - name: "FreeIPA Certificates | AUDIT | Check httpd"
      ansible.builtin.systemd_service:
        name: httpd
      register: freeipa_certs_httpd_status
    # certbot --standalone needs port 80 free, so stop httpd temporarily.
    - name: "FreeIPA Certificates | PATCH | Stop httpd"
      ansible.builtin.systemd_service:
        name: httpd
        state: stopped
      when: freeipa_certs_httpd_status.status.ActiveState == "active"
    - name: "FreeIPA Certificates | PATCH | Add http service to firewall (in case freeipa service is not yet configured)"
      ansible.posix.firewalld:
        service: http
        state: enabled
    - name: "FreeIPA Certificates | PATCH | Request new certificate"
      ansible.builtin.command:
        cmd: certbot certonly --standalone --preferred-challenges http --agree-tos -n -d {{ inventory_hostname }} --register-unsafely-without-email
      when: freeipa_certs_existing_cert.failed
    - name: "FreeIPA Certificates | PATCH | Renew existing certificate"
      ansible.builtin.command:
        cmd: certbot renew
      when: not freeipa_certs_existing_cert.failed
    - name: "FreeIPA Certificates | PATCH | Remove http service from firewall"
      ansible.posix.firewalld:
        service: http
        state: disabled
    - name: "FreeIPA Certificates | PATCH | Start httpd"
      ansible.builtin.systemd_service:
        name: httpd
        state: started
      when: freeipa_certs_httpd_status.status.ActiveState == "active"
    # FreeIPA's installers consume a PKCS#12 bundle; the matching empty pin
    # is passed in the ipaserver/ipareplica vars.
    - name: "FreeIPA Certificates | PATCH | Create PKCS#12 encoded certificate"
      community.crypto.openssl_pkcs12:
        action: export
        path: /root/server.p12
        friendly_name: "{{ inventory_hostname }}"
        privatekey_path: "/etc/letsencrypt/live/{{ inventory_hostname }}/privkey.pem"
        certificate_path: "/etc/letsencrypt/live/{{ inventory_hostname }}/cert.pem"
        other_certificates: "/etc/letsencrypt/live/{{ inventory_hostname }}/chain.pem"
        other_certificates_parse_all: true
        owner: root
        group: root
        mode: "0600"

View file

@ -0,0 +1,52 @@
---
# Deploy FreeIPA servers: the first host in the "ipaservers" group becomes
# the initial server, the rest become replicas.
- name: FreeIPA | PATCH | Request or renew Let's Encrypt Certificates
  ansible.builtin.include_tasks:
    file: certs.yml

# Fixes relative to the original:
# - include_role takes the role under "name" ("role:" is not a valid option);
# - certificate nicknames are templated from inventory_hostname to match the
#   certificates requested in certs.yml (the original referenced
#   "ansible_inventory", which is not a defined variable).
- name: FreeIPA | PATCH | Deploy first FreeIPA server
  ansible.builtin.include_role:
    name: freeipa.ansible_freeipa.ipaserver
  vars:
    ipaserver_ca_cert_files:
      - /root/isrgrootx1.pem
      - /root/isrg-root-x2.pem
    ipaserver_dirsrv_cert_name: "{{ inventory_hostname }}"
    ipaserver_dirsrv_cert_files: [ "/root/server.p12" ]
    ipaserver_dirsrv_pin: ""
    ipaserver_firewalld_zone: public
    ipaserver_http_cert_name: "{{ inventory_hostname }}"
    ipaserver_http_cert_files: [ "/root/server.p12" ]
    ipaserver_http_pin: ""
    ipaserver_no_hbac_allow: true
    ipaserver_no_pkinit: true
    ipaserver_setup_dns: false
  when: inventory_hostname == groups['ipaservers'][0]

# NOTE(review): this task has no "when" guard, so it also runs on the first
# server — confirm ipareplica is a no-op there or add a guard mirroring the
# task above.
- name: FreeIPA | PATCH | Deploy replica FreeIPA servers
  ansible.builtin.include_role:
    name: freeipa.ansible_freeipa.ipareplica
  vars:
    ipareplica_ca_cert_files:
      - /root/isrgrootx1.pem
      - /root/isrg-root-x2.pem
    ipareplica_dirsrv_cert_name: "{{ inventory_hostname }}"
    ipareplica_dirsrv_cert_files: [ "/root/server.p12" ]
    ipareplica_dirsrv_pin: ""
    ipareplica_firewalld_zone: public
    ipareplica_http_cert_name: "{{ inventory_hostname }}"
    ipareplica_http_cert_files: [ "/root/server.p12" ]
    ipareplica_http_pin: ""
    ipareplica_no_pkinit: true
    ipareplica_setup_dns: false

- name: FreeIPA | AUDIT | Check current authselect configuration
  ansible.builtin.command: authselect current
  register: freeipa_authselect_status
  changed_when: false

# NOTE(review): the sibling ipaclient.yml passes --force here — confirm
# whether it is also needed on servers.
- name: FreeIPA | PATCH | Apply authselect profile with sssd, sudo, and mkhomedir if not set
  ansible.builtin.command: authselect select sssd with-sudo with-mkhomedir
  when: >
    'Profile ID: sssd' not in freeipa_authselect_status.stdout or
    'with-sudo' not in freeipa_authselect_status.stdout or
    'with-mkhomedir' not in freeipa_authselect_status.stdout

View file

@ -0,0 +1,3 @@
---
# Defaults for the podman_host role.
# Lowest port rootless containers may bind (string form for the sysctl value).
podman_host_minimum_unpriv_port: "1024"
# Users to configure for rootless podman; must be non-empty and not contain root.
podman_host_rootless_users: ["podman"]

View file

@ -0,0 +1,41 @@
---
# Verify that a rootless user has exactly one subordinate UID range in
# /etc/subuid and that their primary group has exactly one range in
# /etc/subgid. Included per user from main.yml with
# _podman_host_rootless_user set.

# NOTE(review): the user module is used here only to read back user facts,
# but it would create the user if absent — confirm this ordering is
# intended, since main.yml creates the users only after this check.
- name: Podman Host | AUDIT | Gather rootless user facts
  ansible.builtin.user:
    name: "{{ _podman_host_rootless_user }}"
  register: _podman_host_rootless_user_facts

- name: Podman Host | AUDIT | Resolve name of user's primary group
  ansible.builtin.getent:
    database: group
    key: "{{ _podman_host_rootless_user_facts.group }}"
  register: _podman_host_rootless_user_group

# lineinfile in check mode with state=absent counts matching lines without
# modifying the file; "found" then holds the number of matches.
- name: Podman Host | AUDIT | Check if user is in subuid file
  ansible.builtin.lineinfile:
    path: /etc/subuid
    regexp: '^{{ _podman_host_rootless_user }}:.*$'
    state: absent
  register: uid_line_found
  check_mode: true
  failed_when: false
  changed_when: false

- name: Podman Host | AUDIT | Check if group is in subgid file
  ansible.builtin.lineinfile:
    path: /etc/subgid
    regexp: '^{{ _podman_host_rootless_user_group.ansible_facts.getent_group | first }}:.*$'
    state: absent
  register: gid_line_found
  check_mode: true
  failed_when: false
  changed_when: false

- name: Podman Host | AUDIT | Assert that user is in subuid file exactly once
  ansible.builtin.assert:
    that:
      - uid_line_found.found == 1
    msg: "Expected exactly one /etc/subuid entry for {{ _podman_host_rootless_user }}"

- name: Podman Host | AUDIT | Assert that group is in subgid file exactly once
  ansible.builtin.assert:
    that:
      - gid_line_found.found == 1
    msg: "Expected exactly one /etc/subgid entry for the user's primary group"

View file

@ -0,0 +1,75 @@
---
# Prepare a host for running rootless Podman containers for each user in
# podman_host_rootless_users.
- name: Podman Host | PRELIM | Ensure the rootless users are defined and are not root
  ansible.builtin.assert:
    that:
      - podman_host_rootless_users | length > 0
      - '"root" not in podman_host_rootless_users'

- name: Podman Host | AUDIT | Ensure that subuid and subgid are defined for the users
  ansible.builtin.include_tasks:
    file: check_subid.yml
  vars:
    _podman_host_rootless_user: "{{ item }}"
  with_items: "{{ podman_host_rootless_users }}"

# Allows rootless containers to bind low ports (e.g. 80/443) when the
# caller lowers podman_host_minimum_unpriv_port.
- name: Podman Host | PATCH | Set unprivileged port minimum
  ansible.posix.sysctl:
    name: net.ipv4.ip_unprivileged_port_start
    value: "{{ podman_host_minimum_unpriv_port }}"
    sysctl_set: true
    sysctl_file: /etc/sysctl.d/zzz-podman-unpriv-port.conf
    reload: true
  become: true

- name: Podman Host | PATCH | Create users for rootless podman
  ansible.builtin.user:
    name: "{{ item }}"
  become: true
  with_items: "{{ podman_host_rootless_users }}"

# Task name corrected: the line is written to ~/.bash_profile, not
# ~/.profile. create: false assumes the skeleton provides .bash_profile.
- name: Podman Host | PATCH | Set XDG_RUNTIME_DIR in .bash_profile for rootless users
  ansible.builtin.lineinfile:
    path: "/home/{{ item }}/.bash_profile"
    line: "export XDG_RUNTIME_DIR=/run/user/$(id -u)"
    create: false
  become: true
  become_user: "{{ item }}"
  with_items: "{{ podman_host_rootless_users }}"

# Linger keeps the per-user systemd manager (and its services/timers)
# running without an active login session.
- name: Podman Host | PATCH | Enable linger for rootless users
  ansible.builtin.command:
    argv:
      - /usr/bin/loginctl
      - enable-linger
      - "{{ item }}"
    creates: "/var/lib/systemd/linger/{{ item }}"
  become: true
  with_items: "{{ podman_host_rootless_users }}"

- name: Podman Host | PATCH | Install Podman
  ansible.builtin.dnf:
    name:
      - podman
      - container-selinux
    state: latest
  become: true

# Quadlet definitions placed here become per-user systemd units.
- name: Podman Host | PATCH | Create users quadlets directory
  ansible.builtin.file:
    path: "/home/{{ item }}/.config/containers/systemd"
    state: directory
    owner: "{{ item }}"
    group: "{{ item }}"
    mode: "0700"
  with_items: "{{ podman_host_rootless_users }}"
  become: true

- name: Podman Host | PATCH | Enable podman auto update timer for users
  ansible.builtin.systemd_service:
    name: podman-auto-update.timer
    scope: user
    state: started
    enabled: true
  become: true
  become_user: "{{ item }}"
  with_items: "{{ podman_host_rootless_users }}"

View file

@ -0,0 +1,4 @@
# {{ ansible_managed }}
{% for username in podman_host_rootless_users %}
{{ username }}:{{ 100000 + (loop.index0 * 65536) }}:65536
{% endfor %}

View file

@ -0,0 +1,17 @@
---
# Defaults for the podman_keycloak role. Commented-out entries are required
# secrets that must be supplied by the caller (e.g. from a vault).

podman_keycloak_certbot_testing: false
podman_keycloak_enable_ldap: true
# podman_keycloak_keycloak_admin_password:
podman_keycloak_keycloak_admin_username: admin
podman_keycloak_keycloak_hostname: "{{ inventory_hostname }}"
# Extra provider JARs to download into Keycloak; each item needs url and sha256.
podman_keycloak_keycloak_providers: []
# - url: https://github.com/jacekkow/keycloak-protocol-cas/releases/download/26.4.1/keycloak-protocol-cas-26.4.1.jar
#   sha256: 7692526943063434443411b2d0fac63fb4e46f89b20fb07bb45c360916407367
# podman_keycloak_ldap_administrator_password:
# podman_keycloak_ldap_directory_manager_password:
# podman_keycloak_ldap_database_suffix_dn:
podman_keycloak_podman_rootless_user: keycloak
podman_keycloak_postgres_keycloak_database: keycloak
# podman_keycloak_postgres_keycloak_password:
podman_keycloak_postgres_keycloak_username: keycloak
podman_keycloak_keycloak_additional_volumes: []

View file

@ -0,0 +1,27 @@
---
# Handlers restart the rootless user's systemd units; daemon_reload picks up
# regenerated quadlet units first.
- name: Restart ldap
  ansible.builtin.systemd_service:
    name: ldap
    state: restarted
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"

- name: Restart postgres
  ansible.builtin.systemd_service:
    name: postgres
    state: restarted
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"

- name: Restart keycloak
  ansible.builtin.systemd_service:
    name: keycloak
    state: restarted
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"

View file

@ -0,0 +1,115 @@
---
# Bootstrap the 389 Directory Server instance running in the "ldap"
# container: create the suffix, harden the configuration and create the
# service accounts used by Keycloak.

# TODO(review): a fixed sleep is fragile; a wait_for/retries loop on the
# LDAP port would be more reliable.
- name: wait 30 seconds for ldap server to start
  ansible.builtin.pause:
    seconds: 30

# Backend creation fails when the suffix already exists, so errors are
# ignored and the outcome is reported by the follow-up debug task.
- name: create ldap suffix
  containers.podman.podman_container_exec:
    name: ldap
    argv:
      - dsconf
      - -v
      - localhost
      - backend
      - create
      - --suffix
      - "{{ podman_keycloak_ldap_database_suffix_dn }}"
      - --be-name
      - "{{ podman_keycloak_ldap_database_backend_name }}"
      - --create-suffix
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
  register: podman_keycloak_create_suffix
  ignore_errors: true
  changed_when: false
  tags:
    - ldap

# FQCN used for debug, consistent with the rest of the collection.
- name: create suffix result (only when changed)
  ansible.builtin.debug:
    msg: "Suffix was created"
  when: not podman_keycloak_create_suffix.failed
  changed_when: not podman_keycloak_create_suffix.failed
# Create the base OUs over LDAPS from the controller. LDAPTLS_REQCERT is
# relaxed only when testing certificates are in use.
- name: ldap organisational units
  community.general.ldap_entry:
    dn: "ou={{ item }},{{ podman_keycloak_ldap_database_suffix_dn }}"
    objectClass:
      - top
      - organizationalUnit
    server_uri: ldaps://{{ inventory_hostname }}/
    bind_dn: "cn=Directory Manager"
    bind_pw: "{{ podman_keycloak_ldap_directory_manager_password }}"
  delegate_to: localhost
  with_items:
    - Administrators
    - People
    - Groups
  environment:
    - LDAPTLS_REQCERT: "{% if podman_keycloak_certbot_testing %}never{% else %}always{% endif %}"
  tags: ldap
# -D and its value are passed as separate argv elements: with argv each list
# item is delivered as exactly one argument, so the original single element
# '-D "cn=Directory Manager"' reached dsconf as one malformed argument with
# the quotes embedded.
- name: enable memberOf plugin
  containers.podman.podman_container_exec:
    name: ldap
    argv:
      - dsconf
      - -v
      - localhost
      - "-D"
      - "cn=Directory Manager"
      - plugin
      - memberof
      - enable
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
  tags:
    - ldap

- name: disable anonymous bind
  containers.podman.podman_container_exec:
    name: ldap
    argv:
      - dsconf
      - -v
      - localhost
      - "-D"
      - "cn=Directory Manager"
      - config
      - replace
      - nsslapd-allow-anonymous-access=off
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
  tags:
    - ldap
# Read-only service account used for LDAP binds (see the ACI below).
- name: ldap read-only administrator
  community.general.ldap_entry:
    dn: "uid=admin,ou=Administrators,{{ podman_keycloak_ldap_database_suffix_dn }}"
    objectClass:
      - top
      - person
      - organizationalPerson
      - inetOrgPerson
    attributes:
      cn: admin
      sn: admin
      userPassword: "{{ podman_keycloak_ldap_administrator_password }}"
    server_uri: ldaps://{{ inventory_hostname }}/
    bind_dn: "cn=Directory Manager"
    bind_pw: "{{ podman_keycloak_ldap_directory_manager_password }}"
  delegate_to: localhost
  environment:
    - LDAPTLS_REQCERT: "{% if podman_keycloak_certbot_testing %}never{% else %}always{% endif %}"
  tags: ldap

# Grant the read-only account search/read/compare over the whole suffix.
- name: ldap access control information
  community.general.ldap_attrs:
    dn: "{{ podman_keycloak_ldap_database_suffix_dn }}"
    attributes:
      aci: '(target="ldap:///{{ podman_keycloak_ldap_database_suffix_dn }}")(targetattr="*") (version 3.0; acl "readonly"; allow (search,read,compare) userdn="ldap:///uid=admin,ou=Administrators,{{ podman_keycloak_ldap_database_suffix_dn }}";)'
    server_uri: ldaps://{{ inventory_hostname }}/
    bind_dn: "cn=Directory Manager"
    bind_pw: "{{ podman_keycloak_ldap_directory_manager_password }}"
  delegate_to: localhost
  environment:
    - LDAPTLS_REQCERT: "{% if podman_keycloak_certbot_testing %}never{% else %}always{% endif %}"
  tags: ldap

View file

@ -0,0 +1,160 @@
---
# Prepare the host: install podman and create the rootless "keycloak" user.
# Port 80 must be bindable by the unprivileged user for the nginx container.
- name: Podman Keycloak | PATCH | Install podman and create rootless podman user
  ansible.builtin.include_role:
    # include_role takes "name", not "role" — "role" is not a valid parameter
    # and made this include fail (the podman_nginx include below already uses
    # "name" correctly).
    name: sr2c.core.podman_host
  vars:
    podman_host_minimum_unpriv_port: 80
    podman_host_rootless_users: ["keycloak"]
# Open port 80 (ACME HTTP-01 challenges and the HTTPS redirect).
# "immediate" applies to the running firewall, "permanent" survives reloads.
- name: Podman Keycloak | PATCH | Enable http service with firewalld
  ansible.posix.firewalld:
    service: http
    state: enabled
    immediate: true
    permanent: true
    zone: public
# Open port 443 for the nginx TLS reverse proxy in front of Keycloak.
- name: Podman Keycloak | PATCH | Enable https service with firewalld
  ansible.posix.firewalld:
    service: https
    state: enabled
    immediate: true
    permanent: true
    zone: public
# TODO: These will be relabelled by podman but in the future we should label them from the start
# Per-service data/config directories under the rootless user's home; these
# are bind-mounted into the containers by the quadlet templates. The ldap
# directory is only created when the LDAP feature is enabled.
- name: Podman Keycloak | PATCH | Create service configuration directories
  ansible.builtin.file:
    path: "/home/{{ podman_keycloak_podman_rootless_user }}/{{ item }}"
    state: directory
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    group: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0755"
  become: true
  with_items:
    - keycloak
    - ldap
    - postgres
  when: (item != 'ldap') or podman_keycloak_enable_ldap
# Fetch provider JARs (SPI extensions) into ~/keycloak/; each is mounted
# read-only into /opt/keycloak/providers by the keycloak.container quadlet.
# The sha256 checksum also makes the download idempotent.
- name: Podman Keycloak | PATCH | Download keycloak providers
  ansible.builtin.get_url:
    url: "{{ item.url }}"
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/keycloak/{{ item.url | basename }}"
    checksum: "sha256:{{ item.sha256 }}"
  with_items: "{{ podman_keycloak_keycloak_providers }}"
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
  notify: restart keycloak
# Install the user-level keycloak.target that groups all stack services.
- name: Podman Keycloak | PATCH | Install systemd target
  ansible.builtin.template:
    src: "keycloak.target"
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/.config/systemd/user/keycloak.target"
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0400"
  # Privilege escalation is required to write into the rootless user's home
  # and to chown the file; every other install task in this file sets it and
  # it was missing here.
  become: true
# Install the user-level keycloak.slice that the container services join.
- name: Podman Keycloak | PATCH | Install systemd slice
  ansible.builtin.template:
    src: "keycloak.slice"
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/.config/systemd/user/keycloak.slice"
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0400"
  # Privilege escalation is required to write into the rootless user's home
  # and to chown the file; every other install task in this file sets it and
  # it was missing here.
  become: true
# Render the container quadlets; the generated unit files carry secrets via
# Environment= lines, hence mode 0400. Notify restarts only the container
# whose quadlet changed ("ldap.container" -> handler "Restart ldap").
- name: Podman Keycloak | PATCH | Install container quadlets
  ansible.builtin.template:
    src: "{{ item }}"
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0400"
  with_items:
    - ldap.container
    - keycloak.container
    - postgres.container
  when: (item != 'ldap.container') or podman_keycloak_enable_ldap
  notify:
    - "Restart {{ item | split('.') | first }}"
  become: true
# Render the network quadlets (bridge networks). No notify: network changes
# do not require restarting the attached containers here.
- name: Podman Keycloak | PATCH | Install network quadlets
  ansible.builtin.template:
    src: "{{ item }}"
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0400"
  with_items:
    - frontend.network
    - ldap.network
    - keycloak.network
  when: (item != 'ldap.network') or podman_keycloak_enable_ldap
  become: true
# Dry-run the quadlet generator to validate the rendered unit files.
# Errors are ignored here so the following assert can fail with a message
# that does not echo the (secret-bearing) generator output.
- name: Podman Keycloak | AUDIT | Verify quadlets are correctly defined
  ansible.builtin.command: /usr/libexec/podman/quadlet -dryrun -user
  register: podman_keycloak_quadlet_result
  ignore_errors: true
  changed_when: false
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
# Fail the play if the dry-run above returned non-zero, without leaking the
# generator output (quadlets contain database/LDAP passwords).
- name: Podman Keycloak | AUDIT | Assert that the quadlet verification succeeded
  ansible.builtin.assert:
    that:
      - podman_keycloak_quadlet_result.rc == 0
    fail_msg: "'/usr/libexec/podman/quadlet -dryrun -user' failed! Output withheld to prevent leaking secrets."
# Start the database and Keycloak as user-scope services; daemon_reload
# makes systemd regenerate units from the freshly installed quadlets.
# Enabling at boot is handled by keycloak.target at the end of this file.
- name: Podman Keycloak | PATCH | Start PostgreSQL and keycloak containers
  ansible.builtin.systemd_service:
    name: "{{ item }}"
    state: started
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
  with_items:
    - postgres
    - keycloak
# Delegate TLS termination to the shared podman_nginx role: it installs the
# nginx/certbot quadlets, obtains the Let's Encrypt certificate, and joins
# the nginx service to this stack's slice and target.
- name: Podman Keycloak | PATCH | Configure nginx container
  ansible.builtin.include_role:
    name: sr2c.core.podman_nginx
  vars:
    podman_nginx_podman_rootless_user: "{{ podman_keycloak_podman_rootless_user }}"
    podman_nginx_primary_hostname: "{{ podman_keycloak_keycloak_hostname }}"
    podman_nginx_frontend_network: frontend
    podman_nginx_systemd_service_slice: keycloak.slice
    podman_nginx_systemd_service_target: keycloak.target
# Start the directory server only when the LDAP feature is enabled; it is
# started after podman_nginx so the Let's Encrypt files it bind-mounts exist.
- name: Podman Keycloak | PATCH | Start LDAP container
  ansible.builtin.systemd_service:
    name: ldap
    state: started
    scope: user
  when: podman_keycloak_enable_ldap
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
# Install this role's full reverse-proxy config (HTTP redirect + HTTPS proxy
# to keycloak:8080), replacing the bootstrap HTTP-only config that the
# podman_nginx role writes before a certificate exists.
- name: Podman Keycloak | PATCH | Create nginx configuration file
  ansible.builtin.template:
    src: nginx.conf
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/nginx/nginx.conf"
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    group: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0644"
  become: true
  notify: restart nginx
# Directory bootstrap (backend, OUs, plugins, service account) lives in
# ldap.yml; only run when the LDAP feature is enabled.
- name: Podman Keycloak | PATCH | Configure the LDAP directory
  ansible.builtin.include_tasks:
    file: ldap.yml
  when: podman_keycloak_enable_ldap
# Enable the grouping target so the whole stack (keycloak, ldap, nginx via
# Requires=) starts with the user session at boot.
- name: Podman Keycloak | PATCH | Enable keycloak.target
  ansible.builtin.systemd_service:
    name: keycloak.target
    state: started
    enabled: true
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"

View file

@ -0,0 +1,5 @@
# Podman quadlet: user-defined bridge network for the Keycloak stack,
# created on demand when keycloak.target starts.
[Network]
Driver=bridge
[Install]
WantedBy=keycloak.target

View file

@ -0,0 +1,40 @@
# Podman quadlet for the Keycloak server. Rendered by Ansible with secrets
# in Environment= lines, so the file is installed mode 0400.
[Unit]
Requires=postgres.service
After=postgres.service
PartOf=keycloak.target
[Container]
AutoUpdate=registry
ContainerName=keycloak
Environment=KC_LOG_LEVEL=info
Environment=KC_DB=postgres
Environment=KC_DB_PASSWORD={{ podman_keycloak_postgres_keycloak_password }}
Environment=KC_DB_URL=jdbc:postgresql://postgres/{{ podman_keycloak_postgres_keycloak_database }}
Environment=KC_DB_USERNAME={{ podman_keycloak_postgres_keycloak_username }}
Environment=KC_HOSTNAME={{ podman_keycloak_keycloak_hostname }}
# Plain HTTP on 8080 is acceptable here: TLS is terminated by the nginx
# container, which proxies to keycloak:8080 with X-Forwarded-* headers.
Environment=KC_HTTP_ENABLED=true
Environment=KC_HTTP_PORT=8080
Environment=KC_PROXY_HEADERS=xforwarded
Environment=KC_BOOTSTRAP_ADMIN_USERNAME={{ podman_keycloak_keycloak_admin_username }}
Environment=KC_BOOTSTRAP_ADMIN_PASSWORD={{ podman_keycloak_keycloak_admin_password }}
Environment=PROXY_ADDRESS_FORWARDING=true
Exec=start --features=quick-theme
Image=quay.io/keycloak/keycloak:26.4
# Joined to the db network always, the ldap network only when the LDAP
# feature is enabled, and the frontend network shared with nginx.
Network=keycloak.network
{% if podman_keycloak_enable_ldap %}
Network=ldap.network
{% endif %}
Network=frontend.network
{% for provider in podman_keycloak_keycloak_providers %}
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/keycloak/{{ provider.url | basename }}:/opt/keycloak/providers/{{ provider.url | basename }}:ro,z
{% endfor %}
{% for item in podman_keycloak_keycloak_additional_volumes %}
Volume={{ item.src }}:{{ item.dest }}:{{ item.options }}
{% endfor %}
[Service]
Slice=keycloak.slice
Restart=always
[Install]
WantedBy=keycloak.target

View file

@ -0,0 +1,5 @@
# Podman quadlet: user-defined bridge network for the Keycloak stack,
# created on demand when keycloak.target starts.
[Network]
Driver=bridge
[Install]
WantedBy=keycloak.target

View file

@ -0,0 +1,2 @@
# Systemd slice grouping all container services of the Keycloak stack
# (each quadlet sets Slice=keycloak.slice).
[Unit]
Description=Podman Keycloak Stack by SR2 Communications

View file

@ -0,0 +1,10 @@
# Grouping target: starting/enabling this target pulls in the whole stack
# (keycloak, nginx, and — when enabled — ldap) at user login/boot.
[Unit]
Description=Podman Keycloak Stack by SR2 Communications
Requires=keycloak.service
{% if podman_keycloak_enable_ldap %}
Requires=ldap.service
{% endif %}
Requires=nginx.service
[Install]
WantedBy=default.target

View file

@ -0,0 +1,22 @@
# Podman quadlet for the 389-ds directory server. Contains the Directory
# Manager password, so the rendered file is installed mode 0400.
# NOTE(review): Image pins :latest but, unlike the keycloak and postgres
# quadlets, does not set AutoUpdate=registry — confirm whether auto-update
# was intentionally omitted here.
[Unit]
PartOf=keycloak.target
[Container]
ContainerName=ldap
Environment=DS_DM_PASSWORD={{ podman_keycloak_ldap_directory_manager_password }}
Image=quay.io/389ds/dirsrv:latest
Network=ldap.network
# LDAPS: host port 636 maps to the container's TLS listener on 3636; the
# server key/cert/chain are bind-mounted from the Let's Encrypt live dir.
PublishPort=636:3636/tcp
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/ldap:/data:rw,Z
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/certbot/conf/live/{{ podman_keycloak_keycloak_hostname }}/privkey.pem:/data/tls/server.key:ro,z
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/certbot/conf/live/{{ podman_keycloak_keycloak_hostname }}/cert.pem:/data/tls/server.crt:ro,z
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/certbot/conf/live/{{ podman_keycloak_keycloak_hostname }}/chain.pem:/data/tls/ca/chain.crt:ro,z
[Service]
Slice=keycloak.slice
Restart=always
# RuntimeMaxSec is used to restart the service periodically to pick up new Let's Encrypt certificates
RuntimeMaxSec=604800
[Install]
WantedBy=keycloak.target

View file

@ -0,0 +1,5 @@
# Podman quadlet: user-defined bridge network for the Keycloak stack,
# created on demand when keycloak.target starts.
[Network]
Driver=bridge
[Install]
WantedBy=keycloak.target

View file

@ -0,0 +1,39 @@
# {{ ansible_managed }}
# Full reverse-proxy config: port 80 serves ACME challenges and redirects
# everything else to HTTPS; port 443 terminates TLS and proxies to the
# Keycloak container over the shared frontend network.
server {
listen 80;
listen [::]:80;
server_name {{ podman_keycloak_keycloak_hostname }};
server_tokens off;
# Served from the certbot webroot volume for HTTP-01 renewals.
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
location / {
return 301 https://{{ podman_keycloak_keycloak_hostname }}$request_uri;
}
}
server {
listen 443 default_server ssl;
listen [::]:443 ssl;
http2 on;
server_name {{ podman_keycloak_keycloak_hostname }};
server_tokens off;
ssl_certificate /etc/letsencrypt/live/{{ podman_keycloak_keycloak_hostname }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ podman_keycloak_keycloak_hostname }}/privkey.pem;
location / {
# "keycloak" resolves via the podman frontend network; the X-Forwarded-*
# headers match KC_PROXY_HEADERS=xforwarded in the keycloak quadlet.
proxy_pass http://keycloak:8080/;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port 443;
}
}

View file

@ -0,0 +1,21 @@
# Podman quadlet for the PostgreSQL database backing Keycloak. Credentials
# are rendered into Environment= lines, so the file is installed mode 0400.
[Unit]
PartOf=keycloak.target
[Container]
AutoUpdate=registry
ContainerName=postgres
Environment=POSTGRES_DB={{ podman_keycloak_postgres_keycloak_database }}
Environment=POSTGRES_PASSWORD={{ podman_keycloak_postgres_keycloak_password }}
Environment=POSTGRES_USER={{ podman_keycloak_postgres_keycloak_username }}
# Force scram-sha-256 auth for host connections (instead of trust/md5).
Environment=POSTGRES_HOST_AUTH_METHOD=scram-sha-256
Environment=POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256
Image=docker.io/postgres:17.3
# Only reachable from keycloak over the private keycloak.network bridge;
# no ports are published to the host.
Network=keycloak.network
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/postgres:/var/lib/postgresql/data:rw,Z
[Service]
Slice=keycloak.slice
Restart=always
[Install]
WantedBy=keycloak.target

View file

@ -0,0 +1,10 @@
---
# Extra hostnames added as additional -d arguments on the certificate request.
podman_nginx_additional_hostnames: []
# When true, request a --test-cert from the Let's Encrypt staging environment.
podman_nginx_certbot_testing: false
# Optional quadlet network the nginx container joins (omitted when undefined).
# podman_nginx_frontend_network:
podman_nginx_podman_rootless_user: nginx
# Primary hostname for the certificate and nginx server_name (required).
# podman_nginx_primary_hostname:
# Optional systemd slice/target the nginx service is attached to.
# podman_nginx_systemd_service_slice:
# podman_nginx_systemd_service_target:
# Services (names without .service) added as Requires=/After= on nginx.
podman_nginx_systemd_service_requires: []
# Additional bind mounts for nginx: list of {src, dest, options} mappings.
podman_nginx_additional_volumes: []

View file

@ -0,0 +1,18 @@
---
# Trigger a one-off run of the certbot-renew container service.
# state: started (not restarted) — presumably because the service is a
# short-lived renewal run rather than a daemon; confirm if it should be
# forced to re-run when already active.
- name: Restart certbot-renew
  ansible.builtin.systemd_service:
    name: certbot-renew
    state: started
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"
# Restart the nginx container service to pick up changed quadlets or a
# changed nginx.conf (notified by the template tasks).
- name: Restart nginx
  ansible.builtin.systemd_service:
    name: nginx
    state: restarted
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"

View file

@ -0,0 +1,111 @@
---
# Create the directory layout used by the nginx and certbot containers:
# certbot/{conf,www} are bind-mounted into both, nginx/ holds nginx.conf,
# and the .config trees receive the systemd/quadlet unit files.
- name: Podman Nginx | PATCH | Create service configuration directories
  ansible.builtin.file:
    path: "/home/{{ podman_nginx_podman_rootless_user }}/{{ item }}"
    state: directory
    owner: "{{ podman_nginx_podman_rootless_user }}"
    group: "{{ podman_nginx_podman_rootless_user }}"
    mode: "0755"
  become: true
  with_items:
    - .config/systemd/user
    # The quadlet install task below templates into this directory; create
    # it here so that task cannot fail on a missing parent (idempotent even
    # if another role already created it).
    - .config/containers/systemd
    - certbot/conf
    - certbot/www
    - nginx
# Render the certbot and nginx container quadlets; notify restarts only the
# service whose quadlet changed ("nginx.container" -> handler "Restart nginx").
- name: Podman Nginx | PATCH | Install podman quadlet for rootless podman user
  ansible.builtin.template:
    src: "{{ item }}"
    dest: "/home/{{ podman_nginx_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
    owner: "{{ podman_nginx_podman_rootless_user }}"
    mode: "0400"
  with_items:
    - certbot-renew.container
    - nginx.container
  notify:
    - "Restart {{ item | split('.') | first }}"
  become: true
# Install the daily timer that triggers the certbot-renew container service
# (the timer is started and enabled at the end of this file).
- name: Podman Nginx | PATCH | Install certbot renewal timer for rootless podman user
  ansible.builtin.template:
    src: "certbot-renew.timer"
    dest: "/home/{{ podman_nginx_podman_rootless_user }}/.config/systemd/user/certbot-renew.timer"
    owner: "{{ podman_nginx_podman_rootless_user }}"
    mode: "0400"
  become: true
# Dry-run the quadlet generator to validate the rendered unit files. Errors
# are ignored here so the assert below can fail with a message that does not
# echo the generator output.
- name: Podman Nginx | AUDIT | Verify quadlets are correctly defined
  ansible.builtin.command: /usr/libexec/podman/quadlet -dryrun -user
  register: podman_nginx_quadlet_result
  ignore_errors: true
  changed_when: false
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"
# Previously the registered result was never checked, so with ignore_errors
# a broken quadlet passed silently. Mirror the assert used by the
# podman_keycloak role.
- name: Podman Nginx | AUDIT | Assert that the quadlet verification succeeded
  ansible.builtin.assert:
    that:
      - podman_nginx_quadlet_result.rc == 0
    fail_msg: "'/usr/libexec/podman/quadlet -dryrun -user' failed! Output withheld to prevent leaking secrets."
# Detect whether a certificate already exists; drives the bootstrap path
# (HTTP-only config + initial certbot run) below.
- name: Podman Nginx | AUDIT | Check if certificate exists
  ansible.builtin.stat:
    path: "/home/{{ podman_nginx_podman_rootless_user }}/certbot/conf/live/{{ podman_nginx_primary_hostname }}/fullchain.pem"
  register: podman_nginx_cert_stat
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"
# Bootstrap: without a certificate nginx cannot load an HTTPS server block,
# so install an HTTP-only config that can still serve the ACME challenge.
- name: Podman Nginx | PATCH | Create temporary nginx configuration (no https)
  ansible.builtin.template:
    src: nginx.conf
    dest: "/home/{{ podman_nginx_podman_rootless_user }}/nginx/nginx.conf"
    owner: "{{ podman_nginx_podman_rootless_user }}"
    group: "{{ podman_nginx_podman_rootless_user }}"
    mode: "0644"
  become: true
  when: not podman_nginx_cert_stat.stat.exists
# Start nginx now so the webroot ACME challenge can be served during the
# initial certbot run; daemon_reload regenerates units from the quadlets.
- name: Podman Nginx | PATCH | Start nginx
  ansible.builtin.systemd_service:
    name: nginx
    state: started
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"
# One-off certbot run (webroot challenge through the already-running nginx)
# to obtain the initial certificate; only runs when none exists yet.
# --rm removes the temporary container; subsequent renewals are handled by
# the certbot-renew.timer/service pair.
- name: Podman Nginx | PATCH | Run certbot container to create certificate
  ansible.builtin.command:
    cmd: >
      podman run --name certbot-generate
      --rm
      --volume /home/{{ podman_nginx_podman_rootless_user }}/certbot/www:/var/www/certbot:rw,z
      --volume /home/{{ podman_nginx_podman_rootless_user }}/certbot/conf:/etc/letsencrypt:rw,z
      docker.io/certbot/certbot:latest
      certonly
      --register-unsafely-without-email
      --agree-tos
      --webroot
      --webroot-path /var/www/certbot/
      -d "{{ podman_nginx_primary_hostname }}"
      {% for hostname in podman_nginx_additional_hostnames %} -d "{{ hostname }}"{% endfor %}
      {% if podman_nginx_certbot_testing %} --test-cert{% endif %}
  when: not podman_nginx_cert_stat.stat.exists
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"
# Re-stat after the certbot run (overwrites the earlier register) so the
# assert below reflects the post-issuance state.
- name: Podman Nginx | AUDIT | Check if certificate exists
  ansible.builtin.stat:
    path: "/home/{{ podman_nginx_podman_rootless_user }}/certbot/conf/live/{{ podman_nginx_primary_hostname }}/fullchain.pem"
  register: podman_nginx_cert_stat
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"
# Hard-fail the play if certificate issuance did not produce fullchain.pem;
# downstream roles mount these files and would fail obscurely otherwise.
- name: Podman Nginx | AUDIT | Assert that certificate exists now
  ansible.builtin.assert:
    that:
      - podman_nginx_cert_stat.stat.exists
    # Typo fix: "Lets" -> "Let's".
    fail_msg: "Failed to get a Let's Encrypt certificate."
# Activate the daily renewal timer (installed earlier in this file) and
# enable it so renewals persist across reboots.
- name: Podman Nginx | PATCH | Start certbot renewal timer
  ansible.builtin.systemd_service:
    name: "certbot-renew.timer"
    state: started
    enabled: true
    scope: user
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"

View file

@ -0,0 +1,13 @@
# Podman quadlet for a one-shot "certbot renew" run, activated by
# certbot-renew.timer. No [Install] section: the timer starts the generated
# service directly.
[Unit]
Description=Run certbot renew
[Container]
AutoUpdate=registry
ContainerName=certbot-renew
Exec=renew
Image=docker.io/certbot/certbot:latest
Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/www:/var/www/certbot:z
Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/conf:/etc/letsencrypt:z
[Service]
# One-shot run per activation; the timer re-triggers it daily.
Restart=no

View file

@ -0,0 +1,9 @@
# Daily trigger for the quadlet-generated certbot-renew.service (no Unit=
# needed: a timer defaults to the service of the same name).
[Unit]
Description=Timer for certbot renewals
[Timer]
OnCalendar=daily
# Catch up on missed runs if the machine was off at the scheduled time.
Persistent=true
[Install]
WantedBy=timers.target

View file

@ -0,0 +1,17 @@
# {{ ansible_managed }}
# Bootstrap HTTP-only config used before a certificate exists: serves the
# ACME webroot challenge and redirects everything else to HTTPS. Consumers
# of this role (e.g. podman_keycloak) overwrite it with a full TLS config.
server {
listen 80;
listen [::]:80;
server_name {{ podman_nginx_primary_hostname }};
server_tokens off;
# Served from the certbot webroot volume for HTTP-01 validation.
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
location / {
return 301 https://{{ podman_nginx_primary_hostname }}$request_uri;
}
}

View file

@ -0,0 +1,34 @@
# Podman quadlet for the reverse-proxy nginx container; ordering, slice and
# target attachment are parameterised so multiple stacks can reuse the role.
[Unit]
{% for req in podman_nginx_systemd_service_requires %}
Requires={{ req }}.service
After={{ req }}.service
{% endfor %}
{% if podman_nginx_systemd_service_target is defined %}
PartOf={{ podman_nginx_systemd_service_target }}
{% endif %}
[Container]
ContainerName=nginx
Image=docker.io/nginx:1
{% if podman_nginx_frontend_network is defined %}Network={{ podman_nginx_frontend_network }}.network{% endif +%}
PublishPort=80:80
PublishPort=443:443
Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/www:/var/www/certbot/:ro,z
Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/conf/:/etc/letsencrypt/:ro,z
Volume=/home/{{ podman_nginx_podman_rootless_user }}/nginx:/etc/nginx/conf.d/:ro,z
{% for item in podman_nginx_additional_volumes %}
Volume={{ item.src }}:{{ item.dest }}:{{ item.options }}
{% endfor %}
[Service]
# Periodic restart (7 days) so nginx picks up renewed Let's Encrypt
# certificates, matching the pattern used by the ldap quadlet.
RuntimeMaxSec=604800
Restart=always
{% if podman_nginx_systemd_service_slice is defined %}
Slice={{ podman_nginx_systemd_service_slice }}
{% endif %}
{% if podman_nginx_systemd_service_target is defined %}
# NOTE(review): WantedBy=default.target is emitted only when a service
# target is defined — it looks like this may have been intended to be
# WantedBy={{ podman_nginx_systemd_service_target }}; confirm.
[Install]
WantedBy=default.target
{% endif %}