Initial import; migrate some roles from irl.wip
commit 2ba6c6691b
44 changed files with 1573 additions and 0 deletions
README.md (Normal file, 14 lines)
@@ -0,0 +1,14 @@
# sr2c.core

## Usage

Ansible collections cannot declare dependencies on standalone roles, so the roles this collection builds on must be listed alongside it in your requirements.yml:

```yaml
collections:
  - src: git+https://guardianproject.dev/sr2/ansible-collection-core.git
    version: "main"
roles:
  - src: git+https://github.com/ansible-lockdown/RHEL9-CIS.git
    version: "2.0.0"
```
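With both entries present, a single command installs the collection and the roles together (a usage sketch, assuming the snippet above is saved as `requirements.yml`):

```sh
ansible-galaxy install -r requirements.yml
```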
galaxy.yml (Normal file, 18 lines)
@@ -0,0 +1,18 @@
---
namespace: sr2c
name: core
version: "0.0.1"
readme: "README.md"
license:
  - BSD-2-Clause
authors:
  - irl
description: Common roles and playbooks for infrastructure deployment and management.
homepage: https://guardianproject.dev/sr2/ansible-collection-core
repository: https://guardianproject.dev/sr2/ansible-collection-core.git
issues: https://guardianproject.dev/sr2/ansible-collection-core/issues/
dependencies:
  ansible.posix: "*"
  community.crypto: "*"
  community.general: "*"
  freeipa.ansible_freeipa: "1.15.1"
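This metadata is what `ansible-galaxy` reads when packaging the collection; a local build-and-install sketch (standard ansible-galaxy commands, run from the repository root) might look like:

```sh
ansible-galaxy collection build .   # emits sr2c-core-0.0.1.tar.gz per the metadata above
ansible-galaxy collection install sr2c-core-0.0.1.tar.gz
```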
playbooks/core_services.yml (Normal file, 59 lines)
@@ -0,0 +1,59 @@
---
- hosts:
    - ipaservers
  become: true  # Required by FreeIPA roles
  vars:
    # Required for FreeIPA setup
    baseline_epel_packages_allowed:
      - certbot
      - python3-certbot
      - python3-pyrfc3339
      - python3-parsedatetime
      - python3-josepy
      - python3-importlib-metadata
      - python3-configargparse
      - python3-acme
      - python3-zipp
      - python3-pyOpenSSL
    # 2.1 Configure Server Services
    # These services are required by FreeIPA.
    rhel9cis_autofs_services: true  # TODO: can we mask it? This is required by FreeIPA but we don't use it.
    rhel9cis_dns_server: true
    rhel9cis_httpd_server: true
    # 2.2 Configure Client Services
    # These services are required by FreeIPA.
    rhel9cis_openldap_clients_required: true
    # 5.3.2 Configure authselect
    # ipaservers are part of Linux Identity Management. Joining your host to an IdM
    # domain automatically configures SSSD authentication on your host.
    rhel9cis_allow_authselect_updates: false
    # TODO: Restricted umask breaks FreeIPA roles
    rhel9cis_rule_5_4_2_6: false
    rhel9cis_rule_5_4_3_3: false
  roles:
    - name: sr2c.core.baseline
      tags: bootstrap
    - name: sr2c.core.freeipa
      tags: freeipa

- hosts:
    - keycloak
  become: true
  vars:
    rhel9cis_autofs_services: true  # TODO: can we mask it? This is required by FreeIPA but we don't use it.
    # 2.2 Configure Client Services
    # These services are required by FreeIPA.
    rhel9cis_openldap_clients_required: true
    # 5.3.2 Configure authselect
    # ipaservers are part of Linux Identity Management. Joining your host to an IdM
    # domain automatically configures SSSD authentication on your host.
    rhel9cis_allow_authselect_updates: false
    podman_host_rootless_users: ["identity"]
  roles:
    - name: sr2c.core.baseline
      tags: bootstrap
    - name: freeipa.ansible_freeipa.ipaclient
      state: present
      tags: bootstrap
    - name: sr2c.core.podman_keycloak
      tags: keycloak
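Since this playbook ships inside the collection, it should be runnable by its fully qualified name once the collection is installed (playbooks in collections are supported by ansible-core 2.11+; the inventory path here is illustrative):

```sh
ansible-playbook -i inventory.yml sr2c.core.core_services
```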
roles/baseline/README.md (Normal file, 18 lines)
@@ -0,0 +1,18 @@
# sr2c.core.baseline

Configure an SR2 virtual machine.

## Disk Partitions and Encryption

Creates a new LVM volume group on `baseline_second_disk_device` with logical volumes for:

| Mountpoint     | Default Size    | Encrypted |
|----------------|-----------------|-----------|
| /var           | 5GiB            | No        |
| /var/log       | 5GiB            | No        |
| /var/log/audit | 5GiB            | No        |
| /var/tmp       | 5GiB            | No        |
| /home          | Remaining space | Yes       |

It is assumed that `/home` is empty and that no migration of data need occur. Data under `/var` will be migrated for each partition.
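A minimal play wiring the role up might look like the sketch below; the device path and the vaulted passphrase variable are illustrative placeholders, while the variable names themselves come from this role's defaults and assertions:

```yaml
- hosts: all
  become: true
  vars:
    baseline_second_disk_device: /dev/vdb  # hypothetical second disk
    baseline_home_luks_passphrase: "{{ vault_home_luks_passphrase }}"  # placeholder secret
  roles:
    - name: sr2c.core.baseline
```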
roles/baseline/defaults/main.yml (Normal file, 27 lines)
@@ -0,0 +1,27 @@
---
# Location of the host (generic, sr2_de_fsn)
baseline_location: generic
# Enable running the Ansible Lockdown CIS role
baseline_lockdown: true
# UK Ministry of Justice Login Banner (seems as good as any)
# https://security-guidance.service.justice.gov.uk/system-lockdown-and-hardening-standard/#appendix-a-login-banner
baseline_warning_banner: |
  THIS SYSTEM IS FOR AUTHORISED USERS ONLY.

  This is a private system; only use this system if you have specific authority to do so.
  Otherwise you are liable to prosecution under the Computer Misuse Act 1990. If you do
  not have the express permission of the operator or owner of this system, switch off or
  disconnect now to avoid prosecution.

# Local NTP servers if available
baseline_ntp_servers:
  - 0.pool.ntp.org
  - 1.pool.ntp.org
  - 2.pool.ntp.org
  - 3.pool.ntp.org
# baseline_second_disk_device:
baseline_second_disk_vg_name: "datavg"
baseline_second_disk_var_size: "5G"
baseline_second_disk_var_log_size: "5G"
baseline_second_disk_var_log_audit_size: "5G"
baseline_second_disk_var_tmp_size: "5G"
roles/baseline/files/resolved.conf (Normal file, 4 lines)
@@ -0,0 +1,4 @@
[Resolve]
DNSSEC=true
LLMNR=no
MulticastDNS=no
roles/baseline/files/systemd-resolved-override.conf (Normal file, 2 lines)
@@ -0,0 +1,2 @@
[Service]
Environment="SYSTEMD_RESOLVED_SYNTHESIZE_HOSTNAME=0"
roles/baseline/handlers/main.yml (Normal file, 9 lines)
@@ -0,0 +1,9 @@
---
- name: Regenerate grub config
  ansible.builtin.command:
    cmd: grub2-mkconfig -o /boot/grub2/grub.cfg

- name: Restart systemd-resolved
  ansible.builtin.service:
    name: systemd-resolved
    state: restarted
roles/baseline/tasks/disk_partitions.yml (Normal file, 160 lines)
@@ -0,0 +1,160 @@
---
- name: Disk Partitions | PRELIM | Ensure baseline_home_luks_passphrase is defined
  ansible.builtin.assert:
    that:
      - baseline_home_luks_passphrase is defined
    msg: "Variable 'baseline_home_luks_passphrase' must be defined."

- name: Disk Partitions | PRELIM | Ensure baseline_second_disk_device is defined
  ansible.builtin.assert:
    that:
      - baseline_second_disk_device is defined
    msg: "Variable 'baseline_second_disk_device' must be defined."

- name: Disk Partitions | PATCH | Ensure lvm2 is installed
  ansible.builtin.package:
    name: lvm2
    state: present

- name: Disk Partitions | PATCH | Create LVM partition spanning entire disk
  community.general.parted:
    device: "{{ baseline_second_disk_device }}"
    number: 1
    flags: [ lvm ]
    state: present
    part_start: "0%"
    part_end: "100%"

- name: Disk Partitions | PATCH | Create volume group
  community.general.lvg:
    vg: "{{ baseline_second_disk_vg_name }}"
    pvs: "{{ baseline_second_disk_device }}1"

- name: Disk Partitions | PATCH | Create /var logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var
    size: "{{ baseline_second_disk_var_size }}"

- name: Disk Partitions | PATCH | Create /var/log logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var_log
    size: "{{ baseline_second_disk_var_log_size }}"

- name: Disk Partitions | PATCH | Create /var/log/audit logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var_log_audit
    size: "{{ baseline_second_disk_var_log_audit_size }}"

- name: Disk Partitions | PATCH | Create /var/tmp logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var_tmp
    size: "{{ baseline_second_disk_var_tmp_size }}"

- name: Disk Partitions | PATCH | Create /home logical volume with remaining space
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: home
    shrink: false  # make idempotent
    size: "100%FREE"

- name: Disk Partitions | PATCH | Ensure cryptsetup is installed
  ansible.builtin.package:
    name: cryptsetup
    state: present

- name: Disk Partitions | PATCH | Encrypt /home with LUKS2 and provided passphrase
  community.crypto.luks_device:
    device: "/dev/{{ baseline_second_disk_vg_name }}/home"
    state: present
    passphrase: "{{ baseline_home_luks_passphrase }}"
    type: luks2

- name: Disk Partitions | PATCH | Open LUKS device
  community.crypto.luks_device:
    device: "/dev/{{ baseline_second_disk_vg_name }}/home"
    name: home_crypt
    state: opened
    passphrase: "{{ baseline_home_luks_passphrase }}"

- name: Disk Partitions | PATCH | Add /home logical volume to crypttab
  community.general.crypttab:
    backing_device: /dev/mapper/datavg-home
    name: home_crypt
    opts: discard
    state: present

- name: Disk Partitions | PATCH | Create xfs filesystems on new partitions
  community.general.filesystem:
    dev: "{{ item }}"
    fstype: xfs
  with_items:
    - /dev/mapper/datavg-var
    - /dev/mapper/datavg-var_log
    - /dev/mapper/datavg-var_log_audit
    - /dev/mapper/datavg-var_tmp
    - /dev/mapper/home_crypt

- name: Disk Partitions | AUDIT | Check if /home is mounted
  ansible.builtin.command:
    cmd: mountpoint -q /home
  register: baseline_second_disk_home_mounted
  changed_when: false
  failed_when: false

- name: Disk Partitions | AUDIT | Check if /home is empty
  ansible.builtin.command:
    cmd: ls -A /home
  register: baseline_second_disk_home_files
  when: baseline_second_disk_home_mounted.rc != 0
  changed_when: false

- name: Disk Partitions | AUDIT | Fail if /home is not mounted and not empty
  ansible.builtin.assert:
    that:
      - ((baseline_second_disk_home_files.skipped is defined) and baseline_second_disk_home_files.skipped) or (baseline_second_disk_home_files.stdout == "")

- name: Disk Partitions | PATCH | Ensure /home is mounted
  ansible.posix.mount:
    src: "/dev/mapper/home_crypt"
    path: '/home'
    fstype: 'xfs'
    opts: 'rw,nosuid,nodev'
    state: mounted

- name: Disk Partitions | AUDIT | Check if /var is mounted
  ansible.builtin.command:
    cmd: mountpoint -q /var
  register: baseline_second_disk_var_mounted
  changed_when: false
  failed_when: false

- name: Disk Partitions | PATCH | Migrate content if /var is not mounted
  when: baseline_second_disk_var_mounted.rc != 0
  block:
    - name: Disk Partitions | PATCH | Enter emergency mode
      ansible.builtin.command:
        cmd: systemctl isolate emergency.target

    - name: Disk Partitions | PATCH | Unmount /var/lib/nfs/rpc_pipefs if mounted
      ansible.posix.mount:
        path: /var/lib/nfs/rpc_pipefs
        state: unmounted

    - name: Disk Partitions | PATCH | Migrate data to new partitions
      ansible.builtin.include_tasks:
        file: disk_partitions_migrate.yml
      vars:
        baseline_second_disk_migrate_path: "{{ item }}"
      with_items:
        - "/var"
        - "/var/log"
        - "/var/log/audit"
        - "/var/tmp"

    - name: Disk Partitions | PATCH | Restore default mode
      ansible.builtin.command:
        cmd: systemctl isolate default.target
roles/baseline/tasks/disk_partitions_migrate.yml (Normal file, 31 lines)
@@ -0,0 +1,31 @@
---
- name: 'Disk Partitions | PATCH | Rename {{ baseline_second_disk_migrate_path }} to {{ baseline_second_disk_migrate_path }}.old'
  ansible.builtin.command:
    cmd: 'mv {{ baseline_second_disk_migrate_path }} {{ baseline_second_disk_migrate_path }}.old'

- name: 'Disk Partitions | PATCH | Mount {{ baseline_second_disk_migrate_path }}'
  ansible.posix.mount:
    # e.g. /var/log/audit -> /dev/mapper/datavg-var_log_audit
    src: "/dev/mapper/datavg-{{ baseline_second_disk_migrate_path | replace('/', '', 1) | replace('/', '_') }}"
    path: '{{ baseline_second_disk_migrate_path }}'
    fstype: 'xfs'
    opts: 'rw,{{ "noexec," if baseline_second_disk_migrate_path != "/var" else "" }}nosuid,nodev'
    state: mounted
  # TODO: systemctl daemon-reload after modifying /etc/fstab

- name: 'Disk Partitions | PATCH | Set {{ baseline_second_disk_migrate_path }} permissions'
  ansible.builtin.file:
    path: '{{ baseline_second_disk_migrate_path }}'
    owner: root
    group: root
    mode: '0755'
    state: directory

- name: 'Disk Partitions | PATCH | Move {{ baseline_second_disk_migrate_path }} content'
  ansible.builtin.shell:
    cmd: 'cp -ax * {{ baseline_second_disk_migrate_path }}/'
    chdir: '{{ baseline_second_disk_migrate_path }}.old'

- name: 'Disk Partitions | PATCH | Delete {{ baseline_second_disk_migrate_path }}.old'
  ansible.builtin.file:
    path: '{{ baseline_second_disk_migrate_path }}.old'
    state: absent
roles/baseline/tasks/dns_resolver.yml (Normal file, 46 lines)
@@ -0,0 +1,46 @@
---
- name: DNS Resolver | PATCH | Install systemd-resolved
  ansible.builtin.dnf:
    name: systemd-resolved
    state: latest

- name: DNS Resolver | PATCH | Ensure systemd-resolved is in use
  ansible.builtin.systemd_service:
    name: systemd-resolved
    state: started
    enabled: true
    masked: false

- name: DNS Resolver | PATCH | Remove loopback address entries containing the hostname from /etc/hosts
  ansible.builtin.lineinfile:
    path: /etc/hosts
    regexp: '^(127\.0\.0\.1|::1)\s.*{{ inventory_hostname }}'
    state: absent

- name: DNS Resolver | PATCH | Enable DNSSEC and disable unwanted resolved features
  ansible.builtin.copy:
    src: resolved.conf
    dest: /etc/systemd/resolved.conf
    owner: root
    group: root
    mode: "0644"
  notify: "Restart systemd-resolved"
  become: true

- name: DNS Resolver | PATCH | Ensure /etc/systemd/system/systemd-resolved.service.d exists
  ansible.builtin.file:
    path: /etc/systemd/system/systemd-resolved.service.d
    state: directory
    owner: root
    group: root
    mode: "0755"

- name: DNS Resolver | PATCH | Disable resolved record synthesising
  ansible.builtin.copy:
    src: systemd-resolved-override.conf
    dest: /etc/systemd/system/systemd-resolved.service.d/override.conf
    owner: root
    group: root
    mode: "0644"
  notify: "Restart systemd-resolved"
  become: true
roles/baseline/tasks/ipaclient.yml (Normal file, 25 lines)
@@ -0,0 +1,25 @@
---
- name: FreeIPA Client | PATCH | Join IPA domain
  ansible.builtin.include_role:
    name: freeipa.ansible_freeipa.ipaclient
  vars:
    ipaclient_hostname: "{{ inventory_hostname }}"

- name: FreeIPA Client | AUDIT | Check current authselect configuration
  ansible.builtin.command: authselect current
  register: freeipa_authselect_status
  changed_when: false

- name: FreeIPA Client | PATCH | Apply authselect profile with sssd, sudo, and mkhomedir if not set
  ansible.builtin.command: authselect select sssd with-sudo with-mkhomedir --force
  when: >
    'Profile ID: sssd' not in freeipa_authselect_status.stdout or
    'with-sudo' not in freeipa_authselect_status.stdout or
    'with-mkhomedir' not in freeipa_authselect_status.stdout

- name: FreeIPA Client | PATCH | Enable oddjobd.service (for with-mkhomedir feature)
  ansible.builtin.systemd_service:
    name: oddjobd.service
    state: started
    enabled: true
    masked: false
roles/baseline/tasks/lockdown.yml (Normal file, 29 lines)
@@ -0,0 +1,29 @@
---
- name: Lockdown | AUDIT | Check current authselect configuration
  ansible.builtin.command: authselect current
  register: baseline_lockdown_authselect_status
  failed_when: false  # Exit code is 2 when not configured
  changed_when: false

- name: Lockdown | AUDIT | Do not disable root login if no authselect profile configured
  ansible.builtin.set_fact:
    rhel9cis_rule_5_1_20: false
  when: baseline_lockdown_authselect_status.rc == 2

- name: Lockdown | PATCH | Run Ansible Lockdown (RHEL9-CIS)
  ansible.builtin.include_role:
    name: RHEL9-CIS
  vars:
    # Ensure message of the day is configured properly - we have our own MOTD to apply
    rhel9cis_rule_1_7_1: false
    rhel9cis_rule_1_7_4: false
    # Don't restrict user SSH access in sshd_config - this is managed by FreeIPA
    rhel9cis_rule_5_1_7: false
    # TODO: figure out boot password
    rhel9cis_set_boot_pass: false
    # TODO: We intend to later deploy a remote rsyslog sink
    rhel9cis_syslog: rsyslog
    rhel9cis_time_synchronization_servers: "{{ baseline_ntp_servers }}"
    rhel9cis_warning_banner: "{{ baseline_warning_banner }}"
    rhel9cis_sshd_denyusers: "admin nobody"
  when: (ansible_distribution == "Rocky") and (ansible_distribution_major_version == "9")
roles/baseline/tasks/main.yml (Normal file, 103 lines)
@@ -0,0 +1,103 @@
---
- name: Baseline | PRELIM | Check for supported operating system
  ansible.builtin.assert:
    that:
      - ansible_distribution == "Rocky"
      - ansible_distribution_major_version == "9"

- name: Baseline | PRELIM | Include location specific variables
  ansible.builtin.include_vars:
    file: "{{ baseline_location }}.yml"

- name: Baseline | PATCH | Configure virtual machine for optimal operation as a SolusVM guest
  ansible.builtin.include_tasks:
    file: "solusvm.yml"
  when: baseline_host_type == "solusvm"

- name: Baseline | PATCH | Setup second disk for additional partitions
  ansible.builtin.include_tasks:
    file: disk_partitions.yml
  when: baseline_second_disk_device is defined

- name: Baseline | PATCH | Enable EPEL repository
  block:
    - name: Baseline | PATCH | Install epel-release
      ansible.builtin.dnf:
        name: epel-release
        state: present
    - name: Baseline | PATCH | Restrict packages to be installed from EPEL
      community.general.ini_file:
        path: /etc/yum.repos.d/epel.repo
        section: epel
        option: includepkgs
        value: "{{ baseline_epel_packages_allowed | join(',') }}"
    - name: Baseline | PATCH | Disable EPEL openh264 repository
      community.general.ini_file:
        path: /etc/yum.repos.d/epel-cisco-openh264.repo
        section: epel-cisco-openh264
        option: enabled
        value: 0
  when: (baseline_epel_packages_allowed is defined) and (baseline_epel_packages_allowed | length > 0)

- name: Baseline | PATCH | Remove EPEL repository
  ansible.builtin.dnf:
    name: epel-release
    state: absent
  when: (baseline_epel_packages_allowed is not defined) or (baseline_epel_packages_allowed | length == 0)

- name: Baseline | PATCH | Remove cockpit-ws
  ansible.builtin.dnf:
    name: cockpit-ws
    state: absent

- name: Baseline | PATCH | Flush handlers
  ansible.builtin.meta: flush_handlers

- name: Baseline | PATCH | Ensure message of the day is configured properly (CIS 1.7.1, 1.7.4)
  ansible.builtin.template:
    src: motd.j2
    dest: /etc/motd
    owner: root
    group: root
    mode: 'u-x,go-wx'

- name: Baseline | PATCH | Remove dhcpv6-client service from firewalld
  ansible.posix.firewalld:
    service: dhcpv6-client
    state: disabled
    immediate: true
    permanent: true
    zone: public

- name: Baseline | PATCH | Remove mdns service from firewalld
  ansible.posix.firewalld:
    service: mdns
    state: disabled
    immediate: true
    permanent: true
    zone: public

- name: Baseline | PATCH | Remove cockpit service from firewalld
  ansible.posix.firewalld:
    service: cockpit
    state: disabled
    immediate: true
    permanent: true
    zone: public

- name: Baseline | PATCH | Configure DNS resolver
  ansible.builtin.include_tasks:
    file: dns_resolver.yml

- name: Baseline | PATCH | Flush handlers
  ansible.builtin.meta: flush_handlers

- name: Baseline | PATCH | Join IPA Domain
  ansible.builtin.include_tasks:
    file: ipaclient.yml
  when: "'ipaservers' not in group_names"
roles/baseline/tasks/solusvm.yml (Normal file, 52 lines)
@@ -0,0 +1,52 @@
---
# https://support.solusvm.com/hc/en-us/articles/21334950006807-How-to-install-Guest-Tools-manually-inside-a-VM-in-SolusVM-2
- name: SolusVM Guest | PATCH | Install required packages
  ansible.builtin.dnf:
    name:
      - qemu-guest-agent
      - cloud-init
      - tuned
    state: latest
    update_cache: true
  become: true

- name: SolusVM Guest | PATCH | Enable and start tuned
  ansible.builtin.systemd_service:
    name: tuned
    enabled: true
    state: started
  become: true

- name: SolusVM Guest | AUDIT | Check for tuned profile
  ansible.builtin.command: tuned-adm active
  register: vps_tuned_profile
  become: true
  changed_when: false

- name: SolusVM Guest | PATCH | Start tuned profile (virtual-guest)
  ansible.builtin.shell: tuned-adm profile virtual-guest
  become: true
  when: "'virtual-guest' not in vps_tuned_profile.stdout"

- name: SolusVM Guest | PATCH | Remove console=ttyS0,115200n8 from bootloader configurations
  ansible.builtin.replace:
    path: "{{ item }}"
    regexp: 'console=ttyS0,115200n8'
    replace: ''
  with_items:
    - /etc/default/grub
    - /etc/sysconfig/bootloader
  when: ansible_distribution == 'Rocky'
  notify:
    - Regenerate grub config

- name: SolusVM Guest | AUDIT | Find all vmlinuz-* files in /boot
  ansible.builtin.find:
    paths: /boot
    patterns: 'vmlinuz-*'
  register: baseline_solusvm_kernels

- name: SolusVM Guest | PATCH | Remove console=ttyS0,115200n8 from existing kernel bootloader entries
  ansible.builtin.command:
    cmd: "grubby --update-kernel={{ item.path }} --remove-args='console=ttyS0,115200n8'"
  with_items: "{{ baseline_solusvm_kernels.files }}"
roles/baseline/templates/motd.j2 (Normal file, 14 lines)
@@ -0,0 +1,14 @@

 #####  ######   #####
#     # #     # #     #
#       #     #       #
 #####  ######   #####
      # #   #   #
#     # #    #  #
 #####  #     # #######

* Hostname: {{ inventory_hostname }}
* Last Ansible run: {{ template_run_date }}
* Audit logging is active.
* Don't mess up.

roles/baseline/vars/generic.yml (Normal file, 2 lines)
@@ -0,0 +1,2 @@
---
baseline_host_type: generic
roles/baseline/vars/sr2_de_fsn.yml (Normal file, 6 lines)
@@ -0,0 +1,6 @@
---
baseline_host_type: solusvm
baseline_ntp_servers:
  - ntp1.hetzner.de
  - ntp2.hetzner.com
  - ntp3.hetzner.net
roles/freeipa/tasks/certs.yml (Normal file, 99 lines)
@@ -0,0 +1,99 @@
---
- name: "FreeIPA Certificates | PATCH | Install latest certbot"
  ansible.builtin.dnf:
    name: certbot
    state: latest
    update_cache: true

- name: "FreeIPA Certificates | AUDIT | Check for existing certificate expiry"
  community.crypto.x509_certificate_info:
    path: "/etc/letsencrypt/live/{{ inventory_hostname }}/cert.pem"
  register: freeipa_certs_existing_cert
  ignore_errors: true

- name: "FreeIPA Certificates | AUDIT | Calculate days until expiry"
  ansible.builtin.set_fact:
    freeipa_certs_days_until_expiry: "{{ ((freeipa_certs_existing_cert.not_after | to_datetime('%Y%m%d%H%M%SZ')) - now()).days }}"
  when: freeipa_certs_existing_cert.not_after is defined

- name: "FreeIPA Certificates | AUDIT | Print days until expiry"
  ansible.builtin.debug:
    msg: "{{ freeipa_certs_days_until_expiry }}"
  when: freeipa_certs_existing_cert.not_after is defined

- name: "FreeIPA Certificates | PATCH | Request a new or renewed certificate"
  when: (freeipa_certs_existing_cert.failed) or (freeipa_certs_days_until_expiry | int < 30)
  block:
    - name: "FreeIPA Certificates | PATCH | Download Let's Encrypt Root"
      ansible.builtin.get_url:
        url: "https://letsencrypt.org/certs/{{ item }}.pem"
        dest: /root/{{ item }}.pem
        owner: root
        group: root
        mode: "0600"
      with_items:
        - isrgrootx1
        - isrg-root-x2

    - name: "FreeIPA Certificates | PATCH | Download Let's Encrypt Intermediates"
      ansible.builtin.get_url:
        url: "https://letsencrypt.org/certs/2024/{{ item }}.pem"
        dest: "/root/{{ item }}.pem"
        owner: root
        group: root
        mode: "0600"
      with_items:
        - e7-cross
        - e8-cross
        - r12
        - r13

    - name: "FreeIPA Certificates | AUDIT | Check httpd"
      ansible.builtin.systemd_service:
        name: httpd
      register: freeipa_certs_httpd_status

    - name: "FreeIPA Certificates | PATCH | Stop httpd"
      ansible.builtin.systemd_service:
        name: httpd
        state: stopped
      when: freeipa_certs_httpd_status.status.ActiveState == "active"

    - name: "FreeIPA Certificates | PATCH | Add http service to firewall (in case freeipa service is not yet configured)"
      ansible.posix.firewalld:
        service: http
        state: enabled

    - name: "FreeIPA Certificates | PATCH | Request new certificate"
      ansible.builtin.command:
        cmd: certbot certonly --standalone --preferred-challenges http --agree-tos -n -d {{ inventory_hostname }} --register-unsafely-without-email
      when: freeipa_certs_existing_cert.failed

    - name: "FreeIPA Certificates | PATCH | Renew existing certificate"
      ansible.builtin.command:
        cmd: certbot renew
      when: not freeipa_certs_existing_cert.failed

    - name: "FreeIPA Certificates | PATCH | Remove http service from firewall"
      ansible.posix.firewalld:
        service: http
        state: disabled

    - name: "FreeIPA Certificates | PATCH | Start httpd"
      ansible.builtin.systemd_service:
        name: httpd
        state: started
      when: freeipa_certs_httpd_status.status.ActiveState == "active"

    - name: "FreeIPA Certificates | PATCH | Create PKCS#12 encoded certificate"
      community.crypto.openssl_pkcs12:
        action: export
        path: /root/server.p12
        friendly_name: "{{ inventory_hostname }}"
        privatekey_path: "/etc/letsencrypt/live/{{ inventory_hostname }}/privkey.pem"
        certificate_path: "/etc/letsencrypt/live/{{ inventory_hostname }}/cert.pem"
        other_certificates: "/etc/letsencrypt/live/{{ inventory_hostname }}/chain.pem"
        other_certificates_parse_all: true
        owner: root
        group: root
        mode: "0600"
roles/freeipa/tasks/main.yml (Normal file, 52 lines)
@@ -0,0 +1,52 @@
---
- name: FreeIPA | PATCH | Request or renew Let's Encrypt Certificates
  ansible.builtin.include_tasks:
    file: certs.yml

- name: FreeIPA | PATCH | Deploy first FreeIPA server
  ansible.builtin.include_role:
    name: freeipa.ansible_freeipa.ipaserver
  vars:
    ipaserver_ca_cert_files:
      - /root/isrgrootx1.pem
      - /root/isrg-root-x2.pem
    ipaserver_dirsrv_cert_name: "{{ ansible_inventory }}"
    ipaserver_dirsrv_cert_files: [ "/root/server.p12" ]
    ipaserver_dirsrv_pin: ""
    ipaserver_firewalld_zone: public
    ipaserver_http_cert_name: "{{ ansible_inventory }}"
    ipaserver_http_cert_files: [ "/root/server.p12" ]
    ipaserver_http_pin: ""
    ipaserver_no_hbac_allow: true
    ipaserver_no_pkinit: true
    ipaserver_setup_dns: false
  when: inventory_hostname == groups['ipaservers'][0]

- name: FreeIPA | PATCH | Deploy replica FreeIPA servers
  ansible.builtin.include_role:
    name: freeipa.ansible_freeipa.ipareplica
  vars:
    ipareplica_ca_cert_files:
      - /root/isrgrootx1.pem
      - /root/isrg-root-x2.pem
    ipareplica_dirsrv_cert_name: "{{ ansible_inventory }}"
    ipareplica_dirsrv_cert_files: [ "/root/server.p12" ]
    ipareplica_dirsrv_pin: ""
    ipareplica_firewalld_zone: public
    ipareplica_http_cert_name: "{{ ansible_inventory }}"
    ipareplica_http_cert_files: [ "/root/server.p12" ]
    ipareplica_http_pin: ""
    ipareplica_no_pkinit: true
    ipareplica_setup_dns: false

- name: FreeIPA | AUDIT | Check current authselect configuration
  ansible.builtin.command: authselect current
  register: freeipa_authselect_status
  changed_when: false

- name: FreeIPA | PATCH | Apply authselect profile with sssd, sudo, and mkhomedir if not set
  ansible.builtin.command: authselect select sssd with-sudo with-mkhomedir
  when: >
    'Profile ID: sssd' not in freeipa_authselect_status.stdout or
    'with-sudo' not in freeipa_authselect_status.stdout or
    'with-mkhomedir' not in freeipa_authselect_status.stdout
roles/podman_host/defaults/main.yml (Normal file, 3 lines)
@@ -0,0 +1,3 @@
---
podman_host_minimum_unpriv_port: "1024"
podman_host_rootless_users: ["podman"]
roles/podman_host/tasks/check_subid.yml (Normal file, 41 lines)
@@ -0,0 +1,41 @@
---
- name: Podman Host | AUDIT | Gather rootless user facts
  ansible.builtin.user:
    name: "{{ _podman_host_rootless_user }}"
  register: _podman_host_rootless_user_facts

- name: Podman Host | AUDIT | Resolve name of user's primary group
  ansible.builtin.getent:
    database: group
    key: "{{ _podman_host_rootless_user_facts.group }}"
  register: _podman_host_rootless_user_group

# Running lineinfile with state: absent in check mode counts matching lines
# without modifying the file; the match count is reported in .found.
- name: Podman Host | AUDIT | Check if user is in subuid file
  ansible.builtin.lineinfile:
    path: /etc/subuid
    regexp: '^{{ _podman_host_rootless_user }}:.*$'
    state: absent
  register: uid_line_found
  check_mode: yes
  failed_when: false
  changed_when: false

- name: Podman Host | AUDIT | Check if group is in subgid file
  ansible.builtin.lineinfile:
    path: /etc/subgid
    regexp: '^{{ _podman_host_rootless_user_group.ansible_facts.getent_group | first }}:.*$'
    state: absent
  register: gid_line_found
  check_mode: yes
  failed_when: false
  changed_when: false

- name: Podman Host | AUDIT | Assert that user is in subuid file exactly once
  ansible.builtin.assert:
    that:
      - uid_line_found.found == 1

- name: Podman Host | AUDIT | Assert that group is in subgid file exactly once
  ansible.builtin.assert:
    that:
      - gid_line_found.found == 1
roles/podman_host/tasks/main.yml (Normal file, 75 lines)
@@ -0,0 +1,75 @@
---
- name: Podman Host | PRELIM | Ensure the rootless users are defined and are not root
  ansible.builtin.assert:
    that:
      - podman_host_rootless_users | length > 0
      - '"root" not in podman_host_rootless_users'

- name: Podman Host | AUDIT | Ensure that subuid and subgid are defined for the users
  ansible.builtin.include_tasks:
    file: check_subid.yml
  vars:
    _podman_host_rootless_user: "{{ item }}"
  with_items: "{{ podman_host_rootless_users }}"

- name: Podman Host | PATCH | Set unprivileged port minimum
  ansible.posix.sysctl:
    name: net.ipv4.ip_unprivileged_port_start
    value: "{{ podman_host_minimum_unpriv_port }}"
    sysctl_set: true
    sysctl_file: /etc/sysctl.d/zzz-podman-unpriv-port.conf
    reload: true
  become: true

- name: Podman Host | PATCH | Create users for rootless podman
  ansible.builtin.user:
    name: "{{ item }}"
  become: true
  with_items: "{{ podman_host_rootless_users }}"

- name: Podman Host | PATCH | Set XDG_RUNTIME_DIR in .bash_profile for rootless users
  ansible.builtin.lineinfile:
    path: "/home/{{ item }}/.bash_profile"
    line: "export XDG_RUNTIME_DIR=/run/user/$(id -u)"
    create: false
  become: true
  become_user: "{{ item }}"
  with_items: "{{ podman_host_rootless_users }}"

- name: Podman Host | PATCH | Enable linger for rootless users
  ansible.builtin.command:
    argv:
      - /usr/bin/loginctl
      - enable-linger
      - "{{ item }}"
    creates: "/var/lib/systemd/linger/{{ item }}"
  become: true
  with_items: "{{ podman_host_rootless_users }}"

- name: Podman Host | PATCH | Install Podman
  ansible.builtin.dnf:
    name:
      - podman
      - container-selinux
    state: latest
  become: true

- name: Podman Host | PATCH | Create users quadlets directory
  ansible.builtin.file:
    path: "/home/{{ item }}/.config/containers/systemd"
    state: directory
    owner: "{{ item }}"
    group: "{{ item }}"
    mode: "0700"
  with_items: "{{ podman_host_rootless_users }}"
  become: true

- name: Podman Host | PATCH | Enable podman auto update timer for users
  ansible.builtin.systemd_service:
    name: podman-auto-update.timer
    scope: user
    state: started
    enabled: true
  become: true
  become_user: "{{ item }}"
  with_items: "{{ podman_host_rootless_users }}"
roles/podman_host/templates/subXid.j2 (Normal file, 4 lines)
@@ -0,0 +1,4 @@
# {{ ansible_managed }}
{% for username in podman_host_rootless_users %}
{{ username }}:{{ 100000 + ((loop.index - 1) * 65536) }}:65536
{% endfor %}
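For illustration, with `podman_host_rootless_users: ["alice", "bob"]` (hypothetical usernames) the template renders one non-overlapping range of 65536 subordinate IDs per user, since loop.index starts at 1:

```
# Ansible managed
alice:100000:65536
bob:165536:65536
```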
roles/podman_keycloak/defaults/main.yml (Normal file, 17 lines)
@@ -0,0 +1,17 @@
---
podman_keycloak_certbot_testing: false
podman_keycloak_enable_ldap: true
# podman_keycloak_keycloak_admin_password:
podman_keycloak_keycloak_admin_username: admin
podman_keycloak_keycloak_hostname: "{{ inventory_hostname }}"
podman_keycloak_keycloak_providers: []
#  - url: https://github.com/jacekkow/keycloak-protocol-cas/releases/download/26.4.1/keycloak-protocol-cas-26.4.1.jar
#    sha256: 7692526943063434443411b2d0fac63fb4e46f89b20fb07bb45c360916407367
# podman_keycloak_ldap_administrator_password:
# podman_keycloak_ldap_directory_manager_password:
# podman_keycloak_ldap_database_suffix_dn:
podman_keycloak_podman_rootless_user: keycloak
podman_keycloak_postgres_keycloak_database: keycloak
# podman_keycloak_postgres_keycloak_password:
podman_keycloak_postgres_keycloak_username: keycloak
podman_keycloak_keycloak_additional_volumes: []
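The commented-out keys above are required inputs with no safe defaults. A sketch of supplying them through group vars (every value here is a placeholder and the suffix DN is illustrative):

```yaml
podman_keycloak_keycloak_admin_password: "{{ vault_keycloak_admin_password }}"
podman_keycloak_ldap_administrator_password: "{{ vault_ldap_administrator_password }}"
podman_keycloak_ldap_directory_manager_password: "{{ vault_ldap_directory_manager_password }}"
podman_keycloak_ldap_database_suffix_dn: "dc=example,dc=com"
podman_keycloak_postgres_keycloak_password: "{{ vault_postgres_keycloak_password }}"
```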
roles/podman_keycloak/handlers/main.yml (Normal file, 27 lines)
@@ -0,0 +1,27 @@
---
- name: Restart ldap
  ansible.builtin.systemd_service:
    name: ldap
    state: restarted
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"

- name: Restart postgres
  ansible.builtin.systemd_service:
    name: postgres
    state: restarted
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"

- name: Restart keycloak
  ansible.builtin.systemd_service:
    name: keycloak
    state: restarted
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
roles/podman_keycloak/tasks/ldap.yml (Normal file, 115 lines)
@@ -0,0 +1,115 @@
---
- name: wait 30 seconds for ldap server to start
  ansible.builtin.pause:
    seconds: 30

- name: create ldap suffix
  containers.podman.podman_container_exec:
    name: ldap
    argv:
      - dsconf
      - -v
      - localhost
      - backend
      - create
      - --suffix
      - "{{ podman_keycloak_ldap_database_suffix_dn }}"
      - --be-name
      - "{{ podman_keycloak_ldap_database_backend_name }}"
      - --create-suffix
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
  register: podman_keycloak_create_suffix
  ignore_errors: true
  changed_when: false
  tags:
    - ldap

- name: create suffix result (only when changed)
  ansible.builtin.debug:
    msg: "Suffix was created"
  when: not podman_keycloak_create_suffix.failed
  changed_when: not podman_keycloak_create_suffix.failed

- name: ldap organisational units
  community.general.ldap_entry:
    dn: "ou={{ item }},{{ podman_keycloak_ldap_database_suffix_dn }}"
    objectClass:
      - top
      - organizationalUnit
    server_uri: ldaps://{{ inventory_hostname }}/
    bind_dn: "cn=Directory Manager"
    bind_pw: "{{ podman_keycloak_ldap_directory_manager_password }}"
  delegate_to: localhost
  with_items:
    - Administrators
    - People
    - Groups
  environment:
    - LDAPTLS_REQCERT: "{% if podman_keycloak_certbot_testing %}never{% else %}always{% endif %}"
  tags: ldap

- name: enable memberOf plugin
  containers.podman.podman_container_exec:
    name: ldap
    argv:
      - dsconf
      - -v
      - localhost
      # Pass -D and its value as separate argv elements; quoting them together
      # would hand dsconf a single argument with embedded quotes.
      - -D
      - cn=Directory Manager
      - plugin
      - memberof
      - enable
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
  tags:
    - ldap

- name: disable anonymous bind
  containers.podman.podman_container_exec:
    name: ldap
    argv:
      - dsconf
      - -v
      - localhost
      - -D
      - cn=Directory Manager
      - config
      - replace
      - nsslapd-allow-anonymous-access=off
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
  tags:
    - ldap

- name: ldap read-only administrator
  community.general.ldap_entry:
    dn: "uid=admin,ou=Administrators,{{ podman_keycloak_ldap_database_suffix_dn }}"
    objectClass:
      - top
      - person
      - organizationalPerson
      - inetOrgPerson
    attributes:
      cn: admin
      sn: admin
      userPassword: "{{ podman_keycloak_ldap_administrator_password }}"
    server_uri: ldaps://{{ inventory_hostname }}/
    bind_dn: "cn=Directory Manager"
    bind_pw: "{{ podman_keycloak_ldap_directory_manager_password }}"
  delegate_to: localhost
  environment:
    - LDAPTLS_REQCERT: "{% if podman_keycloak_certbot_testing %}never{% else %}always{% endif %}"
  tags: ldap

# The ACI grants the read-only administrator search/read/compare over the whole suffix.
- name: ldap access control information
  community.general.ldap_attrs:
    dn: "{{ podman_keycloak_ldap_database_suffix_dn }}"
    attributes:
      aci: '(target="ldap:///{{ podman_keycloak_ldap_database_suffix_dn }}")(targetattr="*") (version 3.0; acl "readonly"; allow (search,read,compare) userdn="ldap:///uid=admin,ou=Administrators,{{ podman_keycloak_ldap_database_suffix_dn }}";)'
    server_uri: ldaps://{{ inventory_hostname }}/
    bind_dn: "cn=Directory Manager"
    bind_pw: "{{ podman_keycloak_ldap_directory_manager_password }}"
  delegate_to: localhost
  environment:
    - LDAPTLS_REQCERT: "{% if podman_keycloak_certbot_testing %}never{% else %}always{% endif %}"
  tags: ldap
roles/podman_keycloak/tasks/main.yml (Normal file, 160 lines)
@@ -0,0 +1,160 @@
---
- name: Podman Keycloak | PATCH | Install podman and create rootless podman user
  ansible.builtin.include_role:
    name: sr2c.core.podman_host
  vars:
    podman_host_minimum_unpriv_port: 80
    podman_host_rootless_users: ["keycloak"]

- name: Podman Keycloak | PATCH | Enable http service with firewalld
  ansible.posix.firewalld:
    service: http
    state: enabled
    immediate: true
    permanent: true
    zone: public

- name: Podman Keycloak | PATCH | Enable https service with firewalld
  ansible.posix.firewalld:
    service: https
    state: enabled
    immediate: true
    permanent: true
    zone: public

# TODO: These will be relabelled by podman but in the future we should label them from the start
- name: Podman Keycloak | PATCH | Create service configuration directories
  ansible.builtin.file:
    path: "/home/{{ podman_keycloak_podman_rootless_user }}/{{ item }}"
    state: directory
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    group: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0755"
  become: true
  with_items:
    - keycloak
    - ldap
    - postgres
  when: (item != 'ldap') or podman_keycloak_enable_ldap

- name: Podman Keycloak | PATCH | Download keycloak providers
  ansible.builtin.get_url:
    url: "{{ item.url }}"
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/keycloak/{{ item.url | basename }}"
    checksum: "sha256:{{ item.sha256 }}"
  with_items: "{{ podman_keycloak_keycloak_providers }}"
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
  notify: Restart keycloak

- name: Podman Keycloak | PATCH | Install systemd target
  ansible.builtin.template:
    src: "keycloak.target"
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/.config/systemd/user/keycloak.target"
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0400"
  become: true

- name: Podman Keycloak | PATCH | Install systemd slice
  ansible.builtin.template:
    src: "keycloak.slice"
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/.config/systemd/user/keycloak.slice"
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0400"
  become: true

- name: Podman Keycloak | PATCH | Install container quadlets
  ansible.builtin.template:
    src: "{{ item }}"
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0400"
  with_items:
    - ldap.container
    - keycloak.container
    - postgres.container
  when: (item != 'ldap.container') or podman_keycloak_enable_ldap
  notify:
    - "Restart {{ item | split('.') | first }}"
  become: true

- name: Podman Keycloak | PATCH | Install network quadlets
  ansible.builtin.template:
    src: "{{ item }}"
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0400"
  with_items:
    - frontend.network
    - ldap.network
    - keycloak.network
  when: (item != 'ldap.network') or podman_keycloak_enable_ldap
  become: true

- name: Podman Keycloak | AUDIT | Verify quadlets are correctly defined
  ansible.builtin.command: /usr/libexec/podman/quadlet -dryrun -user
  register: podman_keycloak_quadlet_result
  ignore_errors: true
  changed_when: false
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"

- name: Podman Keycloak | AUDIT | Assert that the quadlet verification succeeded
  ansible.builtin.assert:
    that:
      - podman_keycloak_quadlet_result.rc == 0
    fail_msg: "'/usr/libexec/podman/quadlet -dryrun -user' failed! Output withheld to prevent leaking secrets."

- name: Podman Keycloak | PATCH | Start PostgreSQL and keycloak containers
  ansible.builtin.systemd_service:
    name: "{{ item }}"
    state: started
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
  with_items:
    - postgres
    - keycloak

- name: Podman Keycloak | PATCH | Configure nginx container
  ansible.builtin.include_role:
    name: sr2c.core.podman_nginx
  vars:
    podman_nginx_podman_rootless_user: "{{ podman_keycloak_podman_rootless_user }}"
    podman_nginx_primary_hostname: "{{ podman_keycloak_keycloak_hostname }}"
    podman_nginx_frontend_network: frontend
    podman_nginx_systemd_service_slice: keycloak.slice
    podman_nginx_systemd_service_target: keycloak.target

- name: Podman Keycloak | PATCH | Start LDAP container
  ansible.builtin.systemd_service:
    name: ldap
    state: started
    scope: user
  when: podman_keycloak_enable_ldap
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"

- name: Podman Keycloak | PATCH | Create nginx configuration file
  ansible.builtin.template:
    src: nginx.conf
    dest: "/home/{{ podman_keycloak_podman_rootless_user }}/nginx/nginx.conf"
    owner: "{{ podman_keycloak_podman_rootless_user }}"
    group: "{{ podman_keycloak_podman_rootless_user }}"
    mode: "0644"
  become: true
  notify: restart nginx

- name: Podman Keycloak | PATCH | Configure the LDAP directory
  ansible.builtin.include_tasks:
    file: ldap.yml
  when: podman_keycloak_enable_ldap

- name: Podman Keycloak | PATCH | Enable keycloak.target
  ansible.builtin.systemd_service:
    name: keycloak.target
    state: started
    enabled: true
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_keycloak_podman_rootless_user }}"
roles/podman_keycloak/templates/frontend.network (Normal file, 5 lines)
@@ -0,0 +1,5 @@
[Network]
Driver=bridge

[Install]
WantedBy=keycloak.target
roles/podman_keycloak/templates/keycloak.container (Normal file, 40 lines)
@@ -0,0 +1,40 @@
[Unit]
Requires=postgres.service
After=postgres.service
PartOf=keycloak.target

[Container]
AutoUpdate=registry
ContainerName=keycloak
Environment=KC_LOG_LEVEL=info
Environment=KC_DB=postgres
Environment=KC_DB_PASSWORD={{ podman_keycloak_postgres_keycloak_password }}
Environment=KC_DB_URL=jdbc:postgresql://postgres/{{ podman_keycloak_postgres_keycloak_database }}
Environment=KC_DB_USERNAME={{ podman_keycloak_postgres_keycloak_username }}
Environment=KC_HOSTNAME={{ podman_keycloak_keycloak_hostname }}
Environment=KC_HTTP_ENABLED=true
Environment=KC_HTTP_PORT=8080
Environment=KC_PROXY_HEADERS=xforwarded
Environment=KC_BOOTSTRAP_ADMIN_USERNAME={{ podman_keycloak_keycloak_admin_username }}
Environment=KC_BOOTSTRAP_ADMIN_PASSWORD={{ podman_keycloak_keycloak_admin_password }}
Environment=PROXY_ADDRESS_FORWARDING=true
Exec=start --features=quick-theme
Image=quay.io/keycloak/keycloak:26.4
Network=keycloak.network
{% if podman_keycloak_enable_ldap %}
Network=ldap.network
{% endif %}
Network=frontend.network
{% for provider in podman_keycloak_keycloak_providers %}
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/keycloak/{{ provider.url | basename }}:/opt/keycloak/providers/{{ provider.url | basename }}:ro,z
{% endfor %}
{% for item in podman_keycloak_keycloak_additional_volumes %}
Volume={{ item.src }}:{{ item.dest }}:{{ item.options }}
{% endfor %}

[Service]
Slice=keycloak.slice
Restart=always

[Install]
WantedBy=keycloak.target
roles/podman_keycloak/templates/keycloak.network (Normal file, 5 lines)
@@ -0,0 +1,5 @@
[Network]
Driver=bridge

[Install]
WantedBy=keycloak.target
roles/podman_keycloak/templates/keycloak.slice (Normal file, 2 lines)
@@ -0,0 +1,2 @@
[Unit]
Description=Podman Keycloak Stack by SR2 Communications
10
roles/podman_keycloak/templates/keycloak.target
Normal file
@@ -0,0 +1,10 @@
[Unit]
Description=Podman Keycloak Stack by SR2 Communications
Requires=keycloak.service
{% if podman_keycloak_enable_ldap %}
Requires=ldap.service
{% endif %}
Requires=nginx.service

[Install]
WantedBy=default.target
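This target is the single control point for the stack: the container units are `PartOf=` and `WantedBy=` the target, and the target `Requires=` the services, so `systemctl --user start keycloak.target` (as in the enable task above) brings up postgres, keycloak, nginx and, when enabled, ldap together, and stopping the target stops them all.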
22
roles/podman_keycloak/templates/ldap.container
Normal file
@@ -0,0 +1,22 @@
[Unit]
PartOf=keycloak.target

[Container]
ContainerName=ldap
Environment=DS_DM_PASSWORD={{ podman_keycloak_ldap_directory_manager_password }}
Image=quay.io/389ds/dirsrv:latest
Network=ldap.network
PublishPort=636:3636/tcp
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/ldap:/data:rw,Z
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/certbot/conf/live/{{ podman_keycloak_keycloak_hostname }}/privkey.pem:/data/tls/server.key:ro,z
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/certbot/conf/live/{{ podman_keycloak_keycloak_hostname }}/cert.pem:/data/tls/server.crt:ro,z
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/certbot/conf/live/{{ podman_keycloak_keycloak_hostname }}/chain.pem:/data/tls/ca/chain.crt:ro,z

[Service]
Slice=keycloak.slice
Restart=always
# RuntimeMaxSec is used to restart the service periodically to pick up new Let's Encrypt certificates
RuntimeMaxSec=604800

[Install]
WantedBy=keycloak.target
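Two details worth noting above: `PublishPort=636:3636/tcp` exposes the directory server's internal LDAPS port 3636 as the standard 636 on the host, and `RuntimeMaxSec=604800` is seven days, so the weekly restart re-reads the certbot `live/` files mounted read-only into the container's TLS paths, keeping the certificate current without a dedicated reload mechanism.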
5
roles/podman_keycloak/templates/ldap.network
Normal file
@@ -0,0 +1,5 @@
[Network]
Driver=bridge

[Install]
WantedBy=keycloak.target
39
roles/podman_keycloak/templates/nginx.conf
Normal file
@@ -0,0 +1,39 @@
# {{ ansible_managed }}

server {
    listen 80;
    listen [::]:80;

    server_name {{ podman_keycloak_keycloak_hostname }};
    server_tokens off;

    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    location / {
        return 301 https://{{ podman_keycloak_keycloak_hostname }}$request_uri;
    }
}

server {
    listen 443 default_server ssl;
    listen [::]:443 ssl;
    http2 on;

    server_name {{ podman_keycloak_keycloak_hostname }};
    server_tokens off;

    ssl_certificate /etc/letsencrypt/live/{{ podman_keycloak_keycloak_hostname }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ podman_keycloak_keycloak_hostname }}/privkey.pem;

    location / {
        proxy_pass http://keycloak:8080/;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Port 443;
    }
}
21
roles/podman_keycloak/templates/postgres.container
Normal file
@@ -0,0 +1,21 @@
[Unit]
PartOf=keycloak.target

[Container]
AutoUpdate=registry
ContainerName=postgres
Environment=POSTGRES_DB={{ podman_keycloak_postgres_keycloak_database }}
Environment=POSTGRES_PASSWORD={{ podman_keycloak_postgres_keycloak_password }}
Environment=POSTGRES_USER={{ podman_keycloak_postgres_keycloak_username }}
Environment=POSTGRES_HOST_AUTH_METHOD=scram-sha-256
Environment=POSTGRES_INITDB_ARGS=--auth-host=scram-sha-256
Image=docker.io/postgres:17.3
Network=keycloak.network
Volume=/home/{{ podman_keycloak_podman_rootless_user }}/postgres:/var/lib/postgresql/data:rw,Z

[Service]
Slice=keycloak.slice
Restart=always

[Install]
WantedBy=keycloak.target
10
roles/podman_nginx/defaults/main.yml
Normal file
@@ -0,0 +1,10 @@
---
podman_nginx_additional_hostnames: []
podman_nginx_certbot_testing: false
# podman_nginx_frontend_network:
podman_nginx_podman_rootless_user: nginx
# podman_nginx_primary_hostname:
# podman_nginx_systemd_service_slice:
# podman_nginx_systemd_service_target:
podman_nginx_systemd_service_requires: []
podman_nginx_additional_volumes: []
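The commented-out keys are required inputs with no usable default. A hypothetical wiring for the Keycloak stack (the hostname is a placeholder; the network, slice and target names match the `podman_keycloak` templates earlier in this commit):

```yaml
podman_nginx_primary_hostname: sso.example.com  # placeholder
podman_nginx_frontend_network: frontend         # joins frontend.network
podman_nginx_systemd_service_requires:
  - keycloak
podman_nginx_systemd_service_slice: keycloak.slice
podman_nginx_systemd_service_target: keycloak.target
```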
18
roles/podman_nginx/handlers/main.yml
Normal file
@@ -0,0 +1,18 @@
---
- name: Restart certbot-renew
  ansible.builtin.systemd_service:
    name: certbot-renew
    state: started
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"

- name: Restart nginx
  ansible.builtin.systemd_service:
    name: nginx
    state: restarted
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"
111
roles/podman_nginx/tasks/main.yml
Normal file
@@ -0,0 +1,111 @@
---
- name: Podman Nginx | PATCH | Create service configuration directories
  ansible.builtin.file:
    path: "/home/{{ podman_nginx_podman_rootless_user }}/{{ item }}"
    state: directory
    owner: "{{ podman_nginx_podman_rootless_user }}"
    group: "{{ podman_nginx_podman_rootless_user }}"
    mode: "0755"
  become: true
  with_items:
    - .config/containers/systemd  # quadlet directory; the template task below fails if it is missing
    - .config/systemd/user
    - certbot/conf
    - certbot/www
    - nginx

- name: Podman Nginx | PATCH | Install podman quadlet for rootless podman user
  ansible.builtin.template:
    src: "{{ item }}"
    dest: "/home/{{ podman_nginx_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
    owner: "{{ podman_nginx_podman_rootless_user }}"
    mode: "0400"
  with_items:
    - certbot-renew.container
    - nginx.container
  notify:
    - "Restart {{ item | split('.') | first }}"
  become: true

- name: Podman Nginx | PATCH | Install certbot renewal timer for rootless podman user
  ansible.builtin.template:
    src: "certbot-renew.timer"
    dest: "/home/{{ podman_nginx_podman_rootless_user }}/.config/systemd/user/certbot-renew.timer"
    owner: "{{ podman_nginx_podman_rootless_user }}"
    mode: "0400"
  become: true

- name: Podman Nginx | AUDIT | Verify quadlets are correctly defined
  ansible.builtin.command: /usr/libexec/podman/quadlet -dryrun -user
  register: podman_nginx_quadlet_result
  ignore_errors: true
  changed_when: false
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"

- name: Podman Nginx | AUDIT | Check if certificate exists
  ansible.builtin.stat:
    path: "/home/{{ podman_nginx_podman_rootless_user }}/certbot/conf/live/{{ podman_nginx_primary_hostname }}/fullchain.pem"
  register: podman_nginx_cert_stat
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"

- name: Podman Nginx | PATCH | Create temporary nginx configuration (no https)
  ansible.builtin.template:
    src: nginx.conf
    dest: "/home/{{ podman_nginx_podman_rootless_user }}/nginx/nginx.conf"
    owner: "{{ podman_nginx_podman_rootless_user }}"
    group: "{{ podman_nginx_podman_rootless_user }}"
    mode: "0644"
  become: true
  when: not podman_nginx_cert_stat.stat.exists

- name: Podman Nginx | PATCH | Start nginx
  ansible.builtin.systemd_service:
    name: nginx
    state: started
    scope: user
    daemon_reload: true
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"

- name: Podman Nginx | PATCH | Run certbot container to create certificate
  ansible.builtin.command:
    cmd: >
      podman run --name certbot-generate
      --rm
      --volume /home/{{ podman_nginx_podman_rootless_user }}/certbot/www:/var/www/certbot:rw,z
      --volume /home/{{ podman_nginx_podman_rootless_user }}/certbot/conf:/etc/letsencrypt:rw,z
      docker.io/certbot/certbot:latest
      certonly
      --register-unsafely-without-email
      --agree-tos
      --webroot
      --webroot-path /var/www/certbot/
      -d "{{ podman_nginx_primary_hostname }}"
      {% for hostname in podman_nginx_additional_hostnames %} -d "{{ hostname }}"{% endfor %}
      {% if podman_nginx_certbot_testing %} --test-cert{% endif %}
  when: not podman_nginx_cert_stat.stat.exists
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"

- name: Podman Nginx | AUDIT | Check if certificate exists
  ansible.builtin.stat:
    path: "/home/{{ podman_nginx_podman_rootless_user }}/certbot/conf/live/{{ podman_nginx_primary_hostname }}/fullchain.pem"
  register: podman_nginx_cert_stat
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"

- name: Podman Nginx | AUDIT | Assert that certificate exists now
  ansible.builtin.assert:
    that:
      - podman_nginx_cert_stat.stat.exists
    fail_msg: "Failed to get a Let's Encrypt certificate."

- name: Podman Nginx | PATCH | Start certbot renewal timer
  ansible.builtin.systemd_service:
    name: "certbot-renew.timer"
    state: started
    enabled: true
    scope: user
  become: true
  become_user: "{{ podman_nginx_podman_rootless_user }}"
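The certificate bootstrap above is deliberately two-phase: when no certificate exists yet, an HTTP-only `nginx.conf` is templated so the ACME webroot challenge can be answered, certbot then writes the certificate into `certbot/conf`, and the repeated stat plus the assert stop the play if issuance failed. A consumer of this role (such as `podman_keycloak`, whose full HTTP+HTTPS `nginx.conf` appears earlier in this commit) is then expected to replace the temporary configuration.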
13
roles/podman_nginx/templates/certbot-renew.container
Normal file
@@ -0,0 +1,13 @@
[Unit]
Description=Run certbot renew

[Container]
AutoUpdate=registry
ContainerName=certbot-renew
Exec=renew
Image=docker.io/certbot/certbot:latest
Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/www:/var/www/certbot:z
Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/conf:/etc/letsencrypt:z

[Service]
Restart=no
9
roles/podman_nginx/templates/certbot-renew.timer
Normal file
@@ -0,0 +1,9 @@
[Unit]
Description=Timer for certbot renewals

[Timer]
OnCalendar=daily
Persistent=true

[Install]
WantedBy=timers.target
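`OnCalendar=daily` with `Persistent=true` means a run missed while the host was down fires at the next opportunity, and `certbot renew` is a no-op until a certificate enters its renewal window, so the daily cadence is safe. Renewed files are then picked up when the nginx container hits the weekly `RuntimeMaxSec` restart in `nginx.container` below.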
17
roles/podman_nginx/templates/nginx.conf
Normal file
@@ -0,0 +1,17 @@
# {{ ansible_managed }}

server {
    listen 80;
    listen [::]:80;

    server_name {{ podman_nginx_primary_hostname }};
    server_tokens off;

    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    location / {
        return 301 https://{{ podman_nginx_primary_hostname }}$request_uri;
    }
}
34
roles/podman_nginx/templates/nginx.container
Normal file
@@ -0,0 +1,34 @@
[Unit]
{% for req in podman_nginx_systemd_service_requires %}
Requires={{ req }}.service
After={{ req }}.service
{% endfor %}
{% if podman_nginx_systemd_service_target is defined %}
PartOf={{ podman_nginx_systemd_service_target }}
{% endif %}

[Container]
ContainerName=nginx
Image=docker.io/nginx:1
{% if podman_nginx_frontend_network is defined %}Network={{ podman_nginx_frontend_network }}.network{% endif +%}
PublishPort=80:80
PublishPort=443:443
Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/www:/var/www/certbot/:ro,z
Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/conf/:/etc/letsencrypt/:ro,z
Volume=/home/{{ podman_nginx_podman_rootless_user }}/nginx:/etc/nginx/conf.d/:ro,z

{% for item in podman_nginx_additional_volumes %}
Volume={{ item.src }}:{{ item.dest }}:{{ item.options }}
{% endfor %}

[Service]
RuntimeMaxSec=604800
Restart=always
{% if podman_nginx_systemd_service_slice is defined %}
Slice={{ podman_nginx_systemd_service_slice }}
{% endif %}
{% if podman_nginx_systemd_service_target is defined %}

[Install]
WantedBy=default.target
{% endif %}