diff --git a/playbooks/core_services.yml b/playbooks/services.yml similarity index 57% rename from playbooks/core_services.yml rename to playbooks/services.yml index b579d96..c06a62f 100644 --- a/playbooks/core_services.yml +++ b/playbooks/services.yml @@ -2,7 +2,6 @@ - name: Deploy and update the FreeIPA servers hosts: - ipaservers - become: true # Required by FreeIPA roles vars: # Required for FreeIPA setup baseline_epel_packages_allowed: @@ -16,6 +15,7 @@ - python3-acme - python3-zipp - python3-pyOpenSSL + - node-exporter rhel9cis_dns_server: true rhel9cis_httpd_server: true # TODO: Restricted umask breaks FreeIPA roles @@ -23,9 +23,14 @@ rhel9cis_rule_5_4_3_3: false roles: - role: sr2c.core.baseline + baseline_epel_packages_allowed: + - node-exporter tags: bootstrap - role: sr2c.core.freeipa + become: true tags: freeipa + - role: sr2c.core.node_exporter + tags: prometheus - name: Deploy and update the Keycloak server hosts: @@ -33,16 +38,42 @@ become: true roles: - role: sr2c.core.baseline + baseline_epel_packages_allowed: + - node-exporter tags: bootstrap - role: freeipa.ansible_freeipa.ipaclient state: present tags: bootstrap - role: sr2c.core.podman_keycloak tags: keycloak + - role: sr2c.core.node_exporter + tags: prometheus + +- name: Deploy and update the Prometheus server + hosts: + - prometheus + roles: + - role: sr2c.core.baseline + vars: + baseline_epel_packages_allowed: + - node-exporter + tags: bootstrap + - role: freeipa.ansible_freeipa.ipaclient + become: true + state: present + tags: bootstrap + - role: sr2c.core.node_exporter + tags: prometheus + - role: sr2c.core.podman_prometheus + tags: prometheus - name: Baseline for generic servers (manual or externally managed application deployment) hosts: - generic roles: - role: sr2c.core.baseline + baseline_epel_packages_allowed: + - node-exporter tags: bootstrap + - role: sr2c.core.node_exporter + tags: prometheus diff --git a/plugins/modules/cloudns_monitor.py b/plugins/modules/cloudns_monitor.py 
index 6761a4c..49e3170 100644 --- a/plugins/modules/cloudns_monitor.py +++ b/plugins/modules/cloudns_monitor.py @@ -214,7 +214,7 @@ def run_module(): r.raise_for_status() for email in notifications_to_remove: - url = "https://api.cloudns.net/monitoring/delete-notifications.json" + url = "https://api.cloudns.net/monitoring/delete-notification.json" params = { 'auth-id': module.params['auth_id'], 'auth-password': module.params['auth_password'], diff --git a/roles/baseline/handlers/main.yml b/roles/baseline/handlers/main.yml index 894bba4..032d30f 100644 --- a/roles/baseline/handlers/main.yml +++ b/roles/baseline/handlers/main.yml @@ -10,3 +10,9 @@ state: restarted daemon_reload: true become: true + +- name: Reload firewalld + ansible.builtin.systemd_service: + name: firewalld + state: reloaded + become: true diff --git a/roles/baseline/tasks/lockdown.yml b/roles/baseline/tasks/lockdown.yml index 7180720..e47a3b1 100644 --- a/roles/baseline/tasks/lockdown.yml +++ b/roles/baseline/tasks/lockdown.yml @@ -32,4 +32,5 @@ # ipaservers are part of Linux Identity Management. Joining your host to an IdM # domain automatically configures SSSD authentication on your host. 
rhel9cis_allow_authselect_updates: false + rhel9cis_auditd_max_log_file_action: rotate when: (ansible_distribution == "Rocky") and (ansible_distribution_major_version == "9") diff --git a/roles/node_exporter/defaults/main.yml b/roles/node_exporter/defaults/main.yml new file mode 100644 index 0000000..b5364e3 --- /dev/null +++ b/roles/node_exporter/defaults/main.yml @@ -0,0 +1,2 @@ +# node_exporter_password: +# node_exporter_textfile_directory: diff --git a/roles/node_exporter/handlers/main.yml b/roles/node_exporter/handlers/main.yml new file mode 100644 index 0000000..bd54ff5 --- /dev/null +++ b/roles/node_exporter/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Restart Node Exporter + ansible.builtin.systemd_service: + name: prometheus-node-exporter + state: restarted + become: true diff --git a/roles/node_exporter/tasks/main.yml b/roles/node_exporter/tasks/main.yml new file mode 100644 index 0000000..2278dc4 --- /dev/null +++ b/roles/node_exporter/tasks/main.yml @@ -0,0 +1,83 @@ +--- +- name: Node Exporter | PATCH | Install node-exporter + become: true + ansible.builtin.dnf: + name: node-exporter + state: present + +- name: Node Exporter | PATCH | Generate private TLS key + community.crypto.openssl_privatekey: + path: /etc/ssl/node-exporter.key + size: 4096 + owner: prometheus + group: root + mode: '0440' + become: true + +- name: Node Exporter | PATCH | Create certificate signing request + community.crypto.openssl_csr: + path: /etc/ssl/node-exporter.csr + privatekey_path: /etc/ssl/node-exporter.key + common_name: "{{ inventory_hostname }}" + subject_alt_name: "DNS:{{ inventory_hostname }}" + owner: root + group: root + mode: '0400' + become: true + +- name: Generate self-signed certificate + community.crypto.x509_certificate: + provider: selfsigned + path: /etc/ssl/node-exporter.crt + privatekey_path: /etc/ssl/node-exporter.key + csr_path: /etc/ssl/node-exporter.csr + owner: prometheus + group: root + mode: '0440' + become: true + +- name: Node Exporter | PATCH 
| Install node-exporter web configuration + become: true + ansible.builtin.template: + src: etc/node-exporter-web.yml + dest: /etc/node-exporter-web.yml + owner: root + group: root + mode: "0444" + +- name: Node Exporter | PATCH | Set command line arguments + become: true + ansible.builtin.lineinfile: + path: /etc/default/prometheus-node-exporter + regexp: "^ARGS" + line: "ARGS='--web.config.file=\"/etc/node-exporter-web.yml\"{% if node_exporter_textfile_directory is defined %} --collector.textfile.directory {{ node_exporter_textfile_directory }}{% endif %}'" + notify: Restart Node Exporter + +- name: Node Exporter | PATCH | Ensure node-exporter is enabled and running + become: true + ansible.builtin.systemd_service: + name: prometheus-node-exporter + masked: false + enabled: true + state: started + +- name: Node Exporter | PATCH | Create firewalld service file for node-exporter + become: true + ansible.builtin.template: + src: etc/firewalld/services/node-exporter.xml + dest: /etc/firewalld/services/node-exporter.xml + owner: root + group: root + mode: '0400' + notify: Reload firewalld + +- name: Node Exporter | Flush handlers + ansible.builtin.meta: flush_handlers + +- name: Node Exporter | PATCH | Enable node-exporter service in firewalld permanently + become: true + ansible.posix.firewalld: + service: node-exporter + permanent: true + state: enabled + immediate: true diff --git a/roles/node_exporter/templates/etc/firewalld/services/node-exporter.xml b/roles/node_exporter/templates/etc/firewalld/services/node-exporter.xml new file mode 100644 index 0000000..26852ff --- /dev/null +++ b/roles/node_exporter/templates/etc/firewalld/services/node-exporter.xml @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<service> +  <short>Node Exporter</short> +  <description>Exposes metrics for Prometheus scraping</description> +  <port protocol="tcp" port="9100"/> +</service> diff --git a/roles/node_exporter/templates/etc/node-exporter-web.yml b/roles/node_exporter/templates/etc/node-exporter-web.yml new file mode 100644 index 0000000..786c1ce --- /dev/null +++
b/roles/node_exporter/templates/etc/node-exporter-web.yml @@ -0,0 +1,17 @@ +--- +tls_server_config: + cert_file: /etc/ssl/node-exporter.crt + key_file: /etc/ssl/node-exporter.key + + min_version: "TLS13" + max_version: "TLS13" + +http_server_config: + headers: + X-Frame-Options: deny + X-Content-Type-Options: nosniff + +{% if node_exporter_password is defined %} +basic_auth_users: + metrics: "{{ node_exporter_password | password_hash(hashtype='bcrypt') }}" +{% endif %} diff --git a/roles/podman_host/handlers/main.yml b/roles/podman_host/handlers/main.yml new file mode 100644 index 0000000..77826fe --- /dev/null +++ b/roles/podman_host/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Restart systemd-logind + ansible.builtin.systemd_service: + name: systemd-logind + state: restarted + become: true diff --git a/roles/podman_host/tasks/main.yml b/roles/podman_host/tasks/main.yml index 96254ff..24d9e2e 100644 --- a/roles/podman_host/tasks/main.yml +++ b/roles/podman_host/tasks/main.yml @@ -37,6 +37,7 @@ become: true become_user: "{{ item }}" with_items: "{{ podman_host_rootless_users }}" + notify: Restart systemd-logind - name: Podman Host | PATCH | Set XDG_RUNTIME_DIR in .bashrc for rootless users ansible.builtin.lineinfile: @@ -45,6 +46,10 @@ become: true become_user: "{{ item }}" with_items: "{{ podman_host_rootless_users }}" + notify: Restart systemd-logind + +- name: Podman Host | Flush handlers + ansible.builtin.meta: flush_handlers - name: Podman Host | PATCH | Enable linger for rootless users ansible.builtin.command: diff --git a/roles/podman_nginx/defaults/main.yml b/roles/podman_nginx/defaults/main.yml index 75ea8c4..24bd328 100644 --- a/roles/podman_nginx/defaults/main.yml +++ b/roles/podman_nginx/defaults/main.yml @@ -1,5 +1,6 @@ --- podman_nginx_additional_hostnames: [] +podman_nginx_additional_publish_ports: [] podman_nginx_certbot_testing: false # podman_nginx_frontend_network: podman_nginx_podman_rootless_user: nginx diff --git 
a/roles/podman_nginx/templates/nginx.container b/roles/podman_nginx/templates/nginx.container index 03eb83b..eaf62c7 100644 --- a/roles/podman_nginx/templates/nginx.container +++ b/roles/podman_nginx/templates/nginx.container @@ -13,10 +13,13 @@ Image=docker.io/nginx:1 {% if podman_nginx_frontend_network is defined %}Network={{ podman_nginx_frontend_network }}.network{% endif +%} PublishPort=80:80 PublishPort=443:443 +{% for item in podman_nginx_additional_publish_ports %} +PublishPort={{ item }} +{% endfor %} + Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/www:/var/www/certbot/:ro,z Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/conf/:/etc/letsencrypt/:ro,z Volume=/home/{{ podman_nginx_podman_rootless_user }}/nginx:/etc/nginx/conf.d/:ro,z - {% for item in podman_nginx_additional_volumes %} Volume={{ item.src }}:{{ item.dest }}:{{ item.options }} {% endfor %} diff --git a/roles/podman_prometheus/defaults/main.yml b/roles/podman_prometheus/defaults/main.yml new file mode 100644 index 0000000..607210c --- /dev/null +++ b/roles/podman_prometheus/defaults/main.yml @@ -0,0 +1,2 @@ +--- +podman_prometheus_podman_rootless_user: prometheus diff --git a/roles/podman_prometheus/handlers/main.yml b/roles/podman_prometheus/handlers/main.yml new file mode 100644 index 0000000..7165847 --- /dev/null +++ b/roles/podman_prometheus/handlers/main.yml @@ -0,0 +1,8 @@ +--- +- name: Restart Prometheus + ansible.builtin.systemd_service: + name: prometheus + scope: user + state: restarted + become: true + become_user: "{{ podman_prometheus_podman_rootless_user }}" diff --git a/roles/podman_prometheus/tasks/main.yml b/roles/podman_prometheus/tasks/main.yml new file mode 100644 index 0000000..962b535 --- /dev/null +++ b/roles/podman_prometheus/tasks/main.yml @@ -0,0 +1,148 @@ +--- +- name: Podman Prometheus | PATCH | Install data plate + ansible.builtin.template: + src: etc/motd.d/10-data-plate.txt + dest: /etc/motd.d/10-data-plate.txt + owner: root + group: 
root + mode: "0444" + become: true + +- name: Podman Prometheus | PATCH | Install podman and verify rootless podman user + ansible.builtin.include_role: + role: sr2c.core.podman_host + vars: + podman_host_minimum_unpriv_port: 80 + podman_host_rootless_users: ["{{ podman_prometheus_podman_rootless_user }}"] + +- name: Podman Prometheus | AUDIT | Get subuid range for user + ansible.builtin.command: + cmd: "getsubids {{ podman_prometheus_podman_rootless_user }}" + register: _podman_prometheus_user_subuid + changed_when: false + +- name: Podman Prometheus | AUDIT | Get subgid range for user + ansible.builtin.command: + cmd: "getsubids -g {{ podman_prometheus_podman_rootless_user }}" + register: _podman_prometheus_user_subgid + changed_when: false + +- name: Podman Prometheus | AUDIT | Parse outputs of getsubids and store results + ansible.builtin.set_fact: + _podman_prometheus_user_subuid_start: "{{ (_podman_prometheus_user_subuid.stdout_lines[0].split()[2] | int) }}" + _podman_prometheus_user_subgid_start: "{{ (_podman_prometheus_user_subgid.stdout_lines[0].split()[2] | int) }}" + +# Prometheus runs with UID/GID 65534 inside the container +- name: Podman Prometheus | PATCH | Create data directory for Prometheus + ansible.builtin.file: + path: "/home/{{ podman_prometheus_podman_rootless_user }}/prometheus-data" + owner: "{{ _podman_prometheus_user_subuid_start + 65533 }}" + group: "{{ _podman_prometheus_user_subgid_start + 65533 }}" + mode: "0700" + state: "directory" + become: true + +# Prometheus runs with UID/GID 65534 inside the container +- name: Podman Prometheus | PATCH | Create service discovery directory for Prometheus + ansible.builtin.file: + path: "/home/{{ podman_prometheus_podman_rootless_user }}/file-configs" + owner: "{{ _podman_prometheus_user_subuid_start + 65533 }}" + group: "{{ _podman_prometheus_user_subgid_start + 65533 }}" + mode: "0700" + state: "directory" + become: true + +# Prometheus runs with UID/GID 65534 inside the container +- name: 
Podman Prometheus | PATCH | Install Prometheus configuration + ansible.builtin.template: + src: home/podman/prometheus.yml + dest: "/home/{{ podman_prometheus_podman_rootless_user }}/prometheus.yml" + mode: "0400" + owner: "{{ _podman_prometheus_user_subuid_start + 65533 }}" + group: "{{ _podman_prometheus_user_subgid_start + 65533 }}" + become: true + notify: + - Restart Prometheus + +- name: Podman Prometheus | PATCH | Install container quadlets + ansible.builtin.template: + src: "home/podman/config/containers/systemd/{{ item }}" + dest: "/home/{{ podman_prometheus_podman_rootless_user }}/.config/containers/systemd/{{ item }}" + owner: "{{ podman_prometheus_podman_rootless_user }}" + mode: "0400" + with_items: + - prometheus.container + become: true + notify: + - Restart Prometheus + +- name: Podman Prometheus | PATCH | Install network quadlets + ansible.builtin.template: + src: "home/podman/config/containers/systemd/{{ item }}" + dest: "/home/{{ podman_prometheus_podman_rootless_user }}/.config/containers/systemd/{{ item }}" + owner: "{{ podman_prometheus_podman_rootless_user }}" + mode: "0400" + with_items: + - frontend.network + become: true + notify: + - Restart Prometheus + - Restart nginx + +- name: Podman Prometheus | AUDIT | Verify quadlets are correctly defined + ansible.builtin.command: /usr/libexec/podman/quadlet -dryrun -user + register: podman_prometheus_quadlet_result + ignore_errors: true + changed_when: false + become: true + become_user: "{{ podman_prometheus_podman_rootless_user }}" + +- name: Podman Prometheus | AUDIT | Assert that the quadlet verification succeeded + ansible.builtin.assert: + that: + - podman_prometheus_quadlet_result.rc == 0 + fail_msg: "'/usr/libexec/podman/quadlet -dryrun -user' failed! Output withheld to prevent leaking secrets." 
+ +- name: Podman Prometheus | PATCH | Set up nginx and Let's Encrypt certificate + ansible.builtin.include_role: + name: sr2c.core.podman_nginx + vars: + podman_nginx_frontend_network: frontend + podman_nginx_podman_rootless_user: "{{ podman_prometheus_podman_rootless_user }}" + podman_nginx_primary_hostname: "{{ inventory_hostname }}" + +- name: Podman Prometheus | PATCH | Install production nginx configuration file + ansible.builtin.template: + src: home/podman/nginx.conf + dest: "/home/{{ podman_prometheus_podman_rootless_user }}/nginx/nginx.conf" + owner: "{{ podman_prometheus_podman_rootless_user }}" + group: "{{ podman_prometheus_podman_rootless_user }}" + mode: "0644" + become: true + notify: + - Restart nginx + +- name: Podman Prometheus | PATCH | Make sure Prometheus and Nginx are running now and started on boot + ansible.builtin.systemd_service: + name: "{{ item }}.service" + enabled: true + state: started + masked: false + daemon_reload: true + scope: user + with_items: + - nginx + - prometheus + become: true + become_user: "{{ podman_prometheus_podman_rootless_user }}" + +- name: Podman Prometheus | PATCH | Set up ClouDNS monitoring + sr2c.core.cloudns_monitor: + name: "Prometheus - {{ inventory_hostname[:19] }}" + host: "{{ inventory_hostname }}" + ip: "{{ inventory_hostname }}" + http_status_code: "200" + emails: "{{ cloudns_monitoring_emails }}" + auth_id: "{{ cloudns_auth_id }}" + auth_password: "{{ cloudns_auth_password }}" + delegate_to: localhost diff --git a/roles/podman_prometheus/templates/etc/motd.d/10-data-plate.txt b/roles/podman_prometheus/templates/etc/motd.d/10-data-plate.txt new file mode 100644 index 0000000..5be20bb --- /dev/null +++ b/roles/podman_prometheus/templates/etc/motd.d/10-data-plate.txt @@ -0,0 +1,14 @@ + ========================================================= + A Prometheus instance is hosted on this server. 
+ Podman user: {{ podman_prometheus_podman_rootless_user }} + ========================================================= + # Become the podman user + sudo -iu {{ podman_prometheus_podman_rootless_user }} + # Check the Prometheus status + systemctl --user status prometheus.service + # Restart Prometheus + systemctl --user restart prometheus.service + # Follow the logs for Prometheus + journalctl --user -fu prometheus.service + ========================================================= + diff --git a/roles/podman_prometheus/templates/home/podman/config/containers/systemd/frontend.network b/roles/podman_prometheus/templates/home/podman/config/containers/systemd/frontend.network new file mode 100644 index 0000000..379c059 --- /dev/null +++ b/roles/podman_prometheus/templates/home/podman/config/containers/systemd/frontend.network @@ -0,0 +1,2 @@ +[Network] +NetworkName=frontend diff --git a/roles/podman_prometheus/templates/home/podman/config/containers/systemd/prometheus.container b/roles/podman_prometheus/templates/home/podman/config/containers/systemd/prometheus.container new file mode 100644 index 0000000..b10b545 --- /dev/null +++ b/roles/podman_prometheus/templates/home/podman/config/containers/systemd/prometheus.container @@ -0,0 +1,13 @@ +[Container] +ContainerName=prometheus +Image=quay.io/prometheus/prometheus:v3.8.1 +Network=frontend.network +Volume=/home/{{ podman_prometheus_podman_rootless_user }}/prometheus-data:/prometheus:rw,Z +Volume=/home/{{ podman_prometheus_podman_rootless_user }}/file-configs:/file-configs:ro,Z +Volume=/home/{{ podman_prometheus_podman_rootless_user }}/prometheus.yml:/etc/prometheus/prometheus.yml:ro,Z + +[Service] +Restart=on-failure + +[Install] +WantedBy=default.target diff --git a/roles/podman_prometheus/templates/home/podman/nginx.conf b/roles/podman_prometheus/templates/home/podman/nginx.conf new file mode 100644 index 0000000..a1a168f --- /dev/null +++ b/roles/podman_prometheus/templates/home/podman/nginx.conf @@ -0,0 +1,52 @@ 
+# {{ ansible_managed }} + +resolver 10.89.0.1 ipv6=off valid=10s; + +# Mitigate httpoxy attack +proxy_set_header Proxy ""; + +server { + listen 80; + listen [::]:80; + + server_name {{ inventory_hostname }}; + server_tokens off; + + location /.well-known/acme-challenge/ { + root /var/www/certbot; + } + + location / { + return 301 https://{{ inventory_hostname }}$request_uri; + } +} + +upstream prometheus { + zone prometheus_upstream 64k; + server prometheus:9090 resolve; +} + +server { + server_name {{ inventory_hostname }}; + listen 443 ssl; + listen [::]:443 ssl; + http2 on; + server_tokens off; + + ssl_certificate /etc/letsencrypt/live/{{ inventory_hostname }}/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/{{ inventory_hostname }}/privkey.pem; + + add_header Strict-Transport-Security "max-age=31536000" always; + add_header Referrer-Policy origin always; # make sure outgoing links don't show the URL to the Prometheus instance + add_header X-Content-Type-Options "nosniff" always; + add_header X-XSS-Protection "1; mode=block" always; + + location / { + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_read_timeout 180; + proxy_pass http://prometheus; + } +} diff --git a/roles/podman_prometheus/templates/home/podman/prometheus.yml b/roles/podman_prometheus/templates/home/podman/prometheus.yml new file mode 100644 index 0000000..422646d --- /dev/null +++ b/roles/podman_prometheus/templates/home/podman/prometheus.yml @@ -0,0 +1,33 @@ +global: + scrape_interval: 15s + external_labels: + monitor: '{{ inventory_hostname }}' + +scrape_configs: + - job_name: 'prometheus' + scrape_interval: 5s + static_configs: + - targets: ['localhost:9090'] + - job_name: 'node' + scrape_interval: 5s + scheme: https + basic_auth: + username: metrics + password: "{{ node_exporter_password }}" + tls_config: + insecure_skip_verify: true + static_configs: + - targets: + - 
'host.containers.internal:9100' +{% for host in groups['ipaservers'] %} + - '{{ host }}:9100' +{% endfor %} +{% for host in groups['keycloak'] %} + - '{{ host }}:9100' +{% endfor %} +{% for host in groups['generic'] %} + - '{{ host }}:9100' +{% endfor %} + file_sd_configs: + - files: + - "/file-configs/*.yml"