Compare commits
No commits in common. "91390d73590d4dc2efcdcc955418aed3421f68b1" and "d5a6ef34618a6bfa18e29b7af131ade2209ea095" have entirely different histories.
91390d7359...d5a6ef3461
21 changed files with 3 additions and 442 deletions
@@ -2,6 +2,7 @@
 - name: Deploy and update the FreeIPA servers
   hosts:
     - ipaservers
+  become: true # Required by FreeIPA roles
   vars:
     # Required for FreeIPA setup
     baseline_epel_packages_allowed:
@@ -15,7 +16,6 @@
       - python3-acme
       - python3-zipp
       - python3-pyOpenSSL
-      - node-exporter
     rhel9cis_dns_server: true
     rhel9cis_httpd_server: true
     # TODO: Restricted umask breaks FreeIPA roles
@@ -23,14 +23,9 @@
     rhel9cis_rule_5_4_3_3: false
   roles:
     - role: sr2c.core.baseline
-      baseline_epel_packages_allowed:
-        - node-exporter
       tags: bootstrap
     - role: sr2c.core.freeipa
-      become: true
       tags: freeipa
-    - role: sr2c.core.node_exporter
-      tags: prometheus
 
 - name: Deploy and update the Keycloak server
   hosts:
@@ -38,42 +33,16 @@
   become: true
   roles:
     - role: sr2c.core.baseline
-      baseline_epel_packages_allowed:
-        - node-exporter
       tags: bootstrap
     - role: freeipa.ansible_freeipa.ipaclient
       state: present
       tags: bootstrap
     - role: sr2c.core.podman_keycloak
       tags: keycloak
-    - role: sr2c.core.node_exporter
-      tags: prometheus
 
-- name: Deploy and update the Prometheus server
-  hosts:
-    - prometheus
-  roles:
-    - role: sr2c.core.baseline
-      vars:
-        baseline_epel_packages_allowed:
-          - node-exporter
-      tags: bootstrap
-    - role: freeipa.ansible_freeipa.ipaclient
-      become: true
-      state: present
-      tags: bootstrap
-    - role: sr2c.core.node_exporter
-      tags: prometheus
-    - role: sr2c.core.podman_prometheus
-      tags: prometheus
-
 - name: Baseline for generic servers (manual or externally managed application deployment)
   hosts:
     - generic
   roles:
     - role: sr2c.core.baseline
-      baseline_epel_packages_allowed:
-        - node-exporter
       tags: bootstrap
-    - role: sr2c.core.node_exporter
-      tags: prometheus
@@ -214,7 +214,7 @@ def run_module():
         r.raise_for_status()
 
     for email in notifications_to_remove:
-        url = "https://api.cloudns.net/monitoring/delete-notification.json"
+        url = "https://api.cloudns.net/monitoring/delete-notifications.json"
         params = {
             'auth-id': module.params['auth_id'],
             'auth-password': module.params['auth_password'],
@@ -10,9 +10,3 @@
     state: restarted
     daemon_reload: true
   become: true
-
-- name: Reload firewalld
-  ansible.builtin.systemd_service:
-    name: firewalld
-    state: reloaded
-  become: true
@@ -32,5 +32,4 @@
     # ipaservers are part of Linux Identity Management. Joining your host to an IdM
     # domain automatically configures SSSD authentication on your host.
     rhel9cis_allow_authselect_updates: false
-    rhel9cis_auditd_max_log_file_action: rotate
   when: (ansible_distribution == "Rocky") and (ansible_distribution_major_version == "9")
@@ -1,2 +0,0 @@
-# node_exporter_password:
-# node_exporter_textfile_directory:
@@ -1,6 +0,0 @@
----
-- name: Restart Node Exporter
-  ansible.builtin.systemd_service:
-    name: prometheus-node-exporter
-    state: restarted
-  become: true
@@ -1,83 +0,0 @@
----
-- name: Node Exporter | PATCH | Install node-exporter
-  become: true
-  ansible.builtin.dnf:
-    name: node-exporter
-    state: present
-
-- name: Node Exporter | PATCH | Generate private TLS key
-  community.crypto.openssl_privatekey:
-    path: /etc/ssl/node-exporter.key
-    size: 4096
-    owner: prometheus
-    group: root
-    mode: '0440'
-  become: true
-
-- name: Node Exporter | PATCH | Create certificate signing request
-  community.crypto.openssl_csr:
-    path: /etc/ssl/node-exporter.csr
-    privatekey_path: /etc/ssl/node-exporter.key
-    common_name: "{{ inventory_hostname }}"
-    subject_alt_name: "DNS:{{ inventory_hostname }}"
-    owner: root
-    group: root
-    mode: '0400'
-  become: true
-
-- name: Generate self-signed certificate
-  community.crypto.x509_certificate:
-    provider: selfsigned
-    path: /etc/ssl/node-exporter.crt
-    privatekey_path: /etc/ssl/node-exporter.key
-    csr_path: /etc/ssl/node-exporter.csr
-    owner: prometheus
-    group: root
-    mode: '0440'
-  become: true
-
-- name: Node Exporter | PATCH | Install node-exporter web configuration
-  become: true
-  ansible.builtin.template:
-    src: etc/node-exporter-web.yml
-    dest: /etc/node-exporter-web.yml
-    owner: root
-    group: root
-    mode: "0444"
-
-- name: Node Exporter | PATCH | Set command line arguments
-  become: true
-  ansible.builtin.lineinfile:
-    path: /etc/default/prometheus-node-exporter
-    regexp: "^ARGS"
-    line: "ARGS='--web.config.file=\"/etc/node-exporter-web.yml\"{% if node_exporter_textfile_directory is defined %} --collector.textfile.directory {{ node_exporter_textfile_directory }}{% endif %}'"
-  notify: Restart Node Exporter
-
-- name: Node Exporter | PATCH | Ensure node-exporter is enabled and running
-  become: true
-  ansible.builtin.systemd_service:
-    name: prometheus-node-exporter
-    masked: false
-    enabled: true
-    state: started
-
-- name: Node Exporter | PATCH | Create firewalld service file for node-exporter
-  become: true
-  ansible.builtin.template:
-    src: etc/firewalld/services/node-exporter.xml
-    dest: /etc/firewalld/services/node-exporter.xml
-    owner: root
-    group: root
-    mode: '0400'
-  notify: Reload firewalld
-
-- name: Node Exporter | Flush handlers
-  ansible.builtin.meta: flush_handlers
-
-- name: Node Exporter | PATCH | Enable node-exporter service in firewalld permanently
-  become: true
-  ansible.posix.firewalld:
-    service: node-exporter
-    permanent: true
-    state: enabled
-    immediate: true
@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<service>
-  <short>Node Exporter</short>
-  <description>Exposes metrics for Prometheus scraping</description>
-  <port protocol="tcp" port="9100"/>
-</service>
@@ -1,17 +0,0 @@
----
-tls_server_config:
-  cert_file: /etc/ssl/node-exporter.crt
-  key_file: /etc/ssl/node-exporter.key
-
-  min_version: "TLS13"
-  max_version: "TLS13"
-
-http_server_config:
-  headers:
-    X-Frame-Options: deny
-    X-Content-Type-Options: nosniff
-
-{% if node_exporter_password is defined %}
-basic_auth_users:
-  metrics: "{{ node_exporter_password | password_hash(hashtype='bcrypt') }}"
-{% endif %}
@@ -1,6 +0,0 @@
----
-- name: Restart systemd-logind
-  ansible.builtin.systemd_service:
-    name: systemd-logind
-    state: restarted
-  become: true
@@ -37,7 +37,6 @@
   become: true
   become_user: "{{ item }}"
   with_items: "{{ podman_host_rootless_users }}"
-  notify: Restart systemd-logind
 
 - name: Podman Host | PATCH | Set XDG_RUNTIME_DIR in .bashrc for rootless users
   ansible.builtin.lineinfile:
@@ -46,10 +45,6 @@
   become: true
   become_user: "{{ item }}"
   with_items: "{{ podman_host_rootless_users }}"
-  notify: Restart systemd-logind
 
-- name: Podman Host | Flush handlers
-  ansible.builtin.meta: flush_handlers
-
 - name: Podman Host | PATCH | Enable linger for rootless users
   ansible.builtin.command:
@@ -1,6 +1,5 @@
 ---
 podman_nginx_additional_hostnames: []
-podman_nginx_additional_publish_ports: []
 podman_nginx_certbot_testing: false
 # podman_nginx_frontend_network:
 podman_nginx_podman_rootless_user: nginx
@@ -13,13 +13,10 @@ Image=docker.io/nginx:1
 {% if podman_nginx_frontend_network is defined %}Network={{ podman_nginx_frontend_network }}.network{% endif +%}
 PublishPort=80:80
 PublishPort=443:443
-{% for item in podman_nginx_additional_publish_ports %}
-PublishPort={{ item }}
-{% endfor %}
 
 Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/www:/var/www/certbot/:ro,z
 Volume=/home/{{ podman_nginx_podman_rootless_user }}/certbot/conf/:/etc/letsencrypt/:ro,z
 Volume=/home/{{ podman_nginx_podman_rootless_user }}/nginx:/etc/nginx/conf.d/:ro,z
 
 {% for item in podman_nginx_additional_volumes %}
 Volume={{ item.src }}:{{ item.dest }}:{{ item.options }}
 {% endfor %}
@@ -1,2 +0,0 @@
----
-podman_prometheus_podman_rootless_user: prometheus
@@ -1,8 +0,0 @@
----
-- name: Restart Prometheus
-  ansible.builtin.systemd_service:
-    name: prometheus
-    scope: user
-    state: restarted
-  become: true
-  become_user: "{{ podman_prometheus_podman_rootless_user }}"
@@ -1,148 +0,0 @@
----
-- name: Podman Prometheus | PATCH | Install data plate
-  ansible.builtin.template:
-    src: etc/motd.d/10-data-plate.txt
-    dest: /etc/motd.d/10-data-plate.txt
-    owner: root
-    group: root
-    mode: "0444"
-  become: true
-
-- name: Podman Prometheus | PATCH | Install podman and verify rootless podman user
-  ansible.builtin.include_role:
-    role: sr2c.core.podman_host
-  vars:
-    podman_host_minimum_unpriv_port: 80
-    podman_host_rootless_users: ["{{ podman_prometheus_podman_rootless_user }}"]
-
-- name: Podman Prometheus | AUDIT | Get subuid range for user
-  ansible.builtin.command:
-    cmd: "getsubids {{ podman_prometheus_podman_rootless_user }}"
-  register: _podman_prometheus_user_subuid
-  changed_when: false
-
-- name: Podman Prometheus | AUDIT | Get subgid range for user
-  ansible.builtin.command:
-    cmd: "getsubids -g {{ podman_prometheus_podman_rootless_user }}"
-  register: _podman_prometheus_user_subgid
-  changed_when: false
-
-- name: Podman Prometheus | AUDIT | Parse outputs of getsubids and store results
-  ansible.builtin.set_fact:
-    _podman_prometheus_user_subuid_start: "{{ (_podman_prometheus_user_subuid.stdout_lines[0].split()[2] | int) }}"
-    _podman_prometheus_user_subgid_start: "{{ (_podman_prometheus_user_subgid.stdout_lines[0].split()[2] | int) }}"
-
-# Prometheus runs with UID/GID 65534 inside the container
-- name: Podman Prometheus | PATCH | Create data directory for Prometheus
-  ansible.builtin.file:
-    path: "/home/{{ podman_prometheus_podman_rootless_user }}/prometheus-data"
-    owner: "{{ _podman_prometheus_user_subuid_start + 65533 }}"
-    group: "{{ _podman_prometheus_user_subgid_start + 65533 }}"
-    mode: "0700"
-    state: "directory"
-  become: true
-
-# Prometheus runs with UID/GID 65534 inside the container
-- name: Podman Prometheus | PATCH | Create service discovery directory for Prometheus
-  ansible.builtin.file:
-    path: "/home/{{ podman_prometheus_podman_rootless_user }}/file-configs"
-    owner: "{{ _podman_prometheus_user_subuid_start + 65533 }}"
-    group: "{{ _podman_prometheus_user_subgid_start + 65533 }}"
-    mode: "0700"
-    state: "directory"
-  become: true
-
-# Prometheus runs with UID/GID 65534 inside the container
-- name: Podman Prometheus | PATCH | Install Prometheus configuration
-  ansible.builtin.template:
-    src: home/podman/prometheus.yml
-    dest: "/home/{{ podman_prometheus_podman_rootless_user }}/prometheus.yml"
-    mode: "0400"
-    owner: "{{ _podman_prometheus_user_subuid_start + 65533 }}"
-    group: "{{ _podman_prometheus_user_subgid_start + 65533 }}"
-  become: true
-  notify:
-    - Restart Prometheus
-
-- name: Podman Prometheus | PATCH | Install container quadlets
-  ansible.builtin.template:
-    src: "home/podman/config/containers/systemd/{{ item }}"
-    dest: "/home/{{ podman_prometheus_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
-    owner: "{{ podman_prometheus_podman_rootless_user }}"
-    mode: "0400"
-  with_items:
-    - prometheus.container
-  become: true
-  notify:
-    - Restart Prometheus
-
-- name: Podman Prometheus | PATCH | Install network quadlets
-  ansible.builtin.template:
-    src: "home/podman/config/containers/systemd/{{ item }}"
-    dest: "/home/{{ podman_prometheus_podman_rootless_user }}/.config/containers/systemd/{{ item }}"
-    owner: "{{ podman_prometheus_podman_rootless_user }}"
-    mode: "0400"
-  with_items:
-    - frontend.network
-  become: true
-  notify:
-    - Restart Prometheus
-    - Restart nginx
-
-- name: Podman Prometheus | AUDIT | Verify quadlets are correctly defined
-  ansible.builtin.command: /usr/libexec/podman/quadlet -dryrun -user
-  register: podman_prometheus_quadlet_result
-  ignore_errors: true
-  changed_when: false
-  become: true
-  become_user: "{{ podman_prometheus_podman_rootless_user }}"
-
-- name: Podman Prometheus | AUDIT | Assert that the quadlet verification succeeded
-  ansible.builtin.assert:
-    that:
-      - podman_prometheus_quadlet_result.rc == 0
-    fail_msg: "'/usr/libexec/podman/quadlet -dryrun -user' failed! Output withheld to prevent leaking secrets."
-
-- name: Podman Prometheus | PATCH | Set up nginx and Let's Encrypt certificate
-  ansible.builtin.include_role:
-    name: sr2c.core.podman_nginx
-  vars:
-    podman_nginx_frontend_network: frontend
-    podman_nginx_podman_rootless_user: "{{ podman_prometheus_podman_rootless_user }}"
-    podman_nginx_primary_hostname: "{{ inventory_hostname }}"
-
-- name: Podman Prometheus | PATCH | Install production nginx configuration file
-  ansible.builtin.template:
-    src: home/podman/nginx.conf
-    dest: "/home/{{ podman_prometheus_podman_rootless_user }}/nginx/nginx.conf"
-    owner: "{{ podman_prometheus_podman_rootless_user }}"
-    group: "{{ podman_prometheus_podman_rootless_user }}"
-    mode: "0644"
-  become: true
-  notify:
-    - Restart nginx
-
-- name: Podman Prometheus | PATCH | Make sure Prometheus and Nginx are running now and started on boot
-  ansible.builtin.systemd_service:
-    name: "{{ item }}.service"
-    enabled: true
-    state: started
-    masked: false
-    daemon_reload: true
-    scope: user
-  with_items:
-    - nginx
-    - prometheus
-  become: true
-  become_user: "{{ podman_prometheus_podman_rootless_user }}"
-
-- name: Podman Prometheus | PATCH | Set up ClouDNS monitoring
-  sr2c.core.cloudns_monitor:
-    name: "Prometheus - {{ inventory_hostname[:19] }}"
-    host: "{{ inventory_hostname }}"
-    ip: "{{ inventory_hostname }}"
-    http_status_code: "200"
-    emails: "{{ cloudns_monitoring_emails }}"
-    auth_id: "{{ cloudns_auth_id }}"
-    auth_password: "{{ cloudns_auth_password }}"
-  delegate_to: localhost
@@ -1,14 +0,0 @@
-=========================================================
-A Prometheus instance is hosted on this server.
-Podman user: {{ podman_prometheus_podman_rootless_user }}
-=========================================================
-# Become the podman user
-sudo -iu {{ podman_prometheus_podman_rootless_user }}
-# Check the Prometheus status
-systemctl --user status prometheus.service
-# Restart Prometheus
-systemctl --user restart prometheus.service
-# Follow the logs for Prometheus
-journalctl --user -fu prometheus.service
-=========================================================
-
@@ -1,2 +0,0 @@
-[Network]
-NetworkName=frontend
@@ -1,13 +0,0 @@
-[Container]
-ContainerName=prometheus
-Image=quay.io/prometheus/prometheus:v3.8.1
-Network=frontend.network
-Volume=/home/{{ podman_prometheus_podman_rootless_user }}/prometheus-data:/prometheus:rw,Z
-Volume=/home/{{ podman_prometheus_podman_rootless_user }}/file-configs:/file-configs:ro,Z
-Volume=/home/{{ podman_prometheus_podman_rootless_user }}/prometheus.yml:/etc/prometheus/prometheus.yml:ro,Z
-
-[Service]
-Restart=on-failure
-
-[Install]
-WantedBy=default.target
@@ -1,52 +0,0 @@
-# {{ ansible_managed }}
-
-resolver 10.89.0.1 ipv6=off valid=10s;
-
-# Mitigate httpoxy attack
-proxy_set_header Proxy "";
-
-server {
-    listen 80;
-    listen [::]:80;
-
-    server_name {{ inventory_hostname }};
-    server_tokens off;
-
-    location /.well-known/acme-challenge/ {
-        root /var/www/certbot;
-    }
-
-    location / {
-        return 301 https://{{ inventory_hostname }}$request_uri;
-    }
-}
-
-upstream prometheus {
-    zone prometheus_upstream 64k;
-    server prometheus:9090 resolve;
-}
-
-server {
-    server_name {{ inventory_hostname }};
-    listen 443 ssl;
-    listen [::]:443 ssl;
-    http2 on;
-    server_tokens off;
-
-    ssl_certificate /etc/letsencrypt/live/{{ inventory_hostname }}/fullchain.pem;
-    ssl_certificate_key /etc/letsencrypt/live/{{ inventory_hostname }}/privkey.pem;
-
-    add_header Strict-Transport-Security "max-age=31536000" always;
-    add_header Referrer-Policy origin always; # make sure outgoing links don't show the URL to the Prometheus instance
-    add_header X-Content-Type-Options "nosniff" always;
-    add_header X-XSS-Protection "1; mode=block" always;
-
-    location / {
-        proxy_set_header Host $http_host;
-        proxy_set_header X-Forwarded-For $remote_addr;
-        proxy_set_header X-Forwarded-Proto $scheme;
-
-        proxy_read_timeout 180;
-        proxy_pass http://prometheus;
-    }
-}
@@ -1,33 +0,0 @@
-global:
-  scrape_interval: 15s
-  external_labels:
-    monitor: '{{ inventory_hostname }}'
-
-scrape_configs:
-  - job_name: 'prometheus'
-    scrape_interval: 5s
-    static_configs:
-      - targets: ['localhost:9090']
-  - job_name: 'node'
-    scrape_interval: 5s
-    scheme: https
-    basic_auth:
-      username: metrics
-      password: "{{ node_exporter_password }}"
-    tls_config:
-      insecure_skip_verify: true
-    static_configs:
-      - targets:
-          - 'host.containers.internal:9100'
-{% for host in groups['ipaservers'] %}
-          - '{{ host }}:9100'
-{% endfor %}
-{% for host in groups['keycloak'] %}
-          - '{{ host }}:9100'
-{% endfor %}
-{% for host in groups['generic'] %}
-          - '{{ host }}:9100'
-{% endfor %}
-    file_sd_configs:
-      - files:
-          - "/file-configs/*.yml"