Initial import; migrate some roles from irl.wip
commit 2ba6c6691b
44 changed files with 1573 additions and 0 deletions
18  roles/baseline/README.md  Normal file
@@ -0,0 +1,18 @@
# sr2c.core.baseline

Configure an SR2 virtual machine.

## Disk Partitions and Encryption

Creates a new LVM volume group on `baseline_second_disk_device` with logical volumes for:

| Mountpoint      | Default Size    | Encrypted |
|-----------------|-----------------|-----------|
| /var            | 5GiB            | No        |
| /var/log        | 5GiB            | No        |
| /var/log/audit  | 5GiB            | No        |
| /var/tmp        | 5GiB            | No        |
| /home           | Remaining space | Yes       |

It is assumed that `/home` is empty, so no data migration is needed there. Data under `/var` will be migrated for
each partition.
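
## Example Playbook

A minimal sketch of how the role might be applied. The device path, location and vault
variable name below are placeholders, not part of the role:

```yaml
---
# Illustrative only: assumes the role ships in the sr2c.core collection and
# that the LUKS passphrase is kept in Ansible Vault.
- hosts: all
  become: true
  roles:
    - role: sr2c.core.baseline
      vars:
        baseline_location: sr2_de_fsn
        baseline_second_disk_device: /dev/vdb    # hypothetical second disk
        baseline_home_luks_passphrase: "{{ vault_home_luks_passphrase }}"
```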
27  roles/baseline/defaults/main.yml  Normal file
@@ -0,0 +1,27 @@
---
# Location of the host (generic, sr2_de_fsn)
baseline_location: generic
# Enable running the Ansible Lockdown CIS role
baseline_lockdown: true
# UK Ministry of Justice Login Banner (seems as good as any)
# https://security-guidance.service.justice.gov.uk/system-lockdown-and-hardening-standard/#appendix-a-login-banner
baseline_warning_banner: |
  THIS SYSTEM IS FOR AUTHORISED USERS ONLY.

  This is a private system; only use this system if you have specific authority to do so.
  Otherwise you are liable to prosecution under the Computer Misuse Act 1990. If you do
  not have the express permission of the operator or owner of this system, switch off or
  disconnect now to avoid prosecution.

# Local NTP servers if available
baseline_ntp_servers:
  - 0.pool.ntp.org
  - 1.pool.ntp.org
  - 2.pool.ntp.org
  - 3.pool.ntp.org
# baseline_second_disk_device:
baseline_second_disk_vg_name: "datavg"
baseline_second_disk_var_size: "5G"
baseline_second_disk_var_log_size: "5G"
baseline_second_disk_var_log_audit_size: "5G"
baseline_second_disk_var_tmp_size: "5G"
4  roles/baseline/files/resolved.conf  Normal file
@@ -0,0 +1,4 @@
[Resolve]
DNSSEC=true
LLMNR=no
MulticastDNS=no
2  roles/baseline/files/systemd-resolved-override.conf  Normal file
@@ -0,0 +1,2 @@
[Service]
Environment="SYSTEMD_RESOLVED_SYNTHESIZE_HOSTNAME=0"
9  roles/baseline/handlers/main.yml  Normal file
@@ -0,0 +1,9 @@
---
- name: Regenerate grub config
  ansible.builtin.command:
    cmd: grub2-mkconfig -o /boot/grub2/grub.cfg

- name: Restart systemd-resolved
  ansible.builtin.service:
    name: systemd-resolved
    state: restarted
160  roles/baseline/tasks/disk_partitions.yml  Normal file
@@ -0,0 +1,160 @@
---
- name: Disk Partitions | PRELIM | Ensure baseline_home_luks_passphrase is defined
  ansible.builtin.assert:
    that:
      - baseline_home_luks_passphrase is defined
    msg: "Variable 'baseline_home_luks_passphrase' must be defined."

- name: Disk Partitions | PRELIM | Ensure baseline_second_disk_device is defined
  ansible.builtin.assert:
    that:
      - baseline_second_disk_device is defined
    msg: "Variable 'baseline_second_disk_device' must be defined."

- name: Disk Partitions | PATCH | Ensure lvm2 is installed
  ansible.builtin.package:
    name: lvm2
    state: present

- name: Disk Partitions | PATCH | Create LVM partition spanning entire disk
  community.general.parted:
    device: "{{ baseline_second_disk_device }}"
    number: 1
    flags: [ lvm ]
    state: present
    part_start: "0%"
    part_end: "100%"

- name: Disk Partitions | PATCH | Create volume group
  community.general.lvg:
    vg: "{{ baseline_second_disk_vg_name }}"
    pvs: "{{ baseline_second_disk_device }}1"

- name: Disk Partitions | PATCH | Create /var logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var
    size: "{{ baseline_second_disk_var_size }}"

- name: Disk Partitions | PATCH | Create /var/log logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var_log
    size: "{{ baseline_second_disk_var_log_size }}"

- name: Disk Partitions | PATCH | Create /var/log/audit logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var_log_audit
    size: "{{ baseline_second_disk_var_log_audit_size }}"

- name: Disk Partitions | PATCH | Create /var/tmp logical volume
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: var_tmp
    size: "{{ baseline_second_disk_var_tmp_size }}"

- name: Disk Partitions | PATCH | Create /home logical volume with remaining space
  community.general.lvol:
    vg: "{{ baseline_second_disk_vg_name }}"
    lv: home
    shrink: false  # make idempotent
    size: "100%FREE"

- name: Disk Partitions | PATCH | Ensure cryptsetup is installed
  ansible.builtin.package:
    name: cryptsetup
    state: present

- name: Disk Partitions | PATCH | Encrypt /home with LUKS2 and provided passphrase
  community.crypto.luks_device:
    device: "/dev/{{ baseline_second_disk_vg_name }}/home"
    state: present
    passphrase: "{{ baseline_home_luks_passphrase }}"
    type: luks2

- name: Disk Partitions | PATCH | Open LUKS device
  community.crypto.luks_device:
    device: "/dev/{{ baseline_second_disk_vg_name }}/home"
    name: home_crypt
    state: opened
    passphrase: "{{ baseline_home_luks_passphrase }}"

- name: Disk Partitions | PATCH | Add /home logical volume to crypttab
  community.general.crypttab:
    backing_device: /dev/mapper/datavg-home
    name: home_crypt
    opts: discard
    state: present

- name: Disk Partitions | PATCH | Create xfs filesystems on new partitions
  community.general.filesystem:
    dev: "{{ item }}"
    fstype: xfs
  with_items:
    - /dev/mapper/datavg-var
    - /dev/mapper/datavg-var_log
    - /dev/mapper/datavg-var_log_audit
    - /dev/mapper/datavg-var_tmp
    - /dev/mapper/home_crypt

- name: Disk Partitions | AUDIT | Check if /home is mounted
  ansible.builtin.command:
    cmd: mountpoint -q /home
  register: baseline_second_disk_home_mounted
  changed_when: false
  failed_when: false

- name: Disk Partitions | AUDIT | Check if /home is empty
  ansible.builtin.command:
    cmd: ls -A /home
  register: baseline_second_disk_home_files
  when: baseline_second_disk_home_mounted.rc != 0
  changed_when: false

- name: Disk Partitions | AUDIT | Fail if /home is not mounted and not empty
  ansible.builtin.assert:
    that:
      - ((baseline_second_disk_home_files.skipped is defined) and baseline_second_disk_home_files.skipped) or (baseline_second_disk_home_files.stdout == "")

- name: Disk Partitions | PATCH | Ensure /home is mounted
  ansible.posix.mount:
    src: "/dev/mapper/home_crypt"
    path: '/home'
    fstype: 'xfs'
    opts: 'rw,nosuid,nodev'
    state: mounted

- name: Disk Partitions | AUDIT | Check if /var is mounted
  ansible.builtin.command:
    cmd: mountpoint -q /var
  register: baseline_second_disk_var_mounted
  changed_when: false
  failed_when: false

- name: Disk Partitions | PATCH | Migrate content if /var is not mounted
  when: baseline_second_disk_var_mounted.rc != 0
  block:
    - name: Disk Partitions | PATCH | Enter emergency mode
      ansible.builtin.command:
        cmd: systemctl isolate emergency.target

    - name: Disk Partitions | PATCH | Unmount /var/lib/nfs/rpc_pipefs if mounted
      ansible.posix.mount:
        path: /var/lib/nfs/rpc_pipefs
        state: unmounted

    - name: Disk Partitions | PATCH | Migrate data to new partitions
      ansible.builtin.include_tasks:
        file: disk_partitions_migrate.yml
      vars:
        baseline_second_disk_migrate_path: "{{ item }}"
      with_items:
        - "/var"
        - "/var/log"
        - "/var/log/audit"
        - "/var/tmp"

    - name: Disk Partitions | PATCH | Restore default mode
      ansible.builtin.command:
        cmd: systemctl isolate default.target
31  roles/baseline/tasks/disk_partitions_migrate.yml  Normal file
@@ -0,0 +1,31 @@
---
- name: 'Disk Partitions | PATCH | Rename {{ baseline_second_disk_migrate_path }} to {{ baseline_second_disk_migrate_path }}.old'
  ansible.builtin.command:
    cmd: 'mv {{ baseline_second_disk_migrate_path }} {{ baseline_second_disk_migrate_path }}.old'

- name: 'Disk Partitions | PATCH | Mount {{ baseline_second_disk_migrate_path }}'
  ansible.posix.mount:
    src: "/dev/mapper/datavg-{{ baseline_second_disk_migrate_path | replace('/', '', 1) | replace('/', '_') }}"
    path: '{{ baseline_second_disk_migrate_path }}'
    fstype: 'xfs'
    opts: 'rw,{{ "noexec," if baseline_second_disk_migrate_path != "/var" else "" }}nosuid,nodev'
    state: mounted
  # TODO: systemctl daemon-reload after modifying /etc/fstab

- name: 'Disk Partitions | PATCH | Set {{ baseline_second_disk_migrate_path }} permissions'
  ansible.builtin.file:
    path: '{{ baseline_second_disk_migrate_path }}'
    owner: root
    group: root
    mode: '0755'
    state: directory

- name: 'Disk Partitions | PATCH | Move {{ baseline_second_disk_migrate_path }} content'
  ansible.builtin.shell:
    cmd: 'cp -ax * {{ baseline_second_disk_migrate_path }}/'
    chdir: '{{ baseline_second_disk_migrate_path }}.old'

- name: 'Disk Partitions | PATCH | Delete {{ baseline_second_disk_migrate_path }}.old'
  ansible.builtin.file:
    path: '{{ baseline_second_disk_migrate_path }}.old'
    state: absent
46  roles/baseline/tasks/dns_resolver.yml  Normal file
@@ -0,0 +1,46 @@
---
- name: DNS Resolver | PATCH | Install systemd-resolved
  ansible.builtin.dnf:
    name: systemd-resolved
    state: latest

- name: DNS Resolver | PATCH | Ensure systemd-resolved is in use
  ansible.builtin.systemd_service:
    name: systemd-resolved
    state: started
    enabled: true
    masked: false

- name: DNS Resolver | PATCH | Remove loopback address entries containing the hostname from /etc/hosts
  ansible.builtin.lineinfile:
    path: /etc/hosts
    regexp: '^(127\.0\.0\.1|::1)\s.*{{ inventory_hostname }}'
    state: absent

- name: DNS Resolver | PATCH | Enable DNSSEC and disable unwanted resolved features
  ansible.builtin.copy:
    src: resolved.conf
    dest: /etc/systemd/resolved.conf
    owner: root
    group: root
    mode: "0644"
  notify: "Restart systemd-resolved"
  become: true

- name: DNS Resolver | PATCH | Ensure /etc/systemd/system/systemd-resolved.service.d exists
  ansible.builtin.file:
    path: /etc/systemd/system/systemd-resolved.service.d
    state: directory
    owner: root
    group: root
    mode: "0755"

- name: DNS Resolver | PATCH | Disable resolved record synthesising
  ansible.builtin.copy:
    src: systemd-resolved-override.conf
    dest: /etc/systemd/system/systemd-resolved.service.d/override.conf
    owner: root
    group: root
    mode: "0644"
  notify: "Restart systemd-resolved"
  become: true
25  roles/baseline/tasks/ipaclient.yml  Normal file
@@ -0,0 +1,25 @@
---
- name: FreeIPA Client | PATCH | Join IPA domain
  ansible.builtin.include_role:
    name: freeipa.ansible_freeipa.ipaclient
  vars:
    ipaclient_hostname: "{{ inventory_hostname }}"

- name: FreeIPA Client | AUDIT | Check current authselect configuration
  ansible.builtin.command: authselect current
  register: freeipa_authselect_status
  changed_when: false

- name: FreeIPA Client | PATCH | Apply authselect profile with sssd, sudo, and mkhomedir if not set
  ansible.builtin.command: authselect select sssd with-sudo with-mkhomedir --force
  when: >
    'Profile ID: sssd' not in freeipa_authselect_status.stdout or
    'with-sudo' not in freeipa_authselect_status.stdout or
    'with-mkhomedir' not in freeipa_authselect_status.stdout

- name: FreeIPA Client | PATCH | Enable oddjobd.service (for with-mkhomedir feature)
  ansible.builtin.systemd_service:
    name: oddjobd.service
    state: started
    enabled: true
    masked: false
29  roles/baseline/tasks/lockdown.yml  Normal file
@@ -0,0 +1,29 @@
---
- name: Lockdown | AUDIT | Check current authselect configuration
  ansible.builtin.command: authselect current
  register: baseline_lockdown_authselect_status
  failed_when: false  # Exit code is 2 when not configured
  changed_when: false

- name: Lockdown | AUDIT | Do not disable root login if no authselect profile configured
  ansible.builtin.set_fact:
    rhel9cis_rule_5_1_20: false
  when: baseline_lockdown_authselect_status.rc == 2

- name: Lockdown | PATCH | Run Ansible Lockdown (RHEL9-CIS)
  ansible.builtin.include_role:
    name: RHEL9-CIS
  vars:
    # Ensure message of the day is configured properly - we have our own MOTD to apply
    rhel9cis_rule_1_7_1: false
    rhel9cis_rule_1_7_4: false
    # Don't restrict user SSH access in sshd_config - this is managed by FreeIPA
    rhel9cis_rule_5_1_7: false
    # TODO: figure out boot password
    rhel9cis_set_boot_pass: false
    # TODO: We intend to later deploy a remote rsyslog sink
    rhel9cis_syslog: rsyslog
    rhel9cis_time_synchronization_servers: "{{ baseline_ntp_servers }}"
    rhel9cis_warning_banner: "{{ baseline_warning_banner }}"
    rhel9cis_sshd_denyusers: "admin nobody"
  when: (ansible_distribution == "Rocky") and (ansible_distribution_major_version == "9")
103  roles/baseline/tasks/main.yml  Normal file
@@ -0,0 +1,103 @@
---
- name: Baseline | PRELIM | Check for supported operating system
  ansible.builtin.assert:
    that:
      - ansible_distribution == "Rocky"
      - ansible_distribution_major_version == "9"

- name: Baseline | PRELIM | Include location specific variables
  ansible.builtin.include_vars:
    file: "{{ baseline_location }}.yml"

- name: Baseline | PATCH | Configure virtual machine for optimal operation as a SolusVM guest
  ansible.builtin.include_tasks:
    file: "solusvm.yml"
  when: baseline_host_type == "solusvm"

- name: Baseline | PATCH | Setup second disk for additional partitions
  ansible.builtin.include_tasks:
    file: disk_partitions.yml
  when: baseline_second_disk_device is defined

- name: Baseline | PATCH | Enable EPEL repository
  block:
    - name: Baseline | PATCH | Install epel-release
      ansible.builtin.dnf:
        name: epel-release
        state: present
    - name: Baseline | PATCH | Restrict packages to be installed from EPEL
      community.general.ini_file:
        path: /etc/yum.repos.d/epel.repo
        section: epel
        option: includepkgs
        value: "{{ baseline_epel_packages_allowed | join(',') }}"
    - name: Baseline | PATCH | Disable EPEL openh264 repository
      community.general.ini_file:
        path: /etc/yum.repos.d/epel-cisco-openh264.repo
        section: epel-cisco-openh264
        option: enabled
        value: 0
  when: (baseline_epel_packages_allowed is defined) and (baseline_epel_packages_allowed | length > 0)

- name: Baseline | PATCH | Remove EPEL repository
  ansible.builtin.dnf:
    name: epel-release
    state: absent
  when: (baseline_epel_packages_allowed is not defined) or (baseline_epel_packages_allowed | length == 0)

- name: Baseline | PATCH | Remove cockpit-ws
  ansible.builtin.dnf:
    name: cockpit-ws
    state: absent

- name: Baseline | PATCH | Flush handlers
  ansible.builtin.meta: flush_handlers

- name: Baseline | PATCH | Run Ansible Lockdown role
  ansible.builtin.include_tasks:
    file: "lockdown.yml"
  when: baseline_lockdown

- name: Baseline | PATCH | Ensure message of the day is configured properly (CIS 1.7.1, 1.7.4)
  ansible.builtin.template:
    src: motd.j2
    dest: /etc/motd
    owner: root
    group: root
    mode: 'u-x,go-wx'

- name: Baseline | PATCH | Remove dhcpv6-client service from firewalld
  ansible.posix.firewalld:
    service: dhcpv6-client
    state: disabled
    immediate: true
    permanent: true
    zone: public

- name: Baseline | PATCH | Remove mdns service from firewalld
  ansible.posix.firewalld:
    service: mdns
    state: disabled
    immediate: true
    permanent: true
    zone: public

- name: Baseline | PATCH | Remove cockpit service from firewalld
  ansible.posix.firewalld:
    service: cockpit
    state: disabled
    immediate: true
    permanent: true
    zone: public

- name: Baseline | PATCH | Configure DNS resolver
  ansible.builtin.include_tasks:
    file: dns_resolver.yml

- name: Baseline | PATCH | Flush handlers
  ansible.builtin.meta: flush_handlers

- name: Baseline | PATCH | Join IPA Domain
  ansible.builtin.include_tasks:
    file: ipaclient.yml
  when: "'ipaservers' not in group_names"
52  roles/baseline/tasks/solusvm.yml  Normal file
@@ -0,0 +1,52 @@
---
# https://support.solusvm.com/hc/en-us/articles/21334950006807-How-to-install-Guest-Tools-manually-inside-a-VM-in-SolusVM-2
- name: SolusVM Guest | PATCH | Install required packages
  ansible.builtin.dnf:
    name:
      - qemu-guest-agent
      - cloud-init
      - tuned
    state: latest
    update_cache: true
  become: true

- name: SolusVM Guest | PATCH | Enable and start tuned
  ansible.builtin.systemd_service:
    name: tuned
    enabled: true
    state: started
  become: true

- name: SolusVM Guest | AUDIT | Check for tuned profile
  ansible.builtin.command: tuned-adm active
  register: vps_tuned_profile
  become: true
  changed_when: false

- name: SolusVM Guest | PATCH | Start tuned profile (virtual-guest)
  ansible.builtin.shell: tuned-adm profile virtual-guest
  become: true
  when: "'virtual-guest' not in vps_tuned_profile.stdout"

- name: SolusVM Guest | PATCH | Remove console=ttyS0,115200n8 from bootloader configurations
  ansible.builtin.replace:
    path: "{{ item }}"
    regexp: 'console=ttyS0,115200n8'
    replace: ''
  with_items:
    - /etc/default/grub
    - /etc/sysconfig/bootloader
  when: ansible_distribution == 'Rocky'
  notify:
    - Regenerate grub config

- name: SolusVM Guest | AUDIT | Find all vmlinuz-* files in /boot
  ansible.builtin.find:
    paths: /boot
    patterns: 'vmlinuz-*'
  register: baseline_solusvm_kernels

- name: SolusVM Guest | PATCH | Remove console=ttyS0,115200n8 from existing kernel bootloader entries
  ansible.builtin.command:
    cmd: "grubby --update-kernel={{ item.path }} --remove-args='console=ttyS0,115200n8'"
  with_items: "{{ baseline_solusvm_kernels.files }}"
14  roles/baseline/templates/motd.j2  Normal file
@@ -0,0 +1,14 @@

 #####  ######   #####
#     # #     # #     #
#       #     #       #
 #####  ######   #####
      # #   #   #
#     # #    #  #
 #####  #     # #######

* Hostname: {{ inventory_hostname }}
* Last Ansible run: {{ template_run_date }}
* Audit logging is active.
* Don't mess up.

2  roles/baseline/vars/generic.yml  Normal file
@@ -0,0 +1,2 @@
---
baseline_host_type: generic
6  roles/baseline/vars/sr2_de_fsn.yml  Normal file
@@ -0,0 +1,6 @@
---
baseline_host_type: solusvm
baseline_ntp_servers:
  - ntp1.hetzner.de
  - ntp2.hetzner.com
  - ntp3.hetzner.net