Compare commits

..

4 commits

Author · SHA1 · Message · Date
uk-bolly · 21a886a81c · Merge pull request #64 from ansible-lockdown/devel (Devel to main - bug fixes) · 2023-06-06 14:40:39 +01:00
uk-bolly · 759bbbad7e · Merge pull request #49 from ansible-lockdown/devel (Galaxy Compliance) · 2023-03-21 21:11:51 +00:00
uk-bolly · 8bbccd6b62 · Merge pull request #47 from ansible-lockdown/devel (Merge to Main galaxy workflow) · 2023-03-21 16:35:53 +00:00
uk-bolly · beaeb3a181 · Merge pull request #45 from ansible-lockdown/devel (Initial 1.0 release CIS 1.0) · 2023-03-21 15:39:53 +00:00
177 changed files with 8074 additions and 12516 deletions

.ansible-lint

@ -3,7 +3,21 @@
 parseable: true
 quiet: true
 skip_list:
-  - 'package-latest'
-  - 'risky-shell-pipe'
+  - 'schema'
+  - 'no-changed-when'
+  - 'var-spacing'
+  - 'fqcn-builtins'
+  - 'experimental'
+  - 'name[play]'
+  - 'name[casing]'
+  - 'name[template]'
+  - 'fqcn[action]'
+  - '204'
+  - '305'
+  - '303'
+  - '403'
+  - '306'
+  - '602'
+  - '208'
 use_default_rules: true
 verbosity: 0

.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 34 lines)

@ -0,0 +1,34 @@
---
name: Report Issue
about: Create a bug issue ticket to help us improve
title: ''
labels: bug
assignees: ''
---
**Describe the Issue**
A clear and concise description of what the bug is.
**Expected Behavior**
A clear and concise description of what you expected to happen.
**Actual Behavior**
A clear and concise description of what's happening.
**Control(s) Affected**
What controls are being affected by the issue
**Environment (please complete the following information):**
- branch being used: [e.g. devel]
- Ansible Version: [e.g. 2.10]
- Host Python Version: [e.g. Python 3.7.6]
- Ansible Server Python Version: [e.g. Python 3.7.6]
- Additional Details:
**Additional Notes**
Anything additional goes here
**Possible Solution**
Enter a suggested fix here


@ -0,0 +1,22 @@
---
name: Feature Request or Enhancement
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
## Feature Request or Enhancement
- Feature []
- Enhancement []
**Summary of Request**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Suggested Code**
Please provide any code you have in mind to fulfill the request

.github/ISSUE_TEMPLATE/question.md (vendored, new file, 18 lines)

@ -0,0 +1,18 @@
---
name: Question
about: Ask away.......
title: ''
labels: question
assignees: ''
---
**Question**
Pose question here.
**Environment (please complete the following information):**
- Ansible Version: [e.g. 2.10]
- Host Python Version: [e.g. Python 3.7.6]
- Ansible Server Python Version: [e.g. Python 3.7.6]
- Additional Details:

.github/pull_request_template.md (vendored, new file, 12 lines)

@ -0,0 +1,12 @@
**Overall Review of Changes:**
A general description of the changes made that are being requested for merge
**Issue Fixes:**
Please list (using linking) any open issues this PR addresses
**Enhancements:**
Please list any enhancements/features that are not open issue tickets
**How has this been tested?:**
Please give an overview of how these changes were tested. If they were not please use N/A

.github/workflows/OS.tfvars (vendored, new file, 9 lines)

@ -0,0 +1,9 @@
#Ami Alma 9
ami_id = "ami-0845395779540e3cb"
ami_os = "rhel9"
ami_username = "ec2-user"
ami_user_home = "/home/ec2-user"
instance_tags = {
Name = "RHEL9-CIS"
Environment = "lockdown_github_repo_workflow"
}


@ -1,17 +0,0 @@
---
name: Add Repo Issue to ALD GH project
on:
issues:
types:
- opened
- reopened
- transferred
jobs:
add-to-project:
runs-on: ubuntu-latest
steps:
- uses: actions/add-to-project@main
with:
project-url: https://github.com/orgs/ansible-lockdown/projects/1
github-token: ${{ secrets.ALD_GH_PROJECT }}


@ -1,54 +0,0 @@
---
# GitHub schedules all cron jobs in UTC.
# ──────────────────────────────────────────────────────────────────────────────
# Schedule:
# - '0 13 * * *' runs at 13:00 UTC every day.
# - This corresponds to:
# • 9:00 AM Eastern **during Daylight Saving Time** (mid-Mar → early-Nov)
# • 8:00 AM Eastern **during Standard Time** (early-Nov → mid-Mar)
#
# Job routing:
# - call-benchmark-tracker:
# • Runs on manual dispatch, and on pushes to the 'latest' branch.
# - call-monitor-promotions:
# • Runs on schedule or manual dispatch **only in repos named ansible-lockdown/Private-***.
# • Skips automatically in public repos (e.g., Windows-2022-CIS) to avoid false failures.
#
# Defense-in-depth:
# - The called promotion workflow may still keep its own guard to ensure only Private-* repos execute it.
name: Central Benchmark Orchestrator
on:
push:
branches:
- latest
schedule:
- cron: '0 13 * * *' # 13:00 UTC → 9 AM ET (DST) / 8 AM ET (Standard Time)
workflow_dispatch:
jobs:
call-benchmark-tracker:
# Run on manual dispatch OR when 'latest' branch receives a push
if: github.event_name == 'workflow_dispatch' || (github.event_name == 'push' && github.ref_name == 'latest')
name: Start Benchmark Tracker
uses: ansible-lockdown/github_linux_IaC/.github/workflows/benchmark_track.yml@self_hosted
with:
repo_name: ${{ github.repository }}
secrets:
TEAMS_WEBHOOK_URL: ${{ secrets.TEAMS_WEBHOOK_URL }}
BADGE_PUSH_TOKEN: ${{ secrets.BADGE_PUSH_TOKEN }}
DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }}
call-monitor-promotions:
# Run on schedule or manual dispatch, but only for Private-* repos
if: (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && startsWith(github.repository, 'ansible-lockdown/Private-')
name: Monitor Promotions and Auto-Promote
uses: ansible-lockdown/github_linux_IaC/.github/workflows/benchmark_promote.yml@self_hosted
with:
repo_name: ${{ github.repository }}
secrets:
TEAMS_WEBHOOK_URL: ${{ secrets.TEAMS_WEBHOOK_URL }}
BADGE_PUSH_TOKEN: ${{ secrets.BADGE_PUSH_TOKEN }}
DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }}


@ -1,162 +0,0 @@
---
name: Devel pipeline
on: # yamllint disable-line rule:truthy
pull_request_target:
types: [opened, reopened, synchronize]
branches:
- devel
- benchmark*
paths:
- '**.yml'
- '**.sh'
- '**.j2'
- '**.ps1'
- '**.cfg'
# Allow manual running of workflow
workflow_dispatch:
# A workflow run is made up of one or more jobs
# that can run sequentially or in parallel
jobs:
# This will create messages for first time contributors and direct them to the Discord server
welcome:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/first-interaction@main
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
issue_message: |-
Congrats on opening your first issue and thank you for taking the time to help improve Ansible-Lockdown!
Please join in the conversation happening on the [Discord Server](https://www.lockdownenterprise.com/discord) as well.
pr_message: |-
Congrats on opening your first pull request and thank you for taking the time to help improve Ansible-Lockdown!
Please join in the conversation happening on the [Discord Server](https://www.lockdownenterprise.com/discord) as well.
# This workflow contains a single job that tests the playbook
playbook-test:
# The type of runner that the job will run on
runs-on: self-hosted
# Allow permissions for AWS auth
permissions:
id-token: write
contents: read
pull-requests: read
env:
ENABLE_DEBUG: ${{ vars.ENABLE_DEBUG }}
# Imported as a variable by terraform
TF_VAR_repository: ${{ github.event.repository.name }}
AWS_REGION: "us-east-1"
ANSIBLE_VERSION: ${{ vars.ANSIBLE_RUNNER_VERSION }}
defaults:
run:
shell: bash
working-directory: .github/workflows/github_linux_IaC
# working-directory: .github/workflows
steps:
- name: Git clone the lockdown repository to test
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: If a variable for IAC_BRANCH is set use that branch
working-directory: .github/workflows
run: |
if [ "${{ vars.IAC_BRANCH }}" != '' ]; then
echo "IAC_BRANCH=${{ vars.IAC_BRANCH }}" >> $GITHUB_ENV
echo "Pipeline using the following IAC branch ${{ vars.IAC_BRANCH }}"
else
echo IAC_BRANCH=main >> $GITHUB_ENV
fi
# Pull in terraform code for linux servers
- name: Clone GitHub IaC plan
uses: actions/checkout@v4
with:
repository: ansible-lockdown/github_linux_IaC
path: .github/workflows/github_linux_IaC
ref: ${{ env.IAC_BRANCH }}
# Uses dedicated restricted role and policy to enable this only for this task
# No credentials are part of github for AWS auth
- name: configure aws credentials
uses: aws-actions/configure-aws-credentials@main
with:
role-to-assume: ${{ secrets.AWS_ASSUME_ROLE }}
role-session-name: ${{ secrets.AWS_ROLE_SESSION }}
aws-region: ${{ env.AWS_REGION }}
- name: DEBUG - Show IaC files
if: env.ENABLE_DEBUG == 'true'
run: |
echo "OSVAR = $OSVAR"
echo "benchmark_type = $benchmark_type"
pwd
env:
# Imported from GitHub variables this is used to load the relevant OS.tfvars file
OSVAR: ${{ vars.OSVAR }}
benchmark_type: ${{ vars.BENCHMARK_TYPE }}
- name: Tofu init
id: init
run: tofu init
env:
# Imported from GitHub variables this is used to load the relevant OS.tfvars file
OSVAR: ${{ vars.OSVAR }}
TF_VAR_benchmark_type: ${{ vars.BENCHMARK_TYPE }}
- name: Tofu validate
id: validate
run: tofu validate
env:
# Imported from GitHub variables this is used to load the relevant OS.tfvars file
OSVAR: ${{ vars.OSVAR }}
TF_VAR_benchmark_type: ${{ vars.BENCHMARK_TYPE }}
- name: Tofu apply
id: apply
env:
OSVAR: ${{ vars.OSVAR }}
TF_VAR_benchmark_type: ${{ vars.BENCHMARK_TYPE }}
TF_VAR_privsubnet_id: ${{ secrets.AWS_PRIVSUBNET_ID }}
TF_VAR_vpc_secgrp_id: ${{ secrets.AWS_VPC_SECGRP_ID }}
run: tofu apply -var-file "${OSVAR}.tfvars" --auto-approve -input=false
## Debug Section
- name: DEBUG - Show Ansible hostfile
if: env.ENABLE_DEBUG == 'true'
run: cat hosts.yml
# AWS deployments take a while to come up; insert a sleep or the playbook fails
- name: Sleep to allow system to come up
run: sleep ${{ vars.BUILD_SLEEPTIME }}
# Run the Ansible playbook
- name: Run_Ansible_Playbook
env:
ANSIBLE_HOST_KEY_CHECKING: "false"
ANSIBLE_DEPRECATION_WARNINGS: "false"
run: |
/opt/ansible_${{ env.ANSIBLE_VERSION }}_venv/bin/ansible-playbook -i hosts.yml --private-key ~/.ssh/le_runner ../../../site.yml
# Remove test system - User secrets to keep if necessary
- name: Tofu Destroy
if: always() && env.ENABLE_DEBUG == 'false'
env:
OSVAR: ${{ vars.OSVAR }}
TF_VAR_benchmark_type: ${{ vars.BENCHMARK_TYPE }}
TF_VAR_privsubnet_id: ${{ secrets.AWS_PRIVSUBNET_ID }}
TF_VAR_vpc_secgrp_id: ${{ secrets.AWS_VPC_SECGRP_ID }}
run: tofu destroy -var-file "${OSVAR}.tfvars" --auto-approve -input=false


@ -1,27 +0,0 @@
---
name: Export Private Repo Badges
# Use different minute offsets with the same hourly pattern:
# Repo Group Suggested Cron Expression Explanation
# Group A 0 */6 * * * Starts at top of hour
# Group B 10 */6 * * * Starts at 10 after
# And So On
on:
push:
branches:
- latest
schedule:
- cron: '0 */6 * * *'
workflow_dispatch:
jobs:
export-badges:
if: github.event_name == 'workflow_dispatch' || (github.event_name == 'schedule' && startsWith(github.repository, 'ansible-lockdown/Private-')) || (github.event_name == 'push' && github.ref_name == 'latest')
uses: ansible-lockdown/github_linux_IaC/.github/workflows/export_badges_private.yml@self_hosted
with:
# Full org/repo path passed for GitHub API calls (e.g., ansible-lockdown/Private-Windows-2016-CIS)
repo_name: ${{ github.repository }}
secrets:
BADGE_PUSH_TOKEN: ${{ secrets.BADGE_PUSH_TOKEN }}


@ -1,19 +0,0 @@
---
name: Export Public Repo Badges
on:
push:
branches:
- main
- devel
workflow_dispatch:
jobs:
export-badges:
if: github.repository_visibility == 'public' && (github.event_name == 'workflow_dispatch' || (github.event_name == 'push' && (github.ref_name == 'devel' || github.ref_name == 'main')))
uses: ansible-lockdown/github_linux_IaC/.github/workflows/export_badges_public.yml@self_hosted
with:
repo_name: ${{ github.repository }}
secrets:
BADGE_PUSH_TOKEN: ${{ secrets.BADGE_PUSH_TOKEN }}

.github/workflows/github_networks.tf (vendored, new file, 53 lines)

@ -0,0 +1,53 @@
resource "aws_vpc" "Main" {
cidr_block = var.main_vpc_cidr
instance_tenancy = "default"
tags = {
Environment = "${var.environment}"
Name = "${var.namespace}-VPC"
}
}
resource "aws_internet_gateway" "IGW" {
vpc_id = aws_vpc.Main.id
tags = {
Environment = "${var.environment}"
Name = "${var.namespace}-IGW"
}
}
resource "aws_subnet" "publicsubnets" {
vpc_id = aws_vpc.Main.id
cidr_block = var.public_subnets
availability_zone = var.availability_zone
tags = {
Environment = "${var.environment}"
Name = "${var.namespace}-pubsub"
}
}
resource "aws_subnet" "Main" {
vpc_id = aws_vpc.Main.id
cidr_block = var.private_subnets
availability_zone = var.availability_zone
tags = {
Environment = "${var.environment}"
Name = "${var.namespace}-prvsub"
}
}
resource "aws_route_table" "PublicRT" {
vpc_id = aws_vpc.Main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.IGW.id
}
tags = {
Environment = "${var.environment}"
Name = "${var.namespace}-publicRT"
}
}
resource "aws_route_table_association" "rt_associate_public" {
subnet_id = aws_subnet.Main.id
route_table_id = aws_route_table.PublicRT.id
}

.github/workflows/github_vars.tfvars (vendored, new file, 14 lines)

@ -0,0 +1,14 @@
// github_actions variables
// Resourced in github_networks.tf
// Declared in variables.tf
//
namespace = "github_actions"
environment = "lockdown_github_repo_workflow"
// Matching pair name found in AWS for keypairs PEM key
ami_key_pair_name = "github_actions"
private_key = ".ssh/github_actions.pem"
main_vpc_cidr = "172.22.0.0/24"
public_subnets = "172.22.0.128/26"
private_subnets = "172.22.0.192/26"


@ -0,0 +1,111 @@
# This is a basic workflow to help you get started with Actions
name: linux_benchmark_pipeline
# Controls when the action will run.
# Triggers the workflow on push or pull request
# events but only for the devel branch
on: # yamllint disable-line rule:truthy
pull_request_target:
types: [opened, reopened, synchronize]
branches:
- devel
- main
paths:
- '**.yml'
- '**.sh'
- '**.j2'
- '**.ps1'
- '**.cfg'
# A workflow run is made up of one or more jobs
# that can run sequentially or in parallel
jobs:
# This will create messages for first time contributors and direct them to the Discord server
welcome:
runs-on: ubuntu-latest
steps:
- uses: actions/first-interaction@main
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
pr-message: |-
Congrats on opening your first pull request and thank you for taking the time to help improve Ansible-Lockdown!
Please join in the conversation happening on the [Discord Server](https://discord.io/ansible-lockdown) as well.
# This workflow contains a single job called "build"
build:
# The type of runner that the job will run on
runs-on: ubuntu-latest
env:
ENABLE_DEBUG: false
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE,
# so your job can access it
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Add_ssh_key
working-directory: .github/workflows
env:
SSH_AUTH_SOCK: /tmp/ssh_agent.sock
PRIVATE_KEY: "${{ secrets.SSH_PRV_KEY }}"
run: |
mkdir .ssh
chmod 700 .ssh
echo $PRIVATE_KEY > .ssh/github_actions.pem
chmod 600 .ssh/github_actions.pem
### Build out the server
- name: Terraform_Init
working-directory: .github/workflows
run: terraform init
- name: Terraform_Validate
working-directory: .github/workflows
run: terraform validate
- name: Terraform_Apply
working-directory: .github/workflows
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
run: terraform apply -var-file "github_vars.tfvars" -var-file "OS.tfvars" --auto-approve -input=false
## Debug Section
- name: DEBUG - Show Ansible hostfile
if: env.ENABLE_DEBUG == 'true'
working-directory: .github/workflows
run: cat hosts.yml
# AWS deployments take a while to come up; insert a sleep or the playbook fails
- name: Sleep for 60 seconds
run: sleep 60s
shell: bash
# Run the ansible playbook
- name: Run_Ansible_Playbook
uses: arillso/action.playbook@master
with:
playbook: site.yml
inventory: .github/workflows/hosts.yml
galaxy_file: collections/requirements.yml
private_key: ${{ secrets.SSH_PRV_KEY }}
# verbose: 3
env:
ANSIBLE_HOST_KEY_CHECKING: "false"
ANSIBLE_DEPRECATION_WARNINGS: "false"
# Remove test system - User secrets to keep if necessary
- name: Terraform_Destroy
working-directory: .github/workflows
if: always() && env.ENABLE_DEBUG == 'false'
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
run: terraform destroy -var-file "github_vars.tfvars" -var-file "OS.tfvars" --auto-approve -input=false

.github/workflows/main.tf (vendored, new file, 83 lines)

@ -0,0 +1,83 @@
provider "aws" {
profile = ""
region = var.aws_region
}
// Create a security group with access to port 22 and port 80 open to serve HTTP traffic
resource "random_id" "server" {
keepers = {
# Generate a new id each time we switch to a new AMI id
ami_id = "${var.ami_id}"
}
byte_length = 8
}
resource "aws_security_group" "github_actions" {
name = "${var.namespace}-${random_id.server.hex}-SG"
vpc_id = aws_vpc.Main.id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Environment = "${var.environment}"
Name = "${var.namespace}-SG"
}
}
// instance setup
resource "aws_instance" "testing_vm" {
ami = var.ami_id
availability_zone = var.availability_zone
associate_public_ip_address = true
key_name = var.ami_key_pair_name # This is the key as known in the ec2 key_pairs
instance_type = var.instance_type
tags = var.instance_tags
vpc_security_group_ids = [aws_security_group.github_actions.id]
subnet_id = aws_subnet.Main.id
root_block_device {
delete_on_termination = true
}
}
// generate inventory file
resource "local_file" "inventory" {
filename = "./hosts.yml"
directory_permission = "0755"
file_permission = "0644"
content = <<EOF
# benchmark host
all:
hosts:
${var.ami_os}:
ansible_host: ${aws_instance.testing_vm.public_ip}
ansible_user: ${var.ami_username}
vars:
setup_audit: true
run_audit: true
system_is_ec2: true
skip_reboot: false
rhel9cis_rule_5_6_6: false # skip root passwd check and keys only
EOF
}


@ -1,141 +0,0 @@
---
name: Main pipeline
on: # yamllint disable-line rule:truthy
pull_request_target:
types: [opened, reopened, synchronize]
branches:
- main
- latest
paths:
- '**.yml'
- '**.sh'
- '**.j2'
- '**.ps1'
- '**.cfg'
# Allow permissions for AWS auth
permissions:
id-token: write
contents: read
pull-requests: read
# A workflow run is made up of one or more jobs
# that can run sequentially or in parallel
jobs:
# This workflow contains a single job that tests the playbook
playbook-test:
# The type of runner that the job will run on
runs-on: self-hosted
env:
ENABLE_DEBUG: ${{ vars.ENABLE_DEBUG }}
# Imported as a variable by terraform
TF_VAR_repository: ${{ github.event.repository.name }}
AWS_REGION : "us-east-1"
ANSIBLE_VERSION: ${{ vars.ANSIBLE_RUNNER_VERSION }}
defaults:
run:
shell: bash
working-directory: .github/workflows/github_linux_IaC
# working-directory: .github/workflows
steps:
- name: Git clone the lockdown repository to test
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: If a variable for IAC_BRANCH is set use that branch
working-directory: .github/workflows
run: |
if [ "${{ vars.IAC_BRANCH }}" != '' ]; then
echo "IAC_BRANCH=${{ vars.IAC_BRANCH }}" >> $GITHUB_ENV
echo "Pipeline using the following IAC branch ${{ vars.IAC_BRANCH }}"
else
echo IAC_BRANCH=main >> $GITHUB_ENV
fi
# Pull in terraform code for linux servers
- name: Clone GitHub IaC plan
uses: actions/checkout@v4
with:
repository: ansible-lockdown/github_linux_IaC
path: .github/workflows/github_linux_IaC
ref: ${{ env.IAC_BRANCH }}
# Uses dedicated restricted role and policy to enable this only for this task
# No credentials are part of github for AWS auth
- name: configure aws credentials
uses: aws-actions/configure-aws-credentials@main
with:
role-to-assume: ${{ secrets.AWS_ASSUME_ROLE }}
role-session-name: ${{ secrets.AWS_ROLE_SESSION }}
aws-region: ${{ env.AWS_REGION }}
- name: DEBUG - Show IaC files
if: env.ENABLE_DEBUG == 'true'
run: |
echo "OSVAR = $OSVAR"
echo "benchmark_type = $benchmark_type"
pwd
ls
env:
# Imported from GitHub variables this is used to load the relevant OS.tfvars file
OSVAR: ${{ vars.OSVAR }}
benchmark_type: ${{ vars.BENCHMARK_TYPE }}
- name: Tofu init
id: init
run: tofu init
env:
# Imported from GitHub variables this is used to load the relevant OS.tfvars file
OSVAR: ${{ vars.OSVAR }}
TF_VAR_benchmark_type: ${{ vars.BENCHMARK_TYPE }}
- name: Tofu validate
id: validate
run: tofu validate
env:
# Imported from GitHub variables this is used to load the relevant OS.tfvars file
OSVAR: ${{ vars.OSVAR }}
TF_VAR_benchmark_type: ${{ vars.BENCHMARK_TYPE }}
- name: Tofu apply
id: apply
env:
OSVAR: ${{ vars.OSVAR }}
TF_VAR_benchmark_type: ${{ vars.BENCHMARK_TYPE }}
TF_VAR_privsubnet_id: ${{ secrets.AWS_PRIVSUBNET_ID }}
TF_VAR_vpc_secgrp_id: ${{ secrets.AWS_VPC_SECGRP_ID }}
run: tofu apply -var-file "${OSVAR}.tfvars" --auto-approve -input=false
## Debug Section
- name: DEBUG - Show Ansible hostfile
if: env.ENABLE_DEBUG == 'true'
run: cat hosts.yml
# AWS deployments take a while to come up; insert a sleep or the playbook fails
- name: Sleep to allow system to come up
run: sleep ${{ vars.BUILD_SLEEPTIME }}
# Run the Ansible playbook
- name: Run_Ansible_Playbook
env:
ANSIBLE_HOST_KEY_CHECKING: "false"
ANSIBLE_DEPRECATION_WARNINGS: "false"
run: |
/opt/ansible_${{ env.ANSIBLE_VERSION }}_venv/bin/ansible-playbook -i hosts.yml --private-key ~/.ssh/le_runner ../../../site.yml
# Remove test system - User secrets to keep if necessary
- name: Tofu Destroy
if: always() && env.ENABLE_DEBUG == 'false'
env:
OSVAR: ${{ vars.OSVAR }}
TF_VAR_benchmark_type: ${{ vars.BENCHMARK_TYPE }}
TF_VAR_privsubnet_id: ${{ secrets.AWS_PRIVSUBNET_ID }}
TF_VAR_vpc_secgrp_id: ${{ secrets.AWS_VPC_SECGRP_ID }}
run: tofu destroy -var-file "${OSVAR}.tfvars" --auto-approve -input=false

.github/workflows/terraform.tfvars (vendored, new file, 6 lines)

@ -0,0 +1,6 @@
// vars should be loaded by OSname.tfvars
availability_zone = "us-east-1b"
aws_region = "us-east-1"
ami_os = var.ami_os
ami_username = var.ami_username
instance_tags = var.instance_tags

.github/workflows/update_galaxy.yml (vendored, new file, 21 lines)

@ -0,0 +1,21 @@
---
# This is a basic workflow to help you get started with Actions
name: update galaxy
# Controls when the action will run.
# Triggers the workflow on merge request events to the main branch
on:
push:
branches:
- main
jobs:
update_role:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: robertdebock/galaxy-action@master
with:
galaxy_api_key: ${{ secrets.GALAXY_API_KEY }}
git_branch: main

.github/workflows/variables.tf (vendored, new file, 81 lines)

@ -0,0 +1,81 @@
// Taken from the OSname.tfvars
variable "aws_region" {
description = "AWS region"
default = "us-east-1"
type = string
}
variable "availability_zone" {
description = "List of availability zone in the region"
default = "us-east-1b"
type = string
}
variable "instance_type" {
description = "EC2 Instance Type"
default = "t3.micro"
type = string
}
variable "instance_tags" {
description = "Tags to set for instances"
type = map(string)
}
variable "ami_key_pair_name" {
description = "Name of key pair in AWS that is used"
type = string
}
variable "private_key" {
description = "path to private key for ssh"
type = string
}
variable "ami_os" {
description = "AMI OS Type"
type = string
}
variable "ami_id" {
description = "AMI ID reference"
type = string
}
variable "ami_username" {
description = "Username for the ami id"
type = string
}
variable "ami_user_home" {
description = "home dir for the username"
type = string
}
variable "namespace" {
description = "Name used across all tags"
type = string
}
variable "environment" {
description = "Env Name used across all tags"
type = string
}
// taken from github_vars.tfvars &
variable "main_vpc_cidr" {
description = "Private cidr block to be used for vpc"
type = string
}
variable "public_subnets" {
description = "public subnet cidr block"
type = string
}
variable "private_subnets" {
description = "private subnet cidr block"
type = string
}

.gitignore (vendored, 5 lines changed)

@ -12,7 +12,7 @@ delete*
 ignore*
 test_inv
 # temp remove doc while this is built up
 doc/
 # VSCode
 .vscode
@ -46,6 +3 @@ benchparse/
 # GitHub Action/Workflow files
 .github/
-# Precommit exclusions
-.ansible/

.pre-commit-config.yaml

@ -1,71 +0,0 @@
---
##### CI for use by github no need for action to be added
##### Inherited
ci:
autofix_prs: false
skip: [detect-aws-credentials, ansible-lint ]
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
# Safety
- id: detect-aws-credentials
name: Detect AWS Credentials
- id: detect-private-key
name: Detect Private Keys
# git checks
- id: check-merge-conflict
name: Check for merge conflicts
- id: check-added-large-files
name: Check for Large files
- id: check-case-conflict
name: Check case conflict
# General checks
- id: trailing-whitespace
name: Trim Trailing Whitespace
description: This hook trims trailing whitespace.
entry: trailing-whitespace-fixer
language: python
types: [text]
args: [--markdown-linebreak-ext=md]
- id: end-of-file-fixer
name: Ensure line at end of file
# Scan for passwords
- repo: https://github.com/Yelp/detect-secrets
rev: v1.5.0
hooks:
- id: detect-secrets
- repo: https://github.com/gitleaks/gitleaks
rev: v8.30.0
hooks:
- id: gitleaks
- repo: https://github.com/ansible-community/ansible-lint
rev: v25.12.2
hooks:
- id: ansible-lint
name: Ansible-lint
description: This hook runs ansible-lint.
entry: python3 -m ansiblelint --force-color site.yml -c .ansible-lint
language: python
# do not pass files to ansible-lint, see:
# https://github.com/ansible/ansible-lint/issues/611
pass_filenames: false
always_run: true
# additional_dependencies:
# https://github.com/pre-commit/pre-commit/issues/1526
# If you want to use specific version of ansible-core or ansible, feel
# free to override `additional_dependencies` in your own hook config
# file.
# - ansible-core>=2.10.1
- repo: https://github.com/adrienverge/yamllint.git
rev: v1.37.1 # or higher tag
hooks:
- id: yamllint
name: Check YAML Lint

.yamllint

@ -1,38 +1,33 @@
 ---
 extends: default
 ignore: |
   tests/
   molecule/
   .github/
   .gitlab-ci.yml
   *molecule.yml
 rules:
-  braces:
-    max-spaces-inside: 1
-    level: error
-  brackets:
-    max-spaces-inside: 1
-    level: error
-  comments:
-    ignore-shebangs: true
-    min-spaces-from-content: 1  # prettier compatibility
-  comments-indentation: enable
-  empty-lines:
-    max: 1
-  indentation:
-    # Requiring 2 space indentation
-    spaces: 2
-    # Requiring consistent indentation within a file, either indented or not
-    indent-sequences: consistent
-  key-duplicates: enable
-  line-length: disable
-  new-line-at-end-of-file: enable
-  new-lines:
-    type: unix
-  octal-values:
-    forbid-implicit-octal: true  # yamllint defaults to false
-    forbid-explicit-octal: true
-  trailing-spaces: enable
-  truthy:
-    allowed-values: ['true', 'false']
-    check-keys: true
+  indentation:
+    # Requiring 4 space indentation
+    spaces: 4
+    # Requiring consistent indentation within a file, either indented or not
+    indent-sequences: consistent
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  empty-lines:
+    max: 1
+  line-length: disable
+  key-duplicates: enable
+  new-line-at-end-of-file: enable
+  new-lines:
+    type: unix
+  trailing-spaces: enable
+  truthy:
+    allowed-values: ['true', 'false']
+    check-keys: false

ChangeLog.md

@ -1,146 +1,5 @@
# Changes to rhel9CIS
## 2.0.4 - Based on CIS v2.0.0
- addressed issue #393 thank you to @fragglexarmy
- addressed issue #394 thank you to @dbeuker
- addressed issues #390 and #391 thanks to @polski-g
- addressed issue #398 & #399 thanks to trumbaut
- Added max-concurrent options for audit
- work flow updates
- audit logic improvements
- auditd template 2.19 compatible
- pre-commit updates
- #410 thanks to @kpi-nourman
- #413 thanks to @bbaassssiiee
## 2.0.3 - Based on CIS v2.0.0
- addressed issue #387, thank you @fragglexarmy
- addressed issue #382 to improve regex logic on 5.4.2.4
- improvement on crypto policy managed controls with var logic
- addressed issue #384 thank you @polski-g
- update command to shell module on tasks
- addressed issue 371 thanks to @bgro and kodebach
- addressed issue 350 thanks to @chrispipo
- addressed issue 364 thanks to @polski-g
- pre-commit update
## 2.0.2 - Based on CIS v2.0.0
- Update to audit_only to allow fetching results
- resolved false warning for fetch audit
- fix root user check
- Improved documentation and variable compilation for crypto policies
- Addresses #318 - Thank you @kodebach & @bgro
- Improved logic for 5.2.4 to exclude rhel9cis_sudoers_exclude_nopasswd_list in pre-check tasks/main.yml
## 2.0.1 - Based on CIS v2.0.0
- Thanks to @polski-g several issues and improvements added
- Improved testing for 50-redhat.conf for ssh
- 5.1.x regexp improvements
- Improved root password check
- egrep command changed to grep -E
## 2.0.0 - Based on CIS v2.0.0
- #322, #325 - thanks to @mindrb
- #320 - thanks to @anup-ad
## 1.1.6 - Based on CIS v1.0.0
- #190 - thanks to @ipruteanu-sie
- addressed requirements in PR with alternate method
- #191 - thanks to @numericillustration
- Addressed authselect for pam
- #193 thanks to brakkio86
## 1.1.5 - Based on CIS v1.0.0
- added new interactive user discoveries
- updated controls 6.2.10-6.2.14
- audit
- steps moved to prelim
- update to coipy and archive logic and variables
- removed vars not used
- updated quotes used in mode tasks
- pre-commit update
- issues addressed
- #190 thanks to @ipruteanu-sie
- aligned logic for user shadow suite params (aligned with other repos)
- new variables to force changes to existing users added 5.6.1.1 - 5.6.1.2
- #198 thanks to @brakkio86
## 1.1.4 - Based on CIS v1.0.0
- 1.2.1 new option for a new system to import gpg key for 1.2.1 to pass redhat only
- thanks to @ipruteanu-sie
- #156
- #165
- #180
- #181
- #183
- #184
## 1.1.3 - Based on CIS v1.0.0
- updated goss binary to 0.4.4
- moved majority of audit variables to vars/audit.yml
- new function to enable audit_only using remediation
- removed some dupes in audit config
## 1.1.2 - Based on CIS v1.0.0
- updated audit binary versions - aligned with rhel9-cis-audit
- lint updates
- .secrets updated
- file mode quoted
- updated 5.6.5 thansk to feedback from S!ghs on discord community
## 1.1.1 - Based on CIS v1.0.0
- thanks to @agbrowne
- [#90](https://github.com/ansible-lockdown/RHEL9-CIS/issues/90)
- thanks to @mnasiadka
- [#54](https://github.com/ansible-lockdown/RHEL9-CIS/pull/54)
## 1.1.0
- new workflow configuration
- Allowing devel and main configs
- IaC code found in alternate repo for easier mgmt
- Added pre-commit config - Does not have to be used but can improve things
- .pre-commit-config.yaml
- .secrets.baseline
- gitleaks and secrets detection
- updated to logic in 5.6.5
- lint updates to 6.1.x
- readme updates
- audit control updates and variable name changes
- ability to run audit on arm64(e.g. pi or M1/2) too thanks to @lucab85 #77
- tidy up README adopted PR #78 thanks to @lucab85
- moved Makefile requirements to .config/
- removed .ansible.cfg and local.yml
## 1.0.10
- [#72](https://github.com/ansible-lockdown/RHEL9-CIS/issues/72)
- Only run check when paybook user not a superuser
- fix for 5.5.3 thanks to @nrg-fv
## 1.0.9
fixed assert for user password set
thanks to @byjunks
[#66](https://github.com/ansible-lockdown/RHEL9-CIS/issues/66)
## 1.0.8
rule_1.10 improvements allowing for module checking (useful for AD)
 ## 1.0.7
 lint and yaml updates
@ -149,7 +8,7 @@ improvements to 6.1.10, 6.1.11, 6.1.13, 6.1.14
 ## 1.0.6
-updated yamllint as galaxy doesn't honour local settings
+updated ymlalint as galaxy doenst honouyr local settings
 removed empty lines in files
 ## 1.0.5
@ -186,8 +45,8 @@ Aligned benchmark audit version with remediate release
 ## 1.0.1
 Control 6_2_16 new variable added thanks to @dulin_gnet on rhel8
-Will not follow symlink in home directories and amend permissions.
+Will not follow ynlink in hoe directoris and amend permissions.
 - rhel_09_6_2_16_home_follow_symlink: false

LICENSE

@ -1,6 +1,6 @@
 MIT License
-Copyright (c) 2025 Mindpoint Group - A Tyto Athene Company / Ansible Lockdown
+Copyright (c) 2023 Mindpoint Group / Lockdown Enterprise / Lockdown Enterprise Releases
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

Makefile

@ -25,5 +25,5 @@ yamllint:
 pip-requirements:
 @echo 'Python dependencies:'
-@cat .config/requirements.txt
+@cat requirements.txt
 pip3 install -r requirements.txt

README.md (377 lines changed)

@ -2,111 +2,109 @@
## Configure a RHEL 9 machine to be [CIS](https://www.cisecurity.org/cis-benchmarks/) compliant ## Configure a RHEL 9 machine to be [CIS](https://www.cisecurity.org/cis-benchmarks/) compliant
### Based on [CIS RedHat Enterprise Linux 9 Benchmark v2.0.0](https://www.cisecurity.org/cis-benchmarks/) ### Based on [ CIS RedHat Enterprise Linux 9 Benchmark v1.0.0 - 11-30-2022 ](https://www.cisecurity.org/cis-benchmarks/)
--- ---
## Public Repository 📣
![Org Stars](https://img.shields.io/github/stars/ansible-lockdown?label=Org%20Stars&style=social) ![Org Stars](https://img.shields.io/github/stars/ansible-lockdown?label=Org%20Stars&style=social)
![Stars](https://img.shields.io/github/stars/ansible-lockdown/RHEL9-CIS?label=Repo%20Stars&style=social) ![Stars](https://img.shields.io/github/stars/ansible-lockdown/RHEL9-CIS?label=Repo%20Stars&style=social)
![Forks](https://img.shields.io/github/forks/ansible-lockdown/RHEL9-CIS?style=social) ![Forks](https://img.shields.io/github/forks/ansible-lockdown/RHEL9-CIS?style=social)
![Followers](https://img.shields.io/github/followers/ansible-lockdown?style=social) ![followers](https://img.shields.io/github/followers/ansible-lockdown?style=social)
[![Twitter URL](https://img.shields.io/twitter/url/https/twitter.com/AnsibleLockdown.svg?style=social&label=Follow%20%40AnsibleLockdown)](https://twitter.com/AnsibleLockdown) [![Twitter URL](https://img.shields.io/twitter/url/https/twitter.com/AnsibleLockdown.svg?style=social&label=Follow%20%40AnsibleLockdown)](https://twitter.com/AnsibleLockdown)
![Ansible Galaxy Quality](https://img.shields.io/ansible/quality/61781?label=Quality&&logo=ansible)
![Discord Badge](https://img.shields.io/discord/925818806838919229?logo=discord) ![Discord Badge](https://img.shields.io/discord/925818806838919229?logo=discord)
![Devel Build Status](https://img.shields.io/github/actions/workflow/status/ansible-lockdown/RHEL9-CIS/linux_benchmark_testing.yml?label=Devel%20Build%20Status)
![Devel Commits](https://img.shields.io/github/commit-activity/m/ansible-lockdown/RHEL9-CIS/devel?color=dark%20green&label=Devel%20Branch%20commits)
![Release Branch](https://img.shields.io/badge/Release%20Branch-Main-brightgreen)
![Main Build Status](https://img.shields.io/github/actions/workflow/status/ansible-lockdown/RHEL9-CIS/linux_benchmark_testing.yml?label=Build%20Status)
![Main Release Date](https://img.shields.io/github/release-date/ansible-lockdown/RHEL9-CIS?label=Release%20Date)
![Release Tag](https://img.shields.io/github/v/tag/ansible-lockdown/RHEL9-CIS?label=Release%20Tag&&color=success)
![Issues Open](https://img.shields.io/github/issues-raw/ansible-lockdown/RHEL9-CIS?label=Open%20Issues)
![Issues Closed](https://img.shields.io/github/issues-closed-raw/ansible-lockdown/RHEL9-CIS?label=Closed%20Issues&&color=success)
![Pull Requests](https://img.shields.io/github/issues-pr/ansible-lockdown/RHEL9-CIS?label=Pull%20Requests)
![License](https://img.shields.io/github/license/ansible-lockdown/RHEL9-CIS?label=License) ![License](https://img.shields.io/github/license/ansible-lockdown/RHEL9-CIS?label=License)
## Lint & Pre-Commit Tools 🔧 ---
[![Pre-Commit.ci](https://img.shields.io/endpoint?url=https://ansible-lockdown.github.io/github_linux_IaC/badges/RHEL9-CIS/pre-commit-ci.json)](https://results.pre-commit.ci/latest/github/ansible-lockdown/RHEL9-CIS/devel) ## Looking for support?
![YamlLint](https://img.shields.io/badge/yamllint-Present-brightgreen?style=flat&logo=yaml&logoColor=white)
![Ansible-Lint](https://img.shields.io/badge/ansible--lint-Present-brightgreen?style=flat&logo=ansible&logoColor=white)
## Community Release Information 📂 [Lockdown Enterprise](https://www.lockdownenterprise.com#GH_AL_RH9_cis)
![Release Branch](https://img.shields.io/badge/Release%20Branch-Main-brightgreen) [Ansible support](https://www.mindpointgroup.com/cybersecurity-products/ansible-counselor#GH_AL_RH9_cis)
![Release Tag](https://img.shields.io/github/v/tag/ansible-lockdown/RHEL9-CIS?label=Release%20Tag&&color=success)
![Main Release Date](https://img.shields.io/github/release-date/ansible-lockdown/RHEL9-CIS?label=Release%20Date)
![Benchmark Version Main](https://img.shields.io/endpoint?url=https://ansible-lockdown.github.io/github_linux_IaC/badges/RHEL9-CIS/benchmark-version-main.json)
![Benchmark Version Devel](https://img.shields.io/endpoint?url=https://ansible-lockdown.github.io/github_linux_IaC/badges/RHEL9-CIS/benchmark-version-devel.json)
[![Main Pipeline Status](https://github.com/ansible-lockdown/RHEL9-CIS/actions/workflows/main_pipeline_validation.yml/badge.svg?)](https://github.com/ansible-lockdown/RHEL9-CIS/actions/workflows/main_pipeline_validation.yml) ### Community
[![Devel Pipeline Status](https://github.com/ansible-lockdown/RHEL9-CIS/actions/workflows/devel_pipeline_validation.yml/badge.svg?)](https://github.com/ansible-lockdown/RHEL9-CIS/actions/workflows/devel_pipeline_validation.yml) Join us on our [Discord Server](https://discord.io/ansible-lockdown) to ask questions, discuss features, or just chat with other Ansible-Lockdown users.
![Devel Commits](https://img.shields.io/github/commit-activity/m/ansible-lockdown/RHEL9-CIS/devel?color=dark%20green&label=Devel%20Branch%20Commits)
![Open Issues](https://img.shields.io/github/issues-raw/ansible-lockdown/RHEL9-CIS?label=Open%20Issues)
![Closed Issues](https://img.shields.io/github/issues-closed-raw/ansible-lockdown/RHEL9-CIS?label=Closed%20Issues&&color=success)
![Pull Requests](https://img.shields.io/github/issues-pr/ansible-lockdown/RHEL9-CIS?label=Pull%20Requests)
--- ---
## Subscriber Release Information 🔐 ## Caution(s)
![Private Release Branch](https://img.shields.io/endpoint?url=https://ansible-lockdown.github.io/github_linux_IaC/badges/Private-RHEL9-CIS/release-branch.json)
![Private Benchmark Version](https://img.shields.io/endpoint?url=https://ansible-lockdown.github.io/github_linux_IaC/badges/Private-RHEL9-CIS/benchmark-version.json)
[![Private Remediate Pipeline](https://img.shields.io/endpoint?url=https://ansible-lockdown.github.io/github_linux_IaC/badges/Private-RHEL9-CIS/remediate.json)](https://github.com/ansible-lockdown/Private-RHEL9-CIS/actions/workflows/main_pipeline_validation.yml)
[![Private GPO Pipeline](https://img.shields.io/endpoint?url=https://ansible-lockdown.github.io/github_linux_IaC/badges/Private-RHEL9-CIS/gpo.json)](https://github.com/ansible-lockdown/Private-RHEL9-CIS/actions/workflows/main_pipeline_validation_gpo.yml)
![Private Pull Requests](https://img.shields.io/endpoint?url=https://ansible-lockdown.github.io/github_linux_IaC/badges/Private-RHEL9-CIS/prs.json)
![Private Closed Issues](https://img.shields.io/endpoint?url=https://ansible-lockdown.github.io/github_linux_IaC/badges/Private-RHEL9-CIS/issues-closed.json)
---
## Looking for support? 🤝
[Lockdown Enterprise](https://www.lockdownenterprise.com#GH_AL_RHEL9-CIS)
[Ansible support](https://www.mindpointgroup.com/cybersecurity-products/ansible-counselor#GH_AL_RHEL9-CIS)
### Community 💬
On our [Discord Server](https://www.lockdownenterprise.com/discord) to ask questions, discuss features, or just chat with other Ansible-Lockdown users
---
## 🚨 Caution(s) 🚨
This role **will make changes to the system** which may have unintended consequences. This is not an auditing tool but rather a remediation tool to be used after an audit has been conducted. This role **will make changes to the system** which may have unintended consequences. This is not an auditing tool but rather a remediation tool to be used after an audit has been conducted.
- Testing is the most important thing you can do. Check Mode is not supported! The role will complete in check mode without errors, but it is not supported and should be used with caution. The RHEL8-CIS-Audit role or a compliance scanner should be used for compliance checking over check mode.
- Check Mode is not guaranteed! 🚫 The role will complete in check mode without errors, but it is not supported and should be used with caution. This role was developed against a clean install of the Operating System. If you are implementing to an existing system please review this role for any site specific changes that are needed.
- This role was developed against a clean install of the Operating System. If you are implementing to an existing system please review this role for any site specific changes that are needed. To use release version please point to main branch and relevant release for the cis benchmark you wish to work with.
- To use release version please point to main branch and relevant release for the cis benchmark you wish to work with.
- Did we mention testing??
--- ---
## Coming From A Previous Release ⏪ ## Matching a security Level for CIS
It is possible to only run level 1 or level 2 controls for CIS.
This is managed using tags:
- level1_server
- level1_workstation
- level2_server
- level2_workstation
The controls found in defaults/main.yml also need to reflect this, as they control the testing that takes place if you are using the audit component.
## Coming from a previous release
CIS release always contains changes, it is highly recommended to review the new references and available variables. This have changed significantly since ansible-lockdown initial release. CIS release always contains changes, it is highly recommended to review the new references and available variables. This have changed significantly since ansible-lockdown initial release.
This is now compatible with python3 if it is found to be the default interpreter. This does come with pre-requisites which it configures the system accordingly. This is now compatible with python3 if it is found to be the default interpreter. This does come with pre-requisites which it configures the system accordingly.
Further details can be seen in the [Changelog](./ChangeLog.md) Further details can be seen in the [Changelog](./ChangeLog.md)
--- ## Auditing (new)
## Matching a security Level for CIS This can be turned on or off within the defaults/main.yml file with the variable rhel8cis_run_audit. The value is false by default, please refer to the wiki for more details. The defaults file also populates the goss checks to check only the controls that have been enabled in the ansible role.
It is possible to only run level 1 or level 2 controls for CIS. This is a much quicker, very lightweight, checking (where possible) config compliance and live/running settings.
This is managed using tags:
- level1-server A new form of auditing has been developed, by using a small (12MB) go binary called [goss](https://github.com/goss-org/goss) along with the relevant configurations to check. Without the need for infrastructure or other tooling.
- level1-workstation This audit will not only check the config has the correct setting but aims to capture if it is running with that configuration also trying to remove [false positives](https://www.mindpointgroup.com/blog/is-compliance-scanning-still-relevant/) in the process.
- level2-server
- level2-workstation
The controls found in defaults/main.yml also need to reflect this, as they control the testing that takes place if you are using the audit component.
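As an illustration only (the inventory path is assumed; `site.yml` and the tag names are taken from elsewhere in this diff), a run can be limited to one level with tags:
```bash
# Illustrative only: apply just the CIS Level 1 server controls.
ansible-playbook -i inventory site.yml --tags level1-server

# Or run everything except the controls tagged as services.
ansible-playbook -i inventory site.yml --skip-tags services
```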
--- ## Documentation
## Requirements ✅
- [Read The Docs](https://ansible-lockdown.readthedocs.io/en/latest/)
- [Getting Started](https://www.lockdownenterprise.com/docs/getting-started-with-lockdown#GH_AL_RH9_cis)
- [Customizing Roles](https://www.lockdownenterprise.com/docs/customizing-lockdown-enterprise#GH_AL_RH9_cis)
- [Per-Host Configuration](https://www.lockdownenterprise.com/docs/per-host-lockdown-enterprise-configuration#GH_AL_RH9_cis)
- [Getting the Most Out of the Role](https://www.lockdownenterprise.com/docs/get-the-most-out-of-lockdown-enterprise#GH_AL_RH9_cis)
## Requirements
RHEL 9
Almalinux 9
Rocky 9
OracleLinux 9
ansible 2.10
jmespath
relevant collections
- Access to download or add the goss binary and content to the system if using auditing (other options are available on how to get the content to the system.)
**General:** **General:**
@ -117,86 +115,26 @@ The control found in defaults main also need to reflect this as this control the
- [Tower User Guide](https://docs.ansible.com/ansible-tower/latest/html/userguide/index.html) - [Tower User Guide](https://docs.ansible.com/ansible-tower/latest/html/userguide/index.html)
- [Ansible Community Info](https://docs.ansible.com/ansible/latest/community/index.html) - [Ansible Community Info](https://docs.ansible.com/ansible/latest/community/index.html)
- Functioning Ansible and/or Tower Installed, configured, and running. This includes all of the base Ansible/Tower configurations, needed packages installed, and infrastructure setup. - Functioning Ansible and/or Tower Installed, configured, and running. This includes all of the base Ansible/Tower configurations, needed packages installed, and infrastructure setup.
- Please read through the tasks in this role to gain an understanding of what each control is doing. Some of the tasks are disruptive and can have unintended consequences in a live production system. Also familiarize yourself with the variables in the defaults/main.yml file. - Please read through the tasks in this role to gain an understanding of what each control is doing. Some of the tasks are disruptive and can have unintended consiquences in a live production system. Also familiarize yourself with the variables in the defaults/main.yml file.
**Technical Dependencies:** **Technical Dependencies:**
RHEL Family OS 9 - Python3
- Ansible 2.9+
- Access to download or add the goss binary and content to the system if using auditing - python-def (should be included in RHEL 9)
(other options are available on how to get the content to the system.)
- Python3.8
- Ansible 2.12+
- python-def
- libselinux-python - libselinux-python
- pip packages
--- - jmespath ( complete list found in requirements.txt)
- collections found in collections/requirements.yml
## Auditing 🔍
This can be turned on or off within the defaults/main.yml file with the variable run_audit. The value is false by default, please refer to the wiki for more details. The defaults file also populates the goss checks to check only the controls that have been enabled in the ansible role.
This is a much quicker, very lightweight, checking (where possible) config compliance and live/running settings.
A new form of auditing has been developed, by using a small (12MB) go binary called [goss](https://github.com/goss-org/goss) along with the relevant configurations to check. Without the need for infrastructure or other tooling.
This audit will not only check the config has the correct setting but aims to capture if it is running with that configuration also trying to remove [false positives](https://www.mindpointgroup.com/blog/is-compliance-scanning-still-relevant/) in the process.
Refer to [RHEL9-CIS-Audit](https://github.com/ansible-lockdown/RHEL9-CIS-Audit).
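A minimal sketch of enabling the audit from the command line, assuming the `setup_audit` and `run_audit` variables shown in the generated inventory earlier in this diff (an inventory or group_vars entry works equally well):
```bash
# Illustrative only: run remediation with the goss-based audit before and after.
ansible-playbook -i inventory site.yml -e "setup_audit=true" -e "run_audit=true"
```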
## Example Audit Summary
This is based on a vagrant image with selections enabled. e.g. No Gui or firewall.
Note: More tests are run during audit as we check config and running state.
```txt
ok: [default] => {
"msg": [
"The pre remediation results are: ['Total Duration: 5.454s', 'Count: 338, Failed: 47, Skipped: 5'].",
"The post remediation results are: ['Total Duration: 5.007s', 'Count: 338, Failed: 46, Skipped: 5'].",
"Full breakdown can be found in /var/tmp",
""
]
}
PLAY RECAP *******************************************************************************************************************************************
default : ok=270 changed=23 unreachable=0 failed=0 skipped=140 rescued=0 ignored=0
```
## Documentation 📖
- [Read The Docs](https://ansible-lockdown.readthedocs.io/en/latest/)
- [Getting Started](https://www.lockdownenterprise.com/docs/getting-started-with-lockdown#GH_AL_RH9_cis)
- [Customizing Roles](https://www.lockdownenterprise.com/docs/customizing-lockdown-enterprise#GH_AL_RH9_cis)
- [Per-Host Configuration](https://www.lockdownenterprise.com/docs/per-host-lockdown-enterprise-configuration#GH_AL_RH9_cis)
- [Getting the Most Out of the Role](https://www.lockdownenterprise.com/docs/get-the-most-out-of-lockdown-enterprise#GH_AL_RH9_cis)
## Role Variables ## Role Variables
This role is designed that the end user should not have to edit the tasks themselves. All customizing should be done via the defaults/main.yml file or with extra vars within the project, job, workflow, etc. This role is designed that the end user should not have to edit the tasks themselves. All customizing should be done by overriding the required varaibles as found in defaults/main.yml file. e.g. using inventory, group_vars, extra_vars
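For instance (variable names taken from elsewhere in this diff; the values are placeholders), defaults can be overridden at run time rather than by editing the role:
```bash
# Illustrative only: override role defaults with extra-vars.
ansible-playbook -i inventory site.yml \
  -e "rhel9cis_rule_5_6_6=false" \
  -e "rhel9cis_rhel_default_repo=true"
```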
## Tags 🏷️ ## Tags
There are many tags available for added control precision. Each control has its own set of tags noting what level, what OS element it relates to, whether it's a patch or audit, and the rule number. Additionally, NIST references follow a specific conversion format for consistency and clarity. There are many tags available for added control precision. Each control has it's own set of tags noting what level, if it's scored/notscored, what OS element it relates to, if it's a patch or audit, and the rule number.
### Conversion Format for NIST References:
1. Standard Prefix:
- All references are prefixed with "NIST".
2. Standard Types:
- "800-53" references are formatted as NIST800-53.
- "800-53r5" references are formatted as NIST800-53R5 (with 'R' capitalized).
- "800-171" references are formatted as NIST800-171.
3. Details:
- Section and subsection numbers use periods (.) for numeric separators.
- Parenthetical elements are separated by underscores (_), e.g., IA-5(1)(d) becomes IA-5_1_d.
- Subsection letters (e.g., "b") are appended with an underscore.
Below is an example of the tag section from a control within this role. Using this example if you set your run to skip all controls with the tag services, this task will be skipped. The opposite can also happen where you run only controls tagged with services. Below is an example of the tag section from a control within this role. Using this example if you set your run to skip all controls with the tag services, this task will be skipped. The opposite can also happen where you run only controls tagged with services.
```sh ```sh
@ -210,55 +148,146 @@ Below is an example of the tag section from a control within this role. Using th
- rule_2.2.4 - rule_2.2.4
``` ```
## Community Contribution
## Community Contribution 🧑‍🤝‍🧑
We encourage you (the community) to contribute to this role. Please read the rules below. We encourage you (the community) to contribute to this role. Please read the rules below.
- Your work is done in your own individual branch. Make sure to Signed-off-by and GPG sign all commits you intend to merge. - Your work is done in your own individual branch. Make sure to Signed-off and GPG sign all commits you intend to merge.
- All community Pull Requests are pulled into the devel branch - All community Pull Requests are pulled into the devel branch
- Pull Requests into devel will confirm your commits have a GPG signature, Signed-off-by, and a functional test before being approved - Pull Requests into devel will confirm your commits have a GPG signature, Signed-off, and a functional test before being approved
- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release - Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release
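A quick sketch of the signing step mentioned in the list above, assuming a GPG key is already configured for your Git identity (branch name and message are placeholders):
```bash
# -s adds the Signed-off-by trailer, -S GPG-signs the commit.
git checkout -b my-fix
git commit -s -S -m "Describe the change here"
```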
## Pipeline Testing 🔄
uses:
- ansible-core 2.16
- ansible collections - pulls in the latest version based on requirements file
- runs the audit using the devel branch
- This is an automated test that occurs on pull requests into devel
- self-hosted runners using OpenTofu
## Known Issues ## Known Issues
Almalinux BaseOS, EPEL and many cloud provider repositories do not allow gpgcheck (rule_1.2.1.2) or repo_gpgcheck (rule_1.2.1.3); this will cause issues during the playbook run unless a workaround is found.
- Rocky and Alma not affected.
Variable used to unset.
rhel9cis_rhel_default_repo: true # to be set to false if using repo that does have this ability
## Pipeline Testing
uses:
- ansible-core 2.12
- ansible collections - pulls in the latest version based on requirements file
- runs the audit using the devel branch
- This is an automated test that occurs on pull requests into devel
## Local Testing
ansible-base 2.10.17 - python 3.8
ansible-core 2.13.4 - python 3.10
- makefile - this is there purely for testing and initial setup purposes.
## Local Testing 💻
### example
```bash
molecule test -s default
molecule converge -s wsl -- --check
molecule verify -s localhost
```
local testing uses:
- ansible-core
- molecule 4.0.1
- molecule-docker 2.0.0
- molecule-podman 2.0.2
- molecule-vagrant 1.0.0
- molecule-azure 0.5.0
## Credits and Thanks 🙏
Massive thanks to the fantastic community and all its members.
This includes a huge thanks and credit to the original authors and maintainers.
Mark Bolwell, George Nalen, Steve Williams, Fred Witty
## v1.0.0 - released Dec 2022
![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/RHEL9-CIS/CommunityToDevel?label=Devel%20Build%20Status&style=plastic)
![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/RHEL9-CIS/DevelToMain?label=Main%20Build%20Status&style=plastic)
![Release](https://img.shields.io/github/v/release/ansible-lockdown/RHEL9-CIS?style=plastic)
## Join us
On our [Discord Server](https://discord.io/ansible-lockdown) to ask questions, discuss features, or just chat with other Ansible-Lockdown users
## Caution(s)
This role **will make changes to the system** which may have unintended consequences.
This role was developed against a clean install of the Operating System. If you are implementing to an existing system please review this role for any site specific changes that are needed.
To use release version please point to main branch
## Documentation
- [Readthedocs](https://ansible-lockdown.readthedocs.io/en/latest/)
- [Getting Started](https://www.lockdownenterprise.com/docs/getting-started-with-lockdown)
- [Customizing Roles](https://www.lockdownenterprise.com/docs/customizing-lockdown-enterprise)
- [Per-Host Configuration](https://www.lockdownenterprise.com/docs/per-host-lockdown-enterprise-configuration)
- [Getting the Most Out of the Role](https://www.lockdownenterprise.com/docs/get-the-most-out-of-lockdown-enterprise)
## Requirements
RHEL 9
Almalinux 9
Rocky 9
OracleLinux 9
ansible 2.10
jmespath
relevant collections
- Access to download or add the goss binary and content to the system if using auditing (other options are available on how to get the content to the system.)
## Tested with
- ansible-base 2.10.17 - python 3.8
- ansible-core 2.13.4 - python 3.10
- makefile - this is purely for testing and initial setup purposes
## General
- Basic knowledge of Ansible; below are some links to the Ansible documentation to help you get started if you are unfamiliar with Ansible
- [Main Ansible documentation page](https://docs.ansible.com)
- [Ansible Getting Started](https://docs.ansible.com/ansible/latest/user_guide/intro_getting_started.html)
- [Tower User Guide](https://docs.ansible.com/ansible-tower/latest/html/userguide/index.html)
- [Ansible Community Info](https://docs.ansible.com/ansible/latest/community/index.html)
- A functioning Ansible and/or Tower installation, configured and running. This includes all of the base Ansible/Tower configuration, required packages installed, and infrastructure set up.
- Please read through the tasks in this role to gain an understanding of what each control is doing.
- Some of the tasks are disruptive and can have unintended consequences on a live production system. Also familiarize yourself with the variables in the defaults/main.yml file.
## Dependencies
- Python3
- Ansible 2.9+
- python-def (should be included in RHEL 9)
- libselinux-python
- pip packages
- jmespath (complete list found in requirements.txt)
- collections found in collections/requirements.yml
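For example, the Python and collection dependencies listed above can typically be installed from the root of this repository; this is a sketch assuming pip and ansible-galaxy are available on the control node:

```bash
# Run from the repository root; file names are those referenced above.
python3 -m pip install -r requirements.txt
ansible-galaxy collection install -r collections/requirements.yml
```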
### Known Issues
CIS 1.2.4 - repo_gpgcheck is not carried out for RedHat hosts as the default repos do not have this function. This also affects EPEL (not covered by the variable).
- Rocky and Alma are not affected.
Variable used to unset this behaviour:
rhel9cis_rhel_default_repo: true # to be set to false if using a repo that does have this ability
30
ansible.cfg Normal file
View file
@ -0,0 +1,30 @@
[defaults]
host_key_checking=False
display_skipped_hosts=True
system_warnings=False
command_warnings=False
nocows=1
retry_files_save_path=/dev/null
pipelining=true
# Use the YAML callback plugin.
stdout_callback = yaml
# Use the stdout_callback when running ad-hoc commands.
bin_ansible_callbacks = True
[privilege_escalation]
[paramiko_connection]
record_host_keys=False
[ssh_connection]
transfer_method=scp
ssh_args = -o ControlMaster=auto -o ControlPersist=60s
[accelerate]
[selinux]
[colors]
[diff]
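As a usage note, this ansible.cfg is picked up automatically when ansible commands are run from the repository root; a quick, standard way to confirm which of these settings are in effect (nothing role-specific is assumed here):

```bash
# Shows only the settings that differ from Ansible's defaults,
# i.e. the values coming from this ansible.cfg.
ansible-config dump --only-changed
```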
View file
@ -1,14 +1,5 @@
--- ---
collections: collections:
- name: community.general - name: community.general
source: https://github.com/ansible-collections/community.general - name: community.crypto
type: git - name: ansible.posix
- name: community.crypto
source: https://github.com/ansible-collections/community.crypto
type: git
- name: ansible.posix
source: https://github.com/ansible-collections/ansible.posix
type: git
File diff suppressed because it is too large
View file
@ -1,56 +0,0 @@
#! /usr/bin/env bash
# Based on original Script provided by CIS
# CVEs correct at time of creation - April2024
{
a_output=(); a_output2=(); a_modprope_config=(); a_excluded=(); a_available_modules=()
a_ignore=("xfs" "vfat" "ext2" "ext3" "ext4")
a_cve_exists=("afs" "ceph" "cifs" "exfat" "ext" "fat" "fscache" "fuse" "gfs2" "nfs_common" "nfsd" "smbfs_common")
f_module_chk()
{
l_out2=""; grep -Pq -- "\b$l_mod_name\b" <<< "${a_cve_exists[*]}" && l_out2=" <- CVE exists!"
if ! grep -Pq -- '\bblacklist\h+'"$l_mod_name"'\b' <<< "${a_modprope_config[*]}"; then
a_output2+=(" - Kernel module: \"$l_mod_name\" is not fully disabled $l_out2")
elif ! grep -Pq -- '\binstall\h+'"$l_mod_name"'\h+\/bin\/(false|true)\b' <<< "${a_modprope_config[*]}"; then
a_output2+=(" - Kernel module: \"$l_mod_name\" is not fully disabled $l_out2")
fi
if lsmod | grep "$l_mod_name" &> /dev/null; then # Check if the module is currently loaded
a_output2+=(" - Kernel module: \"$l_mod_name\" is loaded" "")
fi
}
while IFS= read -r -d $'\0' l_module_dir; do
a_available_modules+=("$(basename "$l_module_dir")")
done < <(find "$(readlink -f /lib/modules/"$(uname -r)"/kernel/fs)" -mindepth 1 -maxdepth 1 -type d ! -empty -print0)
while IFS= read -r l_exclude; do
if grep -Pq -- "\b$l_exclude\b" <<< "${a_cve_exists[*]}"; then
a_output2+=(" - ** WARNING: kernel module: \"$l_exclude\" has a CVE and is currently mounted! **")
elif
grep -Pq -- "\b$l_exclude\b" <<< "${a_available_modules[*]}"; then
a_output+=(" - Kernel module: \"$l_exclude\" is currently mounted - do NOT unload or disable")
fi
! grep -Pq -- "\b$l_exclude\b" <<< "${a_ignore[*]}" && a_ignore+=("$l_exclude")
done < <(findmnt -knD | awk '{print $2}' | sort -u)
while IFS= read -r l_config; do
a_modprope_config+=("$l_config")
done < <(modprobe --showconfig | grep -P '^\h*(blacklist|install)')
for l_mod_name in "${a_available_modules[@]}"; do # Iterate over all filesystem modules
[[ "$l_mod_name" =~ overlay ]] && l_mod_name="${l_mod_name::-2}"
if grep -Pq -- "\b$l_mod_name\b" <<< "${a_ignore[*]}"; then
a_excluded+=(" - Kernel module: \"$l_mod_name\"")
else
f_module_chk
fi
done
# Output findings
echo "### Script can be found at ${BASH_SOURCE} ##"
if [ "${#a_output2[@]}" -le 0 ]; then
printf '%s\n' "" " - No unused filesystem kernel modules are enabled" "${a_output[@]}" ""
else
printf '%s\n' "" "-- Audit Result: --" " ** REVIEW the following **" "${a_output2[@]}"
# Changed return value to capture error
exit 99
#[ "${#a_output[@]}" -gt 0 ] && printf '%s\n' "" "-- Correctly set: --" "${a_output[@]}" ""
fi
}
View file
@ -1,273 +1,105 @@
--- ---
# handlers file for RHEL9-CIS # handlers file for RHEL9-CIS
- name: "Adding options for /tmp"
when: not rhel9cis_tmp_svc
vars:
mount_point: '/tmp'
ansible.posix.mount:
path: "{{ mount_point }}"
src: "{{ prelim_mount_point_fs_and_options[mount_point]['src'] }}"
state: present
fstype: "{{ prelim_mount_point_fs_and_options[mount_point]['fs_type'] }}"
opts: "{{ prelim_mount_point_fs_and_options[mount_point]['options'] | unique | join(',') }}"
listen: "Remount /tmp"
- name: "Remounting /tmp"
vars:
mount_point: '/tmp'
ansible.posix.mount:
path: "{{ mount_point }}"
state: remounted
listen: "Remount /tmp"
- name: "Remounting /tmp systemd"
vars:
mount_point: '/tmp'
ansible.builtin.systemd:
name: tmp.mount
state: restarted
daemon_reload: true
listen: "Remount /tmp"
- name: "Adding options for /dev/shm"
vars:
mount_point: '/dev/shm'
ansible.posix.mount:
path: "{{ mount_point }}"
src: "{{ prelim_mount_point_fs_and_options[mount_point]['src'] }}"
state: present
fstype: "{{ prelim_mount_point_fs_and_options[mount_point]['fs_type'] }}"
opts: "{{ prelim_mount_point_fs_and_options[mount_point]['options'] | unique | join(',') }}"
listen: "Remount /dev/shm"
- name: "Remounting /dev/shm"
vars:
mount_point: '/dev/shm'
ansible.posix.mount:
path: "{{ mount_point }}"
state: remounted
listen: "Remount /dev/shm"
- name: "Adding options for /home"
vars:
mount_point: '/home'
ansible.posix.mount:
path: "{{ mount_point }}"
src: "{{ prelim_mount_point_fs_and_options[mount_point]['src'] }}"
state: present
fstype: "{{ prelim_mount_point_fs_and_options[mount_point]['fs_type'] }}"
opts: "{{ prelim_mount_point_fs_and_options[mount_point]['options'] | unique | join(',') }}"
listen: "Remount /home"
- name: "Remounting /home"
vars:
mount_point: '/home'
ansible.posix.mount:
path: "{{ mount_point }}"
state: remounted
listen: "Remount /home"
- name: "Adding options for /var"
vars:
mount_point: '/var'
ansible.posix.mount:
path: "{{ mount_point }}"
src: "{{ prelim_mount_point_fs_and_options[mount_point]['src'] }}"
state: present
fstype: "{{ prelim_mount_point_fs_and_options[mount_point]['fs_type'] }}"
opts: "{{ prelim_mount_point_fs_and_options[mount_point]['options'] | unique | join(',') }}"
listen: "Remount /var"
- name: "Remounting /var"
vars:
mount_point: '/var'
ansible.posix.mount:
path: "{{ mount_point }}"
state: remounted
listen: "Remount /var"
- name: "Adding options for /var/tmp"
vars:
mount_point: '/var/tmp'
ansible.posix.mount:
path: "{{ mount_point }}"
src: "{{ prelim_mount_point_fs_and_options[mount_point]['src'] }}"
state: present
fstype: "{{ prelim_mount_point_fs_and_options[mount_point]['fs_type'] }}"
opts: "{{ prelim_mount_point_fs_and_options[mount_point]['options'] | unique | join(',') }}"
listen: "Remount /var/tmp"
- name: "Remounting /var/tmp"
vars:
mount_point: '/var/tmp'
ansible.posix.mount:
path: "{{ mount_point }}"
state: remounted
listen: "Remount /var/tmp"
- name: "Adding options for /var/log"
vars:
mount_point: '/var/log'
ansible.posix.mount:
path: "{{ mount_point }}"
src: "{{ prelim_mount_point_fs_and_options[mount_point]['src'] }}"
state: present
fstype: "{{ prelim_mount_point_fs_and_options[mount_point]['fs_type'] }}"
opts: "{{ prelim_mount_point_fs_and_options[mount_point]['options'] | unique | join(',') }}"
listen: "Remount /var/log"
- name: "Remounting /var/log"
vars:
mount_point: '/var/log'
ansible.posix.mount:
path: "{{ mount_point }}"
state: remounted
listen: "Remount /var/log"
- name: "Adding options for /var/log/audit"
vars:
mount_point: '/var/log/audit'
ansible.posix.mount:
path: "{{ mount_point }}"
src: "{{ prelim_mount_point_fs_and_options[mount_point]['src'] }}"
state: present
fstype: "{{ prelim_mount_point_fs_and_options[mount_point]['fs_type'] }}"
opts: "{{ prelim_mount_point_fs_and_options[mount_point]['options'] | unique | join(',') }}"
listen: "Remount /var/log/audit"
- name: "Remounting /var/log/audit"
vars:
mount_point: '/var/log/audit'
ansible.posix.mount:
path: "{{ mount_point }}"
state: remounted
listen: "Remount /var/log/audit"
- name: "Remounting /boot/efi"
vars:
mount_point: '/boot/efi'
ansible.posix.mount:
path: "{{ mount_point }}"
state: remounted
notify: Change_requires_reboot
listen: "Remount /boot/efi"
- name: Reload sysctl - name: Reload sysctl
ansible.builtin.command: sysctl --system ansible.builtin.shell: sysctl --system
changed_when: true when:
- sysctl_updated.changed
- name: Sysctl flush ipv4 route table - name: Sysctl flush ipv4 route table
when:
- rhel9cis_flush_ipv4_route
- not system_is_container
ansible.posix.sysctl: ansible.posix.sysctl:
name: net.ipv4.route.flush name: net.ipv4.route.flush
value: '1' value: '1'
sysctl_set: true sysctl_set: true
ignore_errors: true # noqa ignore-errors ignore_errors: true # noqa ignore-errors
when:
- rhel9cis_flush_ipv4_route
- not system_is_container
- name: Sysctl flush ipv6 route table - name: Sysctl flush ipv6 route table
when:
- rhel9cis_flush_ipv6_route
- not system_is_container
ansible.posix.sysctl: ansible.posix.sysctl:
name: net.ipv6.route.flush name: net.ipv6.route.flush
value: '1' value: '1'
sysctl_set: true sysctl_set: true
when:
- rhel9cis_flush_ipv6_route
- not system_is_container
- name: Systemd restart tmp.mount - name: Systemd restart tmp.mount
ansible.builtin.systemd: ansible.builtin.systemd:
name: tmp.mount name: tmp.mount
daemon_reload: true daemon_reload: true
enabled: true enabled: true
masked: false masked: false
state: reloaded state: reloaded
- name: Update Crypto Policy - name: Remount tmp
ansible.builtin.set_fact: ansible.posix.mount:
rhel9cis_full_crypto_policy: "{{ rhel9cis_crypto_policy }}{{ rhel9cis_crypto_policy_module }}{% if rhel9cis_additional_crypto_policy_module | length > 0 %}:{{ rhel9cis_additional_crypto_policy_module }}{% endif %}" path: /tmp
notify: Set Crypto Policy state: remounted
- name: Set Crypto Policy
when: prelim_system_wide_crypto_policy.stdout != rhel9cis_full_crypto_policy
ansible.builtin.command: update-crypto-policies --set "{{ rhel9cis_full_crypto_policy }}"
changed_when: true
notify:
- Change_requires_reboot
- Restart sshd
- name: Restart firewalld - name: Restart firewalld
ansible.builtin.systemd: ansible.builtin.systemd:
name: firewalld name: firewalld
state: restarted state: restarted
- name: Restart sshd - name: Restart sshd
ansible.builtin.systemd: ansible.builtin.systemd:
name: sshd name: sshd
state: restarted state: restarted
- name: Restart postfix - name: Restart postfix
ansible.builtin.systemd: ansible.builtin.systemd:
name: postfix name: postfix
state: restarted state: restarted
- name: Reload dconf - name: Reload dconf
ansible.builtin.command: dconf update ansible.builtin.shell: dconf update
changed_when: true
- name: Grub2cfg - name: Grub2cfg
ansible.builtin.command: "grub2-mkconfig -o /boot/grub2/grub.cfg" ansible.builtin.shell: "grub2-mkconfig -o /boot/grub2/grub.cfg"
changed_when: true
ignore_errors: true # noqa ignore-errors ignore_errors: true # noqa ignore-errors
tags:
- skip_ansible_lint
- name: Restart rsyslog - name: Restart rsyslog
ansible.builtin.systemd: ansible.builtin.systemd:
name: rsyslog name: rsyslog
state: restarted state: restarted
- name: Restart journald - name: Restart journald
ansible.builtin.systemd: ansible.builtin.systemd:
name: systemd-journald name: systemd-journald
state: restarted state: restarted
- name: Restart systemd_journal_upload - name: Restart systemd_journal_upload
ansible.builtin.systemd: ansible.builtin.systemd:
name: systemd-journal-upload name: systemd-journal-upload
state: restarted state: restarted
- name: Systemd daemon reload - name: Systemd daemon reload
ansible.builtin.systemd: ansible.builtin.systemd:
daemon-reload: true daemon-reload: true
- name: Authselect update
ansible.builtin.command: authselect apply-changes
changed_when: true
## Auditd tasks note order for handlers to run ## Auditd tasks note order for handlers to run
- name: Auditd immutable check - name: Auditd immutable check
ansible.builtin.command: grep -c "^-e 2" /etc/audit/rules.d/99_auditd.rules ansible.builtin.shell: grep -c "^-e 2" /etc/audit/rules.d/99_auditd.rules
changed_when: false changed_when: false
register: discovered_auditd_immutable_check register: auditd_immutable_check
- name: Audit immutable fact - name: Audit immutable fact
when: discovered_auditd_immutable_check.stdout == '1'
ansible.builtin.debug: ansible.builtin.debug:
msg: "Reboot required for auditd to apply new rules as immutable set" msg: "Reboot required for auditd to apply new rules as immutable set"
notify: Change_requires_reboot notify: Change_requires_reboot
when:
- auditd_immutable_check.stdout == '1'
- name: Stop auditd process - name: Restart auditd
ansible.builtin.command: systemctl kill auditd ansible.builtin.shell: service auditd restart
changed_when: true tags:
listen: Restart auditd - skip_ansible_lint
- name: Start auditd process
ansible.builtin.systemd:
name: auditd
state: started
listen: Restart auditd
- name: Change_requires_reboot - name: Change_requires_reboot
ansible.builtin.set_fact: ansible.builtin.set_fact:
change_requires_reboot: true change_requires_reboot: true
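For context, several handlers above share a `listen:` topic so that a single notify triggers the whole chain; a minimal, purely illustrative task that would fire the /tmp remount handlers looks like the following (the task itself is not part of the role):

```yaml
# Illustrative only: any changed task can notify the shared "Remount /tmp" topic.
- name: Example | pretend the /tmp mount options were changed
  ansible.builtin.debug:
    msg: "mount options for /tmp updated"
  changed_when: true
  notify: Remount /tmp
```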
8
local.yml Normal file
View file
@ -0,0 +1,8 @@
---
- hosts: localhost
connection: local
become: true
roles:
- role: "{{ playbook_dir }}"
View file
@ -1,32 +1,32 @@
--- ---
galaxy_info: galaxy_info:
author: "MindPoint Group" author: "Sam Doran, Josh Springer, Daniel Shepherd, Bas Meijeri, James Cassell, Mike Renfro, DFed, George Nalen, Mark Bolwell"
description: "Apply the RHEL 9 CIS" description: "Apply the RHEL 9 CIS"
company: "MindPoint Group" company: "MindPoint Group"
license: MIT license: MIT
role_name: rhel9_cis role_name: rhel9_cis
namespace: mindpointgroup namespace: mindpointgroup
min_ansible_version: 2.10.1 min_ansible_version: 2.10.1
platforms: platforms:
- name: EL - name: EL
versions: versions:
- "9" - "9"
galaxy_tags: galaxy_tags:
- system - system
- security - security
- stig - stig
- hardening - hardening
- benchmark - benchmark
- compliance - compliance
- redhat - redhat
- complianceascode - complianceascode
- disa - disa
- rhel9 - rhel9
- cis - cis
- rocky - rocky
- alma - alma
collections: collections:
- community.general - community.general
- community.crypto - community.crypto
- ansible.posix - ansible.posix
dependencies: [] dependencies: []
View file
@ -9,7 +9,7 @@
ansible_user: root ansible_user: root
system_is_container: true system_is_container: true
rhel9cis_selinux_disable: true rhel9cis_selinux_disable: true
rhel9cis_rule_5_2_4: false rhel9cis_rule_5_3_4: false
rhel9cis_rule_1_1_10: false rhel9cis_rule_1_1_10: false
rhel9cis_firewall: "none" rhel9cis_firewall: "none"
rhel9cis_rule_4_1_1_1: false rhel9cis_rule_4_1_1_1: false
View file
@ -8,7 +8,7 @@
vars: vars:
ansible_user: "{{ lookup('env', 'USER') }}" ansible_user: "{{ lookup('env', 'USER') }}"
role_name: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" role_name: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
rhel9cis_rule_5_2_4: false rhel9cis_rule_5_3_4: false
pre_tasks: pre_tasks:
tasks: tasks:
5
requirements.txt Normal file
View file
@ -0,0 +1,5 @@
passlib
lxml
xmltodict
jmespath
yamllint
View file
@ -1,7 +1,7 @@
--- ---
- hosts: all # noqa: name[play]
- name: Apply ansible-lockdown hardening
hosts: all
become: true become: true
roles: roles:
- role: "{{ playbook_dir }}"
- role: "{{ playbook_dir }}"
View file
@ -1,31 +1,30 @@
--- ---
- name: Pre Audit Setup | Set audit package name
block:
- name: Pre Audit Setup | Set audit package name | 64bit
when: ansible_facts.machine == "x86_64"
ansible.builtin.set_fact:
audit_pkg_arch_name: AMD64
- name: Pre Audit Setup | Set audit package name | ARM64 - name: Download audit binary
when: (ansible_facts.machine == "arm64" or ansible_facts.machine == "aarch64")
ansible.builtin.set_fact:
audit_pkg_arch_name: ARM64
- name: Pre Audit Setup | Download audit binary
when: get_audit_binary_method == 'download'
ansible.builtin.get_url: ansible.builtin.get_url:
url: "{{ audit_bin_url }}{{ audit_pkg_arch_name }}" url: "{{ goss_url }}"
dest: "{{ audit_bin }}" dest: "{{ audit_bin }}"
owner: root owner: root
group: root group: root
checksum: "{{ audit_bin_version[audit_pkg_arch_name + '_checksum'] }}" checksum: "{{ goss_version.checksum }}"
mode: 'u+x,go-w' mode: 0555
when:
- get_goss_file == 'download'
- name: Pre Audit Setup | Copy audit binary - name: Copy audit binary
when: get_audit_binary_method == 'copy'
ansible.builtin.copy: ansible.builtin.copy:
src: "{{ audit_bin_copy_location }}/goss-linux-{{ audit_pkg_arch_name }}" src:
dest: "{{ audit_bin }}" dest: "{{ audit_bin }}"
owner: root mode: 0555
group: root owner: root
mode: 'u+x,go-w' group: root
when:
- get_goss_file == 'copy'
- name: Install git if not present
ansible.builtin.package:
name: git
state: present
register: git_installed
when:
- '"git" not in ansible_facts.packages'
View file
@ -1,17 +0,0 @@
---
- name: Audit_only | Fetch audit files
when:
- fetch_audit_output
- audit_only
ansible.builtin.import_tasks:
file: fetch_audit_output.yml
- name: Audit_only | Show Audit Summary
when: audit_only
ansible.builtin.debug:
msg: "{{ audit_results.split('\n') }}"
- name: Audit_only | Stop task for host as audit_only selected
when: audit_only
ansible.builtin.meta: end_host
View file
@ -1,61 +1,46 @@
--- ---
# Since auditd rules are dependent on syscalls and syscall tables are architecture specific,
# we need to update the auditd rules depending on the architecture of the system.
# This task passed the syscalls table to the auditd template and updates the auditd rules
- name: "POST | AUDITD | Set supported_syscalls variable"
ansible.builtin.shell: ausyscall --dump | awk '{print $2}'
changed_when: false
check_mode: false
failed_when: discovered_auditd_syscalls.rc not in [ 0, 1 ]
register: discovered_auditd_syscalls
- name: POST | AUDITD | Apply auditd template will for section 6.3.3 - only required rules will be added | stat file
ansible.builtin.stat:
path: /etc/audit/rules.d/99_auditd.rules
register: discovered_auditd_rules_file
- name: POST | Apply auditd template for section 6.3.3.x
when: update_audit_template
vars:
supported_syscalls: "{{ discovered_auditd_syscalls.stdout_lines }}"
ansible.builtin.template:
src: audit/99_auditd.rules.j2
dest: /etc/audit/rules.d/99_auditd.rules
owner: root
group: root
mode: 'u-x,g-wx,o-rwx'
diff: "{{ discovered_auditd_rules_file.stat.exists }}" # Only run diff if not a new file
register: discovered_auditd_rules_template_updated
notify:
- Auditd immutable check
- Audit immutable fact
- Restart auditd
- name: POST | AUDITD | Add Warning count for changes to template file | Warn Count # noqa no-handler
when:
- discovered_auditd_rules_template_updated.changed
- discovered_auditd_rules_file.stat.exists
ansible.builtin.import_tasks:
file: warning_facts.yml
vars:
warn_control_id: 'Auditd template updated, validate as expected'
- name: POST | AUDITD | Apply auditd template will for section 4.1.3 - only required rules will be added | stat file - name: POST | AUDITD | Apply auditd template will for section 4.1.3 - only required rules will be added | stat file
ansible.builtin.stat: ansible.builtin.stat:
path: /etc/audit/rules.d/98_auditd_exceptions.rules path: /etc/audit/rules.d/99_auditd.rules
register: discovered_auditd_exception_file register: rhel9cis_auditd_file
- name: POST | AUDITD | Apply auditd template will for section 4.1.3 - only required rules will be added | setup file
ansible.builtin.template:
src: audit/99_auditd.rules.j2
dest: /etc/audit/rules.d/99_auditd.rules
owner: root
group: root
mode: 0640
diff: "{{ rhel9cis_auditd_file.stat.exists }}" # Only run diff if not a new file
register: rhel9cis_auditd_template_updated
notify:
- Auditd immutable check
- Audit immutable fact
- Restart auditd
- name: POST | AUDITD | Add Warning count for changes to template file | Warn Count # noqa: no-handler
ansible.builtin.import_tasks: warning_facts.yml
vars:
warn_control_id: 'Auditd template updated, see diff output for details'
when:
- rhel9cis_auditd_template_updated.changed
- rhel9cis_auditd_file.stat.exists
- name: POST | AUDITD | Apply auditd template will for section 4.1.3 - only required rules will be added | stat file
ansible.builtin.stat:
path: /etc/audit/rules.d/98_auditd_exceptions.rules
register: rhel9cis_auditd_exception_file
- name: POST | Set up auditd user logging exceptions | setup file - name: POST | Set up auditd user logging exceptions | setup file
when:
- rhel9cis_allow_auditd_uid_user_exclusions
- rhel9cis_auditd_uid_exclude | length > 0
ansible.builtin.template: ansible.builtin.template:
src: audit/98_auditd_exception.rules.j2 src: audit/98_auditd_exception.rules.j2
dest: /etc/audit/rules.d/98_auditd_exceptions.rules dest: /etc/audit/rules.d/98_auditd_exceptions.rules
owner: root owner: root
group: root group: root
mode: '0640' mode: 0640
diff: "{{ discovered_auditd_exception_file.stat.exists }}" diff: "{{ rhel9cis_auditd_exception_file.stat.exists }}"
notify: Restart auditd notify: Restart auditd
when:
- rhel9cis_allow_auditd_uid_user_exclusions
- rhel9cis_auditd_uid_exclude | length > 0
View file
@ -1,7 +1,8 @@
--- ---
- name: "PREREQ | If required install libselinux package to manage file changes." - name: "PREREQ | If required install libselinux package to manage file changes."
when: '"libselinux-python3" not in ansible_facts.packages'
ansible.builtin.package: ansible.builtin.package:
name: libselinux-python3 name: libselinux-python3
state: present state: present
when:
- '"libselinux-python3" not in ansible_facts.packages'
View file
@ -1,47 +0,0 @@
---
# Stage to copy audit output to a centralised location
- name: "POST | FETCH | Fetch files and copy to controller"
when: audit_output_collection_method == "fetch"
ansible.builtin.fetch:
src: "{{ item }}"
dest: "{{ audit_output_destination }}"
flat: true
changed_when: true
failed_when: false
register: discovered_audit_fetch_state
loop:
- "{{ pre_audit_outfile }}"
- "{{ post_audit_outfile }}"
become: false
# Added this option for continuity but could be changed by adjusting the variable audit_conf_dest
# Allowing backup to one location
- name: "POST | FETCH | Copy files to location available to managed node"
when: audit_output_collection_method == "copy"
ansible.builtin.copy:
src: "{{ item }}"
dest: "{{ audit_output_destination }}"
mode: 'u-x,go-wx'
flat: true
failed_when: false
register: discovered_audit_copy_state
loop:
- "{{ pre_audit_outfile }}"
- "{{ post_audit_outfile }}"
- name: "POST | FETCH | Fetch files and copy to controller | Warning if issues with fetch_audit_files"
when:
- (audit_output_collection_method == "fetch" and not discovered_audit_fetch_state.changed) or
(audit_output_collection_method == "copy" and not discovered_audit_copy_state.changed)
block:
- name: "POST | FETCH | Fetch files and copy to controller | Warning if issues with fetch_audit_files"
ansible.builtin.debug:
msg: "Warning!! Unable to write to localhost {{ audit_output_destination }} for audit file copy"
- name: "POST | FETCH | Fetch files and copy to controller | Warning if issues with fetch_audit_files"
vars:
warn_control_id: "FETCH_AUDIT_FILES"
ansible.builtin.import_tasks:
file: warning_facts.yml
View file
@ -1,262 +1,190 @@
--- ---
# tasks file for RHEL9-CIS # tasks file for RHEL9-CIS
- name: "Check OS version and family" - name: Check OS version and family
when: os_check
tags: always
ansible.builtin.assert: ansible.builtin.assert:
that: (ansible_facts.distribution != 'CentOS' and ansible_facts.os_family == 'RedHat' or ansible_facts.os_family == "Rocky") and ansible_facts.distribution_major_version is version_compare('9', '==') that: (ansible_distribution != 'CentOS' and ansible_os_family == 'RedHat' or ansible_os_family == "Rocky") and ansible_distribution_major_version is version_compare('9', '==')
fail_msg: "This role can only be run against Supported OSs. {{ ansible_facts.distribution }} {{ ansible_facts.distribution_major_version }} is not supported." fail_msg: "This role can only be run against Supported OSs. {{ ansible_distribution }} {{ ansible_distribution_major_version }} is not supported."
success_msg: "This role is running against a supported OS {{ ansible_facts.distribution }} {{ ansible_facts.distribution_major_version }}" success_msg: "This role is running against a supported OS {{ ansible_distribution }} {{ ansible_distribution_major_version }}"
- name: "Check ansible version"
tags: always
ansible.builtin.assert:
that: ansible_version.full is version_compare(min_ansible_version, '>=')
fail_msg: "You must use Ansible {{ min_ansible_version }} or greater"
success_msg: "This role is running a supported version of ansible {{ ansible_version.full }} >= {{ min_ansible_version }}"
- name: "Setup rules if container"
when: when:
- ansible_connection == 'docker' or - os_check
ansible_facts.virtualization_type in ["docker", "lxc", "openvz", "podman", "container"] - not system_is_ec2
tags: tags:
- container_discovery - always
- always
block:
- name: "Discover and set container variable if required"
ansible.builtin.set_fact:
system_is_container: true
- name: "Load variable for container" - name: Check ansible version
ansible.builtin.include_vars:
file: "{{ container_vars_file }}"
- name: "Output if discovered is a container"
when: system_is_container
ansible.builtin.debug:
msg: system has been discovered as a container
- name: "Check crypto-policy input"
ansible.builtin.assert: ansible.builtin.assert:
that: rhel9cis_crypto_policy in rhel9cis_allowed_crypto_policies that: ansible_version.full is version_compare(min_ansible_version, '>=')
fail_msg: "Crypto policy is not a permitted version" fail_msg: "You must use Ansible {{ min_ansible_version }} or greater"
success_msg: "Crypto policy is a permitted version" success_msg: "This role is running a supported version of ansible {{ ansible_version.full }} >= {{ min_ansible_version }}"
- name: "Check rhel9cis_bootloader_password_hash variable has been changed"
when:
- rhel9cis_set_boot_pass
- rhel9cis_rule_1_4_1
tags: always
ansible.builtin.assert:
that: rhel9cis_bootloader_password_hash.find('grub.pbkdf2.sha512') != -1 and rhel9cis_bootloader_password_hash != 'grub.pbkdf2.sha512.changethispassword' # pragma: allowlist secret
msg: "This role will not be able to run single user password commands as rhel9cis_bootloader_password_hash variable has not been set correctly"
- name: "Check crypto-policy module input"
when:
- rhel9cis_rule_1_6_1
- rhel9cis_crypto_policy_module | length > 0
tags: tags:
- rule_1.6.1 - always
- crypto
- NIST800-53R5_SC-6
ansible.builtin.assert:
that: rhel9cis_additional_crypto_policy_module in rhel9cis_allowed_crypto_policies_modules
fail_msg: "Crypto policy module is not a permitted version"
success_msg: "Crypto policy module is a permitted version"
- name: "Check password set for {{ ansible_env.SUDO_USER }}" - name: "Check password set for {{ ansible_user }}"
block:
- name: Capture current password state of "{{ ansible_user }}"
ansible.builtin.shell: "grep {{ ansible_user }} /etc/shadow | awk -F: '{print $2}'"
changed_when: false
failed_when: false
check_mode: false
register: ansible_user_password_set
- name: "Assert that password set for {{ ansible_user }} and account not locked"
ansible.builtin.assert:
that: ansible_user_password_set.stdout | length != 0 and ansible_user_password_set.stdout != "!!"
fail_msg: "You have {{ sudo_password_rule }} enabled but the user = {{ ansible_user }} has no password set - It can break access"
success_msg: "You a password set for the {{ ansible_user }}"
vars:
sudo_password_rule: rhel9cis_rule_5_3_4
when: when:
- rhel9cis_rule_5_2_4 - rhel9cis_rule_5_3_4
- ansible_env.SUDO_USER is defined - not system_is_ec2
- not system_is_ec2
tags: tags:
- user_passwd - user_passwd
- rule_5.2.4
vars:
sudo_password_rule: rhel9cis_rule_5_2_4 # pragma: allowlist secret
block:
- name: "Check password set for {{ ansible_env.SUDO_USER }} | password state" # noqa name[template]
ansible.builtin.shell: "(grep {{ ansible_env.SUDO_USER }} /etc/shadow || echo 'not found:not found') | awk -F: '{print $2}'"
changed_when: false
failed_when: false
check_mode: false
register: prelim_ansible_user_password_set
- name: "Check for local account {{ ansible_env.SUDO_USER }} | Check for local account" # noqa name[template]
when: prelim_ansible_user_password_set.stdout == "not found"
ansible.builtin.debug:
msg: "No local account found for {{ ansible_env.SUDO_USER }} user. Skipping local account checks."
- name: "Check local account"
when: prelim_ansible_user_password_set.stdout != "not found"
block:
- name: "Check password set for {{ ansible_env.SUDO_USER }} | Assert local password set" # noqa name[template]
ansible.builtin.assert:
that: |
(
((prelim_ansible_user_password_set.stdout | length != 0) and (prelim_ansible_user_password_set.stdout != "!!" ))
or
(ansible_env.SUDO_USER in rhel9cis_sudoers_exclude_nopasswd_list)
)
fail_msg: "You have {{ sudo_password_rule }} enabled but the user = {{ ansible_env.SUDO_USER }} has no password set or or the user is not included in the exception list for rule 5.2.4 - It can break access"
success_msg: "You have a password set for the {{ ansible_env.SUDO_USER }} user or the user is included in the exception list for rule 5.2.4"
- name: "Check account is not locked for {{ ansible_env.SUDO_USER }} | Assert local account not locked" # noqa name[template]
ansible.builtin.assert:
that: (not prelim_ansible_user_password_set.stdout.startswith("!")) or (ansible_env.SUDO_USER in rhel9cis_sudoers_exclude_nopasswd_list)
fail_msg: "You have {{ sudo_password_rule }} enabled but the user = {{ ansible_env.SUDO_USER }} is locked - It can break access"
success_msg: "The local account {{ ansible_env.SUDO_USER }} is not locked or included in the exception list for rule 5.2.4"
- name: "Check authselect profile is selected"
when: rhel9cis_allow_authselect_updates
tags: always
block:
- name: "Check authselect profile name has been updated | Ensure name from default is changed"
ansible.builtin.assert:
that: rhel9cis_authselect_custom_profile_name != 'cis_example_profile'
fail_msg: "You still have the default name for your authselect profile"
- name: "Check authselect profile is selected | Check current profile"
ansible.builtin.command: authselect list
changed_when: false
failed_when: prelim_authselect_current_profile.rc not in [ 0, 1 ]
register: prelim_authselect_current_profile
- name: "Ensure root password is set" - name: "Ensure root password is set"
when: rhel9cis_rule_5_4_2_4
tags:
- level1-server
- level1-workstation
- patch
- accounts
- root
- rule_5.4.2.4
block: block:
- name: "Ensure root password is set" - name: "Ensure root password is set"
ansible.builtin.shell: LC_ALL=C passwd -S root | grep -E "(Alternate authentication|Password set|Password locked)" ansible.builtin.shell: passwd -S root | grep "Password set, SHA512 crypt"
changed_when: false changed_when: false
failed_when: prelim_root_passwd_set.rc not in [ 0, 1 ] register: root_passwd_set
register: prelim_root_passwd_set
- name: "Ensure root password is set" - name: "Ensure root password is set"
ansible.builtin.assert: ansible.builtin.assert:
that: prelim_root_passwd_set.rc == 0 that: root_passwd_set.rc == 0
fail_msg: "You have rule 5.4.2.4 enabled this requires that you have a root password set" fail_msg: "You have rule 5.6.6 enabled this requires that you have a root password set"
success_msg: "You have a root password set" success_msg: "You have a root password set"
when:
- rhel9cis_rule_5_6_6
tags:
- level1-server
- level1-workstation
- patch
- accounts
- root
- rule_5.6.6
- name: "Gather the package facts" - name: Setup rules if container
tags: always block:
- name: Discover and set container variable if required
ansible.builtin.set_fact:
system_is_container: true
- name: Load variable for container
ansible.builtin.include_vars:
file: "{{ container_vars_file }}"
- name: Output if discovered is a container
ansible.builtin.debug:
msg: system has been discovered as a container
when:
- system_is_container
when:
- ansible_connection == 'docker' or
ansible_virtualization_type in ["docker", "lxc", "openvz", "podman", "container"]
tags:
- container_discovery
- always
- name: Check crypto-policy input
ansible.builtin.assert:
that: rhel9cis_crypto_policy in rhel9cis_allowed_crypto_policies
fail_msg: "Crypto policy is not a permitted version"
success_msg: "Crypto policy is a permitted version"
- name: Check rhel9cis_bootloader_password_hash variable has been changed
ansible.builtin.assert:
that: rhel9cis_bootloader_password_hash.find('grub.pbkdf2.sha512') != -1 and rhel9cis_bootloader_password_hash != 'grub.pbkdf2.sha512.changethispassword'
msg: "This role will not be able to run single user password commands as rhel9cis_bootloader_password_hash variable has not been set correctly"
when:
- rhel9cis_set_boot_pass
- rhel9cis_rule_1_4_1
tags:
- always
- name: Gather the package facts
ansible.builtin.package_facts: ansible.builtin.package_facts:
manager: auto manager: auto
- name: "Include OS specific variables"
tags: always
ansible.builtin.include_vars:
file: "{{ ansible_facts.distribution }}.yml"
- name: "Include preliminary steps"
tags: tags:
- prelim_tasks - always
- always
ansible.builtin.import_tasks:
file: prelim.yml
- name: "Run Section 1 tasks" - name: Include OS specific variables
ansible.builtin.include_vars: "{{ ansible_distribution }}.yml"
tags:
- always
- name: Include preliminary steps
ansible.builtin.import_tasks: prelim.yml
tags:
- prelim_tasks
- always
- name: run pre_remediation audit
ansible.builtin.include_tasks: pre_remediation_audit.yml
when:
- run_audit
- name: run Section 1 tasks
ansible.builtin.import_tasks: section_1/main.yml
when: rhel9cis_section1 when: rhel9cis_section1
ansible.builtin.import_tasks: tags:
file: section_1/main.yml - rhel9cis_section1
- name: "Run Section 2 tasks" - name: run Section 2 tasks
ansible.builtin.import_tasks: section_2/main.yml
when: rhel9cis_section2 when: rhel9cis_section2
ansible.builtin.import_tasks: tags:
file: section_2/main.yml - rhel9cis_section2
- name: "Run Section 3 tasks" - name: run Section 3 tasks
ansible.builtin.import_tasks: section_3/main.yml
when: rhel9cis_section3 when: rhel9cis_section3
ansible.builtin.import_tasks: tags:
file: section_3/main.yml - rhel9cis_section3
- name: "Run Section 4 tasks" - name: run Section 4 tasks
ansible.builtin.import_tasks: section_4/main.yml
when: rhel9cis_section4 when: rhel9cis_section4
ansible.builtin.import_tasks: tags:
file: section_4/main.yml - rhel9cis_section4
- name: "Run Section 5 tasks" - name: run Section 5 tasks
ansible.builtin.import_tasks: section_5/main.yml
when: rhel9cis_section5 when: rhel9cis_section5
ansible.builtin.import_tasks: tags:
file: section_5/main.yml - rhel9cis_section5
- name: "Run Section 6 tasks" - name: run Section 6 tasks
ansible.builtin.import_tasks: section_6/main.yml
when: rhel9cis_section6 when: rhel9cis_section6
ansible.builtin.import_tasks: tags:
file: section_6/main.yml - rhel9cis_section6
- name: "Run Section 7 tasks" - name: run auditd logic
when: rhel9cis_section7 ansible.builtin.import_tasks: auditd.yml
ansible.builtin.import_tasks:
file: section_7/main.yml
- name: "Run auditd logic"
when: update_audit_template when: update_audit_template
tags: always
ansible.builtin.import_tasks:
file: auditd.yml
- name: "Run post remediation tasks"
tags: tags:
- post_tasks - always
- always
ansible.builtin.import_tasks:
file: post.yml
- name: "Run post_remediation audit" - name: run post remediation tasks
when: run_audit ansible.builtin.import_tasks: post.yml
tags: always
ansible.builtin.import_tasks:
file: post_remediation_audit.yml
- name: Add ansible file showing Benchmark and levels applied if audit details not present
when:
- create_benchmark_facts
- (post_audit_summary is defined) or
(ansible_local['compliance_facts']['lockdown_audit_details']['audit_summary'] is undefined and post_audit_summary is undefined)
tags: tags:
- always - post_tasks
- benchmark - always
block:
- name: Create ansible facts directory if audit facts not present
ansible.builtin.file:
path: "{{ ansible_facts_path }}"
state: directory
owner: root
group: root
mode: 'u=rwx,go=rx'
- name: Create ansible facts file and levels applied if audit facts not present - name: run post_remediation audit
ansible.builtin.template: ansible.builtin.import_tasks: post_remediation_audit.yml
src: etc/ansible/compliance_facts.j2
dest: "{{ ansible_facts_path }}/compliance_facts.fact"
owner: root
group: root
mode: 'u-x,go=r'
- name: Fetch audit files
when: when:
- fetch_audit_output - run_audit
- run_audit
tags: always
ansible.builtin.import_tasks:
file: fetch_audit_output.yml
- name: "Show Audit Summary" - name: Show Audit Summary
when: run_audit
tags: always
ansible.builtin.debug: ansible.builtin.debug:
msg: "{{ audit_results.split('\n') }}" msg: "{{ audit_results.split('\n') }}"
when: run_audit
- name: "If Warnings found Output count and control IDs affected" - name: If Warnings found Output count and control IDs affected
ansible.builtin.debug:
msg: "You have {{ warn_count }} Warning(s) that require investigating that are related to the following benchmark ID(s) {{ warn_control_list }}"
when: warn_count != 0 when: warn_count != 0
tags: always tags:
ansible.builtin.debug: - always
msg: "You have {{ warn_count }} Warning(s) that require investigating that are related to the following benchmark ID(s) {{ warn_control_list }}"
View file
@ -1,31 +1,32 @@
--- ---
- name: "PRELIM | 5.5.2 | 6.2.7 | 6.2.8 | 6.2.20 | Parse /etc/passwd" - name: "PRELIM | 5.5.2 | 6.2.7 | 6.2.8 | 6.2.20 | Parse /etc/passwd"
tags: always
block: block:
- name: "PRELIM | 5.5.2 | 6.2.7 | 6.2.8 | 6.2.20 | Parse /etc/passwd" - name: "PRELIM | 5.5.2 | 6.2.7 | 6.2.8 | 6.2.20 | Parse /etc/passwd"
ansible.builtin.shell: cat /etc/passwd | grep -v '^#' ansible.builtin.shell: cat /etc/passwd
changed_when: false changed_when: false
check_mode: false check_mode: false
register: prelim_capture_passwd_file register: rhel9cis_passwd_file_audit
- name: "PRELIM | 5.4.2 | 7.2.8 | Split passwd entries" - name: "PRELIM | 5.5.2 | 6.2.7 | 6.2.8 | 6.2.20 | Split passwd entries"
ansible.builtin.set_fact: ansible.builtin.set_fact:
prelim_captured_passwd_data: "{{ prelim_capture_passwd_file.stdout_lines | map('regex_replace', ld_passwd_regex, ld_passwd_yaml) | map('from_yaml') | list }}" rhel9cis_passwd: "{{ rhel9cis_passwd_file_audit.stdout_lines | map('regex_replace', ld_passwd_regex, ld_passwd_yaml) | map('from_yaml') | list }}"
loop: "{{ prelim_capture_passwd_file.stdout_lines }}" loop: "{{ rhel9cis_passwd_file_audit.stdout_lines }}"
vars: vars:
ld_passwd_regex: >- ld_passwd_regex: >-
^(?P<id>[^:]*):(?P<password>[^:]*):(?P<uid>[^:]*):(?P<gid>[^:]*):(?P<gecos>[^:]*):(?P<dir>[^:]*):(?P<shell>[^:]*) ^(?P<id>[^:]*):(?P<password>[^:]*):(?P<uid>[^:]*):(?P<gid>[^:]*):(?P<gecos>[^:]*):(?P<dir>[^:]*):(?P<shell>[^:]*)
ld_passwd_yaml: | # pragma: allowlist secret ld_passwd_yaml: |
id: >-4 id: >-4
\g<id> \g<id>
password: >-4 password: >-4
\g<password> \g<password>
uid: \g<uid> uid: \g<uid>
gid: \g<gid> gid: \g<gid>
gecos: >-4 gecos: >-4
\g<gecos> \g<gecos>
dir: >-4 dir: >-4
\g<dir> \g<dir>
shell: >-4 shell: >-4
\g<shell> \g<shell>
tags:
- always
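To illustrate what the regex/YAML mapping above produces, each /etc/passwd line becomes a dict in the resulting list; the entry below is invented for illustration and shows the rough shape only:

```yaml
# For a line such as:
#   games:x:12:100:games:/usr/games:/sbin/nologin
# the resulting list item looks roughly like:
- id: games
  password: x
  uid: 12
  gid: 100
  gecos: games
  dir: /usr/games
  shell: /sbin/nologin
```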
View file
@ -1,49 +1,66 @@
--- ---
# Post tasks
- name: POST | Gather the package facts after remediation - name: POST | Gather the package facts after remediation
tags: always
ansible.builtin.package_facts: ansible.builtin.package_facts:
manager: auto manager: auto
tags:
- always
- name: POST | Update sysctl - name: POST | Update sysctl
when:
- rhel9cis_sysctl_update
- not system_is_container
- "'procps-ng' in ansible_facts.packages"
ansible.builtin.template: ansible.builtin.template:
src: "etc/sysctl.d/{{ item }}.j2" src: "etc/sysctl.d/{{ item }}.j2"
dest: "/etc/sysctl.d/{{ item }}" dest: "/etc/sysctl.d/{{ item }}"
owner: root owner: root
group: root group: root
mode: 'go-rwx' mode: 0600
register: sysctl_updated
notify: Reload sysctl notify: Reload sysctl
loop: loop:
- 60-kernel_sysctl.conf - 60-kernel_sysctl.conf
- 60-disable_ipv6.conf - 60-disable_ipv6.conf
- 60-netipv4_sysctl.conf - 60-netipv4_sysctl.conf
- 60-netipv6_sysctl.conf - 60-netipv6_sysctl.conf
when:
- rhel9cis_sysctl_update
- not system_is_container
- "'procps-ng' in ansible_facts.packages"
- name: Flush handlers - name: Flush handlers
ansible.builtin.meta: flush_handlers ansible.builtin.meta: flush_handlers
- name: POST | reboot system if changes require it and not skipped - name: POST | reboot system if changes require it and not skipped
when: change_requires_reboot
tags:
- always
vars:
warn_control_id: Reboot_required
block: block:
- name: POST | Reboot system if changes require it and not skipped - name: POST | Reboot system if changes require it and not skipped
when: not skip_reboot ansible.builtin.reboot:
ansible.builtin.reboot: when:
- change_requires_reboot
- not skip_reboot
- name: POST | Warning a reboot required but skip option set - name: POST | Warning a reboot required but skip option set
when: skip_reboot ansible.builtin.debug:
ansible.builtin.debug: msg: "Warning!! changes have been made that require a reboot to be implemented but skip reboot was set - Can affect compliance check results"
msg: "Warning!! changes have been made that require a reboot to be implemented but skip reboot was set - Can affect compliance check results" changed_when: true
changed_when: true when:
- change_requires_reboot
- skip_reboot
- name: "POST | Warning a reboot required but skip option set | warning count" - name: "POST | Warning a reboot required but skip option set | warning count"
when: skip_reboot ansible.builtin.import_tasks: warning_facts.yml
ansible.builtin.import_tasks: when:
file: warning_facts.yml - change_requires_reboot
- skip_reboot
vars:
warn_control_id: Reboot_required
tags:
- grub
- level1-server
- level1-workstation
- level2-server
- level2-workstation
- rhel9cis_section1
- rhel9cis_section2
- rhel9cis_section3
- rhel9cis_section4
- rhel9cis_section5
- rhel9cis_section6
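As a usage note on the reboot handling above, behaviour is driven by the `skip_reboot` and `change_requires_reboot` variables; a hedged example of explicitly skipping the automatic reboot for a single run is shown below (the playbook and inventory names are assumptions):

```bash
# Skip the automatic reboot for this run; a warning is raised instead.
ansible-playbook -i hosts.ini site.yml -e skip_reboot=true
```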
View file
@ -1,33 +1,44 @@
--- ---
- name: Post Audit | Run post_remediation {{ benchmark }} audit # noqa name[template] - name: "Post Audit | Run post_remediation {{ benchmark }} audit"
ansible.builtin.shell: "umask 0022 && {{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -f {{ audit_format }} -m {{ audit_max_concurrent }} -o {{ post_audit_outfile }} -g \"{{ group_names }}\"" # noqa yaml[line-length] ansible.builtin.shell: "{{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -o {{ post_audit_outfile }} -g {{ group_names }}"
changed_when: true environment: "{{ audit_run_script_environment | default({}) }}"
environment: changed_when: audit_run_post_remediation.rc == 0
AUDIT_BIN: "{{ audit_bin }}" register: audit_run_post_remediation
AUDIT_CONTENT_LOCATION: "{{ audit_conf_dest | default('/opt') }}"
AUDIT_FILE: goss.yml - name: Post Audit | ensure audit files readable by users
ansible.builtin.file:
path: "{{ item }}"
mode: 0644
state: file
loop:
- "{{ post_audit_outfile }}"
- "{{ pre_audit_outfile }}"
- name: Post Audit | Capture audit data if json format - name: Post Audit | Capture audit data if json format
when: audit_format == "json"
block: block:
- name: Post Audit | Capture audit data if json format - name: "Capture data {{ post_audit_outfile }}"
ansible.builtin.shell: grep -E '"summary-line.*Count:.*Failed' "{{ post_audit_outfile }}" | cut -d'"' -f4 ansible.builtin.shell: "cat {{ post_audit_outfile }}"
changed_when: false register: post_audit
register: post_audit_summary changed_when: false
- name: Post Audit | Set Fact for audit summary - name: Capture post-audit result
ansible.builtin.set_fact: ansible.builtin.set_fact:
post_audit_results: "{{ post_audit_summary.stdout }}" post_audit_summary: "{{ post_audit.stdout | from_json | json_query(summary) }}"
vars:
summary: 'summary."summary-line"'
when:
- audit_format == "json"
- name: Post Audit | Capture audit data if documentation format - name: Post Audit | Capture audit data if documentation format
when: audit_format == "documentation"
block: block:
- name: Post Audit | Capture audit data if documentation format - name: "Post Audit | capture data {{ post_audit_outfile }}"
ansible.builtin.shell: tail -2 "{{ post_audit_outfile }}" | tac | tr '\n' ' ' ansible.builtin.shell: "tail -2 {{ post_audit_outfile }}"
changed_when: false register: post_audit
register: post_audit_summary changed_when: false
- name: Post Audit | Set Fact for audit summary - name: Post Audit | Capture post-audit result
ansible.builtin.set_fact: ansible.builtin.set_fact:
post_audit_results: "{{ post_audit_summary.stdout }}" post_audit_summary: "{{ post_audit.stdout_lines }}"
when:
- audit_format == "documentation"
View file
@ -1,111 +1,109 @@
--- ---
- name: Pre Audit Setup | Setup the LE audit - name: Pre Audit | Setup the audit
when: setup_audit ansible.builtin.include_tasks: LE_audit_setup.yml
tags: setup_audit when:
ansible.builtin.include_tasks: - setup_audit
file: LE_audit_setup.yml
- name: Pre Audit Setup | Ensure existence of {{ audit_conf_dir }} # noqa name[template]
ansible.builtin.file:
path: "{{ audit_conf_dir }}"
mode: 'go-w'
state: directory
- name: Pre Audit Setup | If using git for content set up
when: audit_content == 'git'
block:
- name: Pre Audit Setup | Install git
ansible.builtin.package:
name: git
state: present
- name: Pre Audit Setup | Retrieve audit content files from git
ansible.builtin.git:
repo: "{{ audit_file_git }}"
dest: "{{ audit_conf_dir }}"
version: "{{ audit_git_version }}"
- name: Pre Audit Setup | Copy to audit content files to server
when: audit_content == 'copy'
ansible.builtin.copy:
src: "{{ audit_conf_source }}"
dest: "{{ audit_conf_dest }}"
mode: preserve
- name: Pre Audit Setup | Unarchive audit content files on server
when: audit_content == 'archive'
ansible.builtin.unarchive:
src: "{{ audit_conf_source }}"
dest: "{{ audit_conf_dest }}"
- name: Pre Audit Setup | Get audit content from url
when: audit_content == 'get_url'
ansible.builtin.unarchive:
src: "{{ audit_conf_source }}"
dest: "{{ audit_conf_dest }}/{{ benchmark }}-Audit"
remote_src: "{{ (audit_conf_source is contains('http')) | ternary(true, false) }}"
extra_opts: "{{ (audit_conf_source is contains('github')) | ternary('--strip-components=1', []) }}"
- name: Pre Audit Setup | Check Goss is available
when: run_audit
block:
- name: Pre Audit Setup | Check for goss file
ansible.builtin.stat:
path: "{{ audit_bin }}"
register: prelim_goss_available
- name: Pre Audit Setup | If audit ensure goss is available
when: not prelim_goss_available.stat.exists
ansible.builtin.assert:
that: prelim_goss_available['stat']['exists'] == true
msg: "Audit has been selected: unable to find goss binary at {{ audit_bin }}"
- name: Pre Audit Setup | Copy ansible default vars values to test audit
when: run_audit
tags: tags:
- goss_template - setup_audit
- run_audit
ansible.builtin.template:
src: ansible_vars_goss.yml.j2
dest: "{{ audit_vars_path }}"
mode: 'go-rwx'
- name: Pre Audit | Run pre_remediation audit {{ benchmark }} # noqa name[template] - name: "Pre Audit | Ensure {{ audit_conf_dir }} exists"
ansible.builtin.shell: "umask 0022 && {{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -f {{ audit_format }} -m {{ audit_max_concurrent }} -o {{ pre_audit_outfile }} -g \"{{ group_names }}\"" # noqa yaml[line-length] ansible.builtin.file:
changed_when: true path: "{{ audit_conf_dir }}"
environment: state: directory
AUDIT_BIN: "{{ audit_bin }}" mode: '0755'
AUDIT_CONTENT_LOCATION: "{{ audit_conf_dest | default('/opt') }}"
AUDIT_FILE: goss.yml - name: Pre Audit | retrieve audit content files from git
ansible.builtin.git:
repo: "{{ audit_file_git }}"
dest: "{{ audit_conf_dir }}"
version: "{{ audit_git_version }}"
when:
- audit_content == 'git'
- name: Pre Audit | confirm audit branch vs benchmark version
ansible.builtin.debug:
msg: "Audit will run the branch {{ audit_git_version }} for this Benchmark {{ benchmark_version }}"
- name: Pre Audit | copy to audit content files to server
ansible.builtin.copy:
src: "{{ audit_local_copy }}"
dest: "{{ audit_conf_dir }}"
mode: 0644
when:
- audit_content == 'copy'
- name: Pre Audit | get audit content from url
ansible.builtin.get_url:
url: "{{ audit_files_url }}"
dest: "{{ audit_conf_dir }}"
owner: root
group: root
mode: 0755
when:
- audit_content == 'get_url'
- name: Pre Audit | Check Goss is available
block:
- name: Pre Audit | Check for goss file
ansible.builtin.stat:
path: "{{ audit_bin }}"
register: goss_available
- name: Pre Audit | Alert if goss not available
ansible.builtin.assert:
that: goss_available.stat.exists
fail_msg: "Audit binary file {{ audit_bin }} does not exist"
when:
- run_audit
- name: "Pre Audit | Check whether machine is UEFI-based"
ansible.builtin.stat:
path: /sys/firmware/efi
register: rhel9_efi_boot
tags:
- goss_template
- name: Pre Audit | Copy ansible default vars values to test audit
ansible.builtin.template:
src: ansible_vars_goss.yml.j2
dest: "{{ audit_vars_path }}"
mode: 0600
when:
- run_audit
tags:
- goss_template
- name: "Pre Audit | Run pre_remediation {{ benchmark }} audit"
ansible.builtin.shell: "{{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -o {{ pre_audit_outfile }} -g {{ group_names }}"
environment: "{{ audit_run_script_environment | default({}) }}"
changed_when: audit_run_pre_remediation.rc == 0
register: audit_run_pre_remediation
- name: Pre Audit | Capture audit data if json format - name: Pre Audit | Capture audit data if json format
when: audit_format == "json"
block: block:
- name: Pre Audit | Capture audit data if json format - name: "Pre Audit | capture data {{ pre_audit_outfile }}"
ansible.builtin.shell: grep -E '\"summary-line.*Count:.*Failed' "{{ pre_audit_outfile }}" | cut -d'"' -f4 ansible.builtin.shell: "cat {{ pre_audit_outfile }}"
changed_when: false register: pre_audit
failed_when: pre_audit_summary.stderr | length > 0 changed_when: false
register: pre_audit_summary
- name: Pre Audit | Set Fact for audit summary - name: Pre Audit | Capture pre-audit result
ansible.builtin.set_fact: ansible.builtin.set_fact:
pre_audit_results: "{{ pre_audit_summary.stdout }}" pre_audit_summary: "{{ pre_audit.stdout | from_json | json_query(summary) }}"
vars:
summary: 'summary."summary-line"'
when:
- audit_format == "json"
- name: Pre Audit | Capture audit data if documentation format - name: Pre Audit | Capture audit data if documentation format
when: audit_format == "documentation"
block: block:
- name: Pre Audit | Capture audit data if documentation format - name: "Pre Audit | capture data {{ pre_audit_outfile }}"
ansible.builtin.shell: tail -2 "{{ pre_audit_outfile }}" | tac | tr '\n' ' ' ansible.builtin.shell: "tail -2 {{ pre_audit_outfile }}"
changed_when: false register: pre_audit
failed_when: pre_audit_summary.stderr | length > 0 changed_when: false
register: pre_audit_summary
- name: Pre Audit | Set Fact for audit summary - name: Pre Audit | Capture pre-audit result
ansible.builtin.set_fact: ansible.builtin.set_fact:
pre_audit_results: "{{ pre_audit_summary.stdout }}" pre_audit_summary: "{{ pre_audit.stdout_lines }}"
when:
- name: Audit_Only | Run Audit Only - audit_format == "documentation"
when: audit_only
ansible.builtin.import_tasks:
file: audit_only.yml
View file
@ -2,361 +2,262 @@
# Preliminary tasks that should always be run # Preliminary tasks that should always be run
# List users in order to look files inside each home directory # List users in order to look files inside each home directory
- name: "PRELIM | List users accounts"
- name: "PRELIM | Include audit specific variables" ansible.builtin.shell: "awk -F: '{print $1}' /etc/passwd"
when: run_audit or audit_only or setup_audit
tags:
- setup_audit
- run_audit
ansible.builtin.include_vars:
file: audit.yml
- name: "PRELIM | Include pre-remediation audit tasks"
when: run_audit or audit_only or setup_audit
tags: run_audit
ansible.builtin.import_tasks: pre_remediation_audit.yml
- name: "PRELIM | AUDIT | Interactive Users"
tags: always
ansible.builtin.shell: >
grep -E -v '^(root|halt|sync|shutdown)' /etc/passwd | awk -F: '(!index($7, "sbin/nologin") && $7 != "/bin/nologin" && $7 != "/bin/false" && $7 != "/dev/null") { print $1":"$3":"$6 }'
changed_when: false changed_when: false
check_mode: false check_mode: false
register: prelim_interactive_users_raw register: users
tags:
- level1-server
- level1-workstation
- users
- name: "PRELIM | AUDIT | Interactive Users (reformat)" - name: "PRELIM | capture /etc/password variables"
tags: always ansible.builtin.include_tasks: parse_etc_password.yml
ansible.builtin.set_fact: tags:
prelim_interactive_users: "{{ prelim_interactive_users | default([]) + [dict([('username', item.split(':')[0]), ('uid', item.split(':')[1]), ('home', item.split(':')[2])])] }}" - rule_5.5.2
loop: "{{ prelim_interactive_users_raw.stdout_lines }}" - rule_5.6.2
- rule_6.2.9
- rule_6.2.10
- rule_6.2.11
- rhel9cis_section5
- rhel9cis_section6
- level1-server
- name: "PRELIM | AUDIT | Interactive User accounts home directories" - name: "PRELIM | Interactive User accounts"
tags: always ansible.builtin.shell: 'cat /etc/passwd | grep -Ev "nologin|/sbin" | cut -d: -f6'
ansible.builtin.shell: > changed_when: false
grep -E -v '^(root|halt|sync|shutdown)' /etc/passwd | awk -F: '(!index($7, "sbin/nologin") && $7 != "/bin/nologin" && $7 != "/bin/false" && $7 != "/dev/null") { print $6 }' register: interactive_users_home
tags:
- always
- name: "PRELIM | Gather accounts with empty password fields"
ansible.builtin.shell: "cat /etc/shadow | awk -F: '($2 == \"\" ) {j++;print $1; } END {exit j}'"
changed_when: false changed_when: false
check_mode: false check_mode: false
register: prelim_interactive_users_home register: empty_password_accounts
- name: "PRELIM | AUDIT | Interactive UIDs"
tags: always
ansible.builtin.shell: >
grep -E -v '^(root|halt|sync|shutdown)' /etc/passwd | awk -F: '(!index($7, "sbin/nologin") && $7 != "/bin/nologin" && $7 != "/bin/false") { print $3 }'
changed_when: false
check_mode: false
register: prelim_interactive_uids
- name: "PRELIM | AUDIT | Capture /etc/password variables"
tags: always
ansible.builtin.include_tasks:
file: parse_etc_password.yml
- name: "PRELIM | PATCH | Ensure python3-libselinux is installed"
when: '"python3-libselinux" not in ansible_facts.packages'
ansible.builtin.package:
name: python3-libselinux
state: present
- name: PRELIM | AUDIT | Section 1.1 | Create list of mount points
tags: always
ansible.builtin.set_fact:
prelim_mount_names: "{{ ansible_facts.mounts | map(attribute='mount') | list }}"
- name: PRELIM | AUDIT | Section 1.1 | Retrieve mount options
tags: always
block:
- name: PRELIM | AUDIT | Section 1.1 | Retrieve mount options - call mount # noqa command-instead-of-module
ansible.builtin.shell: |
mount | awk '{print $1, $3, $5, $6}'
changed_when: false
check_mode: false
register: prelim_mount_output
- name: PRELIM | AUDIT | Section 1.1 | Retrieve mount options - build fact # This is inherited and used in mountpoints tasks
ansible.builtin.set_fact:
prelim_mount_point_fs_and_options: >-
{%- set prelim_mount_point_fs_and_options = {} -%}
{%- for line in prelim_mount_output.stdout_lines -%}
{%- set fields = line.split() -%}
{%- set _ = prelim_mount_point_fs_and_options.update({fields[1]: {'src': fields[0], 'fs_type': fields[2], 'original_options': fields[3][1:-1].split(','), 'options': fields[3][1:-1].split(',')}}) -%}
{%- endfor -%}
{{ prelim_mount_point_fs_and_options }}
- name: "PRELIM | AUDIT | Debug of mount variables to assist in troubleshooting"
when: rhel9cis_debug_mount_data
ansible.builtin.debug:
msg: "{{ prelim_mount_point_fs_and_options }}"
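# Illustrative example (not from the role): the Jinja loop above folds the `mount` output into a dict
# keyed by mount point; 'original_options' records what was found while 'options' is what the later
# mount rules append to. A hypothetical entry for a tmpfs-backed /tmp:
# prelim_mount_point_fs_and_options:
#   /tmp:
#     src: tmpfs
#     fs_type: tmpfs
#     original_options: ['rw', 'nosuid', 'nodev']
#     options: ['rw', 'nosuid', 'nodev']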
- name: "PRELIM | PATCH | Update to latest gpg keys"
when:
- rhel9cis_rule_1_2_1_1
- ansible_facts.distribution != 'RedHat'
- ansible_facts.distribution != 'OracleLinux'
ansible.builtin.package:
name: "{{ gpg_key_package }}"
state: latest
- name: "PRELIM | AUDIT | Import gpg keys | RedHat Only"
when:
- rhel9cis_rule_1_2_1_1
- rhel9cis_force_gpg_key_import
- ansible_facts.distribution == 'RedHat'
block:
- name: "PRELIM | AUDIT | Import gpg keys | get data"
ansible.builtin.command: rpm -q gpg-pubkey --qf '%{NAME}-%{VERSION}-%{RELEASE}\t%{SUMMARY}\n' # noqa command-instead-of-module
changed_when: false
failed_when: false
check_mode: false
register: prelim_check_gpg_imported
- name: "PRELIM | AUDIT | Import gpg keys | Check Package" # noqa command-instead-of-module
when: "'not installed' in prelim_check_gpg_imported.stdout"
ansible.builtin.shell: rpm -qi redhat-release | grep Signature # noqa command-instead-of-module
changed_when: false
failed_when: false
check_mode: false
register: prelim_os_gpg_package_valid
- name: "PRELIM | PATCH | Force keys to be imported" # noqa command-instead-of-module
when:
- "'not installed' in prelim_check_gpg_imported.stdout"
- "'Key ID 199e2f91fd431d51' in prelim_os_gpg_package_valid.stdout"
ansible.builtin.rpm_key:
key: /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
state: present
- name: "PRELIM | AUDIT | Check systemd coredump"
when: rhel9cis_rule_1_5_4
tags:
- level1-server
- level1-workstation
- rule_1.5.4
- systemd
ansible.builtin.stat:
path: /etc/systemd/coredump.conf
register: prelim_systemd_coredump
- name: "PRELIM | PATCH | Setup crypto-policy"
when: rhel9cis_crypto_policy_ansiblemanaged
tags:
- level1-server
- level1-workstation
- rule_1.6.1
- crypto
block:
- name: "PRELIM | PATCH | Install crypto-policies | pkgs present"
ansible.builtin.package:
name:
- crypto-policies
- crypto-policies-scripts
state: present
- name: "PRELIM | AUDIT | Gather system-wide crypto-policy"
ansible.builtin.command: 'update-crypto-policies --show'
changed_when: false
check_mode: false
register: prelim_system_wide_crypto_policy
- name: "PRELIM | AUDIT | Gather system-wide crypto-policy | set fact system policy"
ansible.builtin.set_fact:
current_crypto_policy: "{{ prelim_system_wide_crypto_policy.stdout.split(':')[0] }}"
- name: "PRELIM | AUDIT | Gather system-wide crypto-policy module | set fact system policy submodule"
when: "':' in prelim_system_wide_crypto_policy.stdout"
ansible.builtin.set_fact:
current_crypto_module: "{{ prelim_system_wide_crypto_policy.stdout.split(':')[1] }}"
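# Illustrative example (not from the role): update-crypto-policies --show prints the active policy,
# optionally followed by a colon-separated subpolicy, which the two set_fact tasks above split.
# Hypothetical outputs and the resulting facts:
#   'DEFAULT:SHA1' -> current_crypto_policy: 'DEFAULT', current_crypto_module: 'SHA1'
#   'FUTURE'       -> current_crypto_policy: 'FUTURE' (no submodule fact is set)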
- name: "PRELIM | AUDIT | Set facts based on boot type"
tags: always
block:
- name: "PRELIM | AUDIT | Check whether machine is UEFI-based"
ansible.builtin.stat:
path: /sys/firmware/efi
register: prelim_efi_boot
- name: "PRELIM | AUDIT | Set legacy boot and grub path | Bios"
when: not prelim_efi_boot.stat.exists
ansible.builtin.set_fact:
rhel9cis_legacy_boot: true
grub2_path: /etc/grub2.cfg
- name: "PRELIM | AUDIT | Set grub fact | UEFI"
when: prelim_efi_boot.stat.exists
ansible.builtin.set_fact:
grub2_path: /etc/grub2-efi.cfg
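# Illustrative example (not from the role): the stat on /sys/firmware/efi is what separates UEFI from
# legacy BIOS hosts here. A minimal sketch of a consumer of these facts (task name and message are
# hypothetical):
# - name: "Example | Show grub path selected by the prelim"
#   ansible.builtin.debug:
#     msg: "grub config: {{ grub2_path }} (legacy boot: {{ rhel9cis_legacy_boot | default(false) }})"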
- name: "PRELIM | AUDIT | Discover Gnome Desktop Environment"
tags: always
ansible.builtin.stat:
path: /usr/share/gnome/gnome-version.xml
register: prelim_gnome_present
- name: "PRELIM | PATCH | Install dconf if gui installed"
when: rhel9cis_gui
tags: always
ansible.builtin.package:
name: dconf
state: present
- name: "PRELIM | AUDIT | Wireless adapter pre-requisites"
when:
- rhel9cis_rule_3_1_2
- not system_is_container
tags: always
block:
- name: "PRELIM | AUDIT | Discover is wireless adapter on system"
ansible.builtin.command: find /sys/class/net/*/ -type d -name wireless
register: discover_wireless_adapters
changed_when: false
check_mode: false
failed_when: discover_wireless_adapters.rc not in [ 0, 1 ]
- name: "PRELIM | PATCH | Install Network-Manager | if wireless adapter present"
when:
- discover_wireless_adapters.rc == 0
- "'NetworkManager' not in ansible_facts.packages"
ansible.builtin.package:
name: NetworkManager
state: present
- name: "PRELIM | PATCH | Install Cronie"
when:
- rhel9cis_rule_5_1_1
- '"cronie" not in ansible_facts.packages'
tags:
- level1-server
- level1-workstation
- rule_5.1.1
- cron
ansible.builtin.package:
name: cronie
state: present
# Added to ensure ssh drop in file exists if not default /etc/ssh/sshd_config
- name: "PRELIM | PATCH | Ensure SSH config file exists when not the default"
when:
- rhel9cis_sshd_config_file != '/etc/ssh/sshd_config'
- "'openssh-server' in ansible_facts.packages"
tags:
- always
- level1_server
- level1_workstation
ansible.builtin.file:
path: "{{ rhel9cis_sshd_config_file }}"
owner: root
group: root
mode: 'go-rwx'
state: touch
- name: "PRELIM | PATCH | sshd_config.d/50-redhat.conf exists"
when: rhel9cis_rule_5_1_10 or rhel9cis_rule_5_1_11
ansible.builtin.stat:
path: /etc/ssh/sshd_config.d/50-redhat.conf
register: prelim_sshd_50_redhat_file
- name: "PRELIM | AUDIT | Capture pam security related files"
tags: always
ansible.builtin.find:
paths:
- /etc/security/pwquality.conf.d/
patterns: '*.conf'
register: prelim_pam_pwquality_confs
- name: "PRELIM | AUDIT | Gather UID 0 accounts other than root"
when: rhel9cis_rule_5_4_2_1
tags:
- rule_5.4.2.1
- level1-server
- level1-workstation
- users
ansible.builtin.shell: "cat /etc/passwd | awk -F: '($3 == 0 && $1 != \"root\") {i++;print $1 } END {exit i}'"
changed_when: false
check_mode: false
register: prelim_uid_zero_accounts_except_root
- name: "PRELIM | Gather UID 0 accounts other than root"
ansible.builtin.shell: "cat /etc/passwd | awk -F: '($3 == 0 && $1 != \"root\") {i++;print $1 } END {exit i}'"
changed_when: false
check_mode: false
register: rhel9cis_uid_zero_accounts_except_root
tags:
- rule_6.2.9
- level1-server
- level1-workstation
- users
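# Illustrative example (not from the role): both variants above rely on awk's END block to exit with
# the number of UID-0 accounts other than root, so the registered result carries the offending names
# in stdout and the count in rc. A hypothetical run with one extra UID-0 account could register:
#   stdout: 'toor'
#   rc: 1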
- name: "PRELIM | PATCH | Create journald config directory"
when:
- rhel9cis_syslog == 'journald'
- rhel9cis_rule_6_2_1_3 or
rhel9cis_rule_6_2_1_4
tags: always
ansible.builtin.file:
path: /etc/systemd/journald.conf.d
state: directory
mode: 'u+x,g-w,o-rwx'
- name: "PRELIM | PATCH | Configure System Accounting (auditd)"
when:
- '"auditd" not in ansible_facts.packages'
- rhel9cis_rule_6_3_1_1
tags:
- level2-server
- level2-workstation
- patch
- rule_6.3.1.1
- auditd
ansible.builtin.package:
name: audit
state: present
become: true
- name: "PRELIM | AUDIT | Discover audit logfile"
when:
- rhel9cis_rule_6_3_4_1 or
rhel9cis_rule_6_3_4_2 or
rhel9cis_rule_6_3_4_3 or
rhel9cis_rule_6_3_4_4
tags: always
ansible.builtin.shell: grep ^log_file /etc/audit/auditd.conf | awk '{ print $NF }'
changed_when: false
check_mode: false
register: prelim_auditd_logfile
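# Illustrative example (not from the role): the grep/awk pipeline above returns the value of the
# log_file directive; on a stock auditd.conf that would be:
#   'log_file = /var/log/audit/audit.log' -> prelim_auditd_logfile.stdout == '/var/log/audit/audit.log'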
- name: "PRELIM | AUDIT | Audit conf and rules files | list files"
when:
- rhel9cis_rule_6_3_4_5 or
rhel9cis_rule_6_3_4_6 or
rhel9cis_rule_6_3_4_7
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_6.3.4.5
- rule_6.3.4.6
- rule_6.3.4.7
ansible.builtin.find:
path: /etc/audit
file_type: file
recurse: true
patterns: '*.conf,*.rules'
register: prelim_auditd_conf_files
- name: "PRELIM | AUDIT | Discover Interactive UID_MIN and UID_MAX from /etc/login.defs"
when: rhel9cis_discover_int_uid
tags: always
block:
- name: "PRELIM | AUDIT | Capture UID_MIN from /etc/login.defs"
ansible.builtin.command: awk '/^UID_MIN/ {print $2}' /etc/login.defs
changed_when: false
failed_when: false
check_mode: false
register: prelim_uid_min_id
- name: "PRELIM | AUDIT | Capture UID_MAX from /etc/login.defs"
ansible.builtin.command: awk '/^UID_MAX/ {print $2}' /etc/login.defs
changed_when: false
failed_when: false
check_mode: false
register: prelim_uid_max_id
- name: "PRELIM | Setup crypto-policy"
block:
- name: "PRELIM | Install crypto-policies"
ansible.builtin.package:
name:
- crypto-policies
- crypto-policies-scripts
state: present
- name: "PRELIM | Gather system-wide crypto-policy"
ansible.builtin.shell: update-crypto-policies --show
changed_when: false
check_mode: false
register: system_wide_crypto_policy
when:
- rhel9cis_rule_1_10
tags:
- level1-server
- level1-workstation
- rule_1.10
- crypto
- name: "PRELIM | AUDIT | Set facts for interactive UID/GID ranges"
tags: always
ansible.builtin.set_fact:
prelim_min_int_uid: "{{ prelim_uid_min_id.stdout | default(min_int_uid) }}"
prelim_max_int_uid: "{{ prelim_uid_max_id.stdout | default(max_int_uid) }}"
- name: "PRELIM | AUDIT | Gather the package facts after prelim"
tags: always
ansible.builtin.package_facts:
manager: auto
- name: "PRELIM | if systemd coredump"
ansible.builtin.stat:
path: /etc/systemd/coredump.conf
register: systemd_coredump
when:
- rhel9cis_rule_1_5_1
tags:
- level1-server
- level1-workstation
- rule_1.5.1
- systemd
- name: "PRELIM | Section 1.1 | Create list of mount points"
ansible.builtin.set_fact:
mount_names: "{{ ansible_mounts | map(attribute='mount') | list }}"
tags:
- level1-server
- level1-workstation
- name: "PRELIM | Ensure python3-libselinux is installed"
ansible.builtin.package:
name: python3-libselinux
state: present
when:
- '"python3-libselinux" not in ansible_facts.packages'
- name: "PRELIM | Set facts based on boot type"
block:
- name: "PRELIM | Check whether machine is UEFI-based"
ansible.builtin.stat:
path: /sys/firmware/efi
register: rhel_09_efi_boot
- name: "PRELIM | set legacy boot and grub path | Bios"
ansible.builtin.set_fact:
rhel9cis_legacy_boot: true
grub2_path: /etc/grub2.cfg
when: not rhel_09_efi_boot.stat.exists
- name: "PRELIM | set grub fact | UEFI"
ansible.builtin.set_fact:
grub2_path: /etc/grub2-efi.cfg
when: rhel_09_efi_boot.stat.exists
- name: "PRELIM | Update to latest gpg keys"
ansible.builtin.package:
name: "{{ gpg_key_package }}"
state: latest
when:
- rhel9cis_rule_1_2_4
- ansible_distribution != 'RedHat'
- ansible_distribution != 'OracleLinux'
- name: "PRELIM | Section 4.1 | Configure System Accounting (auditd)"
ansible.builtin.package:
name: audit
state: present
become: true
when:
- '"auditd" not in ansible_facts.packages'
- rhel9cis_rule_4_1_1_1
tags:
- level2-server
- level2-workstation
- patch
- rule_4.1.1.1
- auditd
- name: "PRELIM | 4.1.4.5 | Audit conf and rules files | list files"
ansible.builtin.find:
path: /etc/audit
file_type: file
recurse: true
patterns: '*.conf,*.rules'
register: auditd_conf_files
when:
- rhel9cis_rule_4_1_4_5 or
rhel9cis_rule_4_1_4_6 or
rhel9cis_rule_4_1_4_7
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.4.5
- rule_4.1.4.6
- rule_4.1.4.7
- name: "PRELIM | Section 5.1 | Configure cron"
ansible.builtin.package:
name: cronie
state: present
become: true
when:
- rhel9cis_rule_5_1_1
- '"cronie" not in ansible_facts.packages'
tags:
- level1-server
- level1-workstation
- rule_5.1.1
- cron
# Added to ensure ssh drop in file exists if not default /etc/ssh/sshd_config
- name: "PRELIM | Section 5.2 | SSH"
ansible.builtin.file:
path: "{{ rhel9_cis_sshd_config_file }}"
owner: root
group: root
mode: 0600
state: touch
when:
- rhel9_cis_sshd_config_file != '/etc/ssh/sshd_config'
- "'openssh-server' in ansible_facts.packages"
tags:
- ssh
- level1_server
- level1_workstation
- name: "PRELIM | Install authconfig"
ansible.builtin.package:
name: authconfig
state: present
become: true
when:
- rhel9cis_use_authconfig
- rhel9cis_rule_5_3_1 or
rhel9cis_rule_5_3_2 or
rhel9cis_rule_5_3_3 or
'"authconfig" not in ansible_facts.packages or
"auditd-lib" not in ansible_facts.packages'
tags:
- level1-server
- level1-workstation
- rule_5.3.1 or
rule_5.3.2 or
rule_5.3.3
- authconfig
- auditd
- name: "PRELIM | 5.3.4 | Find all sudoers files."
ansible.builtin.shell: "find /etc/sudoers /etc/sudoers.d/ -type f ! -name '*~' ! -name '*.*'"
changed_when: false
failed_when: false
check_mode: false
register: rhel9cis_sudoers_files
when:
- rhel9cis_rule_5_3_4 or
rhel9cis_rule_5_3_5
tags:
- rule_5.3.4
- rule_5.3.5
- name: "PRELIM | Discover Interactive UID_MIN and UID_MAX from login.defs"
block:
- name: "PRELIM | Capture UID_MIN information from login.defs"
ansible.builtin.shell: grep -w "^UID_MIN" /etc/login.defs | awk '{print $NF}'
changed_when: false
register: uid_min_id
- name: "PRELIM | Capture UID_MAX information from login.defs"
ansible.builtin.shell: grep -w "^UID_MAX" /etc/login.defs | awk '{print $NF}'
changed_when: false
register: uid_max_id
- name: "PRELIM | Capture GID_MIN information from login.defs"
ansible.builtin.shell: grep -w "^GID_MIN" /etc/login.defs | awk '{print $NF}'
changed_when: false
register: gid_min_id
- name: "PRELIM | set_facts for interactive uid/gid"
ansible.builtin.set_fact:
min_int_uid: "{{ uid_min_id.stdout }}"
max_int_uid: "{{ uid_max_id.stdout }}"
min_int_gid: "{{ gid_min_id.stdout }}"
- name: "PRELIM | Output of uid findings"
ansible.builtin.debug:
msg: "{{ min_int_uid }} {{ max_int_uid }}"
when:
- not discover_int_uid
- name: "PRELIM | Gather the package facts after prelim"
ansible.builtin.package_facts:
manager: auto
tags:
- always
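# Illustrative example (not from the role): re-running package_facts at the end of the prelim refreshes
# ansible_facts.packages, so packages installed above (audit, cronie, crypto-policies, ...) are visible
# to later rule conditions. A minimal sketch of such a condition (task name is hypothetical):
# - name: "Example | Only report when audit is installed"
#   ansible.builtin.debug:
#     msg: "audit package present"
#   when: "'audit' in ansible_facts.packages"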

View file

@ -1,292 +1,66 @@
---
- name: "1.1.1.1 | PATCH | Ensure mounting of squashfs filesystems is disabled"
block:
- name: "1.1.1.1 | PATCH | Ensure mounting of squashfs filesystems is disabled | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install squashfs(\\s|$)"
line: "install squashfs /bin/true"
create: true
mode: 0600
- name: "1.1.1.1 | PATCH | Ensure mounting of squashfs filesystems is disabled | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist squashfs(\\s|$)"
line: "blacklist squashfs"
create: true
mode: 0600
- name: "1.1.1.1 | PATCH | Ensure mounting of squashfs filesystems is disabled | Disable squashfs"
community.general.modprobe:
name: squashfs
state: absent
when: not system_is_container
when:
- rhel9cis_rule_1_1_1_1
tags:
- level2-server
- level2-workstation
- patch
- rule_1.1.1.1
- squashfs
- name: "1.1.1.2 | PATCH | Ensure mounting of udf filesystems is disabled"
block:
- name: "1.1.1.2 | PATCH | Ensure mounting of udf filesystems is disable | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install udf(\\s|$)"
line: "install udf /bin/true"
create: true
mode: 0600
- name: "1.1.1.2 | PATCH | Ensure mounting of udf filesystems is disabled | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist udf(\\s|$)"
line: "blacklist udf"
create: true
mode: 0600
- name: "1.1.1.2 | PATCH | Ensure mounting of udf filesystems is disable | Disable udf"
community.general.modprobe:
name: udf
state: absent
when: not system_is_container
when:
- rhel9cis_rule_1_1_1_2
tags:
- level2-server
- level2-workstation
- patch
- rule_1.1.1.2
- udf
---
- name: "1.1.1.1 | PATCH | Ensure cramfs kernel module is not available"
when: rhel9cis_rule_1_1_1_1
tags:
- level1-server
- level1-workstation
- patch
- rule_1.1.1.1
- cramfs
- NIST800-53R5_CM-7
block:
- name: "1.1.1.1 | PATCH | Ensure cramfs kernel module is not available | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install cramfs(\\s|$)"
line: "install cramfs /bin/true"
create: true
mode: 'go-rwx'
- name: "1.1.1.1 | PATCH | Ensure cramfs kernel module is not available | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist cramfs(\\s|$)"
line: "blacklist cramfs"
create: true
mode: 'go-rwx'
- name: "1.1.1.1 | PATCH | Ensure cramfs kernel module is not available | Disable cramfs"
when:
- not system_is_container
community.general.modprobe:
name: cramfs
state: absent
- name: "1.1.1.2 | PATCH | Ensure freevxfs kernel module is not available"
when: rhel9cis_rule_1_1_1_2
tags:
- level1-server
- level1-workstation
- patch
- rule_1.1.1.2
- freevxfs
- NIST800-53R5_CM-7
block:
- name: "1.1.1.2 | PATCH | Ensure freevxfs kernel module is not available | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install freevxfs(\\s|$)"
line: "install freevxfs /bin/true"
create: true
mode: 'go-rwx'
- name: "1.1.1.2 | PATCH | Ensure freevxfs kernel module is not available | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist freevxfs(\\s|$)"
line: "blacklist freevxfs"
create: true
mode: 'go-rwx'
- name: "1.1.1.2 | PATCH | Ensure freevxfs kernel module is not available | Disable freevxfs"
when: not system_is_container
community.general.modprobe:
name: freevxfs
state: absent
- name: "1.1.1.3 | PATCH | Ensure hfs kernel module is not available"
when: rhel9cis_rule_1_1_1_3
tags:
- level1-server
- level1-workstation
- patch
- rule_1.1.1.3
- hfs
- NIST800-53R5_CM-7
block:
- name: "1.1.1.3 | PATCH | Ensure hfs kernel module is not available | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install hfs(\\s|$)"
line: "install hfs /bin/true"
create: true
mode: 'go-rwx'
- name: "1.1.1.3 | PATCH | Ensure hfs kernel module is not available | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist hfs(\\s|$)"
line: "blacklist hfs"
create: true
mode: 'go-rwx'
- name: "1.1.1.3 | PATCH | Ensure hfs kernel module is not available | Disable hfs"
when: not system_is_container
community.general.modprobe:
name: hfs
state: absent
- name: "1.1.1.4 | PATCH | Ensure hfsplus kernel module is not available"
when: rhel9cis_rule_1_1_1_4
tags:
- level1-server
- level1-workstation
- patch
- rule_1.1.1.4
- hfsplus
- NIST800-53R5_CM-7
block:
- name: "1.1.1.4 | PATCH | Ensure hfsplus kernel module is not available | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install hfsplus(\\s|$)"
line: "install hfsplus /bin/true"
create: true
mode: 'go-rwx'
- name: "1.1.1.4 | PATCH | Ensure hfsplus kernel module is not available | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist hfsplus(\\s|$)"
line: "blacklist hfsplus"
create: true
mode: 'go-rwx'
- name: "1.1.1.4 | PATCH | Ensure hfsplus kernel module is not available | Disable hfsplus"
when: not system_is_container
community.general.modprobe:
name: hfsplus
state: absent
- name: "1.1.1.5 | PATCH | Ensure jffs2 kernel module is not available"
when: rhel9cis_rule_1_1_1_5
tags:
- level1-server
- level1-workstation
- patch
- rule_1.1.1.5
- jffs2
- NIST800-53R5_CM-7
block:
- name: "1.1.1.5 | PATCH | Ensure jffs2 kernel module is not available | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install jffs2(\\s|$)"
line: "install jffs2 /bin/true"
create: true
mode: 'go-rwx'
- name: "1.1.1.5 | PATCH | Ensure jffs2 kernel module is not available | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist jffs2(\\s|$)"
line: "blacklist jffs2"
create: true
mode: 'go-rwx'
- name: "1.1.1.5 | PATCH | Ensure jffs2 kernel module is not available | Disable jffs2"
when: not system_is_container
community.general.modprobe:
name: jffs2
state: absent
- name: "1.1.1.6 | PATCH | Ensure squashfs kernel module is not available"
when: rhel9cis_rule_1_1_1_6
tags:
- level2-server
- level2-workstation
- patch
- rule_1.1.1.6
- squashfs
- NIST800-53R5_CM-7
block:
- name: "1.1.1.6 | PATCH | Ensure squashfs kernel module is not available | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install squashfs(\\s|$)"
line: "install squashfs /bin/true"
create: true
mode: 'go-rwx'
- name: "1.1.1.6 | PATCH | Ensure squashfs kernel module is not available | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist squashfs(\\s|$)"
line: "blacklist squashfs"
create: true
mode: 'go-rwx'
- name: "1.1.1.6 | PATCH | Ensure squashfs kernel module is not available | Disable squashfs"
when: not system_is_container
community.general.modprobe:
name: squashfs
state: absent
- name: "1.1.1.7 | PATCH | Ensure udf kernel module is not available"
when: rhel9cis_rule_1_1_1_7
tags:
- level2-server
- level2-workstation
- patch
- rule_1.1.1.7
- udf
- NIST800-53R5_CM-7
block:
- name: "1.1.1.7 | PATCH | Ensure udf kernel module is not available | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install udf(\\s|$)"
line: "install udf /bin/true"
create: true
mode: 'go-rwx'
- name: "1.1.1.7 | PATCH | Ensure udf kernel module is not available | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist udf(\\s|$)"
line: "blacklist udf"
create: true
mode: 'go-rwx'
- name: "1.1.1.7 | PATCH | Ensure udf kernel module is not available | Disable udf"
when: not system_is_container
community.general.modprobe:
name: udf
state: absent
- name: "1.1.1.8 | PATCH | Ensure usb-storage kernel module is not available"
when: rhel9cis_rule_1_1_1_8
tags:
- level1-server
- level2-workstation
- patch
- rule_1.1.1.8
- usb
- NIST800-53R5_SI-3
block:
- name: "1.1.1.8 | PATCH | Ensure usb-storage kernel module is not available | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install usb-storage(\\s|$)"
line: "install usb-storage /bin/true"
create: true
mode: 'go-rwx'
- name: "1.1.1.8 | PATCH | Ensure usb-storage kernel module is not available | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist usb-storage(\\s|$)"
line: "blacklist usb-storage"
create: true
mode: 'go-rwx'
- name: "1.1.1.8 | PATCH | Ensure usb-storage kernel module is not available | Disable usb"
when: not system_is_container
community.general.modprobe:
name: usb-storage
state: absent
- name: "1.1.1.9 | PATCH | Ensure unused filesystems kernel modules are not available"
when: rhel9cis_rule_1_1_1_9
tags:
- level1-server
- level1-workstation
- patch
- rule_1.1.1.9
vars:
warn_control_id: '1.1.1.9'
block:
- name: "1.1.1.9 | PATCH | Ensure unused filesystems kernel modules are not available | Add discovery script"
ansible.builtin.copy:
src: fs_with_cves.sh
dest: /var/fs_with_cves.sh
owner: root
group: root
mode: 'u+x,go-wx'
- name: "1.1.1.9 | AUDIT | Ensure unused filesystems kernel modules are not available | Run discovery script"
ansible.builtin.command: /var/fs_with_cves.sh
changed_when: false
failed_when: discovered_fs_modules_loaded.rc not in [ 0, 99 ]
register: discovered_fs_modules_loaded
- name: "1.1.1.9 | AUDIT | Ensure unused filesystems kernel modules are not available | Output Warning"
when: discovered_fs_modules_loaded.stdout | length > 0
ansible.builtin.debug:
msg: "{{ ['Warning!! Discovered loaded Filesystem modules that need attention. This is a manual task'] + discovered_fs_modules_loaded.stdout_lines }}"
- name: "1.1.1.9 | AUDIT | Ensure unused filesystems kernel modules are not available | Capture Warning"
when: discovered_fs_modules_loaded.stdout | length > 0
ansible.builtin.import_tasks:
file: warning_facts.yml
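# Illustrative example (not from the role): the CIS audits for the 1.1.1.x controls typically check that
# modprobe resolves the module to /bin/true and that it is not currently loaded. A hedged sketch of such
# a check for cramfs (task names and register are hypothetical):
# - name: "Example | Audit cramfs is not available"
#   ansible.builtin.command: modprobe -n -v cramfs
#   register: example_cramfs_check
#   changed_when: false
#   failed_when: false
# - name: "Example | Show resolution"
#   ansible.builtin.debug:
#     msg: "{{ example_cramfs_check.stdout | default('') }}"  # expected to contain 'install /bin/true'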

View file

@ -1,132 +0,0 @@
---
- name: "1.1.2.1.1 | PATCH | Ensure /tmp is a separate partition"
when:
- required_mount not in prelim_mount_names
- rhel9cis_rule_1_1_2_1_1
tags:
- level1-server
- level1-workstation
- audit
- mounts
- rule_1.1.2.1.1
- NIST800-53R5_CM-7
vars:
warn_control_id: "1.1.2.1.1"
required_mount: "/tmp"
block:
- name: "1.1.2.1.1 | AUDIT | Ensure /tmp is a separate partition | check for mount"
ansible.builtin.command: findmnt -kn "{{ required_mount }}"
changed_when: false
failed_when: discovered_tmp_mount.rc not in [ 0, 1 ]
register: discovered_tmp_mount
- name: "1.1.2.1.1 | AUDIT | Ensure /tmp is a separate partition | Absent"
when: discovered_tmp_mount is undefined
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} is not mounted on a separate partition"
- name: "1.1.2.1.1 | AUDIT | Ensure /tmp is a separate partition | Present"
when: discovered_tmp_mount is undefined
ansible.builtin.import_tasks:
file: warning_facts.yml
# via fstab
- name: "1.1.2.1.2 | PATCH | Ensure nodev option set on /tmp partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- not prelim_mount_point_fs_and_options[mount_point]['src'] == "tmpfs"
- rhel9cis_rule_1_1_2_1_2
- not rhel9cis_tmp_svc
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.1.2
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/tmp"
required_option: nodev
notify: &mount_option_notify
- "Remount {{ mount_point }}"
ansible.builtin.set_fact: &mount_option_set_fact
prelim_mount_point_fs_and_options: |
{{ prelim_mount_point_fs_and_options | combine({mount_point: {'options': (prelim_mount_point_fs_and_options[mount_point]['options'] + [required_option])}}, recursive=True) }}
changed_when: &mount_option_changed_when
- required_option not in prelim_mount_point_fs_and_options[mount_point]['original_options']
- name: "1.1.2.1.3 | PATCH | Ensure nosuid option set on /tmp partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- not prelim_mount_point_fs_and_options[mount_point]['src'] == "tmpfs"
- rhel9cis_rule_1_1_2_1_3
- not rhel9cis_tmp_svc
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.1.3
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/tmp"
required_option: nosuid
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when
- name: "1.1.2.1.4 | PATCH | Ensure noexec option set on /tmp partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- not prelim_mount_point_fs_and_options[mount_point]['src'] == "tmpfs"
- rhel9cis_rule_1_1_2_1_4
- not rhel9cis_tmp_svc
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.1.4
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/tmp"
required_option: noexec
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when
# via systemd
- name: |
"1.1.2.1.1 | PATCH | Ensure /tmp is configured
1.1.2.1.2 | PATCH | Ensure nodev option set on /tmp partition
1.1.2.1.3 | PATCH | Ensure noexec option set on /tmp partition
1.1.2.1.4 | PATCH | Ensure nosuid option set on /tmp partition"
when:
- rhel9cis_tmp_svc
- rhel9cis_rule_1_1_2_1_1 or rhel9cis_rule_1_1_2_1_2 or rhel9cis_rule_1_1_2_1_3 or rhel9cis_rule_1_1_2_1_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.1.1
- rule_1.1.2.1.2
- rule_1.1.2.1.3
- rule_1.1.2.1.4
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/tmp"
ansible.builtin.template:
src: etc/systemd/system/tmp.mount.j2
dest: /etc/systemd/system/tmp.mount
owner: root
group: root
mode: 'go-wx'
notify: *mount_option_notify
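# Illustrative example (not from the role): the '&mount_option_notify', '&mount_option_set_fact' and
# '<<:' markers above are ordinary YAML anchors, aliases and merge keys: the nodev task defines the
# set_fact and changed_when bodies once, and the nosuid/noexec tasks reuse them while varying only
# 'required_option'. A minimal standalone sketch of the same mechanism:
# base: &defaults
#   state: present
#   create: true
# reuse:
#   <<: *defaults   # merges state/create from the anchor
#   mode: 'go-rwx'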

View file

@ -1,95 +0,0 @@
---
- name: "1.1.2.2.1 | PATCH | Ensure /dev/shm is a separate partition"
when:
- rhel9cis_rule_1_1_2_2_1
- required_mount not in prelim_mount_names
tags:
- level1-server
- level1-workstation
- audit
- mounts
- rule_1.1.2.2.1
- NIST800-53R5_CM-7
vars:
warn_control_id: "1.1.2.2.1"
required_mount: "/dev/shm"
block:
- name: "1.1.2.2.1 | AUDIT | Ensure /dev/shm is a separate partition | check for mount"
ansible.builtin.command: findmnt -kn "{{ required_mount }}"
changed_when: false
failed_when: discovered_dev_shm_mount.rc not in [ 0, 1 ]
register: discovered_dev_shm_mount
- name: "1.1.2.2.1 | AUDIT | Ensure /dev/shm is a separate partition | Absent"
when: discovered_dev_shm_mount is undefined
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} is not mounted on a separate partition"
- name: "1.1.2.2.1 | AUDIT | Ensure /dev/shm is a separate partition | Present"
when: discovered_dev_shm_mount is undefined
ansible.builtin.import_tasks:
file: warning_facts.yml
- name: "1.1.2.2.2 | PATCH | Ensure nodev option set on /dev/shm partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_2_2
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.2.2
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/dev/shm"
required_option: nodev
notify: &mount_option_notify
- "Remount {{ mount_point }}"
ansible.builtin.set_fact: &mount_option_set_fact
prelim_mount_point_fs_and_options: |
{{ prelim_mount_point_fs_and_options | combine({mount_point: {'options': (prelim_mount_point_fs_and_options[mount_point]['options'] + [required_option])}}, recursive=True) }}
changed_when: &mount_option_changed_when
- required_option not in prelim_mount_point_fs_and_options[mount_point]['original_options']
- name: "1.1.2.2.3 | PATCH | Ensure nosuid option set on /dev/shm partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_2_3
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.2.3
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/dev/shm"
required_option: nosuid
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when
- name: "1.1.2.2.4 | PATCH | Ensure noexec option set on /dev/shm partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_2_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.2.4
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/dev/shm"
required_option: noexec
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when

View file

@ -1,74 +0,0 @@
---
- name: "1.1.2.3.1 | PATCH | Ensure /home is a separate partition"
when:
- rhel9cis_rule_1_1_2_3_1
- required_mount not in prelim_mount_names
tags:
- level1-server
- level1-workstation
- audit
- mounts
- rule_1.1.2.3.1
- NIST800-53R5_CM-7
vars:
warn_control_id: "1.1.2.3.1"
required_mount: "/home"
block:
- name: "1.1.2.3.1 | AUDIT | Ensure /home is a separate partition | check for mount"
ansible.builtin.command: findmnt -kn "{{ required_mount }}"
changed_when: false
failed_when: discovered_home_mount.rc not in [ 0, 1 ]
register: discovered_home_mount
- name: "1.1.2.3.1 | AUDIT | Ensure /home is a separate partition | Absent"
when: discovered_home_mount is undefined
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} is not mounted on a separate partition"
- name: "1.1.2.3.1 | AUDIT | Ensure /home is a separate partition | Present"
when: discovered_home_mount is undefined
ansible.builtin.import_tasks:
file: warning_facts.yml
- name: "1.1.2.3.2 | PATCH | Ensure nodev option set on /home partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_3_2
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.3.2
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/home"
required_option: nodev
notify: &mount_option_notify
- "Remount {{ mount_point }}"
ansible.builtin.set_fact: &mount_option_set_fact
prelim_mount_point_fs_and_options: |
{{ prelim_mount_point_fs_and_options | combine({mount_point: {'options': (prelim_mount_point_fs_and_options[mount_point]['options'] + [required_option])}}, recursive=True) }}
changed_when: &mount_option_changed_when
- required_option not in prelim_mount_point_fs_and_options[mount_point]['original_options']
- name: "1.1.2.3.3 | PATCH | Ensure nosuid option set on /home partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_3_3
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.3.3
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/home"
required_option: nosuid
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when

View file

@ -1,75 +0,0 @@
---
- name: "1.1.2.4.1 | PATCH | Ensure /var is a separate partition"
when:
- rhel9cis_rule_1_1_2_4_1
- required_mount not in prelim_mount_names
tags:
- level1-server
- level1-workstation
- audit
- mounts
- rule_1.1.2.4.1
- NIST800-53R5_CM-7
vars:
warn_control_id: '1.1.2.4.1'
required_mount: '/var'
block:
- name: "1.1.2.4.1 | AUDIT | Ensure /var is a separate partition | check for mount"
ansible.builtin.command: findmnt -kn "{{ required_mount }}"
changed_when: false
failed_when: discovered_var_mount.rc not in [ 0, 1 ]
register: discovered_var_mount
- name: "1.1.2.4.1 | AUDIT | Ensure /var is a separate partition | Absent"
when: discovered_var_mount is undefined
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} is not mounted on a separate partition"
- name: "1.1.2.4.1 | AUDIT | Ensure /var is a separate partition | Present"
when: discovered_var_mount is undefined
ansible.builtin.import_tasks:
file: warning_facts.yml
- name: "1.1.2.4.2 | PATCH | Ensure nodev option set on /var partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_4_2
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.4.2
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var"
required_option: nodev
notify: &mount_option_notify
- "Remount {{ mount_point }}"
ansible.builtin.set_fact: &mount_option_set_fact
prelim_mount_point_fs_and_options: |
{{ prelim_mount_point_fs_and_options | combine({mount_point: {'options': (prelim_mount_point_fs_and_options[mount_point]['options'] + [required_option])}}, recursive=True) }}
changed_when: &mount_option_changed_when
- required_option not in prelim_mount_point_fs_and_options[mount_point]['original_options']
- name: "1.1.2.4.3 | PATCH | Ensure nosuid option set on /var partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_4_3
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.4.3
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var"
required_option: nosuid
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when

View file

@ -1,95 +0,0 @@
---
- name: "1.1.2.5.1 | PATCH | Ensure /var/tmp is a separate partition"
when:
- rhel9cis_rule_1_1_2_5_1
- required_mount not in prelim_mount_names
tags:
- level1-server
- level1-workstation
- audit
- mounts
- rule_1.1.2.5.1
- NIST800-53R5_CM-7
vars:
warn_control_id: '1.1.2.5.1'
required_mount: '/var/tmp'
block:
- name: "1.1.2.5.1 | AUDIT | Ensure /var/tmp is a separate partition | check for mount"
ansible.builtin.command: findmnt -kn "{{ required_mount }}"
changed_when: false
failed_when: discovered_var_tmp_mount.rc not in [ 0, 1 ]
register: discovered_var_tmp_mount
- name: "1.1.2.5.1 | AUDIT | Ensure /var/tmp is a separate partition | Absent"
when: discovered_var_tmp_mount is undefined
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} is not mounted on a separate partition"
- name: "1.1.2.5.1 | AUDIT | Ensure /var/tmp is a separate partition | Present"
when: discovered_var_tmp_mount is undefined
ansible.builtin.import_tasks:
file: warning_facts.yml
- name: "1.1.2.5.2 | PATCH | Ensure nodev option set on /var/tmp partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_5_2
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.5.2
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var/tmp"
required_option: nodev
notify: &mount_option_notify
- "Remount {{ mount_point }}"
ansible.builtin.set_fact: &mount_option_set_fact
prelim_mount_point_fs_and_options: |
{{ prelim_mount_point_fs_and_options | combine({mount_point: {'options': (prelim_mount_point_fs_and_options[mount_point]['options'] + [required_option])}}, recursive=True) }}
changed_when: &mount_option_changed_when
- required_option not in prelim_mount_point_fs_and_options[mount_point]['original_options']
- name: "1.1.2.5.3 | PATCH | Ensure nosuid option set on /var/tmp partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_5_3
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.5.3
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var/tmp"
required_option: nosuid
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when
- name: "1.1.2.5.4 | PATCH | Ensure noexec option set on /var/tmp partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_5_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.5.4
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var/tmp"
required_option: noexec
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when

View file

@ -1,95 +0,0 @@
---
- name: "1.1.2.6.1 | PATCH | Ensure /var/log is a separate partition"
when:
- rhel9cis_rule_1_1_2_6_1
- required_mount not in prelim_mount_names
tags:
- level1-server
- level1-workstation
- audit
- mounts
- rule_1.1.2.6.1
- NIST800-53R5_CM-7
vars:
warn_control_id: '1.1.2.6.1'
required_mount: '/var/log'
block:
- name: "1.1.2.6.1 | AUDIT | Ensure /var/log is a separate partition | check for mount"
ansible.builtin.command: findmnt -kn "{{ required_mount }}"
changed_when: false
failed_when: discovered_var_log_mount.rc not in [ 0, 1 ]
register: discovered_var_log_mount
- name: "1.1.2.6.1 | AUDIT | Ensure /var/log is a separate partition | Absent"
when: discovered_var_log_mount is undefined
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} is not mounted on a separate partition"
- name: "1.1.2.6.1 | AUDIT | Ensure /var/log is a separate partition | Present"
when: discovered_var_log_mount is undefined
ansible.builtin.import_tasks:
file: warning_facts.yml
- name: "1.1.2.6.2 | PATCH | Ensure nodev option set on /var/log partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_6_2
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.6.2
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var/log"
required_option: nodev
notify: &mount_option_notify
- "Remount {{ mount_point }}"
ansible.builtin.set_fact: &mount_option_set_fact
prelim_mount_point_fs_and_options: |
{{ prelim_mount_point_fs_and_options | combine({mount_point: {'options': (prelim_mount_point_fs_and_options[mount_point]['options'] + [required_option])}}, recursive=True) }}
changed_when: &mount_option_changed_when
- required_option not in prelim_mount_point_fs_and_options[mount_point]['original_options']
- name: "1.1.2.6.3 | PATCH | Ensure nosuid option set on /var/log partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_6_3
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.6.3
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var/log"
required_option: nosuid
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when
- name: "1.1.2.6.4 | PATCH | Ensure noexec option set on /var/log partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_6_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.6.4
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var/log"
required_option: noexec
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when

View file

@ -1,95 +0,0 @@
---
- name: "1.1.2.7.1 | PATCH | Ensure /var/log/audit is a separate partition"
when:
- rhel9cis_rule_1_1_2_7_1
- required_mount not in prelim_mount_names
tags:
- level1-server
- level1-workstation
- audit
- mounts
- rule_1.1.2.7.1
- NIST800-53R5_CM-7
vars:
warn_control_id: '1.1.2.7.1'
required_mount: '/var/log/audit'
block:
- name: "1.1.2.7.1 | AUDIT | Ensure /var/log/audit is a separate partition | check for mount"
ansible.builtin.command: findmnt -kn "{{ required_mount }}"
changed_when: false
failed_when: discovered_var_log_audit_mount.rc not in [ 0, 1 ]
register: discovered_var_log_audit_mount
- name: "1.1.2.7.1 | AUDIT | Ensure /var/log/audit is a separate partition | Absent"
when: discovered_var_log_audit_mount is undefined
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} is not mounted on a separate partition"
- name: "1.1.2.7.1 | AUDIT | Ensure /var/log/audit is a separate partition | Present"
when: discovered_var_log_audit_mount is undefined
ansible.builtin.import_tasks:
file: warning_facts.yml
- name: "1.1.2.7.2 | PATCH | Ensure nodev option set on /var/log/audit partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_7_2
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.7.2
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var/log/audit"
required_option: nodev
notify: &mount_option_notify
- "Remount {{ mount_point }}"
ansible.builtin.set_fact: &mount_option_set_fact
prelim_mount_point_fs_and_options: |
{{ prelim_mount_point_fs_and_options | combine({mount_point: {'options': (prelim_mount_point_fs_and_options[mount_point]['options'] + [required_option])}}, recursive=True) }}
changed_when: &mount_option_changed_when
- required_option not in prelim_mount_point_fs_and_options[mount_point]['original_options']
- name: "1.1.2.7.3 | PATCH | Ensure nosuid option set on /var/log/audit partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_7_3
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.7.3
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var/log/audit"
required_option: nosuid
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when
- name: "1.1.2.7.4 | PATCH | Ensure noexec option set on /var/log/audit partition"
when:
- prelim_mount_point_fs_and_options[mount_point] is defined
- rhel9cis_rule_1_1_2_7_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.7.4
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
mount_point: "/var/log/audit"
required_option: noexec
notify: *mount_option_notify
ansible.builtin.set_fact:
<<: *mount_option_set_fact
changed_when: *mount_option_changed_when

View file

@ -0,0 +1,81 @@
---
- name: "1.1.2.1 | PATCH | Ensure /tmp is a separate partition"
block:
- name: "1.1.2.1 | PATCH | Ensure /tmp is a separate partition | Absent"
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} doesn't exist. This is a manual task"
- name: "1.1.2.1 | PATCH | Ensure /tmp is a separate partition | Present"
ansible.builtin.import_tasks: warning_facts.yml
vars:
warn_control_id: '1.1.2.1'
required_mount: '/tmp'
when:
- required_mount not in mount_names
- rhel9cis_rule_1_1_2_1
tags:
- level1-server
- level1-workstation
- audit
- mounts
- rule_1.1.2.1
# via fstab
- name: |
"1.1.2.2 | PATCH | Ensure nodev option set on /tmp partition"
"1.1.2.3 | PATCH | Ensure noexec option set on /tmp partition"
"1.1.2.4 | PATCH | Ensure nosuid option set on /tmp partition"
ansible.posix.mount:
name: /tmp
src: "{{ item.device }}"
fstype: "{{ item.fstype }}"
state: present
opts: defaults,{% if rhel9cis_rule_1_1_2_2 %}nodev,{% endif %}{% if rhel9cis_rule_1_1_2_3 %}noexec,{% endif %}{% if rhel9cis_rule_1_1_2_4 %}nosuid{% endif %}
notify: Remount tmp
loop: "{{ ansible_mounts }}"
loop_control:
label: "{{ item.device }}"
when:
- item.mount == "/tmp"
- not rhel9cis_tmp_svc
- rhel9cis_rule_1_1_2_2 or
rhel9cis_rule_1_1_2_3 or
rhel9cis_rule_1_1_2_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.2
- rule_1.1.2.3
- rule_1.1.2.4
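# Illustrative example (not from the role): the inline Jinja expression in the fstab task above renders
# the mount options from the enabled rule toggles, for instance:
#   rhel9cis_rule_1_1_2_2/3/4 all true -> opts: defaults,nodev,noexec,nosuid
#   only rhel9cis_rule_1_1_2_2 true    -> opts: defaults,nodev,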
# via systemd
- name: |
"1.1.2.1 | PATCH | Ensure /tmp is configured"
"1.1.2.2 | PATCH | Ensure nodev option set on /tmp partition"
"1.1.2.3 | PATCH | Ensure noexec option set on /tmp partition"
"1.1.2.4 | PATCH | Ensure nosuid option set on /tmp partition"
ansible.builtin.template:
src: etc/systemd/system/tmp.mount.j2
dest: /etc/systemd/system/tmp.mount
owner: root
group: root
mode: 0644
notify: Systemd restart tmp.mount
when:
- rhel9cis_tmp_svc
- rhel9cis_rule_1_1_2_1 or
rhel9cis_rule_1_1_2_2 or
rhel9cis_rule_1_1_2_3 or
rhel9cis_rule_1_1_2_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.2.1
- rule_1.1.2.2
- rule_1.1.2.3
- rule_1.1.2.4

View file

@ -0,0 +1,49 @@
---
- name: "1.1.3.1 | AUDIT | Ensure separate partition exists for /var"
block:
- name: "1.1.3.1 | AUDIT | Ensure separate partition exists for /var | Absent"
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} doesn't exist. This is a manual task"
- name: "1.1.3.1 | AUDIT | Ensure separate partition exists for /var | Present"
ansible.builtin.import_tasks: warning_facts.yml
vars:
warn_control_id: '1.1.3.1'
required_mount: '/var'
when:
- required_mount not in mount_names
- rhel9cis_rule_1_1_3_1
tags:
- level2-server
- level2-workstation
- patch
- mounts
- rule_1.1.3.1
# skips if mount is absent
- name: |
"1.1.3.2 | PATCH | Ensure nodev option set on /var partition"
"1.1.3.3 | PATCH | Ensure nosuid option set on /var partition"
ansible.builtin.mount:
name: /var
src: "{{ item.device }}"
fstype: "{{ item.fstype }}"
state: present
opts: defaults,{% if rhel9cis_rule_1_1_3_2 %}nodev,{% endif %}{% if rhel9cis_rule_1_1_3_3 %}nosuid,{% endif %}
loop: "{{ ansible_mounts }}"
loop_control:
label: "{{ item.device }}"
notify: Change_requires_reboot
when:
- item.mount == "/var"
- rhel9cis_rule_1_1_3_2 or
rhel9cis_rule_1_1_3_3
tags:
- level1-server
- level1-workstation
- patch
- mounts
- skip_ansible_lint
- rule_1.1.3.2
- rule_1.1.3.3

View file

@ -0,0 +1,53 @@
---
# Skips if mount is absent
- name: "1.1.4.1 | AUDIT | Ensure separate partition exists for /var/tmp"
block:
- name: "1.1.4.1 | AUDIT | Ensure separate partition exists for /var/tmp | Absent"
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} doesn't exist. This is a manual task"
- name: "1.1.4.1 | AUDIT | Ensure separate partition exists for /var/tmp | Present"
ansible.builtin.import_tasks: warning_facts.yml
vars:
warn_control_id: '1.1.4.1'
required_mount: '/var/tmp'
when:
- required_mount not in mount_names
- rhel9cis_rule_1_1_4_1
tags:
- level2-server
- level2-workstation
- audit
- mounts
- rule_1.1.4.1
# skips if mount is absent
- name: |
"1.1.4.2 | PATCH | Ensure noexec option set on /var/tmp partition"
"1.1.4.3 | PATCH | Ensure nosuid option set on /var/tmp partition"
"1.1.4.4 | PATCH | Ensure nodev option set on /var/tmp partition"
ansible.builtin.mount:
name: /var/tmp
src: "{{ item.device }}"
fstype: "{{ item.fstype }}"
state: present
opts: defaults,{% if rhel9cis_rule_1_1_4_2 %}noexec,{% endif %}{% if rhel9cis_rule_1_1_4_3 %}nosuid,{% endif %}{% if rhel9cis_rule_1_1_4_4 %}nodev{% endif %}
loop: "{{ ansible_mounts }}"
loop_control:
label: "{{ item.device }}"
notify: Change_requires_reboot
when:
- item.mount == "/var/tmp"
- rhel9cis_rule_1_1_4_2 or
rhel9cis_rule_1_1_4_3 or
rhel9cis_rule_1_1_4_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- skip_ansible_lint
- rule_1.1.4.2
- rule_1.1.4.3
- rule_1.1.4.4

View file

@ -0,0 +1,53 @@
---
- name: "1.1.5.1 | AUDIT | Ensure separate partition exists for /var/log"
block:
- name: "1.1.5.1 | AUDIT | Ensure separate partition exists for /var/log | Absent"
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} doesn't exist. This is a manual task"
- name: "1.1.5.1 | AUDIT | Ensure separate partition exists for /var/log | Present"
ansible.builtin.import_tasks: warning_facts.yml
vars:
warn_control_id: '1.1.5.1'
required_mount: '/var/log'
when:
- required_mount not in mount_names
- rhel9cis_rule_1_1_5_1
tags:
- level2-server
- level2-workstation
- audit
- mounts
- rule_1.1.5.1
- skip_ansible_lint
- name: |
"1.1.5.2 | PATCH | Ensure nodev option set on /var/log partition"
"1.1.5.3 | PATCH | Ensure noexec option set on /var/log partition"
"1.1.5.4 | PATCH | Ensure nosuid option set on /var/log partition"
ansible.builtin.mount:
name: /var/log
src: "{{ item.device }}"
fstype: "{{ item.fstype }}"
state: present
opts: defaults,{% if rhel9cis_rule_1_1_5_2 %}nodev,{% endif %}{% if rhel9cis_rule_1_1_5_3 %}noexec,{% endif %}{% if rhel9cis_rule_1_1_5_4 %}nosuid{% endif %}
loop: "{{ ansible_mounts }}"
loop_control:
label: "{{ item.device }}"
notify: Change_requires_reboot
when:
- item.mount == "/var/log"
- rhel9cis_rule_1_1_5_2 or
rhel9cis_rule_1_1_5_3 or
rhel9cis_rule_1_1_5_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- skip_ansible_lint
- rule_1.1.5.2
- rule_1.1.5.3
- rule_1.1.5.4

View file

@ -0,0 +1,52 @@
---
- name: "1.1.6.1 | AUDIT | Ensure separate partition exists for /var/log/audit"
block:
- name: "1.1.6.1 | AUDIT | Ensure separate partition exists for /var/log/audit | Absent"
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} doesn't exist. This is a manual task"
- name: "1.1.6.1 | AUDIT | Ensure separate partition exists for /var/log/audit | Present"
ansible.builtin.import_tasks: warning_facts.yml
vars:
warn_control_id: '1.1.6.1'
required_mount: '/var/log/audit'
when:
- required_mount not in mount_names
- rhel9cis_rule_1_1_6_1
tags:
- level2-server
- level2-workstation
- audit
- mounts
- rule_1.1.6.1
- name: |
"1.1.6.2 | PATCH | Ensure noexec option set on /var/log/audit partition"
"1.1.6.3 | PATCH | Ensure nodev option set on /var/log/audit partition"
"1.1.6.4 | PATCH | Ensure nosuid option set on /var/log/audit partition"
ansible.builtin.mount:
name: /var/log/audit
src: "{{ item.device }}"
fstype: "{{ item.fstype }}"
state: present
opts: defaults,{% if rhel9cis_rule_1_1_6_2 %}noexec,{% endif %}{% if rhel9cis_rule_1_1_6_3 %}nodev,{% endif %}{% if rhel9cis_rule_1_1_6_4 %}nosuid{% endif %}
loop: "{{ ansible_mounts }}"
loop_control:
label: "{{ item.device }}"
notify: Change_requires_reboot
when:
- item.mount == "/var/log/audit"
- rhel9cis_rule_1_1_6_2 or
rhel9cis_rule_1_1_6_3 or
rhel9cis_rule_1_1_6_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- skip_ansible_lint
- rule_1.1.6.2
- rule_1.1.6.3
- rule_1.1.6.4

View file

@ -0,0 +1,52 @@
---
- name: "1.1.7.1 | AUDIT | Ensure separate partition exists for /home"
block:
- name: "1.1.7.1 | AUDIT | Ensure separate partition exists for /home | Absent"
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} doesn't exist. This is a manual task"
- name: "1.1.7.1 | AUDIT | Ensure separate partition exists for /home | Present"
ansible.builtin.import_tasks: warning_facts.yml
vars:
warn_control_id: '1.1.7.1'
required_mount: '/home'
when:
- required_mount not in mount_names
- rhel9cis_rule_1_1_7_1
tags:
- level2-server
- level2-workstation
- audit
- mounts
- rule_1.1.7.1
- skip_ansible_lint
- name: |
"1.1.7.2 | PATCH | Ensure nodev option set on /home partition
1.1.7.3 | PATCH | Ensure nosuid option set on /home partition"
ansible.builtin.mount:
name: /home
src: "{{ item.device }}"
fstype: "{{ item.fstype }}"
state: present
opts: defaults,{% if rhel9cis_rule_1_1_7_2 %}nodev,{% endif %}{% if rhel9cis_rule_1_1_7_3 %}nosuid,{% endif %}
loop: "{{ ansible_mounts }}"
loop_control:
label: "{{ item.device }}"
notify: Change_requires_reboot
when:
- item.mount == "/home"
- rhel9cis_rule_1_1_7_1
- rhel9cis_rule_1_1_7_2 or
rhel9cis_rule_1_1_7_3
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.7.2
- rule_1.1.7.3
- rule_1.1.7.4
- skip_ansible_lint

View file

@ -0,0 +1,49 @@
---
# Skips if mount is absent
- name: "1.1.8.1 | AUDIT | Ensure /dev/shm is a separate partition"
block:
- name: "1.1.8.1 | AUDIT | Ensure /dev/shm is a separate partition | Absent"
ansible.builtin.debug:
msg: "Warning!! {{ required_mount }} doesn't exist. This is a manual task"
- name: "1.1.8.1 | AUDIT | Ensure /dev/shm is a separate partition | Present"
ansible.builtin.import_tasks: warning_facts.yml
vars:
warn_control_id: '1.1.8.1'
required_mount: '/dev/shm'
when:
- required_mount not in mount_names
- rhel9cis_rule_1_1_8_1
tags:
- level1-server
- level1-workstation
- audit
- mounts
- rule_1.1.8.1
- skip_ansible_lint
- name: |
"1.1.8.2 | PATCH | Ensure nodev option set on /dev/shm partition | Set nodev option
1.1.8.3 | PATCH | Ensure noexec option set on /dev/shm partition | Set nosuid option
1.1.8.4 | PATCH | Ensure nosuid option set on /dev/shm partition | Set noexec option"
ansible.posix.mount:
name: /dev/shm
src: tmpfs
fstype: tmpfs
state: mounted
opts: defaults,{% if rhel9cis_rule_1_1_8_2 %}nodev,{% endif %}{% if rhel9cis_rule_1_1_8_3 %}noexec,{% endif %}{% if rhel9cis_rule_1_1_8_4 %}nosuid{% endif %}
notify: Change_requires_reboot
when:
- rhel9cis_rule_1_1_8_2 or
rhel9cis_rule_1_1_8_3 or
rhel9cis_rule_1_1_8_4
tags:
- level1-server
- level1-workstation
- patch
- mounts
- rule_1.1.8.2
- rule_1.1.8.3
- rule_1.1.8.4

View file

@ -0,0 +1,36 @@
---
- name: "1.1.9 | PATCH | Disable USB Storage"
block:
- name: "1.1.9 | PATCH | Disable USB Storage | Edit modprobe config"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/CIS.conf
regexp: "^(#)?install usb-storage(\\s|$)"
line: "install usb-storage /bin/true"
create: true
owner: root
group: root
mode: 0600
- name: "1.1.9 | PATCH | Disable USB Storage | Edit modprobe config"
community.general.modprobe:
name: usb-storage
state: absent
when: not system_is_container
- name: "1.1.9 | PATCH | Disable USB Storage | blacklist"
ansible.builtin.lineinfile:
path: /etc/modprobe.d/blacklist.conf
regexp: "^(#)?blacklist usb-storage(\\s|$)"
line: "blacklist usb-storage"
create: true
mode: 0600
when:
- rhel9cis_rule_1_1_9
tags:
- level1-server
- level2-workstation
- patch
- mounts
- removable_storage
- rule_1.1.9

View file

@ -0,0 +1,16 @@
---
- name: "1.10 | PATCH | Ensure system-wide crypto policy is not legacy"
ansible.builtin.shell: |
update-crypto-policies --set "{{ rhel9cis_crypto_policy }}"
update-crypto-policies
notify: Change_requires_reboot
when:
- rhel9cis_rule_1_10
- system_wide_crypto_policy['stdout'] == 'LEGACY'
tags:
- level1-server
- level1-workstation
- no system_is_ec2
- patch
- rule_1.10

View file

@ -1,122 +0,0 @@
---
- name: "1.2.1.1 | AUDIT | Ensure GPG keys are configured"
when:
- rhel9cis_rule_1_2_1_1
- ansible_facts.distribution == "RedHat" or
ansible_facts.distribution == "Rocky" or
ansible_facts.distribution == "AlmaLinux"
tags:
- level1-server
- level1-workstation
- manual
- patch
- rule_1.2.1.1
- NIST800-53R5_SI-2
block:
- name: "1.2.1.1 | AUDIT | Ensure GPG keys are configured | List installed pubkey keys"
ansible.builtin.shell: "rpm -qa | grep {{ os_gpg_key_pubkey_name }}" # noqa command-instead-of-module
changed_when: false
failed_when: false
register: discovered_os_installed_pub_keys
- name: "1.2.1.1 | AUDIT | Ensure GPG keys are configured | Query found keys"
ansible.builtin.shell: |
'rpm -q --queryformat "%{PACKAGER} %{VERSION}\\n" {{ os_gpg_key_pubkey_name }} | grep "{{ os_gpg_key_pubkey_content }}"'
changed_when: false
failed_when: false
register: discovered_os_gpg_key_check
- name: "1.2.1.1 | AUDIT | Ensure GPG keys are configured | If expected keys fail"
when:
- discovered_os_installed_pub_keys.rc == 1 or
discovered_os_gpg_key_check.rc == 1
ansible.builtin.fail:
msg: Installed GPG Keys do not meet expected values or expected keys are not installed
- name: "1.2.1.2 | PATCH | Ensure gpgcheck is globally activated"
when: rhel9cis_rule_1_2_1_2
tags:
- level1-server
- level1-workstation
- patch
- rule_1.2.1.2
- NIST800-53R5_SI-2
block:
- name: "1.2.1.2 | AUDIT | Ensure gpgcheck is globally activated | Find repos"
ansible.builtin.find:
paths: /etc/yum.repos.d
patterns: "*.repo"
register: discovered_yum_repos
- name: "1.2.1.2 | PATCH | Ensure gpgcheck is globally activated | Update yum.repos"
ansible.builtin.replace:
name: "{{ item.path }}"
regexp: ^gpgcheck\s*=\s*0
replace: "gpgcheck=1"
loop: "{{ discovered_yum_repos.files }}"
loop_control:
label: "{{ item.path }}"
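# Illustrative example (not from the role): the replace task above only needs to normalise a disabled
# gpgcheck; given a hypothetical repo file, lines would be handled as:
#   'gpgcheck = 0' -> 'gpgcheck=1'   (matches ^gpgcheck\s*=\s*0)
#   'gpgcheck=1'   -> unchanged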
- name: "1.2.1.3 | AUDIT | Ensure repo_gpgcheck is globally activated"
when:
- rhel9cis_rule_1_2_1_3
- rhel9cis_rule_enable_repogpg
- not rhel9cis_rhel_default_repo
tags:
- level1-server
- level1-workstation
- manual
- audit
- rule_1.2.1.3
- NIST800-53R5_SI-2
block:
- name: "1.2.1.3 | PATCH | Ensure repo_gpgcheck is globally activated | dnf.conf"
ansible.builtin.lineinfile:
path: /etc/dnf/dnf.conf
regexp: '^repo_gpgcheck'
line: repo_gpgcheck=1
- name: "1.2.1.3 | AUDIT| Ensure repo_gpgcheck is globally activated | get repo files"
ansible.builtin.find:
paths: /etc/yum.repos.d
patterns: "*.repo"
register: discovered_repo_files
- name: "1.2.1.3 | PATCH | Ensure repo_gpgcheck is globally activated | amend repo files"
ansible.builtin.replace:
path: "{{ item.path }}"
regexp: ^repo_gpgcheck\s*=s*0
replace: repo_gpgcheck=1
loop: "{{ discovered_repo_files.files }}"
loop_control:
label: "{{ item.path }}"
- name: "1.2.1.4 | AUDIT | Ensure package manager repositories are configured"
when: rhel9cis_rule_1_2_1_4
tags:
- level1-server
- level1-workstation
- manual
- audit
- rule_1.2.1.4
- NIST800-53R5_SI-2
vars:
warn_control_id: '1.2.1.4'
block:
- name: "1.2.1.4 | AUDIT | Ensure package manager repositories are configured | Get repo list"
ansible.builtin.command: dnf repolist
changed_when: false
failed_when: false
check_mode: false
register: discovered_dnf_configured
- name: "1.2.1.4 | AUDIT | Ensure package manager repositories are configured | Display repo list"
ansible.builtin.debug:
msg:
- "Warning!! Below are the configured repos. Please review and make sure all align with site policy"
- "{{ discovered_dnf_configured.stdout_lines }}"
- name: "1.2.1.4 | AUDIT | Ensure package manager repositories are configured | Warn Count"
ansible.builtin.import_tasks:
file: warning_facts.yml


@ -1,16 +0,0 @@
---
- name: "1.2.2.1 | PATCH | Ensure updates, patches, and additional security software are installed"
when:
- rhel9cis_rule_1_2_2_1
- not system_is_ec2
tags:
- level1-server
- level1-workstation
- patch
- rule_1.2.2.1
- NIST800-53R5_SI-2
ansible.builtin.package:
name: "*"
state: latest
notify: Change_requires_reboot


@ -0,0 +1,121 @@
---
- name: "1.2.1 | AUDIT | Ensure GPG keys are configured"
block:
- name: "1.2.1 | AUDIT | Ensure GPG keys are configured | list installed pubkey keys"
ansible.builtin.shell: "rpm -qa | grep {{ os_gpg_key_pubkey_name }}"
changed_when: false
failed_when: false
register: os_installed_pub_keys
- name: "1.2.1 | AUDIT | Ensure GPG keys are configured | Query found keys"
ansible.builtin.shell: 'rpm -q --queryformat "%{PACKAGER} %{VERSION}\\n" {{ os_gpg_key_pubkey_name }} | grep "{{ os_gpg_key_pubkey_content }}"'
changed_when: false
failed_when: false
register: os_gpg_key_check
when: os_installed_pub_keys.rc == 0
- name: "1.2.1 | AUDIT | Ensure GPG keys are configured | expected keys fail"
ansible.builtin.fail:
msg: Installed GPG Keys do not meet expected values or keys installed that are not expected
when:
- os_installed_pub_keys.rc == 1 or
os_gpg_key_check.rc == 1
when:
- rhel9cis_rule_1_2_1
- ansible_distribution == "RedHat" or
ansible_distribution == "Rocky" or
ansible_distribution == "AlmaLinux"
tags:
- level1-server
- level1-workstation
- manual
- patch
- rule_1.2.1
- name: "1.2.2 | PATCH | Ensure gpgcheck is globally activated"
block:
- name: "1.2.2 | AUDIT | Ensure gpgcheck is globally activated | Find repos"
ansible.builtin.find:
paths: /etc/yum.repos.d
patterns: "*.repo"
register: yum_repos
- name: "1.2.2 | PATCH | Ensure gpgcheck is globally activated | Update yum.repos"
ansible.builtin.replace:
name: "{{ item.path }}"
regexp: "^gpgcheck=0"
replace: "gpgcheck=1"
loop: "{{ yum_repos.files }}"
loop_control:
label: "{{ item.path }}"
when:
- rhel9cis_rule_1_2_2
tags:
- level1-server
- level1-workstation
- patch
- rule_1.2.2
- name: "1.2.3 | AUDIT | Ensure package manager repositories are configured"
block:
- name: "1.2.3 | AUDIT | Ensure package manager repositories are configured | Get repo list"
ansible.builtin.shell: dnf repolist
changed_when: false
failed_when: false
register: dnf_configured
check_mode: false
- name: "1.2.3 | AUDIT | Ensure package manager repositories are configured | Display repo list"
ansible.builtin.debug:
msg:
- "Warning!! Below are the configured repos. Please review and make sure all align with site policy"
- "{{ dnf_configured.stdout_lines }}"
- name: "1.2.3 | AUDIT | Ensure package manager repositories are configured | Warn Count"
ansible.builtin.import_tasks: warning_facts.yml
vars:
warn_control_id: '1.2.3'
when:
- rhel9cis_rule_1_2_3
tags:
- level1-server
- level1-workstation
- manual
- audit
- rule_1.2.3
- skip_ansible_lint
- name: "1.2.4 | AUDIT | Ensure repo_gpgcheck is globally activated"
block:
- name: "1.2.4 | PATCH | Ensure repo_gpgcheck is globally activated | dnf.conf"
ansible.builtin.lineinfile:
path: /etc/dnf/dnf.conf
regexp: '^repo_gpgcheck'
line: repo_gpgcheck=1
- name: "1.2.4 | AUDIT| Ensure repo_gpgcheck is globally activated | get repo files"
ansible.builtin.find:
paths: /etc/yum.repos.d
patterns: "*.repo"
register: repo_files
- name: "1.2.4 | PATCH | Ensure repo_gpgcheck is globally activated | amend repo files"
ansible.builtin.replace:
path: "{{ item.path }}"
regexp: '^repo_gpgcheck( |)=( |)0'
replace: repo_gpgcheck=1
loop: "{{ repo_files.files }}"
loop_control:
label: "{{ item.path }}"
when:
- rhel9cis_rule_1_2_4
- not rhel9cis_rhel_default_repo or ansible_distribution != 'RedHat'
- ansible_distribution != 'OracleLinux'
tags:
- level1-server
- level1-workstation
- manual
- audit
- rule_1.2.4
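Both versions of the GPG key check above lean on rpm query strings rather than an Ansible module. A self-contained sketch of the same probe, using the gpg-pubkey pseudo-package purely to illustrate the query format (variable and task names are assumptions):

- name: "1.2.1 | AUDIT | List imported release GPG keys (illustrative)"
  ansible.builtin.command: rpm -q --queryformat "%{PACKAGER} %{VERSION}\n" gpg-pubkey   # noqa command-instead-of-module
  changed_when: false
  failed_when: false
  register: discovered_release_gpg_keys   # assumed variable name

- name: "1.2.1 | AUDIT | Show what was found (illustrative)"
  ansible.builtin.debug:
    msg: "{{ discovered_release_gpg_keys.stdout_lines }}"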


@ -1,150 +0,0 @@
---
- name: "1.3.1.1 | PATCH | Ensure SELinux is installed"
when:
- rhel9cis_rule_1_3_1_1
- not rhel9cis_selinux_disable
tags:
- level1-server
- level1-workstation
- patch
- rule_1.3.1.1
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.package:
name: libselinux
state: present
- name: "1.3.1.2 | PATCH | Ensure SELinux is not disabled in bootloader configuration"
when:
- rhel9cis_rule_1_3_1_2
- not rhel9cis_selinux_disable
tags:
- level1-server
- level1-workstation
- scored
- patch
- rule_1.3.1.2
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.replace:
path: /etc/default/grub
regexp: '{{ item }}'
replace: ''
loop:
- selinux=0
- enforcing=0
ignore_errors: true # noqa ignore-errors
notify: Grub2cfg
# State set to enforcing because control 1.3.1.5 requires enforcing to be set
- name: "1.3.1.3 | PATCH | Ensure SELinux policy is configured"
when:
- rhel9cis_rule_1_3_1_3
- not rhel9cis_selinux_disable
tags:
- level1-server
- level1-workstation
- selinux
- patch
- rule_1.3.1.3
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.posix.selinux:
conf: /etc/selinux/config
policy: "{{ rhel9cis_selinux_pol }}"
state: "{{ rhel9cis_selinux_enforce }}"
- name: "1.3.1.4 | PATCH | Ensure the SELinux state is not disabled"
when:
- rhel9cis_rule_1_3_1_4
- not rhel9cis_selinux_disable
tags:
- level1-server
- level1-workstation
- selinux
- patch
- rule_1.3.1.4
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.posix.selinux:
conf: /etc/selinux/config
policy: "{{ rhel9cis_selinux_pol }}"
state: "{{ rhel9cis_selinux_enforce }}"
- name: "1.3.1.5 | PATCH | Ensure the SELinux state is enforcing"
when:
- rhel9cis_selinux_enforce == 'enforcing'
- rhel9cis_rule_1_3_1_5
- not rhel9cis_selinux_disable
tags:
- level2-server
- level2-workstation
- selinux
- patch
- rule_1.3.1.5
- NIST800-53R4_AC-3
- NIST800-53R4_SI-6
ansible.posix.selinux:
conf: /etc/selinux/config
policy: "{{ rhel9cis_selinux_pol }}"
state: enforcing
- name: "1.3.1.6 | AUDIT | Ensure no unconfined services exist"
when:
- rhel9cis_rule_1_3_1_6
- not rhel9cis_selinux_disable
tags:
- level1-server
- level1-workstation
- audit
- services
- rule_1.3.1.6
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
vars:
warn_control_id: '1.3.1.6'
block:
- name: "1.3.1.6 | AUDIT | Ensure no unconfined services exist | Find the unconfined services"
ansible.builtin.shell: ps -eZ | grep unconfined_service_t | grep -Evw "tr|ps|egrep|bash|awk" | tr ':' ' ' | awk '{ print $NF }'
register: discovered_unconf_services
failed_when: false
changed_when: false
- name: "1.3.1.6 | AUDIT | Ensure no unconfined services exist | Message on unconfined services"
when: discovered_unconf_services.stdout | length > 0
ansible.builtin.debug:
msg: "Warning!! You have unconfined services: {{ discovered_unconf_services.stdout_lines }}"
- name: "1.3.1.6 | AUDIT | Ensure no unconfined services exist | warning count"
when: discovered_unconf_services.stdout | length > 0
ansible.builtin.import_tasks:
file: warning_facts.yml
- name: "1.3.1.7 | PATCH | Ensure the MCS Translation Service (mcstrans) is not installed"
when: rhel9cis_rule_1_3_1_7
tags:
- level1-server
- level1-workstation
- patch
- rule_1.3.1.7
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.package:
name: mcstrans
state: absent
- name: "1.3.1.8 | PATCH | Ensure SETroubleshoot is not installed"
when:
- rhel9cis_rule_1_3_1_8
- "'setroubleshoot' in ansible_facts.packages"
tags:
- level1-server
- selinux
- patch
- rule_1.3.1.8
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.package:
name: setroubleshoot
state: absent


@ -0,0 +1,78 @@
---
- name: "1.3.1 | PATCH | Ensure AIDE is installed"
block:
- name: "1.3.1 | PATCH | Ensure AIDE is installed | Install AIDE"
ansible.builtin.package:
name: aide
state: present
- name: "1.3.1 | PATCH | Ensure AIDE is installed | Build AIDE DB"
ansible.builtin.shell: /usr/sbin/aide --init
changed_when: false
failed_when: false
async: 45
poll: 0
args:
creates: /var/lib/aide/aide.db.new.gz
when: not ansible_check_mode
- name: "1.3.1 | PATCH | Ensure AIDE is installed | copy AIDE DB"
ansible.builtin.copy:
src: /var/lib/aide/aide.db.new.gz
dest: /var/lib/aide/aide.db.gz
remote_src: true
when:
- rhel9cis_config_aide
- rhel9cis_rule_1_3_1
tags:
- level1-server
- level1-workstation
- aide
- patch
- rule_1.3.1
- name: "1.3.2 | PATCH | Ensure filesystem integrity is regularly checked"
ansible.builtin.cron:
name: Run AIDE integrity check
cron_file: "{{ rhel9cis_aide_cron['cron_file'] }}"
user: "{{ rhel9cis_aide_cron['cron_user'] }}"
minute: "{{ rhel9cis_aide_cron['aide_minute'] | default('0') }}"
hour: "{{ rhel9cis_aide_cron['aide_hour'] | default('5') }}"
day: "{{ rhel9cis_aide_cron['aide_day'] | default('*') }}"
month: "{{ rhel9cis_aide_cron['aide_month'] | default('*') }}"
weekday: "{{ rhel9cis_aide_cron['aide_weekday'] | default('*') }}"
job: "{{ rhel9cis_aide_cron['aide_job'] }}"
when:
- rhel9cis_rule_1_3_2
- not system_is_ec2
tags:
- level1-server
- level1-workstation
- aide
- file_integrity
- patch
- rule_1.3.2
- name: "1.3.3 | Ensure cryptographic mechanisms are used to protect the integrity of audit tools"
ansible.builtin.blockinfile:
path: /etc/aide.conf
marker: "# {mark} Audit tools - CIS benchmark - Ansible-lockdown"
block: |
/sbin/auditctl p+i+n+u+g+s+b+acl+xattrs+sha512
/sbin/auditd p+i+n+u+g+s+b+acl+xattrs+sha512
/sbin/augenrules p+i+n+u+g+s+b+acl+xattrs+sha512
/sbin/aureport p+i+n+u+g+s+b+acl+xattrs+sha512
/sbin/ausearch p+i+n+u+g+s+b+acl+xattrs+sha512
/sbin/autrace p+i+n+u+g+s+b+acl+xattrs+sha512
validate: aide -D --config %s
when:
- rhel9cis_rule_1_3_2
- not system_is_ec2
tags:
- level1-server
- level1-workstation
- aide
- file_integrity
- patch
- rule_1.3.3
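The "Build AIDE DB" step above runs aide --init asynchronously with poll: 0 and the next task copies the database straight away, so on a slow host the copy can race the initialisation. One hedged way to make the copy deterministic, with the timeout value being an assumption, is to wait for the file the init step creates:

- name: "1.3.1 | PATCH | Ensure AIDE is installed | Wait for the new AIDE DB"   # illustrative task, not part of the diff above
  ansible.builtin.wait_for:
    path: /var/lib/aide/aide.db.new.gz
    timeout: 600   # assumed upper bound for aide --init to finish
  when: not ansible_check_mode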


@ -1,71 +1,44 @@
 ---
 - name: "1.4.1 | PATCH | Ensure bootloader password is set"
-  when:
-    - rhel9cis_set_boot_pass
-    - rhel9cis_rule_1_4_1
-  tags:
-    - level1-server
-    - level1-workstation
-    - grub
-    - patch
-    - rule_1.4.1
-    - NIST800-53R5_AC-3
   ansible.builtin.copy:
     dest: /boot/grub2/user.cfg
     content: "GRUB2_PASSWORD={{ rhel9cis_bootloader_password_hash }}"  # noqa template-instead-of-copy
     owner: root
     group: root
-    mode: 'go-rwx'
+    mode: 0600
   notify: Grub2cfg
+  when:
+    - rhel9cis_set_boot_pass
+    - rhel9cis_rule_1_4_1
+  tags:
+    - level1-server
+    - level1-workstation
+    - grub
+    - patch
+    - rule_1.4.1

 - name: "1.4.2 | PATCH | Ensure permissions on bootloader config are configured"
-  when: rhel9cis_rule_1_4_2
-  tags:
-    - level1-server
-    - level1-workstation
-    - grub
-    - patch
-    - rule_1.4.2
-    - NIST800-53R5_AC-3
   block:
-    - name: "1.4.2 | PATCH | Ensure permissions on bootloader config are configured | bios based system"
-      when: rhel9cis_legacy_boot
+    - name: "1.4.2 | PATCH | Ensure permissions on bootloader config are configured"
       ansible.builtin.file:
         path: "/boot/grub2/{{ item.path }}"
         owner: root
         group: root
         mode: "{{ item.mode }}"
         state: touch
         modification_time: preserve
         access_time: preserve
       loop:
-        - { path: 'grub.cfg', mode: 'u-x,go-rwx' }
-        - { path: 'grubenv', mode: 'u-x,go-rwx' }
-        - { path: 'user.cfg', mode: 'u-x,go-rwx' }
-    - name: "1.4.2 | PATCH | Ensure permissions on bootloader config are configured | efi based system"
-      when: not rhel9cis_legacy_boot
-      vars:
-        efi_mount_options: ['umask=0077', 'fmask=0077', 'uid=0', 'gid=0']
-      block:
-        - name: "1.4.2 | AUDIT | Ensure permissions on bootloader config are configured | efi based system | capture current state"
-          ansible.builtin.shell: grep "^[^#;]" /etc/fstab | grep '/boot/efi' | awk -F" " '{print $4}'
-          changed_when: false
-          check_mode: false
-          register: discovered_efi_fstab
-        - name: "1.4.2 | PATCH | Ensure permissions on bootloader config are configured | efi based system | Build Options"
-          when: item not in discovered_efi_fstab.stdout
-          ansible.builtin.set_fact:
-            efi_mount_opts_addition: "{{ efi_mount_opts_addition + ',' + item }}"
-          loop: "{{ efi_mount_options }}"
-        - name: "1.4.2 | PATCH | Ensure permissions on bootloader config are configured | efi based system | Add mount options"
-          when: efi_mount_opts_addition | length > 0
-          ansible.builtin.lineinfile:
-            path: /etc/fstab
-            regexp: (.*/boot/efi\s*\w*\s*){{ discovered_efi_fstab.stdout }}(.*)
-            line: \1{{ discovered_efi_fstab.stdout + efi_mount_opts_addition }}\2
-            backrefs: true
-          notify: Remount /boot/efi
+        - { path: 'grub.cfg', mode: '0700' }
+        - { path: 'grubenv', mode: '0600' }
+        - { path: 'user.cfg', mode: '0600' }
+  when:
+    - rhel9cis_rule_1_4_2
+  tags:
+    - level1-server
+    - level1-workstation
+    - grub
+    - patch
+    - rule_1.4.2


@ -1,66 +1,48 @@
 ---
-- name: "1.5.1 | PATCH | Ensure address space layout randomization (ASLR) is enabled"
-  when: rhel9cis_rule_1_5_1
-  tags:
-    - level1-server
-    - level1-workstation
-    - patch
-    - sysctl
-    - rule_1.5.1
-    - NIST800-53R5_CM-6
-    - NIST800-53R5_CM-6.1
-  block:
-    - name: "1.5.1 | PATCH | Ensure address space layout randomization (ASLR) is enabled"
-      ansible.builtin.set_fact:
-        rhel9cis_sysctl_update: true
-    - name: "1.5.1 | PATCH | Ensure address space layout randomization (ASLR) is enabled"
-      ansible.builtin.debug:
-        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-kernel_sysctl.conf"
-
-- name: "1.5.2 | PATCH | Ensure ptrace_scope is restricted"
-  when: rhel9cis_rule_1_5_2
-  tags:
-    - level1-server
-    - level1-workstation
-    - patch
-    - sysctl
-    - rule_1.5.2
-  block:
-    - name: "1.5.2 | PATCH | Ensure ptrace_scope is restricted"
-      ansible.builtin.set_fact:
-        rhel9cis_sysctl_update: true
-    - name: "1.5.2 | PATCH | Ensure ptrace_scope is restricted"
-      ansible.builtin.debug:
-        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-kernel_sysctl.conf"
-
-- name: "1.5.3 | PATCH | Ensure core dump backtraces are disabled"
-  when: rhel9cis_rule_1_5_3
-  tags:
-    - level1-server
-    - level1-workstation
-    - patch
-    - sysctl
-    - rule_1.5.3
-    - NIST800-53R5_CM-6b
-  ansible.builtin.lineinfile:
-    path: /etc/systemd/coredump.conf
-    regexp: '(?#)^ProcessSizeMax\s*=\s*.*[1-9].*$'
-    line: 'ProcessSizeMax=0'
-
-- name: "1.5.4 | PATCH | Ensure core dump storage is disabled"
-  when:
-    - rhel9cis_rule_1_5_4
-    - prelim_systemd_coredump.stat.exists
-  tags:
-    - level1-server
-    - level1-workstation
-    - patch
-    - rule_1.5.4
-  ansible.builtin.lineinfile:
-    path: /etc/systemd/coredump.conf
-    regexp: '^Storage\s*=\s*(?!none).*'
-    line: 'Storage=none'
-  notify: Systemd daemon reload
+- name: "1.5.1 | PATCH | Ensure core dump storage is disabled"
+  ansible.builtin.lineinfile:
+    path: /etc/systemd/coredump.conf
+    regexp: '^Storage\s*=\s*(?!none).*'
+    line: 'Storage=none'
+  notify: Systemd daemon reload
+  when:
+    - rhel9cis_rule_1_5_1
+    - systemd_coredump.stat.exists
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_1.5.1
+
+- name: "1.5.2 | PATCH | Ensure core dump backtraces are disabled"
+  ansible.builtin.lineinfile:
+    path: /etc/systemd/coredump.conf
+    regexp: '^ProcessSizeMax\s*=\s*.*[1-9]$'
+    line: 'ProcessSizeMax=0'
+  when:
+    - rhel9cis_rule_1_5_2
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - sysctl
+    - rule_1.5.2
+
+- name: "1.5.3 | PATCH | Ensure address space layout randomization (ASLR) is enabled"
+  block:
+    - name: "1.5.3 | PATCH | Ensure address space layout randomization (ASLR) is enabled"
+      ansible.builtin.set_fact:
+        rhel9cis_sysctl_update: true
+    - name: "1.5.3 | PATCH | Ensure address space layout randomization (ASLR) is enabled"
+      ansible.builtin.debug:
+        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-kernel_sysctl.conf"
+  when:
+    - rhel9cis_rule_1_5_3
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - sysctl
+    - rule_1.5.3


@ -0,0 +1,132 @@
---
- name: "1.6.1.1 | PATCH | Ensure SELinux is installed"
ansible.builtin.package:
name: libselinux
state: present
when:
- rhel9cis_rule_1_6_1_1
tags:
- level1-server
- level1-workstation
- patch
- rule_1.6.1.1
- name: "1.6.1.2 | PATCH | Ensure SELinux is not disabled in bootloader configuration"
ansible.builtin.replace:
path: /etc/default/grub
regexp: '{{ item }}'
replace: ''
loop:
- selinux=0
- enforcing=0
register: selinux_grub_patch
ignore_errors: true # noqa ignore-errors
notify: Grub2cfg
when:
- rhel9cis_rule_1_6_1_2
tags:
- level1-server
- level1-workstation
- scored
- patch
- rule_1.6.1.2
# State set to enforcing because control 1.6.1.5 requires enforcing to be set
- name: "1.6.1.3 | PATCH | Ensure SELinux policy is configured"
ansible.posix.selinux:
conf: /etc/selinux/config
policy: "{{ rhel9cis_selinux_pol }}"
state: "{{ rhel9cis_selinux_enforce }}"
when:
- not rhel9cis_selinux_disable
- rhel9cis_rule_1_6_1_3
tags:
- level1-server
- level1-workstation
- selinux
- patch
- rule_1.6.1.3
- name: "1.6.1.4 | PATCH | Ensure the SELinux state is not disabled"
ansible.posix.selinux:
conf: /etc/selinux/config
policy: "{{ rhel9cis_selinux_pol }}"
state: "{{ rhel9cis_selinux_enforce }}"
when:
- not rhel9cis_selinux_disable
- rhel9cis_rule_1_6_1_4
tags:
- level1-server
- level1-workstation
- selinux
- patch
- rule_1.6.1.4
- name: "1.6.1.5 | PATCH | Ensure the SELinux state is enforcing"
ansible.posix.selinux:
conf: /etc/selinux/config
policy: "{{ rhel9cis_selinux_pol }}"
state: enforcing
when:
- not rhel9cis_selinux_disable
- rhel9cis_selinux_enforce == 'enforcing'
- rhel9cis_rule_1_6_1_5
tags:
- level2-server
- level2-workstation
- selinux
- patch
- rule_1.6.1.5
- name: "1.6.1.6 | AUDIT | Ensure no unconfined services exist"
block:
- name: "1.6.1.6 | AUDIT | Ensure no unconfined services exist | Find the unconfined services"
ansible.builtin.shell: ps -eZ | grep unconfined_service_t | egrep -vw "tr|ps|egrep|bash|awk" | tr ':' ' ' | awk '{ print $NF }'
register: rhelcis_1_6_1_6_unconf_services
failed_when: false
changed_when: false
- name: "1.6.1.6 | AUDIT | Ensure no unconfined services exist | Message on unconfined services"
ansible.builtin.debug:
msg: "Warning!! You have unconfined services: {{ rhelcis_1_6_1_6_unconf_services.stdout_lines }}"
when: rhelcis_1_6_1_6_unconf_services.stdout | length > 0
- name: "1.6.1.6 | AUDIT | Ensure no unconfined services exist | warning count"
ansible.builtin.import_tasks: warning_facts.yml
when: rhelcis_1_6_1_6_unconf_services.stdout | length > 0
vars:
warn_control_id: '1.6.1.6'
when:
- rhel9cis_rule_1_6_1_6
tags:
- level1-server
- level1-workstation
- audit
- services
- rule_1.6.1.6
- name: "1.6.1.7 | PATCH | Ensure SETroubleshoot is not installed"
ansible.builtin.package:
name: setroubleshoot
state: absent
when:
- rhel9cis_rule_1_6_1_7
- "'setroubleshoot' in ansible_facts.packages"
tags:
- level1-server
- selinux
- patch
- rule_1.6.1.7
- name: "1.6.1.8 | PATCH | Ensure the MCS Translation Service (mcstrans) is not installed"
ansible.builtin.package:
name: mcstrans
state: absent
when:
- rhel9cis_rule_1_6_1_8
tags:
- level1-server
- level1-workstation
- patch
- rule_1.6.1.8
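The SELinux tasks above only manage /etc/selinux/config, which takes effect at the next reboot. A small audit sketch, with the registered variable invented here, can surface the currently running mode in the same warning style the role uses elsewhere:

- name: "1.6.1.5 | AUDIT | Capture the running SELinux mode (illustrative)"
  ansible.builtin.command: getenforce
  changed_when: false
  check_mode: false
  register: discovered_selinux_mode   # assumed variable name

- name: "1.6.1.5 | AUDIT | Warn if SELinux is not yet enforcing (illustrative)"
  ansible.builtin.debug:
    msg: "Warning!! SELinux is currently {{ discovered_selinux_mode.stdout }} and will only change at reboot"
  when: discovered_selinux_mode.stdout != 'Enforcing'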


@ -1,193 +0,0 @@
---
- name: "1.6.1 | AUDIT | Ensure system-wide crypto policy is not legacy"
when:
- rhel9cis_rule_1_6_1
- rhel9cis_crypto_policy_ansiblemanaged
tags:
- level1-server
- level1-workstation
- automated
- patch
- crypto
- rule_1.6.1
- NIST800-53R5_SC-6
ansible.builtin.debug:
msg: "Captured in prelim to ensure not LEGACY. Runs handler to update"
notify:
- Update Crypto Policy
- Set Crypto Policy
- name: "1.6.2 | PATCH | Ensure system wide crypto policy is not set in sshd configuration"
when: rhel9cis_rule_1_6_2
tags:
- level1-server
- level1-workstation
- sshd
- automated
- patch
- rule_1.6.2
- NIST800-53R5_SC-8
- NIST800-53R5_IA-5
- NIST800-53R5_AC-17
- NIST800-53R5_SC-6
ansible.builtin.lineinfile:
path: /etc/sysconfig/sshd
regexp: ^CRYPTO_POLICY\s*=
state: absent
notify: Restart sshd
- name: "1.6.3 | PATCH | Ensure system wide crypto policy disables sha1 hash and signature support | Add submodule exclusion"
when:
- rhel9cis_rule_1_6_3
- "'NO-SHA1' not in rhel9cis_crypto_policy_module"
- rhel9cis_crypto_policy_ansiblemanaged
tags:
- level1-server
- level1-workstation
- automated
- patch
- crypto
- rule_1.6.3
- NIST800-53R5_SC-6
block:
- name: "1.6.3 | PATCH | Ensure system wide crypto policy disables sha1 hash and signature support"
ansible.builtin.template:
src: etc/crypto-policies/policies/modules/NO-SHA1.pmod.j2
dest: /etc/crypto-policies/policies/modules/NO-SHA1.pmod
owner: root
group: root
mode: 'g-wx,o-rwx'
register: discovered_no_sha1_template
- name: "1.6.3 | PATCH | Ensure system wide crypto policy disables sha1 hash and signature support | submodule to crypto policy modules"
ansible.builtin.set_fact:
rhel9cis_crypto_policy_module: "{{ rhel9cis_crypto_policy_module + ':' + 'NO-SHA1' }}"
changed_when: discovered_no_sha1_template is changed # noqa: no-handler
notify:
- Update Crypto Policy
- Set Crypto Policy
- name: "1.6.4 | PATCH | Ensure system wide crypto policy disables macs less than 128 bits"
when:
- rhel9cis_rule_1_6_4
- "'NO-WEAKMAC' not in rhel9cis_crypto_policy_module"
- rhel9cis_crypto_policy_ansiblemanaged
tags:
- level1-server
- level1-workstation
- automated
- patch
- crypto
- rule_1.6.4
- NIST800-53R5_SC-6
block:
- name: "1.6.4 | PATCH | Ensure system wide crypto policy disables macs less than 128 bits | Add submodule exclusion"
ansible.builtin.template:
src: etc/crypto-policies/policies/modules/NO-WEAKMAC.pmod.j2
dest: /etc/crypto-policies/policies/modules/NO-WEAKMAC.pmod
owner: root
group: root
mode: 'g-wx,o-rwx'
register: discovered_no_weakmac_template
- name: "1.6.4 | PATCH | Ensure system wide crypto policy disables macs less than 128 bits | submodule to crypto policy modules"
ansible.builtin.set_fact:
rhel9cis_crypto_policy_module: "{{ rhel9cis_crypto_policy_module + ':' + 'NO-WEAKMAC' }}"
changed_when: discovered_no_weakmac_template is changed # noqa: no-handler
notify:
- Update Crypto Policy
- Set Crypto Policy
- name: "1.6.5 | PATCH | Ensure system wide crypto policy disables cbc for ssh"
when:
- rhel9cis_rule_1_6_5
- "'NO-SSHCBC' not in rhel9cis_crypto_policy_module"
- rhel9cis_crypto_policy_ansiblemanaged
tags:
- level1-server
- level1-workstation
- automated
- patch
- crypto
- rule_1.6.5
- NIST800-53R5_SC-6
block:
- name: "1.6.5 | PATCH | Ensure system wide crypto policy disables cbc for ssh | Add submodule exclusion"
ansible.builtin.template:
src: etc/crypto-policies/policies/modules/NO-SSHCBC.pmod.j2
dest: /etc/crypto-policies/policies/modules/NO-SSHCBC.pmod
owner: root
group: root
mode: 'g-wx,o-rwx'
register: discovered_no_sshcbc_template
- name: "1.6.5 | PATCH | Ensure system wide crypto policy disables cbc for ssh | submodule to crypto policy modules"
ansible.builtin.set_fact:
rhel9cis_crypto_policy_module: "{{ rhel9cis_crypto_policy_module + ':' + 'NO-SSHCBC' }}"
changed_when: discovered_no_sshcbc_template is changed # noqa: no-handler
notify:
- Update Crypto Policy
- Set Crypto Policy
- name: "1.6.6 | PATCH | Ensure system wide crypto policy disables chacha20-poly1305 for ssh"
when:
- rhel9cis_rule_1_6_6
- "'NO-SSHWEAKCIPHERS' not in rhel9cis_crypto_policy_module"
- rhel9cis_crypto_policy_ansiblemanaged
tags:
- level1-server
- level1-workstation
- automated
- patch
- crypto
- rule_1.6.6
- NIST800-53R5_SC-6
block:
- name: "1.6.6 | PATCH | Ensure system wide crypto policy disables chacha20-poly1305 for ssh | Add submodule exclusion"
ansible.builtin.template:
src: etc/crypto-policies/policies/modules/NO-SSHWEAKCIPHERS.pmod.j2
dest: /etc/crypto-policies/policies/modules/NO-SSHWEAKCIPHERS.pmod
owner: root
group: root
mode: 'g-wx,o-rwx'
register: discovered_no_sshweakciphers_template
- name: "1.6.6 | PATCH | Ensure system wide crypto policy disables chacha20-poly1305 for ssh | submodule to crypto policy modules"
ansible.builtin.set_fact:
rhel9cis_crypto_policy_module: "{{ rhel9cis_crypto_policy_module + ':' + 'NO-SSHWEAKCIPHERS' }}"
changed_when: discovered_no_sshweakciphers_template is changed # noqa: no-handler
notify:
- Update Crypto Policy
- Set Crypto Policy
- name: "1.6.7 | PATCH | Ensure system wide crypto policy disables EtM for ssh"
when:
- rhel9cis_rule_1_6_7
- "'NO-SSHETM' not in rhel9cis_crypto_policy_module"
- rhel9cis_crypto_policy_ansiblemanaged
tags:
- level1-server
- level1-workstation
- automated
- patch
- crypto
- rule_1.6.7
- NIST800-53R5_SC-6
block:
- name: "1.6.7 | PATCH | Ensure system wide crypto policy disables EtM for ssh | Add submodule exclusion"
ansible.builtin.template:
src: etc/crypto-policies/policies/modules/NO-SSHETM.pmod.j2
dest: /etc/crypto-policies/policies/modules/NO-SSHETM.pmod
owner: root
group: root
mode: 'g-wx,o-rwx'
register: discovered_no_sshetm_template
- name: "1.6.7 | PATCH | Ensure system wide crypto policy disables EtM for ssh | submodule to crypto policy modules"
ansible.builtin.set_fact:
rhel9cis_crypto_policy_module: "{{ rhel9cis_crypto_policy_module + ':' + 'NO-SSHETM' }}"
changed_when: discovered_no_sshetm_template is changed # noqa: no-handler
notify:
- Update Crypto Policy
- Set Crypto Policy
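Each block above only appends a submodule name such as NO-SHA1 to rhel9cis_crypto_policy_module and then notifies the Update Crypto Policy and Set Crypto Policy handlers, which are defined outside this diff. A hedged sketch of what such a handler is assumed to do with the accumulated string; the exact concatenation in the real handler may differ:

- name: Set Crypto Policy   # assumed handler shape, not taken from this diff
  ansible.builtin.command: update-crypto-policies --set "{{ rhel9cis_crypto_policy }}{{ rhel9cis_crypto_policy_module }}"
  changed_when: true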


@ -1,102 +1,93 @@
--- ---
- name: "1.7.1 | PATCH | Ensure message of the day is configured properly" - name: "1.7.1 | PATCH | Ensure message of the day is configured properly"
when: rhel9cis_rule_1_7_1
tags:
- level1-server
- level1-workstation
- banner
- patch
- rule_1.7.1
- NIST800-53R5_CM-1
- NIST800-53R5_CM-3
- NIST800-53R5_CM-6
ansible.builtin.template: ansible.builtin.template:
src: etc/motd.j2 src: etc/motd.j2
dest: /etc/motd dest: /etc/motd
owner: root owner: root
group: root group: root
mode: 'u-x,go-wx' mode: 0644
when:
- rhel9cis_rule_1_7_1
tags:
- level1-server
- level1-workstation
- banner
- patch
- rule_1.7.1
- name: "1.7.2 | PATCH | Ensure local login warning banner is configured properly" - name: "1.7.2 | PATCH | Ensure local login warning banner is configured properly"
when: rhel9cis_rule_1_7_2
tags:
- level1-server
- level1-workstation
- patch
- rule_1.7.2
- NIST800-53R5_CM-1
- NIST800-53R5_CM-3
- NIST800-53R5_CM-6
ansible.builtin.template: ansible.builtin.template:
src: etc/issue.j2 src: etc/issue.j2
dest: /etc/issue dest: /etc/issue
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
when:
- rhel9cis_rule_1_7_2
tags:
- level1-server
- level1-workstation
- patch
- rule_1.7.2
- name: "1.7.3 | PATCH | Ensure remote login warning banner is configured properly" - name: "1.7.3 | PATCH | Ensure remote login warning banner is configured properly"
when: rhel9cis_rule_1_7_3
tags:
- level1-server
- level1-workstation
- banner
- patch
- rule_1.7.3
- NIST800-53R5_CM-1
- NIST800-53R5_CM-3
- NIST800-53R5_CM-6
ansible.builtin.template: ansible.builtin.template:
src: etc/issue.net.j2 src: etc/issue.net.j2
dest: /etc/issue.net dest: /etc/issue.net
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
when:
- rhel9cis_rule_1_7_3
tags:
- level1-server
- level1-workstation
- banner
- patch
- rule_1.7.3
- name: "1.7.4 | PATCH | Ensure permissions on /etc/motd are configured" - name: "1.7.4 | PATCH | Ensure permissions on /etc/motd are configured"
when: rhel9cis_rule_1_7_4
tags:
- level1-server
- level1-workstation
- perms
- patch
- rule_1.7.4
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.file: ansible.builtin.file:
path: /etc/motd path: /etc/motd
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
when:
- rhel9cis_rule_1_7_4
tags:
- level1-server
- level1-workstation
- perms
- patch
- rule_1.7.4
- name: "1.7.5 | PATCH | Ensure permissions on /etc/issue are configured" - name: "1.7.5 | PATCH | Ensure permissions on /etc/issue are configured"
when: rhel9cis_rule_1_7_5
tags:
- level1-server
- level1-workstation
- perms
- patch
- rule_1.7.5
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.file: ansible.builtin.file:
path: /etc/issue path: /etc/issue
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
when:
- rhel9cis_rule_1_7_5
tags:
- level1-server
- level1-workstation
- perms
- patch
- rule_1.7.5
- name: "1.7.6 | PATCH | Ensure permissions on /etc/issue.net are configured" - name: "1.7.6 | PATCH | Ensure permissions on /etc/issue.net are configured"
when: rhel9cis_rule_1_7_6
tags:
- level1-server
- level1-workstation
- perms
- patch
- rule_1.7.6
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.file: ansible.builtin.file:
path: /etc/issue.net path: /etc/issue.net
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
when:
- rhel9cis_rule_1_7_6
tags:
- level1-server
- level1-workstation
- perms
- patch
- rule_1.7.6


@ -1,264 +1,264 @@
--- ---
- name: "1.8.1 | PATCH | Ensure GNOME Display Manager is removed" - name: "1.8.1 | PATCH | Ensure GNOME Display Manager is removed"
when:
- rhel9cis_rule_1_8_1
- "'gdm' in ansible_facts.packages"
- not rhel9cis_gui
tags:
- level2-server
- patch
- gui
- gdm
- rule_1.8.1
ansible.builtin.package: ansible.builtin.package:
name: gdm name: gdm
state: absent state: absent
when:
- rhel9cis_rule_1_8_1
- "'gdm' in ansible_facts.packages"
- not rhel9cis_gui
tags:
- level2-server
- patch
- gui
- gdm
- rule_1.8.1
- name: "1.8.2 | PATCH | Ensure GDM login banner is configured" - name: "1.8.2 | PATCH | Ensure GDM login banner is configured"
when:
- rhel9cis_rule_1_8_2
- rhel9cis_gui
tags:
- level1-server
- level1-workstation
- patch
- gui
- gdm
- rule_1.8.2
block: block:
- name: "1.8.2 | PATCH | Ensure GDM login banner is configured | gdm profile" - name: "1.8.2 | PATCH | Ensure GDM login banner is configured | gdm profile"
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: /etc/dconf/profile/gdm path: /etc/dconf/profile/gdm
regexp: "{{ item.regexp }}" regexp: "{{ item.regexp }}"
line: "{{ item.line }}" line: "{{ item.line }}"
create: true create: true
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
notify: Reload dconf notify: Reload dconf
loop: loop:
- { regexp: 'user-db', line: 'user-db:user' } - { regexp: 'user-db', line: 'user-db:user' }
- { regexp: 'system-db', line: 'system-db:gdm' } - { regexp: 'system-db', line: 'system-db:gdm' }
- { regexp: 'file-db', line: 'file-db:/usr/share/gdm/greeter-dconf-defaults' } - { regexp: 'file-db', line: 'file-db:/usr/share/gdm/greeter-dconf-defaults' }
- name: "1.8.2 | PATCH | Ensure GDM login banner is configured | gdm profile" - name: "1.8.2 | PATCH | Ensure GDM login banner is configured | gdm profile"
ansible.builtin.template: ansible.builtin.template:
src: etc/dconf/db/gdm.d/01-banner-message.j2 src: etc/dconf/db/gdm.d/01-banner-message.j2
dest: /etc/dconf/db/gdm.d/01-banner-message dest: /etc/dconf/db/gdm.d/01-banner-message
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
notify: Reload dconf notify: Reload dconf
when:
- rhel9cis_rule_1_8_2
- rhel9cis_gui
tags:
- level1-server
- level1-workstation
- patch
- gui
- gdm
- rule_1.8.2
- name: "1.8.3 | PATCH | Ensure GDM disable-user-list option is enabled" - name: "1.8.3 | PATCH | Ensure GDM disable-user-list option is enabled"
when:
- rhel9cis_rule_1_8_3
- rhel9cis_gui
tags:
- level1-server
- level1-workstation
- patch
- gui
- rule_1.8.3
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: "{{ item.file }}" path: "{{ item.file }}"
regexp: "{{ item.regexp }}" regexp: "{{ item.regexp }}"
line: "{{ item.line }}" line: "{{ item.line }}"
create: true create: true
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
notify: Reload dconf notify: Reload dconf
loop: loop:
- { file: '/etc/dconf/profile/gdm', regexp: 'user-db', line: 'user-db:user' } - { file: '/etc/dconf/profile/gdm', regexp: 'user-db', line: 'user-db:user' }
- { file: '/etc/dconf/profile/gdm', regexp: 'system-db', line: 'system-db:gdm' } - { file: '/etc/dconf/profile/gdm', regexp: 'system-db', line: 'system-db:gdm' }
- { file: '/etc/dconf/profile/gdm', regexp: 'file-db', line: 'file-db:/usr/share/gdm/greeter-dconf-defaults'} - { file: '/etc/dconf/profile/gdm', regexp: 'file-db', line: 'file-db:/usr/share/gdm/greeter-dconf-defaults'}
- { file: '/etc/dconf/db/gdm.d/00-login-screen', regexp: '\[org\/gnome\/login-screen\]', line: '[org/gnome/login-screen]' } - { file: '/etc/dconf/db/gdm.d/00-login-screen', regexp: '\[org\/gnome\/login-screen\]', line: '[org/gnome/login-screen]' }
- { file: '/etc/dconf/db/gdm.d/00-login-screen', regexp: 'disable-user-list=', line: 'disable-user-list=true' } - { file: '/etc/dconf/db/gdm.d/00-login-screen', regexp: 'disable-user-list=', line: 'disable-user-list=true' }
when:
- rhel9cis_rule_1_8_3
- rhel9cis_gui
tags:
- level1-server
- level1-workstation
- patch
- gui
- rule_1.8.3
- name: "1.8.4 | PATCH | Ensure GDM screen locks when the user is idle" - name: "1.8.4 | PATCH | Ensure GDM screen locks when the user is idle"
when:
- rhel9cis_rule_1_8_4
- rhel9cis_gui
tags:
- level1-server
- level1-workstation
- patch
- gui
- rule_1.8.4
block: block:
- name: "1.8.4 | PATCH | Ensure GDM screen locks when the user is idle | User profile" - name: "1.8.4 | PATCH | Ensure GDM screen locks when the user is idle | User profile"
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: /etc/dconf/profile/user path: /etc/dconf/profile/user
regexp: "{{ item.regexp }}" regexp: "{{ item.regexp }}"
line: "{{ item.line }}" line: "{{ item.line }}"
create: true create: true
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
loop: loop:
- { regexp: '^user-db', line: 'user-db:user' } - { regexp: '^user-db', line: 'user-db: user' }
- { regexp: '^system-db', line: 'system-db:local' } - { regexp: '^system-db', line: 'system-db: local' }
- name: "1.8.4 | PATCH | Ensure GDM screen locks when the user is idle | Make db directory" - name: "1.8.4 | PATCH | Ensure GDM screen locks when the user is idle | Make db directory"
ansible.builtin.file: ansible.builtin.file:
path: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d" path: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d"
owner: root owner: root
group: root group: root
mode: 'go-w' mode: 0755
state: directory state: directory
- name: "1.8.4 | PATCH | Ensure GDM screen locks when the user is idle | Make conf file" - name: "1.8.4 | PATCH | Ensure GDM screen locks when the user is idle | Make conf file"
ansible.builtin.template: ansible.builtin.template:
src: etc/dconf/db/00-screensaver.j2 src: etc/dconf/db/00-screensaver.j2
dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/00-screensaver" dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/00-screensaver"
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: '0644'
notify: Reload dconf notify: Reload dconf
- name: "1.8.5 | PATCH | Ensure GDM screen locks cannot be overridden"
when: when:
- rhel9cis_rule_1_8_5 - rhel9cis_rule_1_8_4
- rhel9cis_gui - rhel9cis_gui
tags: tags:
- level1-server - level1-server
- level1-workstation - level1-workstation
- patch - patch
- gui - gui
- rule_1.8.5 - rule_1.8.4
block:
- name: "1.8.5 | PATCH | Ensure GDM screen locks cannot be overridden | Make lock directory"
ansible.builtin.file:
path: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks"
owner: root
group: root
mode: 'go-w'
state: directory
- name: "1.8.5 | PATCH | Ensure GDM screen locks cannot be overridden | Make lock file" - name: "1.8.5 PATCH | Ensure GDM screen locks cannot be overridden"
ansible.builtin.template: block:
src: etc/dconf/db/00-screensaver_lock.j2 - name: "1.8.5 | PATCH | Ensure GDM screen locks cannot be overridden | Make lock directory"
dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks/00-screensaver_lock" ansible.builtin.file:
owner: root path: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks"
group: root owner: root
mode: 'go-wx' group: root
notify: Reload dconf mode: 0755
state: directory
- name: "1.8.5 | PATCH | Ensure GDM screen locks cannot be overridden | Make lock file"
ansible.builtin.template:
src: etc/dconf/db/00-screensaver_lock.j2
dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks/00-screensaver"
owner: root
group: root
mode: 0644
notify: Reload dconf
when:
- rhel9cis_rule_1_8_5
- rhel9cis_gui
tags:
- level1-server
- level1-workstation
- patch
- gui
- rule_1.8.5
- name: "1.8.6 | PATCH | Ensure GDM automatic mounting of removable media is disabled" - name: "1.8.6 | PATCH | Ensure GDM automatic mounting of removable media is disabled"
when:
- rhel9cis_rule_1_8_6
- rhel9cis_gui
tags:
- level1-server
- level2-workstation
- patch
- gui
- rule_1.8.6
ansible.builtin.template: ansible.builtin.template:
src: etc/dconf/db/00-media-automount.j2 src: etc/dconf/db/00-media-automount.j2
dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/00-media-automount" dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/00-media-automount"
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: '0644'
notify: Reload dconf notify: Reload dconf
when:
- rhel9cis_rule_1_8_6
- rhel9cis_gui
tags:
- level1-server
- level2-workstation
- patch
- gui
- rule_1.8.6
- name: "1.8.7 | PATCH | Ensure GDM disabling automatic mounting of removable media is not overridden" - name: "1.8.7 | PATCH | Ensure GDM disabling automatic mounting of removable media is not overridden"
when:
- rhel9cis_rule_1_8_7
- rhel9cis_gui
tags:
- level1-server
- level2-workstation
- patch
- gui
- rule_1.8.7
block: block:
- name: "1.8.7 | PATCH | Ensure GDM disabling automatic mounting of removable media is not overridden | Make lock directory" - name: "1.8.7 | PATCH | Ensure GDM disabling automatic mounting of removable media is not overridden | Make lock directory"
ansible.builtin.file: ansible.builtin.file:
path: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks" path: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks"
owner: root owner: root
group: root group: root
mode: 'go-w' mode: 0755
state: directory state: directory
- name: "1.8.7 | PATCH | Ensure GDM disabling automatic mounting of removable media is not overridden | Make lock file" - name: "1.8.7 | PATCH | Ensure GDM disabling automatic mounting of removable media is not overridden | Make lock file"
ansible.builtin.template: ansible.builtin.template:
src: etc/dconf/db/00-automount_lock.j2 src: etc/dconf/db/00-automount_lock.j2
dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks/00-automount_lock" dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks/00-automount_lock"
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
notify: Reload dconf notify: Reload dconf
when:
- rhel9cis_rule_1_8_7
- rhel9cis_gui
tags:
- level1-server
- level2-workstation
- patch
- gui
- rule_1.8.7
- name: "1.8.8 | PATCH | Ensure GDM autorun-never is enabled" - name: "1.8.8 | PATCH | Ensure GDM autorun-never is enabled"
when:
- rhel9cis_rule_1_8_8
- rhel9cis_gui
tags:
- level1-server
- level2-workstation
- patch
- gui
- rule_1.8.8
block: block:
- name: "1.8.8 | PATCH | Ensure GDM autorun-never is enabled | Make directory" - name: "1.8.8 | PATCH | Ensure GDM autorun-never is enabled | Make directory"
ansible.builtin.file: ansible.builtin.file:
path: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d" path: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d"
owner: root owner: root
group: root group: root
mode: 'go-w' mode: 0755
state: directory state: directory
- name: "1.8.8 | PATCH | Ensure GDM autorun-never is enabled | Make conf file" - name: "1.8.8 | PATCH | Ensure GDM autorun-never is enabled | Make conf file"
ansible.builtin.template: ansible.builtin.template:
src: etc/dconf/db/00-media-autorun.j2 src: etc/dconf/db/00-media-autorun.j2
dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/00-media-autorun" dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/00-media-autorun"
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: '0644'
notify: Reload dconf notify: Reload dconf
when:
- rhel9cis_rule_1_8_8
- rhel9cis_gui
tags:
- level1-server
- level2-workstation
- patch
- gui
- rule_1.8.8
- name: "1.8.9 | PATCH | Ensure GDM autorun-never is not overridden" - name: "1.8.9 | PATCH | Ensure GDM autorun-never is not overridden"
when:
- rhel9cis_rule_1_8_9
- rhel9cis_gui
tags:
- level1-server
- level2-workstation
- patch
- gui
- rule_1.8.9
block: block:
- name: "1.8.9 | PATCH | Ensure GDM autorun-never is not overridden | Make lock directory" - name: "1.8.9 | PATCH | Ensure GDM autorun-never is not overridden | Make lock directory"
ansible.builtin.file: ansible.builtin.file:
path: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks" path: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks"
owner: root owner: root
group: root group: root
mode: 'go-w' mode: 0755
state: directory state: directory
- name: "1.8.9 | PATCH | Ensure GDM autorun-never is not overridden | Make lockfile" - name: "1.8.9 | PATCH | Ensure GDM autorun-never is not overridden | Make lockfile"
ansible.builtin.template: ansible.builtin.template:
src: etc/dconf/db/00-autorun_lock.j2 src: etc/dconf/db/00-autorun_lock.j2
dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks/00-autorun_lock" dest: "/etc/dconf/db/{{ rhel9cis_dconf_db_name }}.d/locks/00-autorun_lock"
owner: root owner: root
group: root group: root
mode: 'go-wx' mode: 0644
notify: Reload dconf notify: Reload dconf
when:
- rhel9cis_rule_1_8_9
- rhel9cis_gui
tags:
- level1-server
- level2-workstation
- patch
- gui
- rule_1.8.9
- name: "1.8.10 | PATCH | Ensure XDMCP is not enabled" - name: "1.8.10 | PATCH | Ensure XDMCP is not enabled"
when:
- rhel9cis_rule_1_8_10
- rhel9cis_gui
tags:
- level1-server
- level1-workstation
- patch
- gui
- rule_1.8.10
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: /etc/gdm/custom.conf path: /etc/gdm/custom.conf
regexp: 'Enable=true' regexp: 'Enable=true'
state: absent state: absent
when:
- rhel9cis_rule_1_8_10
- rhel9cis_gui
tags:
- level1-server
- level1-workstation
- patch
- gui
- rule_1.8.4


@ -0,0 +1,16 @@
---
- name: "1.9 | PATCH | Ensure updates, patches, and additional security software are installed"
ansible.builtin.package:
name: "*"
state: latest
notify: Change_requires_reboot
when:
- rhel9cis_rule_1_9
- not system_is_ec2
tags:
- level1-server
- level1-workstation
- patch
- rule_1.9
- skip_ansible_lint
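Rule 1.9 updates every installed package to latest, which ansible-lint would normally flag (hence the skip_ansible_lint tag). Where site policy only wants security errata, a hedged variant using the dnf module's security flag could look like the following; the task name is illustrative and the conditions reuse the ones above:

- name: "1.9 | PATCH | Apply security errata only (illustrative variant)"
  ansible.builtin.dnf:
    name: "*"
    state: latest
    security: true   # restricts the blanket update to advisories marked as security related
  notify: Change_requires_reboot
  when:
    - rhel9cis_rule_1_9
    - not system_is_ec2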


@ -1,66 +1,59 @@
 ---
 - name: "SECTION | 1.1.1.x | Disable unused filesystems"
-  ansible.builtin.import_tasks:
-    file: cis_1.1.1.x.yml
+  ansible.builtin.import_tasks: cis_1.1.1.x.yml

-- name: "SECTION | 1.1.2.1.x | Configure /tmp"
-  ansible.builtin.import_tasks:
-    file: cis_1.1.2.1.x.yml
+- name: "SECTION | 1.1.2.x | Configure /tmp"
+  ansible.builtin.import_tasks: cis_1.1.2.x.yml

-- name: "SECTION | 1.1.2.2.x | Configure /dev/shm"
-  ansible.builtin.import_tasks:
-    file: cis_1.1.2.2.x.yml
+- name: "SECTION | 1.1.3.x | Configure /var"
+  ansible.builtin.import_tasks: cis_1.1.3.x.yml

-- name: "SECTION | 1.1.2.3.x | Configure /home"
-  ansible.builtin.import_tasks:
-    file: cis_1.1.2.3.x.yml
+- name: "SECTION | 1.1.4.x | Configure /var/tmp"
+  ansible.builtin.import_tasks: cis_1.1.4.x.yml

-- name: "SECTION | 1.1.2.4.x | Configure /var"
-  ansible.builtin.import_tasks:
-    file: cis_1.1.2.4.x.yml
+- name: "SECTION | 1.1.5.x | Configure /var/log"
+  ansible.builtin.import_tasks: cis_1.1.5.x.yml

-- name: "SECTION | 1.1.2.5.x | Configure /var/tmp"
-  ansible.builtin.import_tasks:
-    file: cis_1.1.2.5.x.yml
+- name: "SECTION | 1.1.6.x | Configure /var/log/audit"
+  ansible.builtin.import_tasks: cis_1.1.6.x.yml

-- name: "SECTION | 1.1.2.6.x | Configure /var/log"
-  ansible.builtin.import_tasks:
-    file: cis_1.1.2.6.x.yml
+- name: "SECTION | 1.1.7.x | Configure /home"
+  ansible.builtin.import_tasks: cis_1.1.7.x.yml

-- name: "SECTION | 1.1.2.7.x | Configure /var/log/audit"
-  ansible.builtin.import_tasks:
-    file: cis_1.1.2.7.x.yml
+- name: "SECTION | 1.1.8.x | Configure /dev/shm"
+  ansible.builtin.import_tasks: cis_1.1.8.x.yml

-- name: "SECTION | 1.2.1.x | Configure Package Repositories"
-  ansible.builtin.import_tasks:
-    file: cis_1.2.1.x.yml
+- name: "SECTION | 1.1.x | Disable various mounting"
+  ansible.builtin.import_tasks: cis_1.1.x.yml

-- name: "SECTION | 1.2.2.x | Configure Package Updates"
-  ansible.builtin.import_tasks:
-    file: cis_1.2.2.x.yml
+- name: "SECTION | 1.2 | Configure Software Updates"
+  ansible.builtin.import_tasks: cis_1.2.x.yml

-- name: "SECTION | 1.3.1 | Configure SELinux"
-  ansible.builtin.import_tasks:
-    file: cis_1.3.1.x.yml
+- name: "SECTION | 1.3 | Filesystem Integrity Checking"
+  ansible.builtin.import_tasks: cis_1.3.x.yml
+  when: rhel9cis_config_aide

-- name: "SECTION | 1.4 | Configure Bootloader"
-  ansible.builtin.import_tasks:
-    file: cis_1.4.x.yml
+- name: "SECTION | 1.4 | Secure Boot Settings"
+  ansible.builtin.import_tasks: cis_1.4.x.yml

 - name: "SECTION | 1.5 | Additional Process Hardening"
-  ansible.builtin.import_tasks:
-    file: cis_1.5.x.yml
+  ansible.builtin.import_tasks: cis_1.5.x.yml

-- name: "SECTION | 1.6 | Configure system wide crypto policy"
-  ansible.builtin.import_tasks:
-    file: cis_1.6.x.yml
+- name: "SECTION | 1.6 | Mandatory Access Control"
+  include_tasks: cis_1.6.1.x.yml
+  when: not rhel9cis_selinux_disable

 - name: "SECTION | 1.7 | Command Line Warning Banners"
-  ansible.builtin.import_tasks:
-    file: cis_1.7.x.yml
+  ansible.builtin.import_tasks: cis_1.7.x.yml

 - name: "SECTION | 1.8 | Gnome Display Manager"
-  when: rhel9cis_display_manager == 'gdm'
-  ansible.builtin.import_tasks:
-    file: cis_1.8.x.yml
+  ansible.builtin.import_tasks: cis_1.8.x.yml

+- name: "SECTION | 1.9 | Updates and Patches"
+  ansible.builtin.import_tasks: cis_1.9.yml

+- name: "SECTION | 1.10 | Crypto policies"
+  include_tasks: cis_1.10.yml
+  when:
+    - not system_is_ec2
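The reworked section list mixes static imports with dynamic includes: import_tasks is resolved at parse time, while include_tasks is evaluated at run time, so a when: on an include skips reading the file altogether. A minimal illustration with invented file names:

# Static: parsed up front; tags on the imported tasks behave as usual.
- name: "SECTION | example | statically imported"
  ansible.builtin.import_tasks: cis_example_static.yml   # illustrative file name

# Dynamic: the condition is evaluated first, so the file is skipped entirely when it is false.
- name: "SECTION | example | dynamically included"
  ansible.builtin.include_tasks: cis_example_dynamic.yml   # illustrative file name
  when: not rhel9cis_selinux_disable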


@ -1,695 +1,40 @@
---
- name: "2.1.1 | PATCH | Ensure time synchronization is in use"
ansible.builtin.package:
name: chrony
state: present
when:
- rhel9cis_rule_2_1_1
- not system_is_container
tags:
- level1-server
- level1-workstation
- patch
- rule_2.1.1
- name: "2.1.1 | PATCH | Ensure autofs services are not in use"
when:
- rhel9cis_rule_2_1_1
- "'autofs' in ansible_facts.packages"
tags:
- level1-server
- level2-workstation
- automated
- patch
- NIST800-53R5_SI-3
- NIST800-53R5_MP-7
- rule_2.1.1
block:
- name: "2.1.1 | PATCH | Ensure autofs services are not in use | Remove Package"
when:
- not rhel9cis_autofs_services
- not rhel9cis_autofs_mask
ansible.builtin.package:
name: autofs
state: absent
- name: "2.1.1 | PATCH | Ensure autofs services are not in use | Mask service"
when:
- not rhel9cis_autofs_services
- rhel9cis_autofs_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: autofs
enabled: false
state: stopped
masked: true
- name: "2.1.2 | PATCH | Ensure avahi daemon services are not in use"
when: rhel9cis_rule_2_1_2
tags:
- level1-server
- level2-workstation
- automated
- patch
- avahi
- NIST800-53R5_SI-4
- rule_2.1.2
block:
- name: "2.1.2 | PATCH | Ensure avahi daemon services are not in use | Remove package"
when:
- not rhel9cis_avahi_server
- not rhel9cis_avahi_mask
ansible.builtin.package:
name:
- avahi-autoipd
- avahi
state: absent
- name: "2.1.2 | PATCH | Ensure avahi daemon services are not in use | Mask service"
when:
- not rhel9cis_avahi_server
- rhel9cis_avahi_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
state: stopped
masked: true
loop:
- avahi-daemon.socket
- avahi-daemon.service
- name: "2.1.3 | PATCH | Ensure dhcp server services are not in use"
when: rhel9cis_rule_2_1_3
tags:
- level1-server
- level1-workstation
- automated
- patch
- dhcp
- NIST800-53R5_CM-7
- rule_2.1.3
block:
- name: "2.1.3 | PATCH | Ensure dhcp server services are not in use | Remove package"
when:
- not rhel9cis_dhcp_server
- not rhel9cis_dhcp_mask
ansible.builtin.package:
name: dhcp-server
state: absent
- name: "2.1.3 | PATCH | Ensure dhcp server services are not in use | Mask service"
when:
- not rhel9cis_dhcp_server
- rhel9cis_dhcp_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
state: stopped
masked: true
loop:
- dhcpd.service
- dhcpd6.service
- name: "2.1.4 | PATCH | Ensure dns server services are not in use"
when: rhel9cis_rule_2_1_4
tags:
- level1-server
- level1-workstation
- automated
- patch
- dns
- NIST800-53R5_CM-7
- rule_2.1.4
block:
- name: "2.1.4 | PATCH | Ensure dns server services are not in use | Remove package"
when:
- not rhel9cis_dns_server
- not rhel9cis_dns_mask
ansible.builtin.package:
name: bind
state: absent
- name: "2.1.4 | PATCH | Ensure dns server services are not in use | Mask service"
when:
- not rhel9cis_dns_server
- rhel9cis_dns_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: named.service
enabled: false
state: stopped
masked: true
- name: "2.1.5 | PATCH | Ensure dnsmasq server services are not in use"
when: rhel9cis_rule_2_1_5
tags:
- level1-server
- level1-workstation
- automated
- patch
- dns
- NIST800-53R5_CM-7
- rule_2.1.5
block:
- name: "2.1.5 | PATCH | Ensure dnsmasq server services are not in use | Remove package"
when:
- not rhel9cis_dnsmasq_server
- not rhel9cis_dnsmasq_mask
ansible.builtin.package:
name: dnsmasq
state: absent
- name: "2.1.5 | PATCH | Ensure dnsmasq server services are not in use | Mask service"
when:
- not rhel9cis_dnsmasq_server
- rhel9cis_dnsmasq_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: dnsmasq.service
enabled: false
state: stopped
masked: true
- name: "2.1.6 | PATCH | Ensure samba file server services are not in use"
when: rhel9cis_rule_2_1_6
tags:
- level1-server
- level1-workstation
- automated
- patch
- samba
- NIST800-53R5_CM-6
- NIST800-53R5_CM-7
- rule_2.1.6
block:
- name: "2.1.6 | PATCH | Ensure samba file server services are not in use | Remove package"
when:
- not rhel9cis_samba_server
- not rhel9cis_samba_mask
ansible.builtin.package:
name: samba
state: absent
- name: "2.1.6 | PATCH | Ensure samba file server services are not in use | Mask service"
when:
- not rhel9cis_samba_server
- rhel9cis_samba_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: smb.service
enabled: false
state: stopped
masked: true
- name: "2.1.7 | PATCH | Ensure ftp server services are not in use"
when: rhel9cis_rule_2_1_7
tags:
- level1-server
- level1-workstation
- automation
- patch
- ftp
- NIST800-53R5_CM-6
- NIST800-53R5_CM-7
- rule_2.1.7
block:
- name: "2.1.7 | PATCH | Ensure ftp server services are not in use | Remove package"
when:
- not rhel9cis_ftp_server
- not rhel9cis_ftp_mask
ansible.builtin.package:
name: vsftpd
state: absent
- name: "2.1.7 | PATCH | Ensure ftp server services are not in use | Mask service"
when:
- not rhel9cis_ftp_server
- rhel9cis_ftp_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: vsftpd.service
enabled: false
state: stopped
masked: true
- name: "2.1.8 | PATCH | Ensure message access server services are not in use"
when: rhel9cis_rule_2_1_8
tags:
- level1-server
- level1-workstation
- automated
- patch
- dovecot
- imap
- pop3
- NIST800-53R5_CM-7
- rule_2.1.8
block:
- name: "2.1.8 | PATCH | Ensure message access server services are not in use | Remove package"
when:
- not rhel9cis_message_server
- not rhel9cis_message_mask
ansible.builtin.package:
name:
- dovecot
- cyrus-imapd
state: absent
- name: "2.1.8 | PATCH | Ensure message access server services are not in use | Mask service"
when:
- not rhel9cis_message_server
- rhel9cis_message_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
state: stopped
masked: true
loop:
- "dovecot.socket"
- "dovecot.service"
- "cyrus-imapd.service"
- name: "2.1.9 | PATCH | Ensure network file system services are not in use"
when: rhel9cis_rule_2_1_9
tags:
- level1-server
- level1-workstation
- automated
- patch
- nfs
- services
- NIST800-53R5_CM-6
- NIST800-53R5_CM-7
- rule_2.1.9
block:
- name: "2.1.9 | PATCH | Ensure network file system services are not in use | Remove package"
when:
- not rhel9cis_nfs_server
- not rhel9cis_nfs_mask
ansible.builtin.package:
name: nfs-utils
state: absent
- name: "2.1.9 | PATCH | Ensure network file system services are not in use | Mask service"
when:
- not rhel9cis_nfs_server
- rhel9cis_nfs_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: nfs-server.service
enabled: false
state: stopped
masked: true
- name: "2.1.10 | PATCH | Ensure nis server services are not in use"
when: rhel9cis_rule_2_1_10
tags:
- level1-server
- level1-workstation
- automated
- patch
- nis
- NIST800-53R5_CM-7
- rule_2.1.10
notify: Systemd daemon reload
block:
- name: "2.1.10 | PATCH | Ensure nis server services are not in use | Remove package"
when:
- not rhel9cis_nis_server
- not rhel9cis_nis_mask
ansible.builtin.package:
name: ypserv
state: absent
- name: "2.1.10 | PATCH | Ensure nis server services are not in use | Mask service"
when:
- not rhel9cis_nis_server
- rhel9cis_nis_mask
ansible.builtin.systemd:
name: ypserv.service
enabled: false
state: stopped
masked: true
- name: "2.1.11 | PATCH | Ensure print server services are not in use"
when: rhel9cis_rule_2_1_11
tags:
- level1-server
- automated
- patch
- cups
- NIST800-53R5_CM-7
- rule_2.1.11
block:
- name: "2.1.11 | PATCH | Ensure print server services are not in use | Remove package"
when:
- not rhel9cis_print_server
- not rhel9cis_print_mask
ansible.builtin.package:
name: cups
state: absent
- name: "2.1.11 | PATCH | Ensure print server services are not in use | Mask service"
when:
- not rhel9cis_print_server
- rhel9cis_print_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
state: stopped
masked: true
loop:
- "cups.socket"
- "cups.service"
- name: "2.1.12 | PATCH | Ensure rpcbind services are not in use"
when: rhel9cis_rule_2_1_12
tags:
- level1-server
- level1-workstation
- automated
- patch
- rpc
- NIST800-53R5_CM-6
- NIST800-53R5_CM-7
- rule_2.1.12
block:
- name: "2.1.12 | PATCH | Ensure rpcbind services are not in use | Remove package"
when:
- not rhel9cis_rpc_server
- not rhel9cis_rpc_mask
ansible.builtin.package:
name: rpcbind
state: absent
- name: "2.1.12 | PATCH | Ensure rpcbind services are not in use | Mask service"
when:
- not rhel9cis_rpc_server
- rhel9cis_rpc_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
state: stopped
masked: true
loop:
- rpcbind.service
- rpcbind.socket
- name: "2.1.13 | PATCH | Ensure rsync services are not in use"
when: rhel9cis_rule_2_1_13
tags:
- level1-server
- level1-workstation
- automated
- patch
- rsync
- NIST800-53R5_CM-6
- NIST800-53R5_CM-7
- rule_2.1.13
block:
- name: "2.1.13 | PATCH | Ensure rsync services are not in use | Remove package"
when:
- not rhel9cis_rsync_server
- not rhel9cis_rsync_mask
ansible.builtin.package:
name: rsync-daemon
state: absent
- name: "2.1.13 | PATCH | Ensure rsync services are not in use | Mask service"
when:
- not rhel9cis_rsync_server
- rhel9cis_rsync_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
state: stopped
masked: true
loop:
- 'rsyncd.socket'
- 'rsyncd.service'
- name: "2.1.14 | PATCH | Ensure snmp services are not in use"
when: rhel9cis_rule_2_1_14
tags:
- level1-server
- level1-workstation
- automation
- patch
- snmp
- NIST800-53R5_CM-7
- rule_2.1.14
block:
- name: "2.1.14 | PATCH | Ensure snmp services are not in use | Remove package"
when:
- not rhel9cis_snmp_server
- not rhel9cis_snmp_mask
ansible.builtin.package:
name: net-snmp
state: absent
- name: "2.1.14 | PATCH | Ensure snmp services are not in use | Mask service"
when:
- not rhel9cis_snmp_server
- rhel9cis_snmp_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: snmpd.service
enabled: false
state: stopped
masked: true
- name: "2.1.15 | PATCH | Ensure telnet server services are not in use"
when: rhel9cis_rule_2_1_15
tags:
- level1-server
- level1-workstation
- automated
- patch
- telnet
- NIST800-53R5_CM-7
- NIST800-53R5_CM-11
- rule_2.1.15
block:
- name: "2.1.15 | PATCH | Ensure telnet server services are not in use | Remove package"
when:
- not rhel9cis_telnet_server
- not rhel9cis_telnet_mask
ansible.builtin.package:
name: telnet-server
state: absent
- name: "2.1.15 | PATCH | Ensure telnet server services are not in use | Mask service"
when:
- not rhel9cis_telnet_server
- rhel9cis_telnet_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: telnet.socket
enabled: false
state: stopped
masked: true
- name: "2.1.16 | PATCH | Ensure tftp server services are not in use"
when: rhel9cis_rule_2_1_16
tags:
- level1-server
- level1-workstation
- automated
- patch
- tftp
- NIST800-53R5_CM-7
- rule_2.1.16
block:
- name: "2.1.16 | PATCH | Ensure tftp server services are not in use | Remove package"
when:
- not rhel9cis_tftp_server
- not rhel9cis_tftp_mask
ansible.builtin.package:
name: tftp-server
state: absent
- name: "2.1.16 | PATCH | Ensure tftp server services are not in use | Mask service"
when:
- not rhel9cis_tftp_server
- rhel9cis_tftp_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
state: stopped
masked: true
loop:
- 'tftp.socket'
- 'tftp.service'
- name: "2.1.17 | PATCH | Ensure web proxy server services are not in use"
when: rhel9cis_rule_2_1_17
tags:
- level1-server
- level1-workstation
    - automated
- patch
- squid
- NIST800-53R5_CM-6
- NIST800-53R5_CM-7
- rule_2.1.17
block:
- name: "2.1.17 | PATCH | Ensure web proxy server services are not in use | Remove package"
when:
- not rhel9cis_squid_server
- not rhel9cis_squid_mask
ansible.builtin.package:
name: squid
state: absent
- name: "2.1.17 | PATCH | Ensure web proxy server services are not in use | Mask service"
when:
- not rhel9cis_squid_server
- rhel9cis_squid_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: squid.service
enabled: false
state: stopped
masked: true
- name: "2.1.18 | PATCH | Ensure web server services are not in use"
when: rhel9cis_rule_2_1_18
tags:
- level1-server
- level1-workstation
- automated
- patch
- httpd
- nginx
- webserver
- NIST800-53R5_CM-7
- rule_2.1.18
block:
- name: "2.1.18 | PATCH | Ensure web server services are not in use | Remove httpd server"
when:
- not rhel9cis_httpd_server
- not rhel9cis_httpd_mask
ansible.builtin.package:
name: httpd
state: absent
- name: "2.1.18 | PATCH | Ensure web server services are not in use | Remove nginx server"
when:
- not rhel9cis_nginx_server
- not rhel9cis_nginx_mask
ansible.builtin.package:
name: nginx
state: absent
- name: "2.1.18 | PATCH | Ensure web server services are not in use | Mask httpd service"
when:
- not rhel9cis_httpd_server
- rhel9cis_httpd_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: httpd.service
enabled: false
state: stopped
masked: true
- name: "2.1.18 | PATCH | Ensure web server services are not in use | Mask nginx service"
when:
- not rhel9cis_nginx_server
- rhel9cis_nginx_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
        name: nginx.service
enabled: false
state: stopped
masked: true
- name: "2.1.19 | PATCH | Ensure xinetd services are not in use"
when: rhel9cis_rule_2_1_19
tags:
- level1-server
- level1-workstation
- automated
- patch
- xinetd
- NIST800-53R5_CM-7
- rule_2.1.19
block:
- name: "2.1.19 | PATCH | Ensure xinetd services are not in use | Remove package"
when:
- not rhel9cis_xinetd_server
- not rhel9cis_xinetd_mask
ansible.builtin.package:
name: xinetd
state: absent
- name: "2.1.19 | PATCH | Ensure xinetd services are not in use | Mask service"
when:
- not rhel9cis_xinetd_server
- rhel9cis_xinetd_mask
notify: Systemd daemon reload
ansible.builtin.systemd:
name: xinetd.service
enabled: false
state: stopped
masked: true
- name: "2.1.20 | PATCH | Ensure X window server services are not in use"
when:
- not rhel9cis_xwindow_server
- rhel9cis_rule_2_1_20
tags:
- level1-server
- level1-workstation
- automated
- patch
- xwindow
- NIST800-53R5_CM-11
- rule_2.1.20
  ansible.builtin.package:
    name: xorg-x11-server-common
    state: absent

- name: "2.1.21 | PATCH | Ensure mail transfer agents are configured for local-only mode"
  when:
    - not rhel9cis_is_mail_server
    - "'postfix' in ansible_facts.packages"
    - rhel9cis_rule_2_1_21
  tags:
    - level1-server
    - level1-workstation
    - automated
    - patch
    - postfix
    - NIST800-53R5_CM-7
    - rule_2.1.21
  notify: Restart postfix
  ansible.builtin.lineinfile:
    path: /etc/postfix/main.cf
    regexp: "^(#)?inet_interfaces"
    line: "inet_interfaces = loopback-only"

- name: "2.1.22 | AUDIT | Ensure only approved services are listening on a network interface"
  when: rhel9cis_rule_2_1_22
  tags:
    - level1-server
    - level1-workstation
    - manual
    - audit
    - services
    - NIST800-53R5_CM-7
    - rule_2.1.22
  vars:
    warn_control_id: '2.1.22'
  block:
    - name: "2.1.22 | AUDIT | Ensure only approved services are listening on a network interface | Get list of services"
      ansible.builtin.command: systemctl list-units --type=service  # noqa command-instead-of-module
      changed_when: false
      failed_when: discovered_running_services.rc not in [ 0, 1 ]
      check_mode: false
      register: discovered_running_services

    - name: "2.1.22 | AUDIT | Ensure only approved services are listening on a network interface | Display list of services"
      ansible.builtin.debug:
        msg:
          - "Warning!! Below are the list of services, both active and inactive"
          - "Please review to make sure all are essential"
          - "{{ discovered_running_services.stdout_lines }}"

    - name: "2.1.22 | AUDIT | Ensure only approved services are listening on a network interface | Warn Count"
      ansible.builtin.import_tasks:
        file: warning_facts.yml
  ansible.builtin.package:
    name: chrony
    state: present
  when:
    - rhel9cis_rule_2_1_1
    - not system_is_container
  tags:
    - level1-server
    - level1-workstation
    - patch
    - rule_2.1.1

- name: "2.1.2 | PATCH | Ensure chrony is configured"
  block:
    - name: "2.1.2 | PATCH | Ensure chrony is configured | Set configuration"
      ansible.builtin.template:
        src: etc/chrony.conf.j2
        dest: /etc/chrony.conf
        owner: root
        group: root
        mode: 0644

    - name: "2.1.2 | PATCH | Ensure chrony is configured | modify /etc/sysconfig/chronyd | 1"
      ansible.builtin.lineinfile:
        path: /etc/sysconfig/chronyd
        regexp: "^(#)?OPTIONS"
        line: "OPTIONS=\"-u chrony\""
        create: true
        mode: 0644
  when:
    - rhel9cis_rule_2_1_2
    - not system_is_container
tags:
- level1-server
- level1-workstation
- patch
- rule_2.1.2
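# The chrony.conf.j2 template referenced above is not part of this hunk; conceptually it renders
# the configured time sources plus a few standard hardening directives. Illustrative content only
# (directive values are assumptions, not taken from this changeset):
#
#   server time.example.com iburst
#   driftfile /var/lib/chrony/drift
#   makestep 1.0 3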

View file

@ -1,81 +1,349 @@
---

- name: "2.2.1 | PATCH | Ensure ftp client is not installed"
  when:
    - not rhel9cis_ftp_client
    - rhel9cis_rule_2_2_1
  tags:
    - level1-server
    - level1-workstation
    - automated
    - patch
    - ftp
    - NIST800-53R5_CM-7
    - rule_2.2.1
  ansible.builtin.package:
    name: ftp
    state: absent

- name: "2.2.2 | PATCH | Ensure ldap client is not installed"
  when:
    - not rhel9cis_openldap_clients_required
    - rhel9cis_rule_2_2_2
  tags:
    - level2-server
    - level2-workstation
    - automated
    - patch
    - ldap
    - NIST800-53R5_CM-7
    - rule_2.2.2
  ansible.builtin.package:
    name: openldap-clients
    state: absent

- name: "2.2.3 | PATCH | Ensure nis client is not installed"
  when:
    - not rhel9cis_ypbind_required
    - rhel9cis_rule_2_2_3
  tags:
    - level1-server
    - level1-workstation
    - automated
    - patch
    - nis
    - NIST800-53R5_CM-7
    - rule_2.2.3
  ansible.builtin.package:
    name: ypbind
    state: absent

- name: "2.2.4 | PATCH | Ensure telnet client is not installed"
  when:
    - not rhel9cis_telnet_required
    - rhel9cis_rule_2_2_4
  tags:
    - level1-server
    - level1-workstation
    - automated
    - patch
    - telnet
    - NIST800-53R5_CM-7
    - rule_2.2.4
  ansible.builtin.package:
    name: telnet
    state: absent

- name: "2.2.5 | PATCH | Ensure TFTP client is not installed"
  when:
    - not rhel9cis_tftp_client
    - rhel9cis_rule_2_2_5
  tags:
    - level1-server
    - level1-workstation
    - automated
    - patch
    - tftp
    - NIST800-53R5_CM-7
    - rule_2.2.5
  ansible.builtin.package:
    name: tftp
    state: absent
---

- name: "2.2.1 | PATCH | Ensure xorg-x11-server-common is not installed"
  ansible.builtin.package:
    name: xorg-x11-server-common
    state: absent
  when:
    - rhel9cis_rule_2_2_1
    - "'xorg-x11-server-common' in ansible_facts.packages"
    - not rhel9cis_gui
  tags:
    - level1-server
    - patch
    - x11
    - rule_2.2.1

- name: "2.2.2 | PATCH | Ensure Avahi Server is not installed"
  ansible.builtin.package:
    name:
      - avahi-autoipd
      - avahi
    state: absent
  when:
    - rhel9cis_rule_2_2_2
    - not rhel9cis_avahi_server
    - "'avahi' in ansible_facts.packages or 'avahi-autoipd' in ansible_facts.packages"
  tags:
    - level1-server
    - level2-workstation
    - patch
    - avahi
    - rule_2.2.2

- name: "2.2.3 | PATCH | Ensure CUPS is not installed"
  ansible.builtin.package:
    name: cups
    state: absent
  when:
    - not rhel9cis_cups_server
    - "'cups' in ansible_facts.packages"
    - rhel9cis_rule_2_2_3
  tags:
    - level1-server
    - patch
    - cups
    - rule_2.2.3

- name: "2.2.4 | PATCH | Ensure DHCP Server is not installed"
  ansible.builtin.package:
    name: dhcp-server
    state: absent
  when:
    - not rhel9cis_dhcp_server
    - "'dhcp-server' in ansible_facts.packages"
    - rhel9cis_rule_2_2_4
  tags:
    - level1-server
    - level1-workstation
    - patch
    - dhcp
    - rule_2.2.4

- name: "2.2.5 | PATCH | Ensure DNS Server is not installed"
  ansible.builtin.package:
    name: bind
    state: absent
when:
- not rhel9cis_dns_server
- "'bind' in ansible_facts.packages"
- rhel9cis_rule_2_2_5
tags:
- level1-server
- level1-workstation
- patch
- dns
- rule_2.2.5
- name: "2.2.6 | PATCH | Ensure VSFTP Server is not installed"
ansible.builtin.package:
name: vsftpd
state: absent
when:
- not rhel9cis_vsftpd_server
- "'vsftpd' in ansible_facts.packages"
- rhel9cis_rule_2_2_6
tags:
- level1-server
- level1-workstation
- patch
- vsftpd
- rule_2.2.6
- name: "2.2.7 | PACH | Ensure TFTP Server is not installed"
ansible.builtin.package:
name: tftp-server
state: absent
when:
- not rhel9cis_tftp_server
- "'tftp-server' in ansible_facts.packages"
- rhel9cis_rule_2_2_7
tags:
- level1-server
- level1-workstation
- patch
- tftp
- rule_2.2.7
- name: "2.2.8 | PATCH | Ensure a web server is not installed"
block:
- name: "2.2.8 | PATCH | Ensure a web server is not installed | Remove httpd server"
ansible.builtin.package:
name: httpd
state: absent
when:
- not rhel9cis_httpd_server
- "'httpd' in ansible_facts.packages"
- name: "2.2.8 | PATCH | Ensure a web server is not installed | Remove nginx server"
ansible.builtin.package:
name: nginx
state: absent
when:
- not rhel9cis_nginx_server
- "'nginx' in ansible_facts.packages"
when:
- rhel9cis_rule_2_2_8
tags:
- level1-server
- level1-workstation
- patch
- httpd
- nginx
- webserver
- rule_2.2.8
- name: "2.2.9 | PATCH | Ensure IMAP and POP3 server is not installed"
block:
- name: "2.2.9 | PATCH | Ensure IMAP and POP3 server is not installed"
ansible.builtin.package:
name:
- dovecot
state: absent
when:
- not rhel9cis_dovecot_server
- "'dovecot' in ansible_facts.packages"
- name: "2.2.9 | PATCH | Ensure IMAP and POP3 server is not installed"
ansible.builtin.package:
name:
- cyrus-imapd
state: absent
when:
- not rhel9cis_imap_server
- "'cyrus-imapd' in ansible_facts.packages"
when:
- rhel9cis_rule_2_2_9
tags:
- level1-server
- level1-workstation
- patch
- dovecot
- imap
- pop3
- rule_2.2.9
- name: "2.2.10 | PATCH | Ensure Samba is not enabled"
ansible.builtin.package:
name: samba
state: absent
when:
- not rhel9cis_samba_server
- "'samba' in ansible_facts.packages"
- rhel9cis_rule_2_2_10
tags:
- level1-server
- level1-workstation
- patch
- samba
- rule_2.2.10
- name: "2.2.11 | PATCH | Ensure HTTP Proxy Server is not installed"
ansible.builtin.package:
name: squid
state: absent
when:
- not rhel9cis_squid_server
- "'squid' in ansible_facts.packages"
- rhel9cis_rule_2_2_11
tags:
- level1-server
- level1-workstation
- patch
- squid
- rule_2.2.11
- name: "2.2.12 | PATCH | Ensure net-snmp is not installed"
ansible.builtin.package:
name: net-snmp
state: absent
when:
- not rhel9cis_snmp_server
- "'net-snmp' in ansible_facts.packages"
- rhel9cis_rule_2_2_12
tags:
- level1-server
- level1-workstation
- patch
- snmp
- rule_2.2.12
- name: "2.2.13 | PATCH | Ensure telnet-server is not installed"
ansible.builtin.package:
name: telnet-server
state: absent
when:
- not rhel9cis_telnet_server
- "'telnet-server' in ansible_facts.packages"
- rhel9cis_rule_2_2_13
tags:
- level1-server
- level1-workstation
- patch
- telnet
- rule_2.2.13
- name: "2.2.14 | PATCH | Ensure dnsmasq is not installed"
ansible.builtin.package:
name: dnsmasq
state: absent
notify: Restart postfix
when:
- not rhel9cis_is_mail_server
- "'dnsmasq' in ansible_facts.packages"
- rhel9cis_rule_2_2_14
tags:
- level1-server
- level1-workstation
- patch
- dnsmasq
- rule_2.2.14
- name: "2.2.15 | PATCH | Ensure mail transfer agent is configured for local-only mode"
ansible.builtin.lineinfile:
path: /etc/postfix/main.cf
regexp: "^(#)?inet_interfaces"
line: "inet_interfaces = loopback-only"
notify: Restart postfix
when:
- not rhel9cis_is_mail_server
- "'postfix' in ansible_facts.packages"
- rhel9cis_rule_2_2_15
tags:
- level1-server
- level1-workstation
- patch
- postfix
- rule_2.2.15
# The rule title only mentions masking the service, but the fix allows for either option.
# Variables in defaults/main.yml control whether the package is removed; the default (false) keeps the package and just masks the server service.
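# A minimal illustration of how these two toggles combine (values are examples, not the shipped
# defaults):
#
#   rhel9cis_use_nfs_server: false    # this host should not act as an NFS server
#   rhel9cis_use_nfs_service: true    # keep the nfs-utils package and only mask nfs-server
#
# Setting both to false removes the nfs-utils package outright, matching the task logic below.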
- name: "2.2.16 | PATCH | Ensure nfs-utils is not installed or the nfs-server service is masked"
block:
- name: "2.2.16 | PATCH | Ensure nfs-utils is not installed or the nfs-server service is masked | remove package"
ansible.builtin.package:
name: nfs-utils
state: absent
when:
- not rhel9cis_use_nfs_server
- not rhel9cis_use_nfs_service
- name: "2.2.16 | PATCH | Ensure nfs-utils is not installed or the nfs-server service is masked | mask service"
ansible.builtin.systemd:
name: nfs-server
masked: true
state: stopped
when:
- not rhel9cis_use_nfs_server
- rhel9cis_use_nfs_service
when:
- "'nfs-utils' in ansible_facts.packages"
- rhel9cis_rule_2_2_16
tags:
- level1-server
- level1-workstation
- patch
- nfs
- services
- rule_2.2.16
# The rule title only mentions masking the service, but the fix allows for either option.
# Variables in defaults/main.yml control whether the package is removed; the default (false) keeps the package and just masks the rpcbind services.
- name: "2.2.17 | PATCH | Ensure rpcbind is not installed or the rpcbind services are masked"
block:
- name: "2.2.17 | PATCH | Ensure rpcbind is not installed or the rpcbind services are masked | remove package"
ansible.builtin.package:
name: rpcbind
state: absent
when:
- not rhel9cis_use_rpc_server
- not rhel9cis_use_rpc_service
- name: "2.2.17 | PATCH | Ensure rpcbind is not installed or the rpcbind services are masked | mask service"
ansible.builtin.systemd:
name: rpcbind.socket
masked: true
state: stopped
when:
- rhel9cis_use_rpc_server
- not rhel9cis_use_rpc_service
when:
- "'rpcbind' in ansible_facts.packages"
- rhel9cis_rule_2_2_17
tags:
- level1-server
- level1-workstation
- patch
- rpc
- rule_2.2.17
# The rule title only mentions masking the service, but the fix allows for either option.
# Variables in defaults/main.yml control whether the package is removed; the default (false) keeps the package and just masks the rsyncd service.
- name: "2.2.18 | PATCH | Ensure rsync service is not enabled"
block:
- name: "2.2.18 | PATCH | Ensure rsync-daemon is not installed or the rsync service is masked | remove package"
ansible.builtin.package:
name: rsync-daemon
state: absent
when:
- not rhel9cis_use_rsync_server
- not rhel9cis_use_rsync_service
- name: "2.2.18 | PATCH | Ensure rsync service is not enabled | mask service"
ansible.builtin.systemd:
name: rsyncd
masked: true
state: stopped
when:
- rhel9cis_use_rsync_server
- not rhel9cis_use_rsync_service
when:
- "'rsync' in ansible_facts.packages"
- rhel9cis_rule_2_2_18
tags:
- level1-server
- level1-workstation
- patch
- rsync
- rule_2.2.18

View file

@ -1,51 +1,61 @@
---

- name: "2.3.1 | PATCH | Ensure time synchronization is in use"
  when:
    - rhel9cis_rule_2_3_1
    - not system_is_container
  tags:
    - level1-server
    - level1-workstation
    - patch
    - NIST800-53R5_AU-3
    - NIST800-53R5_AU-12
    - rule_2.3.1
  ansible.builtin.package:
    name: chrony
    state: present

- name: "2.3.2 | PATCH | Ensure chrony is configured"
  when:
    - rhel9cis_rule_2_3_2
    - not system_is_container
  tags:
    - level1-server
    - level1-workstation
    - patch
    - rule_2.3.2
    - NIST800-53R5_AU-3
    - NIST800-53R5_AU-12
  ansible.builtin.template:
    src: etc/chrony.conf.j2
    dest: /etc/chrony.conf
    owner: root
    group: root
    mode: 'go-wx'

- name: "2.3.3 | PATCH | Ensure chrony is not run as the root user"
  when:
    - rhel9cis_rule_2_3_3
    - not system_is_container
  tags:
    - level1-server
    - level1-workstation
    - patch
    - rule_2.3.3
  ansible.builtin.lineinfile:
    path: /etc/sysconfig/chronyd
    regexp: '^OPTIONS="(?!.* -u chrony.*)(.*)"'
    line: OPTIONS="\1 -u chrony"
    create: true
    backrefs: true
    mode: 'go-wx'
---

- name: "2.3.1 | PATCH | Ensure telnet client is not installed"
  ansible.builtin.package:
    name: telnet
    state: absent
  when:
    - not rhel9cis_telnet_required
    - "'telnet' in ansible_facts.packages"
    - rhel9cis_rule_2_3_1
  tags:
    - level1-server
    - level1-workstation
    - patch
    - telnet
    - rule_2.3.1

- name: "2.3.2 | PATCH | Ensure LDAP client is not installed"
  ansible.builtin.package:
    name: openldap-clients
    state: absent
  when:
    - not rhel9cis_openldap_clients_required
    - "'openldap-clients' in ansible_facts.packages"
    - rhel9cis_rule_2_3_2
  tags:
    - level1-server
    - level1-workstation
    - patch
    - ldap
    - rule_2.3.2

- name: "2.3.3 | PATCH | Ensure TFTP client is not installed"
  ansible.builtin.package:
    name: tftp
    state: absent
  when:
- not rhel9cis_tftp_client
- "'tftp' in ansible_facts.packages"
- rhel9cis_rule_2_3_3
tags:
- level1-server
- level1-workstation
- patch
- tftp
- rule_2.3.3
- name: "2.3.4 | PATCH | Ensure FTP client is not installed"
ansible.builtin.package:
name: ftp
state: absent
when:
- not rhel9cis_tftp_client
- "'ftp' in ansible_facts.packages"
- rhel9cis_rule_2_3_4
tags:
- level1-server
- level1-workstation
- patch
- ftp
- rule_2.3.4

View file

@ -1,173 +0,0 @@
---
- name: "2.4.1.1 | PATCH | Ensure cron daemon is enabled"
when: rhel9cis_rule_2_4_1_1
tags:
- level1-server
- level1-workstation
- patch
- cron
- rule_2.4.1.1
- NIST800-53R5_CM-1
- NIST800-53R5_CM-2
- NIST800-53R5_CM-6
- NIST800-53R5_CM-7
- NIST800-53R5_IA-5
ansible.builtin.service:
name: crond
enabled: true
- name: "2.4.1.2 | PATCH | Ensure permissions on /etc/crontab are configured"
when: rhel9cis_rule_2_4_1_2
tags:
- level1-server
- level1-workstation
- patch
- cron
- rule_2.4.1.2
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.file:
path: /etc/crontab
owner: root
group: root
mode: 'og-rwx'
- name: "2.4.1.3 | PATCH | Ensure permissions on /etc/cron.hourly are configured"
when: rhel9cis_rule_2_4_1_3
tags:
- level1-server
- level1-workstation
- patch
- cron
- rule_2.4.1.3
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.file:
path: /etc/cron.hourly
state: directory
owner: root
group: root
mode: 'og-rwx'
- name: "2.4.1.4 | PATCH | Ensure permissions on /etc/cron.daily are configured"
when: rhel9cis_rule_2_4_1_4
tags:
- level1-server
- level1-workstation
- patch
- cron
- rule_2.4.1.4
ansible.builtin.file:
path: /etc/cron.daily
state: directory
owner: root
group: root
mode: 'og-rwx'
- name: "2.4.1.5 | PATCH | Ensure permissions on /etc/cron.weekly are configured"
when: rhel9cis_rule_2_4_1_5
tags:
- level1-server
- level1-workstation
- patch
- rule_2.4.1.5
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.file:
path: /etc/cron.weekly
state: directory
owner: root
group: root
mode: 'og-rwx'
- name: "2.4.1.6 | PATCH | Ensure permissions on /etc/cron.monthly are configured"
when: rhel9cis_rule_2_4_1_6
tags:
- level1-server
- level1-workstation
- patch
- rule_2.4.1.6
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.file:
path: /etc/cron.monthly
state: directory
owner: root
group: root
mode: 'og-rwx'
- name: "2.4.1.7 | PATCH | Ensure permissions on /etc/cron.d are configured"
when: rhel9cis_rule_2_4_1_7
tags:
- level1-server
- level1-workstation
- patch
- cron
- rule_2.4.1.7
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
ansible.builtin.file:
path: /etc/cron.d
state: directory
owner: root
group: root
mode: 'og-rwx'
- name: "2.4.1.8 | PATCH | Ensure crontab is restricted to authorized users"
when: rhel9cis_rule_2_4_1_8
tags:
- level1-server
- level1-workstation
- patch
- cron
- rule_2.4.1.8
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
block:
- name: "2.4.1.8 | PATCH | Ensure crontab is restricted to authorized users | Remove cron.deny"
ansible.builtin.file:
path: /etc/cron.deny
state: absent
- name: "2.4.1.8 | PATCH | Ensure crontab is restricted to authorized users | Check if cron.allow exists"
ansible.builtin.stat:
path: "/etc/cron.allow"
register: discovered_cron_allow_state
- name: "2.4.1.8 | PATCH | Ensure crontab is restricted to authorized users | Ensure cron.allow is restricted to authorized users"
ansible.builtin.file:
path: /etc/cron.allow
state: '{{ "file" if discovered_cron_allow_state.stat.exists else "touch" }}'
owner: root
group: root
mode: 'u-x,g-wx,o-rwx'
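    # Note: the conditional state above leaves an existing cron.allow in place ("file" only
    # enforces owner/group/mode) and creates an empty one ("touch") when the file is missing,
    # so authorized users can then be added to it.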
- name: "2.4.2.1 | PATCH | Ensure at is restricted to authorized users"
when: rhel9cis_rule_2_4_2_1
tags:
- level1-server
- level1-workstation
- patch
- cron
- rule_2.4.2.1
- NIST800-53R5_AC-3
- NIST800-53R5_MP-2
block:
- name: "2.4.2.1 | PATCH | Ensure at is restricted to authorized users | Remove at.deny"
ansible.builtin.file:
path: /etc/at.deny
state: absent
- name: "2.4.2.1 | PATCH | Ensure at is restricted to authorized users | Check if at.allow exists"
ansible.builtin.stat:
path: "/etc/at.allow"
register: discovered_at_allow_state
- name: "2.4.2.1 | PATCH | Ensure at is restricted to authorized users | Ensure at.allow is restricted to authorized users"
ansible.builtin.file:
path: /etc/at.allow
state: '{{ "file" if discovered_at_allow_state.stat.exists else "touch" }}'
owner: root
group: root
mode: 'u-x,g-wx,o-rwx'

View file

@ -0,0 +1,39 @@
---
- name: "2.4 | AUDIT | Ensure nonessential services listening on the system are removed or masked"
block:
- name: "2.4 | AUDIT | Ensure nonessential services listening on the system are removed or masked | Get list of services"
ansible.builtin.shell: systemctl list-units --type=service
changed_when: false
failed_when: false
check_mode: false
register: rhel9cis_2_4_services
- name: "2.4 | AUDIT | Ensure nonessential services listening on the system are removed or masked | Get list of sockets"
      ansible.builtin.shell: systemctl list-units --type=socket
changed_when: false
failed_when: false
check_mode: false
register: rhel9cis_2_4_sockets
- name: "2.4 | AUDIT | Ensure nonessential services listening on the system are removed or masked | Display list of services"
ansible.builtin.debug:
msg:
- "Warning!! Below are the list of services and sockets, both active and inactive"
- "Please review to make sure all are essential"
- "{{ rhel9cis_2_4_services.stdout_lines }}"
- "{{ rhel9cis_2_4_sockets.stdout_lines }}"
- name: "2.4 | AUDIT | Ensure nonessential services listening on the system are removed or masked | Warn Count"
ansible.builtin.import_tasks: warning_facts.yml
vars:
warn_control_id: '2.4'
when:
- rhel9cis_rule_2_4
tags:
- level1-server
- level1-workstation
- manual
- audit
- services
- rule_2.4
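# For context: warning_facts.yml is not shown in this diff. In the ansible-lockdown roles it
# normally just increments a warning counter and records the control ID passed in via
# warn_control_id so a summary can be reported at the end of the run. A minimal sketch of that
# idea (task and variable names here are assumptions, not taken from this changeset):
#
# - name: "WARNING | Record warning for control"
#   ansible.builtin.set_fact:
#     warn_count: "{{ warn_count | default(0) | int + 1 }}"
#     warn_control_list: "{{ warn_control_list | default('') ~ ' [' ~ warn_control_id ~ ']' }}"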

View file

@ -1,17 +1,13 @@
---
- name: "SECTION | 2.1 | Special Purpose Services"
  ansible.builtin.import_tasks:
    file: cis_2.1.x.yml

- name: "SECTION | 2.2 | Service Clients"
  ansible.builtin.import_tasks:
    file: cis_2.2.x.yml

- name: "SECTION | 2.3 | Time Synchronization"
  ansible.builtin.import_tasks:
    file: cis_2.3.x.yml

- name: "SECTION | 2.4 | Job Schedulers"
  ansible.builtin.import_tasks:
    file: cis_2.4.x.yml
---
- name: "SECTION | 2.1 | Time Synchronization"
  ansible.builtin.import_tasks: cis_2.1.x.yml

- name: "SECTION | 2.2 | Special Purpose Services"
  ansible.builtin.import_tasks: cis_2.2.x.yml

- name: "SECTION | 2.3 | Service Clients"
  ansible.builtin.import_tasks: cis_2.3.x.yml

- name: "SECTION | 2.4 | Nonessential services removed"
  ansible.builtin.import_tasks: cis_2.4.yml

View file

@ -3,107 +3,81 @@
# The CIS Control wants IPv6 disabled if not in use.
# We are using the rhel9cis_ipv6_required to specify if you have IPv6 in use
- name: "3.1.1 | PATCH | Ensure IPv6 status is identified"
  when:
    - not rhel9cis_ipv6_required
    - rhel9cis_rule_3_1_1
  tags:
    - level1-server
    - level1-workstation
    - manual
    - patch
    - ipv6
    - networking
    - rule_3.1.1
    - NIST800-53R5_CM-7
  block:
    - name: "3.1.1 | PATCH | Ensure IPv6 status is identified | Set vars for sysctl template"
      when: "'sysctl' in rhel9cis_ipv6_disable_method"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv6_route: true

    - name: "3.1.1 | AUDIT | Ensure IPv6 status is identified | Message out implementation info"
      when: "'sysctl' in rhel9cis_ipv6_disable_method"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-disable_ipv6.conf"

    - name: "3.1.1 | AUDIT | Ensure IPv6 status is identified | Find IPv6 status"
      when: "'kernel' in rhel9cis_ipv6_disable_method"
      ansible.builtin.command: grubby --info=ALL
      changed_when: false
      failed_when: false
      register: discovered_rhel9cis_3_1_1_ipv6_status

    - name: "3.1.1 | PATCH | Ensure IPv6 status is identified | Disable IPV6 via Kernel"
      when:
        - "'kernel' in rhel9cis_ipv6_disable_method"
        - "'ipv6.disable=1' not in discovered_rhel9cis_3_1_1_ipv6_status.stdout"
      ansible.builtin.shell: grubby --update-kernel=ALL --args="ipv6.disable=1"

- name: "3.1.2 | PATCH | Ensure wireless interfaces are disabled"
  when:
    - rhel9cis_rule_3_1_2
    - discover_wireless_adapters.rc == 0
  tags:
    - level1-server
    - patch
    - rule_3.1.2
    - wireless
    - NIST800-53R5_CM-7
  vars:
    warn_control_id: '3.1.2'
  block:
    - name: "3.1.2 | PATCH | Ensure wireless interfaces are disabled | Check for network-manager tool"
      when: "rhel9cis_network_manager_package_name in ansible_facts.packages"
      ansible.builtin.command: nmcli radio wifi
      changed_when: false
      failed_when: false
      check_mode: false
      register: discovered_wifi_status

    - name: "3.1.2 | PATCH | Ensure wireless interfaces are disabled | Disable wireless if network-manager installed"
      when:
        - "rhel9cis_network_manager_package_name in ansible_facts.packages"
        - "'enabled' in discovered_wifi_status.stdout"
      ansible.builtin.command: nmcli radio all off
      changed_when: discovered_nmcli_radio_off.rc == 0
      register: discovered_nmcli_radio_off

    - name: "3.1.2 | PATCH | Ensure wireless interfaces are disabled | Warn about wireless if network-manager not installed"
      when: "rhel9cis_network_manager_package_name not in ansible_facts.packages"
      ansible.builtin.debug:
        msg: "Warning!! You need to disable wireless interfaces manually since network-manager is not installed"

    - name: "3.1.2 | PATCH | Ensure wireless interfaces are disabled | Set warning count"
      when: "rhel9cis_network_manager_package_name not in ansible_facts.packages"
      ansible.builtin.import_tasks:
        file: warning_facts.yml

- name: "3.1.3 | PATCH | Ensure bluetooth services are not in use"
  when: rhel9cis_rule_3_1_3
  tags:
    - level1-server
    - level2-workstation
    - patch
    - bluetooth
    - rule_3.1.3
    - NIST800-53R5_CM-7
  block:
    - name: "3.1.3 | PATCH | Ensure bluetooth services are not in use | pkg"
      when:
        - not rhel9cis_bluetooth_service
        - not rhel9cis_bluetooth_mask
      ansible.builtin.package:
        name: bluez
        state: absent

    - name: "3.1.3 | PATCH | Ensure bluetooth services are not in use | mask"
      when:
        - not rhel9cis_bluetooth_service
        - rhel9cis_bluetooth_mask
      notify: Systemd daemon reload
      ansible.builtin.systemd:
        name: bluetooth.service
        enabled: false
        state: stopped
        masked: true
# The CIS Control wants IPv6 disabled if not in use.
# We are using the rhel9cis_ipv6_required to specify if you have IPv6 in use
- name: "3.1.1 | PATCH | Ensure IPv6 status is identified"
  block:
    - name: "3.1.1 | PATCH | Ensure IPv6 status is identified | refresh"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv6_route: true

    - name: "3.1.1 | PATCH | Ensure IPv6 status is identified | disable"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-disable_ipv6.conf"
  when:
    - not rhel9cis_ipv6_required
    - rhel9cis_rule_3_1_1
  tags:
    - level1-server
    - level1-workstation
    - manual
    - patch
    - ipv6
    - networking
    - rule_3.1.1

- name: "3.1.2 | PATCH | Ensure wireless interfaces are disabled"
  block:
    - name: "3.1.2 | AUDIT | Ensure wireless interfaces are disabled | Check if nmcli command is available"
      ansible.builtin.shell: rpm -q NetworkManager
      changed_when: false
      failed_when: false
      check_mode: false
      register: rhel_09_nmcli_available

    - name: "3.1.2 | AUDIT | Ensure wireless interfaces are disabled | Check if wifi is enabled"
      ansible.builtin.shell: nmcli radio wifi
      register: rhel_09_wifi_enabled
      changed_when: rhel_09_wifi_enabled.stdout != "disabled"
      failed_when: false
      when: rhel_09_nmcli_available.rc == 0

    - name: "3.1.2 | PATCH | Ensure wireless interfaces are disabled | Disable wifi if enabled"
      ansible.builtin.shell: nmcli radio all off
      changed_when: false
      failed_when: false
      when: rhel_09_wifi_enabled is changed
  when:
    - rhel9cis_rule_3_1_2
  tags:
    - level1-server
    - patch
    - wireless
    - rule_3.1.2

- name: "3.1.3 | PATCH | Ensure TIPC is disabled"
  block:
    - name: "3.1.3 | PATCH | Ensure TIPC is disabled"
      ansible.builtin.template:
        src: "etc/modprobe.d/modprobe.conf.j2"
        dest: "/etc/modprobe.d/{{ item }}.conf"
        mode: "0600"
        owner: root
        group: root
      loop:
        - tipc
      # note the item used in the template

    - name: "3.1.3 | PATCH | Ensure TIPC is disabled | blacklist"
      ansible.builtin.lineinfile:
        path: /etc/modprobe.d/blacklist.conf
        regexp: "^(#)?blacklist tipc(\\s|$)"
        line: "blacklist tipc"
        create: true
        mode: 0600
  when:
    - rhel9cis_rule_3_1_3
  tags:
    - level2-server
    - level2-workstation
    - patch
    - tipc
    - rule_3.1.3
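# Quick reference for the variables steering the 3.1.1 tasks on both sides of this change
# (values are illustrative, not the shipped defaults):
#
#   rhel9cis_ipv6_required: false          # false = IPv6 is unused, so rule 3.1.1 disables it
#   rhel9cis_ipv6_disable_method: sysctl   # 'sysctl' writes /etc/sysctl.d/60-disable_ipv6.conf,
#                                          # 'kernel' adds ipv6.disable=1 to the kernel args via grubby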

View file

@ -1,121 +1,52 @@
---

- name: "3.2.1 | PATCH | Ensure dccp kernel module is not available"
  when: rhel9cis_rule_3_2_1
  tags:
    - level2-server
    - level2-workstation
    - patch
    - rule_3.2.1
    - dccp
    - NIST800-53R5_CM-7
    - NIST800-53R5_SI-4
  block:
    - name: "3.2.1 | PATCH | Ensure dccp kernel module is not available | modprobe"
      ansible.builtin.lineinfile:
        path: /etc/modprobe.d/dccp.conf
        regexp: '^(#)?install dccp(\\s|$)'
        line: "{{ item }}"
        create: true
        mode: 'u-x,go-rwx'
      loop:
        - install dccp /bin/true
        - blacklist dccp

    - name: "3.2.1 | PATCH | Ensure dccp kernel module is not available | blacklist"
      ansible.builtin.lineinfile:
        path: /etc/modprobe.d/blacklist.conf
        regexp: "^(#)?blacklist dccp(\\s|$)"
        line: "blacklist dccp"
        create: true
        mode: 'u-x,go-rwx'

- name: "3.2.2 | PATCH | Ensure tipc kernel module is not available"
  when: rhel9cis_rule_3_2_2
  tags:
    - level2-server
    - level2-workstation
    - patch
    - rule_3.2.2
    - tipc
    - NIST800-53R5_CM-7
    - NIST800-53R5_SI-4
  block:
    - name: "3.2.2 | PATCH | Ensure tipc kernel module is not available | modprobe"
      ansible.builtin.lineinfile:
        path: /etc/modprobe.d/tipc.conf
        regexp: '^(#)?install tipc(\\s|$)'
        line: "{{ item }}"
        create: true
        mode: 'u-x,go-rwx'
      loop:
        - install tipc /bin/true
        - blacklist tipc

    - name: "3.2.2 | PATCH | Ensure tipc kernel module is not available | blacklist"
      ansible.builtin.lineinfile:
        path: /etc/modprobe.d/blacklist.conf
        regexp: "^(#)?blacklist tipc(\\s|$)"
        line: "blacklist tipc"
        create: true
        mode: 'u-x,go-rwx'

- name: "3.2.3 | PATCH | Ensure rds kernel module is not available"
  when: rhel9cis_rule_3_2_3
  tags:
    - level2-server
    - level2-workstation
    - patch
    - rule_3.2.3
    - rds
    - NIST800-53R5_CM-7
    - NIST800-53R5_SI-4
  block:
    - name: "3.2.3 | PATCH | Ensure rds kernel module is not available | modprobe"
      ansible.builtin.lineinfile:
        path: /etc/modprobe.d/rds.conf
        regexp: '^(#)?install rds(\\s|$)'
        line: "{{ item }}"
        create: true
        mode: 'u-x,go-rwx'
      loop:
        - install rds /bin/true
        - blacklist rds

    - name: "3.2.3 | PATCH | Ensure rds kernel module is not available | blacklist"
      ansible.builtin.lineinfile:
        path: /etc/modprobe.d/blacklist.conf
        regexp: "^(#)?blacklist rds(\\s|$)"
        line: "blacklist rds"
        create: true
        mode: 'u-x,go-rwx'

- name: "3.2.4 | PATCH | Ensure sctp kernel module is not available"
  when: rhel9cis_rule_3_2_4
  tags:
    - level2-server
    - level2-workstation
    - patch
    - rule_3.2.4
    - sctp
    - NIST800-53R5_CM-7
    - NIST800-53R5_SI-4
  block:
    - name: "3.2.4 | PATCH | Ensure sctp kernel module is not available | modprobe"
      ansible.builtin.lineinfile:
        path: /etc/modprobe.d/sctp.conf
        regexp: '^(#)?install sctp(\\s|$)'
        line: "{{ item }}"
        create: true
        mode: 'u-x,go-rwx'
      loop:
        - install sctp /bin/true
        - blacklist sctp

    - name: "3.2.4 | PATCH | Ensure sctp kernel module is not available | blacklist"
      ansible.builtin.lineinfile:
        path: /etc/modprobe.d/blacklist.conf
        regexp: "^(#)?blacklist sctp(\\s|$)"
        line: "blacklist sctp"
        create: true
        mode: 'u-x,go-rwx'
---

- name: "3.2.1 | PATCH | Ensure IP forwarding is disabled"
  block:
    - name: "3.2.1 | PATCH | Ensure IP forwarding is disabled | Disable IPv4 forwarding | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.2.1 | PATCH | Ensure IP forwarding is disabled | Disable IPv4 forwarding"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

    - name: "3.2.1 | PATCH | Ensure IP forwarding is disabled | IPv6"
      block:
        - name: "3.2.1 | PATCH | Ensure IP forwarding is disabled | Disable IPv6 forwarding | Set Fact"
          ansible.builtin.set_fact:
            rhel9cis_flush_ipv6_route: true

        - name: "3.2.1 | PATCH | Ensure IP forwarding is disabled | Disable IPv6 forwarding"
          ansible.builtin.debug:
            msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv6_sysctl.conf"
      when: rhel9cis_ipv6_required
  when:
    - not rhel9cis_is_router
    - rhel9cis_rule_3_2_1
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.2.1

- name: "3.2.2 | PATCH | Ensure packet redirect sending is disabled"
  block:
    - name: "3.2.2 | PATCH | Ensure packet redirect sending is disabled | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.2.2 | PATCH | Ensure packet redirect sending is disabled"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"
  when:
    - not rhel9cis_is_router
    - rhel9cis_rule_3_2_2
  tags:
    - level1-server
    - level1-workstation
    - patch
    - sysctl
    - rule_3.2.2

View file

@ -1,298 +1,194 @@
---

- name: "3.3.1 | PATCH | Ensure IP forwarding is disabled"
  when:
    - not rhel9cis_is_router
    - rhel9cis_rule_3_3_1
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.1
    - NIST800-53R5_CM-1
    - NIST800-53R5_CM-2
    - NIST800-53R5_CM-6
    - NIST800-53R5_CM-7
    - NIST800-53R5_IA-5
  block:
    - name: "3.3.1 | PATCH | Ensure IP forwarding is disabled | Disable IPv4 forwarding | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.1 | PATCH | Ensure IP forwarding is disabled | Disable IPv4 forwarding"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

    - name: "3.3.1 | PATCH | Ensure IP forwarding is disabled | IPv6"
      when: rhel9cis_ipv6_required
      block:
        - name: "3.3.1 | PATCH | Ensure IP forwarding is disabled | Disable IPv6 forwarding | Set Fact"
          ansible.builtin.set_fact:
            rhel9cis_flush_ipv6_route: true

        - name: "3.3.1 | PATCH | Ensure IP forwarding is disabled | Disable IPv6 forwarding"
          ansible.builtin.debug:
            msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv6_sysctl.conf"

- name: "3.3.2 | PATCH | Ensure packet redirect sending is disabled"
  when:
    - not rhel9cis_is_router
    - rhel9cis_rule_3_3_2
  tags:
    - level1-server
    - level1-workstation
    - patch
    - sysctl
    - rule_3.3.2
    - NIST800-53R5_CM-1
    - NIST800-53R5_CM-2
    - NIST800-53R5_CM-6
    - NIST800-53R5_CM-7
    - NIST800-53R5_IA-5
  block:
    - name: "3.3.2 | PATCH | Ensure packet redirect sending is disabled | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.2 | PATCH | Ensure packet redirect sending is disabled"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

- name: "3.3.3 | PATCH | Ensure bogus ICMP responses are ignored"
  when: rhel9cis_rule_3_3_3
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.3
    - NIST800-53R5_CM-1
    - NIST800-53R5_CM-2
    - NIST800-53R5_CM-6
    - NIST800-53R5_CM-7
    - NIST800-53R5_IA-5
  block:
    - name: "3.3.3 | PATCH | Ensure bogus ICMP responses are ignored | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.3 | PATCH | Ensure bogus ICMP responses are ignored"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

- name: "3.3.4 | PATCH | Ensure broadcast ICMP requests are ignored"
  when: rhel9cis_rule_3_3_4
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.4
    - NIST800-53R5_CM-1
    - NIST800-53R5_CM-2
    - NIST800-53R5_CM-6
    - NIST800-53R5_CM-7
    - NIST800-53R5_IA-5
  block:
    - name: "3.3.4 | PATCH | Ensure broadcast ICMP requests are ignored | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.4 | PATCH | Ensure broadcast ICMP requests are ignored"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

- name: "3.3.5 | PATCH | Ensure ICMP redirects are not accepted"
  when: rhel9cis_rule_3_3_5
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.5
    - NIST800-53R5_CM-1
    - NIST800-53R5_CM-2
    - NIST800-53R5_CM-6
    - NIST800-53R5_CM-7
    - NIST800-53R5_IA-5
  block:
    - name: "3.3.5 | PATCH | Ensure ICMP redirects are not accepted | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.5 | PATCH | Ensure ICMP redirects are not accepted"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

    - name: "3.3.5 | PATCH | Ensure ICMP redirects are not accepted | IPv6"
      when: rhel9cis_ipv6_required
      block:
        - name: "3.3.5 | PATCH | Ensure ICMP redirects are not accepted | IPv6 | Set Fact"
          ansible.builtin.set_fact:
            rhel9cis_flush_ipv6_route: true

        - name: "3.3.5 | PATCH | Ensure ICMP redirects are not accepted | IPv6"
          ansible.builtin.debug:
            msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv6_sysctl.conf"

- name: "3.3.6 | PATCH | Ensure secure ICMP redirects are not accepted"
  when: rhel9cis_rule_3_3_6
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.6
    - NIST800-53R5_CM-1
    - NIST800-53R5_CM-2
    - NIST800-53R5_CM-6
    - NIST800-53R5_CM-7
    - NIST800-53R5_IA-5
  block:
    - name: "3.3.6 | PATCH | Ensure secure ICMP redirects are not accepted | IPv4 | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.6 | PATCH | Ensure secure ICMP redirects are not accepted | IPv4"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

    - name: "3.3.6 | PATCH | Ensure secure ICMP redirects are not accepted | IPv6"
      when: rhel9cis_ipv6_required
      block:
        - name: "3.3.6 | PATCH | Ensure secure ICMP redirects are not accepted | IPv6 | Set Fact"
          ansible.builtin.set_fact:
            rhel9cis_flush_ipv6_route: true

        - name: "3.3.6 | PATCH | Ensure secure ICMP redirects are not accepted | IPv6"
          ansible.builtin.debug:
            msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv6_sysctl.conf"

- name: "3.3.7 | PATCH | Ensure Reverse Path Filtering is enabled"
  when: rhel9cis_rule_3_3_7
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.7
    - NIST800-53R5_CM-1
    - NIST800-53R5_CM-2
    - NIST800-53R5_CM-6
    - NIST800-53R5_CM-7
    - NIST800-53R5_IA-5
  block:
    - name: "3.3.7 | PATCH | Ensure Reverse Path Filtering is enabled | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.7 | PATCH | Ensure Reverse Path Filtering is enabled"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

- name: "3.3.8 | PATCH | Ensure source routed packets are not accepted"
  when: rhel9cis_rule_3_3_8
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.8
    - NIST800-53R5_CM-1
    - NIST800-53R5_CM-2
    - NIST800-53R5_CM-6
    - NIST800-53R5_CM-7
    - NIST800-53R5_IA-5
  block:
    - name: "3.3.8 | PATCH | Ensure source routed packets are not accepted | IPv4 | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.8 | PATCH | Ensure source routed packets are not accepted | IPv4"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

    - name: "3.3.8 | PATCH | Ensure source routed packets are not accepted | IPv6"
      when: rhel9cis_ipv6_required
      block:
        - name: "3.3.8 | PATCH | Ensure source routed packets are not accepted | IPv6 | Set Fact"
          ansible.builtin.set_fact:
            rhel9cis_flush_ipv6_route: true

        - name: "3.3.8 | PATCH | Ensure source routed packets are not accepted | IPv6"
          ansible.builtin.debug:
            msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv6_sysctl.conf"

- name: "3.3.9 | PATCH | Ensure suspicious packets are logged"
  when: rhel9cis_rule_3_3_9
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.9
    - NIST800-53R5_AU-3
  block:
    - name: "3.3.9 | PATCH | Ensure suspicious packets are logged | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.9 | PATCH | Ensure suspicious packets are logged"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

- name: "3.3.10 | PATCH | Ensure TCP SYN Cookies is enabled"
  when: rhel9cis_rule_3_3_10
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.10
    - NIST800-53R5_CM-1
    - NIST800-53R5_CM-2
    - NIST800-53R5_CM-6
    - NIST800-53R5_CM-7
    - NIST800-53R5_IA-5
  block:
    - name: "3.3.10 | PATCH | Ensure TCP SYN Cookies is enabled | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.10 | PATCH | Ensure TCP SYN Cookies is enabled"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

- name: "3.3.11 | PATCH | Ensure IPv6 router advertisements are not accepted"
  when:
    - rhel9cis_ipv6_required
    - rhel9cis_rule_3_3_11
  tags:
    - level2-server
    - level2-workstation
    - sysctl
    - patch
    - rule_3.3.11
    - NIST800-53R5_CM-1
    - NIST800-53R5_CM-2
    - NIST800-53R5_CM-6
    - NIST800-53R5_CM-7
    - NIST800-53R5_IA-5
  block:
    - name: "3.3.11 | PATCH | Ensure IPv6 router advertisements are not accepted | IPv6 | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv6_route: true

    - name: "3.3.11 | PATCH | Ensure IPv6 router advertisements are not accepted | IPv6"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv6_sysctl.conf"
---

- name: "3.3.1 | PATCH | Ensure source routed packets are not accepted"
  block:
    - name: "3.3.1 | PATCH | Ensure source routed packets are not accepted | IPv4 | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.1 | PATCH | Ensure source routed packets are not accepted | IPv4"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

    - name: "3.3.1 | PATCH | Ensure source routed packets are not accepted | IPv6"
      block:
        - name: "3.3.1 | PATCH | Ensure source routed packets are not accepted | IPv6 | Set Fact"
          ansible.builtin.set_fact:
            rhel9cis_flush_ipv6_route: true

        - name: "3.3.1 | PATCH | Ensure source routed packets are not accepted | IPv6"
          ansible.builtin.debug:
            msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv6_sysctl.conf"
      when: rhel9cis_ipv6_required
  when:
    - rhel9cis_rule_3_3_1
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.1

- name: "3.3.2 | PATCH | Ensure ICMP redirects are not accepted"
  block:
    - name: "3.3.2 | PATCH | Ensure ICMP redirects are not accepted | IPv4 | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.2 | PATCH | Ensure ICMP redirects are not accepted | IPv4"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"

    - name: "3.3.2 | PATCH | Ensure ICMP redirects are not accepted | IPv6"
      block:
        - name: "3.3.2 | PATCH | Ensure ICMP redirects are not accepted | IPv6 | Set Fact"
          ansible.builtin.set_fact:
            rhel9cis_flush_ipv6_route: true

        - name: "3.3.2 | PATCH | Ensure ICMP redirects are not accepted | IPv6"
          ansible.builtin.debug:
            msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv6_sysctl.conf"
      when: rhel9cis_ipv6_required
  when:
    - rhel9cis_rule_3_3_2
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.2

- name: "3.3.3 | PATCH | Ensure secure ICMP redirects are not accepted"
  block:
    - name: "3.3.3 | PATCH | Ensure secure ICMP redirects are not accepted | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.3 | PATCH | Ensure secure ICMP redirects are not accepted"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"
  when:
    - rhel9cis_rule_3_3_3
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.3

- name: "3.3.4 | PATCH | Ensure suspicious packets are logged"
  block:
    - name: "3.3.4 | PATCH | Ensure suspicious packets are logged | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.4 | PATCH | Ensure suspicious packets are logged"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"
  when:
    - rhel9cis_rule_3_3_4
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.4

- name: "3.3.5 | PATCH | Ensure broadcast ICMP requests are ignored"
  block:
    - name: "3.3.5 | PATCH | Ensure broadcast ICMP requests are ignored | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.5 | PATCH | Ensure broadcast ICMP requests are ignored"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"
  when:
    - rhel9cis_rule_3_3_5
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.5

- name: "3.3.6 | PATCH | Ensure bogus ICMP responses are ignored"
  block:
    - name: "3.3.6 | PATCH | Ensure bogus ICMP responses are ignored | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.6 | PATCH | Ensure bogus ICMP responses are ignored"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"
  when:
    - rhel9cis_rule_3_3_6
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.6

- name: "3.3.7 | PATCH | Ensure Reverse Path Filtering is enabled"
  block:
    - name: "3.3.7 | PATCH | Ensure Reverse Path Filtering is enabled | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.7 | PATCH | Ensure Reverse Path Filtering is enabled"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"
  when:
    - rhel9cis_rule_3_3_7
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.7

- name: "3.3.8 | PATCH | Ensure TCP SYN Cookies is enabled"
  block:
    - name: "3.3.8 | PATCH | Ensure TCP SYN Cookies is enabled | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv4_route: true

    - name: "3.3.8 | PATCH | Ensure TCP SYN Cookies is enabled"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv4_sysctl.conf"
  when:
    - rhel9cis_rule_3_3_8
  tags:
    - level1-server
    - level1-workstation
    - sysctl
    - patch
    - rule_3.3.8

- name: "3.3.9 | PATCH | Ensure IPv6 router advertisements are not accepted"
  block:
    - name: "3.3.9 | PATCH | Ensure IPv6 router advertisements are not accepted | IPv6 | Set Fact"
      ansible.builtin.set_fact:
        rhel9cis_sysctl_update: true
        rhel9cis_flush_ipv6_route: true

    - name: "3.3.9 | PATCH | Ensure IPv6 router advertisements are not accepted | IPv6"
      ansible.builtin.debug:
        msg: "Control being set via Handler 'update sysctl' which writes to /etc/sysctl.d/60-netipv6_sysctl.conf"
  when:
    - rhel9cis_ipv6_required
    - rhel9cis_rule_3_3_9
  tags:
    - level2-server
    - level2-workstation
    - sysctl
    - patch
    - rule_3.3.9

View file

@ -0,0 +1,59 @@
---
- name: "3.4.1.1 | PATCH | Ensure nftables is installed"
ansible.builtin.package:
name:
- nftables
state: present
when:
- rhel9cis_rule_3_4_1_1
- rhel9cis_firewall == 'nftables'
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_3.4.1.1
- name: "3.4.1.2 | PATCH | Ensure a single firewall configuration utility is in use"
block:
- name: "3.4.1.2 | PATCH | Ensure a single firewall configuration utility is in use | nftables"
ansible.builtin.systemd:
name: "{{ item }}"
masked: true
loop:
- firewalld
when:
- item in ansible_facts.packages
- rhel9cis_firewall == 'nftables'
- name: "3.4.1.2 | PATCH | Ensure a single firewall configuration utility is in use | firewalld"
ansible.builtin.systemd:
name: "{{ item }}"
masked: true
loop:
- nftables
when:
- item in ansible_facts.packages
- rhel9cis_firewall == 'firewalld'
- name: "3.4.1.2 | PATCH | Ensure a single firewall configuration utility is in use | package installed"
ansible.builtin.package:
name: "{{ rhel9cis_firewall }}"
state: installed
- name: "3.4.1.2 | PATCH | Ensure a single firewall configuration utility is in use | {{ rhel9cis_firewall }} started and enabled"
ansible.builtin.systemd:
name: "{{ rhel9cis_firewall }}"
enabled: true
state: started
when:
- rhel9cis_rule_3_4_1_2
tags:
- level1-server
- level1-workstation
- patch
- firewalld
- nftables
- rule_3.4.1.2
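# Firewall selection is driven by a single variable; the other utility's service is masked above.
# Example (value shown is illustrative, not necessarily the role default):
#
#   rhel9cis_firewall: firewalld   # or 'nftables'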

View file

@ -0,0 +1,299 @@
---
- name: "3.4.2.1 | PATCH | Ensure firewalld default zone is set"
block:
- name: "3.4.2.1 | AUDIT | Ensure firewalld default zone is set"
ansible.builtin.shell: "firewall-cmd --get-default-zone | grep {{ rhel9cis_default_zone }}"
changed_when: false
failed_when: ( firewalld_zone_set.rc not in [ 0, 1 ] )
register: firewalld_zone_set
- name: "3.4.2.1 | AUDIT | Ensure firewalld default zone is set"
ansible.builtin.command: firewall-cmd --set-default-zone="{{ rhel9cis_default_zone }}"
when: firewalld_zone_set.rc != 0
when:
- rhel9cis_firewall == "firewalld"
- rhel9cis_rule_3_4_2_1
tags:
- level1-server
- level1-workstation
- patch
- firewalld
- rule_3.4.2.1
- name: "3.4.2.2 | AUDIT | Ensure at least one nftables table exists"
block:
- name: "3.4.2.2 | AUDIT | Ensure a table exists | Check for tables"
ansible.builtin.command: nft list tables
changed_when: false
failed_when: false
register: rhel9cis_3_4_2_2_nft_tables
- name: "3.4.2.2 | AUDIT | Ensure an nftables table exists | Show existing tables"
ansible.builtin.debug:
msg:
- "Below are the current nft tables, please review"
- "{{ rhel9cis_3_4_2_2_nft_tables.stdout_lines }}"
when: rhel9cis_3_4_2_2_nft_tables.stdout | length > 0
- name: "3.4.2.2 | AUDIT | Ensure an nftables table exists | Alert on no tables"
ansible.builtin.debug:
msg:
- "Warning!! You currently have no nft tables, please review your setup"
- 'Use the command "nft create table inet <table name>" to create a new table'
when:
- rhel9cis_3_4_2_2_nft_tables.stdout | length == 0
- not rhel9cis_nft_tables_autonewtable
- name: "3.4.2.2 | AUDIT | Ensure an nftables table exists | Alert on no tables | warning count"
ansible.builtin.import_tasks: warning_facts.yml
when:
- rhel9cis_3_4_2_2_nft_tables.stdout | length == 0
- not rhel9cis_nft_tables_autonewtable
- name: "3.4.2.2 | PATCH | Ensure a table exists | Create table if needed"
ansible.builtin.command: nft create table inet "{{ rhel9cis_nft_tables_tablename }}"
failed_when: false
when: rhel9cis_nft_tables_autonewtable
vars:
warn_control_id: '3.4.2.2'
when:
- rhel9cis_firewall == "nftables"
- rhel9cis_rule_3_4_2_2
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_3.4.2.2
- name: "3.4.2.3 | PATCH | Ensure nftables base chains exist"
block:
- name: "3.4.2.3 | AUDIT | Ensure nftables base chains exist | Get current chains for INPUT"
ansible.builtin.shell: nft list ruleset | grep 'hook input'
changed_when: false
failed_when: false
register: rhel9cis_3_4_2_3_input_chains
- name: "3.4.2.3 | AUDIT | Ensure nftables base chains exist | Get current chains for FORWARD"
ansible.builtin.shell: nft list ruleset | grep 'hook forward'
changed_when: false
failed_when: false
register: rhel9cis_3_4_2_3_forward_chains
- name: "3.4.2.3 | AUDIT | Ensure nftables base chains exist | Get current chains for OUTPUT"
ansible.builtin.shell: nft list ruleset | grep 'hook output'
changed_when: false
failed_when: false
register: rhel9cis_3_4_2_3_output_chains
- name: "3.4.2.3 | AUDIT | Ensure nftables base chains exist | Display chains for review"
ansible.builtin.debug:
msg:
- "Below are the current INPUT chains"
- "{{ rhel9cis_3_4_2_3_input_chains.stdout_lines }}"
- "Below are the current FORWARD chains"
- "{{ rhel9cis_3_4_2_3_forward_chains.stdout_lines }}"
- "Below are teh current OUTPUT chains"
- "{{ rhel9cis_3_4_2_3_output_chains.stdout_lines }}"
when: not rhel9cis_nft_tables_autochaincreate
- name: "3.4.2.3 | PATCH | Ensure nftables base chains exist | Create chains if needed"
ansible.builtin.shell: "{{ item }}"
failed_when: false
loop:
- nft create chain inet "{{ rhel9cis_nft_tables_tablename }}" input { type filter hook input priority 0 \; }
- nft create chain inet "{{ rhel9cis_nft_tables_tablename }}" forward { type filter hook forward priority 0 \; }
- nft create chain inet "{{ rhel9cis_nft_tables_tablename }}" output { type filter hook output priority 0 \; }
when: rhel9cis_nft_tables_autochaincreate
when:
- rhel9cis_firewall == "nftables"
- rhel9cis_rule_3_4_2_3
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_3.4.2.3
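# Illustrative only: after the auto-create commands above, `nft list ruleset` is expected to show
# base chains along these lines (the table name comes from rhel9cis_nft_tables_tablename):
#
#   table inet <table name> {
#     chain input   { type filter hook input priority 0; policy accept; }
#     chain forward { type filter hook forward priority 0; policy accept; }
#     chain output  { type filter hook output priority 0; policy accept; }
#   }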
- name: "3.4.2.4 | PATCH | Ensure host based firewall loopback traffic is configured"
block:
- name: "3.4.2.4 | AUDIT | Ensure host based firewall loopback traffic is configured | Gather iif lo accept existence | nftables"
ansible.builtin.shell: nft list ruleset | awk '/hook input/,/}/' | grep 'iif "lo" accept'
changed_when: false
failed_when: false
register: rhel9cis_3_4_2_4_iiflo
- name: "3.4.2.4 | AUDIT | Ensure host based firewall loopback traffic is configured | Gather ip saddr existence | nftables"
ansible.builtin.shell: nft list ruleset | awk '/hook input/,/}/' | grep 'ip saddr'
changed_when: false
failed_when: false
register: rhel9cis_3_4_2_4_ipsaddr
- name: "3.4.2.4 | AUDIT | Ensure host based firewall loopback traffic is configured | Gather ip6 saddr existence | nftables"
ansible.builtin.shell: nft list ruleset | awk '/hook input/,/}/' | grep 'ip6 saddr'
changed_when: false
failed_when: false
register: rhel9cis_3_4_2_4_ip6saddr
- name: "3.4.2.4 | PATCH | Ensure host based firewall loopback traffic is configured | Set iif lo accept rule | nftables"
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input iif lo accept
when: '"iif \"lo\" accept" not in rhel9cis_3_4_2_4_iiflo.stdout'
- name: "3.4.2.4 | PATCH | Ensure host based firewall loopback traffic is configured | Set ip sddr rule | nftables"
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input ip saddr 127.0.0.0/8 counter drop
when: '"ip saddr 127.0.0.0/8 counter packets 0 bytes 0 drop" not in rhel9cis_3_4_2_4_ipsaddr.stdout'
- name: "3.4.2.4 | PATCH | Ensure host based firewall loopback traffic is configured | Set ip6 saddr rule | nftables"
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input ip6 saddr ::1 counter drop
when: '"ip6 saddr ::1 counter packets 0 bytes 0 drop" not in rhel9cis_3_4_2_4_ip6saddr.stdout'
when:
- rhel9cis_firewall == "nftables"
- rhel9cis_rule_3_4_2_4
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_3.4.2.4
- name: "3.4.2.4 | PATCH | Ensure host based firewall loopback traffic is configured | firewalld"
ansible.posix.firewalld:
rich_rule: "{{ item }}"
zone: "{{ rhel9cis_default_zone }}"
permanent: true
immediate: true
state: enabled
loop:
- rule family="ipv4" source address="127.0.0.1" destination not address="127.0.0.1" drop
- rule family="ipv6" source address="::1" destination not address="::1" drop
when:
- rhel9cis_firewall == "firewalld"
- rhel9cis_rule_3_4_2_4
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_3.4.2.4
- name: "3.4.2.5 | AUDIT | Ensure firewalld drops unnecessary services and ports"
block:
- name: "3.4.2.5 | AUDIT | Ensure firewalld drops unnecessary services and ports | Get list of services and ports"
ansible.builtin.shell: "firewall-cmd --get-active-zones | awk '!/:/ {print $1}' | while read ZN; do firewall-cmd --list-all --zone=$ZN; done"
changed_when: false
failed_when: false
check_mode: false
register: rhel9cis_3_4_2_5_servicesport
- name: "3.4.2.5 | AUDIT | Ensure firewalld drops unnecessary services and ports | Show services and ports"
ansible.builtin.debug:
msg:
- "The items below are the services and ports that are accepted, please correct as needed"
- "{{ rhel9cis_3_4_2_5_servicesport.stdout_lines }}"
when:
- rhel9cis_rule_3_4_2_5
tags:
- level1-server
- level1-workstation
- manual
- audit
- rule_3.4.2.5
- name: "3.4.2.6 | PATCH | Ensure nftables established connections are configured"
block:
- name: "3.4.2.6 | AUDIT | EEnsure nftables established connections are configured | Gather incoming connection rules"
ansible.builtin.shell: nft list ruleset | awk '/hook input/,/}/' | grep -E 'ip protocol (tcp|udp|icmp) ct state'
changed_when: false
failed_when: false
register: rhel9cis_3_4_2_6_inconnectionrule
- name: "3.4.2.6| AUDIT | Ensure nftables established connections are configured | Gather outbound connection rules"
ansible.builtin.shell: nft list ruleset | awk '/hook output/,/}/' | grep -E 'ip protocol (tcp|udp|icmp) ct state'
changed_when: false
failed_when: false
register: rhel9cis_3_4_2_6_outconnectionrule
- name: "3.4.2.6| PATCH | Ensure nftables established connections are configured | Add input tcp established accept policy"
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input ip protocol tcp ct state established accept
when: '"ip protocol tcp ct state established accept" not in rhel9cis_3_4_2_6_inconnectionrule.stdout'
- name: "3.4.2.6 | PATCH | Ensure nftables established connections are configured | Add input udp established accept policy"
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input ip protocol udp ct state established accept
when: '"ip protocol udp ct state established accept" not in rhel9cis_3_4_2_6_inconnectionrule.stdout'
- name: "3.4.2.6 | PATCH | Ensure nftables established connections are configured | Add input icmp established accept policy"
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input ip protocol icmp ct state established accept
when: '"ip protocol icmp ct state established accept" not in rhel9cis_3_4_2_6_inconnectionrule.stdout'
- name: "3.4.2.6 | PATCH | Ensure nftables established connections are configured | Add output tcp new, related, established accept policy"
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" output ip protocol tcp ct state new,related,established accept
when: '"ip protocol tcp ct state established,related,new accept" not in rhel9cis_3_4_2_6_outconnectionrule.stdout'
- name: "3.4.2.6 | PATCH | Ensure nftables established connections are configured | Add output udp new, related, established accept policy"
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" output ip protocol udp ct state new,related,established accept
when: '"ip protocol udp ct state established,related,new accept" not in rhel9cis_3_4_2_6_outconnectionrule.stdout'
- name: "3.4.2.6 | PATCH | Ensure nftables established connections are configured | Add output icmp new, related, established accept policy"
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" output ip protocol icmp ct state new,related,established accept
when: '"ip protocol icmp ct state established,related,new accept" not in rhel9cis_3_4_2_6_outconnectionrule.stdout'
when:
- rhel9cis_firewall == "nftables"
- rhel9cis_rule_3_4_2_6
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_3.4.2.6
- name: "3.4.2.7 | PATCH | Ensure nftables default deny firewall policy"
block:
- name: "3.4.2.7 | AUDIT | Ensure nftables default deny firewall policy | Check for hook input deny policy"
ansible.builtin.shell: nft list table inet "{{ rhel9cis_nft_tables_tablename }}" | grep 'hook input'
failed_when: false
changed_when: false
register: rhel9cis_3_4_2_7_inputpolicy
- name: "3.4.2.7 | AUDIT | Ensure nftables default deny firewall policy | Check for hook forward deny policy"
ansible.builtin.shell: nft list table inet "{{ rhel9cis_nft_tables_tablename }}" | grep 'hook forward'
failed_when: false
changed_when: false
register: rhel9cis_3_4_2_7_forwardpolicy
- name: "3.4.2.7 | AUDIT | Ensure nftables default deny firewall policy | Check for hook output deny policy"
ansible.builtin.shell: nft list table inet "{{ rhel9cis_nft_tables_tablename }}" | grep 'hook output'
failed_when: false
changed_when: false
register: rhel9cis_3_4_2_7_outputpolicy
- name: "3.4.2.7 | AUDIT | Ensure nftables default deny firewall policy | Check for SSH allow"
ansible.builtin.shell: nft list table inet "{{ rhel9cis_nft_tables_tablename }}" | grep 'ssh'
failed_when: false
changed_when: false
register: rhel9cis_3_4_2_7_sshallowcheck
- name: "3.4.2.7 | PATCH | Ensure nftables default deny firewall policy | Enable SSH traffic"
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input tcp dport ssh accept
when: '"tcp dport ssh accept" not in rhel9cis_3_4_2_7_sshallowcheck.stdout'
- name: "3.4.2.7 | PATCH | Ensure nftables default deny firewall policy | Set hook input deny policy"
ansible.builtin.command: nft chain inet "{{ rhel9cis_nft_tables_tablename }}" input { policy drop \; }
when: '"type filter hook input priority 0; policy drop;" not in rhel9cis_3_4_2_7_inputpolicy.stdout'
- name: "3.4.2.7 | PATCH | Ensure nftables default deny firewall policy | Create hook forward deny policy"
ansible.builtin.command: nft chain inet "{{ rhel9cis_nft_tables_tablename }}" forward { policy drop \; }
when: '"type filter hook forward priority 0; policy drop;" not in rhel9cis_3_4_2_7_forwardpolicy.stdout'
- name: "3.4.2.7 | PATCH | Ensure nftables default deny firewall policy | Create hook output deny policy"
ansible.builtin.command: nft chain inet "{{ rhel9cis_nft_tables_tablename }}" output { policy drop \; }
when: '"type filter hook output priority 0; policy drop;" not in rhel9cis_3_4_2_7_outputpolicy.stdout'
when:
- rhel9cis_firewall == "nftables"
- rhel9cis_rule_3_4_2_7
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_3.4.2.7
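# Reviewer note: the 'tcp dport ssh accept' rule is added before the input/forward/output chains
# are switched to 'policy drop', so the default-deny policy cannot cut off the Ansible SSH session
# that is applying it.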

View file

@ -1,13 +1,16 @@
 ---
-- name: "SECTION | 3.1.x | Configure Network Devices"
-  ansible.builtin.import_tasks:
-    file: cis_3.1.x.yml
+- name: "SECTION | 3.1.x | Disable unused network protocols and devices"
+  ansible.builtin.import_tasks: cis_3.1.x.yml
-- name: "SECTION | 3.2.x | Configure Network Kernel Modules"
-  ansible.builtin.import_tasks:
-    file: cis_3.2.x.yml
+- name: "SECTION | 3.2.x | Network Parameters (Host Only)"
+  ansible.builtin.import_tasks: cis_3.2.x.yml
-- name: "SECTION | 3.3.x | Configure Network Kernel Parameters"
-  ansible.builtin.import_tasks:
-    file: cis_3.3.x.yml
+- name: "SECTION | 3.3.x | Network Parameters (host and Router)"
+  ansible.builtin.import_tasks: cis_3.3.x.yml
+- name: "SECTION | 3.4.1.x | Firewall configuration"
+  ansible.builtin.import_tasks: cis_3.4.1.x.yml
+- name: "SECTION | 3.4.2.x | Configure firewall"
+  ansible.builtin.import_tasks: cis_3.4.2.x.yml

View file

@ -0,0 +1,105 @@
---
- name: "4.1.1.1 | PATCH | Ensure auditd is installed"
block:
- name: "4.1.1.1 | PATCH | Ensure auditd is installed | Install auditd packages"
ansible.builtin.package:
name: audit
state: present
when: '"auditd" not in ansible_facts.packages'
- name: "4.1.1.1 | PATCH | Ensure auditd is installed | Install auditd-lib packages"
ansible.builtin.package:
name: audit-libs
state: present
when: '"auditd-lib" not in ansible_facts.packages'
when:
- rhel9cis_rule_4_1_1_1
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.1.1
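# Reviewer note on 4.1.1.2 and 4.1.1.3 below: the "sed 's/.$//'" strips the closing double quote
# from the GRUB_CMDLINE_LINUX="..." line, and the lineinfile 'line' value re-appends a literal '"'
# after the added parameter so the quoting stays balanced.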
- name: "4.1.1.2 | PATCH | Ensure auditing for processes that start prior to auditd is enabled"
block:
- name: "4.1.1.2 | AUDIT | Ensure auditing for processes that start prior to auditd is enabled | Get GRUB_CMDLINE_LINUX"
ansible.builtin.shell: grep 'GRUB_CMDLINE_LINUX=' /etc/default/grub | sed 's/.$//'
changed_when: false
failed_when: false
check_mode: false
register: rhel9cis_4_1_1_2_grub_cmdline_linux
- name: "4.1.1.2 | PATCH | Ensure auditing for processes that start prior to auditd is enabled | Replace existing setting"
ansible.builtin.replace:
path: /etc/default/grub
regexp: 'audit=.'
replace: 'audit=1'
notify: Grub2cfg
when: "'audit=' in rhel9cis_4_1_1_2_grub_cmdline_linux.stdout"
- name: "4.1.1.2 | PATCH | Ensure auditing for processes that start prior to auditd is enabled | Add audit setting if missing"
ansible.builtin.lineinfile:
path: /etc/default/grub
regexp: '^GRUB_CMDLINE_LINUX='
line: '{{ rhel9cis_4_1_1_2_grub_cmdline_linux.stdout }} audit=1"'
notify: Grub2cfg
when: "'audit=' not in rhel9cis_4_1_1_2_grub_cmdline_linux.stdout"
when:
- rhel9cis_rule_4_1_1_2
tags:
- level2-server
- level2-workstation
- patch
- auditd
- grub
- rule_4.1.1.2
- name: "4.1.1.3 | PATCH | Ensure audit_backlog_limit is sufficient"
block:
- name: "4.1.1.3 | AUDIT | Ensure audit_backlog_limit is sufficient | Get GRUB_CMDLINE_LINUX"
ansible.builtin.shell: grep 'GRUB_CMDLINE_LINUX=' /etc/default/grub | sed 's/.$//'
changed_when: false
failed_when: false
check_mode: false
register: rhel9cis_4_1_1_3_grub_cmdline_linux
- name: "4.1.1.3 | PATCH | Ensure audit_backlog_limit is sufficient | Replace existing setting"
ansible.builtin.replace:
path: /etc/default/grub
regexp: 'audit_backlog_limit=\d+'
replace: 'audit_backlog_limit={{ rhel9cis_audit_back_log_limit }}'
notify: Grub2cfg
when: "'audit_backlog_limit=' in rhel9cis_4_1_1_3_grub_cmdline_linux.stdout"
- name: "4.1.1.3 | PATCH | Ensure audit_backlog_limit is sufficient | Add audit_backlog_limit setting if missing"
ansible.builtin.lineinfile:
path: /etc/default/grub
regexp: '^GRUB_CMDLINE_LINUX='
line: '{{ rhel9cis_4_1_1_3_grub_cmdline_linux.stdout }} audit_backlog_limit={{ rhel9cis_audit_back_log_limit }}"'
notify: Grub2cfg
when: "'audit_backlog_limit=' not in rhel9cis_4_1_1_3_grub_cmdline_linux.stdout"
when:
- rhel9cis_rule_4_1_1_3
tags:
- level2-server
- level2-workstation
- patch
- auditd
- grub
- rule_4.1.1.3
- name: "4.1.1.4 | PATCH | Ensure auditd service is enabled"
ansible.builtin.systemd:
name: auditd
state: started
enabled: true
when:
- rhel9cis_rule_4_1_1_4
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.1.4

View file

@ -0,0 +1,65 @@
---
- name: "4.1.2.1 | PATCH | Ensure audit log storage size is configured"
ansible.builtin.lineinfile:
path: /etc/audit/auditd.conf
regexp: "^max_log_file( |=)"
line: "max_log_file = {{ rhel9cis_max_log_file_size }}"
notify: Restart auditd
when:
- rhel9cis_rule_4_1_2_1
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.2.1
- name: "4.1.2.2 | PATCH | Ensure audit logs are not automatically deleted"
ansible.builtin.lineinfile:
path: /etc/audit/auditd.conf
regexp: "^max_log_file_action"
line: "max_log_file_action = {{ rhel9cis_auditd['max_log_file_action'] }}"
notify: Restart auditd
when:
- rhel9cis_rule_4_1_2_2
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.2.2
- name: "4.1.2.3 | PATCH | Ensure system is disabled when audit logs are full"
ansible.builtin.lineinfile:
path: /etc/audit/auditd.conf
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
notify: Restart auditd
loop:
- { regexp: '^admin_space_left_action', line: 'admin_space_left_action = {{ rhel9cis_auditd.admin_space_left_action }}' }
- { regexp: '^action_mail_acct', line: 'action_mail_acct = {{ rhel9cis_auditd.action_mail_acct }}' }
- { regexp: '^space_left_action', line: 'space_left_action = {{ rhel9cis_auditd.space_left_action }}' }
when:
- rhel9cis_rule_4_1_2_3
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.2.3
- name: PATCH | Configure other keys for auditd.conf
ansible.builtin.lineinfile:
path: /etc/audit/auditd.conf
regexp: "^{{ item }}( |=)"
line: "{{ item }} = {{ rhel9cis_auditd_extra_conf[item] }}"
loop: "{{ rhel9cis_auditd_extra_conf.keys() }}"
notify: Restart auditd
when:
- rhel9cis_auditd_extra_conf.keys() | length > 0
tags:
- level2-server
- level2-workstation
- patch
- auditd
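# Illustrative example only (values not defined by this diff): the dictionary consumed by the task
# above may be populated with any standard auditd.conf keys, e.g.
# rhel9cis_auditd_extra_conf:
#   num_logs: 5
#   log_format: ENRICHED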

View file

@ -0,0 +1,292 @@
---
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.1 | PATCH | Ensure changes to system administration scope (sudoers) is collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_1
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.1
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.2 | PATCH | Ensure actions as another user are always logged"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_2
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.2
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.3 | PATCH | Ensure events that modify the sudo log file are collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_3
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.3
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.4 | PATCH | Ensure events that modify date and time information are collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_4
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.4
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.5 | PATCH | Ensure events that modify the system's network environment are collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_5
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.5
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.6 | PATCH | Ensure use of privileged commands is collected"
block:
- name: "4.1.3.6 | PATCH | Ensure use of privileged commands is collected"
ansible.builtin.shell: for i in $(df | grep '^/dev' | awk '{ print $NF }'); do find $i -xdev -type f -perm /6000 2>/dev/null; done
changed_when: false
failed_when: false
check_mode: false
register: priv_procs
- name: "4.1.3.6 | PATCH | Ensure use of privileged commands is collected"
ansible.builtin.set_fact:
update_audit_template: true
notify: update auditd
when:
- rhel9cis_rule_4_1_3_6
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.6
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.7 | PATCH | Ensure unsuccessful unauthorized file access attempts are collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_7
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.7
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.8 | PATCH | Ensure events that modify user/group information are collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_8
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.8
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.9 | PATCH | Ensure discretionary access control permission modification events are collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_9
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.9
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.10 | PATCH | Ensure successful file system mounts are collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_10
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.10
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.11 | PATCH | Ensure session initiation information is collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_11
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.11
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.12 | PATCH | Ensure login and logout events are collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_12
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.12
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.13 | PATCH | Ensure file deletion events by users are collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_13
tags:
- level2-server
- level2-workstation
- auditd
- patch
- rule_4.1.3.13
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.14 | PATCH | Ensure events that modify the system's Mandatory Access Controls are collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_14
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.14
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.15 | PATCH | Ensure successful and unsuccessful attempts to use the chcon command are recorded"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_15
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.15
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.16 | PATCH | Ensure successful and unsuccessful attempts to use the setfacl command are recorded"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_16
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.16
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.17 | PATCH | Ensure successful and unsuccessful attempts to use the chacl command are recorded"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_17
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.17
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.18 | PATCH | Ensure successful and unsuccessful attempts to use the usermod command are recorded"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_18
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.18
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.19 | PATCH | Ensure kernel module loading and unloading is collected"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_19
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.19
# All changes selected are managed by the POST audit and handlers to update
- name: "4.1.3.20 | PATCH | Ensure the audit configuration is immutable"
ansible.builtin.set_fact:
update_audit_template: true
when:
- rhel9cis_rule_4_1_3_20
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.3.20
- name: "4.1.3.21 | AUDIT | Ensure the running and on disk configuration is the same"
ansible.builtin.debug:
msg:
- "Please run augenrules --load if you suspect there is a configuration that is not active"
when:
- rhel9cis_rule_4_1_3_21
tags:
- level2-server
- level2-workstation
- manual
- patch
- auditd
- rule_4.1.3.21
- name: Auditd | 4.1.3 | Auditd controls updated
ansible.builtin.debug:
msg: "Auditd Controls handled in POST using template - updating /etc/auditd/rules.d/99_auditd.rules"
changed_when: false
when:
- update_audit_template
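# Illustrative example only: a line rendered into /etc/audit/rules.d/99_auditd.rules by the POST
# template for rule 4.1.3.1 would typically resemble
#   -w /etc/sudoers -p wa -k scope
# The exact rules depend on the template shipped with the role and the controls enabled above.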

View file

@ -0,0 +1,184 @@
---
- name: |
"4.1.4.1 | PATCH | Ensure audit log files are mode 0640 or less permissive"
"4.1.4.2 | PATCH | Ensure only authorized users own audit log files"
"4.1.4.3 | PATCH | Ensure only authorized groups are assigned ownership of audit log files"
block:
- name: "4.1.4.1 | AUDIT | Ensure audit log files are mode 0640 or less permissive | discover file"
ansible.builtin.shell: grep ^log_file /etc/audit/auditd.conf | awk '{ print $NF }'
changed_when: false
register: audit_discovered_logfile
- name: "4.1.4.1 | AUDIT | Ensure audit log files are mode 0640 or less permissive | stat file"
ansible.builtin.stat:
path: "{{ audit_discovered_logfile.stdout }}"
changed_when: false
register: auditd_logfile
- name: |
"4.1.4.1 | PATCH | Ensure audit log files are mode 0640 or less permissive"
"4.1.4.2 | PATCH | Ensure only authorized users own audit log files"
"4.1.4.3 | PATCH | Ensure only authorized groups are assigned ownership of audit log files"
ansible.builtin.file:
path: "{{ audit_discovered_logfile.stdout }}"
mode: "{% if auditd_logfile.stat.mode != '0600' %}0640{% endif %}"
owner: root
group: root
when:
- rhel9cis_rule_4_1_4_1 or
rhel9cis_rule_4_1_4_2 or
rhel9cis_rule_4_1_4_3
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.4.1
- rule_4.1.4.2
- rule_4.1.4.3
- name: "4.1.4.4 | PATCH | Ensure the audit log directory is 0750 or more restrictive"
block:
- name: "4.1.4.4 | AUDIT | Ensure the audit log directory is 0750 or more restrictive | get current permissions"
ansible.builtin.stat:
path: "{{ audit_discovered_logfile.stdout | dirname }}"
register: auditlog_dir
- name: "4.1.4.4 | PATCH | Ensure the audit log directory is 0750 or more restrictive | set"
ansible.builtin.file:
path: "{{ audit_discovered_logfile.stdout | dirname }}"
state: directory
mode: 0750
when: not auditlog_dir.stat.mode is match('07(0|5)0')
when:
- rhel9cis_rule_4_1_4_4
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.4.4
- name: "4.1.4.5 | PATCH | Ensure audit configuration files are 640 or more restrictive"
ansible.builtin.file:
path: "{{ item.path }}"
mode: 0640
loop: "{{ auditd_conf_files.files }}"
loop_control:
label: "{{ item.path }}"
when:
- not item.mode is match('06(0|4)0')
- rhel9cis_rule_4_1_4_5
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.4.5
- name: "4.1.4.6 | PATCH | Ensure audit configuration files are owned by root"
ansible.builtin.file:
path: "{{ item.path }}"
owner: root
loop: "{{ auditd_conf_files.files }}"
loop_control:
label: "{{ item.path }}"
when:
- rhel9cis_rule_4_1_4_6
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.4.6
- name: "4.1.4.7 | PATCH | Ensure audit configuration files belong to group root"
ansible.builtin.file:
path: "{{ item.path }}"
group: root
loop: "{{ auditd_conf_files.files }}"
loop_control:
label: "{{ item.path }}"
when:
- rhel9cis_rule_4_1_4_7
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.4.7
- name: "4.1.4.8 | PATCH | Ensure audit tools are 755 or more restrictive"
block:
- name: "4.1.4.8 | AUDIT | Get audit binary file stat | get current mode"
ansible.builtin.stat:
path: "{{ item }}"
register: "audit_bins"
loop:
- /sbin/auditctl
- /sbin/aureport
- /sbin/ausearch
- /sbin/autrace
- /sbin/auditd
- /sbin/augenrules
- name: "4.1.4.8 | PATCH | Ensure audit tools are 755 or more restrictive | set if required"
ansible.builtin.file:
path: "{{ item.item }}"
mode: 0750
loop: "{{ audit_bins.results }}"
loop_control:
label: "{{ item.item }}"
when: not item.stat.mode is match('07(0|5)0')
when:
- rhel9cis_rule_4_1_4_8
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.4.8
- name: "4.1.4.9 | PATCH | Ensure audit tools are owned by root"
ansible.builtin.file:
path: "{{ item }}"
owner: root
group: root
loop:
- /sbin/auditctl
- /sbin/aureport
- /sbin/ausearch
- /sbin/autrace
- /sbin/auditd
- /sbin/augenrules
when:
- rhel9cis_rule_4_1_4_9
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.4.9
- name: "4.1.4.10 | PATCH | Ensure audit tools belong to group root"
ansible.builtin.file:
path: "{{ item }}"
group: root
loop:
- /sbin/auditctl
- /sbin/aureport
- /sbin/ausearch
- /sbin/autrace
- /sbin/auditd
- /sbin/augenrules
when:
- rhel9cis_rule_4_1_4_10
tags:
- level2-server
- level2-workstation
- patch
- auditd
- rule_4.1.4.10

View file

@ -1,58 +0,0 @@
---
- name: "4.1.1 | PATCH | Ensure nftables is installed"
when:
- rhel9cis_rule_4_1_1
- rhel9cis_firewall == 'nftables'
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_4.1.1
- NIST800-53R5_CA-9
ansible.builtin.package:
name:
- nftables
state: present
- name: "4.1.2 | PATCH | Ensure a single firewall configuration utility is in use"
when: rhel9cis_rule_4_1_2
tags:
- level1-server
- level1-workstation
- patch
- firewalld
- nftables
- rule_4.1.2
block:
- name: "4.1.2 | PATCH | Ensure a single firewall configuration utility is in use | nftables"
when:
- item in ansible_facts.packages
- rhel9cis_firewall == 'nftables'
ansible.builtin.systemd:
name: "{{ item }}"
masked: true
loop:
- firewalld
- name: "4.1.2 | PATCH | Ensure a single firewall configuration utility is in use | firewalld"
when:
- item in ansible_facts.packages
- rhel9cis_firewall == 'firewalld'
ansible.builtin.systemd:
name: "{{ item }}"
masked: true
loop:
- nftables
- name: "4.1.2 | PATCH | Ensure a single firewall configuration utility is in use | package installed"
ansible.builtin.package:
name: "{{ rhel9cis_firewall }}"
state: installed
- name: "4.1.2 | PATCH | Ensure a single firewall configuration utility is in use | {{ rhel9cis_firewall }} started and enabled" # noqa name[template]
ansible.builtin.systemd:
name: "{{ rhel9cis_firewall }}"
enabled: true
state: started

View file

@ -0,0 +1,216 @@
---
- name: "4.2.1.1 | PATCH | Ensure rsyslog installed"
ansible.builtin.package:
name: rsyslog
state: present
when:
- "'rsyslog' not in ansible_facts.packages"
- rhel9cis_rule_4_2_1_1
tags:
- level1-server
- level1-workstation
- patch
- rsyslog
- rule_4.2.1.1
- name: "4.2.1.2 | PATCH | Ensure rsyslog Service is enabled"
ansible.builtin.systemd:
name: rsyslog
enabled: true
when:
- rhel9cis_rule_4_2_1_2
tags:
- level1-server
- level1-workstation
- patch
- rsyslog
- rule_4.2.1.2
- name: "4.2.1.3 | PATCH | Ensure journald is configured to send logs to rsyslog"
ansible.builtin.lineinfile:
path: /etc/systemd/journald.conf
regexp: "^#ForwardToSyslog=|^ForwardToSyslog="
line: ForwardToSyslog=yes
notify: Restart rsyslog
when:
- rhel9cis_rule_4_2_1_3
- rhel9cis_syslog == "rsyslog"
tags:
- level1-server
- level1-workstation
- patch
- rule_4.2.1.3
- name: "4.2.1.4 | PATCH | Ensure rsyslog default file permissions configured"
ansible.builtin.lineinfile:
path: /etc/rsyslog.conf
regexp: '^\$FileCreateMode'
line: '$FileCreateMode 0640'
notify: Restart rsyslog
when:
- rhel9cis_rule_4_2_1_4
tags:
- level1-server
- level1-workstation
- patch
- rsyslog
- rule_4.2.1.4
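# Reviewer note: in the rsyslog selectors added below, a leading '-' before a file path is the
# legacy "omit sync" marker, telling rsyslog not to flush the file after every write.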
- name: "4.2.1.5 | PATCH | Ensure logging is configured"
block:
- name: "4.2.1.5 | AUDIT | Ensure logging is configured | rsyslog current config message out"
ansible.builtin.shell: cat /etc/rsyslog.conf
changed_when: false
failed_when: false
check_mode: false
register: rhel_09_4_2_1_5_audit
- name: "4.2.1.5 | AUDIT | Ensure logging is configured | rsyslog current config message out"
ansible.builtin.debug:
msg:
- "These are the current logging configurations for rsyslog, please review:"
- "{{ rhel_09_4_2_1_5_audit.stdout_lines }}"
- name: "4.2.1.5 | PATCH | Ensure logging is configured | mail.* log setting"
ansible.builtin.blockinfile:
path: /etc/rsyslog.conf
marker: "# {mark} MAIL LOG SETTINGS - CIS benchmark - Ansible-lockdown"
block: |
# mail logging additions to meet CIS standards
mail.* -/var/log/mail
mail.info -/var/log/mail.info
mail.warning -/var/log/mail.warning
mail.err /var/log/mail.err
insertafter: '# Log all the mail messages in one place.'
notify: Restart rsyslog
when: rhel9cis_rsyslog_ansiblemanaged
- name: "4.2.1.5 | PATCH | Ensure logging is configured | news.crit log setting"
ansible.builtin.blockinfile:
path: /etc/rsyslog.conf
state: present
marker: "# {mark} NEWS LOG SETTINGS - CIS benchmark - Ansible-lockdown"
block: |
# news logging additions to meet CIS standards
news.crit -/var/log/news/news.crit
news.notice -/var/log/news/news.notice
insertafter: '# Save news errors of level crit and higher in a special file.'
notify: Restart rsyslog
when: rhel9cis_rsyslog_ansiblemanaged
- name: "4.2.1.5 | PATCH | Ensure logging is configured | Misc. log setting"
ansible.builtin.blockinfile:
path: /etc/rsyslog.conf
state: present
marker: "# {mark} MISC. LOG SETTINGS - CIS benchmark - Ansible-lockdown"
block: |
# misc. logging additions to meet CIS standards
*.=warning;*.=err -/var/log/warn
*.crit /var/log/warn
*.*;mail.none;news.none /var/log/messages
insertafter: '#### RULES ####'
notify: Restart rsyslog
when: rhel9cis_rsyslog_ansiblemanaged
- name: "4.2.1.5 | PATCH | Ensure logging is configured | Local log settings"
ansible.builtin.blockinfile:
path: /etc/rsyslog.conf
state: present
marker: "#{mark} LOCAL LOG SETTINGS - CIS benchmark - Ansible-lockdown"
block: |
# local log settings to meet CIS standards
local0,local1.* -/var/log/localmessages
local2,local3.* -/var/log/localmessages
local4,local5.* -/var/log/localmessages
local6,local7.* -/var/log/localmessages
*.emerg :omusrmsg:*
insertafter: '#### RULES ####'
notify: Restart rsyslog
- name: "4.2.1.5 | PATCH | Ensure logging is configured | Auth Settings"
ansible.builtin.blockinfile:
path: /etc/rsyslog.conf
state: present
marker: "#{mark} Auth SETTINGS - CIS benchmark - Ansible-lockdown"
block: |
# Private settings to meet CIS standards
auth,authpriv.* /var/log/secure
insertafter: '#### RULES ####'
notify: Restart rsyslog
- name: "4.2.1.5 | PATCH | Ensure logging is configured | Cron Settings"
ansible.builtin.blockinfile:
path: /etc/rsyslog.conf
state: present
marker: "#{mark} Cron SETTINGS - CIS benchmark - Ansible-lockdown"
block: |
# Cron settings to meet CIS standards
cron.* /var/log/cron
insertafter: '#### RULES ####'
notify: Restart rsyslog
when:
- rhel9cis_rule_4_2_1_5
tags:
- level1-server
- level1-workstation
- patch
- rsyslog
- rule_4.2.1.5
- name: "4.2.1.6 | PATCH | Ensure rsyslog is configured to send logs to a remote log host"
ansible.builtin.blockinfile:
path: /etc/rsyslog.conf
state: present
block: |
# target can be IP or FQDN
*.* action(type="omfwd" target="{{ rhel9cis_remote_log_host }}" port="{{ rhel9cis_remote_log_port }}" protocol="{{ rhel9cis_remote_log_protocol }}" action.resumeRetryCount="{{ rhel9cis_remote_log_retrycount }}" queue.type="LinkedList" queue.size="{{ rhel9cis_remote_log_queuesize }}")
insertafter: EOF
register: result
failed_when:
- result is failed
- result.rc != 257
notify: Restart rsyslog
when:
- rhel9cis_rule_4_2_1_6
- rhel9cis_remote_log_server
tags:
- level1-server
- level1-workstation
- patch
- rsyslog
- rule_4.2.1.6
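# Reviewer note: the omfwd action above is the RainerScript equivalent of the legacy
# '*.* @@loghost:port' (TCP) / '*.* @loghost:port' (UDP) forwarding syntax, with explicit queue
# and retry parameters.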
- name: "4.2.1.7 | PATCH | Ensure rsyslog is not configured to recieve logs from a remote client"
block:
- name: "4.2.1.7 | PATCH | Ensure rsyslog is not configured to recieve logs from a remote client. | When not log host"
ansible.builtin.replace:
path: /etc/rsyslog.conf
regexp: '{{ item }}'
replace: '#\1'
notify: Restart rsyslog
loop:
- '^(\$ModLoad imtcp)'
- '^(\$InputTCPServerRun)'
- '^(module\(load="imtcp"\))'
- '^(input\(type="imtcp")'
when: not rhel9cis_system_is_log_server
- name: "4.2.1.7 | PATCH | Ensure rsyslog is not configured to recieve logs from a remote clients. | When log host"
ansible.builtin.replace:
path: /etc/rsyslog.conf
regexp: '^#(.*{{ item }}.*)'
replace: '\1'
notify: Restart rsyslog
loop:
- 'ModLoad imtcp'
- 'InputTCPServerRun'
when: rhel9cis_system_is_log_server
when:
- rhel9cis_rule_4_2_1_7
tags:
- level1-server
- level1-workstation
- patch
- rsyslog
- rule_4.2.1.7

View file

@ -0,0 +1,199 @@
---
- name: "4.2.2.1.1 | PATCH | Ensure systemd-journal-remote is installed"
ansible.builtin.package:
name: systemd-journal-remote
state: present
when:
- rhel9cis_rule_4_2_2_1_1
tags:
- level1-server
- level1-workstation
- manual
- patch
- journald
- rule_4.2.2.1.1
- name: "4.2.2.1.2 | PATCH | Ensure systemd-journal-remote is configured"
ansible.builtin.lineinfile:
path: /etc/systemd/journal-upload.conf
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
notify: Restart journald
loop:
- { regexp: 'URL=', line: 'URL={{ rhel9cis_journal_upload_url }}'}
- { regexp: 'ServerKeyFile=', line: 'ServerKeyFile={{ rhel9cis_journal_upload_serverkeyfile }}'}
- { regexp: 'ServerCertificateFile=', line: 'ServerCertificateFile={{ rhel9cis_journal_servercertificatefile }}'}
- { regexp: 'TrustedCertificateFile=', line: 'TrustedCertificateFile={{ rhel9cis_journal_trustedcertificatefile }}'}
when:
- rhel9cis_rule_4_2_2_1_2
tags:
- level1-server
- level1-workstation
- manual
- patch
- journald
- rule_4.2.2.1.2
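# Illustrative example only (values are placeholders, not defaults from this role): the resulting
# /etc/systemd/journal-upload.conf would contain lines such as
#   URL=https://loghost.example.com:19532
#   ServerKeyFile=/etc/ssl/private/journal-upload.pem
#   ServerCertificateFile=/etc/ssl/certs/journal-upload.pem
#   TrustedCertificateFile=/etc/ssl/ca/trusted.pem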
- name: "4.2.2.1.3 | PATCH | Ensure systemd-journal-remote is enabled"
ansible.builtin.systemd:
name: systemd-journal-upload
state: started
enabled: true
when:
- rhel9cis_system_is_log_server
- rhel9cis_rule_4_2_2_1_3
tags:
- level1-server
- level1-workstation
- manual
- patch
- journald
- rule_4.2.2.1.3
- name: "4.2.2.1.4 | PATCH | Ensure journald is not configured to recieve logs from a remote client"
ansible.builtin.systemd:
name: systemd-journal-remote.socket
state: stopped
enabled: false
masked: true
when:
- not rhel9cis_system_is_log_server
- rhel9cis_rule_4_2_2_1_4
tags:
- level1-server
- level1-workstation
- patch
- journald
- rule_4.2.2.1.4
- name: "4.2.2.2 | PATCH | Ensure journald service is enabled"
block:
- name: "4.2.2.2 | PATCH | Ensure journald service is enabled | Enable service"
ansible.builtin.systemd:
name: systemd-journald
state: started
enabled: true
- name: "4.2.2.2 | AUDIT | Ensure journald service is enabled | Capture status"
ansible.builtin.shell: systemctl is-enabled systemd-journald.service
changed_when: false
failed_when: false
register: rhel9cis_4_2_2_2_status
- name: "4.2.2.2 | AUDIT | Ensure journald service is enabled | Alert on bad status"
ansible.builtin.debug:
msg:
- "Warning!! The status of systemd-journald should be static and it is not. Please investigate"
when: "'static' not in rhel9cis_4_2_2_2_status.stdout"
- name: "4.2.2.2 | AUDIT | Ensure journald service is enabled | Warn Count"
ansible.builtin.import_tasks: warning_facts.yml
when: "'static' not in rhel9cis_4_2_2_2_status.stdout"
vars:
warn_control_id: '4.2.2.2'
when:
- rhel9cis_rule_4_2_2_2
tags:
- level1-server
- level1-workstation
- audit
- journald
- rule_4.2.2.2
- name: "4.2.2.3 | PATCH | Ensure journald is configured to compress large log files"
ansible.builtin.lineinfile:
path: /etc/systemd/journald.conf
regexp: "^#Compress=|^Compress="
line: Compress=yes
notify: Restart journald
when:
- rhel9cis_rule_4_2_2_3
tags:
- level1-server
- level1-workstation
- patch
- journald
- rule_4.2.2.3
- name: "4.2.2.4 | PATCH | Ensure journald is configured to write logfiles to persistent disk"
ansible.builtin.lineinfile:
path: /etc/systemd/journald.conf
regexp: "^#Storage=|^Storage="
line: Storage=persistent
notify: Restart journald
when:
- rhel9cis_rule_4_2_2_4
tags:
- level1-server
- level1-workstation
- patch
- journald
- rule_4.2.2.4
# NOTE: this control conflicts with 4.2.1.3 (ForwardToSyslog=yes); apply only one of the two,
# depending on whether rsyslog or journald is the preferred log manager.
- name: "4.2.2.5 | PATCH | Ensure journald is not configured to send logs to rsyslog"
ansible.builtin.lineinfile:
path: /etc/systemd/journald.conf
regexp: "^ForwardToSyslog="
line: "#ForwardToSyslog=yes"
notify: Restart journald
when:
- rhel9cis_rule_4_2_2_5
tags:
- level1-server
- level2-workstation
- manual
- patch
- journald
- rule_4.2.2.5
- name: "4.2.2.6 | PATCH | Ensure journald log rotation is configured per site policy"
ansible.builtin.lineinfile:
path: /etc/systemd/journald.conf
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
notify: Restart journald
loop:
- { regexp: '^#SystemMaxUse=|^SystemMaxUse=', line: 'SystemMaxUse={{ rhel9cis_journald_systemmaxuse }}'}
- { regexp: '^#SystemKeepFree=|^SystemKeepFree=', line: 'SystemKeepFree={{ rhel9cis_journald_systemkeepfree }}' }
- { regexp: '^#RuntimeMaxUse=|^RuntimeMaxUse=', line: 'RuntimeMaxUse={{ rhel9cis_journald_runtimemaxuse }}'}
- { regexp: '^#RuntimeKeepFree=|^RuntimeKeepFree=', line: 'RuntimeKeepFree={{ rhel9cis_journald_runtimekeepfree }}'}
- { regexp: '^#MaxFileSec=|^MaxFileSec=', line: 'MaxFileSec={{ rhel9cis_journald_maxfilesec }}'}
when:
- rhel9cis_rule_4_2_2_6
tags:
- level1-server
- level1-workstation
- manual
- patch
- journald
- rule_4.2.2.6
- name: "4.2.2.7 | AUDIT | Ensure journald default file permissions configured"
block:
- name: "4.2.2.7 | AUDIT | Ensure journald default file permissions configured | Check for override file"
ansible.builtin.stat:
path: /etc/tmpfiles.d/systemd.conf
register: rhel9cis_4_2_2_7_override
- name: "4.2.2.7 | AUDIT | Ensure journald default file permissions configured | Set live file"
ansible.builtin.set_fact:
systemd_conf_file: /etc/tmpfiles.d/systemd.conf
when: rhel9cis_4_2_2_7_override.stat.exists
- name: "4.2.2.7 | PATCH | Ensure journald default file permissions configured | Set permission"
ansible.builtin.lineinfile:
path: "{{ systemd_conf_file | default('/usr/lib/tmpfiles.d/systemd.conf') }}"
regexp: '^z \/var\/log\/journal\/%m\/system.journal (!?06(0|4)0) root'
line: 'z /var/log/journal/%m/system.journal 0640 root systemd-journal - -'
when:
- rhel9cis_rule_4_2_2_7
tags:
- level1-server
- level1-workstation
- manual
- patch
- journald
- rule_4.2.2.7

View file

@ -0,0 +1,30 @@
---
- name: "4.2.3 | PATCH | Ensure permissions on all logfiles are configured"
block:
- name: "4.2.3 | AUDIT | Ensure permissions on all logfiles are configured | find files"
ansible.builtin.find:
paths: "/var/log"
file_type: file
recurse: true
register: logfiles
- name: "4.2.3 | PATCH | Ensure permissions on all logfiles are configured | change permissions"
ansible.builtin.file:
path: "{{ item.path }}"
mode: 0640
loop: "{{ logfiles.files }}"
loop_control:
label: "{{ item.path }}"
when:
- item.path != "/var/log/btmp"
- item.path != "/var/log/utmp"
- item.path != "/var/log/wtmp"
when:
- rhel9cis_rule_4_2_3
tags:
- level1-server
- level1-workstation
- patch
- logfiles
- rule_4.2.3

View file

@ -1,43 +0,0 @@
---
- name: "4.2.1 | AUDIT | Ensure firewalld drops unnecessary services and ports"
when: rhel9cis_rule_4_2_1
tags:
- level1-server
- level1-workstation
- manual
- audit
- rule_4.2.1
- NIST800-55_CA-9
block:
- name: "4.2.1 | AUDIT | Ensure firewalld drops unnecessary services and ports | Get list of services and ports"
ansible.builtin.shell: "firewall-cmd --get-active-zones | awk '!/:/ {print $1}' | while read ZN; do firewall-cmd --list-all --zone=$ZN; done"
changed_when: false
failed_when: false
check_mode: false
register: discovered_services_and_ports
- name: "4.2.1 | AUDIT | Ensure firewalld drops unnecessary services and ports | Show services and ports"
ansible.builtin.debug:
msg:
- "The items below are the services and ports that are accepted, please correct as needed"
- "{{ discovered_services_and_ports.stdout_lines }}"
- name: "4.2.2 | PATCH | Ensure firewalld loopback traffic is configured | firewalld"
when: rhel9cis_rule_4_2_2
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_4.2.2
- NIST800-55_CA-9
ansible.posix.firewalld:
rich_rule: "{{ item }}"
zone: "{{ rhel9cis_default_zone }}"
permanent: true
immediate: true
state: enabled
loop:
- rule family="ipv4" source address="127.0.0.1" destination not address="127.0.0.1" drop
- rule family="ipv6" source address="::1" destination not address="::1" drop

View file

@ -1,221 +0,0 @@
---
- name: "OPTIONAL | PATCH | Create Table if doesn't exist and required"
when:
- rhel9cis_nft_tables_autonewtable
- rhel9cis_rule_4_3_1
- rhel9cis_rule_4_3_2
- rhel9cis_rule_4_3_3
- rhel9cis_rule_4_3_4
tags: always
ansible.builtin.command: "nft add table inet {{ rhel9cis_nft_tables_tablename }}"
changed_when: true
- name: "4.3.1 | PATCH | Ensure nftables base chains exist"
when: rhel9cis_rule_4_3_1
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_4.3.1
- NIST800-55_CA-9
block:
- name: "4.3.1 | AUDIT | Ensure nftables base chains exist | Get current chains for INPUT"
ansible.builtin.shell: |
nft list ruleset | grep 'hook input'
changed_when: false
failed_when: false
register: discovered_nftables_input_chains
- name: "4.3.1 | AUDIT | Ensure nftables base chains exist | Get current chains for FORWARD"
ansible.builtin.shell: |
nft list ruleset | grep 'hook forward'
changed_when: false
failed_when: false
register: discovered_nftables_forward_chains
- name: "4.3.1 | AUDIT | Ensure nftables base chains exist | Get current chains for OUTPUT"
ansible.builtin.shell: |
nft list ruleset | grep 'hook output'
changed_when: false
failed_when: false
register: discovered_nftables_output_chains
- name: "4.3.1 | AUDIT | Ensure nftables base chains exist | Display chains for review"
when: not rhel9cis_nft_tables_autochaincreate
ansible.builtin.debug:
msg:
- "Below are the current INPUT chains"
- "{{ discovered_nftables_input_chains.stdout_lines }}"
- "Below are the current FORWARD chains"
- "{{ discovered_nftables_forward_chains.stdout_lines }}"
- "Below are teh current OUTPUT chains"
- "{{ discovered_nftables_output_chains.stdout_lines }}"
- name: "4.3.1 | PATCH | Ensure nftables base chains exist | Create chains if needed"
when: rhel9cis_nft_tables_autochaincreate
ansible.builtin.command: "{{ item }}"
changed_when: true
failed_when: false
loop:
- nft create chain inet "{{ rhel9cis_nft_tables_tablename }}" input { type filter hook input priority 0 \; }
- nft create chain inet "{{ rhel9cis_nft_tables_tablename }}" forward { type filter hook forward priority 0 \; }
- nft create chain inet "{{ rhel9cis_nft_tables_tablename }}" output { type filter hook output priority 0 \; }
- name: "4.3.2 | PATCH | Ensure nftables established connections are configured"
when: rhel9cis_rule_4_3_2
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_4.3.2
- NIST800-55_CA-9
block:
- name: "4.3.2 | AUDIT | Ensure nftables established connections are configured | Gather incoming connection rules"
ansible.builtin.shell: |
nft list ruleset | awk '/hook input/,/}/' | grep -E 'ip protocol (tcp|udp|icmp) ct state'
changed_when: false
failed_when: false
register: discovered_nftables_inconnectionrule
- name: "4.3.2 | AUDIT | Ensure nftables established connections are configured | Gather outbound connection rules"
ansible.builtin.shell: nft list ruleset | awk '/hook output/,/}/' | grep -E 'ip protocol (tcp|udp|icmp) ct state'
changed_when: false
failed_when: false
register: discovered_nftables_outconnectionrule
- name: "4.3.2| PATCH | Ensure nftables established connections are configured | Add input tcp established accept policy"
when: '"ip protocol tcp ct state established accept" not in discovered_nftables_inconnectionrule.stdout'
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input ip protocol tcp ct state established accept
changed_when: true
- name: "4.3.2 | PATCH | Ensure nftables established connections are configured | Add input udp established accept policy"
when: '"ip protocol udp ct state established accept" not in discovered_nftables_inconnectionrule.stdout'
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input ip protocol udp ct state established accept
changed_when: true
- name: "4.3.2 | PATCH | Ensure nftables established connections are configured | Add input icmp established accept policy"
when: '"ip protocol icmp ct state established accept" not in discovered_nftables_inconnectionrule.stdout'
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input ip protocol icmp ct state established accept
changed_when: true
- name: "4.3.2 | PATCH | Ensure nftables established connections are configured | Add output tcp new, related, established accept policy"
when: '"ip protocol tcp ct state established,related,new accept" not in discovered_nftables_outconnectionrule.stdout'
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" output ip protocol tcp ct state new,related,established accept
changed_when: true
- name: "4.3.2 | PATCH | Ensure nftables established connections are configured | Add output udp new, related, established accept policy"
when: '"ip protocol udp ct state established,related,new accept" not in discovered_nftables_outconnectionrule.stdout'
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" output ip protocol udp ct state new,related,established accept
changed_when: true
- name: "4.3.2 | PATCH | Ensure nftables established connections are configured | Add output icmp new, related, established accept policy"
when: '"ip protocol icmp ct state established,related,new accept" not in discovered_nftables_outconnectionrule.stdout'
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" output ip protocol icmp ct state new,related,established accept
changed_when: true
- name: "4.3.3 | PATCH | Ensure nftables default deny firewall policy"
when: rhel9cis_rule_4_3_3
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_4.3.3
- NIST800-55_CA-9
block:
- name: "4.3.3 | AUDIT | Ensure nftables default deny firewall policy | Check for hook input deny policy"
ansible.builtin.shell: |
nft list table inet "{{ rhel9cis_nft_tables_tablename }}" | grep 'hook input'
failed_when: false
changed_when: false
register: discovered_nftables_inputpolicy
- name: "4.3.3 | AUDIT | Ensure nftables default deny firewall policy | Check for hook forward deny policy"
ansible.builtin.shell: |
nft list table inet "{{ rhel9cis_nft_tables_tablename }}" | grep 'hook forward'
failed_when: false
changed_when: false
register: discovered_nftables_forwardpolicy
- name: "4.3.3 | AUDIT | Ensure nftables default deny firewall policy | Check for hook output deny policy"
ansible.builtin.shell: |
nft list table inet "{{ rhel9cis_nft_tables_tablename }}" | grep 'hook output'
failed_when: false
changed_when: false
register: discovered_nftables_outputpolicy
- name: "4.3.3 | AUDIT | Ensure nftables default deny firewall policy | Check for SSH allow"
ansible.builtin.shell: |
nft list table inet "{{ rhel9cis_nft_tables_tablename }}" | grep 'ssh'
failed_when: false
changed_when: false
register: discovered_nftables_sshallowcheck
- name: "4.3.3 | PATCH | Ensure nftables default deny firewall policy | Enable SSH traffic"
when: '"tcp dport ssh accept" not in discovered_nftables_sshallowcheck.stdout'
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input tcp dport ssh accept
changed_when: true
- name: "4.3.3 | PATCH | Ensure nftables default deny firewall policy | Set hook input deny policy"
when: '"type filter hook input priority 0; policy drop;" not in discovered_nftables_inputpolicy.stdout'
ansible.builtin.command: nft chain inet "{{ rhel9cis_nft_tables_tablename }}" input { policy drop \; }
changed_when: true
- name: "4.3.3 | PATCH | Ensure nftables default deny firewall policy | Create hook forward deny policy"
when: '"type filter hook forward priority 0; policy drop;" not in discovered_nftables_forwardpolicy.stdout'
ansible.builtin.command: nft chain inet "{{ rhel9cis_nft_tables_tablename }}" forward { policy drop \; }
changed_when: true
- name: "4.3.3 | PATCH | Ensure nftables default deny firewall policy | Create hook output deny policy"
when: '"type filter hook output priority 0; policy drop;" not in discovered_nftables_outputpolicy.stdout'
ansible.builtin.command: nft chain inet "{{ rhel9cis_nft_tables_tablename }}" output { policy drop \; }
changed_when: true
- name: "4.3.4 | PATCH | Ensure nftables loopback traffic is configured"
when: rhel9cis_rule_4_3_4
tags:
- level1-server
- level1-workstation
- patch
- nftables
- rule_4.3.4
- NIST800-55_CA-9
block:
- name: "4.3.4 | AUDIT | Ensure nftables loopback traffic is configured | Gather iif lo accept existence | nftables"
ansible.builtin.shell: |
nft list ruleset | awk '/hook input/,/}/' | grep 'iif "lo" accept'
changed_when: false
failed_when: false
register: discovered_nftables_iiflo
- name: "4.3.4 | AUDIT | Ensure nftables loopback traffic is configured | Gather ip saddr existence | nftables"
ansible.builtin.shell: |
nft list ruleset | awk '/hook input/,/}/' | grep 'ip saddr'
changed_when: false
failed_when: false
register: discovered_nftables_ipsaddr
- name: "4.3.4 | AUDIT | Ensure nftables loopback traffic is configured | Gather ip6 saddr existence | nftables"
ansible.builtin.shell: |
nft list ruleset | awk '/hook input/,/}/' | grep 'ip6 saddr'
changed_when: false
failed_when: false
register: discovered_nftables_ip6saddr
- name: "4.3.4 | PATCH | Ensure nftables loopback traffic is configured | Set iif lo accept rule | nftables"
when: '"iif \"lo\" accept" not in discovered_nftables_iiflo.stdout'
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input iif lo accept
changed_when: true
- name: "4.3.4 | PATCH | Ensure nftables loopback traffic is configured | Set ip sddr rule | nftables"
when: '"ip saddr 127.0.0.0/8 counter packets 0 bytes 0 drop" not in discovered_nftables_ipsaddr.stdout'
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input ip saddr 127.0.0.0/8 counter drop
changed_when: true
- name: "4.3.4 | PATCH | Ensure nftables loopback traffic is configured | Set ip6 saddr rule | nftables"
when: '"ip6 saddr ::1 counter packets 0 bytes 0 drop" not in discovered_nftables_ip6saddr.stdout'
ansible.builtin.command: nft add rule inet "{{ rhel9cis_nft_tables_tablename }}" input ip6 saddr ::1 counter drop
changed_when: true

Some files were not shown because too many files have changed in this diff.