This repository contains Ansible playbooks and roles for deploying core infrastructure services (DNS, DHCP, and NFS) for a lab environment supporting multiple OpenShift clusters.
The infrastructure deployment consists of three main services:

- DNS (BIND)
  - Handles name resolution for the lab environment
  - Configured for multiple OpenShift clusters
  - Containerized using Podman
  - Managed via systemd user services
- DHCP (ISC DHCP)
  - Manages IP address allocation
  - Supports multiple subnet pools for different clusters
  - Containerized using Podman
  - Managed via systemd user services
- NFS
  - Provides shared storage for the lab environment
  - Runs as a native systemd service
  - Exports configured for various use cases
Prerequisites:

- RHEL/CentOS/Fedora-based system
- Podman installed
- systemd
- SELinux (optional but supported)
- firewalld
- User with sudo privileges
- ZSH shell with oh-my-zsh
Network requirements:

- Available IP range: 192.168.10.0/24
- DNS server IP: 192.168.10.2
- Required ports:
  - DNS: 53 (TCP/UDP)
  - DHCP: 67/68 (UDP)
  - NFS: 2049 (TCP), 111 (TCP/UDP)
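If firewalld is active, the required ports map onto its predefined services. A minimal sketch, assuming the playbooks do not already open them for you:

```bash
# Open DNS, DHCP, and NFS (including rpcbind) permanently, then reload
sudo firewall-cmd --permanent --add-service=dns
sudo firewall-cmd --permanent --add-service=dhcp
sudo firewall-cmd --permanent --add-service=nfs
sudo firewall-cmd --permanent --add-service=rpc-bind
sudo firewall-cmd --reload
```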
```bash
git clone https://github.com/pdred/ansible_idc.git lab-infra
cd lab-infra
```

Review and modify the following files if needed:
- `group_vars/all.yml`: global variables
- `inventory/hosts.yml`: inventory configuration
- `roles/*/defaults/main.yml`: role-specific variables
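As an illustration, a minimal `inventory/hosts.yml` might look like the following; the group and host names are assumptions, not taken from the repository:

```yaml
# inventory/hosts.yml (illustrative only)
all:
  children:
    infra:
      hosts:
        lab-services:
          ansible_host: 192.168.10.2
```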
Run the playbooks:

```bash
# Run the complete deployment
ansible-playbook site.yml

# Run specific roles
ansible-playbook site.yml --tags dns,dhcp

# Run with a different inventory
ansible-playbook -i custom-inventory.yml site.yml
```

The common role handles base system configuration:
```yaml
# roles/common/tasks/main.yml
- name: Install required packages
  package:
    name: "{{ common_packages[ansible_os_family] }}"
    state: present
```

Variables:

- `common_packages`: Dictionary of required packages per OS family
- `systemd_user_dir`: Path to the user's systemd directory
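For reference, a sketch of how these variables might be set; the package names are assumptions, not taken from the repository:

```yaml
# group_vars/all.yml (illustrative values only)
common_packages:
  RedHat:
    - podman        # assumed package list
    - bind-utils
    - nfs-utils
systemd_user_dir: "{{ ansible_env.HOME }}/.config/systemd/user"  # standard per-user unit directory
```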
Configures the BIND DNS server:

```jinja
# roles/dns/templates/named.conf.j2
options {
    listen-on port 53 { 127.0.0.1; {{ dns_server_ip }}; };
    directory "/etc/bind";
    allow-query { any; };
    forwarders { {{ dns_forwarders | join('; ') }}; };
};
```

Variables:

- `dns_server_ip`: IP address for the DNS server
- `dns_forwarders`: List of upstream DNS servers
- `base_domain`: Base domain for the lab environment
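A sketch of matching role defaults; the forwarder addresses are assumptions:

```yaml
# roles/dns/defaults/main.yml (illustrative values only)
dns_server_ip: 192.168.10.2
dns_forwarders:
  - 1.1.1.1        # assumed upstream resolver
  - 8.8.8.8        # assumed upstream resolver
base_domain: lab.com
```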
Configures the ISC DHCP server:

```jinja
# roles/dhcp/templates/dhcpd.conf.j2
subnet {{ network_subnet }} netmask {{ network_netmask }} {
  pool {
    range {{ dhcp_range_start }} {{ dhcp_range_end }};
  }
}
```

Variables:

- `network_subnet`: Network subnet for DHCP
- `network_netmask`: Network netmask
- `dhcp_lease_time`: Default lease time
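A sketch of matching values for the 192.168.10.0/24 lab network; the pool boundaries and lease time are assumptions:

```yaml
# roles/dhcp/defaults/main.yml (illustrative values only)
network_subnet: 192.168.10.0
network_netmask: 255.255.255.0
dhcp_range_start: 192.168.10.50   # assumed pool start
dhcp_range_end: 192.168.10.99     # assumed pool end
dhcp_lease_time: 3600             # assumed default lease, in seconds
```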
Configures the NFS server:

```jinja
# roles/nfs/templates/exports.j2
{% for export in nfs_exports %}
{{ export.path }} {{ export.options }}
{% endfor %}
```

Variables:

- `nfs_exports`: List of export configurations
- `nfs_server_ip`: IP address for the NFS server
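A sketch of an `nfs_exports` definition; the export options are assumptions:

```yaml
# roles/nfs/defaults/main.yml (illustrative values only)
nfs_server_ip: 192.168.10.2
nfs_exports:
  - path: /exports/data
    options: "192.168.10.0/24(rw,sync,no_root_squash)"  # assumed options
```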
DNS records for multiple clusters are defined in `cluster_configs`:

```yaml
# roles/dns/vars/main.yml
cluster_configs:
  - name: production
    base_domain: prod.lab.com
    api_ip: 192.168.10.50
    ingress_ip: 192.168.10.51
  - name: development
    base_domain: dev.lab.com
    api_ip: 192.168.10.60
    ingress_ip: 192.168.10.61
```

Apply the configuration:
```bash
ansible-playbook site.yml --tags dns -e cluster_name=production
```

Per-cluster DHCP pools are defined in `cluster_subnets`:
```yaml
# roles/dhcp/vars/main.yml
cluster_subnets:
  production:
    range_start: 192.168.10.50
    range_end: 192.168.10.59
    domain: prod.lab.com
  development:
    range_start: 192.168.10.60
    range_end: 192.168.10.69
    domain: dev.lab.com
```

Apply the configuration:
```bash
ansible-playbook site.yml --tags dhcp -e cluster_name=production
```

To move the lab to a different network range, update all services with the helper script and verify each one:

```bash
# Update the network range for all services
./scripts/network_config.py --new-network 192.168.20

# Verify the changes
dig @192.168.20.2 api.prod.lab.com
nmap -sU -p 67 192.168.20.2
showmount -e 192.168.20.2
```

A DHCP pool can be restricted to specific hardware with a MAC-based client class:
```jinja
# roles/dhcp/templates/dhcpd.conf.j2
class "testing-cluster" {
  # Match the first three octets (OUI) of the client MAC address
  match if substring(hardware, 1, 3) = 10:70:fd;
}

subnet {{ network_subnet }} netmask {{ network_netmask }} {
  pool {
    range {{ dhcp_range_start }} {{ dhcp_range_end }};
    allow members of "testing-cluster";
  }
}
```

Zones can be signed with DNSSEC. Note that `dnssec-enable` and `dnssec-lookaside` have been removed from recent BIND releases; on current versions only `dnssec-validation` is still set:
```bash
# Generate DNSSEC keys
cd /etc/bind
dnssec-keygen -a NSEC3RSASHA1 -b 2048 -n ZONE lab.com
dnssec-keygen -f KSK -a NSEC3RSASHA1 -b 4096 -n ZONE lab.com

# Add DNSSEC configuration
cat >> named.conf << EOF
dnssec-enable yes;
dnssec-validation yes;
dnssec-lookaside auto;
EOF

# Sign the zone
dnssec-signzone -A -3 $(head -c 1000 /dev/random | sha1sum | cut -b 1-16) \
  -N INCREMENT -o lab.com -t db.lab.com
```
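Once the signed zone is loaded, validation can be spot-checked with dig; the server address below is the lab DNS IP from the network requirements:

```bash
# Query the SOA record and request DNSSEC records (RRSIGs) in the answer
dig @192.168.10.2 +dnssec +multi lab.com SOA
```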
NFS exports can additionally be secured with Kerberos:

```bash
# Install Kerberos
sudo dnf install krb5-server krb5-workstation

# Configure the Kerberos realm (tee runs the redirection with root privileges)
sudo tee /etc/krb5.conf << EOF
[libdefaults]
  default_realm = LAB.COM

[realms]
  LAB.COM = {
    kdc = 192.168.10.2
    admin_server = 192.168.10.2
  }
EOF

# Update NFS exports to require Kerberos (krb5p: authentication, integrity, and privacy)
sudo tee /etc/exports << EOF
/exports/data *(sec=krb5p,rw,sync)
EOF
```

Prometheus can scrape the service exporters:
```yaml
# roles/monitoring/templates/prometheus.yml.j2
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'dns'
    static_configs:
      - targets: ['{{ dns_server_ip }}:9119']
  - job_name: 'dhcp'
    static_configs:
      - targets: ['{{ dhcp_server_ip }}:9119']
  - job_name: 'nfs'
    static_configs:
      - targets: ['{{ nfs_server_ip }}:9119']
```

An example Grafana dashboard for the collected metrics:
"dashboard": {
"title": "Lab Infrastructure",
"panels": [
{
"title": "DNS Queries/sec",
"type": "graph",
"targets": [
{
"expr": "rate(bind_queries_total[5m])"
}
]
},
{
"title": "DHCP Leases",
"type": "gauge",
"targets": [
{
"expr": "dhcp_leases_current_count"
}
]
}
]
}
}#!/bin/bash
```bash
#!/bin/bash
# scripts/backup.sh
BACKUP_DIR=~/lab-infra/backups/$(date +%Y%m%d)
mkdir -p "$BACKUP_DIR"/{dns,dhcp,nfs}

# Backup DNS (freeze the zone so the files are consistent while copying)
podman exec dns-server rndc freeze lab.com
cp -r ~/lab-infra/dns/config/* "$BACKUP_DIR"/dns/
podman exec dns-server rndc thaw lab.com

# Backup DHCP
cp -r ~/lab-infra/dhcp/config/* "$BACKUP_DIR"/dhcp/
cp /var/lib/dhcp/dhcpd.leases "$BACKUP_DIR"/dhcp/

# Backup NFS
cp /etc/exports "$BACKUP_DIR"/nfs/
```
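If periodic backups are wanted, the script can be scheduled; a minimal sketch using cron, where the nightly schedule is an assumption:

```bash
# Run the backup script every night at 02:00 (illustrative schedule)
(crontab -l 2>/dev/null; echo "0 2 * * * $HOME/lab-infra/scripts/backup.sh") | crontab -
```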
A matching restore script reverses the process:

```bash
#!/bin/bash
# scripts/restore.sh
# Usage: ./restore.sh YYYYMMDD
BACKUP_DATE=$1
BACKUP_DIR=~/lab-infra/backups/$BACKUP_DATE

# Stop services
systemctl --user stop dns-server.service dhcp-server.service
sudo systemctl stop nfs-server

# Restore configurations
cp -r "$BACKUP_DIR"/dns/* ~/lab-infra/dns/config/
cp -r "$BACKUP_DIR"/dhcp/* ~/lab-infra/dhcp/config/
sudo cp "$BACKUP_DIR"/nfs/exports /etc/exports

# Start services
systemctl --user start dns-server.service dhcp-server.service
sudo systemctl start nfs-server

# Verify services
./run_tests.sh
```

To contribute:

- Fork the repository
- Create a feature branch
- Commit your changes
- Push to the branch
- Create a Pull Request
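A typical sequence of git commands for that workflow; the branch name and commit message are illustrative:

```bash
git checkout -b feature/my-change     # create a feature branch
git commit -am "Describe the change"  # commit your changes
git push origin feature/my-change     # push the branch, then open a Pull Request
```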
Coding and testing guidelines:

- Follow Ansible best practices
- Use YAML files with the .yml extension
- Include comments for complex tasks
- Use Jinja2 templating consistently
- Follow variable naming conventions
- Run ansible-lint before committing
- Test configurations using validate-configs.yml
- Verify documentation with validate-docs.yml
- Test on both RHEL and Fedora systems
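The lint and validation steps map directly onto commands; a minimal sketch, assuming the validation playbooks run against the default inventory:

```bash
ansible-lint                           # static checks on playbooks and roles
ansible-playbook validate-configs.yml  # test rendered configurations
ansible-playbook validate-docs.yml     # verify documentation
```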