diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index bd378bbd..92d6430f 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -46,6 +46,8 @@ jobs:
- none
database:
- internal
+ iop:
+ - enabled
include:
- certificate_source: default
security: fapolicyd
@@ -54,7 +56,7 @@ jobs:
security: none
database: external
runs-on: ubuntu-24.04
- name: "Tests (certificate source: ${{ matrix.certificate_source }}, database: ${{ matrix.database }}, security mode: ${{ matrix.security }})"
+ name: "Tests (certificate source: ${{ matrix.certificate_source }}, database: ${{ matrix.database }}, security mode: ${{ matrix.security }}, iop: ${{ matrix.iop }})"
steps:
- uses: actions/checkout@v6
- name: Set up Python
@@ -108,6 +110,10 @@ jobs:
- name: Add optional feature - foreman_azure_rm and foreman_google
run: |
./foremanctl deploy --add-feature foreman_azure_rm --add-feature foreman_google
+ - name: Enable iop
+ if: matrix.iop == 'enabled'
+ run: |
+ ./foremanctl deploy --add-feature iop
- name: Run tests
run: |
./forge test --pytest-args="--certificate-source=${{ matrix.certificate_source }} --database-mode=${{ matrix.database }}"
@@ -127,7 +133,7 @@ jobs:
if: ${{ always() }}
uses: actions/upload-artifact@v6
with:
- name: sosreport-${{ matrix.certificate_source }}-${{ matrix.security }}-${{ matrix.database }}
+ name: sosreport-${{ matrix.certificate_source }}-${{ matrix.security }}-${{ matrix.database }}-${{ matrix.iop }}
path: sos/
- name: Setup upterm session
if: ${{ failure() }}
diff --git a/src/playbooks/deploy/deploy.yaml b/src/playbooks/deploy/deploy.yaml
index 6982ebb7..d6d36422 100644
--- a/src/playbooks/deploy/deploy.yaml
+++ b/src/playbooks/deploy/deploy.yaml
@@ -11,6 +11,20 @@
- "../../vars/database.yml"
- "../../vars/foreman.yml"
- "../../vars/base.yaml"
+ pre_tasks:
+ - name: Add iop databases
+ when:
+ - "'iop' in enabled_features"
+ - database_mode == 'internal'
+ block:
+ - name: Include iop databases
+ ansible.builtin.include_vars:
+ file: "../../vars/database_iop.yml"
+
+ - name: Combine lists
+ ansible.builtin.set_fact:
+ postgresql_databases: "{{ postgresql_databases + iop_postgresql_databases }}"
+ postgresql_users: "{{ postgresql_users + iop_postgresql_users }}"
roles:
- role: pre_install
- role: checks
@@ -30,6 +44,10 @@
- pulp
- foreman
- role: systemd_target
+ - role: iop_core
+ when:
+ - "'iop' in enabled_features"
+ - database_mode == 'internal'
- role: foreman_proxy
when:
- "'foreman-proxy' in enabled_features"
diff --git a/src/requirements.yml b/src/requirements.yml
index 01a19ec6..103f8826 100644
--- a/src/requirements.yml
+++ b/src/requirements.yml
@@ -1,4 +1,5 @@
collections:
+ - community.general
- community.postgresql
- community.crypto
- ansible.posix
diff --git a/src/roles/iop_advisor/defaults/main.yaml b/src/roles/iop_advisor/defaults/main.yaml
new file mode 100644
index 00000000..0d7b0d1f
--- /dev/null
+++ b/src/roles/iop_advisor/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+iop_advisor_container_image: "quay.io/iop/advisor-backend"
+iop_advisor_container_tag: "foreman-3.16"
+
+iop_advisor_database_name: advisor_db
+iop_advisor_database_user: advisor_user
+iop_advisor_database_password: CHANGEME
+iop_advisor_database_host: host.containers.internal
+iop_advisor_database_port: 5432
diff --git a/src/roles/iop_advisor/handlers/main.yaml b/src/roles/iop_advisor/handlers/main.yaml
new file mode 100644
index 00000000..126e16bf
--- /dev/null
+++ b/src/roles/iop_advisor/handlers/main.yaml
@@ -0,0 +1,28 @@
+---
+- name: Check if advisor backend api service exists
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-api
+ register: iop_advisor_api_service_status
+ failed_when: false
+ listen: restart advisor
+
+- name: Restart advisor backend api service if it exists
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-api
+ state: restarted
+ when: iop_advisor_api_service_status.status is defined and iop_advisor_api_service_status.status.LoadState != "not-found"
+ listen: restart advisor
+
+- name: Check if advisor backend service exists
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-service
+ register: iop_advisor_service_status
+ failed_when: false
+ listen: restart advisor
+
+- name: Restart advisor backend service if it exists
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-service
+ state: restarted
+ when: iop_advisor_service_status.status is defined and iop_advisor_service_status.status.LoadState != "not-found"
+ listen: restart advisor
diff --git a/src/roles/iop_advisor/tasks/main.yaml b/src/roles/iop_advisor/tasks/main.yaml
new file mode 100644
index 00000000..ad26a342
--- /dev/null
+++ b/src/roles/iop_advisor/tasks/main.yaml
@@ -0,0 +1,136 @@
+---
+- name: Pull Advisor Backend container image
+ containers.podman.podman_image:
+ name: "{{ iop_advisor_container_image }}:{{ iop_advisor_container_tag }}"
+ state: present
+
+- name: Create podman secret for advisor database username
+ containers.podman.podman_secret:
+ name: iop-service-advisor-backend-database-username
+ data: "{{ iop_advisor_database_user }}"
+ notify: restart advisor
+
+- name: Create podman secret for advisor database password
+ containers.podman.podman_secret:
+ name: iop-service-advisor-backend-database-password
+ data: "{{ iop_advisor_database_password }}"
+ notify: restart advisor
+
+- name: Create podman secret for advisor database name
+ containers.podman.podman_secret:
+ name: iop-service-advisor-backend-database-name
+ data: "{{ iop_advisor_database_name }}"
+ notify: restart advisor
+
+- name: Create podman secret for advisor database host
+ containers.podman.podman_secret:
+ name: iop-service-advisor-backend-database-host
+ data: "{{ iop_advisor_database_host }}"
+ notify: restart advisor
+
+- name: Create podman secret for advisor database port
+ containers.podman.podman_secret:
+ name: iop-service-advisor-backend-database-port
+ data: "{{ iop_advisor_database_port }}"
+ notify: restart advisor
+
+- name: Deploy Advisor Backend API Container
+ containers.podman.podman_container:
+ name: iop-service-advisor-backend-api
+ image: "{{ iop_advisor_container_image }}:{{ iop_advisor_container_tag }}"
+ state: quadlet
+ command: sh -c "./container_init.sh && api/app.sh"
+ network:
+ - iop-core-network
+ env:
+ DJANGO_SESSION_KEY: "UNUSED"
+ BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ ADVISOR_ENV: "prod"
+ LOG_LEVEL: "INFO"
+ USE_DJANGO_WEBSERVER: "false"
+ CLOWDER_ENABLED: "false"
+ WEB_CONCURRENCY: "2"
+ ENABLE_AUTOSUB: "true"
+ TASKS_REWRITE_INTERNAL_URLS: "true"
+ TASKS_REWRITE_INTERNAL_URLS_FOR: "internal.localhost"
+ ENABLE_INIT_CONTAINER_MIGRATIONS: "true"
+ ENABLE_INIT_CONTAINER_IMPORT_CONTENT: "true"
+ IMAGE: "latest"
+ ALLOWED_HOSTS: "*"
+ INVENTORY_SERVER_URL: "http://iop-core-host-inventory-api:8081/api/inventory/v1"
+ ADVISOR_DB_SSL_MODE: "disable"
+ PORT: "8000"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-service-advisor-backend-database-username,type=env,target=ADVISOR_DB_USER'
+ - 'iop-service-advisor-backend-database-password,type=env,target=ADVISOR_DB_PASSWORD'
+ - 'iop-service-advisor-backend-database-name,type=env,target=ADVISOR_DB_NAME'
+ - 'iop-service-advisor-backend-database-host,type=env,target=ADVISOR_DB_HOST'
+ - 'iop-service-advisor-backend-database-port,type=env,target=ADVISOR_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Advisor Backend API
+ After=iop-core-kafka.service
+ Wants=iop-core-kafka.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Deploy Advisor Backend Service Container
+ containers.podman.podman_container:
+ name: iop-service-advisor-backend-service
+ image: "{{ iop_advisor_container_image }}:{{ iop_advisor_container_tag }}"
+ state: quadlet
+ command: pipenv run python service/service.py
+ network:
+ - iop-core-network
+ env:
+ BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ ADVISOR_DB_SSL_MODE: "disable"
+ DISABLE_WEB_SERVER: "true"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-service-advisor-backend-database-username,type=env,target=ADVISOR_DB_USER'
+ - 'iop-service-advisor-backend-database-password,type=env,target=ADVISOR_DB_PASSWORD'
+ - 'iop-service-advisor-backend-database-name,type=env,target=ADVISOR_DB_NAME'
+ - 'iop-service-advisor-backend-database-host,type=env,target=ADVISOR_DB_HOST'
+ - 'iop-service-advisor-backend-database-port,type=env,target=ADVISOR_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Advisor Backend Service
+ After=iop-core-kafka.service
+ Wants=iop-core-kafka.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Advisor Backend API service
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-api
+ enabled: true
+ state: started
+
+- name: Start Advisor Backend Service
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-service
+ enabled: true
+ state: started
+
+- name: Set up Foreign Data Wrapper for advisor database
+ ansible.builtin.include_role:
+ name: iop_fdw
+ vars:
+ iop_fdw_database_name: "{{ iop_advisor_database_name }}"
+ iop_fdw_database_user: "{{ iop_advisor_database_user }}"
+ iop_fdw_database_password: "{{ iop_advisor_database_password }}"
+ iop_fdw_remote_database_name: "{{ iop_inventory_database_name }}"
+ iop_fdw_remote_user: "{{ iop_inventory_database_user }}"
+ iop_fdw_remote_password: "{{ iop_inventory_database_password }}"
diff --git a/src/roles/iop_advisor_frontend/defaults/main.yaml b/src/roles/iop_advisor_frontend/defaults/main.yaml
new file mode 100644
index 00000000..df7ec32d
--- /dev/null
+++ b/src/roles/iop_advisor_frontend/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+iop_advisor_frontend_container_image: "quay.io/iop/advisor-frontend"
+iop_advisor_frontend_container_tag: "foreman-3.16"
+iop_advisor_frontend_assets_path: "/var/lib/foreman/public/assets/apps/advisor"
+iop_advisor_frontend_source_path: "/srv/dist/."
diff --git a/src/roles/iop_advisor_frontend/tasks/main.yaml b/src/roles/iop_advisor_frontend/tasks/main.yaml
new file mode 100644
index 00000000..fa1d19b5
--- /dev/null
+++ b/src/roles/iop_advisor_frontend/tasks/main.yaml
@@ -0,0 +1,98 @@
+---
+- name: Pull Advisor Frontend container image
+ containers.podman.podman_image:
+ name: "{{ iop_advisor_frontend_container_image }}:{{ iop_advisor_frontend_container_tag }}"
+ state: present
+
+- name: Ensure parent assets directory exists
+ ansible.builtin.file:
+ path: /var/lib/foreman/public/assets/apps
+ state: directory
+ owner: root
+ group: root
+ mode: '0755'
+
+- name: Ensure assets directory exists
+ ansible.builtin.file:
+ path: "{{ iop_advisor_frontend_assets_path }}"
+ state: directory
+ owner: root
+ group: root
+ mode: '0755'
+
+- name: Create temporary container for asset extraction
+ containers.podman.podman_container:
+ name: iop-advisor-frontend-temp
+ image: "{{ iop_advisor_frontend_container_image }}:{{ iop_advisor_frontend_container_tag }}"
+ state: created
+
+- name: Extract advisor frontend assets from container
+ containers.podman.podman_container_copy:
+ container: iop-advisor-frontend-temp
+ src: "{{ iop_advisor_frontend_source_path }}"
+ dest: "{{ iop_advisor_frontend_assets_path }}"
+ from_container: true
+
+- name: Remove temporary container
+ containers.podman.podman_container:
+ name: iop-advisor-frontend-temp
+ state: absent
+
+- name: Set ownership of advisor frontend assets
+ ansible.builtin.file:
+ path: "{{ iop_advisor_frontend_assets_path }}"
+ owner: root
+ group: root
+ recurse: true
+
+- name: Set SELinux file context for advisor frontend assets
+ community.general.sefcontext:
+ target: "{{ iop_advisor_frontend_assets_path }}(/.*)?"
+ setype: httpd_exec_t
+ state: present
+ when: ansible_facts["selinux"]["status"] == "enabled"
+
+- name: Restore SELinux context for advisor frontend assets
+ ansible.builtin.command:
+ cmd: restorecon -R "{{ iop_advisor_frontend_assets_path }}"
+ when: ansible_facts["selinux"]["status"] == "enabled"
+ changed_when: false
+
+- name: Ensure Apache SSL config directory exists
+ ansible.builtin.file:
+ path: /etc/httpd/conf.d/05-foreman-ssl.d
+ state: directory
+ mode: '0755'
+
+- name: Configure Apache for advisor frontend assets
+ ansible.builtin.copy:
+ dest: /etc/httpd/conf.d/05-foreman-ssl.d/advisor-frontend.conf
+ content: |
+ # IOP Advisor Frontend Assets Configuration
+ Alias /assets/apps/advisor {{ iop_advisor_frontend_assets_path }}
+ ProxyPass /assets/apps/advisor !
+
+      <Directory {{ iop_advisor_frontend_assets_path }}>
+        Options SymLinksIfOwnerMatch
+        AllowOverride None
+        Require all granted
+
+        # Use standard http expire header for assets instead of ETag
+        <IfModule mod_expires.c>
+          Header unset ETag
+          FileETag None
+          ExpiresActive On
+          ExpiresDefault "access plus 1 year"
+        </IfModule>
+
+        # Return compressed assets if they are precompiled
+        RewriteEngine On
+        # Make sure the browser supports gzip encoding and file with .gz added
+        # does exist on disc before we rewrite with the extension
+        RewriteCond %{HTTP:Accept-Encoding} \b(x-)?gzip\b
+        RewriteCond %{REQUEST_FILENAME} \.(css|js|svg)$
+        RewriteCond %{REQUEST_FILENAME}.gz -s
+        RewriteRule ^(.+) $1.gz [L]
+      </Directory>
+ mode: '0644'
+ notify: "httpd : Restart httpd"
diff --git a/src/roles/iop_core/defaults/main.yaml b/src/roles/iop_core/defaults/main.yaml
new file mode 100644
index 00000000..ed97d539
--- /dev/null
+++ b/src/roles/iop_core/defaults/main.yaml
@@ -0,0 +1 @@
+---
diff --git a/src/roles/iop_core/tasks/main.yaml b/src/roles/iop_core/tasks/main.yaml
new file mode 100644
index 00000000..a33ad858
--- /dev/null
+++ b/src/roles/iop_core/tasks/main.yaml
@@ -0,0 +1,56 @@
+---
+- name: Deploy IOP Network
+ ansible.builtin.include_role:
+ name: iop_network
+
+- name: Deploy IOP Kafka service
+ ansible.builtin.include_role:
+ name: iop_kafka
+
+- name: Deploy IOP Ingress service
+ ansible.builtin.include_role:
+ name: iop_ingress
+
+- name: Deploy IOP Puptoo service
+ ansible.builtin.include_role:
+ name: iop_puptoo
+
+- name: Deploy IOP Yuptoo service
+ ansible.builtin.include_role:
+ name: iop_yuptoo
+
+- name: Deploy IOP Engine service
+ ansible.builtin.include_role:
+ name: iop_engine
+
+- name: Deploy IOP Gateway service
+ ansible.builtin.include_role:
+ name: iop_gateway
+
+- name: Deploy IOP Inventory service
+ ansible.builtin.include_role:
+ name: iop_inventory
+
+- name: Deploy IOP Advisor service
+ ansible.builtin.include_role:
+ name: iop_advisor
+
+- name: Deploy IOP Remediation service
+ ansible.builtin.include_role:
+ name: iop_remediation
+
+- name: Deploy IOP VMAAS service
+ ansible.builtin.include_role:
+ name: iop_vmaas
+
+- name: Deploy IOP Vulnerability service
+ ansible.builtin.include_role:
+ name: iop_vulnerability
+
+- name: Deploy IOP Advisor Frontend
+ ansible.builtin.include_role:
+ name: iop_advisor_frontend
+
+- name: Deploy IOP Vulnerability Frontend
+ ansible.builtin.include_role:
+ name: iop_vulnerability_frontend
diff --git a/src/roles/iop_engine/defaults/main.yaml b/src/roles/iop_engine/defaults/main.yaml
new file mode 100644
index 00000000..4a792222
--- /dev/null
+++ b/src/roles/iop_engine/defaults/main.yaml
@@ -0,0 +1,8 @@
+---
+iop_engine_container_image: "quay.io/iop/insights-engine"
+iop_engine_container_tag: "foreman-3.16"
+
+iop_engine_packages:
+ - "insights.specs.default"
+ - "insights.specs.insights_archive"
+ - "insights_kafka_service.rules"
diff --git a/src/roles/iop_engine/handlers/main.yaml b/src/roles/iop_engine/handlers/main.yaml
new file mode 100644
index 00000000..c090e205
--- /dev/null
+++ b/src/roles/iop_engine/handlers/main.yaml
@@ -0,0 +1,7 @@
+---
+- name: Restart engine
+ ansible.builtin.systemd:
+ name: iop-core-engine
+ state: restarted
+ listen: restart engine
+ when: ansible_facts.services['iop-core-engine.service'] is defined
diff --git a/src/roles/iop_engine/tasks/main.yaml b/src/roles/iop_engine/tasks/main.yaml
new file mode 100644
index 00000000..5bf70ee8
--- /dev/null
+++ b/src/roles/iop_engine/tasks/main.yaml
@@ -0,0 +1,46 @@
+---
+- name: Pull Engine container image
+ containers.podman.podman_image:
+ name: "{{ iop_engine_container_image }}:{{ iop_engine_container_tag }}"
+ state: present
+
+- name: Create Engine config secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-engine-config-yml
+ data: "{{ lookup('ansible.builtin.template', 'engine/config.yml.j2') }}"
+ notify: restart engine
+
+- name: Deploy Engine container
+ containers.podman.podman_container:
+ name: iop-core-engine
+ image: "{{ iop_engine_container_image }}:{{ iop_engine_container_tag }}"
+ state: quadlet
+ command: insights-core-engine /var/config.yml
+ secrets:
+ - 'iop-core-engine-config-yml,target=/var/config.yml,mode=0440,uid=1000,type=mount'
+ etc_hosts:
+ console.redhat.com: "127.0.0.1"
+ network:
+ - iop-core-network
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Engine Container
+ After=iop-core-kafka.service iop-core-ingress.service iop-core-puptoo.service
+ Wants=iop-core-kafka.service iop-core-ingress.service iop-core-puptoo.service
+ [Service]
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Engine service
+ ansible.builtin.systemd:
+ name: iop-core-engine
+ enabled: true
+ state: started
diff --git a/src/roles/iop_engine/templates/engine/config.yml.j2 b/src/roles/iop_engine/templates/engine/config.yml.j2
new file mode 100644
index 00000000..a62b7902
--- /dev/null
+++ b/src/roles/iop_engine/templates/engine/config.yml.j2
@@ -0,0 +1,33 @@
+plugins:
+ default_component_enabled: true
+ packages:
+{% for package in iop_engine_packages %}
+ - {{ package }}
+{% endfor %}
+configs: []
+service:
+ extract_timeout: 10
+ unpacked_archive_size_limit: 1800000000
+ extract_tmp_dir:
+ format: insights_kafka_service.formats._insights.InsightsFormat
+ target_components: []
+ consumer:
+ name: "insights_kafka_service.consumer.InsightsKafkaConsumer"
+ kwargs:
+ services:
+ - "advisor"
+ group_id: "insights-core-kafka"
+ queued.max.messages.kbytes: 10000
+ session.timeout.ms: 30000
+ max.poll.interval.ms: 600000
+ bootstrap_servers:
+ - "iop-core-kafka:9092"
+ incoming_topic: platform.inventory.events
+ publisher:
+ name: "insights_kafka_service.producer.InsightsKafkaProducer"
+ kwargs:
+ bootstrap_servers:
+ - "iop-core-kafka:9092"
+ topic: platform.engine.results
+ downloader:
+ name: "insights_messaging.downloaders.httpfs.Http"
diff --git a/src/roles/iop_fdw/defaults/main.yaml b/src/roles/iop_fdw/defaults/main.yaml
new file mode 100644
index 00000000..4b669add
--- /dev/null
+++ b/src/roles/iop_fdw/defaults/main.yaml
@@ -0,0 +1,20 @@
+---
+# Required parameters - must be passed by caller
+iop_fdw_database_name: ""
+iop_fdw_database_user: ""
+iop_fdw_database_password: ""
+iop_fdw_remote_database_name: ""
+iop_fdw_remote_user: ""
+iop_fdw_remote_password: ""
+
+# Optional parameters - can use defaults
+iop_fdw_database_host: "localhost"
+iop_fdw_database_port: 5432
+
+# Constants - same for all invocations (matching puppet-iop)
+iop_fdw_foreign_server_name: hbi_server
+iop_fdw_remote_table_schema: inventory
+iop_fdw_remote_table_name: hosts
+iop_fdw_local_source_schema: inventory_source
+iop_fdw_local_view_schema: inventory
+iop_fdw_local_view_name: hosts
diff --git a/src/roles/iop_fdw/handlers/main.yaml b/src/roles/iop_fdw/handlers/main.yaml
new file mode 100644
index 00000000..1aeb62a4
--- /dev/null
+++ b/src/roles/iop_fdw/handlers/main.yaml
@@ -0,0 +1,3 @@
+---
+# Handlers for iop_fdw role
+# Currently no specific handlers needed for FDW operations
diff --git a/src/roles/iop_fdw/tasks/main.yaml b/src/roles/iop_fdw/tasks/main.yaml
new file mode 100644
index 00000000..ec21087f
--- /dev/null
+++ b/src/roles/iop_fdw/tasks/main.yaml
@@ -0,0 +1,165 @@
+---
+- name: Install PostgreSQL client for FDW operations
+ ansible.builtin.package:
+ name: postgresql
+ state: present
+
+- name: Enable postgres_fdw extension on target database
+ community.postgresql.postgresql_ext:
+ name: postgres_fdw
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+
+- name: Check if foreign server exists
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: "SELECT srvname FROM pg_foreign_server WHERE srvname = %s"
+ positional_args:
+ - "{{ iop_fdw_foreign_server_name }}"
+ register: iop_fdw_foreign_server_check
+ changed_when: false
+
+- name: Create foreign server for inventory database
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ CREATE SERVER {{ iop_fdw_foreign_server_name }}
+ FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (host %s, port %s, dbname %s)
+ positional_args:
+ - "{{ iop_fdw_database_host }}"
+ - "{{ iop_fdw_database_port | string }}"
+ - "{{ iop_fdw_remote_database_name }}"
+ when: iop_fdw_foreign_server_check.rowcount == 0
+
+- name: Check if user mapping exists for service user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: "SELECT umuser FROM pg_user_mappings WHERE srvname = %s AND usename = %s"
+ positional_args:
+ - "{{ iop_fdw_foreign_server_name }}"
+ - "{{ iop_fdw_database_user }}"
+ register: iop_fdw_user_mapping_check
+ changed_when: false
+
+- name: Create user mapping for service user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ CREATE USER MAPPING FOR {{ iop_fdw_database_user }}
+ SERVER {{ iop_fdw_foreign_server_name }}
+ OPTIONS (user %s, password %s)
+ positional_args:
+ - "{{ iop_fdw_remote_user }}"
+ - "{{ iop_fdw_remote_password }}"
+ when: iop_fdw_user_mapping_check.rowcount == 0
+
+- name: Check if user mapping exists for postgres user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: "SELECT umuser FROM pg_user_mappings WHERE srvname = %s AND usename = 'postgres'"
+ positional_args:
+ - "{{ iop_fdw_foreign_server_name }}"
+ register: iop_fdw_postgres_mapping_check
+ changed_when: false
+
+- name: Create user mapping for postgres user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ CREATE USER MAPPING FOR postgres
+ SERVER {{ iop_fdw_foreign_server_name }}
+ OPTIONS (user %s, password %s)
+ positional_args:
+ - "{{ iop_fdw_remote_user }}"
+ - "{{ iop_fdw_remote_password }}"
+ when: iop_fdw_postgres_mapping_check.rowcount == 0
+
+- name: Grant usage on foreign server
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: "GRANT USAGE ON FOREIGN SERVER {{ iop_fdw_foreign_server_name }} TO {{ iop_fdw_database_user }}"
+
+- name: Create local view schema
+ community.postgresql.postgresql_schema:
+ db: "{{ iop_fdw_database_name }}"
+ name: "{{ iop_fdw_local_view_schema }}"
+ owner: "{{ iop_fdw_database_user }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+
+- name: Create local schema for foreign tables
+ community.postgresql.postgresql_schema:
+ db: "{{ iop_fdw_database_name }}"
+ name: "{{ iop_fdw_local_source_schema }}"
+ owner: "{{ iop_fdw_database_user }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+
+- name: Check if foreign table exists
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: "SELECT foreign_table_name FROM information_schema.foreign_tables WHERE foreign_table_schema = %s AND foreign_table_name = %s"
+ positional_args:
+ - "{{ iop_fdw_local_source_schema }}"
+ - "{{ iop_fdw_remote_table_name }}"
+ register: iop_fdw_foreign_table_check
+ changed_when: false
+
+- name: Import foreign schema
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ IMPORT FOREIGN SCHEMA {{ iop_fdw_remote_table_schema }}
+ LIMIT TO ({{ iop_fdw_remote_table_name }})
+ FROM SERVER {{ iop_fdw_foreign_server_name }}
+ INTO {{ iop_fdw_local_source_schema }}
+ when: iop_fdw_foreign_table_check.rowcount == 0
+
+- name: Create local view pointing to foreign table
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ CREATE OR REPLACE VIEW "{{ iop_fdw_local_view_schema }}"."{{ iop_fdw_local_view_name }}" AS
+ SELECT * FROM "{{ iop_fdw_local_source_schema }}"."{{ iop_fdw_remote_table_name }}"
+
+- name: Grant select on foreign table to service user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ GRANT USAGE ON SCHEMA {{ iop_fdw_local_source_schema }} TO {{ iop_fdw_database_user }};
+ GRANT USAGE ON SCHEMA {{ iop_fdw_local_view_schema }} TO {{ iop_fdw_database_user }};
+ GRANT SELECT ON {{ iop_fdw_local_source_schema }}.{{ iop_fdw_remote_table_name }} TO {{ iop_fdw_database_user }};
+ GRANT SELECT ON {{ iop_fdw_local_view_schema }}.{{ iop_fdw_local_view_name }} TO {{ iop_fdw_database_user }};
+
+- name: Grant permissions on remote database view to remote user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_remote_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ GRANT USAGE ON SCHEMA {{ iop_fdw_remote_table_schema }} TO {{ iop_fdw_remote_user }};
+ GRANT SELECT ON {{ iop_fdw_remote_table_schema }}.{{ iop_fdw_local_view_name }} TO {{ iop_fdw_remote_user }};
diff --git a/src/roles/iop_gateway/defaults/main.yaml b/src/roles/iop_gateway/defaults/main.yaml
new file mode 100644
index 00000000..f0c893c8
--- /dev/null
+++ b/src/roles/iop_gateway/defaults/main.yaml
@@ -0,0 +1,11 @@
+---
+iop_gateway_container_image: "quay.io/iop/gateway"
+iop_gateway_container_tag: "foreman-3.16"
+
+# Certificate paths - gateway server uses localhost certs to match puppet-iop behavior
+iop_gateway_server_certificate: "/root/certificates/certs/localhost.crt"
+iop_gateway_server_key: "/root/certificates/private/localhost.key"
+iop_gateway_server_ca_certificate: "/root/certificates/certs/ca.crt"
+iop_gateway_client_certificate: "/root/certificates/certs/localhost-client.crt"
+iop_gateway_client_key: "/root/certificates/private/localhost-client.key"
+iop_gateway_client_ca_certificate: "/root/certificates/certs/ca.crt"
diff --git a/src/roles/iop_gateway/handlers/main.yaml b/src/roles/iop_gateway/handlers/main.yaml
new file mode 100644
index 00000000..40d60d7f
--- /dev/null
+++ b/src/roles/iop_gateway/handlers/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Check if gateway service exists
+ ansible.builtin.systemd:
+ name: iop-core-gateway
+ register: iop_gateway_service_status
+ failed_when: false
+ listen: restart gateway
+
+- name: Restart gateway service if it exists
+ ansible.builtin.systemd:
+ name: iop-core-gateway
+ state: restarted
+ when: iop_gateway_service_status.status is defined and iop_gateway_service_status.status.LoadState != "not-found"
+ listen: restart gateway
diff --git a/src/roles/iop_gateway/tasks/main.yaml b/src/roles/iop_gateway/tasks/main.yaml
new file mode 100644
index 00000000..13804a9e
--- /dev/null
+++ b/src/roles/iop_gateway/tasks/main.yaml
@@ -0,0 +1,95 @@
+---
+- name: Pull Gateway container image
+ containers.podman.podman_image:
+ name: "{{ iop_gateway_container_image }}:{{ iop_gateway_container_tag }}"
+ state: present
+
+- name: Create Gateway server certificate secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-server-cert
+ path: "{{ iop_gateway_server_certificate }}"
+ notify: restart gateway
+
+- name: Create Gateway server key secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-server-key
+ path: "{{ iop_gateway_server_key }}"
+ notify: restart gateway
+
+- name: Create Gateway server CA certificate secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-server-ca-cert
+ path: "{{ iop_gateway_server_ca_certificate }}"
+ notify: restart gateway
+
+- name: Create Gateway client certificate secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-client-cert
+ path: "{{ iop_gateway_client_certificate }}"
+ notify: restart gateway
+
+- name: Create Gateway client key secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-client-key
+ path: "{{ iop_gateway_client_key }}"
+ notify: restart gateway
+
+- name: Create Gateway client CA certificate secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-client-ca-cert
+ path: "{{ iop_gateway_client_ca_certificate }}"
+ notify: restart gateway
+
+- name: Create Gateway relay configuration secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-relay-conf
+ data: "{{ lookup('ansible.builtin.template', 'relay.conf.j2') }}"
+ notify: restart gateway
+
+- name: Deploy Gateway container
+ containers.podman.podman_container:
+ name: iop-core-gateway
+ image: "{{ iop_gateway_container_image }}:{{ iop_gateway_container_tag }}"
+ state: quadlet
+ network:
+ - iop-core-network
+ publish:
+ - "127.0.0.1:24443:8443"
+ env:
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-core-gateway-server-cert,target=/etc/nginx/certs/nginx.crt,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-server-key,target=/etc/nginx/certs/nginx.key,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-server-ca-cert,target=/etc/nginx/certs/ca.crt,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-client-cert,target=/etc/nginx/smart-proxy-relay/certs/proxy.crt,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-client-key,target=/etc/nginx/smart-proxy-relay/certs/proxy.key,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-client-ca-cert,target=/etc/nginx/smart-proxy-relay/certs/ca.crt,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-relay-conf,target=/etc/nginx/smart-proxy-relay/relay.conf,mode=0440,uid=998,gid=998,type=mount'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Gateway Container
+ After=iop-core-kafka.service iop-core-engine.service iop-core-ingress.service
+ Wants=iop-core-kafka.service iop-core-engine.service iop-core-ingress.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=multi-user.target
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Gateway service
+ ansible.builtin.systemd:
+ name: iop-core-gateway
+ enabled: true
+ state: started
diff --git a/src/roles/iop_gateway/templates/relay.conf.j2 b/src/roles/iop_gateway/templates/relay.conf.j2
new file mode 100644
index 00000000..8d13704a
--- /dev/null
+++ b/src/roles/iop_gateway/templates/relay.conf.j2
@@ -0,0 +1,6 @@
+# (REQUIRED) CN of the Foreman instance (must match Foreman's TLS certificate)
+proxy_ssl_name "{{ foreman_servername | default(ansible_fqdn) }}";
+
+# URI to foreman
+# host.containers.internal resolves to the container network gateway (the host).
+proxy_pass "https://host.containers.internal";
diff --git a/src/roles/iop_ingress/defaults/main.yaml b/src/roles/iop_ingress/defaults/main.yaml
new file mode 100644
index 00000000..eeefa0b3
--- /dev/null
+++ b/src/roles/iop_ingress/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+iop_ingress_container_image: "quay.io/iop/ingress"
+iop_ingress_container_tag: "foreman-3.16"
diff --git a/src/roles/iop_ingress/handlers/main.yaml b/src/roles/iop_ingress/handlers/main.yaml
new file mode 100644
index 00000000..c00101a0
--- /dev/null
+++ b/src/roles/iop_ingress/handlers/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Check if ingress service exists
+ ansible.builtin.systemd:
+ name: iop-core-ingress
+ register: iop_ingress_service_status
+ failed_when: false
+ listen: restart ingress
+
+- name: Restart ingress service if it exists
+ ansible.builtin.systemd:
+ name: iop-core-ingress
+ state: restarted
+ when: iop_ingress_service_status.status is defined and iop_ingress_service_status.status.LoadState != "not-found"
+ listen: restart ingress
diff --git a/src/roles/iop_ingress/tasks/main.yaml b/src/roles/iop_ingress/tasks/main.yaml
new file mode 100644
index 00000000..dc461587
--- /dev/null
+++ b/src/roles/iop_ingress/tasks/main.yaml
@@ -0,0 +1,40 @@
+---
+- name: Pull Ingress container image
+ containers.podman.podman_image:
+ name: "{{ iop_ingress_container_image }}:{{ iop_ingress_container_tag }}"
+ state: present
+
+- name: Deploy Ingress container
+ containers.podman.podman_container:
+ name: iop-core-ingress
+ image: "{{ iop_ingress_container_image }}:{{ iop_ingress_container_tag }}"
+ state: quadlet
+ env:
+ INGRESS_VALID_UPLOAD_TYPES: "advisor,compliance,qpc,rhv,tower,leapp-reporting,xavier,playbook,playbook-sat,malware-detection,tasks"
+ INGRESS_KAFKA_BROKERS: "iop-core-kafka:9092"
+ BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ INGRESS_STAGERIMPLEMENTATION: "filebased"
+ INGRESS_STORAGEFILESYSTEMPATH: "/var/tmp"
+ INGRESS_SERVICEBASEURL: "http://localhost:8080"
+ INGRESS_WEBPORT: "8080"
+ INGRESS_METRICSPORT: "3001"
+ network:
+ - iop-core-network
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Ingress Container
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Ingress service
+ ansible.builtin.systemd:
+ name: iop-core-ingress
+ enabled: true
+ state: started
diff --git a/src/roles/iop_inventory/defaults/main.yaml b/src/roles/iop_inventory/defaults/main.yaml
new file mode 100644
index 00000000..f04ea3a1
--- /dev/null
+++ b/src/roles/iop_inventory/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+iop_inventory_container_image: "quay.io/iop/host-inventory"
+iop_inventory_container_tag: "foreman-3.16"
+
+iop_inventory_database_name: inventory_db
+iop_inventory_database_user: inventory_admin
+iop_inventory_database_password: CHANGEME
+iop_inventory_database_host: host.containers.internal
+iop_inventory_database_port: "5432"
diff --git a/src/roles/iop_inventory/handlers/main.yaml b/src/roles/iop_inventory/handlers/main.yaml
new file mode 100644
index 00000000..35661329
--- /dev/null
+++ b/src/roles/iop_inventory/handlers/main.yaml
@@ -0,0 +1,20 @@
+---
+- name: Check if inventory services exist
+ ansible.builtin.systemd:
+ name: "{{ item }}"
+ register: iop_inventory_services_status
+ failed_when: false
+ listen: restart inventory
+ loop:
+ - iop-core-host-inventory-migrate
+ - iop-core-host-inventory
+ - iop-core-host-inventory-api
+ - iop-core-host-inventory-cleanup
+
+- name: Restart inventory services if they exist
+ ansible.builtin.systemd:
+ name: "{{ item.item }}"
+ state: restarted
+ when: item.status is defined and item.status.LoadState != "not-found"
+ listen: restart inventory
+ loop: "{{ iop_inventory_services_status.results }}"
diff --git a/src/roles/iop_inventory/tasks/main.yaml b/src/roles/iop_inventory/tasks/main.yaml
new file mode 100644
index 00000000..f03c61cc
--- /dev/null
+++ b/src/roles/iop_inventory/tasks/main.yaml
@@ -0,0 +1,244 @@
+---
+- name: Pull Host Inventory container image
+ containers.podman.podman_image:
+ name: "{{ iop_inventory_container_image }}:{{ iop_inventory_container_tag }}"
+ state: present
+
+- name: Create podman secret for inventory database username
+ containers.podman.podman_secret:
+ name: iop-core-host-inventory-database-username
+ data: "{{ iop_inventory_database_user }}"
+ notify: restart inventory
+
+- name: Create podman secret for inventory database password
+ containers.podman.podman_secret:
+ name: iop-core-host-inventory-database-password
+ data: "{{ iop_inventory_database_password }}"
+ notify: restart inventory
+
+- name: Create podman secret for inventory database name
+ containers.podman.podman_secret:
+ name: iop-core-host-inventory-database-name
+ data: "{{ iop_inventory_database_name }}"
+ notify: restart inventory
+
+- name: Create podman secret for inventory database host
+ containers.podman.podman_secret:
+ name: iop-core-host-inventory-database-host
+ data: "{{ iop_inventory_database_host }}"
+ notify: restart inventory
+
+- name: Create podman secret for inventory database port
+ containers.podman.podman_secret:
+ name: iop-core-host-inventory-database-port
+ data: "{{ iop_inventory_database_port }}"
+ notify: restart inventory
+
+- name: Deploy Host Inventory Database Migration Container
+ containers.podman.podman_container:
+ name: iop-core-host-inventory-migrate
+ image: "{{ iop_inventory_container_image }}:{{ iop_inventory_container_tag }}"
+ state: quadlet
+ command: make upgrade_db
+ network:
+ - iop-core-network
+ env:
+ KAFKA_BOOTSTRAP_SERVERS: "PLAINTEXT://iop-core-kafka:9092"
+ USE_SUBMAN_ID: "true"
+ INVENTORY_DB_SSL_MODE: "disable"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-core-host-inventory-database-username,type=env,target=INVENTORY_DB_USER'
+ - 'iop-core-host-inventory-database-password,type=env,target=INVENTORY_DB_PASS'
+ - 'iop-core-host-inventory-database-name,type=env,target=INVENTORY_DB_NAME'
+ - 'iop-core-host-inventory-database-host,type=env,target=INVENTORY_DB_HOST'
+ - 'iop-core-host-inventory-database-port,type=env,target=INVENTORY_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Database Readiness and Migration Init Container
+ [Service]
+ Type=oneshot
+ RemainAfterExit=true
+ [Install]
+ WantedBy=default.target
+
+- name: Deploy Host Inventory MQ Service Container
+ containers.podman.podman_container:
+ name: iop-core-host-inventory
+ image: "{{ iop_inventory_container_image }}:{{ iop_inventory_container_tag }}"
+ state: quadlet
+ command: make run_inv_mq_service
+ network:
+ - iop-core-network
+ env:
+ KAFKA_BOOTSTRAP_SERVERS: "PLAINTEXT://iop-core-kafka:9092"
+ USE_SUBMAN_ID: "true"
+ INVENTORY_DB_SSL_MODE: "disable"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-core-host-inventory-database-username,type=env,target=INVENTORY_DB_USER'
+ - 'iop-core-host-inventory-database-password,type=env,target=INVENTORY_DB_PASS'
+ - 'iop-core-host-inventory-database-name,type=env,target=INVENTORY_DB_NAME'
+ - 'iop-core-host-inventory-database-host,type=env,target=INVENTORY_DB_HOST'
+ - 'iop-core-host-inventory-database-port,type=env,target=INVENTORY_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Host-Based Inventory Container
+ After=network-online.target iop-core-host-inventory-migrate.service
+ Requires=iop-core-host-inventory-migrate.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Deploy Host Inventory API Container
+ containers.podman.podman_container:
+ name: iop-core-host-inventory-api
+ image: "{{ iop_inventory_container_image }}:{{ iop_inventory_container_tag }}"
+ state: quadlet
+ command: python run_gunicorn.py
+ network:
+ - iop-core-network
+ env:
+ KAFKA_BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ LISTEN_PORT: "8081"
+ BYPASS_RBAC: "true"
+ USE_SUBMAN_ID: "true"
+ INVENTORY_DB_SSL_MODE: "disable"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-core-host-inventory-database-username,type=env,target=INVENTORY_DB_USER'
+ - 'iop-core-host-inventory-database-password,type=env,target=INVENTORY_DB_PASS'
+ - 'iop-core-host-inventory-database-name,type=env,target=INVENTORY_DB_NAME'
+ - 'iop-core-host-inventory-database-host,type=env,target=INVENTORY_DB_HOST'
+ - 'iop-core-host-inventory-database-port,type=env,target=INVENTORY_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Host-Based Inventory Web Container
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Deploy Host Inventory Cleanup Container
+ containers.podman.podman_container:
+ name: iop-core-host-inventory-cleanup
+ image: "{{ iop_inventory_container_image }}:{{ iop_inventory_container_tag }}"
+ state: quadlet
+ command: make run_host_delete_access_tags
+ network:
+ - iop-core-network
+ env:
+ KAFKA_BOOTSTRAP_SERVERS: "PLAINTEXT://iop-core-kafka:9092"
+ USE_SUBMAN_ID: "true"
+ INVENTORY_DB_SSL_MODE: "disable"
+ PYTHONPATH: "/opt/app-root/src"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-core-host-inventory-database-username,type=env,target=INVENTORY_DB_USER'
+ - 'iop-core-host-inventory-database-password,type=env,target=INVENTORY_DB_PASS'
+ - 'iop-core-host-inventory-database-name,type=env,target=INVENTORY_DB_NAME'
+ - 'iop-core-host-inventory-database-host,type=env,target=INVENTORY_DB_HOST'
+ - 'iop-core-host-inventory-database-port,type=env,target=INVENTORY_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Host Inventory Access Tags Cleanup Job
+ Wants=iop-core-host-inventory-api.service
+ After=iop-core-host-inventory-api.service
+
+- name: Create Host Inventory Cleanup Timer
+ ansible.builtin.copy:
+ dest: /etc/systemd/system/iop-core-host-inventory-cleanup.timer
+ content: |
+ [Unit]
+ Description=Host Inventory Access Tags Cleanup Timer
+
+ [Timer]
+ OnBootSec=10min
+ OnUnitActiveSec=24h
+ Persistent=true
+ RandomizedDelaySec=300
+
+ [Install]
+ WantedBy=timers.target
+ mode: '0644'
+ notify: restart inventory
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Host Inventory Migration service
+ ansible.builtin.systemd:
+ name: iop-core-host-inventory-migrate
+ enabled: true
+ state: started
+
+- name: Start Host Inventory MQ service
+ ansible.builtin.systemd:
+ name: iop-core-host-inventory
+ enabled: true
+ state: started
+
+- name: Start Host Inventory API service
+ ansible.builtin.systemd:
+ name: iop-core-host-inventory-api
+ enabled: true
+ state: started
+
+- name: Enable Host Inventory Cleanup Timer
+ ansible.builtin.systemd:
+ name: iop-core-host-inventory-cleanup.timer
+ enabled: true
+ state: started
+
+- name: Install PostgreSQL client for FDW operations
+ ansible.builtin.package:
+ name: postgresql
+ state: present
+
+- name: Enable postgres_fdw extension on inventory database
+ community.postgresql.postgresql_ext:
+ name: postgres_fdw
+ db: "{{ iop_inventory_database_name }}"
+ login_user: postgres
+ login_password: "{{ postgresql_admin_password }}"
+ login_host: localhost
+
+- name: Create inventory schema in inventory database
+ community.postgresql.postgresql_schema:
+ db: "{{ iop_inventory_database_name }}"
+ name: inventory
+ owner: "{{ iop_inventory_database_user }}"
+ login_user: postgres
+ login_password: "{{ postgresql_admin_password }}"
+ login_host: localhost
+
+- name: Create inventory.hosts view in inventory database
+ community.postgresql.postgresql_query:
+ db: "{{ iop_inventory_database_name }}"
+ login_user: postgres
+ login_password: "{{ postgresql_admin_password }}"
+ login_host: localhost
+ query: |
+ CREATE OR REPLACE VIEW "inventory"."hosts" AS SELECT
+ id,
+ account,
+ display_name,
+ created_on as created,
+ modified_on as updated,
+ stale_timestamp,
+ stale_timestamp + INTERVAL '1' DAY * '7' AS stale_warning_timestamp,
+ stale_timestamp + INTERVAL '1' DAY * '14' AS culled_timestamp,
+ tags_alt as tags,
+ system_profile_facts as system_profile,
+ (canonical_facts ->> 'insights_id')::uuid as insights_id,
+ reporter,
+ per_reporter_staleness,
+ org_id,
+ groups
+ FROM hbi.hosts WHERE (canonical_facts->'insights_id' IS NOT NULL);
diff --git a/src/roles/iop_kafka/defaults/main.yaml b/src/roles/iop_kafka/defaults/main.yaml
new file mode 100644
index 00000000..176858bc
--- /dev/null
+++ b/src/roles/iop_kafka/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+iop_kafka_container_image: "quay.io/strimzi/kafka"
+iop_kafka_container_tag: "latest-kafka-3.7.1"
diff --git a/src/roles/iop_kafka/files/kafka/init b/src/roles/iop_kafka/files/kafka/init
new file mode 100644
index 00000000..c4238f82
--- /dev/null
+++ b/src/roles/iop_kafka/files/kafka/init
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+usage() {
+ echo "Usage: [--create | --check]"
+ echo " --create : Perform the creation action."
+ echo " --check : Perform the check action."
+ exit 1
+}
+
+topics=(
+ "platform.engine.results"
+ "platform.insights.rule-hits"
+ "platform.insights.rule-deactivation"
+ "platform.inventory.events"
+ "platform.inventory.host-ingress"
+ "platform.sources.event-stream"
+ "platform.playbook-dispatcher.runs"
+ "platform.upload.announce"
+ "platform.upload.validation"
+ "platform.logging.logs"
+ "platform.payload-status"
+ "platform.remediation-updates.vulnerability"
+ "vulnerability.evaluator.results"
+ "vulnerability.evaluator.recalc"
+ "vulnerability.evaluator.upload"
+ "vulnerability.grouper.inventory.upload"
+ "vulnerability.grouper.advisor.upload"
+)
+
+if ! [[ -v IOP_CORE_KAFKA ]]; then
+ IOP_CORE_KAFKA=iop-core-kafka
+fi
+
+kafka_cmd="./bin/kafka-topics.sh"
+kafka_bootstrap_server=$IOP_CORE_KAFKA:9092
+
+create_topics() {
+
+ echo -e "====================="
+ echo -e "Creating Kafka topics:"
+ for topic in "${topics[@]}"; do
+ echo -e "Creating topic ""$topic"
+    $kafka_cmd --create --if-not-exists --topic "$topic" --bootstrap-server $kafka_bootstrap_server --partitions 1 --replication-factor 1 &
+ done
+ wait
+
+ echo -e "=========================="
+ echo -e "Listing all Kafka topics:"
+ $kafka_cmd --bootstrap-server $kafka_bootstrap_server --list
+}
+
+check_all_kafka_topics_exist() {
+ echo "Using Kafka command: $kafka_cmd" >&2
+
+ echo "Attempting to fetch existing topics..." >&2
+ local existing_topics_list
+ local list_output
+ list_output=$("$kafka_cmd" --bootstrap-server "$kafka_bootstrap_server" --list 2>&1)
+ local list_exit_code=$?
+
+ if [ $list_exit_code -ne 0 ]; then
+ echo "--------------------------------------------------" >&2
+ echo "Error: Failed to connect to Kafka or list topics." >&2
+ echo "Command failed: $kafka_cmd --bootstrap-server \"$kafka_bootstrap_server\" --list" >&2
+ echo "Exit code: $list_exit_code" >&2
+ echo "Output/Error:" >&2
+ echo "$list_output" >&2
+ echo "--------------------------------------------------" >&2
+ return 1
+ fi
+ existing_topics_list="$list_output"
+ echo "Successfully retrieved topic list." >&2
+
+ local missing_count=0
+ local missing_list=()
+
+ echo "Checking if all required topics exist..." >&2
+ for topic in "${topics[@]}"; do
+ if ! echo "$existing_topics_list" | grep -q -x -w "$topic"; then
+ echo " - Required topic '$topic' is MISSING." >&2
+ missing_list+=("$topic")
+ ((missing_count++))
+ else
+ echo " - Required topic '$topic' exists." >&2
+ fi
+ done
+
+ if [ $missing_count -eq 0 ]; then
+ echo "Result: All ${#topics[@]} required topics exist." >&2
+ return 0
+ else
+ echo "Result: Found $missing_count missing required topic(s)." >&2
+ echo "Missing topics:" >&2
+ printf " - %s\n" "${missing_list[@]}" >&2
+ return 2
+ fi
+}
+
+if [ "$#" -lt 1 ]; then
+ echo "Error: No operation specified." >&2
+ usage
+fi
+
+MODE="$1"
+shift
+
+case "$MODE" in
+ --create)
+ echo "Mode: Create"
+ echo "Performing create action..."
+
+ create_topics
+ ;;
+
+ --check)
+ echo "Mode: Check"
+ echo "Performing check action..."
+ check_all_kafka_topics_exist
+ ;;
+
+ *)
+ echo "Error: Invalid option '$MODE'." >&2
+ usage
+ ;;
+esac
diff --git a/src/roles/iop_kafka/files/kafka/init-start.sh b/src/roles/iop_kafka/files/kafka/init-start.sh
new file mode 100644
index 00000000..69e1bcd7
--- /dev/null
+++ b/src/roles/iop_kafka/files/kafka/init-start.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Generate cluster UUID if it doesn't exist
+CLUSTER_ID_FILE="/var/lib/kafka/data/meta.properties"
+if [ ! -f "$CLUSTER_ID_FILE" ]; then
+ echo "Initializing KRaft storage..."
+ # Generate a cluster UUID
+ CLUSTER_UUID=$(bin/kafka-storage.sh random-uuid)
+ echo "Generated cluster UUID: $CLUSTER_UUID"
+
+ # Format the storage directory
+  bin/kafka-storage.sh format -t "$CLUSTER_UUID" -c /opt/kafka/config/kraft/server.properties
+else
+ echo "KRaft storage already initialized"
+fi
+
+# Start Kafka server
+echo "Starting Kafka server..."
+exec bin/kafka-server-start.sh /opt/kafka/config/kraft/server.properties
diff --git a/src/roles/iop_kafka/handlers/main.yaml b/src/roles/iop_kafka/handlers/main.yaml
new file mode 100644
index 00000000..a900c745
--- /dev/null
+++ b/src/roles/iop_kafka/handlers/main.yaml
@@ -0,0 +1,7 @@
+---
+- name: Restart kafka
+ ansible.builtin.systemd:
+ name: iop-core-kafka
+ state: restarted
+ listen: restart kafka
+ when: ansible_facts.services['iop-core-kafka.service'] is defined
diff --git a/src/roles/iop_kafka/tasks/main.yaml b/src/roles/iop_kafka/tasks/main.yaml
new file mode 100644
index 00000000..6c6ef6f2
--- /dev/null
+++ b/src/roles/iop_kafka/tasks/main.yaml
@@ -0,0 +1,74 @@
+---
+- name: Pull Kafka container image
+ containers.podman.podman_image:
+ name: "{{ iop_kafka_container_image }}:{{ iop_kafka_container_tag }}"
+ state: present
+
+- name: Create Kafka init script secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-kafka-init-start
+ data: "{{ lookup('ansible.builtin.file', 'kafka/init-start.sh') }}"
+ notify: restart kafka
+
+- name: Create Kafka server properties secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-kafka-server-properties
+ data: "{{ lookup('ansible.builtin.template', 'kafka/kraft.j2') }}"
+ notify: restart kafka
+
+- name: Create Kafka init topics script secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-kafka-init
+ data: "{{ lookup('ansible.builtin.file', 'kafka/init') }}"
+
+- name: Create Kafka data volume
+ containers.podman.podman_volume:
+ name: iop-core-kafka-data
+ state: present
+
+- name: Deploy Kafka container
+ containers.podman.podman_container:
+ name: iop-core-kafka
+ image: "{{ iop_kafka_container_image }}:{{ iop_kafka_container_tag }}"
+ state: quadlet
+ command: sh bin/init-start.sh
+ network:
+ - iop-core-network
+ env:
+ LOG_DIR: /tmp/kafka-logs
+ KAFKA_NODE_ID: "1"
+ volumes:
+ - "iop-core-kafka-data:/var/lib/kafka/data"
+ secrets:
+ - 'iop-core-kafka-init-start,target=/opt/kafka/bin/init-start.sh,mode=0755,type=mount'
+ - 'iop-core-kafka-server-properties,target=/opt/kafka/config/kraft/server.properties,mode=0644,type=mount'
+ - 'iop-core-kafka-init,target=/opt/kafka/init.sh,mode=0755,type=mount'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Kafka Container
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Kafka service
+ ansible.builtin.systemd:
+ name: iop-core-kafka
+ enabled: true
+ state: started
+
+- name: Initialize Kafka topics
+ containers.podman.podman_container_exec:
+ name: iop-core-kafka
+ command: /opt/kafka/init.sh --create
+ register: iop_kafka_topics_result
+ changed_when: "'Creating topic' in iop_kafka_topics_result.stdout"
+ failed_when: iop_kafka_topics_result.rc != 0 and 'already exists' not in iop_kafka_topics_result.stderr
diff --git a/src/roles/iop_kafka/templates/kafka/kraft.j2 b/src/roles/iop_kafka/templates/kafka/kraft.j2
new file mode 100644
index 00000000..96b9041b
--- /dev/null
+++ b/src/roles/iop_kafka/templates/kafka/kraft.j2
@@ -0,0 +1,123 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This file contains a minimal set of configs to get a single-node Kafka cluster
+# up and running in KRaft mode. This file was automatically generated.
+#
+
+############################# Server Basics #############################
+
+# The role of this server. Setting this puts us in KRaft mode
+process.roles=broker,controller
+
+# The node id associated with this instance's roles
+node.id=1
+
+# The connect string for the controller quorum
+controller.quorum.voters=1@iop-core-kafka:9093
+
+############################# Socket Server Settings #############################
+
+# The address the socket server listens on. If not configured, the host name will be equal to the value of
+# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
+# FORMAT:
+# listeners = listener_name://host_name:port
+# EXAMPLE:
+# listeners = PLAINTEXT://your.host.name:9092
+listeners=PLAINTEXT://iop-core-kafka:9092,CONTROLLER://iop-core-kafka:9093
+
+# Name of listener used for communication between brokers.
+inter.broker.listener.name=PLAINTEXT
+
+# Listener name, hostname and port the broker will advertise to clients.
+# If not set, it uses the value for "listeners".
+advertised.listeners=PLAINTEXT://iop-core-kafka:9092
+
+# A comma-separated list of the names of the listeners used by the controller.
+# If no explicit mapping set, the default will be using PLAINTEXT protocol
+# This is required if running in KRaft mode.
+controller.listener.names=CONTROLLER
+
+# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
+listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/var/lib/kafka/data
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
+offsets.topic.replication.factor=1
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are removed when they grow larger
+# than this size. -1 disables size-based retention.
+log.segment.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.retention.bytes=-1
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
+# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0
diff --git a/src/roles/iop_network/defaults/main.yaml b/src/roles/iop_network/defaults/main.yaml
new file mode 100644
index 00000000..c4d62c42
--- /dev/null
+++ b/src/roles/iop_network/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+iop_network_name: "iop-core-network"
+iop_network_subnet: "10.130.0.0/24"
+iop_network_gateway: "10.130.0.1"
+iop_network_driver: "bridge"
diff --git a/src/roles/iop_network/tasks/main.yaml b/src/roles/iop_network/tasks/main.yaml
new file mode 100644
index 00000000..3f6676ec
--- /dev/null
+++ b/src/roles/iop_network/tasks/main.yaml
@@ -0,0 +1,8 @@
+---
+- name: Create IOP Core network
+ containers.podman.podman_network:
+ name: "{{ iop_network_name }}"
+ state: present
+ driver: "{{ iop_network_driver }}"
+ subnet: "{{ iop_network_subnet }}"
+ gateway: "{{ iop_network_gateway }}"
diff --git a/src/roles/iop_puptoo/defaults/main.yaml b/src/roles/iop_puptoo/defaults/main.yaml
new file mode 100644
index 00000000..3a9a1290
--- /dev/null
+++ b/src/roles/iop_puptoo/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+iop_puptoo_container_image: "quay.io/iop/puptoo"
+iop_puptoo_container_tag: "foreman-3.16"
diff --git a/src/roles/iop_puptoo/handlers/main.yaml b/src/roles/iop_puptoo/handlers/main.yaml
new file mode 100644
index 00000000..e3094888
--- /dev/null
+++ b/src/roles/iop_puptoo/handlers/main.yaml
@@ -0,0 +1,6 @@
+---
+- name: Restart puptoo
+ ansible.builtin.systemd:
+ name: iop-core-puptoo
+ state: restarted
+ when: ansible_facts.services['iop-core-puptoo.service'] is defined
diff --git a/src/roles/iop_puptoo/tasks/main.yaml b/src/roles/iop_puptoo/tasks/main.yaml
new file mode 100644
index 00000000..792b11c4
--- /dev/null
+++ b/src/roles/iop_puptoo/tasks/main.yaml
@@ -0,0 +1,37 @@
+---
+- name: Pull Puptoo container image
+ containers.podman.podman_image:
+ name: "{{ iop_puptoo_container_image }}:{{ iop_puptoo_container_tag }}"
+ state: present
+
+- name: Deploy Puptoo container
+ containers.podman.podman_container:
+ name: iop-core-puptoo
+ image: "{{ iop_puptoo_container_image }}:{{ iop_puptoo_container_tag }}"
+ state: quadlet
+ env:
+ BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ DISABLE_REDIS: "True"
+ DISABLE_S3_UPLOAD: "True"
+ network:
+ - iop-core-network
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Puptoo Container
+ After=iop-core-kafka.service
+ Wants=iop-core-kafka.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Puptoo service
+ ansible.builtin.systemd:
+ name: iop-core-puptoo
+ enabled: true
+ state: started
diff --git a/src/roles/iop_remediation/defaults/main.yaml b/src/roles/iop_remediation/defaults/main.yaml
new file mode 100644
index 00000000..48cf7569
--- /dev/null
+++ b/src/roles/iop_remediation/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+iop_remediation_container_image: "quay.io/iop/remediations"
+iop_remediation_container_tag: "foreman-3.16"
+
+iop_remediation_database_name: remediations_db
+iop_remediation_database_user: remediations_user
+iop_remediation_database_password: CHANGEME
+iop_remediation_database_host: "host.containers.internal"
+iop_remediation_database_port: "5432"
diff --git a/src/roles/iop_remediation/handlers/main.yaml b/src/roles/iop_remediation/handlers/main.yaml
new file mode 100644
index 00000000..e49666e0
--- /dev/null
+++ b/src/roles/iop_remediation/handlers/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Check if remediation api service exists
+ ansible.builtin.systemd:
+ name: iop-service-remediations-api
+ register: iop_remediation_api_service_status
+ failed_when: false
+ listen: restart remediation
+
+- name: Restart remediation api
+ ansible.builtin.systemd:
+ name: iop-service-remediations-api
+ state: restarted
+ when: iop_remediation_api_service_status.status is defined and iop_remediation_api_service_status.status.LoadState != "not-found"
+ listen: restart remediation
diff --git a/src/roles/iop_remediation/tasks/main.yaml b/src/roles/iop_remediation/tasks/main.yaml
new file mode 100644
index 00000000..0abbda89
--- /dev/null
+++ b/src/roles/iop_remediation/tasks/main.yaml
@@ -0,0 +1,90 @@
+---
+- name: Pull Remediation container image
+  containers.podman.podman_image:
+    name: "{{ iop_remediation_container_image }}:{{ iop_remediation_container_tag }}"
+    state: present
+
+# Database credentials are stored as podman secrets; no_log keeps the
+# secret values out of the Ansible output and log files.
+- name: Create Remediation database username secret
+  containers.podman.podman_secret:
+    state: present
+    name: iop-service-remediations-db-username
+    data: "{{ iop_remediation_database_user }}"
+  no_log: true
+  notify: restart remediation
+
+- name: Create Remediation database password secret
+  containers.podman.podman_secret:
+    state: present
+    name: iop-service-remediations-db-password
+    data: "{{ iop_remediation_database_password }}"
+  no_log: true
+  notify: restart remediation
+
+- name: Create Remediation database name secret
+  containers.podman.podman_secret:
+    state: present
+    name: iop-service-remediations-db-name
+    data: "{{ iop_remediation_database_name }}"
+  no_log: true
+  notify: restart remediation
+
+- name: Create Remediation database host secret
+  containers.podman.podman_secret:
+    state: present
+    name: iop-service-remediations-db-host
+    data: "{{ iop_remediation_database_host }}"
+  no_log: true
+  notify: restart remediation
+
+- name: Create Remediation database port secret
+  containers.podman.podman_secret:
+    state: present
+    name: iop-service-remediations-db-port
+    data: "{{ iop_remediation_database_port }}"
+  no_log: true
+  notify: restart remediation
+
+- name: Deploy Remediation API container
+  containers.podman.podman_container:
+    name: iop-service-remediations-api
+    image: "{{ iop_remediation_container_image }}:{{ iop_remediation_container_tag }}"
+    state: quadlet
+    network: host
+    command: sh -c "npm run db:migrate && exec node --max-http-header-size=16384 src/app.js"
+    env:
+      REDIS_ENABLED: "false"
+      RBAC_ENFORCE: "false"
+      CONTENT_SERVER_HOST: "http://iop-service-advisor-backend-api:8000"
+      ADVISOR_HOST: "http://iop-service-advisor-backend-api:8000"
+      INVENTORY_HOST: "http://iop-core-host-inventory-api:8081"
+      DB_SSL_ENABLED: "false"
+      REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+    secrets:
+      - 'iop-service-remediations-db-username,type=env,target=DB_USERNAME'
+      - 'iop-service-remediations-db-password,type=env,target=DB_PASSWORD'
+      - 'iop-service-remediations-db-name,type=env,target=DB_DATABASE'
+      - 'iop-service-remediations-db-host,type=env,target=DB_HOST'
+      - 'iop-service-remediations-db-port,type=env,target=DB_PORT'
+    quadlet_options:
+      - |
+        [Unit]
+        Description=Remediations API
+        Wants=iop-core-host-inventory-api.service iop-service-advisor-backend-api.service
+        After=iop-core-host-inventory-api.service iop-service-advisor-backend-api.service
+        [Service]
+        Restart=on-failure
+        [Install]
+        WantedBy=default.target
+  notify: restart remediation
+
+- name: Run daemon reload to make Quadlet create the service files
+  ansible.builtin.systemd:
+    daemon_reload: true
+
+- name: Start Remediation API service
+  ansible.builtin.systemd:
+    name: iop-service-remediations-api
+    enabled: true
+    state: started
diff --git a/src/roles/iop_vmaas/defaults/main.yaml b/src/roles/iop_vmaas/defaults/main.yaml
new file mode 100644
index 00000000..7fe67d4d
--- /dev/null
+++ b/src/roles/iop_vmaas/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+iop_vmaas_container_image: "quay.io/iop/vmaas"
+iop_vmaas_container_tag: "foreman-3.16"
+
+iop_vmaas_database_name: vmaas_db
+iop_vmaas_database_user: vmaas_admin
+iop_vmaas_database_password: CHANGEME
+iop_vmaas_database_host: "host.containers.internal"
+iop_vmaas_database_port: "5432"
diff --git a/src/roles/iop_vmaas/handlers/main.yaml b/src/roles/iop_vmaas/handlers/main.yaml
new file mode 100644
index 00000000..195854cd
--- /dev/null
+++ b/src/roles/iop_vmaas/handlers/main.yaml
@@ -0,0 +1,28 @@
+---
+- name: Check if vmaas reposcan service exists
+ ansible.builtin.systemd:
+ name: iop-service-vmaas-reposcan
+ register: iop_vmaas_reposcan_service_status
+ failed_when: false
+ listen: restart vmaas
+
+- name: Restart vmaas reposcan
+ ansible.builtin.systemd:
+ name: iop-service-vmaas-reposcan
+ state: restarted
+ when: iop_vmaas_reposcan_service_status.status is defined and iop_vmaas_reposcan_service_status.status.LoadState != "not-found"
+ listen: restart vmaas
+
+- name: Check if vmaas webapp-go service exists
+ ansible.builtin.systemd:
+ name: iop-service-vmaas-webapp-go
+ register: iop_vmaas_webapp_service_status
+ failed_when: false
+ listen: restart vmaas
+
+- name: Restart vmaas webapp-go
+ ansible.builtin.systemd:
+ name: iop-service-vmaas-webapp-go
+ state: restarted
+ when: iop_vmaas_webapp_service_status.status is defined and iop_vmaas_webapp_service_status.status.LoadState != "not-found"
+ listen: restart vmaas
diff --git a/src/roles/iop_vmaas/tasks/main.yaml b/src/roles/iop_vmaas/tasks/main.yaml
new file mode 100644
index 00000000..43eb87aa
--- /dev/null
+++ b/src/roles/iop_vmaas/tasks/main.yaml
@@ -0,0 +1,113 @@
+---
+- name: Create VMAAS database secrets
+  containers.podman.podman_secret:
+    name: "{{ item.name }}"
+    data: "{{ item.data }}"
+    state: present
+  loop:
+    - name: "iop-service-vmaas-reposcan-database-username"
+      data: "{{ iop_vmaas_database_user }}"
+    - name: "iop-service-vmaas-reposcan-database-password"
+      data: "{{ iop_vmaas_database_password }}"
+    - name: "iop-service-vmaas-reposcan-database-name"
+      data: "{{ iop_vmaas_database_name }}"
+    - name: "iop-service-vmaas-reposcan-database-host"
+      data: "{{ iop_vmaas_database_host }}"
+    - name: "iop-service-vmaas-reposcan-database-port"
+      data: "{{ iop_vmaas_database_port }}"
+  no_log: true
+  notify: restart vmaas
+
+- name: Create VMAAS data volume
+  containers.podman.podman_volume:
+    name: iop-service-vmaas-data
+    state: present
+
+- name: Deploy VMAAS Reposcan container
+  containers.podman.podman_container:
+    name: iop-service-vmaas-reposcan
+    image: "{{ iop_vmaas_container_image }}:{{ iop_vmaas_container_tag }}"
+    state: quadlet
+    quadlet_dir: /etc/containers/systemd
+    network: iop-core-network
+    volumes:
+      - iop-service-vmaas-data:/data
+    command: "/vmaas/entrypoint.sh database-upgrade reposcan"
+    env:
+      PROMETHEUS_PORT: "8085"
+      PROMETHEUS_MULTIPROC_DIR: "/tmp/prometheus_multiproc_dir"
+      SYNC_REPO_LIST_SOURCE: "katello"
+      SYNC_REPOS: "yes"
+      SYNC_CVE_MAP: "yes"
+      SYNC_CPE: "no"
+      SYNC_CSAF: "no"
+      SYNC_RELEASES: "no"
+      SYNC_RELEASE_GRAPH: "no"
+      KATELLO_URL: "http://iop-core-gateway:9090"
+      REDHAT_CVEMAP_URL: "http://iop-core-gateway:9090/pub/iop/data/meta/v1/cvemap.xml"
+      POSTGRESQL_SSL_MODE: "disable"
+    secrets:
+      - "iop-service-vmaas-reposcan-database-username,type=env,target=POSTGRESQL_USER"
+      - "iop-service-vmaas-reposcan-database-password,type=env,target=POSTGRESQL_PASSWORD"
+      - "iop-service-vmaas-reposcan-database-name,type=env,target=POSTGRESQL_DATABASE"
+      - "iop-service-vmaas-reposcan-database-host,type=env,target=POSTGRESQL_HOST"
+      - "iop-service-vmaas-reposcan-database-port,type=env,target=POSTGRESQL_PORT"
+    quadlet_options:
+      - |
+        [Unit]
+        Description=VMAAS Reposcan Service
+        [Service]
+        Restart=on-failure
+        Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+        [Install]
+        WantedBy=default.target
+  notify: restart vmaas
+
+- name: Deploy VMAAS Webapp-Go container
+  containers.podman.podman_container:
+    name: iop-service-vmaas-webapp-go
+    image: "{{ iop_vmaas_container_image }}:{{ iop_vmaas_container_tag }}"
+    state: quadlet
+    quadlet_dir: /etc/containers/systemd
+    network: iop-core-network
+    command: "/vmaas/entrypoint.sh webapp-go"
+    env:
+      REPOSCAN_PUBLIC_URL: "http://iop-service-vmaas-reposcan:8000"
+      REPOSCAN_PRIVATE_URL: "http://iop-service-vmaas-reposcan:10000"
+      CSAF_UNFIXED_EVAL_ENABLED: "FALSE"
+      GIN_MODE: "release"
+      POSTGRESQL_SSL_MODE: "disable"
+    secrets:
+      - "iop-service-vmaas-reposcan-database-username,type=env,target=POSTGRESQL_USER"
+      - "iop-service-vmaas-reposcan-database-password,type=env,target=POSTGRESQL_PASSWORD"
+      - "iop-service-vmaas-reposcan-database-name,type=env,target=POSTGRESQL_DATABASE"
+      - "iop-service-vmaas-reposcan-database-host,type=env,target=POSTGRESQL_HOST"
+      - "iop-service-vmaas-reposcan-database-port,type=env,target=POSTGRESQL_PORT"
+    quadlet_options:
+      - |
+        [Unit]
+        Description=VMAAS Webapp-Go Service
+        Wants=iop-service-vmaas-reposcan.service
+        After=iop-service-vmaas-reposcan.service
+        [Service]
+        Restart=on-failure
+        Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+        [Install]
+        WantedBy=default.target
+  notify: restart vmaas
+
+- name: Run daemon reload to make Quadlet create the service files
+  ansible.builtin.systemd:
+    daemon_reload: true
+
+- name: Start VMAAS Reposcan service
+  ansible.builtin.systemd:
+    name: iop-service-vmaas-reposcan
+    enabled: true
+    state: started
+
+- name: Start VMAAS Webapp-Go service
+  ansible.builtin.systemd:
+    name: iop-service-vmaas-webapp-go
+    enabled: true
+    state: started
diff --git a/src/roles/iop_vulnerability/defaults/main.yaml b/src/roles/iop_vulnerability/defaults/main.yaml
new file mode 100644
index 00000000..03d1fbce
--- /dev/null
+++ b/src/roles/iop_vulnerability/defaults/main.yaml
@@ -0,0 +1,13 @@
+---
+iop_vulnerability_container_image: "quay.io/iop/vulnerability-engine"
+iop_vulnerability_container_tag: "foreman-3.16"
+
+iop_vulnerability_database_name: vulnerability_db
+iop_vulnerability_database_user: vulnerability_admin
+iop_vulnerability_database_password: CHANGEME
+iop_vulnerability_database_host: "host.containers.internal"
+iop_vulnerability_database_port: "5432"
+
+# Taskomatic configuration
+iop_vulnerability_taskomatic_jobs: "stale_systems:5,delete_systems:30,cacheman:5"
+iop_vulnerability_taskomatic_startup: "cacheman"
diff --git a/src/roles/iop_vulnerability/handlers/main.yaml b/src/roles/iop_vulnerability/handlers/main.yaml
new file mode 100644
index 00000000..c2a665c1
--- /dev/null
+++ b/src/roles/iop_vulnerability/handlers/main.yaml
@@ -0,0 +1,24 @@
+---
+- name: Check if vulnerability services exist
+ ansible.builtin.systemd:
+ name: "{{ item }}"
+ register: iop_vulnerability_services_status
+ failed_when: false
+ loop:
+ - iop-service-vuln-dbupgrade
+ - iop-service-vuln-manager
+ - iop-service-vuln-taskomatic
+ - iop-service-vuln-grouper
+ - iop-service-vuln-listener
+ - iop-service-vuln-evaluator-recalc
+ - iop-service-vuln-evaluator-upload
+ - iop-service-vuln-vmaas-sync
+ listen: restart vulnerability
+
+- name: Restart vulnerability services
+ ansible.builtin.systemd:
+ name: "{{ item.item }}"
+ state: restarted
+ when: item.status is defined and item.status.LoadState != "not-found"
+ loop: "{{ iop_vulnerability_services_status.results }}"
+ listen: restart vulnerability
diff --git a/src/roles/iop_vulnerability/tasks/main.yaml b/src/roles/iop_vulnerability/tasks/main.yaml
new file mode 100644
index 00000000..51a6fbbd
--- /dev/null
+++ b/src/roles/iop_vulnerability/tasks/main.yaml
@@ -0,0 +1,379 @@
+---
+- name: Create vulnerability database secrets
+ containers.podman.podman_secret:
+ name: "{{ item.name }}"
+ data: "{{ item.data }}"
+ state: present
+ loop:
+ - name: "iop-service-vulnerability-database-username"
+ data: "{{ iop_vulnerability_database_user }}"
+ - name: "iop-service-vulnerability-database-password"
+ data: "{{ iop_vulnerability_database_password }}"
+ - name: "iop-service-vulnerability-database-name"
+ data: "{{ iop_vulnerability_database_name }}"
+ - name: "iop-service-vulnerability-database-host"
+ data: "{{ iop_vulnerability_database_host }}"
+ - name: "iop-service-vulnerability-database-port"
+ data: "{{ iop_vulnerability_database_port }}"
+ no_log: true
+
+- name: Set up Foreign Data Wrapper for vulnerability database
+ ansible.builtin.include_role:
+ name: iop_fdw
+ vars:
+ iop_fdw_database_name: "{{ iop_vulnerability_database_name }}"
+ iop_fdw_database_user: "{{ iop_vulnerability_database_user }}"
+ iop_fdw_database_password: "{{ iop_vulnerability_database_password }}"
+ iop_fdw_remote_database_name: "{{ iop_inventory_database_name }}"
+ iop_fdw_remote_user: "{{ iop_inventory_database_user }}"
+ iop_fdw_remote_password: "{{ iop_inventory_database_password }}"
+
+# 1. Database upgrade init container (oneshot)
+- name: Deploy Vulnerability Database Upgrade container
+ containers.podman.podman_container:
+ name: iop-service-vuln-dbupgrade
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "bash -c /engine/dbupgrade.sh"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ DISABLE_RBAC: "TRUE"
+ POSTGRES_SSL_MODE: "disable"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Database Upgrade Init Container
+ [Service]
+ Type=oneshot
+ RemainAfterExit=true
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 2. Manager service (main service)
+- name: Deploy Vulnerability Manager container
+ containers.podman.podman_container:
+ name: iop-service-vuln-manager
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh manager"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ DISABLE_RBAC: "TRUE"
+ POSTGRES_SSL_MODE: "disable"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Manager Service
+ After=network-online.target iop-service-vuln-dbupgrade.service
+ Requires=iop-service-vuln-dbupgrade.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 3. Taskomatic service (task scheduler)
+- name: Deploy Vulnerability Taskomatic container
+ containers.podman.podman_container:
+ name: iop-service-vuln-taskomatic
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh taskomatic"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ IS_FEDRAMP: "true"
+ JOBS: "{{ iop_vulnerability_taskomatic_jobs }}"
+ JOBS_STARTUP: "{{ iop_vulnerability_taskomatic_startup }}"
+ POSTGRES_SSL_MODE: "disable"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Taskomatic Service
+ Wants=iop-service-vuln-manager.service
+ After=iop-service-vuln-manager.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 4. Grouper service
+- name: Deploy Vulnerability Grouper container
+ containers.podman.podman_container:
+ name: iop-service-vuln-grouper
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh grouper"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ KAFKA_HOST: "iop-core-kafka"
+ KAFKA_PORT: "9092"
+ KAFKA_GROUP_ID: "vulnerability-grouper"
+ PAYLOAD_TRACKER_TOPIC: "platform.payload-status"
+ GROUPER_INVENTORY_TOPIC: "vulnerability.grouper.inventory.upload"
+ GROUPER_ADVISOR_TOPIC: "vulnerability.grouper.advisor.upload"
+ PROMETHEUS_PORT: "8085"
+ POSTGRES_SSL_MODE: "disable"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Grouper Service
+ Wants=iop-service-vuln-manager.service
+ After=iop-service-vuln-manager.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 5. Listener service (event listener)
+- name: Deploy Vulnerability Listener container
+ containers.podman.podman_container:
+ name: iop-service-vuln-listener
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh listener"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ KAFKA_HOST: "iop-core-kafka"
+ KAFKA_PORT: "9092"
+ KAFKA_GROUP_ID: "vulnerability-listener2"
+ EVENTS_TOPIC: "platform.inventory.events"
+ PAYLOAD_TRACKER_TOPIC: "platform.payload-status"
+ ADVISOR_RESULTS_TOPIC: "platform.engine.results"
+ MESSAGE_TOPIC: "vulnerability.evaluator.upload"
+ ALLOWED_REPORTERS: "puptoo,satellite"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Listener Service
+ Wants=iop-service-vuln-manager.service
+ After=iop-service-vuln-manager.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 6. Evaluator (Recalc) service
+- name: Deploy Vulnerability Evaluator (Recalc) container
+ containers.podman.podman_container:
+ name: iop-service-vuln-evaluator-recalc
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh evaluator"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ KAFKA_HOST: "iop-core-kafka"
+ KAFKA_PORT: "9092"
+ KAFKA_GROUP_ID: "vulnerability"
+ PAYLOAD_TRACKER_TOPIC: "platform.payload-status"
+ REMEDIATION_UPDATES_TOPIC: "platform.remediation-updates.vulnerability"
+ EVALUATOR_RESULTS_TOPIC: "vulnerability.evaluator.results"
+ EVALUATOR_TOPIC: "vulnerability.evaluator.recalc"
+ VMAAS_HOST: "http://iop-service-vmaas-webapp-go:8000"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Evaluator (Recalc) Service
+ Wants=iop-service-vuln-manager.service
+ After=iop-service-vuln-manager.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 7. Evaluator (Upload) service
+- name: Deploy Vulnerability Evaluator (Upload) container
+ containers.podman.podman_container:
+ name: iop-service-vuln-evaluator-upload
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh evaluator"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ KAFKA_HOST: "iop-core-kafka"
+ KAFKA_PORT: "9092"
+ KAFKA_GROUP_ID: "vulnerability"
+ PAYLOAD_TRACKER_TOPIC: "platform.payload-status"
+ REMEDIATION_UPDATES_TOPIC: "platform.remediation-updates.vulnerability"
+ EVALUATOR_RESULTS_TOPIC: "vulnerability.evaluator.results"
+ EVALUATOR_TOPIC: "vulnerability.evaluator.upload"
+ VMAAS_HOST: "http://iop-service-vmaas-webapp-go:8000"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Evaluator (Upload) Service
+ Wants=iop-service-vuln-grouper.service iop-service-vuln-manager.service
+ After=iop-service-vuln-grouper.service iop-service-vuln-manager.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 8. VMAAS Sync service (oneshot with timer)
+- name: Deploy Vulnerability VMAAS Sync container
+ containers.podman.podman_container:
+ name: iop-service-vuln-vmaas-sync
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh vmaas-sync"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ KAFKA_HOST: "iop-core-kafka"
+ KAFKA_PORT: "9092"
+ KAFKA_GROUP_ID: "vulnerability"
+ MESSAGE_TOPIC: "vulnerability.evaluator.recalc"
+ VMAAS_HOST: "http://iop-service-vmaas-webapp-go:8000"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability VMAAS Sync Job
+ Wants=iop-service-vmaas-webapp-go.service iop-service-vuln-manager.service
+ After=iop-service-vmaas-webapp-go.service iop-service-vuln-manager.service
+ [Service]
+ Type=oneshot
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ notify: restart vulnerability
+
+- name: Create VMAAS Sync systemd timer
+ ansible.builtin.copy:
+ dest: /etc/systemd/system/iop-service-vuln-vmaas-sync.timer
+ content: |
+ [Unit]
+ Description=Vulnerability VMAAS Sync Timer
+
+ [Timer]
+ OnCalendar=daily
+ RandomizedDelaySec=1h
+ Persistent=true
+
+ [Install]
+ WantedBy=timers.target
+ mode: '0644'
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Vulnerability Database Upgrade service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-dbupgrade
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Manager service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-manager
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Taskomatic service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-taskomatic
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Grouper service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-grouper
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Listener service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-listener
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Evaluator (Recalc) service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-evaluator-recalc
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Evaluator (Upload) service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-evaluator-upload
+ enabled: true
+ state: started
+
+- name: Enable VMAAS Sync timer
+ ansible.builtin.systemd:
+ name: iop-service-vuln-vmaas-sync.timer
+ enabled: true
+ state: started
diff --git a/src/roles/iop_vulnerability_frontend/defaults/main.yaml b/src/roles/iop_vulnerability_frontend/defaults/main.yaml
new file mode 100644
index 00000000..521828fe
--- /dev/null
+++ b/src/roles/iop_vulnerability_frontend/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+iop_vulnerability_frontend_container_image: "quay.io/iop/vulnerability-frontend"
+iop_vulnerability_frontend_container_tag: "foreman-3.16"
+iop_vulnerability_frontend_assets_path: "/var/lib/foreman/public/assets/apps/vulnerability"
+iop_vulnerability_frontend_source_path: "/srv/dist/."
diff --git a/src/roles/iop_vulnerability_frontend/tasks/main.yaml b/src/roles/iop_vulnerability_frontend/tasks/main.yaml
new file mode 100644
index 00000000..ab60638f
--- /dev/null
+++ b/src/roles/iop_vulnerability_frontend/tasks/main.yaml
@@ -0,0 +1,98 @@
+---
+- name: Pull Vulnerability Frontend container image
+  containers.podman.podman_image:
+    name: "{{ iop_vulnerability_frontend_container_image }}:{{ iop_vulnerability_frontend_container_tag }}"
+    state: present
+
+- name: Ensure parent assets directory exists
+  ansible.builtin.file:
+    path: /var/lib/foreman/public/assets/apps
+    state: directory
+    owner: root
+    group: root
+    mode: '0755'
+
+- name: Ensure assets directory exists
+  ansible.builtin.file:
+    path: "{{ iop_vulnerability_frontend_assets_path }}"
+    state: directory
+    owner: root
+    group: root
+    mode: '0755'
+
+- name: Create temporary container for asset extraction
+  containers.podman.podman_container:
+    name: iop-vulnerability-frontend-temp
+    image: "{{ iop_vulnerability_frontend_container_image }}:{{ iop_vulnerability_frontend_container_tag }}"
+    state: created
+
+- name: Extract vulnerability frontend assets from container
+  containers.podman.podman_container_copy:
+    container: iop-vulnerability-frontend-temp
+    src: "{{ iop_vulnerability_frontend_source_path }}"
+    dest: "{{ iop_vulnerability_frontend_assets_path }}"
+    from_container: true
+
+- name: Remove temporary container
+  containers.podman.podman_container:
+    name: iop-vulnerability-frontend-temp
+    state: absent
+
+- name: Set ownership of vulnerability frontend assets
+  ansible.builtin.file:
+    path: "{{ iop_vulnerability_frontend_assets_path }}"
+    owner: root
+    group: root
+    recurse: true
+
+- name: Set SELinux file context for vulnerability frontend assets
+  community.general.sefcontext:
+    target: "{{ iop_vulnerability_frontend_assets_path }}(/.*)?"
+    setype: httpd_sys_content_t
+    state: present
+  when: ansible_facts["selinux"]["status"] == "enabled"
+
+- name: Restore SELinux context for vulnerability frontend assets
+  ansible.builtin.command:
+    cmd: restorecon -R "{{ iop_vulnerability_frontend_assets_path }}"
+  when: ansible_facts["selinux"]["status"] == "enabled"
+  changed_when: false
+
+- name: Ensure Apache SSL config directory exists
+  ansible.builtin.file:
+    path: /etc/httpd/conf.d/05-foreman-ssl.d
+    state: directory
+    mode: '0755'
+
+- name: Configure Apache for vulnerability frontend assets
+  ansible.builtin.copy:
+    dest: /etc/httpd/conf.d/05-foreman-ssl.d/vulnerability-frontend.conf
+    content: |
+      # IOP Vulnerability Frontend Assets Configuration
+      Alias /assets/apps/vulnerability {{ iop_vulnerability_frontend_assets_path }}
+      ProxyPass /assets/apps/vulnerability !
+
+      <Directory {{ iop_vulnerability_frontend_assets_path }}>
+        Options SymLinksIfOwnerMatch
+        AllowOverride None
+        Require all granted
+
+        # Use standard http expire header for assets instead of ETag
+        <IfModule mod_expires.c>
+          Header unset ETag
+          FileETag None
+          ExpiresActive On
+          ExpiresDefault "access plus 1 year"
+        </IfModule>
+
+        # Return compressed assets if they are precompiled
+        RewriteEngine On
+        # Make sure the browser supports gzip encoding and file with .gz added
+        # does exist on disc before we rewrite with the extension
+        RewriteCond %{HTTP:Accept-Encoding} \b(x-)?gzip\b
+        RewriteCond %{REQUEST_FILENAME} \.(css|js|svg)$
+        RewriteCond %{REQUEST_FILENAME}.gz -s
+        RewriteRule ^(.+) $1.gz [L]
+      </Directory>
+    mode: '0644'
+  notify: "httpd : Restart httpd"
diff --git a/src/roles/iop_yuptoo/defaults/main.yaml b/src/roles/iop_yuptoo/defaults/main.yaml
new file mode 100644
index 00000000..5bf9c386
--- /dev/null
+++ b/src/roles/iop_yuptoo/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+iop_yuptoo_container_image: "quay.io/iop/yuptoo"
+iop_yuptoo_container_tag: "foreman-3.16"
diff --git a/src/roles/iop_yuptoo/handlers/main.yaml b/src/roles/iop_yuptoo/handlers/main.yaml
new file mode 100644
index 00000000..99a098e2
--- /dev/null
+++ b/src/roles/iop_yuptoo/handlers/main.yaml
@@ -0,0 +1,17 @@
+---
+# Query the unit state directly instead of relying on ansible_facts.services,
+# which is only populated by the service_facts module (never gathered in this
+# play). This matches the check-then-restart pattern of the other iop roles.
+- name: Check if yuptoo service exists
+  ansible.builtin.systemd:
+    name: iop-core-yuptoo
+  register: iop_yuptoo_service_status
+  failed_when: false
+  listen: Restart yuptoo
+
+- name: Restart yuptoo
+  ansible.builtin.systemd:
+    name: iop-core-yuptoo
+    state: restarted
+  when: iop_yuptoo_service_status.status is defined and iop_yuptoo_service_status.status.LoadState != "not-found"
+  listen: Restart yuptoo
diff --git a/src/roles/iop_yuptoo/tasks/main.yaml b/src/roles/iop_yuptoo/tasks/main.yaml
new file mode 100644
index 00000000..5cdd9b6b
--- /dev/null
+++ b/src/roles/iop_yuptoo/tasks/main.yaml
@@ -0,0 +1,37 @@
+---
+- name: Pull Yuptoo container image
+  containers.podman.podman_image:
+    name: "{{ iop_yuptoo_container_image }}:{{ iop_yuptoo_container_tag }}"
+    state: present
+
+- name: Deploy Yuptoo container
+  containers.podman.podman_container:
+    name: iop-core-yuptoo
+    image: "{{ iop_yuptoo_container_image }}:{{ iop_yuptoo_container_tag }}"
+    state: quadlet
+    command: python -m main
+    env:
+      BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+      BYPASS_PAYLOAD_EXPIRATION: "true"
+    network:
+      - iop-core-network
+    quadlet_options:
+      - |
+        [Unit]
+        Description=IOP Core Yuptoo Container
+        [Service]
+        Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+        Restart=on-failure
+        [Install]
+        WantedBy=default.target
+  notify: Restart yuptoo
+
+- name: Run daemon reload to make Quadlet create the service files
+  ansible.builtin.systemd:
+    daemon_reload: true
+
+- name: Start Yuptoo service
+  ansible.builtin.systemd:
+    name: iop-core-yuptoo
+    enabled: true
+    state: started
diff --git a/src/vars/base.yaml b/src/vars/base.yaml
index 32c6f2c3..b20856c0 100644
--- a/src/vars/base.yaml
+++ b/src/vars/base.yaml
@@ -20,7 +20,7 @@ foreman_client_key: "{{ client_key }}"
foreman_client_certificate: "{{ client_certificate }}"
foreman_oauth_consumer_key: abcdefghijklmnopqrstuvwxyz123456
foreman_oauth_consumer_secret: abcdefghijklmnopqrstuvwxyz123456
-foreman_plugins: "{{ enabled_features | reject('contains', 'content/') | difference(['hammer', 'foreman-proxy', 'foreman']) }}"
+foreman_plugins: "{{ enabled_features | reject('contains', 'content/') | reject('contains', 'iop') | difference(['hammer', 'foreman-proxy', 'foreman']) }}"
foreman_url: "https://{{ ansible_facts['fqdn'] }}"
httpd_server_ca_certificate: "{{ server_ca_certificate }}"
@@ -33,4 +33,4 @@ pulp_pulp_url: "https://{{ ansible_facts['fqdn'] }}"
pulp_plugins: "{{ enabled_features | select('contains', 'content/') | map('replace', 'content/', 'pulp_') | list }}"
hammer_ca_certificate: "{{ server_ca_certificate }}"
-hammer_plugins: "{{ foreman_plugins | map('replace', 'foreman-tasks', 'foreman_tasks') | list }}"
+hammer_plugins: "{{ foreman_plugins | map('replace', 'foreman-tasks', 'foreman_tasks') | reject('contains', 'iop') | list }}"
diff --git a/src/vars/database.yml b/src/vars/database.yml
index 8061f626..d6cc54b0 100644
--- a/src/vars/database.yml
+++ b/src/vars/database.yml
@@ -37,9 +37,9 @@ postgresql_databases:
- name: "{{ pulp_database_name }}"
owner: "{{ pulp_database_user }}"
postgresql_users:
- - name: "{{ candlepin_database_name }}"
+ - name: "{{ candlepin_database_user }}"
password: "{{ candlepin_database_password }}"
- - name: "{{ foreman_database_name }}"
+ - name: "{{ foreman_database_user }}"
password: "{{ foreman_database_password }}"
- - name: "{{ pulp_database_name }}"
+ - name: "{{ pulp_database_user }}"
password: "{{ pulp_database_password }}"
diff --git a/src/vars/database_iop.yml b/src/vars/database_iop.yml
new file mode 100644
index 00000000..792333b2
--- /dev/null
+++ b/src/vars/database_iop.yml
@@ -0,0 +1,56 @@
+---
+iop_database_host: host.containers.internal
+iop_database_port: 5432
+
+iop_inventory_database_host: "{{ iop_database_host }}"
+iop_inventory_database_port: "{{ iop_database_port }}"
+iop_inventory_database_name: inventory_db
+iop_inventory_database_user: inventory_admin
+iop_inventory_database_password: CHANGEME
+
+iop_advisor_database_host: "{{ iop_database_host }}"
+iop_advisor_database_port: "{{ iop_database_port }}"
+iop_advisor_database_name: advisor_db
+iop_advisor_database_user: advisor_user
+iop_advisor_database_password: CHANGEME
+
+iop_remediation_database_host: "{{ iop_database_host }}"
+iop_remediation_database_port: "{{ iop_database_port }}"
+iop_remediation_database_name: remediations_db
+iop_remediation_database_user: remediations_user
+iop_remediation_database_password: CHANGEME
+
+iop_vmaas_database_host: "{{ iop_database_host }}"
+iop_vmaas_database_port: "{{ iop_database_port }}"
+iop_vmaas_database_name: vmaas_db
+iop_vmaas_database_user: vmaas_admin
+iop_vmaas_database_password: CHANGEME
+
+iop_vulnerability_database_host: "{{ iop_database_host }}"
+iop_vulnerability_database_port: "{{ iop_database_port }}"
+iop_vulnerability_database_name: vulnerability_db
+iop_vulnerability_database_user: vulnerability_admin
+iop_vulnerability_database_password: CHANGEME
+
+iop_postgresql_databases:
+ - name: "{{ iop_inventory_database_name }}"
+ owner: "{{ iop_inventory_database_user }}"
+ - name: "{{ iop_advisor_database_name }}"
+ owner: "{{ iop_advisor_database_user }}"
+ - name: "{{ iop_remediation_database_name }}"
+ owner: "{{ iop_remediation_database_user }}"
+ - name: "{{ iop_vmaas_database_name }}"
+ owner: "{{ iop_vmaas_database_user }}"
+ - name: "{{ iop_vulnerability_database_name }}"
+ owner: "{{ iop_vulnerability_database_user }}"
+iop_postgresql_users:
+ - name: "{{ iop_inventory_database_user }}"
+ password: "{{ iop_inventory_database_password }}"
+ - name: "{{ iop_advisor_database_user }}"
+ password: "{{ iop_advisor_database_password }}"
+ - name: "{{ iop_remediation_database_user }}"
+ password: "{{ iop_remediation_database_password }}"
+ - name: "{{ iop_vmaas_database_user }}"
+ password: "{{ iop_vmaas_database_password }}"
+ - name: "{{ iop_vulnerability_database_user }}"
+ password: "{{ iop_vulnerability_database_password }}"
diff --git a/src/vars/default_certificates.yml b/src/vars/default_certificates.yml
index 09f47c5c..04a3f814 100644
--- a/src/vars/default_certificates.yml
+++ b/src/vars/default_certificates.yml
@@ -11,3 +11,13 @@ client_key: "{{ certificates_ca_directory }}/private/{{ ansible_facts['fqdn'] }}
client_ca_certificate: "{{ certificates_ca_directory }}/certs/ca.crt"
localhost_key: "{{ certificates_ca_directory }}/private/localhost.key"
localhost_certificate: "{{ certificates_ca_directory }}/certs/localhost.crt"
+localhost_client_key: "{{ certificates_ca_directory }}/private/localhost-client.key"
+localhost_client_certificate: "{{ certificates_ca_directory }}/certs/localhost-client.crt"
+
+# IOP Gateway certificate paths - uses localhost certs to match puppet-iop behavior
+iop_gateway_server_certificate: "{{ certificates_ca_directory }}/certs/localhost.crt"
+iop_gateway_server_key: "{{ certificates_ca_directory }}/private/localhost.key"
+iop_gateway_server_ca_certificate: "{{ certificates_ca_directory }}/certs/ca.crt"
+iop_gateway_client_certificate: "{{ certificates_ca_directory }}/certs/localhost-client.crt"
+iop_gateway_client_key: "{{ certificates_ca_directory }}/private/localhost-client.key"
+iop_gateway_client_ca_certificate: "{{ certificates_ca_directory }}/certs/ca.crt"
diff --git a/tests/conftest.py b/tests/conftest.py
index 0ebfdac4..a3069a04 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -6,6 +6,7 @@
import pytest
import testinfra
import yaml
+import os
from jinja2 import Environment, FileSystemLoader, select_autoescape
@@ -161,3 +162,25 @@ def wait_for_tasks(foremanapi, search=None):
def wait_for_metadata_generate(foremanapi):
wait_for_tasks(foremanapi, 'label = Actions::Katello::Repository::MetadataGenerate')
+
+
+def is_iop_enabled():
+ test_dir = os.path.dirname(os.path.abspath(__file__))
+ foremanctl_dir = os.path.dirname(test_dir)
+ params_file = os.path.join(foremanctl_dir, '.var', 'lib', 'foremanctl', 'parameters.yaml')
+
+ if os.path.exists(params_file):
+ with open(params_file, 'r') as f:
+            params = yaml.safe_load(f) or {}
+        features = params.get('features') or []
+ if isinstance(features, str):
+ features = features.split()
+ return 'iop' in features
+
+ return False
+
+
+def pytest_runtest_setup(item):
+ if "iop" in item.nodeid.lower():
+ if not is_iop_enabled():
+ pytest.skip("IOP not enabled - skipping IOP tests ('iop' not in enabled_features)")
diff --git a/tests/fixtures/help/checks.txt b/tests/fixtures/help/checks.txt
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/iop/__init__.py b/tests/iop/__init__.py
new file mode 100644
index 00000000..d558ba0a
--- /dev/null
+++ b/tests/iop/__init__.py
@@ -0,0 +1 @@
+# IOP Test Package
diff --git a/tests/iop/test_advisor.py b/tests/iop/test_advisor.py
new file mode 100644
index 00000000..491223f1
--- /dev/null
+++ b/tests/iop/test_advisor.py
@@ -0,0 +1,140 @@
+import pytest
+
+
+def test_advisor_backend_api_service(server):
+ service = server.service("iop-service-advisor-backend-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_advisor_backend_service(server):
+ service = server.service("iop-service-advisor-backend-service")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_advisor_api_container(server):
+ result = server.run("podman ps --format '{{.Names}}' | grep iop-service-advisor-backend-api")
+ assert result.succeeded
+ assert "iop-service-advisor-backend-api" in result.stdout
+
+
+def test_advisor_service_container(server):
+ result = server.run("podman ps --format '{{.Names}}' | grep iop-service-advisor-backend-service")
+ assert result.succeeded
+ assert "iop-service-advisor-backend-service" in result.stdout
+
+
+def test_advisor_api_quadlet_file(server):
+ quadlet_file = server.file("/etc/containers/systemd/iop-service-advisor-backend-api.container")
+ assert quadlet_file.exists
+ assert quadlet_file.is_file
+
+
+def test_advisor_service_quadlet_file(server):
+ quadlet_file = server.file("/etc/containers/systemd/iop-service-advisor-backend-service.container")
+ assert quadlet_file.exists
+ assert quadlet_file.is_file
+
+
+def test_advisor_api_service_dependencies(server):
+ result = server.run("systemctl show iop-service-advisor-backend-api --property=After")
+ assert result.succeeded
+ assert "iop-core-kafka.service" in result.stdout
+
+
+def test_advisor_service_dependencies(server):
+ result = server.run("systemctl show iop-service-advisor-backend-service --property=After")
+ assert result.succeeded
+ assert "iop-core-kafka.service" in result.stdout
+
+
+def test_advisor_database_secrets(server):
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+ assert "iop-service-advisor-backend-database-username" in result.stdout
+ assert "iop-service-advisor-backend-database-password" in result.stdout
+ assert "iop-service-advisor-backend-database-name" in result.stdout
+ assert "iop-service-advisor-backend-database-host" in result.stdout
+ assert "iop-service-advisor-backend-database-port" in result.stdout
+
+
+def test_advisor_api_kafka_connectivity(server):
+ result = server.run("podman logs iop-service-advisor-backend-api 2>&1 | grep -i 'kafka\\|bootstrap'")
+ assert result.succeeded
+
+
+def test_advisor_service_kafka_connectivity(server):
+ result = server.run("podman logs iop-service-advisor-backend-service 2>&1 | grep -i 'kafka\\|bootstrap'")
+ assert result.succeeded
+
+
+def test_advisor_api_port_configured(server):
+ result = server.run("podman inspect iop-service-advisor-backend-api --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "PORT=8000" in result.stdout
+
+
+def test_advisor_fdw_foreign_server_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT * FROM pg_foreign_server WHERE srvname = 'hbi_server';\"")
+ assert result.succeeded
+ assert "hbi_server" in result.stdout
+
+
+def test_advisor_fdw_user_mapping_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT * FROM information_schema.user_mappings WHERE foreign_server_name = 'hbi_server';\"")
+ assert result.succeeded
+ assert "advisor_user" in result.stdout
+
+
+def test_advisor_fdw_foreign_table_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"\\det inventory_source.*\"")
+ assert result.succeeded
+ assert "hosts" in result.stdout
+
+
+def test_advisor_fdw_inventory_view_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"\\dv inventory.*\"")
+ assert result.succeeded
+ assert "hosts" in result.stdout
+
+
+def test_advisor_fdw_inventory_view_queryable(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT 1 FROM inventory.hosts LIMIT 1;\"")
+ assert result.rc == 0
+
+
+# Additional comprehensive FDW tests (beyond puppet-iop baseline)
+def test_advisor_fdw_postgres_fdw_extension(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT extname FROM pg_extension WHERE extname = 'postgres_fdw';\"")
+ assert result.succeeded
+ assert "postgres_fdw" in result.stdout
+
+
+def test_advisor_fdw_postgres_user_mapping_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT usename FROM pg_user_mappings WHERE srvname = 'hbi_server' AND usename = 'postgres';\"")
+ assert result.succeeded
+ assert "postgres" in result.stdout
+
+
+def test_advisor_fdw_inventory_source_schema_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'inventory_source';\"")
+ assert result.succeeded
+ assert "inventory_source" in result.stdout
+
+
+def test_advisor_fdw_inventory_schema_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'inventory';\"")
+ assert result.succeeded
+ assert "inventory" in result.stdout
+
+
+def test_advisor_fdw_permissions_on_view(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT privilege_type FROM information_schema.table_privileges WHERE grantee = 'advisor_user' AND table_schema = 'inventory' AND table_name = 'hosts';\"")
+ assert result.succeeded
+ assert "SELECT" in result.stdout
+
+
+def test_advisor_api_endpoint(server):
+ result = server.run("podman run --network=iop-core-network --rm quay.io/iop/advisor-backend:latest curl -s -o /dev/null -w '%{http_code}' http://iop-service-advisor-backend-api:8000/ 2>/dev/null || echo '000'")
+ assert result.stdout.strip() != "000"
diff --git a/tests/iop/test_advisor_frontend.py b/tests/iop/test_advisor_frontend.py
new file mode 100644
index 00000000..ff0bd9c9
--- /dev/null
+++ b/tests/iop/test_advisor_frontend.py
@@ -0,0 +1,26 @@
+import pytest
+
+
+def test_advisor_frontend_assets_directory(server):
+ assets_dir = server.file("/var/lib/foreman/public/assets/apps/advisor")
+ assert assets_dir.exists
+ assert assets_dir.is_directory
+ assert assets_dir.mode == 0o755
+
+
+def test_advisor_frontend_app_info_file(server):
+ app_info_file = server.file("/var/lib/foreman/public/assets/apps/advisor/app.info.json")
+
+ assert app_info_file.exists
+ assert app_info_file.is_file
+
+
+def test_advisor_frontend_javascript_assets_accessible(server):
+ result = server.run("find /var/lib/foreman/public/assets/apps/advisor -name '*.js' | head -1")
+ assert result.succeeded
+ assert result.stdout.strip()
+ js_file = result.stdout.strip().replace("/var/lib/foreman/public", "")
+ curl_result = server.run(f"curl -s -o /dev/null -w '%{{http_code}}' -k https://localhost{js_file}")
+ assert curl_result.succeeded
+ http_code = curl_result.stdout.strip()
+ assert http_code in ["200"]
diff --git a/tests/iop/test_engine.py b/tests/iop/test_engine.py
new file mode 100644
index 00000000..5d65c0d0
--- /dev/null
+++ b/tests/iop/test_engine.py
@@ -0,0 +1,35 @@
+import pytest
+
+
+def test_engine_service(server):
+ service = server.service("iop-core-engine")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_engine_secret(server):
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+ assert "iop-core-engine-config-yml" in result.stdout
+
+
+def test_engine_config_content(server):
+ result = server.run("podman secret inspect iop-core-engine-config-yml --showsecret")
+ assert result.succeeded
+
+ config_data = result.stdout.strip()
+ assert "insights.specs.default" in config_data
+ assert "insights_kafka_service.rules" in config_data
+ assert "iop-core-kafka:9092" in config_data
+
+
+def test_engine_service_dependencies(server):
+ result = server.run("systemctl show iop-core-engine --property=After")
+ assert result.succeeded
+ assert "iop-core-ingress.service" in result.stdout
+ assert "iop-core-kafka.service" in result.stdout
+
+
+def test_engine_kafka_connectivity(server):
+ result = server.run("podman logs iop-core-engine 2>&1 | grep -i 'kafka\\|bootstrap'")
+    assert result.succeeded
diff --git a/tests/iop/test_gateway.py b/tests/iop/test_gateway.py
new file mode 100644
index 00000000..1928e2c2
--- /dev/null
+++ b/tests/iop/test_gateway.py
@@ -0,0 +1,30 @@
+import pytest
+
+
+def test_gateway_service(server):
+ service = server.service("iop-core-gateway")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_gateway_port(server):
+ addr = server.addr("localhost")
+ assert addr.port("24443").is_reachable
+
+
+def test_gateway_secrets(server):
+ secrets = [
+ 'iop-core-gateway-server-cert',
+ 'iop-core-gateway-server-key',
+ 'iop-core-gateway-server-ca-cert',
+ 'iop-core-gateway-client-cert',
+ 'iop-core-gateway-client-key',
+ 'iop-core-gateway-client-ca-cert',
+ 'iop-core-gateway-relay-conf'
+ ]
+
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+
+ for secret_name in secrets:
+        assert secret_name in result.stdout
diff --git a/tests/iop/test_ingress.py b/tests/iop/test_ingress.py
new file mode 100644
index 00000000..2f42afbc
--- /dev/null
+++ b/tests/iop/test_ingress.py
@@ -0,0 +1,13 @@
+import pytest
+
+
+def test_ingress_service(server):
+ service = server.service("iop-core-ingress")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_ingress_http_endpoint(server):
+    result = server.run("podman run --rm --network=iop-core-network quay.io/iop/ingress:latest curl -s -o /dev/null -w '%{http_code}' http://iop-core-ingress:8080/")
+ if result.succeeded:
+        assert "200" in result.stdout
diff --git a/tests/iop/test_integration.py b/tests/iop/test_integration.py
new file mode 100644
index 00000000..a048bd09
--- /dev/null
+++ b/tests/iop/test_integration.py
@@ -0,0 +1,129 @@
+import pytest
+
+
+def test_iop_core_kafka_service(server):
+ service = server.service("iop-core-kafka")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_core_ingress_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-ingress").succeeded
+ if service_exists:
+ service = server.service("iop-core-ingress")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_ingress_endpoint(server):
+ result = server.run("curl -f http://localhost:8080/ 2>/dev/null || echo 'Ingress not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_core_puptoo_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-puptoo").succeeded
+ if service_exists:
+ service = server.service("iop-core-puptoo")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_puptoo_metrics_endpoint(server):
+ result = server.run("curl -f http://localhost:8000/metrics 2>/dev/null || echo 'Puptoo not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_core_yuptoo_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-yuptoo").succeeded
+ if service_exists:
+ service = server.service("iop-core-yuptoo")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_yuptoo_endpoint(server):
+ result = server.run("curl -f http://localhost:5005/ 2>/dev/null || echo 'Yuptoo not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_core_engine_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-engine").succeeded
+ if service_exists:
+ service = server.service("iop-core-engine")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_core_gateway_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-gateway").succeeded
+ if service_exists:
+ service = server.service("iop-core-gateway")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_gateway_endpoint(server):
+ result = server.run("curl -f http://localhost:24443/ 2>/dev/null || echo 'Gateway not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_gateway_api_ingress_endpoint(server):
+ result = server.run("curl -f http://localhost:24443/api/ingress 2>/dev/null || echo 'Gateway API ingress not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_gateway_https_cert_auth(server):
+ result = server.run("curl -s -o /dev/null -w '%{http_code}' https://localhost:24443/ --cert /root/certificates/certs/localhost-client.crt --key /root/certificates/private/localhost-client.key --cacert /root/certificates/certs/ca.crt 2>/dev/null || echo '000'")
+ assert "200" in result.stdout
+
+
+def test_iop_core_host_inventory_api_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-host-inventory-api").succeeded
+ if service_exists:
+ service = server.service("iop-core-host-inventory-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_inventory_mq_endpoint(server):
+    result = server.run("podman run --rm --network=iop-core-network quay.io/iop/host-inventory:latest curl http://iop-core-host-inventory:9126/ 2>/dev/null || echo 'Host inventory MQ not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_inventory_api_health_endpoint(server):
+    result = server.run("podman run --rm --network=iop-core-network quay.io/iop/host-inventory:latest curl -s -o /dev/null -w '%{http_code}' http://iop-core-host-inventory-api:8081/health 2>/dev/null || echo '000'")
+ assert "200" in result.stdout
+
+
+def test_iop_service_advisor_backend_api_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-service-advisor-backend-api").succeeded
+ if service_exists:
+ service = server.service("iop-service-advisor-backend-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_service_advisor_backend_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-service-advisor-backend-service").succeeded
+ if service_exists:
+ service = server.service("iop-service-advisor-backend-service")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_advisor_api_endpoint(server):
+ result = server.run("podman run --network=iop-core-network --rm quay.io/iop/advisor-backend:latest curl -f http://iop-service-advisor-backend-api:8000/ 2>/dev/null || echo 'Advisor API not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_service_remediations_api_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-service-remediations-api").succeeded
+ if service_exists:
+ service = server.service("iop-service-remediations-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_remediations_api_endpoint(server):
+ result = server.run("curl -f http://localhost:9002/ 2>/dev/null || echo 'Remediations API not yet responding'")
+ assert result.rc == 0
diff --git a/tests/iop/test_inventory.py b/tests/iop/test_inventory.py
new file mode 100644
index 00000000..08c8b66a
--- /dev/null
+++ b/tests/iop/test_inventory.py
@@ -0,0 +1,66 @@
+import pytest
+
+
+def test_inventory_migrate_service(server):
+ service = server.service("iop-core-host-inventory-migrate")
+ assert service.is_enabled
+
+
+def test_inventory_mq_service(server):
+ service = server.service("iop-core-host-inventory")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_inventory_api_service(server):
+ service = server.service("iop-core-host-inventory-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_inventory_service_dependencies(server):
+ result = server.run("systemctl show iop-core-host-inventory --property=After")
+ assert result.succeeded
+ assert "iop-core-host-inventory-migrate.service" in result.stdout
+
+
+def test_inventory_api_endpoint(server):
+    result = server.run("podman run --rm --network=iop-core-network quay.io/iop/host-inventory:latest curl -s -o /dev/null -w '%{http_code}' http://iop-core-host-inventory-api:8081/health")
+ if result.succeeded:
+ assert "200" in result.stdout
+
+
+def test_inventory_hosts_endpoint(server):
+    result = server.run("podman run --rm --network=iop-core-network quay.io/iop/host-inventory:latest curl -s -o /dev/null -w '%{http_code}' http://iop-core-host-inventory-api:8081/api/inventory/v1/hosts")
+ if result.succeeded:
+ assert "200" in result.stdout
+
+
+def test_inventory_cleanup_service(server):
+ service = server.service("iop-core-host-inventory-cleanup")
+ assert not service.is_running
+
+
+def test_inventory_cleanup_service_enabled(server):
+ result = server.run("systemctl is-enabled iop-core-host-inventory-cleanup")
+ assert result.succeeded
+ assert "generated" in result.stdout
+
+
+def test_inventory_cleanup_timer(server):
+ service = server.service("iop-core-host-inventory-cleanup.timer")
+ assert service.is_enabled
+ assert service.is_running
+
+
+def test_inventory_cleanup_timer_config(server):
+ timer_file = server.file("/etc/systemd/system/iop-core-host-inventory-cleanup.timer")
+ assert timer_file.exists
+ assert timer_file.is_file
+
+ content = timer_file.content_string
+ assert "OnBootSec=10min" in content
+ assert "OnUnitActiveSec=24h" in content
+ assert "Persistent=true" in content
+ assert "RandomizedDelaySec=300" in content
+    assert "WantedBy=timers.target" in content
diff --git a/tests/iop/test_kafka.py b/tests/iop/test_kafka.py
new file mode 100644
index 00000000..a712fb35
--- /dev/null
+++ b/tests/iop/test_kafka.py
@@ -0,0 +1,69 @@
+import pytest
+
+
+def test_kafka_service(server):
+ service = server.service("iop-core-kafka")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_kafka_volume(server):
+ result = server.run("podman volume ls --format '{{.Name}}'")
+ assert result.succeeded
+ assert "iop-core-kafka-data" in result.stdout
+
+
+def test_kafka_topics_initialized(server):
+ result = server.run("podman exec iop-core-kafka /opt/kafka/init.sh --check")
+ assert result.succeeded
+
+
+def test_kafka_secrets(server):
+ secrets = [
+ 'iop-core-kafka-init-start',
+ 'iop-core-kafka-server-properties',
+ 'iop-core-kafka-init'
+ ]
+
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+
+ for secret_name in secrets:
+ assert secret_name in result.stdout
+
+
+def test_kafka_config_content(server):
+ result = server.run("podman secret inspect iop-core-kafka-server-properties --showsecret")
+ assert result.succeeded
+
+ config_data = result.stdout.strip()
+ assert "advertised.listeners=PLAINTEXT://iop-core-kafka:9092" in config_data
+ assert "controller.quorum.voters=1@iop-core-kafka:9093" in config_data
+
+
+def test_kafka_topic_creation(server):
+ topics = [
+ "platform.engine.results",
+ "platform.insights.rule-hits",
+ "platform.insights.rule-deactivation",
+ "platform.inventory.events",
+ "platform.inventory.host-ingress",
+ "platform.sources.event-stream",
+ "platform.playbook-dispatcher.runs",
+ "platform.upload.announce",
+ "platform.upload.validation",
+ "platform.logging.logs",
+ "platform.payload-status",
+ "platform.remediation-updates.vulnerability",
+ "vulnerability.evaluator.results",
+ "vulnerability.evaluator.recalc",
+ "vulnerability.evaluator.upload",
+ "vulnerability.grouper.inventory.upload",
+ "vulnerability.grouper.advisor.upload"
+ ]
+
+ result = server.run("podman exec iop-core-kafka /opt/kafka/bin/kafka-topics.sh --bootstrap-server iop-core-kafka:9092 --list")
+ assert result.succeeded
+
+ for topic in topics:
+ assert topic in result.stdout
diff --git a/tests/iop/test_puptoo.py b/tests/iop/test_puptoo.py
new file mode 100644
index 00000000..e5536ef2
--- /dev/null
+++ b/tests/iop/test_puptoo.py
@@ -0,0 +1,7 @@
+import pytest
+
+
+def test_puptoo_service(server):
+ service = server.service("iop-core-puptoo")
+ assert service.is_running
+    assert service.is_enabled
diff --git a/tests/iop/test_remediation.py b/tests/iop/test_remediation.py
new file mode 100644
index 00000000..26cd6c17
--- /dev/null
+++ b/tests/iop/test_remediation.py
@@ -0,0 +1,27 @@
+import pytest
+
+
+def test_remediation_api_service(server):
+ service = server.service("iop-service-remediations-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_remediation_api_service_dependencies(server):
+ result = server.run("systemctl show iop-service-remediations-api --property=After")
+ assert result.succeeded
+ assert "iop-core-host-inventory-api.service" in result.stdout
+ assert "iop-service-advisor-backend-api.service" in result.stdout
+
+
+def test_remediation_api_environment_variables(server):
+ result = server.run("podman inspect iop-service-remediations-api --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "REDIS_ENABLED=false" in result.stdout
+ assert "RBAC_ENFORCE=false" in result.stdout
+ assert "DB_SSL_ENABLED=false" in result.stdout
+
+
+def test_remediation_api_endpoint(server):
+ result = server.run("curl -s -o /dev/null -w '%{http_code}' http://localhost:9002/ 2>/dev/null || echo '000'")
+ assert result.stdout.strip() != "000"
diff --git a/tests/iop/test_vmaas.py b/tests/iop/test_vmaas.py
new file mode 100644
index 00000000..488caee0
--- /dev/null
+++ b/tests/iop/test_vmaas.py
@@ -0,0 +1,41 @@
+import pytest
+
+
+def test_vmaas_reposcan_service(server):
+ service = server.service("iop-service-vmaas-reposcan")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vmaas_webapp_go_service(server):
+ service = server.service("iop-service-vmaas-webapp-go")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vmaas_webapp_go_service_dependencies(server):
+ result = server.run("systemctl show iop-service-vmaas-webapp-go --property=After")
+ assert result.succeeded
+ assert "iop-service-vmaas-reposcan.service" in result.stdout
+
+
+def test_vmaas_webapp_go_service_wants(server):
+ result = server.run("systemctl show iop-service-vmaas-webapp-go --property=Wants")
+ assert result.succeeded
+ assert "iop-service-vmaas-reposcan.service" in result.stdout
+
+
+def test_vmaas_database_secrets(server):
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+ assert "iop-service-vmaas-reposcan-database-username" in result.stdout
+ assert "iop-service-vmaas-reposcan-database-password" in result.stdout
+ assert "iop-service-vmaas-reposcan-database-name" in result.stdout
+ assert "iop-service-vmaas-reposcan-database-host" in result.stdout
+ assert "iop-service-vmaas-reposcan-database-port" in result.stdout
+
+
+def test_vmaas_data_volume(server):
+ result = server.run("podman volume ls --format '{{.Name}}' | grep iop-service-vmaas-data")
+ assert result.succeeded
+ assert "iop-service-vmaas-data" in result.stdout
diff --git a/tests/iop/test_vulnerability.py b/tests/iop/test_vulnerability.py
new file mode 100644
index 00000000..a48b3636
--- /dev/null
+++ b/tests/iop/test_vulnerability.py
@@ -0,0 +1,189 @@
+import pytest
+
+
+def test_vulnerability_manager_service(server):
+ service = server.service("iop-service-vuln-manager")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_dbupgrade_service(server):
+ service = server.service("iop-service-vuln-dbupgrade")
+ assert service.is_enabled
+
+
+def test_vulnerability_taskomatic_service(server):
+ service = server.service("iop-service-vuln-taskomatic")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_grouper_service(server):
+ service = server.service("iop-service-vuln-grouper")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_listener_service(server):
+ service = server.service("iop-service-vuln-listener")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_evaluator_recalc_service(server):
+ service = server.service("iop-service-vuln-evaluator-recalc")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_evaluator_upload_service(server):
+ service = server.service("iop-service-vuln-evaluator-upload")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_vmaas_sync_timer(server):
+ timer = server.service("iop-service-vuln-vmaas-sync.timer")
+ assert timer.is_enabled
+
+
+def test_vulnerability_quadlet_files(server):
+ containers = [
+ "iop-service-vuln-dbupgrade",
+ "iop-service-vuln-manager",
+ "iop-service-vuln-taskomatic",
+ "iop-service-vuln-grouper",
+ "iop-service-vuln-listener",
+ "iop-service-vuln-evaluator-recalc",
+ "iop-service-vuln-evaluator-upload",
+ "iop-service-vuln-vmaas-sync",
+ ]
+ for container in containers:
+ quadlet_file = server.file(f"/etc/containers/systemd/{container}.container")
+ assert quadlet_file.exists
+ assert quadlet_file.is_file
+
+
+def test_vulnerability_database_secrets(server):
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+ assert "iop-service-vulnerability-database-username" in result.stdout
+ assert "iop-service-vulnerability-database-password" in result.stdout
+ assert "iop-service-vulnerability-database-name" in result.stdout
+ assert "iop-service-vulnerability-database-host" in result.stdout
+ assert "iop-service-vulnerability-database-port" in result.stdout
+
+
+def test_vulnerability_containers_networking(server):
+ containers = [
+ "iop-service-vuln-manager",
+ "iop-service-vuln-taskomatic",
+ "iop-service-vuln-grouper",
+ "iop-service-vuln-listener",
+ "iop-service-vuln-evaluator-recalc",
+ "iop-service-vuln-evaluator-upload",
+ ]
+ for container in containers:
+ result = server.run(f"podman inspect {container} --format '{{{{.NetworkSettings.Networks}}}}'")
+ assert result.succeeded
+ assert "iop-core-network" in result.stdout
+
+
+def test_vulnerability_manager_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-manager --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "UNLEASH_BOOTSTRAP_FILE=develfeatureflags.json" in result.stdout
+ assert "DISABLE_RBAC=TRUE" in result.stdout
+
+
+def test_vulnerability_taskomatic_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-taskomatic --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "IS_FEDRAMP=true" in result.stdout
+ assert "JOBS=stale_systems:5,delete_systems:30,cacheman:5" in result.stdout
+ assert "JOBS_STARTUP=cacheman" in result.stdout
+
+
+def test_vulnerability_grouper_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-grouper --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "KAFKA_HOST=iop-core-kafka" in result.stdout
+ assert "KAFKA_PORT=9092" in result.stdout
+ assert "KAFKA_GROUP_ID=vulnerability-grouper" in result.stdout
+ assert "PROMETHEUS_PORT=8085" in result.stdout
+
+
+def test_vulnerability_listener_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-listener --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "KAFKA_GROUP_ID=vulnerability-listener2" in result.stdout
+ assert "EVENTS_TOPIC=platform.inventory.events" in result.stdout
+ assert "ALLOWED_REPORTERS=puptoo,satellite" in result.stdout
+
+
+def test_vulnerability_evaluator_recalc_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-evaluator-recalc --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "EVALUATOR_TOPIC=vulnerability.evaluator.recalc" in result.stdout
+ assert "VMAAS_HOST=http://iop-service-vmaas-webapp-go:8000" in result.stdout
+
+
+def test_vulnerability_evaluator_upload_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-evaluator-upload --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "EVALUATOR_TOPIC=vulnerability.evaluator.upload" in result.stdout
+ assert "VMAAS_HOST=http://iop-service-vmaas-webapp-go:8000" in result.stdout
+
+
+def test_vulnerability_container_commands(server):
+ containers_commands = {
+ "iop-service-vuln-dbupgrade": ["bash", "-c", "/engine/dbupgrade.sh"],
+ "iop-service-vuln-manager": ["/engine/entrypoint.sh", "manager"],
+ "iop-service-vuln-taskomatic": ["/engine/entrypoint.sh", "taskomatic"],
+ "iop-service-vuln-grouper": ["/engine/entrypoint.sh", "grouper"],
+ "iop-service-vuln-listener": ["/engine/entrypoint.sh", "listener"],
+ "iop-service-vuln-evaluator-recalc": ["/engine/entrypoint.sh", "evaluator"],
+ "iop-service-vuln-evaluator-upload": ["/engine/entrypoint.sh", "evaluator"],
+ }
+ for container, expected_cmd in containers_commands.items():
+ result = server.run(f"podman inspect {container} --format '{{{{.Config.Cmd}}}}'")
+ if result.succeeded:
+ for cmd_part in expected_cmd:
+ assert cmd_part in result.stdout
+
+
+def test_vulnerability_timer_file(server):
+ timer_file = server.file("/etc/systemd/system/iop-service-vuln-vmaas-sync.timer")
+ assert timer_file.exists
+ assert timer_file.is_file
+ assert "OnCalendar=daily" in timer_file.content.decode()
+
+
+def test_vulnerability_fdw_foreign_server_exists(server):
+ result = server.run("podman exec postgresql psql vulnerability_db -c \"SELECT * FROM pg_foreign_server WHERE srvname = 'hbi_server';\"")
+ assert result.succeeded
+ assert "hbi_server" in result.stdout
+
+
+def test_vulnerability_fdw_user_mapping_exists(server):
+ result = server.run("podman exec postgresql psql vulnerability_db -c \"SELECT * FROM pg_user_mappings WHERE srvname = 'hbi_server' AND usename = 'vulnerability_admin';\"")
+ assert result.succeeded
+ assert "vulnerability_admin" in result.stdout
+
+
+def test_vulnerability_fdw_foreign_table_exists(server):
+ result = server.run("podman exec postgresql psql vulnerability_db -c \"SELECT * FROM information_schema.foreign_tables WHERE foreign_table_schema = 'inventory_source' AND foreign_table_name = 'hosts';\"")
+ assert result.succeeded
+ assert "hosts" in result.stdout
+
+
+def test_vulnerability_fdw_view_exists(server):
+ result = server.run("podman exec postgresql psql vulnerability_db -c \"SELECT * FROM information_schema.views WHERE table_schema = 'inventory' AND table_name = 'hosts';\"")
+ assert result.succeeded
+ assert "hosts" in result.stdout
+
+
+def test_vulnerability_fdw_view_access(server):
+ result = server.run("podman exec postgresql psql vulnerability_db -c \"SELECT COUNT(*) FROM inventory.hosts;\"")
+ assert "permission denied" not in result.stdout.lower()
+ assert "does not exist" not in result.stdout.lower()
diff --git a/tests/iop/test_vulnerability_frontend.py b/tests/iop/test_vulnerability_frontend.py
new file mode 100644
index 00000000..6c1b635b
--- /dev/null
+++ b/tests/iop/test_vulnerability_frontend.py
@@ -0,0 +1,26 @@
+import pytest
+
+
+def test_vulnerability_frontend_assets_directory(server):
+ assets_dir = server.file("/var/lib/foreman/public/assets/apps/vulnerability")
+ assert assets_dir.exists
+ assert assets_dir.is_directory
+ assert assets_dir.mode == 0o755
+
+
+def test_vulnerability_frontend_app_info_file(server):
+ app_info_file = server.file("/var/lib/foreman/public/assets/apps/vulnerability/app.info.json")
+
+ assert app_info_file.exists
+ assert app_info_file.is_file
+
+
+def test_vulnerability_frontend_javascript_assets_accessible(server):
+ result = server.run("find /var/lib/foreman/public/assets/apps/vulnerability -name '*.js' | head -1")
+ assert result.succeeded
+ assert result.stdout.strip()
+ js_file = result.stdout.strip().replace("/var/lib/foreman/public", "")
+ curl_result = server.run(f"curl -s -o /dev/null -w '%{{http_code}}' -k https://localhost{js_file}")
+ assert curl_result.succeeded
+ http_code = curl_result.stdout.strip()
+ assert http_code in ["200"]
diff --git a/tests/iop/test_yuptoo.py b/tests/iop/test_yuptoo.py
new file mode 100644
index 00000000..1d11065a
--- /dev/null
+++ b/tests/iop/test_yuptoo.py
@@ -0,0 +1,7 @@
+import pytest
+
+
+def test_yuptoo_service(server):
+ service = server.service("iop-core-yuptoo")
+ assert service.is_running
+ assert service.is_enabled
\ No newline at end of file