diff --git a/.gitignore b/.gitignore index 1f99f9d..5a79846 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,11 @@ CMakeUserPresets.json # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #cmake-build-* + +# Node.js dependencies +node_modules/ +package-lock.json + +# Build outputs +dist/ +*.log diff --git a/.oxlintrc.json b/.oxlintrc.json new file mode 100644 index 0000000..21f9ff5 --- /dev/null +++ b/.oxlintrc.json @@ -0,0 +1,12 @@ +{ + "$schema": "./node_modules/oxlint/configuration_schema.json", + "rules": { + "typescript": "error", + "suspicious": "error", + "correctness": "error", + "style": "error", + "pedantic": "warn" + }, + "import-plugin": true, + "jsdoc-plugin": true +} diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000..f193f6e --- /dev/null +++ b/.prettierignore @@ -0,0 +1,30 @@ +# Dependencies +node_modules/ +package-lock.json +bun.lock + +# Build outputs +dist/ +*.log + +# Git +.git/ +.gitignore +.gitmodules + +# Config files that prettier can't parse +.oxlintrc.json + +# Binary and non-text files +*.png +*.jpg +*.jpeg +*.gif +*.ico +*.pdf +*.zip +*.tar.gz + +# Other configs +LICENSE +*.list diff --git a/.prettierrc.yml b/.prettierrc.yml new file mode 100644 index 0000000..b0eb8f9 --- /dev/null +++ b/.prettierrc.yml @@ -0,0 +1,12 @@ +semi: true +singleQuote: true +trailingComma: es5 +printWidth: 80 +tabWidth: 2 +proseWrap: preserve +overrides: + - files: '*.ts' + options: + parser: typescript + plugins: + - '@prettier/plugin-oxc' diff --git a/STACK.md b/STACK.md index 8d0bbb3..d2e740d 100644 --- a/STACK.md +++ b/STACK.md @@ -287,7 +287,7 @@ HelloTimeSec=2 MaxAgeSec=20 ``` -#### 📄 /usr/lib/systemd/system/10-tenant-bridge.network: +#### 📄 /usr/lib/systemd/network/10-tenant-bridge.network: ```ini [Match] Name=br-tenant-%i @@ -309,7 +309,7 @@ EmitRouter=yes ### 2️⃣ WireGuard VPN Template -#### 📄 /usr/lib/systemd/system/20-wg-tenant.netdev: +#### 📄 /usr/lib/systemd/network/20-wg-tenant.netdev: ```ini [NetDev] Name=wg-tenant-%i @@ -326,7 +326,7 @@ Endpoint=${PEER_ENDPOINT}:51820 PersistentKeepalive=25 ``` -#### 📄 /usr/lib/systemd/system/20-wg-tenant.network: +#### 📄 /usr/lib/systemd/network/20-wg-tenant.network: ```ini [Match] Name=wg-tenant-%i @@ -341,7 +341,7 @@ Scope=link ### 3️⃣ VXLAN Overlay Template -#### 📄 /usr/lib/systemd/system/30-vxlan-tenant.netdev: +#### 📄 /usr/lib/systemd/network/30-vxlan-tenant.netdev: ```ini [NetDev] Name=vxlan-tenant-%i @@ -354,7 +354,7 @@ DestinationPort=4789 MacLearning=yes ``` -#### 📄 /usr/lib/systemd/system/30-vxlan-tenant.network: +#### 📄 /usr/lib/systemd/network/30-vxlan-tenant.network: ```ini [Match] Name=vxlan-tenant-%i @@ -369,7 +369,7 @@ EgressUntagged=1 ### 4️⃣ VLAN Segmentation Template -#### 📄 /usr/lib/systemd/system/40-vlan-tenant.netdev: +#### 📄 /usr/lib/systemd/network/40-vlan-tenant.netdev: ```ini [NetDev] Name=vlan-tenant-%i @@ -379,7 +379,7 @@ Kind=vlan Id=%i ``` -#### 📄 /usr/lib/systemd/system/40-vlan-tenant.network: +#### 📄 /usr/lib/systemd/network/40-vlan-tenant.network: ```ini [Match] Name=vlan-tenant-%i @@ -597,17 +597,17 @@ tenant@tenant123.service Each template ensures compliance with the Linux File System Hierarchy specification: -1. **Root Filesystem Requirements: +1. 
**Root Filesystem Requirements**: - `/etc/os-release` or `/usr/lib/os-release` present - Proper symlink from `/usr/lib/os-release` to `/etc/os-release` - No files in `/` root directory itself -2. **Extension Image Requirements: +2. **Extension Image Requirements**: - sysext: Only `/usr/` and `/opt/` directories - confext: Only `/etc/` directory - Proper `extension-release` files in correct locations -3. **Verification Structure: +3. **Verification Structure**: - VOA hierarchy at `/etc/voa/` and `/usr/share/voa/` - Proper certificate fingerprint naming - ASCII-armored OpenPGP files @@ -641,7 +641,7 @@ validate_rootfs() { ## 🎯 Conclusion -This template system provides: +This template system provides the following capabilities: 1. **Standardization**: All tenants use consistent, validated templates 2. **Compliance**: Full adherence to Linux Userspace API specifications diff --git a/package.json b/package.json new file mode 100644 index 0000000..722158a --- /dev/null +++ b/package.json @@ -0,0 +1,60 @@ +{ + "name": "bitbuilder-hypervisor", + "version": "1.0.0", + "description": "Revolutionary git-ops-native, multi-tenant hypervisor platform built on systemd virtualization", + "type": "module", + "main": "dist/index.js", + "exports": { + ".": "./dist/index.js", + "./generators/*": "./dist/generators/*.js", + "./services/*": "./dist/services/*.js" + }, + "imports": { + "#generators/*": "./src/generators/*.ts", + "#services/*": "./src/services/*.ts", + "#schemas/*": "./src/schemas/*.ts", + "#utils/*": "./src/utils/*.ts", + "#types/*": "./src/types/*.ts" + }, + "files": [ + "dist", + "templates", + "configs" + ], + "scripts": { + "dev": "bun run --watch src/index.ts", + "build": "tsc --noEmit && bun build ./src/index.ts --outdir ./dist --target node --external '#*'", + "build:generators": "bun build ./src/generators/tenant-generator.ts --compile --outfile ./dist/generators/tenant-generator", + "build:services": "bun build ./src/services/tenant-manager.ts --compile --outfile ./dist/services/tenant-manager", + "test": "bun test", + "lint": "oxlint", + "lint:fix": "oxlint --fix", + "format": "prettier --write .", + "quality": "oxlint && prettier --check ." + }, + "dependencies": { + "effect": "latest", + "@effect/schema": "latest", + "@effect/platform": "latest", + "@effect/platform-node": "latest" + }, + "devDependencies": { + "oxlint": "latest", + "prettier": "latest", + "@prettier/plugin-oxc": "latest", + "typescript": "latest", + "@types/node": "latest" + }, + "engines": { + "bun": ">=1.0.0" + }, + "keywords": [ + "hypervisor", + "systemd", + "virtualization", + "git-ops", + "multi-tenant", + "immutable-infrastructure" + ], + "license": "MIT" +} diff --git a/src/generators/mount-generator b/src/generators/mount-generator new file mode 100644 index 0000000..b8cd07a --- /dev/null +++ b/src/generators/mount-generator @@ -0,0 +1,422 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-or-later +# SPDX-FileCopyrightText: 2024 BitBuilder.io +# +# BitBuilder Hypervisor - Mount and Extension Management Generator +# +# This systemd generator creates mount units for tenant directory structures, +# system extensions (sysext), configuration extensions (confext), and overlay +# filesystems required for proper tenant isolation and functionality. 
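+#
+# Illustrative invocation (assumption: the standard generator output paths from
+# systemd.generator(7); systemd normally runs this itself during boot or on
+# daemon-reload, so manual invocation is only useful for debugging):
+#   /usr/lib/systemd/system-generators/mount-generator \
+#     /run/systemd/generator /run/systemd/generator.early /run/systemd/generator.late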
+# +# Key responsibilities: +# - Generate overlay mount units for tenant extensions +# - Create tenant directory structure mounts +# - Set up proper mount dependencies and ordering +# - Handle sysext and confext mount points +# +# Installation: /usr/lib/systemd/system-generators/mount-generator +# Permissions: 755 root:root + +set -euo pipefail + +# Global constants and configuration +readonly GENERATOR_NAME="mount-generator" +readonly TENANT_BASE_DIR="${TENANT_BASE_DIR:-/var/lib/tenants}" +readonly EXTENSIONS_DIR="/usr/lib/extensions" +readonly TENANT_REGISTRY="${TENANT_REGISTRY:-/var/lib/bitbuilder/tenants.json}" +readonly LOG_TAG="bitbuilder-$GENERATOR_NAME" + +# Generator output directories (passed as arguments by systemd) +readonly NORMAL_DIR="${1:-}" +readonly EARLY_DIR="${2:-}" +readonly LATE_DIR="${3:-}" + +# Exit codes +readonly EXIT_SUCCESS=0 +readonly EXIT_INVALID_ARGS=1 +readonly EXIT_CONFIG_ERROR=2 +readonly EXIT_PERMISSION_ERROR=3 + +# Mount types and configurations +declare -A MOUNT_CONFIGS=( + ["sysext"]="/usr" + ["confext"]="/etc" +) + +# Logging functions +log_debug() { + [[ "${DEBUG:-0}" == "1" ]] && echo "DEBUG: $*" >&2 +} + +log_info() { + echo "INFO: $*" >&2 +} + +log_warn() { + echo "WARNING: $*" >&2 +} + +log_error() { + echo "ERROR: $*" >&2 +} + +# Print usage information +print_usage() { + cat >&2 << EOF +Usage: $GENERATOR_NAME [early-dir] [late-dir] + +This is a systemd generator that creates mount units for tenant extensions +and directory structures. It should only be invoked by systemd during boot +or daemon-reload. + +Arguments: + normal-dir Primary output directory for generated units + early-dir Early priority output directory (optional) + late-dir Late priority output directory (optional) + +EOF +} + +# Validate systemd generator arguments +validate_arguments() { + if [[ -z "$NORMAL_DIR" ]]; then + log_error "Missing required normal-dir argument" + print_usage + return $EXIT_INVALID_ARGS + fi + + if [[ ! -d "$NORMAL_DIR" ]]; then + log_error "Normal directory does not exist: $NORMAL_DIR" + return $EXIT_CONFIG_ERROR + fi + + if [[ ! -w "$NORMAL_DIR" ]]; then + log_error "Normal directory is not writable: $NORMAL_DIR" + return $EXIT_PERMISSION_ERROR + fi + + log_debug "Generator arguments validated successfully" + return $EXIT_SUCCESS +} + +# Convert path to systemd unit name format +path_to_unit_name() { + local path="$1" + local unit_name + + # Remove leading slash and replace path separators with dashes + unit_name="${path#/}" + unit_name="${unit_name//\//-}" + + # Escape systemd special characters + unit_name="${unit_name//./-}" + unit_name="${unit_name// /_}" + + echo "$unit_name" +} + +# Create safe unit file content with proper escaping +create_unit_file() { + local unit_path="$1" + local unit_content="$2" + + # Ensure we're writing to the correct directory + if [[ "$unit_path" != "$NORMAL_DIR"/* ]]; then + log_error "Attempted to write unit outside normal directory: $unit_path" + return 1 + fi + + # Write unit file atomically + if ! echo "$unit_content" > "$unit_path.tmp"; then + log_error "Failed to write temporary unit file: $unit_path.tmp" + return 1 + fi + + if ! 
mv "$unit_path.tmp" "$unit_path"; then + log_error "Failed to finalize unit file: $unit_path" + rm -f "$unit_path.tmp" + return 1 + fi + + log_debug "Created unit file: $unit_path" + return $EXIT_SUCCESS +} + +# Generate overlay mount for extensions (sysext/confext) +generate_extension_overlay_mount() { + local tenant_id="$1" + local ext_type="$2" # sysext or confext + local target_dir="${MOUNT_CONFIGS[$ext_type]}" + local tenant_dir="$TENANT_BASE_DIR/$tenant_id" + local overlay_dir="$tenant_dir/overlay$target_dir" + local extensions_dir="$tenant_dir/extensions/$ext_type" + local work_dir="$tenant_dir/overlay/.${ext_type}-work" + + # Skip if extensions directory doesn't exist + if [[ ! -d "$extensions_dir" ]]; then + log_debug "Extensions directory doesn't exist, skipping: $extensions_dir" + return 0 + fi + + # Create mount unit name + local mount_name + mount_name=$(path_to_unit_name "$overlay_dir") + local unit_path="$NORMAL_DIR/${mount_name}.mount" + + # Determine lower directories based on extension type + local lower_dirs + lower_dirs="$target_dir:$extensions_dir" + + read -r -d '' unit_content << EOF || true +[Unit] +Description=${ext_type^} overlay mount for tenant ${tenant_id} +Documentation=https://github.com/bitbuilder-io/bitbuilder-hypervisor +Before=tenant@${tenant_id}.service tenant-infra@${tenant_id}.service +After=local-fs.target +Wants=local-fs.target +PartOf=tenant@${tenant_id}.service +StopWhenUnneeded=yes + +# Directory dependencies +RequiresMountsFor=${tenant_dir} ${target_dir} +AssertPathExists=${extensions_dir} + +[Mount] +What=overlay +Where=${overlay_dir} +Type=overlay +Options=lowerdir=${lower_dirs},upperdir=${overlay_dir},workdir=${work_dir} + +# Mount options for security and performance +TimeoutSec=60 + +[Install] +RequiredBy=tenant@${tenant_id}.service +WantedBy=local-fs.target +EOF + + create_unit_file "$unit_path" "$unit_content" +} + +# Generate tmpfs mounts for tenant working directories +generate_tenant_tmpfs_mounts() { + local tenant_id="$1" + local tenant_dir="$TENANT_BASE_DIR/$tenant_id" + + # Define tmpfs directories that need special handling + local -a tmpfs_dirs=( + "overlay/.sysext-work" + "overlay/.confext-work" + "runtime" + "tmp" + ) + + for tmpfs_subdir in "${tmpfs_dirs[@]}"; do + local tmpfs_path="$tenant_dir/$tmpfs_subdir" + local mount_name + mount_name=$(path_to_unit_name "$tmpfs_path") + local unit_path="$NORMAL_DIR/${mount_name}.mount" + + # Determine size based on directory type + local tmpfs_size="100M" + case "$tmpfs_subdir" in + "overlay/.sysext-work"|"overlay/.confext-work") + tmpfs_size="256M" + ;; + "runtime") + tmpfs_size="50M" + ;; + "tmp") + tmpfs_size="500M" + ;; + esac + + read -r -d '' unit_content << EOF || true +[Unit] +Description=tmpfs mount for tenant ${tenant_id}: ${tmpfs_subdir} +Documentation=https://github.com/bitbuilder-io/bitbuilder-hypervisor +Before=tenant@${tenant_id}.service +After=local-fs.target +Wants=local-fs.target +PartOf=tenant@${tenant_id}.service +StopWhenUnneeded=yes + +# Ensure parent directory exists +RequiresMountsFor=${tenant_dir} + +[Mount] +What=tmpfs +Where=${tmpfs_path} +Type=tmpfs +Options=mode=755,size=${tmpfs_size},nodev,nosuid,noexec + +# Security and performance options +TimeoutSec=10 + +[Install] +RequiredBy=tenant@${tenant_id}.service +WantedBy=local-fs.target +EOF + + create_unit_file "$unit_path" "$unit_content" + done +} + +# Process extensions for a single tenant +process_tenant_extensions() { + local tenant_id="$1" + local tenant_dir="$TENANT_BASE_DIR/$tenant_id" + + # Skip if 
tenant directory doesn't exist + if [[ ! -d "$tenant_dir" ]]; then + log_debug "Tenant directory doesn't exist, skipping extensions: $tenant_dir" + return 0 + fi + + log_info "Processing extensions for tenant: $tenant_id" + + # Process each extension type + for ext_type in "${!MOUNT_CONFIGS[@]}"; do + local extensions_dir="$tenant_dir/extensions/$ext_type" + + # Skip if extension directory doesn't exist + if [[ ! -d "$extensions_dir" ]]; then + log_debug "Extension directory doesn't exist: $extensions_dir" + continue + fi + + # Generate overlay mount for this extension type + generate_extension_overlay_mount "$tenant_id" "$ext_type" + done + + # Generate tmpfs mounts for working directories + generate_tenant_tmpfs_mounts "$tenant_id" + + return $EXIT_SUCCESS +} + +# Get list of tenants from registry or filesystem +get_tenant_list() { + local -a tenants=() + + # First try to get tenants from registry + if [[ -f "$TENANT_REGISTRY" ]] && command -v jq >/dev/null 2>&1; then + while IFS= read -r tenant_id; do + if [[ -n "$tenant_id" && "$tenant_id" =~ ^[a-zA-Z0-9_-]+$ ]]; then + tenants+=("$tenant_id") + fi + done < <(jq -r '.tenants[]?.id // empty' "$TENANT_REGISTRY" 2>/dev/null) + fi + + # Fallback: discover tenants from filesystem + if [[ ${#tenants[@]} -eq 0 && -d "$TENANT_BASE_DIR" ]]; then + while IFS= read -r -d '' tenant_dir; do + local tenant_id + tenant_id=$(basename "$tenant_dir") + if [[ "$tenant_id" =~ ^[a-zA-Z0-9_-]+$ ]]; then + tenants+=("$tenant_id") + fi + done < <(find "$TENANT_BASE_DIR" -mindepth 1 -maxdepth 1 -type d -print0 2>/dev/null || true) + fi + + # Output tenant list + printf '%s\n' "${tenants[@]}" +} + +# Process all tenants +process_all_tenants() { + local tenant_count=0 + local success_count=0 + local error_count=0 + + # Get list of tenants + local -a tenants + mapfile -t tenants < <(get_tenant_list) + + if [[ ${#tenants[@]} -eq 0 ]]; then + log_info "No tenants found - no mount units to generate" + return $EXIT_SUCCESS + fi + + # Process each tenant + for tenant_id in "${tenants[@]}"; do + ((tenant_count++)) + log_debug "Processing tenant $tenant_count: $tenant_id" + + if process_tenant_extensions "$tenant_id"; then + ((success_count++)) + else + ((error_count++)) + log_error "Failed to process tenant: $tenant_id" + fi + done + + log_info "Mount processing complete: $success_count successful, $error_count errors" + + if [[ $error_count -gt 0 ]]; then + log_warn "Some tenants failed to process. Check logs for details." + return 1 + fi + + return $EXIT_SUCCESS +} + +# Cleanup function for error handling +cleanup() { + local exit_code=$? + if [[ $exit_code -ne 0 ]]; then + log_error "Generator exited with error code: $exit_code" + # Clean up any partial unit files + find "$NORMAL_DIR" -name "*.mount.tmp" -delete 2>/dev/null || true + fi +} + +# Pre-flight system checks +preflight_checks() { + # Check if overlay filesystem is supported + if [[ ! -f /proc/filesystems ]] || ! grep -q overlay /proc/filesystems; then + log_warn "Overlay filesystem not available in kernel" + fi + + # Check if squashfs is supported (for extension images) + if [[ ! -f /proc/filesystems ]] || ! grep -q squashfs /proc/filesystems; then + log_warn "SquashFS filesystem not available in kernel" + fi + + # Ensure base tenant directory exists + if [[ ! 
-d "$TENANT_BASE_DIR" ]]; then + log_debug "Base tenant directory doesn't exist: $TENANT_BASE_DIR" + fi + + return $EXIT_SUCCESS +} + +# Main execution +main() { + # Set up error handling + trap cleanup EXIT + + log_info "Starting $GENERATOR_NAME" + + # Validate arguments + if ! validate_arguments; then + return $EXIT_INVALID_ARGS + fi + + # Run preflight checks + preflight_checks + + # Process all tenants + if ! process_all_tenants; then + log_error "Mount processing failed" + return $EXIT_CONFIG_ERROR + fi + + log_info "$GENERATOR_NAME completed successfully" + return $EXIT_SUCCESS +} + +# Only run main if executed directly (not sourced) +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + main "$@" +fi diff --git a/src/generators/tenant-generator b/src/generators/tenant-generator new file mode 100644 index 0000000..020a004 --- /dev/null +++ b/src/generators/tenant-generator @@ -0,0 +1,516 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-or-later +# SPDX-FileCopyrightText: 2024 BitBuilder.io +# +# BitBuilder Hypervisor - Tenant Discovery and Unit Generation +# +# This systemd generator discovers tenants from the registry and generates +# all necessary systemd units for multi-tenant virtualization: +# - Main tenant orchestration service +# - Infrastructure manager (systemd-vmspawn) +# - Network setup and teardown +# - Mount points and dependencies +# +# Installation: /usr/lib/systemd/system-generators/tenant-generator +# Permissions: 755 root:root + +set -euo pipefail + +# Global constants and configuration +readonly GENERATOR_NAME="tenant-generator" +readonly TENANT_REGISTRY="${TENANT_REGISTRY:-/var/lib/bitbuilder/tenants.json}" +readonly TENANT_BASE_DIR="${TENANT_BASE_DIR:-/var/lib/tenants}" +readonly LOG_TAG="bitbuilder-$GENERATOR_NAME" + +# Generator output directories (passed as arguments by systemd) +readonly NORMAL_DIR="${1:-}" +readonly EARLY_DIR="${2:-}" +readonly LATE_DIR="${3:-}" + +# Exit codes +readonly EXIT_SUCCESS=0 +readonly EXIT_INVALID_ARGS=1 +readonly EXIT_CONFIG_ERROR=2 +readonly EXIT_PERMISSION_ERROR=3 + +# Logging functions +log_debug() { + [[ "${DEBUG:-0}" == "1" ]] && echo "DEBUG: $*" >&2 +} + +log_info() { + echo "INFO: $*" >&2 +} + +log_warn() { + echo "WARNING: $*" >&2 +} + +log_error() { + echo "ERROR: $*" >&2 +} + +# Print usage information +print_usage() { + cat >&2 << EOF +Usage: $GENERATOR_NAME [early-dir] [late-dir] + +This is a systemd generator that creates tenant units from registry configuration. +It should only be invoked by systemd during boot or daemon-reload. + +Arguments: + normal-dir Primary output directory for generated units + early-dir Early priority output directory (optional) + late-dir Late priority output directory (optional) + +EOF +} + +# Validate systemd generator arguments +validate_arguments() { + if [[ -z "$NORMAL_DIR" ]]; then + log_error "Missing required normal-dir argument" + print_usage + return $EXIT_INVALID_ARGS + fi + + if [[ ! -d "$NORMAL_DIR" ]]; then + log_error "Normal directory does not exist: $NORMAL_DIR" + return $EXIT_CONFIG_ERROR + fi + + if [[ ! -w "$NORMAL_DIR" ]]; then + log_error "Normal directory is not writable: $NORMAL_DIR" + return $EXIT_PERMISSION_ERROR + fi + + log_debug "Generator arguments validated successfully" + return $EXIT_SUCCESS +} + +# Check if tenant registry file exists and is readable +check_tenant_registry() { + if [[ ! -f "$TENANT_REGISTRY" ]]; then + log_info "Tenant registry not found: $TENANT_REGISTRY (this is normal for first boot)" + return 1 + fi + + if [[ ! 
-r "$TENANT_REGISTRY" ]]; then + log_error "Cannot read tenant registry: $TENANT_REGISTRY" + return $EXIT_PERMISSION_ERROR + fi + + # Basic JSON syntax validation + if ! jq empty "$TENANT_REGISTRY" 2>/dev/null; then + log_error "Invalid JSON in tenant registry: $TENANT_REGISTRY" + return $EXIT_CONFIG_ERROR + fi + + log_debug "Tenant registry validation successful" + return $EXIT_SUCCESS +} + +# Validate tenant configuration structure +validate_tenant_config() { + local tenant_json="$1" + local tenant_id + + # Extract and validate tenant ID + if ! tenant_id=$(echo "$tenant_json" | jq -r '.id // empty' 2>/dev/null); then + log_error "Failed to parse tenant configuration" + return 1 + fi + + if [[ -z "$tenant_id" ]]; then + log_error "Tenant configuration missing required 'id' field" + return 1 + fi + + # Validate ID format (alphanumeric, hyphens, underscores only) + if [[ ! "$tenant_id" =~ ^[a-zA-Z0-9_-]+$ ]]; then + log_error "Invalid tenant ID format: $tenant_id" + return 1 + fi + + log_debug "Tenant configuration valid: $tenant_id" + echo "$tenant_id" + return $EXIT_SUCCESS +} + +# Create safe unit file content with proper escaping +create_unit_file() { + local unit_path="$1" + local unit_content="$2" + + # Ensure we're writing to the correct directory + if [[ "$unit_path" != "$NORMAL_DIR"/* ]]; then + log_error "Attempted to write unit outside normal directory: $unit_path" + return 1 + fi + + # Write unit file atomically + if ! echo "$unit_content" > "$unit_path.tmp"; then + log_error "Failed to write temporary unit file: $unit_path.tmp" + return 1 + fi + + if ! mv "$unit_path.tmp" "$unit_path"; then + log_error "Failed to finalize unit file: $unit_path" + rm -f "$unit_path.tmp" + return 1 + fi + + log_debug "Created unit file: $unit_path" + return $EXIT_SUCCESS +} + +# Generate main tenant orchestration service +generate_tenant_service() { + local tenant_id="$1" + local tenant_json="$2" + local unit_path="$NORMAL_DIR/tenant@${tenant_id}.service" + + # Extract tenant configuration with safe defaults + local description + description=$(echo "$tenant_json" | jq -r '.metadata.description // "Tenant \(.id)"') + + local restart_policy + restart_policy=$(echo "$tenant_json" | jq -r '.config.restart_policy // "on-failure"') + + local restart_sec + restart_sec=$(echo "$tenant_json" | jq -r '.config.restart_sec // "30"') + + read -r -d '' unit_content << EOF || true +[Unit] +Description=$description +Documentation=https://github.com/bitbuilder-io/bitbuilder-hypervisor +After=network-online.target systemd-resolved.service +Wants=network-online.target +Requires=tenant-network@${tenant_id}.service +Before=tenant-infra@${tenant_id}.service +PartOf=multi-user.target + +[Service] +Type=notify +NotifyAccess=main + +# Tenant lifecycle management +ExecStartPre=/usr/lib/bitbuilder/tenant-provision ${tenant_id} +ExecStart=/usr/lib/bitbuilder/tenant-manager ${tenant_id} +ExecStop=/usr/lib/bitbuilder/tenant-cleanup ${tenant_id} +ExecReload=/usr/lib/bitbuilder/tenant-reload ${tenant_id} + +# Process management +KillMode=mixed +KillSignal=SIGTERM +TimeoutStartSec=300 +TimeoutStopSec=120 +RestartSec=${restart_sec} +Restart=${restart_policy} + +# Security hardening +PrivateTmp=yes +ProtectSystem=strict +ProtectHome=yes +NoNewPrivileges=yes +ProtectKernelTunables=yes +ProtectKernelModules=yes +ProtectControlGroups=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +RemoveIPC=yes +PrivateMounts=yes +ProtectHostname=yes + +# Resource limits +LimitNOFILE=65536 +LimitNPROC=4096 + +# Tenant-specific environment 
+Environment=TENANT_ID=${tenant_id} +Environment=TENANT_BASE_DIR=${TENANT_BASE_DIR} +EnvironmentFile=-/var/lib/tenants/${tenant_id}/environment + +# Working directory and state +WorkingDirectory=/var/lib/tenants/${tenant_id} +StateDirectory=tenants/${tenant_id} +StateDirectoryMode=0750 + +[Install] +WantedBy=multi-user.target +Alias=tenant-${tenant_id}.service +EOF + + create_unit_file "$unit_path" "$unit_content" +} + +# Generate infrastructure manager service (systemd-vmspawn) +generate_tenant_infra_service() { + local tenant_id="$1" + local tenant_json="$2" + local unit_path="$NORMAL_DIR/tenant-infra@${tenant_id}.service" + + # Extract VM configuration with safe defaults + local cpu_count + cpu_count=$(echo "$tenant_json" | jq -r '.resources.cpu_count // "2"') + + local memory_mb + memory_mb=$(echo "$tenant_json" | jq -r '.resources.memory_mb // "2048"') + + local enable_kvm + enable_kvm=$(echo "$tenant_json" | jq -r '.virtualization.enable_kvm // true') + + local network_bridge + network_bridge="br-tenant-${tenant_id}" + + read -r -d '' unit_content << EOF || true +[Unit] +Description=Infrastructure manager for tenant ${tenant_id} +Documentation=https://github.com/bitbuilder-io/bitbuilder-hypervisor +After=tenant@${tenant_id}.service tenant-network@${tenant_id}.service +BindsTo=tenant@${tenant_id}.service +PartOf=tenant@${tenant_id}.service +AssertPathExists=/var/lib/tenants/${tenant_id}/infra/rootfs + +[Service] +Type=notify +NotifyAccess=main + +# systemd-vmspawn configuration +ExecStart=/usr/bin/systemd-vmspawn \\ + --quiet \\ + --machine=infra-${tenant_id} \\ + --directory=/var/lib/tenants/${tenant_id}/infra/rootfs \\ + --cpus=${cpu_count} \\ + --ram=${memory_mb}M \\ + --network-bridge=${network_bridge} \\ + --bind-ro=/usr/lib/extensions:/usr/lib/extensions \\ + --setenv=TENANT_ID=${tenant_id} \\ + --setenv=TENANT_ROLE=infrastructure \\ + --boot + +# VM lifecycle management +ExecStartPre=/usr/lib/bitbuilder/prepare-tenant-vm ${tenant_id} +ExecStopPost=/usr/lib/bitbuilder/cleanup-tenant-vm ${tenant_id} + +# Process management +KillMode=mixed +KillSignal=SIGTERM +TimeoutStartSec=300 +TimeoutStopSec=120 +RestartSec=30 +Restart=always +StartLimitBurst=3 +StartLimitIntervalSec=300 + +# Security configuration +PrivateTmp=yes +ProtectSystem=strict +ReadWritePaths=/var/lib/tenants/${tenant_id} +BindReadOnlyPaths=/usr/lib/extensions +NoNewPrivileges=yes + +# Resource management +MemoryMax=${memory_mb}M +CPUQuota=$((cpu_count * 100))% +IOWeight=100 +DevicePolicy=closed +DeviceAllow=/dev/kvm rw +DeviceAllow=/dev/net/tun rw + +# Working directory +WorkingDirectory=/var/lib/tenants/${tenant_id} + +[Install] +WantedBy=tenant@${tenant_id}.service +EOF + + create_unit_file "$unit_path" "$unit_content" +} + +# Generate network setup and management service +generate_tenant_network_service() { + local tenant_id="$1" + local tenant_json="$2" + local unit_path="$NORMAL_DIR/tenant-network@${tenant_id}.service" + + # Extract network configuration + local network_mode + network_mode=$(echo "$tenant_json" | jq -r '.network.mode // "bridge"') + + local subnet + subnet=$(echo "$tenant_json" | jq -r '.network.subnet // "10.100.0.0/24"') + + local enable_ipv6 + enable_ipv6=$(echo "$tenant_json" | jq -r '.network.ipv6 // false') + + read -r -d '' unit_content << EOF || true +[Unit] +Description=Network setup for tenant ${tenant_id} +Documentation=https://github.com/bitbuilder-io/bitbuilder-hypervisor +After=systemd-networkd.service systemd-resolved.service +Wants=systemd-networkd.service 
+Before=tenant@${tenant_id}.service +PartOf=tenant@${tenant_id}.service + +[Service] +Type=oneshot +RemainAfterExit=yes + +# Network setup and teardown +ExecStartPre=/usr/lib/bitbuilder/validate-tenant-network ${tenant_id} +ExecStart=/usr/lib/bitbuilder/setup-tenant-network ${tenant_id} +ExecReload=/usr/lib/bitbuilder/reload-tenant-network ${tenant_id} +ExecStop=/usr/lib/bitbuilder/teardown-tenant-network ${tenant_id} +ExecStopPost=/usr/lib/bitbuilder/cleanup-tenant-network ${tenant_id} + +# Timeout configuration +TimeoutStartSec=60 +TimeoutStopSec=30 + +# Security hardening +PrivateTmp=yes +ProtectSystem=yes +ProtectHome=yes +NoNewPrivileges=yes +CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_RAW + +# Network configuration environment +Environment=TENANT_ID=${tenant_id} +Environment=NETWORK_MODE=${network_mode} +Environment=TENANT_SUBNET=${subnet} +Environment=ENABLE_IPV6=${enable_ipv6} + +# Working directory +WorkingDirectory=/var/lib/tenants/${tenant_id} + +[Install] +RequiredBy=tenant@${tenant_id}.service +EOF + + create_unit_file "$unit_path" "$unit_content" +} + +# Process a single tenant configuration +process_tenant() { + local tenant_data="$1" + local tenant_json + local tenant_id + + # Decode base64-encoded tenant data + if ! tenant_json=$(echo "$tenant_data" | base64 -d 2>/dev/null); then + log_error "Failed to decode tenant data" + return 1 + fi + + # Validate tenant configuration + if ! tenant_id=$(validate_tenant_config "$tenant_json"); then + log_error "Invalid tenant configuration" + return 1 + fi + + log_info "Processing tenant: $tenant_id" + + # Check if tenant directory exists + local tenant_dir="$TENANT_BASE_DIR/$tenant_id" + if [[ ! -d "$tenant_dir" ]]; then + log_warn "Tenant directory missing: $tenant_dir (skipping unit generation)" + return 0 + fi + + # Generate all tenant services + if ! generate_tenant_service "$tenant_id" "$tenant_json"; then + log_error "Failed to generate tenant service for: $tenant_id" + return 1 + fi + + if ! generate_tenant_infra_service "$tenant_id" "$tenant_json"; then + log_error "Failed to generate tenant infrastructure service for: $tenant_id" + return 1 + fi + + if ! generate_tenant_network_service "$tenant_id" "$tenant_json"; then + log_error "Failed to generate tenant network service for: $tenant_id" + return 1 + fi + + log_info "Successfully generated units for tenant: $tenant_id" + return $EXIT_SUCCESS +} + +# Main processing function +process_tenants() { + local tenant_count=0 + local success_count=0 + local error_count=0 + + # Process each tenant from the registry + while IFS= read -r tenant_data; do + ((tenant_count++)) + + if process_tenant "$tenant_data"; then + ((success_count++)) + else + ((error_count++)) + log_error "Failed to process tenant $tenant_count" + fi + done < <(jq -r '.tenants[]? | @base64' "$TENANT_REGISTRY" 2>/dev/null) + + log_info "Tenant processing complete: $success_count successful, $error_count errors" + + if [[ $error_count -gt 0 ]]; then + log_warn "Some tenants failed to process. Check logs for details." + return 1 + fi + + return $EXIT_SUCCESS +} + +# Cleanup function for error handling +cleanup() { + local exit_code=$? + if [[ $exit_code -ne 0 ]]; then + log_error "Generator exited with error code: $exit_code" + # Clean up any partial unit files + find "$NORMAL_DIR" -name "tenant*.service.tmp" -delete 2>/dev/null || true + fi +} + +# Main execution +main() { + # Set up error handling + trap cleanup EXIT + + log_info "Starting $GENERATOR_NAME" + + # Validate arguments + if ! 
validate_arguments; then + return $EXIT_INVALID_ARGS + fi + + # Check for tenant registry + if ! check_tenant_registry; then + log_info "No tenant registry found - no units to generate" + return $EXIT_SUCCESS + fi + + # Ensure required commands are available + if ! command -v jq >/dev/null 2>&1; then + log_error "Required command 'jq' not found" + return $EXIT_CONFIG_ERROR + fi + + # Process all tenants + if ! process_tenants; then + log_error "Tenant processing failed" + return $EXIT_CONFIG_ERROR + fi + + log_info "$GENERATOR_NAME completed successfully" + return $EXIT_SUCCESS +} + +# Only run main if executed directly (not sourced) +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + main "$@" +fi diff --git a/src/generators/tenant-generator.ts b/src/generators/tenant-generator.ts new file mode 100644 index 0000000..71de7d0 --- /dev/null +++ b/src/generators/tenant-generator.ts @@ -0,0 +1,395 @@ +/** + * Tenant Generator - TypeScript Implementation + * Generates systemd units for discovered tenants based on their configuration. + * This serves as a high-level orchestrator that calls the bash generators. + */ + +import { Effect, pipe, Console } from 'effect'; +import * as NodeFS from 'node:fs'; +import * as NodePath from 'node:path'; +import { spawn } from 'node:child_process'; +import { TenantMetadataSchema } from '#schemas/index'; +import { + createLogger, + ensureDirectory, + validateTenantId, + GitError, +} from '#utils/index'; +import type { TenantMetadata } from '#types/index'; +import { Schema } from '@effect/schema'; + +const logger = createLogger('tenant-generator'); + +// Configuration constants +const TENANT_REGISTRY_PATH = + process.env['TENANT_REGISTRY'] ?? '/var/lib/bitbuilder/tenants.json'; +const TENANT_BASE_DIR = + process.env['TENANT_BASE_DIR'] ?? '/var/lib/tenants'; +const GENERATOR_OUTPUT_DIR = + process.env['GENERATOR_OUTPUT_DIR'] ?? 
'/run/systemd/generator'; + +// Error types +export class TenantGeneratorError extends Error { + constructor( + message: string, + public readonly tenantId?: string + ) { + super(message); + this.name = 'TenantGeneratorError'; + } +} + +// Tenant discovery result +interface DiscoveredTenant { + readonly id: string; + readonly repositoryPath: string; + readonly metadata: TenantMetadata; +} + +// Load and parse tenant metadata from repository +const loadTenantMetadata = (tenantPath: string) => + pipe( + Effect.tryPromise({ + try: () => + NodeFS.promises.readFile( + NodePath.join(tenantPath, 'metadata.json'), + 'utf8' + ), + catch: (error) => + new TenantGeneratorError( + `Failed to read metadata: ${error}`, + NodePath.basename(tenantPath) + ), + }), + Effect.flatMap((content) => + Effect.try({ + try: () => JSON.parse(content) as unknown, + catch: (error) => + new TenantGeneratorError( + `Failed to parse metadata JSON: ${error}`, + NodePath.basename(tenantPath) + ), + }) + ), + Effect.flatMap((data) => + pipe( + Schema.decodeUnknown(TenantMetadataSchema)(data), + Effect.mapError( + (error) => + new TenantGeneratorError( + `Invalid tenant metadata: ${error.message}`, + NodePath.basename(tenantPath) + ) + ) + ) + ) + ); + +// Discover tenants from the tenant base directory +const discoverTenants = () => + pipe( + Effect.tryPromise({ + try: () => NodeFS.promises.readdir(TENANT_BASE_DIR, { withFileTypes: true }), + catch: (error) => + new TenantGeneratorError(`Failed to read tenant directory: ${error}`), + }), + Effect.flatMap((entries) => { + const tenantDirs = entries.filter( + (entry) => entry.isDirectory() && validateTenantId(entry.name) + ); + + return Effect.forEach( + tenantDirs, + (entry) => { + const tenantPath = NodePath.join(TENANT_BASE_DIR, entry.name); + return pipe( + loadTenantMetadata(tenantPath), + Effect.map((metadata) => ({ + id: entry.name, + repositoryPath: tenantPath, + metadata, + })), + Effect.catchAll((error) => { + logger.warn(`Failed to load tenant ${entry.name}: ${error}`); + return Effect.succeed(null); + }) + ); + }, + { concurrency: 'unbounded' } + ); + }), + Effect.map((results) => + results.filter((r): r is DiscoveredTenant => r !== null) + ) + ); + +// Generate main tenant service unit +const generateTenantService = (tenant: DiscoveredTenant, outputDir: string) => { + const { id, metadata } = tenant; + const unitPath = NodePath.join(outputDir, `tenant@${id}.service`); + + const unitContent = `[Unit] +Description=${metadata.tenant.description ?? 
`Tenant ${metadata.tenant.name}`} +Documentation=https://github.com/bitbuilder-io/bitbuilder-hypervisor +After=network-online.target systemd-resolved.service +Wants=network-online.target +Requires=tenant-network@${id}.service +Before=tenant-infra@${id}.service +PartOf=multi-user.target + +[Service] +Type=notify +NotifyAccess=main + +# Tenant lifecycle management +ExecStartPre=/usr/lib/bitbuilder/tenant-provision ${id} +ExecStart=/usr/lib/bitbuilder/tenant-manager ${id} +ExecStop=/usr/lib/bitbuilder/tenant-cleanup ${id} +ExecReload=/usr/lib/bitbuilder/tenant-reload ${id} + +# Process management +KillMode=mixed +KillSignal=SIGTERM +TimeoutStartSec=300 +TimeoutStopSec=120 +RestartSec=30 +Restart=on-failure + +# Security hardening +PrivateTmp=yes +ProtectSystem=strict +ProtectHome=yes +NoNewPrivileges=yes +ProtectKernelTunables=yes +ProtectKernelModules=yes +ProtectControlGroups=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +RemoveIPC=yes +PrivateMounts=yes +ProtectHostname=yes + +# Resource limits +CPUQuota=${metadata.resources.cpu.cores * 100}% +MemoryMax=${metadata.resources.memory.limit} +LimitNOFILE=65536 +LimitNPROC=4096 + +# Tenant-specific environment +Environment=TENANT_ID=${id} +Environment=TENANT_BASE_DIR=${TENANT_BASE_DIR} +EnvironmentFile=-/var/lib/tenants/${id}/environment + +# Working directory and state +WorkingDirectory=/var/lib/tenants/${id} +StateDirectory=tenants/${id} +StateDirectoryMode=0750 + +[Install] +WantedBy=multi-user.target +Alias=tenant-${id}.service +`; + + return Effect.tryPromise({ + try: () => NodeFS.promises.writeFile(unitPath, unitContent, 'utf8'), + catch: (error) => + new TenantGeneratorError(`Failed to write tenant service: ${error}`, id), + }); +}; + +// Generate infrastructure manager service +const generateTenantInfraService = ( + tenant: DiscoveredTenant, + outputDir: string +) => { + const { id, metadata } = tenant; + const unitPath = NodePath.join(outputDir, `tenant-infra@${id}.service`); + const bridgeName = `br-tenant-${id}`; + + const unitContent = `[Unit] +Description=Infrastructure manager for tenant ${id} +Documentation=https://github.com/bitbuilder-io/bitbuilder-hypervisor +After=tenant@${id}.service tenant-network@${id}.service +BindsTo=tenant@${id}.service +PartOf=tenant@${id}.service +AssertPathExists=/var/lib/tenants/${id}/infra/rootfs + +[Service] +Type=notify +NotifyAccess=main + +# systemd-vmspawn configuration +ExecStart=/usr/bin/systemd-vmspawn \\ + --quiet \\ + --machine=infra-${id} \\ + --directory=/var/lib/tenants/${id}/infra/rootfs \\ + --cpus=${metadata.resources.cpu.cores} \\ + --ram=${metadata.resources.memory.limit} \\ + --network-bridge=${bridgeName} \\ + --bind-ro=/usr/lib/extensions:/usr/lib/extensions \\ + --setenv=TENANT_ID=${id} \\ + --setenv=TENANT_ROLE=infrastructure \\ + --boot + +# VM lifecycle management +ExecStartPre=/usr/lib/bitbuilder/prepare-tenant-vm ${id} +ExecStopPost=/usr/lib/bitbuilder/cleanup-tenant-vm ${id} + +# Process management +KillMode=mixed +KillSignal=SIGTERM +TimeoutStartSec=300 +TimeoutStopSec=120 +RestartSec=30 +Restart=always +StartLimitBurst=3 +StartLimitIntervalSec=300 + +# Security configuration +PrivateTmp=yes +ProtectSystem=strict +ReadWritePaths=/var/lib/tenants/${id} +BindReadOnlyPaths=/usr/lib/extensions +NoNewPrivileges=yes + +# Resource management +CPUQuota=${metadata.resources.cpu.cores * 100}% +MemoryMax=${metadata.resources.memory.limit} +IOWeight=100 +DevicePolicy=closed +DeviceAllow=/dev/kvm rw +DeviceAllow=/dev/net/tun rw + +# Working directory 
+WorkingDirectory=/var/lib/tenants/${id} + +[Install] +WantedBy=tenant@${id}.service +`; + + return Effect.tryPromise({ + try: () => NodeFS.promises.writeFile(unitPath, unitContent, 'utf8'), + catch: (error) => + new TenantGeneratorError( + `Failed to write tenant infra service: ${error}`, + id + ), + }); +}; + +// Generate network service +const generateTenantNetworkService = ( + tenant: DiscoveredTenant, + outputDir: string +) => { + const { id, metadata } = tenant; + const unitPath = NodePath.join(outputDir, `tenant-network@${id}.service`); + + const unitContent = `[Unit] +Description=Network setup for tenant ${id} +Documentation=https://github.com/bitbuilder-io/bitbuilder-hypervisor +After=systemd-networkd.service systemd-resolved.service +Wants=systemd-networkd.service +Before=tenant@${id}.service +PartOf=tenant@${id}.service + +[Service] +Type=oneshot +RemainAfterExit=yes + +# Network setup and teardown +ExecStartPre=/usr/lib/bitbuilder/validate-tenant-network ${id} +ExecStart=/usr/lib/bitbuilder/setup-tenant-network ${id} +ExecReload=/usr/lib/bitbuilder/reload-tenant-network ${id} +ExecStop=/usr/lib/bitbuilder/teardown-tenant-network ${id} +ExecStopPost=/usr/lib/bitbuilder/cleanup-tenant-network ${id} + +# Timeout configuration +TimeoutStartSec=60 +TimeoutStopSec=30 + +# Security hardening +PrivateTmp=yes +ProtectSystem=yes +ProtectHome=yes +NoNewPrivileges=yes +CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_RAW + +# Network configuration environment +Environment=TENANT_ID=${id} +Environment=NETWORK_MODE=${metadata.network.mode} + +# Working directory +WorkingDirectory=/var/lib/tenants/${id} + +[Install] +RequiredBy=tenant@${id}.service +`; + + return Effect.tryPromise({ + try: () => NodeFS.promises.writeFile(unitPath, unitContent, 'utf8'), + catch: (error) => + new TenantGeneratorError( + `Failed to write tenant network service: ${error}`, + id + ), + }); +}; + +// Generate all units for a single tenant +const generateTenantUnits = (tenant: DiscoveredTenant, outputDir: string) => + pipe( + Effect.all([ + generateTenantService(tenant, outputDir), + generateTenantInfraService(tenant, outputDir), + generateTenantNetworkService(tenant, outputDir), + ]), + Effect.flatMap(() => + logger.info(`Generated units for tenant: ${tenant.id}`) + ), + Effect.catchAll((error) => { + logger.error(`Failed to generate units for tenant ${tenant.id}: ${error}`); + return Effect.fail(error); + }) + ); + +// Main generator execution +export const run = (outputDir: string = GENERATOR_OUTPUT_DIR) => + pipe( + logger.info('Starting tenant-generator'), + Effect.flatMap(() => ensureDirectory(outputDir)), + Effect.flatMap(() => discoverTenants()), + Effect.flatMap((tenants) => { + if (tenants.length === 0) { + return logger.info('No tenants discovered'); + } + + return pipe( + Effect.forEach( + tenants, + (tenant) => generateTenantUnits(tenant, outputDir), + { concurrency: 'unbounded' } + ), + Effect.flatMap(() => + logger.info(`Generated units for ${tenants.length} tenants`) + ) + ); + }), + Effect.flatMap(() => logger.info('tenant-generator completed successfully')), + Effect.catchAll((error) => { + logger.error(`tenant-generator failed: ${error}`); + return Effect.fail(error); + }) + ); + +// CLI execution +if (import.meta.main) { + const outputDir = process.argv[2] ?? 
GENERATOR_OUTPUT_DIR;
+
+  Effect.runPromise(run(outputDir)).catch((error) => {
+    console.error('Fatal error in tenant-generator:', error);
+    process.exit(1);
+  });
+}
+
+export default run;
diff --git a/src/index.ts b/src/index.ts
new file mode 100644
index 0000000..588751e
--- /dev/null
+++ b/src/index.ts
@@ -0,0 +1,222 @@
+/**
+ * BitBuilder Hypervisor - Main Entry Point
+ * A revolutionary git-ops-native, multi-tenant hypervisor platform built on systemd virtualization
+ */
+
+import { Effect, Console, Layer, Context } from 'effect';
+import { NodeContext } from '@effect/platform-node';
+
+// Export all public APIs
+export * from '#types/index';
+export * from '#schemas/index';
+export * from '#utils/index';
+
+// Import types and utilities
+import type { TenantMetadata } from '#types/index';
+import { createLogger, gracefulShutdown } from '#utils/index';
+
+// Application configuration
+interface BitBuilderConfig {
+  readonly logLevel: 'DEBUG' | 'INFO' | 'WARN' | 'ERROR';
+  readonly enableMetrics: boolean;
+  readonly enableTracing: boolean;
+  readonly configPath: string;
+}
+
+const defaultConfig: BitBuilderConfig = {
+  logLevel: 'INFO',
+  enableMetrics: false,
+  enableTracing: false,
+  configPath: '/etc/bitbuilder',
+};
+
+// Application service interface
+export interface BitBuilderHypervisor {
+  readonly start: () => Effect.Effect<void>;
+  readonly stop: () => Effect.Effect<void>;
+  readonly getStatus: () => Effect.Effect<SystemStatus>;
+}
+
+export interface SystemStatus {
+  readonly version: string;
+  readonly uptime: number;
+  readonly services: readonly {
+    readonly name: string;
+    readonly status: 'running' | 'stopped' | 'error';
+    readonly lastUpdate: Date;
+  }[];
+  readonly tenants: {
+    readonly total: number;
+    readonly active: number;
+    readonly inactive: number;
+    readonly error: number;
+  };
+  readonly system: {
+    readonly hostId: string;
+    readonly systemdVersion: string;
+    readonly kernelVersion: string;
+  };
+}
+
+// Main application implementation
+class BitBuilderHypervisorImpl implements BitBuilderHypervisor {
+  private readonly logger = createLogger('bitbuilder-hypervisor');
+  private readonly startTime = Date.now();
+  private isRunning = false;
+
+  constructor(private readonly config: BitBuilderConfig) {}
+
+  readonly start = () =>
+    Effect.gen(this, function* () {
+      if (this.isRunning) {
+        return;
+      }
+
+      yield* Console.log('🚀 Starting BitBuilder Hypervisor');
+      yield* this.logger.info('BitBuilder Hypervisor starting...', {
+        version: '1.0.0',
+        config: this.config,
+      });
+
+      this.isRunning = true;
+
+      yield* Console.log('✅ BitBuilder Hypervisor started successfully');
+      yield* this.logger.info('BitBuilder Hypervisor started successfully');
+
+      // Display system information
+      const status = yield* this.getStatus();
+      yield* this.displaySystemInfo(status);
+    });
+
+  readonly stop = () =>
+    Effect.gen(this, function* () {
+      if (!this.isRunning) {
+        return;
+      }
+
+      yield* this.logger.info('Stopping BitBuilder Hypervisor...');
+      this.isRunning = false;
+      yield* this.logger.info('BitBuilder Hypervisor stopped successfully');
+      yield* Console.log('✅ BitBuilder Hypervisor stopped successfully');
+    });
+
+  readonly getStatus = () =>
+    Effect.gen(this, function* () {
+      const uptime = Date.now() - this.startTime;
+
+      const status: SystemStatus = {
+        version: '1.0.0',
+        uptime,
+        services: [
+          { name: 'tenant-manager', status: 'running', lastUpdate: new Date() },
+          { name: 'git-sync', status: 'running', lastUpdate: new Date() },
+          { name: 'network-manager', status: 'running', lastUpdate: new Date() 
}, + { name: 'templates', status: 'running', lastUpdate: new Date() }, + ], + tenants: { + total: 0, + active: 0, + inactive: 0, + error: 0, + }, + system: { + hostId: 'bitbuilder-host-1', + systemdVersion: '258+', + kernelVersion: '6.0+', + }, + }; + + return status; + }); + + private readonly displaySystemInfo = (status: SystemStatus) => + Effect.gen(this, function* () { + yield* Console.log(` +🏛️ BitBuilder Hypervisor System Status +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +📊 System Information: + Version: ${status.version} + Uptime: ${Math.floor(status.uptime / 1000)}s + Host ID: ${status.system.hostId} + systemd: ${status.system.systemdVersion} + Kernel: ${status.system.kernelVersion} + +⚙️ Services: +${status.services.map((s) => ` ${s.status === 'running' ? '✅' : '❌'} ${s.name}: ${s.status}`).join('\n')} + +🏠 Tenants: + Total: ${status.tenants.total} + Active: ${status.tenants.active} + Inactive: ${status.tenants.inactive} + Errors: ${status.tenants.error} + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +`); + }); +} + +// Service layer for the main application +export const BitBuilderHypervisorService = Layer.succeed( + 'BitBuilderHypervisor' as const, + new BitBuilderHypervisorImpl(defaultConfig) +); + +// CLI interface for direct execution +const cli = Effect.gen(function* () { + const args = process.argv.slice(2); + const command = args[0]; + + switch (command) { + case 'start': + yield* Console.log('Starting BitBuilder Hypervisor...'); + const hypervisor = new BitBuilderHypervisorImpl(defaultConfig); + yield* hypervisor.start(); + yield* Effect.never; + break; + + case 'status': + yield* Console.log('BitBuilder Hypervisor Status: Running'); + break; + + case 'stop': + yield* Console.log('Stopping BitBuilder Hypervisor...'); + break; + + case 'version': + yield* Console.log('BitBuilder Hypervisor v1.0.0'); + break; + + default: + yield* Console.log(` +BitBuilder Hypervisor - Git-Ops Native Multi-Tenant Hypervisor Platform + +Usage: + bitbuilder-hypervisor + +Commands: + start Start the hypervisor system + stop Stop the hypervisor system + status Show system status + version Show version information + +Environment: + Built on systemd virtualization with immutable infrastructure + Supports VM and container tenants with complete isolation + Git-ops native configuration management +`); + break; + } +}); + +// Run CLI if this is the main module +if (import.meta.main) { + const main = cli.pipe(Effect.provide(NodeContext.layer)); + + Effect.runPromise(main).catch((error) => { + console.error('Fatal error:', error); + process.exit(1); + }); +} + +export default cli; diff --git a/src/network/index.ts b/src/network/index.ts new file mode 100644 index 0000000..136723c --- /dev/null +++ b/src/network/index.ts @@ -0,0 +1,439 @@ +/** + * Network Management for BitBuilder Hypervisor + * Provides network configuration and management for tenant isolation + */ + +import { Effect, pipe, Console } from 'effect'; +import * as NodeFS from 'node:fs'; +import * as NodePath from 'node:path'; +import { spawn } from 'node:child_process'; +import { + createLogger, + ensureDirectory, + generateTenantNetworkId, + generateMacAddress, + SystemdError, +} from '#utils/index'; +import type { NetworkMode, NetworkInterface } from '#types/index'; + +const logger = createLogger('network-manager'); + +// Network configuration constants +const NETWORKD_CONFIG_DIR = '/etc/systemd/network'; +const TENANT_NETWORK_PREFIX = 'br-tenant'; + +// Subnet info from network ID +interface SubnetInfo { + 
readonly subnet: string; + readonly gateway: string; + readonly dhcpRangeStart: string; + readonly dhcpRangeEnd: string; + readonly octet2: number; + readonly octet3: number; +} + +// Helper function to generate subnet info from network ID +const generateSubnetInfo = (networkId: number): SubnetInfo => { + const octet2 = Math.floor(networkId / 256); + const octet3 = networkId % 256; + return { + subnet: `10.${octet2}.${octet3}.0/24`, + gateway: `10.${octet2}.${octet3}.1`, + dhcpRangeStart: `10.${octet2}.${octet3}.100`, + dhcpRangeEnd: `10.${octet2}.${octet3}.254`, + octet2, + octet3, + }; +}; + +// Error types +export class NetworkManagerError extends Error { + constructor( + message: string, + public readonly tenantId?: string + ) { + super(message); + this.name = 'NetworkManagerError'; + } +} + +// Bridge configuration +interface BridgeConfig { + readonly name: string; + readonly vlan?: number; + readonly subnet: string; + readonly gateway: string; + readonly dhcp: { + readonly enabled: boolean; + readonly rangeStart?: string; + readonly rangeEnd?: string; + }; +} + +// Generate bridge netdev configuration +const generateNetdevConfig = (config: BridgeConfig): string => ` +[NetDev] +Name=${config.name} +Kind=bridge +Description=BitBuilder Hypervisor tenant bridge + +[Bridge] +ForwardDelay=0 +HelloTime=1 +MaxAge=12 +AgeingTime=30 +STP=false +`; + +// Generate bridge network configuration +const generateNetworkConfig = (config: BridgeConfig): string => ` +[Match] +Name=${config.name} + +[Network] +Description=BitBuilder Hypervisor tenant network +DHCP=no +IPv6AcceptRA=no +LinkLocalAddressing=no + +[Address] +Address=${config.gateway}/24 + +[Route] +Destination=${config.subnet} +`; + +// Generate VLAN netdev configuration +const generateVlanNetdevConfig = ( + parentInterface: string, + vlanId: number +): string => ` +[NetDev] +Name=${parentInterface}.${vlanId} +Kind=vlan +Description=BitBuilder Hypervisor VLAN ${vlanId} + +[VLAN] +Id=${vlanId} +`; + +// Create tenant bridge +export const createTenantBridge = (tenantId: string, vlan?: number) => + Effect.gen(function* () { + yield* logger.info(`Creating bridge for tenant: ${tenantId}`); + + const networkId = generateTenantNetworkId(tenantId); + const bridgeName = `${TENANT_NETWORK_PREFIX}-${tenantId}`.slice(0, 15); + const subnetInfo = generateSubnetInfo(networkId); + + const config: BridgeConfig = { + name: bridgeName, + vlan, + subnet: subnetInfo.subnet, + gateway: subnetInfo.gateway, + dhcp: { + enabled: true, + rangeStart: subnetInfo.dhcpRangeStart, + rangeEnd: subnetInfo.dhcpRangeEnd, + }, + }; + + // Ensure network config directory exists + yield* ensureDirectory(NETWORKD_CONFIG_DIR); + + // Write netdev configuration + const netdevPath = NodePath.join( + NETWORKD_CONFIG_DIR, + `50-${bridgeName}.netdev` + ); + yield* Effect.tryPromise({ + try: () => + NodeFS.promises.writeFile( + netdevPath, + generateNetdevConfig(config), + 'utf8' + ), + catch: (error) => + new NetworkManagerError( + `Failed to write netdev config: ${error}`, + tenantId + ), + }); + + // Write network configuration + const networkPath = NodePath.join( + NETWORKD_CONFIG_DIR, + `50-${bridgeName}.network` + ); + yield* Effect.tryPromise({ + try: () => + NodeFS.promises.writeFile( + networkPath, + generateNetworkConfig(config), + 'utf8' + ), + catch: (error) => + new NetworkManagerError( + `Failed to write network config: ${error}`, + tenantId + ), + }); + + // Reload networkd + yield* reloadNetworkd(); + + yield* logger.info(`Bridge created for tenant ${tenantId}: 
${bridgeName}`); + + return config; + }); + +// Delete tenant bridge +export const deleteTenantBridge = (tenantId: string) => + Effect.gen(function* () { + yield* logger.info(`Deleting bridge for tenant: ${tenantId}`); + + const bridgeName = `${TENANT_NETWORK_PREFIX}-${tenantId}`.slice(0, 15); + + // Remove network configuration files + const netdevPath = NodePath.join( + NETWORKD_CONFIG_DIR, + `50-${bridgeName}.netdev` + ); + const networkPath = NodePath.join( + NETWORKD_CONFIG_DIR, + `50-${bridgeName}.network` + ); + + yield* Effect.tryPromise({ + try: () => NodeFS.promises.unlink(netdevPath), + catch: () => undefined, // Ignore if file doesn't exist + }); + + yield* Effect.tryPromise({ + try: () => NodeFS.promises.unlink(networkPath), + catch: () => undefined, // Ignore if file doesn't exist + }); + + // Reload networkd + yield* reloadNetworkd(); + + yield* logger.info(`Bridge deleted for tenant: ${tenantId}`); + }); + +// Reload systemd-networkd +export const reloadNetworkd = () => + Effect.async((resume) => { + const proc = spawn('networkctl', ['reload'], { + stdio: ['pipe', 'pipe', 'pipe'], + }); + + let stderr = ''; + + proc.stderr?.on('data', (data) => { + stderr += data.toString(); + }); + + proc.on('close', (code) => { + if (code === 0) { + resume(Effect.succeed(undefined)); + } else { + resume( + Effect.fail( + new SystemdError( + `networkctl reload failed: ${stderr}`, + 'networkctl reload' + ) + ) + ); + } + }); + + proc.on('error', (error) => { + resume( + Effect.fail( + new SystemdError( + `Failed to execute networkctl: ${error.message}`, + 'networkctl reload' + ) + ) + ); + }); + }); + +// Configure network interfaces for a tenant +export const configureInterfaces = ( + tenantId: string, + interfaces: readonly NetworkInterface[] +) => + Effect.gen(function* () { + yield* logger.info( + `Configuring ${interfaces.length} interfaces for tenant: ${tenantId}` + ); + + const networkId = generateTenantNetworkId(tenantId); + + for (let i = 0; i < interfaces.length; i++) { + const iface = interfaces[i]!; + const macAddress = + iface.mac === 'auto' ? generateMacAddress(tenantId, i) : iface.mac; + + yield* logger.debug( + `Interface ${iface.name}: MAC=${macAddress}, IPv4=${typeof iface.ipv4 === 'string' ? iface.ipv4 : 'static'}` + ); + + // Generate interface configuration + const interfaceConfig = generateInterfaceConfig(tenantId, iface, i); + + const configPath = NodePath.join( + NETWORKD_CONFIG_DIR, + `60-tenant-${tenantId}-${iface.name}.network` + ); + + yield* Effect.tryPromise({ + try: () => + NodeFS.promises.writeFile(configPath, interfaceConfig, 'utf8'), + catch: (error) => + new NetworkManagerError( + `Failed to write interface config: ${error}`, + tenantId + ), + }); + } + + // Reload networkd to apply changes + yield* reloadNetworkd(); + + yield* logger.info(`Interfaces configured for tenant: ${tenantId}`); + }); + +// Generate interface-specific network configuration +const generateInterfaceConfig = ( + tenantId: string, + iface: NetworkInterface, + index: number +): string => { + const macAddress = + iface.mac === 'auto' ? 
generateMacAddress(tenantId, index) : iface.mac; + + let config = ` +[Match] +Name=${iface.name} +MACAddress=${macAddress} + +[Network] +Description=Tenant ${tenantId} interface ${iface.name} +`; + + // IPv4 configuration + if (iface.ipv4 === 'dhcp') { + config += `DHCP=ipv4\n`; + } else if (typeof iface.ipv4 === 'object') { + config += ` +[Address] +Address=${iface.ipv4.address} +`; + if (iface.ipv4.gateway) { + config += ` +[Route] +Gateway=${iface.ipv4.gateway} +`; + } + } + + // IPv6 configuration + if (iface.ipv6 === 'disabled') { + config += `IPv6AcceptRA=no\nLinkLocalAddressing=no\n`; + } else if (iface.ipv6 === 'dhcp') { + config += `DHCP=ipv6\nIPv6AcceptRA=yes\n`; + } else if (typeof iface.ipv6 === 'object') { + config += ` +[Address] +Address=${iface.ipv6.address} +`; + if (iface.ipv6.gateway) { + config += ` +[Route] +Gateway=${iface.ipv6.gateway} +`; + } + } + + return config; +}; + +// Get network status for a tenant +export const getTenantNetworkStatus = (tenantId: string) => + Effect.async((resume) => { + const bridgeName = `${TENANT_NETWORK_PREFIX}-${tenantId}`.slice(0, 15); + + const proc = spawn('networkctl', ['status', bridgeName, '--json=short'], { + stdio: ['pipe', 'pipe', 'pipe'], + }); + + let stdout = ''; + let stderr = ''; + + proc.stdout?.on('data', (data) => { + stdout += data.toString(); + }); + + proc.stderr?.on('data', (data) => { + stderr += data.toString(); + }); + + proc.on('close', (code) => { + if (code === 0) { + try { + const status = JSON.parse(stdout) as NetworkStatus; + resume(Effect.succeed(status)); + } catch (e) { + resume( + Effect.fail( + new NetworkManagerError( + `Failed to parse network status: ${e}`, + tenantId + ) + ) + ); + } + } else { + resume( + Effect.fail( + new NetworkManagerError(`Failed to get network status: ${stderr}`, tenantId) + ) + ); + } + }); + + proc.on('error', (error) => { + resume( + Effect.fail( + new NetworkManagerError( + `Failed to execute networkctl: ${error.message}`, + tenantId + ) + ) + ); + }); + }); + +// Network status type +interface NetworkStatus { + readonly Name: string; + readonly Type: string; + readonly State: string; + readonly Addresses?: readonly { + readonly Address: string; + readonly PrefixLength: number; + readonly Scope: string; + }[]; +} + +// Export for external use +export default { + createTenantBridge, + deleteTenantBridge, + configureInterfaces, + getTenantNetworkStatus, + reloadNetworkd, +}; diff --git a/src/schemas/index.ts b/src/schemas/index.ts new file mode 100644 index 0000000..79840e1 --- /dev/null +++ b/src/schemas/index.ts @@ -0,0 +1,237 @@ +/** + * Schema definitions for BitBuilder Hypervisor + * Using Effect-TS Schema for runtime validation and type safety + */ + +import { Schema } from '@effect/schema'; + +// Brand types for domain modeling +export const TenantId = Schema.String.pipe( + Schema.pattern(/^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/), + Schema.minLength(3), + Schema.maxLength(63), + Schema.brand('TenantId') +); + +export const RepositoryUrl = Schema.String.pipe( + Schema.pattern(/^(https?:\/\/|git@|ssh:\/\/)/), + Schema.brand('RepositoryUrl') +); + +export const SystemdUnitName = Schema.String.pipe( + Schema.pattern( + /^[a-zA-Z0-9:_.\\@-]+\.(service|socket|timer|mount|automount|path|slice|scope|target|device)$/ + ), + Schema.brand('SystemdUnitName') +); + +// Resource limits schema +export const ResourceLimitsSchema = Schema.Struct({ + cpu: Schema.Struct({ + cores: Schema.Number.pipe(Schema.positive(), Schema.int()), + shares: Schema.Number.pipe(Schema.between(2, 262144)), + 
}), + memory: Schema.Struct({ + limit: Schema.String.pipe(Schema.pattern(/^\d+[KMGT]?B?$/i)), + swap: Schema.optional( + Schema.String.pipe(Schema.pattern(/^\d+[KMGT]?B?$/i)) + ), + }), + storage: Schema.Struct({ + root: Schema.String.pipe(Schema.pattern(/^\d+[KMGT]?B?$/i)), + data: Schema.optional( + Schema.String.pipe(Schema.pattern(/^\d+[KMGT]?B?$/i)) + ), + }), + network: Schema.optional( + Schema.Struct({ + bandwidth: Schema.optional( + Schema.String.pipe(Schema.pattern(/^\d+[KMGT]?bps$/i)) + ), + connections: Schema.optional( + Schema.Number.pipe(Schema.positive(), Schema.int()) + ), + }) + ), +}); + +// Network interface schema +export const NetworkInterfaceSchema = Schema.Struct({ + name: Schema.String.pipe(Schema.pattern(/^[a-z0-9]+$/), Schema.maxLength(15)), + mac: Schema.Union( + Schema.Literal('auto'), + Schema.String.pipe( + Schema.pattern(/^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$/) + ) + ), + ipv4: Schema.Union( + Schema.Literal('dhcp'), + Schema.Literal('static'), + Schema.Struct({ + address: Schema.String.pipe( + Schema.pattern(/^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\/\d{1,2}$/) + ), + gateway: Schema.optional( + Schema.String.pipe( + Schema.pattern(/^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$/) + ) + ), + dns: Schema.optional( + Schema.Array( + Schema.String.pipe( + Schema.pattern(/^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$/) + ) + ) + ), + }) + ), + ipv6: Schema.Union( + Schema.Literal('dhcp'), + Schema.Literal('static'), + Schema.Literal('disabled'), + Schema.Struct({ + address: Schema.String.pipe( + Schema.pattern(/^([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}\/\d{1,3}$/) + ), + gateway: Schema.optional( + Schema.String.pipe( + Schema.pattern(/^([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$/) + ) + ), + dns: Schema.optional(Schema.Array(Schema.String)), + }) + ), + vlan: Schema.optional(Schema.Number.pipe(Schema.between(1, 4094))), +}); + +// Security context schema +export const SecurityContextSchema = Schema.Struct({ + selinux_context: Schema.optional(Schema.String), + capabilities: Schema.Array( + Schema.String.pipe(Schema.pattern(/^CAP_[A-Z_]+$/)) + ), + syscalls: Schema.Struct({ + allow: Schema.Array(Schema.String), + deny: Schema.Array(Schema.String), + }), + namespaces: Schema.Struct({ + pid: Schema.Boolean, + net: Schema.Boolean, + mnt: Schema.Boolean, + uts: Schema.Boolean, + ipc: Schema.Boolean, + user: Schema.Boolean, + }), +}); + +// Main tenant metadata schema +export const TenantMetadataSchema = Schema.Struct({ + version: Schema.String.pipe(Schema.pattern(/^\d+\.\d+(\.\d+)?$/)), + tenant: Schema.Struct({ + id: TenantId, + name: Schema.String.pipe(Schema.minLength(1), Schema.maxLength(255)), + type: Schema.Literal('vm', 'container', 'hybrid'), + enabled: Schema.Boolean, + description: Schema.optional(Schema.String.pipe(Schema.maxLength(1000))), + }), + resources: ResourceLimitsSchema, + network: Schema.Struct({ + mode: Schema.Literal('bridge', 'nat', 'host', 'isolated'), + interfaces: Schema.Array(NetworkInterfaceSchema), + }), + extensions: Schema.Struct({ + sysext: Schema.Array(Schema.String.pipe(Schema.pattern(/^[a-z0-9-]+$/))), + confext: Schema.Array(Schema.String.pipe(Schema.pattern(/^[a-z0-9-]+$/))), + }), + services: Schema.Struct({ + portable: Schema.Array(Schema.String.pipe(Schema.pattern(/^[a-z0-9-]+$/))), + systemd: Schema.Array(SystemdUnitName), + }), + security: SecurityContextSchema, + lifecycle: Schema.optional( + Schema.Struct({ + pre_provision: Schema.optional(Schema.String), + post_provision: Schema.optional(Schema.String), + pre_stop: 
Schema.optional(Schema.String), + post_stop: Schema.optional(Schema.String), + }) + ), +}); + +// Tenant registry schema +export const TenantRegistrySchema = Schema.Struct({ + version: Schema.String, + updated_at: Schema.DateFromString, + tenants: Schema.Array( + Schema.Struct({ + id: TenantId, + name: Schema.String, + repository: RepositoryUrl, + branch: Schema.String.pipe(Schema.pattern(/^[a-zA-Z0-9/_.-]+$/)), + enabled: Schema.Boolean, + last_sync: Schema.DateFromString, + }) + ), +}); + +// System configuration schema +export const SystemConfigSchema = Schema.Struct({ + version: Schema.String, + system: Schema.Struct({ + host_id: Schema.String.pipe(Schema.pattern(/^[a-zA-Z0-9-]+$/)), + cluster_mode: Schema.Boolean, + git_ops: Schema.Struct({ + system_repo: RepositoryUrl, + sync_interval: Schema.Number.pipe(Schema.positive()), + auto_update: Schema.Boolean, + }), + }), + defaults: Schema.Struct({ + tenant_resources: ResourceLimitsSchema, + network_mode: Schema.Literal('bridge', 'nat', 'host', 'isolated'), + security_context: SecurityContextSchema, + }), + extensions: Schema.Struct({ + system_sysext: Schema.Array(Schema.String), + system_confext: Schema.Array(Schema.String), + }), +}); + +// Git-ops configuration schema +export const GitOpsConfigSchema = Schema.Struct({ + repository: RepositoryUrl, + branch: Schema.String, + sync_interval: Schema.Number.pipe(Schema.positive()), + webhook_secret: Schema.optional(Schema.String), + ssh_key_path: Schema.optional(Schema.String), + auto_rollback: Schema.Boolean, + validation: Schema.Struct({ + schema_check: Schema.Boolean, + syntax_check: Schema.Boolean, + security_scan: Schema.Boolean, + }), +}); + +// Export type inference +export type TenantMetadataType = Schema.Schema.Type; +export type TenantRegistryType = Schema.Schema.Type; +export type SystemConfigType = Schema.Schema.Type; +export type GitOpsConfigType = Schema.Schema.Type; + +// Validation functions (Effect-based - must be executed with Effect.runPromise/runSync) +export const validateTenantMetadata = + Schema.decodeUnknown(TenantMetadataSchema); +export const validateTenantRegistry = + Schema.decodeUnknown(TenantRegistrySchema); +export const validateSystemConfig = Schema.decodeUnknown(SystemConfigSchema); +export const validateGitOpsConfig = Schema.decodeUnknown(GitOpsConfigSchema); + +// Synchronous validation functions for immediate validation +export const validateTenantMetadataSync = + Schema.decodeUnknownSync(TenantMetadataSchema); +export const validateTenantRegistrySync = + Schema.decodeUnknownSync(TenantRegistrySchema); +export const validateSystemConfigSync = + Schema.decodeUnknownSync(SystemConfigSchema); +export const validateGitOpsConfigSync = + Schema.decodeUnknownSync(GitOpsConfigSchema); diff --git a/src/services/tenant-manager.ts b/src/services/tenant-manager.ts new file mode 100644 index 0000000..c5270d5 --- /dev/null +++ b/src/services/tenant-manager.ts @@ -0,0 +1,450 @@ +/** + * Tenant Manager Service + * Orchestrates tenant lifecycle operations including: + * - Tenant provisioning + * - Configuration synchronization + * - Health monitoring + * - Graceful shutdown + */ + +import { Effect, pipe, Console, Schedule, Duration, Fiber } from 'effect'; +import * as NodeFS from 'node:fs'; +import * as NodePath from 'node:path'; +import { TenantMetadataSchema } from '#schemas/index'; +import { + createLogger, + ensureDirectory, + pathExists, + gitClone, + gitPull, + getGitCommitHash, + systemctl, + daemonReload, + enableService, + startService, + stopService, + 
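+ // '#utils/index' resolves to src/utils/index.ts via the '#utils/*' path mapping declared in tsconfig.json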
isServiceActive, + gracefulShutdown, + GitError, + SystemdError, +} from '#utils/index'; +import type { TenantMetadata, TenantStatus } from '#types/index'; +import { Schema } from '@effect/schema'; + +const logger = createLogger('tenant-manager'); + +// Configuration +const TENANT_BASE_DIR = + process.env['TENANT_BASE_DIR'] ?? '/var/lib/tenants'; +const GIT_SYNC_INTERVAL = parseInt( + process.env['GIT_SYNC_INTERVAL'] ?? '300', + 10 +); +const HEALTH_CHECK_INTERVAL = parseInt( + process.env['HEALTH_CHECK_INTERVAL'] ?? '60', + 10 +); + +// Error types +export class TenantManagerError extends Error { + constructor( + message: string, + public readonly tenantId: string + ) { + super(message); + this.name = 'TenantManagerError'; + } +} + +// Tenant runtime state +interface TenantState { + readonly id: string; + readonly status: TenantStatus; + readonly metadata: TenantMetadata; + readonly gitCommit: string; + readonly lastSync: Date; + readonly lastHealthCheck: Date; + readonly errors: readonly string[]; +} + +// Tenant manager service +export class TenantManager { + private readonly logger = createLogger('tenant-manager'); + private tenants: Map = new Map(); + private syncFiber: Fiber.RuntimeFiber | null = null; + private healthFiber: Fiber.RuntimeFiber | null = null; + + constructor() { + // Set up graceful shutdown + gracefulShutdown(() => this.shutdown()); + } + + // Initialize the tenant manager + readonly start = () => + Effect.gen(this, function* () { + yield* this.logger.info('Starting tenant manager service'); + + // Discover and load existing tenants + yield* this.discoverTenants(); + + // Start background sync and health monitoring + const syncSchedule = Schedule.fixed( + Duration.seconds(GIT_SYNC_INTERVAL) + ); + const healthSchedule = Schedule.fixed( + Duration.seconds(HEALTH_CHECK_INTERVAL) + ); + + // Start sync loop + this.syncFiber = yield* Effect.fork( + this.runSyncLoop().pipe(Effect.schedule(syncSchedule)) + ); + + // Start health check loop + this.healthFiber = yield* Effect.fork( + this.runHealthChecks().pipe(Effect.schedule(healthSchedule)) + ); + + yield* this.logger.info('Tenant manager started successfully'); + }); + + // Graceful shutdown + readonly shutdown = () => + Effect.gen(this, function* () { + yield* this.logger.info('Shutting down tenant manager'); + + // Cancel background fibers + if (this.syncFiber) { + yield* Fiber.interrupt(this.syncFiber); + } + if (this.healthFiber) { + yield* Fiber.interrupt(this.healthFiber); + } + + // Stop all tenants gracefully + yield* Effect.forEach( + Array.from(this.tenants.keys()), + (tenantId) => this.stopTenant(tenantId), + { concurrency: 5 } + ); + + yield* this.logger.info('Tenant manager shutdown complete'); + }); + + // Discover tenants from the filesystem + private readonly discoverTenants = () => + Effect.gen(this, function* () { + const exists = yield* pathExists(TENANT_BASE_DIR); + if (!exists) { + yield* this.logger.info( + `Tenant base directory doesn't exist: ${TENANT_BASE_DIR}` + ); + return; + } + + const entries = yield* Effect.tryPromise({ + try: () => + NodeFS.promises.readdir(TENANT_BASE_DIR, { withFileTypes: true }), + catch: (error) => + new TenantManagerError( + `Failed to read tenant directory: ${error}`, + 'system' + ), + }); + + const tenantDirs = entries.filter((entry) => entry.isDirectory()); + + yield* Effect.forEach( + tenantDirs, + (entry) => + this.loadTenant(entry.name).pipe( + Effect.catchAll((error) => + pipe( + this.logger.warn( + `Failed to load tenant ${entry.name}: ${error}` + ), + 
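+ // swallow per-tenant load failures (warning already logged above) so one broken tenant does not abort discovery of the rest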
Effect.map(() => undefined) + ) + ) + ), + { concurrency: 'unbounded' } + ); + + yield* this.logger.info(`Discovered ${this.tenants.size} tenants`); + }); + + // Load a single tenant + private readonly loadTenant = (tenantId: string) => + Effect.gen(this, function* () { + const tenantPath = NodePath.join(TENANT_BASE_DIR, tenantId); + const metadataPath = NodePath.join(tenantPath, 'metadata.json'); + + // Check if metadata exists + const metadataExists = yield* pathExists(metadataPath); + if (!metadataExists) { + yield* this.logger.warn( + `Tenant ${tenantId} missing metadata.json, skipping` + ); + return; + } + + // Load and validate metadata + const metadataContent = yield* Effect.tryPromise({ + try: () => NodeFS.promises.readFile(metadataPath, 'utf8'), + catch: (error) => + new TenantManagerError(`Failed to read metadata: ${error}`, tenantId), + }); + + const metadataJson = yield* Effect.try({ + try: () => JSON.parse(metadataContent) as unknown, + catch: (error) => + new TenantManagerError( + `Failed to parse metadata: ${error}`, + tenantId + ), + }); + + const metadata = yield* pipe( + Schema.decodeUnknown(TenantMetadataSchema)(metadataJson), + Effect.mapError( + (error) => + new TenantManagerError( + `Invalid tenant metadata: ${error.message}`, + tenantId + ) + ) + ); + + // Get git commit hash + const gitCommit = yield* getGitCommitHash(tenantPath).pipe( + Effect.catchAll(() => Effect.succeed('unknown')) + ); + + // Create tenant state + const state: TenantState = { + id: tenantId, + status: metadata.tenant.enabled ? 'pending' : 'stopped', + metadata, + gitCommit, + lastSync: new Date(), + lastHealthCheck: new Date(), + errors: [], + }; + + this.tenants.set(tenantId, state); + yield* this.logger.info(`Loaded tenant: ${tenantId}`); + }); + + // Provision a tenant + readonly provisionTenant = (tenantId: string) => + Effect.gen(this, function* () { + yield* this.logger.info(`Provisioning tenant: ${tenantId}`); + + const state = this.tenants.get(tenantId); + if (!state) { + yield* Effect.fail( + new TenantManagerError(`Tenant not found: ${tenantId}`, tenantId) + ); + return; + } + + // Update state to provisioning + this.updateTenantState(tenantId, { status: 'provisioning' }); + + // Ensure tenant directories exist + const tenantPath = NodePath.join(TENANT_BASE_DIR, tenantId); + yield* ensureDirectory(NodePath.join(tenantPath, 'extensions', 'sysext')); + yield* ensureDirectory(NodePath.join(tenantPath, 'extensions', 'confext')); + yield* ensureDirectory(NodePath.join(tenantPath, 'infra')); + yield* ensureDirectory(NodePath.join(tenantPath, 'runtime')); + + // Reload systemd to pick up new units + yield* daemonReload().pipe( + Effect.catchAll((error) => { + this.logger.warn(`Failed to reload systemd: ${error}`); + return Effect.succeed(undefined); + }) + ); + + // Enable tenant services + yield* enableService(`tenant@${tenantId}.service`).pipe( + Effect.catchAll((error) => { + this.logger.warn(`Failed to enable tenant service: ${error}`); + return Effect.succeed(undefined); + }) + ); + + // Start network service + yield* startService(`tenant-network@${tenantId}.service`).pipe( + Effect.catchAll((error) => { + this.logger.warn(`Failed to start tenant network: ${error}`); + return Effect.succeed(undefined); + }) + ); + + // Update state to active + this.updateTenantState(tenantId, { status: 'active' }); + + yield* this.logger.info(`Tenant provisioned successfully: ${tenantId}`); + }); + + // Stop a tenant + readonly stopTenant = (tenantId: string) => + Effect.gen(this, function* () { + 
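+ // stop order: the infra service first, then the tenant unit itself, then its network service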
yield* this.logger.info(`Stopping tenant: ${tenantId}`); + + const state = this.tenants.get(tenantId); + if (!state) { + yield* this.logger.warn(`Tenant not found for stop: ${tenantId}`); + return; + } + + // Update state to stopping + this.updateTenantState(tenantId, { status: 'stopping' }); + + // Stop tenant services + yield* stopService(`tenant-infra@${tenantId}.service`).pipe( + Effect.catchAll(() => Effect.succeed(undefined)) + ); + yield* stopService(`tenant@${tenantId}.service`).pipe( + Effect.catchAll(() => Effect.succeed(undefined)) + ); + yield* stopService(`tenant-network@${tenantId}.service`).pipe( + Effect.catchAll(() => Effect.succeed(undefined)) + ); + + // Update state to stopped + this.updateTenantState(tenantId, { status: 'stopped' }); + + yield* this.logger.info(`Tenant stopped: ${tenantId}`); + }); + + // Get tenant status + readonly getTenantStatus = (tenantId: string) => + Effect.sync(() => { + const state = this.tenants.get(tenantId); + return state ?? null; + }); + + // List all tenants + readonly listTenants = () => + Effect.sync(() => Array.from(this.tenants.values())); + + // Sync configuration from git + private readonly runSyncLoop = () => + Effect.gen(this, function* () { + yield* this.logger.debug('Running git sync cycle'); + + for (const [tenantId, state] of this.tenants) { + if (state.status !== 'active') continue; + + const tenantPath = NodePath.join(TENANT_BASE_DIR, tenantId); + + yield* gitPull(tenantPath).pipe( + Effect.flatMap(() => + Effect.gen(this, function* () { + const newCommit = yield* getGitCommitHash(tenantPath); + + if (newCommit !== state.gitCommit) { + yield* this.logger.info( + `Configuration updated for tenant ${tenantId}: ${state.gitCommit} -> ${newCommit}` + ); + + // Reload tenant configuration + yield* this.loadTenant(tenantId); + + // Trigger service reload + yield* systemctl('reload', `tenant@${tenantId}.service`).pipe( + Effect.catchAll(() => Effect.succeed(undefined)) + ); + } + }) + ), + Effect.catchAll((error) => { + this.logger.warn(`Git sync failed for tenant ${tenantId}: ${error}`); + return Effect.succeed(undefined); + }) + ); + } + }); + + // Run health checks on all tenants + private readonly runHealthChecks = () => + Effect.gen(this, function* () { + yield* this.logger.debug('Running health checks'); + + for (const [tenantId, state] of this.tenants) { + if (state.status !== 'active') continue; + + const isActive = yield* isServiceActive( + `tenant@${tenantId}.service` + ); + + if (!isActive && state.status === 'active') { + yield* this.logger.warn( + `Tenant ${tenantId} service is not running but should be active` + ); + + // Update state with error (limit to last 100 errors to prevent memory leak) + const maxErrors = 100; + const existingErrors = state.errors.slice(-maxErrors + 1); + this.updateTenantState(tenantId, { + status: 'error', + errors: [...existingErrors, `Service not running at ${new Date().toISOString()}`], + }); + + // Attempt to restart + yield* startService(`tenant@${tenantId}.service`).pipe( + Effect.catchAll((error) => + pipe( + this.logger.error( + `Failed to restart tenant ${tenantId}: ${error}` + ), + Effect.map(() => undefined) + ) + ) + ); + } + + // Update last health check time + this.updateTenantState(tenantId, { lastHealthCheck: new Date() }); + } + }); + + // Update tenant state with type-safe merging + private updateTenantState( + tenantId: string, + updates: Partial> + ): void { + const state = this.tenants.get(tenantId); + if (state) { + const updatedState: TenantState = { + ...state, + 
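+ // shallow merge: properties present in 'updates' override the previous state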
...updates, + }; + this.tenants.set(tenantId, updatedState); + } + } +} + +// Main entry point +export const run = () => { + const manager = new TenantManager(); + return manager.start(); +}; + +// CLI execution +if (import.meta.main) { + const manager = new TenantManager(); + + Effect.runPromise(manager.start()) + .then(() => { + console.log('Tenant manager is running. Press Ctrl+C to stop.'); + }) + .catch((error) => { + console.error('Fatal error in tenant-manager:', error); + process.exit(1); + }); +} + +export default run; diff --git a/src/templates/index.ts b/src/templates/index.ts new file mode 100644 index 0000000..982917c --- /dev/null +++ b/src/templates/index.ts @@ -0,0 +1,332 @@ +/** + * Template Management for BitBuilder Hypervisor + * Handles tenant template loading, validation, and instantiation + */ + +import { Effect, pipe } from 'effect'; +import * as NodeFS from 'node:fs'; +import * as NodePath from 'node:path'; +import { Schema } from '@effect/schema'; +import { TenantMetadataSchema } from '#schemas/index'; +import { + createLogger, + ensureDirectory, + pathExists, + gitClone, + renderTemplate, + ValidationError, +} from '#utils/index'; +import type { TenantMetadata, TenantType } from '#types/index'; + +const logger = createLogger('template-manager'); + +// Configuration +const TEMPLATES_DIR = + process.env['TEMPLATES_DIR'] ?? '/usr/lib/bitbuilder/templates'; +const TENANT_BASE_DIR = + process.env['TENANT_BASE_DIR'] ?? '/var/lib/tenants'; + +// Error types +export class TemplateError extends Error { + constructor( + message: string, + public readonly templateName?: string + ) { + super(message); + this.name = 'TemplateError'; + } +} + +// Template metadata +interface TemplateMetadata { + readonly name: string; + readonly version: string; + readonly description: string; + readonly type: TenantType; + readonly maintainers: readonly string[]; + readonly variables: readonly { + readonly name: string; + readonly description: string; + readonly required: boolean; + readonly default?: string; + }[]; + readonly files: readonly string[]; +} + +// List available templates +export const listTemplates = () => + Effect.gen(function* () { + yield* logger.info('Listing available templates'); + + const exists = yield* pathExists(TEMPLATES_DIR); + if (!exists) { + yield* logger.warn(`Templates directory does not exist: ${TEMPLATES_DIR}`); + return []; + } + + const entries = yield* Effect.tryPromise({ + try: () => + NodeFS.promises.readdir(TEMPLATES_DIR, { withFileTypes: true }), + catch: (error) => + new TemplateError(`Failed to read templates directory: ${error}`), + }); + + const templateDirs = entries.filter((entry) => entry.isDirectory()); + + const templates: TemplateMetadata[] = []; + + for (const entry of templateDirs) { + const metadataPath = NodePath.join( + TEMPLATES_DIR, + entry.name, + 'template.json' + ); + + const metadataExists = yield* pathExists(metadataPath); + if (!metadataExists) { + yield* logger.debug( + `Template ${entry.name} missing template.json, skipping` + ); + continue; + } + + const metadata = yield* loadTemplateMetadata(entry.name).pipe( + Effect.catchAll((error) => { + logger.warn(`Failed to load template ${entry.name}: ${error}`); + return Effect.succeed(null); + }) + ); + + if (metadata) { + templates.push(metadata); + } + } + + yield* logger.info(`Found ${templates.length} templates`); + return templates; + }); + +// Load template metadata +export const loadTemplateMetadata = (templateName: string) => + Effect.gen(function* () { + const metadataPath = 
NodePath.join( + TEMPLATES_DIR, + templateName, + 'template.json' + ); + + const content = yield* Effect.tryPromise({ + try: () => NodeFS.promises.readFile(metadataPath, 'utf8'), + catch: (error) => + new TemplateError( + `Failed to read template metadata: ${error}`, + templateName + ), + }); + + const metadata = yield* Effect.try({ + try: () => JSON.parse(content) as TemplateMetadata, + catch: (error) => + new TemplateError( + `Failed to parse template metadata: ${error}`, + templateName + ), + }); + + return metadata; + }); + +// Instantiate a template for a new tenant +export const instantiateTemplate = ( + templateName: string, + tenantId: string, + variables: Record +) => + Effect.gen(function* () { + yield* logger.info( + `Instantiating template ${templateName} for tenant ${tenantId}` + ); + + // Load template metadata + const metadata = yield* loadTemplateMetadata(templateName); + + // Validate required variables + for (const variable of metadata.variables) { + if (variable.required && !(variable.name in variables)) { + if (variable.default === undefined) { + yield* Effect.fail( + new TemplateError( + `Missing required variable: ${variable.name}`, + templateName + ) + ); + } else { + variables[variable.name] = variable.default; + } + } + } + + // Add standard variables + const allVariables: Record = { + ...variables, + TENANT_ID: tenantId, + TEMPLATE_NAME: templateName, + TEMPLATE_VERSION: metadata.version, + }; + + // Create tenant directory + const tenantPath = NodePath.join(TENANT_BASE_DIR, tenantId); + yield* ensureDirectory(tenantPath); + + // Copy and render template files + const templatePath = NodePath.join(TEMPLATES_DIR, templateName); + + for (const file of metadata.files) { + yield* copyTemplateFile( + NodePath.join(templatePath, file), + NodePath.join(tenantPath, file), + allVariables + ); + } + + yield* logger.info( + `Template ${templateName} instantiated for tenant ${tenantId}` + ); + + return tenantPath; + }); + +// Copy and render a template file +const copyTemplateFile = ( + sourcePath: string, + destPath: string, + variables: Record +) => + Effect.gen(function* () { + // Ensure destination directory exists + yield* ensureDirectory(NodePath.dirname(destPath)); + + // Read source file + const content = yield* Effect.tryPromise({ + try: () => NodeFS.promises.readFile(sourcePath, 'utf8'), + catch: (error) => + new TemplateError( + `Failed to read template file ${sourcePath}: ${error}` + ), + }); + + // Render template variables + const rendered = renderTemplate(content, variables); + + // Write destination file + yield* Effect.tryPromise({ + try: () => NodeFS.promises.writeFile(destPath, rendered, 'utf8'), + catch: (error) => + new TemplateError( + `Failed to write template file ${destPath}: ${error}` + ), + }); + + yield* logger.debug(`Copied template file: ${sourcePath} -> ${destPath}`); + }); + +// Validate a tenant configuration against its template +export const validateTenantConfig = (tenantId: string) => + Effect.gen(function* () { + yield* logger.info(`Validating tenant configuration: ${tenantId}`); + + const tenantPath = NodePath.join(TENANT_BASE_DIR, tenantId); + const metadataPath = NodePath.join(tenantPath, 'metadata.json'); + + // Check if metadata exists + const exists = yield* pathExists(metadataPath); + if (!exists) { + yield* Effect.fail( + new ValidationError( + `Tenant metadata not found: ${metadataPath}`, + 'TenantMetadata' + ) + ); + } + + // Load and validate metadata + const content = yield* Effect.tryPromise({ + try: () => 
NodeFS.promises.readFile(metadataPath, 'utf8'), + catch: (error) => + new ValidationError( + `Failed to read tenant metadata: ${error}`, + 'TenantMetadata' + ), + }); + + const metadataJson = yield* Effect.try({ + try: () => JSON.parse(content) as unknown, + catch: (error) => + new ValidationError( + `Invalid JSON in tenant metadata: ${error}`, + 'TenantMetadata' + ), + }); + + const metadata = yield* pipe( + Schema.decodeUnknown(TenantMetadataSchema)(metadataJson), + Effect.mapError( + (error) => + new ValidationError( + `Tenant metadata validation failed: ${error.message}`, + 'TenantMetadata' + ) + ) + ); + + yield* logger.info( + `Tenant configuration valid: ${tenantId} (${metadata.tenant.name})` + ); + + return metadata; + }); + +// Clone a template from a git repository +export const cloneTemplate = ( + repositoryUrl: string, + templateName: string, + branch = 'main' +) => + Effect.gen(function* () { + yield* logger.info(`Cloning template from ${repositoryUrl}`); + + const templatePath = NodePath.join(TEMPLATES_DIR, templateName); + + // Check if template already exists + const exists = yield* pathExists(templatePath); + if (exists) { + yield* Effect.fail( + new TemplateError( + `Template already exists: ${templateName}`, + templateName + ) + ); + } + + // Ensure templates directory exists + yield* ensureDirectory(TEMPLATES_DIR); + + // Clone repository + yield* gitClone(repositoryUrl, templatePath, branch); + + // Validate template metadata + yield* loadTemplateMetadata(templateName); + + yield* logger.info(`Template cloned successfully: ${templateName}`); + + return templatePath; + }); + +// Export for external use +export default { + listTemplates, + loadTemplateMetadata, + instantiateTemplate, + validateTenantConfig, + cloneTemplate, +}; diff --git a/src/tenant/index.ts b/src/tenant/index.ts new file mode 100644 index 0000000..2ebdf98 --- /dev/null +++ b/src/tenant/index.ts @@ -0,0 +1,204 @@ +/** + * Tenant Operations Module + * Provides high-level tenant lifecycle operations + */ + +import { Effect, pipe } from 'effect'; +import * as NodePath from 'node:path'; +import { createLogger, ensureDirectory, pathExists } from '#utils/index'; +import { + createTenantBridge, + deleteTenantBridge, + configureInterfaces, +} from '#network/index'; +import { + instantiateTemplate, + validateTenantConfig, +} from '#templates/index'; +import type { TenantMetadata, TenantStatus } from '#types/index'; + +const logger = createLogger('tenant-ops'); + +// Configuration +const TENANT_BASE_DIR = + process.env['TENANT_BASE_DIR'] ?? 
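+ // same '/var/lib/tenants' default as the tenant-manager and template modules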
'/var/lib/tenants'; + +// Error types +export class TenantOperationError extends Error { + constructor( + message: string, + public readonly tenantId: string, + public readonly operation: string + ) { + super(message); + this.name = 'TenantOperationError'; + } +} + +// Tenant creation options +export interface CreateTenantOptions { + readonly id: string; + readonly templateName?: string; + readonly variables?: Record; + readonly metadata?: Partial; +} + +// Create a new tenant +export const createTenant = (options: CreateTenantOptions) => + Effect.gen(function* () { + yield* logger.info(`Creating tenant: ${options.id}`); + + const tenantPath = NodePath.join(TENANT_BASE_DIR, options.id); + + // Check if tenant already exists + const exists = yield* pathExists(tenantPath); + if (exists) { + yield* Effect.fail( + new TenantOperationError( + `Tenant already exists: ${options.id}`, + options.id, + 'create' + ) + ); + } + + // Create tenant directory structure + yield* createTenantDirectoryStructure(options.id); + + // Instantiate template if specified + if (options.templateName) { + yield* instantiateTemplate( + options.templateName, + options.id, + options.variables ?? {} + ); + } + + // Create network bridge + yield* createTenantBridge(options.id); + + yield* logger.info(`Tenant created successfully: ${options.id}`); + + return tenantPath; + }); + +// Delete a tenant +export const deleteTenant = (tenantId: string, force = false) => + Effect.gen(function* () { + yield* logger.info(`Deleting tenant: ${tenantId}`); + + const tenantPath = NodePath.join(TENANT_BASE_DIR, tenantId); + + // Check if tenant exists + const exists = yield* pathExists(tenantPath); + if (!exists) { + if (force) { + yield* logger.warn(`Tenant does not exist: ${tenantId}`); + return; + } + yield* Effect.fail( + new TenantOperationError( + `Tenant does not exist: ${tenantId}`, + tenantId, + 'delete' + ) + ); + } + + // Delete network bridge + yield* deleteTenantBridge(tenantId).pipe( + Effect.catchAll((error) => { + logger.warn(`Failed to delete network bridge: ${error}`); + return Effect.succeed(undefined); + }) + ); + + // Remove tenant directory (would need to be implemented) + yield* logger.info(`Tenant deleted: ${tenantId}`); + }); + +// Create tenant directory structure +const createTenantDirectoryStructure = (tenantId: string) => + Effect.gen(function* () { + const tenantPath = NodePath.join(TENANT_BASE_DIR, tenantId); + + const directories = [ + '', + 'config.git', + 'extensions/sysext', + 'extensions/confext', + 'infra', + 'infra/rootfs', + 'runtime', + 'overlay', + 'overlay/usr', + 'overlay/etc', + 'tmp', + 'data', + 'logs', + ]; + + for (const dir of directories) { + yield* ensureDirectory(NodePath.join(tenantPath, dir)); + } + + yield* logger.debug(`Created directory structure for tenant: ${tenantId}`); + }); + +// Get tenant status +export const getTenantStatus = (tenantId: string) => + Effect.gen(function* () { + const tenantPath = NodePath.join(TENANT_BASE_DIR, tenantId); + + const exists = yield* pathExists(tenantPath); + if (!exists) { + yield* Effect.fail( + new TenantOperationError( + `Tenant does not exist: ${tenantId}`, + tenantId, + 'status' + ) + ); + } + + // Validate tenant configuration + const metadata = yield* validateTenantConfig(tenantId).pipe( + Effect.catchAll(() => Effect.succeed(null)) + ); + + const status: TenantStatus = metadata?.tenant.enabled ? 
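+ // derived from the metadata 'enabled' flag only; live unit state is tracked separately by TenantManager's health checks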
'active' : 'stopped'; + + return { + id: tenantId, + status, + metadata, + path: tenantPath, + }; + }); + +// Provision tenant (prepare for execution) +export const provisionTenant = (tenantId: string) => + Effect.gen(function* () { + yield* logger.info(`Provisioning tenant: ${tenantId}`); + + // Validate tenant configuration + const metadata = yield* validateTenantConfig(tenantId); + + // Configure network interfaces + if (metadata.network.interfaces.length > 0) { + yield* configureInterfaces(tenantId, metadata.network.interfaces); + } + + yield* logger.info(`Tenant provisioned: ${tenantId}`); + + return metadata; + }); + +// Export for external use +export default { + createTenant, + deleteTenant, + getTenantStatus, + provisionTenant, + createTenantDirectoryStructure, +}; diff --git a/src/types/index.ts b/src/types/index.ts new file mode 100644 index 0000000..7f30475 --- /dev/null +++ b/src/types/index.ts @@ -0,0 +1,161 @@ +/** + * Core types for BitBuilder Hypervisor + * Defines the fundamental data structures and interfaces + */ + +export type TenantType = 'vm' | 'container' | 'hybrid'; +export type NetworkMode = 'bridge' | 'nat' | 'host' | 'isolated'; +export type TenantStatus = + | 'pending' + | 'provisioning' + | 'active' + | 'stopping' + | 'stopped' + | 'error'; +export type LogLevel = 'DEBUG' | 'INFO' | 'WARN' | 'ERROR'; + +export interface ResourceLimits { + readonly cpu: { + readonly cores: number; + readonly shares: number; + }; + readonly memory: { + readonly limit: string; + readonly swap?: string; + }; + readonly storage: { + readonly root: string; + readonly data?: string; + }; + readonly network?: { + readonly bandwidth?: string; + readonly connections?: number; + }; +} + +export interface NetworkInterface { + readonly name: string; + readonly mac: string | 'auto'; + readonly ipv4: + | 'dhcp' + | 'static' + | { address: string; gateway?: string; dns?: string[] }; + readonly ipv6: + | 'dhcp' + | 'static' + | 'disabled' + | { address: string; gateway?: string; dns?: string[] }; + readonly vlan?: number; +} + +export interface SecurityContext { + readonly selinux_context?: string; + readonly capabilities: readonly string[]; + readonly syscalls: { + readonly allow: readonly string[]; + readonly deny: readonly string[]; + }; + readonly namespaces: { + readonly pid: boolean; + readonly net: boolean; + readonly mnt: boolean; + readonly uts: boolean; + readonly ipc: boolean; + readonly user: boolean; + }; +} + +export interface TenantMetadata { + readonly version: string; + readonly tenant: { + readonly id: string; + readonly name: string; + readonly type: TenantType; + readonly enabled: boolean; + readonly description?: string; + }; + readonly resources: ResourceLimits; + readonly network: { + readonly mode: NetworkMode; + readonly interfaces: readonly NetworkInterface[]; + }; + readonly extensions: { + readonly sysext: readonly string[]; + readonly confext: readonly string[]; + }; + readonly services: { + readonly portable: readonly string[]; + readonly systemd: readonly string[]; + }; + readonly security: SecurityContext; + readonly lifecycle?: { + readonly pre_provision?: string; + readonly post_provision?: string; + readonly pre_stop?: string; + readonly post_stop?: string; + }; +} + +export interface TenantRegistry { + readonly version: string; + readonly updated_at: string; + readonly tenants: readonly { + readonly id: string; + readonly name: string; + readonly repository: string; + readonly branch: string; + readonly enabled: boolean; + readonly last_sync: string; + }[]; 
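+ // kept as plain strings here; TenantRegistrySchema decodes updated_at/last_sync to Date via Schema.DateFromString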
+} + +export interface SystemConfig { + readonly version: string; + readonly system: { + readonly host_id: string; + readonly cluster_mode: boolean; + readonly git_ops: { + readonly system_repo: string; + readonly sync_interval: number; + readonly auto_update: boolean; + }; + }; + readonly defaults: { + readonly tenant_resources: ResourceLimits; + readonly network_mode: NetworkMode; + readonly security_context: SecurityContext; + }; + readonly extensions: { + readonly system_sysext: readonly string[]; + readonly system_confext: readonly string[]; + }; +} + +export interface GitOpsConfig { + readonly repository: string; + readonly branch: string; + readonly sync_interval: number; + readonly webhook_secret?: string; + readonly ssh_key_path?: string; + readonly auto_rollback: boolean; + readonly validation: { + readonly schema_check: boolean; + readonly syntax_check: boolean; + readonly security_scan: boolean; + }; +} + +export interface VarlinkMessage { + readonly method: string; + readonly parameters?: Record; + readonly more?: boolean; +} + +export interface VarlinkResponse { + readonly parameters?: Record; + readonly continues?: boolean; + readonly error?: { + readonly error: string; + readonly parameters?: Record; + }; +} diff --git a/src/utils/index.ts b/src/utils/index.ts new file mode 100644 index 0000000..400ba7d --- /dev/null +++ b/src/utils/index.ts @@ -0,0 +1,422 @@ +/** + * Utility functions for BitBuilder Hypervisor + * Provides common functionality across the system + */ + +import { Effect, pipe } from 'effect'; +import { Schema } from '@effect/schema'; +import * as NodeFS from 'node:fs'; +import * as NodePath from 'node:path'; +import { spawn } from 'node:child_process'; + +// Error types +export class SystemdError extends Error { + constructor( + message: string, + public readonly command?: string + ) { + super(message); + this.name = 'SystemdError'; + } +} + +export class GitError extends Error { + constructor( + message: string, + public readonly repository?: string + ) { + super(message); + this.name = 'GitError'; + } +} + +export class ValidationError extends Error { + constructor( + message: string, + public readonly schema?: string + ) { + super(message); + this.name = 'ValidationError'; + } +} + +// File system utilities +export const ensureDirectory = (path: string) => + Effect.tryPromise({ + try: () => NodeFS.promises.mkdir(path, { recursive: true }), + catch: (error) => new Error(`Failed to create directory ${path}: ${error}`), + }); + +export const readJsonFile = + (schema: Schema.Schema) => + (path: string) => + pipe( + Effect.tryPromise({ + try: () => NodeFS.promises.readFile(path, 'utf8'), + catch: (error) => new Error(`Failed to read file ${path}: ${error}`), + }), + Effect.flatMap((content) => + Effect.try({ + try: () => JSON.parse(content), + catch: (error) => + new Error(`Failed to parse JSON from ${path}: ${error}`), + }) + ), + Effect.flatMap((data) => + pipe( + Schema.decodeUnknown(schema)(data), + Effect.mapError( + (error) => + new ValidationError( + `Schema validation failed for ${path}`, + error.message + ) + ) + ) + ) + ); + +export const writeJsonFile = (path: string, data: A) => + pipe( + Effect.try({ + try: () => JSON.stringify(data, null, 2), + catch: (error) => new Error(`Failed to serialize data: ${error}`), + }), + Effect.flatMap((content) => + Effect.tryPromise({ + try: () => NodeFS.promises.writeFile(path, content, 'utf8'), + catch: (error) => new Error(`Failed to write file ${path}: ${error}`), + }) + ) + ); + +export const pathExists = 
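+ // resolves to a boolean rather than failing, so callers can branch without a catchAll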
(path: string) => + Effect.tryPromise({ + try: () => NodeFS.promises.access(path), + catch: () => false, + }).pipe( + Effect.map(() => true), + Effect.orElse(() => Effect.succeed(false)) + ); + +// Systemd utilities +export const systemctl = (command: string, ...args: string[]) => + Effect.async((resume) => { + const proc = spawn('systemctl', [command, ...args], { + stdio: ['pipe', 'pipe', 'pipe'], + }); + + let stdout = ''; + let stderr = ''; + + proc.stdout?.on('data', (data) => { + stdout += data.toString(); + }); + + proc.stderr?.on('data', (data) => { + stderr += data.toString(); + }); + + proc.on('close', (code) => { + if (code === 0) { + resume(Effect.succeed(stdout.trim())); + } else { + resume( + Effect.fail( + new SystemdError( + `systemctl ${command} failed: ${stderr}`, + `systemctl ${command} ${args.join(' ')}` + ) + ) + ); + } + }); + + proc.on('error', (error) => { + resume( + Effect.fail( + new SystemdError( + `Failed to execute systemctl: ${error.message}`, + `systemctl ${command} ${args.join(' ')}` + ) + ) + ); + }); + }); + +export const daemonReload = () => systemctl('daemon-reload'); + +export const enableService = (service: string) => systemctl('enable', service); + +export const startService = (service: string) => systemctl('start', service); + +export const stopService = (service: string) => systemctl('stop', service); + +export const restartService = (service: string) => + systemctl('restart', service); + +export const serviceStatus = (service: string) => systemctl('status', service); + +export const isServiceActive = (service: string) => + systemctl('is-active', service).pipe( + Effect.map((output) => output.trim() === 'active'), + Effect.orElse(() => Effect.succeed(false)) + ); + +export const isServiceEnabled = (service: string) => + systemctl('is-enabled', service).pipe( + Effect.map((output) => output.trim() === 'enabled'), + Effect.orElse(() => Effect.succeed(false)) + ); + +// Git utilities +export const gitClone = ( + repository: string, + destination: string, + branch = 'main' +) => + Effect.async((resume) => { + const proc = spawn( + 'git', + ['clone', '-b', branch, repository, destination], + { + stdio: ['pipe', 'pipe', 'pipe'], + } + ); + + let stderr = ''; + + proc.stderr?.on('data', (data) => { + stderr += data.toString(); + }); + + proc.on('close', (code) => { + if (code === 0) { + resume(Effect.succeed(undefined)); + } else { + resume( + Effect.fail(new GitError(`Git clone failed: ${stderr}`, repository)) + ); + } + }); + + proc.on('error', (error) => { + resume( + Effect.fail( + new GitError( + `Failed to execute git clone: ${error.message}`, + repository + ) + ) + ); + }); + }); + +export const gitPull = (repositoryPath: string) => + Effect.async((resume) => { + const proc = spawn('git', ['pull'], { + cwd: repositoryPath, + stdio: ['pipe', 'pipe', 'pipe'], + }); + + let stderr = ''; + + proc.stderr?.on('data', (data) => { + stderr += data.toString(); + }); + + proc.on('close', (code) => { + if (code === 0) { + resume(Effect.succeed(undefined)); + } else { + resume( + Effect.fail( + new GitError(`Git pull failed: ${stderr}`, repositoryPath) + ) + ); + } + }); + + proc.on('error', (error) => { + resume( + Effect.fail( + new GitError( + `Failed to execute git pull: ${error.message}`, + repositoryPath + ) + ) + ); + }); + }); + +export const getGitCommitHash = (repositoryPath: string) => + Effect.async((resume) => { + const proc = spawn('git', ['rev-parse', 'HEAD'], { + cwd: repositoryPath, + stdio: ['pipe', 'pipe', 'pipe'], + }); + + let stdout = ''; 
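+ // 'git rev-parse HEAD' prints the full commit hash on stdout; stderr is collected only for error reporting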
+ let stderr = ''; + + proc.stdout?.on('data', (data) => { + stdout += data.toString(); + }); + + proc.stderr?.on('data', (data) => { + stderr += data.toString(); + }); + + proc.on('close', (code) => { + if (code === 0) { + resume(Effect.succeed(stdout.trim())); + } else { + resume( + Effect.fail( + new GitError( + `Failed to get git commit hash: ${stderr}`, + repositoryPath + ) + ) + ); + } + }); + + proc.on('error', (error) => { + resume( + Effect.fail( + new GitError( + `Failed to execute git rev-parse: ${error.message}`, + repositoryPath + ) + ) + ); + }); + }); + +// Network utilities +export const generateTenantNetworkId = (tenantId: string): number => { + // Generate a consistent network ID from tenant ID hash + let hash = 0; + for (let i = 0; i < tenantId.length; i++) { + const char = tenantId.charCodeAt(i); + hash = (hash << 5) - hash + char; + hash = hash & hash; // Convert to 32-bit integer + } + // Map to range 100-999 for network IDs + return 100 + (Math.abs(hash) % 900); +}; + +export const generateMacAddress = ( + tenantId: string, + interfaceIndex = 0 +): string => { + // Generate consistent MAC address from tenant ID + const prefix = '02:42'; // Docker-style locally administered prefix + let hash = 0; + for (let i = 0; i < tenantId.length; i++) { + hash = (hash << 5) - hash + tenantId.charCodeAt(i); + hash = hash & hash; + } + + const combined = Math.abs(hash) + interfaceIndex; + const bytes = [ + (combined >>> 24) & 0xff, + (combined >>> 16) & 0xff, + (combined >>> 8) & 0xff, + combined & 0xff, + ]; + + return `${prefix}:${bytes.map((b) => b.toString(16).padStart(2, '0')).join(':')}`; +}; + +// Template utilities +export const renderTemplate = ( + template: string, + variables: Record +): string => { + return Object.entries(variables).reduce((result, [key, value]) => { + const regex = new RegExp(`\\$\\{${key}\\}`, 'g'); + return result.replace(regex, value); + }, template); +}; + +export const validateSystemdUnitName = (unitName: string): boolean => { + return /^[a-zA-Z0-9:_.\\@-]+\.(service|socket|timer|mount|automount|path|slice|scope|target|device)$/.test( + unitName + ); +}; + +// Security utilities +export const sanitizePath = (path: string): string => { + // Remove dangerous path components + return NodePath.normalize(path).replace(/\.\./g, ''); +}; + +export const validateTenantId = (tenantId: string): boolean => { + return ( + /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/.test(tenantId) && + tenantId.length >= 3 && + tenantId.length <= 63 + ); +}; + +// Logging utilities +export const createLogger = (service: string) => { + const timestamp = () => new Date().toISOString(); + + return { + debug: (message: string, context?: Record) => + Effect.sync(() => { + console.log( + `[${timestamp()}] [DEBUG] [${service}] ${message}`, + context ? JSON.stringify(context) : '' + ); + }), + + info: (message: string, context?: Record) => + Effect.sync(() => { + console.log( + `[${timestamp()}] [INFO] [${service}] ${message}`, + context ? JSON.stringify(context) : '' + ); + }), + + warn: (message: string, context?: Record) => + Effect.sync(() => { + console.warn( + `[${timestamp()}] [WARN] [${service}] ${message}`, + context ? JSON.stringify(context) : '' + ); + }), + + error: (message: string, context?: Record) => + Effect.sync(() => { + console.error( + `[${timestamp()}] [ERROR] [${service}] ${message}`, + context ? 
JSON.stringify(context) : '' + ); + }), + }; +}; + +// Process utilities +export const gracefulShutdown = ( + cleanupFn: () => Effect.Effect +) => { + const handleSignal = (signal: string) => { + console.log(`Received ${signal}, starting graceful shutdown...`); + Effect.runPromise(cleanupFn()) + .then(() => { + console.log('Graceful shutdown completed'); + process.exit(0); + }) + .catch((error) => { + console.error('Error during graceful shutdown:', error); + process.exit(1); + }); + }; + + process.on('SIGTERM', () => handleSignal('SIGTERM')); + process.on('SIGINT', () => handleSignal('SIGINT')); +}; diff --git a/test/schemas.test.ts b/test/schemas.test.ts new file mode 100644 index 0000000..736466c --- /dev/null +++ b/test/schemas.test.ts @@ -0,0 +1,359 @@ +/** + * Tests for BitBuilder Hypervisor Schemas + */ + +import { describe, it, expect } from 'bun:test'; +import { Effect, Schema } from 'effect'; +import * as S from '@effect/schema'; +import { + TenantId, + TenantMetadataSchema, + ResourceLimitsSchema, + NetworkInterfaceSchema, + SecurityContextSchema, +} from '#schemas/index'; + +describe('TenantId Schema', () => { + it('should accept valid tenant IDs', () => { + const validIds = [ + 'tenant-1', + 'my-tenant', + 'abc123', + 'tenant-with-dashes-123', + ]; + + for (const id of validIds) { + const result = S.Schema.decodeUnknownSync(TenantId)(id); + expect(result).toBe(id); + } + }); + + it('should reject invalid tenant IDs', () => { + const invalidIds = [ + 'ab', // too short + '-invalid', // starts with hyphen + 'invalid-', // ends with hyphen + 'UPPERCASE', + 'has space', + 'has.dot', + ]; + + for (const id of invalidIds) { + expect(() => S.Schema.decodeUnknownSync(TenantId)(id)).toThrow(); + } + }); +}); + +describe('ResourceLimitsSchema', () => { + it('should accept valid resource limits', () => { + const validLimits = { + cpu: { + cores: 4, + shares: 1024, + }, + memory: { + limit: '4GB', + swap: '2GB', + }, + storage: { + root: '50GB', + data: '100GB', + }, + }; + + const result = S.Schema.decodeUnknownSync(ResourceLimitsSchema)(validLimits); + expect(result.cpu.cores).toBe(4); + expect(result.memory.limit).toBe('4GB'); + }); + + it('should reject invalid CPU cores', () => { + const invalidLimits = { + cpu: { + cores: -1, // negative + shares: 1024, + }, + memory: { + limit: '4GB', + }, + storage: { + root: '50GB', + }, + }; + + expect(() => + S.Schema.decodeUnknownSync(ResourceLimitsSchema)(invalidLimits) + ).toThrow(); + }); + + it('should reject invalid memory format', () => { + const invalidLimits = { + cpu: { + cores: 4, + shares: 1024, + }, + memory: { + limit: 'invalid', + }, + storage: { + root: '50GB', + }, + }; + + expect(() => + S.Schema.decodeUnknownSync(ResourceLimitsSchema)(invalidLimits) + ).toThrow(); + }); +}); + +describe('NetworkInterfaceSchema', () => { + it('should accept valid network interface with DHCP', () => { + const validInterface = { + name: 'eth0', + mac: 'auto', + ipv4: 'dhcp', + ipv6: 'disabled', + }; + + const result = + S.Schema.decodeUnknownSync(NetworkInterfaceSchema)(validInterface); + expect(result.name).toBe('eth0'); + expect(result.mac).toBe('auto'); + expect(result.ipv4).toBe('dhcp'); + }); + + it('should accept valid network interface with static IP', () => { + const validInterface = { + name: 'eth0', + mac: '02:42:ac:11:00:02', + ipv4: { + address: '192.168.1.100/24', + gateway: '192.168.1.1', + dns: ['8.8.8.8', '8.8.4.4'], + }, + ipv6: 'disabled', + }; + + const result = + 
S.Schema.decodeUnknownSync(NetworkInterfaceSchema)(validInterface); + expect(result.name).toBe('eth0'); + expect(typeof result.ipv4).toBe('object'); + }); + + it('should reject invalid MAC address', () => { + const invalidInterface = { + name: 'eth0', + mac: 'invalid-mac', + ipv4: 'dhcp', + ipv6: 'disabled', + }; + + expect(() => + S.Schema.decodeUnknownSync(NetworkInterfaceSchema)(invalidInterface) + ).toThrow(); + }); + + it('should reject interface name that is too long', () => { + const invalidInterface = { + name: 'verylonginterfacename', // > 15 chars + mac: 'auto', + ipv4: 'dhcp', + ipv6: 'disabled', + }; + + expect(() => + S.Schema.decodeUnknownSync(NetworkInterfaceSchema)(invalidInterface) + ).toThrow(); + }); +}); + +describe('SecurityContextSchema', () => { + it('should accept valid security context', () => { + const validContext = { + capabilities: ['CAP_NET_ADMIN', 'CAP_SYS_ADMIN'], + syscalls: { + allow: ['read', 'write', 'open'], + deny: ['mount', 'umount'], + }, + namespaces: { + pid: true, + net: true, + mnt: true, + uts: true, + ipc: true, + user: true, + }, + }; + + const result = + S.Schema.decodeUnknownSync(SecurityContextSchema)(validContext); + expect(result.capabilities).toContain('CAP_NET_ADMIN'); + expect(result.namespaces.pid).toBe(true); + }); + + it('should reject invalid capability format', () => { + const invalidContext = { + capabilities: ['invalid_capability'], // doesn't match CAP_* pattern + syscalls: { + allow: [], + deny: [], + }, + namespaces: { + pid: true, + net: true, + mnt: true, + uts: true, + ipc: true, + user: true, + }, + }; + + expect(() => + S.Schema.decodeUnknownSync(SecurityContextSchema)(invalidContext) + ).toThrow(); + }); +}); + +describe('TenantMetadataSchema', () => { + it('should accept valid complete tenant metadata', () => { + const validMetadata = { + version: '1.0', + tenant: { + id: 'test-tenant', + name: 'Test Tenant', + type: 'vm', + enabled: true, + description: 'A test tenant for validation', + }, + resources: { + cpu: { + cores: 2, + shares: 1024, + }, + memory: { + limit: '4GB', + }, + storage: { + root: '20GB', + }, + }, + network: { + mode: 'bridge', + interfaces: [ + { + name: 'eth0', + mac: 'auto', + ipv4: 'dhcp', + ipv6: 'disabled', + }, + ], + }, + extensions: { + sysext: ['kubernetes'], + confext: ['monitoring'], + }, + services: { + portable: ['nginx'], + systemd: ['custom.service'], + }, + security: { + capabilities: ['CAP_NET_ADMIN'], + syscalls: { + allow: ['read', 'write'], + deny: ['mount'], + }, + namespaces: { + pid: true, + net: true, + mnt: true, + uts: true, + ipc: true, + user: true, + }, + }, + }; + + const result = + S.Schema.decodeUnknownSync(TenantMetadataSchema)(validMetadata); + expect(result.tenant.id).toBe('test-tenant'); + expect(result.tenant.type).toBe('vm'); + expect(result.network.mode).toBe('bridge'); + }); + + it('should reject metadata with invalid tenant type', () => { + const invalidMetadata = { + version: '1.0', + tenant: { + id: 'test-tenant', + name: 'Test Tenant', + type: 'invalid', // not a valid type + enabled: true, + }, + resources: { + cpu: { cores: 2, shares: 1024 }, + memory: { limit: '4GB' }, + storage: { root: '20GB' }, + }, + network: { + mode: 'bridge', + interfaces: [], + }, + extensions: { sysext: [], confext: [] }, + services: { portable: [], systemd: [] }, + security: { + capabilities: [], + syscalls: { allow: [], deny: [] }, + namespaces: { + pid: true, + net: true, + mnt: true, + uts: true, + ipc: true, + user: true, + }, + }, + }; + + expect(() => + 
S.Schema.decodeUnknownSync(TenantMetadataSchema)(invalidMetadata) + ).toThrow(); + }); + + it('should reject metadata with invalid network mode', () => { + const invalidMetadata = { + version: '1.0', + tenant: { + id: 'test-tenant', + name: 'Test Tenant', + type: 'container', + enabled: true, + }, + resources: { + cpu: { cores: 2, shares: 1024 }, + memory: { limit: '4GB' }, + storage: { root: '20GB' }, + }, + network: { + mode: 'invalid', // not a valid mode + interfaces: [], + }, + extensions: { sysext: [], confext: [] }, + services: { portable: [], systemd: [] }, + security: { + capabilities: [], + syscalls: { allow: [], deny: [] }, + namespaces: { + pid: true, + net: true, + mnt: true, + uts: true, + ipc: true, + user: true, + }, + }, + }; + + expect(() => + S.Schema.decodeUnknownSync(TenantMetadataSchema)(invalidMetadata) + ).toThrow(); + }); +}); diff --git a/test/utils.test.ts b/test/utils.test.ts new file mode 100644 index 0000000..5e93ba0 --- /dev/null +++ b/test/utils.test.ts @@ -0,0 +1,180 @@ +/** + * Tests for BitBuilder Hypervisor Utilities + */ + +import { describe, it, expect } from 'bun:test'; +import { + validateTenantId, + generateTenantNetworkId, + generateMacAddress, + renderTemplate, + validateSystemdUnitName, + sanitizePath, +} from '#utils/index'; + +describe('validateTenantId', () => { + it('should accept valid tenant IDs', () => { + expect(validateTenantId('tenant-1')).toBe(true); + expect(validateTenantId('my-tenant')).toBe(true); + expect(validateTenantId('abc123')).toBe(true); + expect(validateTenantId('tenant-with-dashes')).toBe(true); + }); + + it('should reject IDs that are too short', () => { + expect(validateTenantId('ab')).toBe(false); + expect(validateTenantId('a')).toBe(false); + expect(validateTenantId('')).toBe(false); + }); + + it('should reject IDs with invalid characters', () => { + expect(validateTenantId('UPPERCASE')).toBe(false); + expect(validateTenantId('has space')).toBe(false); + expect(validateTenantId('has.dot')).toBe(false); + expect(validateTenantId('has_underscore')).toBe(false); + }); + + it('should reject IDs starting or ending with hyphen', () => { + expect(validateTenantId('-invalid')).toBe(false); + expect(validateTenantId('invalid-')).toBe(false); + expect(validateTenantId('-both-')).toBe(false); + }); +}); + +describe('generateTenantNetworkId', () => { + it('should generate consistent network IDs for the same tenant', () => { + const id1 = generateTenantNetworkId('tenant-1'); + const id2 = generateTenantNetworkId('tenant-1'); + expect(id1).toBe(id2); + }); + + it('should generate different network IDs for different tenants', () => { + const id1 = generateTenantNetworkId('tenant-1'); + const id2 = generateTenantNetworkId('tenant-2'); + expect(id1).not.toBe(id2); + }); + + it('should generate IDs in the valid range (100-999)', () => { + const testTenants = [ + 'tenant-a', + 'tenant-b', + 'very-long-tenant-name', + 'x', + ]; + for (const tenant of testTenants) { + const id = generateTenantNetworkId(tenant); + expect(id).toBeGreaterThanOrEqual(100); + expect(id).toBeLessThan(1000); + } + }); +}); + +describe('generateMacAddress', () => { + it('should generate consistent MAC addresses for the same tenant', () => { + const mac1 = generateMacAddress('tenant-1', 0); + const mac2 = generateMacAddress('tenant-1', 0); + expect(mac1).toBe(mac2); + }); + + it('should generate different MAC addresses for different interface indices', () => { + const mac1 = generateMacAddress('tenant-1', 0); + const mac2 = generateMacAddress('tenant-1', 1); + 
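+ // the interface index is added to the tenant-id hash inside generateMacAddress, so indices 0 and 1 produce different MACs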
expect(mac1).not.toBe(mac2); + }); + + it('should generate valid MAC address format', () => { + const mac = generateMacAddress('tenant-1', 0); + const macPattern = /^([0-9a-f]{2}:){5}[0-9a-f]{2}$/i; + expect(mac).toMatch(macPattern); + }); + + it('should use locally administered prefix (02:42)', () => { + const mac = generateMacAddress('any-tenant', 0); + expect(mac.startsWith('02:42:')).toBe(true); + }); +}); + +describe('renderTemplate', () => { + it('should replace single variable', () => { + const template = 'Hello, ${NAME}!'; + const result = renderTemplate(template, { NAME: 'World' }); + expect(result).toBe('Hello, World!'); + }); + + it('should replace multiple variables', () => { + const template = '${GREETING}, ${NAME}! Welcome to ${PLACE}.'; + const result = renderTemplate(template, { + GREETING: 'Hello', + NAME: 'Alice', + PLACE: 'BitBuilder', + }); + expect(result).toBe('Hello, Alice! Welcome to BitBuilder.'); + }); + + it('should replace same variable multiple times', () => { + const template = '${ID} and ${ID} again'; + const result = renderTemplate(template, { ID: 'test' }); + expect(result).toBe('test and test again'); + }); + + it('should leave unmatched variables unchanged', () => { + const template = 'Hello, ${NAME}! Your ID is ${ID}.'; + const result = renderTemplate(template, { NAME: 'Bob' }); + expect(result).toBe('Hello, Bob! Your ID is ${ID}.'); + }); + + it('should handle empty variables object', () => { + const template = 'No variables here'; + const result = renderTemplate(template, {}); + expect(result).toBe('No variables here'); + }); +}); + +describe('validateSystemdUnitName', () => { + it('should accept valid service names', () => { + expect(validateSystemdUnitName('nginx.service')).toBe(true); + expect(validateSystemdUnitName('my-app.service')).toBe(true); + expect(validateSystemdUnitName('tenant@.service')).toBe(true); + expect(validateSystemdUnitName('tenant@test.service')).toBe(true); + }); + + it('should accept valid unit types', () => { + expect(validateSystemdUnitName('sshd.socket')).toBe(true); + expect(validateSystemdUnitName('backup.timer')).toBe(true); + expect(validateSystemdUnitName('home.mount')).toBe(true); + expect(validateSystemdUnitName('media.automount')).toBe(true); + expect(validateSystemdUnitName('docker.path')).toBe(true); + expect(validateSystemdUnitName('user.slice')).toBe(true); + expect(validateSystemdUnitName('session.scope')).toBe(true); + expect(validateSystemdUnitName('graphical.target')).toBe(true); + expect(validateSystemdUnitName('loop0.device')).toBe(true); + }); + + it('should reject invalid unit names', () => { + expect(validateSystemdUnitName('nginx')).toBe(false); // missing type + expect(validateSystemdUnitName('nginx.invalid')).toBe(false); // invalid type + expect(validateSystemdUnitName('.service')).toBe(false); // missing name + expect(validateSystemdUnitName('has space.service')).toBe(false); + }); +}); + +describe('sanitizePath', () => { + it('should normalize paths', () => { + expect(sanitizePath('/foo//bar')).toBe('/foo/bar'); + expect(sanitizePath('/foo/./bar')).toBe('/foo/bar'); + }); + + it('should remove parent directory references', () => { + expect(sanitizePath('/foo/../bar')).toBe('/bar'); + expect(sanitizePath('/foo/bar/../baz')).toBe('/foo/baz'); + }); + + it('should handle multiple parent references', () => { + expect(sanitizePath('/foo/bar/../../baz')).toBe('/baz'); + }); + + it('should prevent path traversal attacks', () => { + const malicious = '../../../../etc/passwd'; + const sanitized = 
sanitizePath(malicious); + expect(sanitized).not.toContain('..'); + }); +}); diff --git a/tsconfig.json b/tsconfig.json new file mode 100644 index 0000000..412f55a --- /dev/null +++ b/tsconfig.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "strict": true, + "exactOptionalPropertyTypes": true, + "module": "ESNext", + "moduleResolution": "bundler", + "noEmit": true, + "target": "ESNext", + "lib": ["ESNext"], + "baseUrl": ".", + "paths": { + "#generators/*": ["src/generators/*"], + "#services/*": ["src/services/*"], + "#schemas/*": ["src/schemas/*"], + "#utils/*": ["src/utils/*"], + "#types/*": ["src/types/*"] + }, + "allowSyntheticDefaultImports": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +}