diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index d6ae02c147..95a05c1e5b 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -15,20 +15,185 @@ name: E2E Matrix Tests (bootstrap) on: + workflow_dispatch: pull_request: types: [opened, reopened, synchronize, labeled, unlabeled] branches: - main - - feat/ci-e2e-matrix - workflow_dispatch: + - feat/ci/nightly-e2e-test-nested-env + +concurrency: + group: "${{ github.workflow }}-${{ github.event.number || github.ref }}" + cancel-in-progress: true -permissions: - contents: read +defaults: + run: + shell: bash jobs: - noop: - name: Bootstrap + e2e-ceph: + name: E2E Pipeline (Ceph) + uses: ./.github/workflows/e2e-reusable-pipeline.yml + with: + storage_type: ceph + nested_storageclass_name: nested-ceph-pool-r2-csi-rbd + branch: main + virtualization_tag: main + deckhouse_tag: pr17193 + pod_subnet_cidr: 10.88.0.0/16 + service_subnet_cidr: 10.92.0.0/16 + default_user: cloud + go_version: "1.24.6" + e2e_timeout: "3h" + secrets: + DEV_REGISTRY_DOCKER_CFG: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + VIRT_E2E_NIGHTLY_SA_TOKEN: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + PROD_IO_REGISTRY_DOCKER_CFG: ${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + BOOTSTRAP_DEV_PROXY: ${{ secrets.BOOTSTRAP_DEV_PROXY }} + + e2e-replicated: + name: E2E Pipeline (Replicated) + uses: ./.github/workflows/e2e-reusable-pipeline.yml + with: + storage_type: replicated + nested_storageclass_name: nested-thin-r1 + branch: main + virtualization_tag: main + deckhouse_tag: pr17193 + pod_subnet_cidr: 10.89.0.0/16 + service_subnet_cidr: 10.93.0.0/16 + default_user: cloud + go_version: "1.24.6" + e2e_timeout: "3h" + secrets: + DEV_REGISTRY_DOCKER_CFG: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + VIRT_E2E_NIGHTLY_SA_TOKEN: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + PROD_IO_REGISTRY_DOCKER_CFG: ${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + BOOTSTRAP_DEV_PROXY: ${{ secrets.BOOTSTRAP_DEV_PROXY }} + + report-to-channel: runs-on: ubuntu-latest + name: End-to-End tests report + needs: + - e2e-ceph + - e2e-replicated + if: ${{ always()}} steps: - - name: Say hello - run: echo "Bootstrap workflow OK" + - name: Send results to channel + run: | + # Map storage types to CSI names + get_csi_name() { + local storage_type=$1 + case "$storage_type" in + "ceph") + echo "rbd.csi.ceph.com" + ;; + "replicated") + echo "replicated.csi.storage.deckhouse.io" + ;; + *) + echo "$storage_type" + ;; + esac + } + + # Parse summary JSON and add to table + parse_summary() { + local summary_json=$1 + local storage_type=$2 + + if [ -z "$summary_json" ] || [ "$summary_json" == "null" ] || [ "$summary_json" == "" ]; then + echo "Warning: Empty summary for $storage_type" + return + fi + + # Try to parse as JSON (handle both JSON string and already parsed JSON) + if ! 
echo "$summary_json" | jq empty 2>/dev/null; then + echo "Warning: Invalid JSON for $storage_type: $summary_json" + return + fi + + # Parse JSON fields + csi_raw=$(echo "$summary_json" | jq -r '.CSI // empty' 2>/dev/null) + if [ -z "$csi_raw" ] || [ "$csi_raw" == "null" ] || [ "$csi_raw" == "" ]; then + csi=$(get_csi_name "$storage_type") + else + csi="$csi_raw" + fi + + date=$(echo "$summary_json" | jq -r '.Date // ""' 2>/dev/null) + time=$(echo "$summary_json" | jq -r '.StartTime // ""' 2>/dev/null) + branch=$(echo "$summary_json" | jq -r '.Branch // ""' 2>/dev/null) + status=$(echo "$summary_json" | jq -r '.Status // ":question: UNKNOWN"' 2>/dev/null) + passed=$(echo "$summary_json" | jq -r '.Passed // 0' 2>/dev/null) + failed=$(echo "$summary_json" | jq -r '.Failed // 0' 2>/dev/null) + pending=$(echo "$summary_json" | jq -r '.Pending // 0' 2>/dev/null) + skipped=$(echo "$summary_json" | jq -r '.Skipped // 0' 2>/dev/null) + link=$(echo "$summary_json" | jq -r '.Link // ""' 2>/dev/null) + + # Set defaults if empty + [ -z "$passed" ] && passed=0 + [ -z "$failed" ] && failed=0 + [ -z "$pending" ] && pending=0 + [ -z "$skipped" ] && skipped=0 + [ -z "$status" ] && status=":question: UNKNOWN" + + # Validate date + if [ -n "$date" ] && [ "$date" != "" ]; then + current_date=$(date +"%Y-%m-%d") + if date -d "$current_date" +%s >/dev/null 2>&1 && date -d "$date" +%s >/dev/null 2>&1; then + if [ "$(date -d "$current_date" +%s)" -gt "$(date -d "$date" +%s)" ]; then + status=":x: WRONG REPORT DATE!" + fi + fi + fi + + # Format link - use CSI name as fallback if link is empty + if [ -z "$link" ] || [ "$link" == "" ]; then + link_text="$csi" + else + link_text="[:link: $csi]($link)" + fi + + # Add row to table + markdown_table+="| $link_text | $status | $passed | $failed | $pending | $skipped | $date | $time | $branch |\n" + } + + # Initialize markdown table + markdown_table="" + header="| CSI | Status | Passed | Failed | Pending | Skipped | Date | Time | Branch|\n" + separator="|---|---|---|---|---|---|---|---|---|\n" + markdown_table+="$header" + markdown_table+="$separator" + + # Get current date for header + DATE=$(date +"%Y-%m-%d") + COMBINED_SUMMARY="## :dvp: **DVP | End-to-End tests | $DATE**\n\n" + + # Save to json files + cat > /tmp/ceph.json << 'EOF' + ${{ needs.e2e-ceph.outputs.e2e-summary }} + EOF + + cat > /tmp/replicated.json << 'EOF' + ${{ needs.e2e-replicated.outputs.e2e-summary }} + EOF + + if [ -s /tmp/ceph.json ] && [ "$(cat /tmp/ceph.json)" != '""' ] && [ "$(cat /tmp/ceph.json)" != '{}' ]; then + parse_summary "$(cat /tmp/ceph.json)" "ceph" + fi + + if [ -s /tmp/replicated.json ] && [ "$(cat /tmp/replicated.json)" != '""' ] && [ "$(cat /tmp/replicated.json)" != '{}' ]; then + parse_summary "$(cat /tmp/replicated.json)" "replicated" + fi + + COMBINED_SUMMARY+="${markdown_table}\n" + + echo -e "$COMBINED_SUMMARY" + + # Send to channel if webhook is configured + if [ -n "$LOOP_WEBHOOK_URL" ]; then + curl --request POST --header 'Content-Type: application/json' --data "{\"text\": \"${COMBINED_SUMMARY}\"}" "$LOOP_WEBHOOK_URL" + fi + env: + LOOP_WEBHOOK_URL: ${{ secrets.LOOP_TEST_CHANNEL }} diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml index d9b84d26ca..dbb0ae356f 100644 --- a/.github/workflows/e2e-reusable-pipeline.yml +++ b/.github/workflows/e2e-reusable-pipeline.yml @@ -20,15 +20,11 @@ on: storage_type: required: true type: string - description: "Storage type (ceph or replicated)" + description: "Storage type (ceph or 
replicated or etc.)" nested_storageclass_name: required: true type: string description: "Nested storage class name" - default_cluster_storageclass: - required: true - type: string - description: "Default cluster storage class" branch: required: false type: string @@ -44,16 +40,31 @@ on: type: string default: "main" description: "Deckhouse tag" + pod_subnet_cidr: + required: false + type: string + default: "10.88.0.0/16" + description: "Pod subnet CIDR" + service_subnet_cidr: + required: false + type: string + default: "10.99.0.0/16" + description: "Service subnet CIDR" default_user: required: false type: string default: "ubuntu" - description: "Default user" + description: "Default user for vms" go_version: required: false type: string - default: "1.24.5" + default: "1.24.6" description: "Go version" + e2e_timeout: + required: false + type: string + default: "3h" + description: "E2E tests timeout" secrets: DEV_REGISTRY_DOCKER_CFG: required: true @@ -61,8 +72,12 @@ on: required: true PROD_IO_REGISTRY_DOCKER_CFG: required: true - GITHUB_TOKEN: + BOOTSTRAP_DEV_PROXY: required: true + outputs: + e2e-summary: + description: "E2E test results" + value: ${{ jobs.e2e-test.outputs.report-summary }} env: BRANCH: ${{ inputs.branch }} @@ -70,15 +85,862 @@ env: DECKHOUSE_TAG: ${{ inputs.deckhouse_tag }} DEFAULT_USER: ${{ inputs.default_user }} GO_VERSION: ${{ inputs.go_version }} + SETUP_CLUSTER_TYPE_PATH: test/dvp-static-cluster defaults: run: shell: bash jobs: - noop: - name: Bootstrap + bootstrap: + name: Bootstrap cluster (${{ inputs.storage_type }}) + runs-on: ubuntu-latest + concurrency: + group: "${{ github.workflow }}-${{ github.event.number || github.ref }}-${{ inputs.storage_type }}" + cancel-in-progress: true + outputs: + kubeconfig-content: ${{ steps.generate-kubeconfig.outputs.config }} + storage-type: ${{ steps.vars.outputs.storage_type }} + nested-storageclass-name: ${{ steps.vars.outputs.nested_storageclass_name }} + steps: + - uses: actions/checkout@v4 + # with: + # ref: ${{ env.BRANCH }} + + - name: Set outputs + id: vars + run: | + namespace="nightly-e2e-${{ inputs.storage_type }}-$(git rev-parse --short HEAD)" + echo "namespace=$namespace" >> $GITHUB_OUTPUT + echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + echo "storage_type=${{ inputs.storage_type }}" >> $GITHUB_OUTPUT + echo "nested_storageclass_name=${{ inputs.nested_storageclass_name }}" >> $GITHUB_OUTPUT + + REGISTRY=$(base64 -d <<< ${{secrets.DEV_REGISTRY_DOCKER_CFG}} | jq '.auths | to_entries | .[] | .key' -r) + USERNAME=$(base64 -d <<< ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} | jq '.auths | to_entries | .[] | .value.auth' -r | base64 -d | cut -d ':' -f1) + PASSWORD=$(base64 -d <<< ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} | jq '.auths | to_entries | .[] | .value.auth' -r | base64 -d | cut -d ':' -f2) + + echo "registry=$REGISTRY" >> $GITHUB_OUTPUT + echo "username=$USERNAME" >> $GITHUB_OUTPUT + echo "password=$PASSWORD" >> $GITHUB_OUTPUT + + - name: Install htpasswd utility + run: | + sudo apt-get update + sudo apt-get install -y apache2-utils + + - name: Install Task + uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup d8 + uses: ./.github/actions/install-d8 + + - name: Log in to private registry + uses: docker/login-action@v3 + with: + registry: ${{ steps.vars.outputs.registry }} + username: ${{ steps.vars.outputs.username }} + password: ${{ steps.vars.outputs.password }} + + - name: Configure kubectl via azure/k8s-set-context@v4 + uses: 
azure/k8s-set-context@v4 + with: + method: kubeconfig + context: e2e-cluster-nightly-e2e-virt-sa + kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + + - name: Generate values.yaml + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} + run: | + defaultStorageClass=$(kubectl get storageclass -o json \ + | jq -r '.items[] | select(.metadata.annotations."storageclass.kubernetes.io/is-default-class" == "true") | .metadata.name') + + cat < values.yaml + namespace: ${{ steps.vars.outputs.namespace }} + storage_type: ${{ inputs.storage_type }} + storageClass: ${defaultStorageClass} + sa: dkp-sa + deckhouse: + podSubnetCIDR: ${{ inputs.pod_subnet_cidr }} + serviceSubnetCIDR: ${{ inputs.service_subnet_cidr }} + tag: ${{ env.DECKHOUSE_TAG }} + kubernetesVersion: Automatic + registryDockerCfg: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + bundle: Default + proxy: + httpProxy: ${{ secrets.BOOTSTRAP_DEV_PROXY }} + httpsProxy: ${{ secrets.BOOTSTRAP_DEV_PROXY }} + noProxy: + - "localhost" + - "127.0.0.1" + - "10.0.0.0/8" + - "172.16.0.0/12" + - "192.168.0.0/16" + - "10.112.0.0/16" + - "10.223.0.0/16" + - "docker.io" + - ".ubuntu.com" + image: + url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img + defaultUser: ${{ env.DEFAULT_USER }} + bootloader: BIOS + ingressHosts: + - api + - grafana + - dex + - prometheus + - console + - virtualization + instances: + masterNodes: + count: 1 + cfg: + rootDiskSize: 60Gi + cpu: + cores: 4 + coreFraction: 50% + memory: + size: 12Gi + additionalNodes: + - name: worker + count: 3 + cfg: + cpu: + cores: 4 + coreFraction: 50% + memory: + size: 6Gi + additionalDisks: + - size: 50Gi + EOF + + - name: Bootstrap cluster [infra-deploy] + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} + run: | + task infra-deploy + - name: Bootstrap cluster [dhctl-bootstrap] + id: dhctl-bootstrap + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} + run: | + task dhctl-bootstrap + timeout-minutes: 30 + - name: Bootstrap cluster [show-connection-info] + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} + run: | + task show-connection-info + + - name: Save ssh to secrets in cluster + env: + NAMESPACE: ${{ steps.vars.outputs.namespace }} + if: always() + run: | + kubectl -n $NAMESPACE create secret generic ssh-key --from-file=${{ env.SETUP_CLUSTER_TYPE_PATH }}/tmp/ssh/cloud + + - name: Get info about nested master VM + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} + env: + NAMESPACE: ${{ steps.vars.outputs.namespace }} + PREFIX: ${{ inputs.storage_type }} + run: | + nested_master=$(kubectl -n ${NAMESPACE} get vm -l group=${PREFIX}-master -o jsonpath="{.items[0].metadata.name}") + + echo "Pods" + kubectl get pods -n "${NAMESPACE}" + echo "" + + echo "VMs" + kubectl get vm -n "${NAMESPACE}" + echo "" + + echo "VDs" + kubectl get vd -n "${NAMESPACE}" + echo "" + + echo "login to master" + echo "os-release master" + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'cat /etc/os-release' + echo "" + + echo "hostname master" + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'hostname' + + - name: Generate nested kubeconfig + id: generate-kubeconfig + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} + env: + kubeConfigPath: 
tmp/kube.config + NAMESPACE: ${{ steps.vars.outputs.namespace }} + PREFIX: ${{ inputs.storage_type }} + run: | + nested_master=$(kubectl -n ${NAMESPACE} get vm -l group=${PREFIX}-master -o jsonpath="{.items[0].metadata.name}") + + d8vscp() { + local source=$1 + local dest=$2 + d8 v scp -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + $source $dest + echo "d8vscp: $source -> $dest - done" + } + + d8vssh() { + local cmd=$1 + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c "$cmd" + } + + echo "[INFO] Copy script for generating kubeconfig in nested cluster" + echo "[INFO] Copy tools/gen-kubeconfig.sh to master" + d8vscp "./tools/gen-kubeconfig.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/gen-kubeconfig.sh" + echo "" + d8vscp "./tools/deckhouse-queue.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/deckhouse-queue.sh" + echo "" + + echo "[INFO] Set file exec permissions" + d8vssh 'chmod +x /tmp/{gen-kubeconfig.sh,deckhouse-queue.sh}' + d8vssh 'ls -la /tmp/' + echo "[INFO] Check d8 queue in nested cluster" + d8vssh 'sudo /tmp/deckhouse-queue.sh' + + echo "[INFO] Generate kube conf in nested cluster" + echo "[INFO] Run gen-kubeconfig.sh in nested cluster" + d8vssh "sudo /tmp/gen-kubeconfig.sh nested-sa nested nested-e2e /${kubeConfigPath}" + echo "" + + echo "[INFO] Copy kubeconfig to runner" + echo "[INFO] ${DEFAULT_USER}@${nested_master}.$NAMESPACE:/${kubeConfigPath} ./${kubeConfigPath}" + d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/${kubeConfigPath}" "./${kubeConfigPath}" + + echo "[INFO] Set rights for kubeconfig" + echo "[INFO] sudo chown 1001:1001 ${kubeConfigPath}" + sudo chown 1001:1001 ${kubeConfigPath} + echo " " + + echo "[INFO] Kubeconf to github output" + CONFIG=$(cat ${kubeConfigPath} | base64 -w 0) + CONFIG=$(echo $CONFIG | base64 -w 0) + echo "config=$CONFIG" >> $GITHUB_OUTPUT + + - name: cloud-init logs + if: steps.dhctl-bootstrap.outcome == 'failure' + env: + NAMESPACE: ${{ steps.vars.outputs.namespace }} + PREFIX: ${{ inputs.storage_type }} + run: | + nested_master=$(kubectl -n ${NAMESPACE} get vm -l group=${PREFIX}-master -o jsonpath="{.items[0].metadata.name}") + + d8vscp() { + local source=$1 + local dest=$2 + d8 v scp -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + $source $dest + echo "d8vscp: $source -> $dest - done" + } + + d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/var/log/cloud-init*.log" "./${{ env.SETUP_CLUSTER_TYPE_PATH }}/tmp/" + + - name: Prepare artifact + if: always() + run: | + sudo chown -fR 1001:1001 ${{ env.SETUP_CLUSTER_TYPE_PATH }} + yq e '.deckhouse.registryDockerCfg = "None"' -i ./${{ env.SETUP_CLUSTER_TYPE_PATH }}/values.yaml + yq e 'select(.kind == "InitConfiguration") .deckhouse.registryDockerCfg = "None"' -i ./${{ env.SETUP_CLUSTER_TYPE_PATH }}/tmp/config.yaml || echo "The config.yaml file is not generated, skipping" + echo "${{ steps.generate-kubeconfig.outputs.config }}" | base64 -d | base64 -d > ./${{ env.SETUP_CLUSTER_TYPE_PATH }}/kube-config + + - name: Upload generated files + uses: actions/upload-artifact@v4 + id: artifact-upload + if: always() + with: + name: generated-files-${{ inputs.storage_type }} + path: | + ${{ env.SETUP_CLUSTER_TYPE_PATH 
}}/tmp + ${{ env.SETUP_CLUSTER_TYPE_PATH }}/values.yaml + overwrite: true + include-hidden-files: true + retention-days: 1 + + - name: Upload ssh config + uses: actions/upload-artifact@v4 + id: artifact-upload-ssh + if: always() + with: + name: generated-files-ssh-${{ inputs.storage_type }} + path: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/tmp/ssh + overwrite: true + include-hidden-files: true + retention-days: 1 + + - name: Upload kubeconfig config + uses: actions/upload-artifact@v4 + id: artifact-upload-kubeconfig + if: always() + with: + name: generated-files-kubeconfig-${{ inputs.storage_type }} + path: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/kube-config + overwrite: true + include-hidden-files: true + retention-days: 1 + + configure-storage: + name: Configure storage (${{ inputs.storage_type }}) + runs-on: ubuntu-latest + needs: bootstrap + steps: + - uses: actions/checkout@v4 + + - name: Install Task + uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup d8 + uses: ./.github/actions/install-d8 + - name: Install kubectl CLI + uses: azure/setup-kubectl@v4 + + - name: Check kubeconfig + run: | + mkdir -p ~/.kube + echo "[INFO] Configure kubeconfig for nested cluster" + echo "${{ needs.bootstrap.outputs.kubeconfig-content }}" | base64 -d | base64 -d > ~/.kube/config + + echo "[INFO] Show paths and files content" + ls -la ~/.kube + echo "[INFO] Set permissions for kubeconfig" + chmod 600 ~/.kube/config + + echo "[INFO] Show nodes in cluster" + kubectl config use-context nested-e2e-nested-sa + + # some times kubectl get nodes returns error, so we need to retry + for i in {1..3}; do + echo "Attempt $i/3..." + if (kubectl get nodes); then + echo "[SUCCESS] Successfully retrieved nodes." + break + else + echo "[INFO] Retrying in 5 seconds..." 
+ sleep 5 + fi + done + + - name: Configure replicated storage + if: ${{ inputs.storage_type == 'replicated' }} + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/sds-replicated + run: | + kubectl apply -f mc.yaml + echo "[INFO] Wait for sds-node-configurator" + kubectl wait --for=jsonpath='{.status.phase}'=Ready modules sds-node-configurator --timeout=300s + + for i in {1..60}; do + sds_replicated_volume_status=$(kubectl get ns d8-sds-replicated-volume -o jsonpath='{.status.phase}' || echo "False") + + if [[ "${sds_replicated_volume_status}" = "Active" ]]; then + echo "[SUCCESS] Namespaces sds-replicated-volume are Active" + kubectl -n d8-sds-replicated-volume get pods + break + fi + + echo "[INFO] Waiting 10s for sds-replicated-volume to be ready" + echo "[INFO] Show namespaces sds-replicated-volume" + kubectl get ns | grep sds-replicated-volume || echo "Namespaces sds-replicated-volume are not ready" + + if (( i % 5 == 0 )); then + d8 s queue list | head -n25 || echo "No queues" + fi + sleep 10 + done + + echo "[INFO] Wait BlockDevice are ready" + workers=$(kubectl get nodes -o name | grep -c worker) + workers=$((workers)) + bdexists=false + count=60 + for i in $(seq 1 $count); do + blockdevices=$(kubectl get blockdevice -o name | wc -l) + if [ $blockdevices -ge $workers ]; then + bdexists=true + break + fi + echo "[INFO] Wait 10 sec until blockdevices is greater or equal to $workers [${i}/${count}]" + d8 s queue list | head -n25 || echo "No queues" + sleep 10 + done + + if [ $bdexists = false ]; then + echo "[ERROR] Blockdevices is not 3" + echo "[DEBUG] Show blockdevice" + kubectl get blockdevice + echo "[DEBUG] Show sds namespaces" + kubectl get ns | grep sds || echo "ns sds is not found" + echo "[DEBUG] Show cluster nodes" + kubectl get nodes + echo "[DEBUG] Show deckhouse logs" + d8 s logs | tail -n 100 + echo " " + exit 1 + fi + + echo "[INFO] Wait pods and webhooks sds-replicated pods" + for i in {1..60}; do + echo "[INFO] Check sds-replicated pods, linstor-node csi-node webhooks" + linstor_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep linstor-node | grep -c Running || echo 0) + csi_node=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep csi-node | grep -c Running || echo 0) + webhooks=$(kubectl -n d8-sds-replicated-volume get po 2>/dev/null | grep webhooks | grep -c Running || echo 0) + + linstor_node=$((linstor_node)) + csi_node=$((csi_node)) + webhooks=$((webhooks)) + + echo "[INFO] Check if sds-replicated pods are ready" + if [[ "${linstor_node}" -ge ${workers} && "${csi_node}" -ge ${workers} && ${webhooks} -ge 1 ]]; then + echo "[SUCCESS] sds-replicated-volume is ready" + break + fi + + echo "[INFO] Waiting 10s for sds-replicated-volume to be ready" + if (( i % 5 == 0 )); then + echo "[DEBUG] Get pods" + kubectl -n d8-sds-replicated-volume get pods || true + echo "[DEBUG] Show queue (first 25 lines)" + d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" + echo " " + fi + done + + chmod +x lvg-gen.sh + ./lvg-gen.sh + + chmod +x rsc-gen.sh + ./rsc-gen.sh + + echo "[INFO] Enshure that nested storageclasses are created" + kubectl get storageclass | grep nested || echo "[WARNING] No nested storageclasses" + echo "[SUCCESS] Done" + - name: Configure ceph storage + if: ${{ inputs.storage_type == 'ceph' }} + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/storage/ceph + run: | + d8_queue_list() { + d8 s queue list | grep -Po '([0-9]+)(?= active)' || echo "[WARNING] Failed to retrieve list queue" + } + + 
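Both storage steps repeat the same poll-every-10-seconds pattern: for BlockDevices and linstor/csi pods above, and for the Deckhouse queue and Ceph daemons below. A generic helper along the following lines could factor that loop out; `retry_until` is a hypothetical name and this is only a sketch, not part of the workflow:

```bash
#!/usr/bin/env bash
# Hypothetical helper: retry a command every <delay> seconds,
# up to <attempts> times, mirroring the wait loops in this step.
retry_until() {
  local attempts=$1 delay=$2
  shift 2
  local i
  for i in $(seq 1 "$attempts"); do
    if "$@"; then
      echo "[SUCCESS] condition met after ${i} attempt(s)"
      return 0
    fi
    echo "[INFO] attempt ${i}/${attempts} failed, retrying in ${delay}s"
    sleep "$delay"
  done
  echo "[ERROR] condition not met after ${attempts} attempts"
  return 1
}

# Example (illustrative only): wait until the operator-ceph namespace is Active.
# retry_until 60 10 bash -c \
#   '[ "$(kubectl get ns d8-operator-ceph -o jsonpath="{.status.phase}" 2>/dev/null)" = "Active" ]'
```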
d8_queue() { + local count=90 + + for i in $(seq 1 $count) ; do + if [[ "$(d8_queue_list)" == "0" ]]; then + echo "[SUCCESS] Queue is clear" + break + else + echo "[INFO] Show queue list" + d8 s queue list | head -n25 || echo "[WARNING] Failed to retrieve list queue" + fi + + echo "[INFO] Wait until queues are empty ${i}/${count}" + kubectl get ns | grep sds || echo "Namespaces sds not found" + echo " " + sleep 10 + done + } + + export registry=${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + yq e '.spec.registry.dockerCfg = env(registry)' -i 00-ms.yaml + unset registry + + echo "[INFO] Create prod module source" + kubectl apply -f 00-ms.yaml + kubectl wait --for=jsonpath='{.status.phase}' modulesource deckhouse-prod --timeout=30s + kubectl get modulesources + + echo "[INFO] Create ceph operator and csi module config" + kubectl apply -f 01-mc.yaml + + echo "[INFO] Wait while queues are empty" + d8_queue + + echo "Start wait for ceph operator and csi" + for i in {1..60}; do + ceph_operator_status=$(kubectl get ns d8-operator-ceph -o jsonpath='{.status.phase}' || echo "False") + csi_ceph_status=$(kubectl get module csi-ceph -o jsonpath='{.status.phase}' || echo "False") + + if [[ "${ceph_operator_status}" = "Active" && "${csi_ceph_status}" = "Ready" ]]; then + echo "[SUCCESS] Namespaces operator-ceph and csi are Active" + break + fi + + echo "[INFO] Waiting 10s for ceph operator and csi namespaces to be ready" + echo "[INFO] Get namespace" + kubectl get namespace | grep ceph || echo "[WARNING] Namespaces operator-ceph and csi are not ready" + + if (( i % 5 == 0 )); then + echo "[DEBUG] Show all namespaces" + kubectl get namespace + echo " " + d8 s queue list | head -n25 || echo "[WARNING] Failed to retrieve list queue" + fi + sleep 10 + done + + echo "[INFO] Create ServiceAccounts" + kubectl apply -f 02-sa.yaml + echo "[INFO] Create ConfigMap (patch existing for configure rbd support)" + kubectl apply -f 03-cm.yaml + echo "[INFO] Create Cluster" + kubectl apply -f 04-cluster.yaml + + echo "[INFO] Get pod in d8-operator-ceph" + kubectl -n d8-operator-ceph get po + + echo "[INFO] Wait for ceph operator" + for i in {1..60}; do + echo "[INFO] Check ceph pods, mon mgr osd" + ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mgr | grep -c Running || echo 0) + ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mon | grep -c Running || echo 0) + ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-osd | grep -c Running || echo 0) + + ceph_mgr=$((ceph_mgr)) + ceph_mon=$((ceph_mon)) + ceph_osd=$((ceph_osd)) + + echo "[INFO] check if ceph pods are ready" + if [[ $ceph_mgr -ge 2 && $ceph_mon -ge 3 && $ceph_osd -ge 3 ]]; then + echo "[SUCCESS] Ceph cluster is ready" + break + fi + + echo "[WARNING] Not all pods are ready, ceph_mgr=${ceph_mgr}, ceph_mon=${ceph_mon}, ceph_osd=${ceph_osd}" + echo "[INFO] Waiting 10s for ceph operator to be ready" + kubectl -n d8-operator-ceph get po || echo "Failed to retrieve pods" + if (( i % 5 == 0 )); then + echo "[DEBUG] Show ceph namespace" + kubectl get ns | grep ceph || echo "Failed to retrieve ceph ns" + echo "[DEBUG] Show ModuleConfig ceph" + kubectl get mc | grep ceph || echo "Failed to retrieve mc" + echo "[DEBUG] Show ceph in resource modules" + kubectl get modules -o wide | grep ceph || echo "Failed to retrieve modules" + echo "[DEBUG] Show queue" + d8 s queue list | head -n 25 || echo "Failed to retrieve list queue" + fi + echo "[INFO] Wait until all necessary pods are ready ${i}/60" + sleep 10 + done + + 
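The commands that follow apply `05-blockpool.yaml` and wait for the `ceph-rbd-pool-r2` pool to become `Ready`. That manifest is not included in this diff; a minimal Rook `CephBlockPool` consistent with the wait condition would look roughly like the sketch below (the failure domain and replica count are assumptions inferred from the pool name):

```yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: ceph-rbd-pool-r2        # the resource the workflow waits on
  namespace: d8-operator-ceph   # namespace used throughout this step
spec:
  failureDomain: host           # assumption: spread replicas across nodes
  replicated:
    size: 2                     # assumption: "r2" suggests two replicas
```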
echo "[INFO] Show pods" + kubectl get pods -n d8-operator-ceph + + kubectl apply -f 05-blockpool.yaml + echo "[INFO] Wait for ceph-rbd-pool-r2 blockpool to be ready, timeout 600s" + kubectl -n d8-operator-ceph wait --for=jsonpath='{.status.phase}'=Ready cephblockpools.ceph.rook.io ceph-rbd-pool-r2 --timeout=600s + kubectl apply -f 06-toolbox.yaml + echo "[INFO] Wait for rook-ceph-tools, timeout 300s" + kubectl -n d8-operator-ceph wait --for=condition=Available deployment/rook-ceph-tools --timeout=300s + + echo "[INFO] Show ceph pools via rook-ceph-tools" + kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph osd pool ls + + echo "[INFO] Configure storage class" + chmod +x ./ceph-configure.sh + ./ceph-configure.sh + + configure-virtualization: + name: Configure Virtualization (${{ inputs.storage_type }}) + runs-on: ubuntu-22.04 + needs: + - bootstrap + - configure-storage + steps: + - uses: actions/checkout@v4 + - name: Install kubectl CLI + uses: azure/setup-kubectl@v4 + - name: Setup d8 + uses: ./.github/actions/install-d8 + + - name: Check kubeconfig + run: | + echo "[INFO] Configure kube config" + mkdir -p ~/.kube + echo "${{ needs.bootstrap.outputs.kubeconfig-content }}" | base64 -d | base64 -d > ~/.kube/config + chmod 600 ~/.kube/config + kubectl config use-context nested-e2e-nested-sa + + - name: Configure Virtualization + run: | + echo "[INFO] Apply Virtualization module config" + kubectl apply -f -< ~/.kube/config + chmod 600 ~/.kube/config + kubectl config use-context nested-e2e-nested-sa + kubectl get vmclass + + - name: Download dependencies + working-directory: ./test/e2e/ + run: | + echo "Download dependencies" + go mod download + + - name: Create vmclass for e2e tests + run: | + kubectl get vmclass/generic -o json | jq 'del(.status) | del(.metadata) | .metadata = {"name":"generic-for-e2e","annotations":{"virtualmachineclass.virtualization.deckhouse.io/is-default-class":"true"}} ' | kubectl create -f - + + - name: Run E2E + id: e2e-summary + env: + TIMEOUT: ${{ inputs.e2e_timeout }} + CSI: ${{ inputs.storage_type }} + working-directory: ./test/e2e/ + run: | + if [[ "${{ inputs.storage_type }}" == "replicated" ]]; then + export SKIP_IMMEDIATE_SC_CHECK="yes" + fi + STORAGE_CLASS_NAME=${{ inputs.nested_storageclass_name }} FOCUS="VirtualMachineConfiguration" task run:ci -v LABELS="Slow" + + echo "summary=$SUMMARY" >> $GITHUB_OUTPUT + summary_file_name="e2e_summary_${{ inputs.storage_type }}_$DATE.json" + echo "report_file_name=${summary_file_name}" >> $GITHUB_OUTPUT + + echo $SUMMARY > "${summary_file_name}" + + - name: Upload summary test results + uses: actions/upload-artifact@v4 + id: e2e-summary-artifact + if: always() + with: + name: ${{ steps.e2e-summary.outputs.report_file_name }} + path: test/e2e/e2e_summary_${{ inputs.storage_type }}.json + if-no-files-found: ignore + + undeploy-cluster: + name: Undeploy cluster (${{ inputs.storage_type }}) runs-on: ubuntu-latest + needs: + - bootstrap + - configure-storage + - configure-virtualization + - e2e-test + # if: always() + if: cancelled() || success() steps: - - name: Say hello - run: echo "Bootstrap workflow OK" + - uses: actions/checkout@v4 + + - name: Install htpasswd utility + run: | + sudo apt-get update + sudo apt-get install -y apache2-utils + + - name: Setup d8 + uses: ./.github/actions/install-d8 + + - name: Install Task + uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Download artifacts + uses: actions/download-artifact@v5 + with: 
+ name: generated-files-${{ inputs.storage_type }} + path: ${{ env.SETUP_CLUSTER_TYPE_PATH }}/ + + - name: Configure kubectl via azure/k8s-set-context@v4 + uses: azure/k8s-set-context@v4 + with: + method: kubeconfig + context: e2e-cluster-nightly-e2e-virt-sa + kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + + - name: infra-undeploy + working-directory: ${{ env.SETUP_CLUSTER_TYPE_PATH }} + run: | + task infra-undeploy diff --git a/.gitignore b/.gitignore index ae343f44fd..63df742d42 100644 --- a/.gitignore +++ b/.gitignore @@ -46,6 +46,9 @@ local.Dockerfile # direnv .envrc +# dotenv file +.env + # logs log/ logs/ @@ -60,3 +63,6 @@ retry/ # nodejs node_modules/ package-lock.json + +# values +values.yaml diff --git a/test/dvp-static-cluster/Taskfile.yaml b/test/dvp-static-cluster/Taskfile.yaml new file mode 100644 index 0000000000..4b9a4d9204 --- /dev/null +++ b/test/dvp-static-cluster/Taskfile.yaml @@ -0,0 +1,212 @@ +# https://taskfile.dev + +version: "3" + +vars: + NAMESPACE: + sh: yq eval '.namespace' values.yaml + DECKHOUSE_TAG: + sh: yq eval '.deckhouse.tag' values.yaml + DEFAULT_USER: + sh: yq eval '.image.defaultUser' values.yaml + TMP_DIR: ./tmp + SSH_DIR: "{{ .TMP_DIR }}/ssh" + SSH_FILE_NAME: cloud + SSH_PUB_KEY_FILE: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.pub" + SSH_PRIV_KEY_FILE: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}" + DISCOVERED_VALUES_FILE: tmp/discovered-values.yaml + PASSWORD_FILE: "{{ .TMP_DIR }}/password.txt" + PASSWORD_HASH_FILE: "{{ .TMP_DIR }}/password-hash.txt" +tasks: + create-tmp-dir: + desc: Preflight / Create tmp dir + cmds: + - mkdir -p "{{ .TMP_DIR }}" + - mkdir -p "{{ .SSH_DIR }}" + status: + - test -d "{{ .TMP_DIR }}" + - test -d "{{ .SSH_DIR }}" + + ssh-gen: + desc: Preflight / Generate ssh keypair for jump-host + deps: + - create-tmp-dir + cmds: + - ssh-keygen -t ed25519 -b 1024 -f {{ .SSH_PRIV_KEY_FILE }} -N "" -C "cloud" + - chmod 0600 "{{ .SSH_PUB_KEY_FILE }}" + - chmod 0400 "{{ .SSH_PRIV_KEY_FILE }}" + status: + - test -f "{{ .SSH_PRIV_KEY_FILE }}" + + password-gen: + desc: Preflight / Generate password + deps: + - ssh-gen + cmds: + - date +%s | sha256sum | base64 | head -c 10 > {{ .PASSWORD_FILE }} + - | + echo $(cat {{ .TMP_DIR }}/password.txt) | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 > {{ .PASSWORD_HASH_FILE }} + status: + - test -f "{{ .PASSWORD_FILE }}" + - test -f "{{ .PASSWORD_HASH_FILE }}" + + generate-helm-values: + desc: Generate helm values {{ .DISCOVERED_VALUES_FILE }} + deps: + - password-gen + cmds: + - touch {{ .DISCOVERED_VALUES_FILE }} + - | + export SSH_PUB_KEY="$(cat {{ .SSH_PUB_KEY_FILE }})" + yq eval --inplace '.discovered.publicSSHKey = env(SSH_PUB_KEY)' {{ .DISCOVERED_VALUES_FILE }} + - | + export SSH_PRIV_KEY_B64="$(cat {{ .SSH_PRIV_KEY_FILE }} | base64 -w 0)" + yq eval --inplace '.discovered.privateSSHKeyBase64 = env(SSH_PRIV_KEY_B64)' {{ .DISCOVERED_VALUES_FILE }} + - | + export DOMAIN=$(kubectl get mc global -o json | jq '.spec.settings.modules.publicDomainTemplate | split(".")[1:] | join(".")' -rc) + yq eval --inplace '.discovered.domain = env(DOMAIN)' {{ .DISCOVERED_VALUES_FILE }} + - | + export CLUSTER_DOMAIN=$(kubectl -n d8-system exec svc/deckhouse-leader -- deckhouse-controller global values -o json | jq -rc .clusterConfiguration.clusterDomain) + yq eval --inplace '.discovered.clusterDomain = env(CLUSTER_DOMAIN)' {{ .DISCOVERED_VALUES_FILE }} + - | + export PASSWORD_HASH="$(cat {{ .PASSWORD_HASH_FILE }})" + yq eval --inplace '.discovered.passwordHash = env(PASSWORD_HASH)' {{ .DISCOVERED_VALUES_FILE }} + + 
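For reference, after `generate-helm-values` has run (and `render-vm-ips`, defined next, has filled in the VM addresses), `tmp/discovered-values.yaml` ends up with roughly the shape sketched below. Only the keys are taken from the yq expressions above; every value is a placeholder:

```yaml
discovered:
  publicSSHKey: "ssh-ed25519 AAAA... cloud"   # contents of tmp/ssh/cloud.pub
  privateSSHKeyBase64: "LS0tLS1CRUdJTi..."    # base64 of tmp/ssh/cloud
  domain: "example.com"                       # derived from the global publicDomainTemplate
  clusterDomain: "cluster.local"              # from deckhouse-controller global values
  passwordHash: "JDJ5JDEw..."                 # bcrypt hash of tmp/password.txt, base64-encoded
  vmIPs:                                      # map of VM name to IP, filled by render-vm-ips
    ceph-master-0: 10.66.1.10
```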
render-vm-ips: + desc: Get VM IPs + cmds: + - | + if kubectl -n {{ .NAMESPACE }} get vm -o name 2>/dev/null | grep -q .; then + export VM_IPS=$(kubectl -n {{ .NAMESPACE }} get vm -o json | jq -r '[.items[] | select(.status.ipAddress != null) | .metadata.name + ": " + .status.ipAddress] | join("\n")') + yq eval --inplace '.discovered.vmIPs = env(VM_IPS)' {{ .DISCOVERED_VALUES_FILE }} + else + yq eval --inplace '.discovered.vmIPs = {}' {{ .DISCOVERED_VALUES_FILE }} + fi + + render-infra: + desc: Preparation / Generate infra manifests + deps: + - generate-helm-values + cmds: + - helm template static-dvp-over-dvp-infra ./charts/infra -f values.yaml -f {{ .DISCOVERED_VALUES_FILE }} >> {{ .TMP_DIR }}/infra.yaml + + infra-deploy: + deps: + - render-infra + desc: Deploy infra (Namespace/RBAC/Jumphost) + vars: + start_time: + sh: date +%s + cmds: + - kubectl apply -f {{ .TMP_DIR }}/infra.yaml + - kubectl -n {{ .NAMESPACE }} get all + - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=jump-host --timeout=300s + - kubectl -n {{ .NAMESPACE }} get vi -o name | xargs kubectl -n {{ .NAMESPACE }} wait --for='jsonpath={.status.phase}=Ready' --timeout=600s + - kubectl -n {{ .NAMESPACE }} get vd -o name | xargs kubectl -n {{ .NAMESPACE }} wait --for='jsonpath={.status.phase}=Ready' --timeout=600s + - kubectl -n {{ .NAMESPACE }} get vm -o name | xargs kubectl -n {{ .NAMESPACE }} wait --for='jsonpath={.status.phase}=Running' --timeout=600s + - kubectl -n {{ .NAMESPACE }} get vm -o name | xargs kubectl -n {{ .NAMESPACE }} wait --for='jsonpath={.status.conditions[?(@.type=="AgentReady")].status}=True' --timeout=300s + - | + export end_time=$(date +%s) + difference=$((end_time - {{.start_time}})) + date -ud "@$difference" +'%H:%M:%S' + - task: render-vm-ips + + infra-undeploy: + desc: Destroy infra + aliases: + - uninstall + cmds: + - kubectl delete -f {{ .TMP_DIR }}/infra.yaml --timeout 300s || true + - kubectl wait --for=delete namespace/{{ .NAMESPACE }} --timeout 300s || true + + render-cluster-config: + desc: Preparation / Generate cluster config (infra required) + deps: + - ssh-gen + - generate-helm-values + cmds: + - helm template dvp-over-static-dvp-cluster-config ./charts/cluster-config -f values.yaml -f {{ .DISCOVERED_VALUES_FILE }} > {{ .TMP_DIR }}/config.yaml + + render-cluster-manifests: + desc: Preparation / Generate cluster config without cluster bootstrap configs (infra required) + deps: + - render-cluster-config + cmds: + - yq 'select( (.apiVersion + "/" + .kind) != "deckhouse.io/v1/InitConfiguration" and (.apiVersion + "/" + .kind) != "deckhouse.io/v1/ClusterConfiguration" and (.apiVersion + "/" + .kind) != "deckhouse.io/v1/StaticClusterConfiguration" )' {{ .TMP_DIR }}/config.yaml > {{ .TMP_DIR }}/config-manifests.yaml + + render-all: + desc: Generate all manifests + cmds: + - task render-infra + - task render-cluster-config + - task render-cluster-manifests + + dhctl-bootstrap: + desc: Bootstrap DKP over DVP + deps: + - render-cluster-config + vars: + DeckhouseInstallImage: "dev-registry.deckhouse.io/sys/deckhouse-oss/install" + prefix: + sh: yq eval '.storage_type' values.yaml + start_time: + sh: date +%s + JUMPHOST_EXT_IP: + sh: kubectl -n {{ .NAMESPACE }} exec deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short + JUMPHOST_NODEPORT: + sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' + MASTER_NODE_IP: + sh: kubectl -n {{ .NAMESPACE }} get vm {{.prefix}}-master-0 -o 
jsonpath="{.status.ipAddress}" + cmds: + - | + docker run --pull=always \ + -v "{{ .TMP_DIR }}/config.yaml:/config.yaml" \ + -v "{{ .SSH_DIR }}:/tmp/.ssh/" \ + -v "{{ .TMP_DIR }}/dhctl:/tmp/dhctl/" \ + {{ .DeckhouseInstallImage }}:{{ .DECKHOUSE_TAG }} \ + dhctl bootstrap \ + --config=/config.yaml \ + --ssh-agent-private-keys=/tmp/.ssh/{{ .SSH_FILE_NAME }} \ + --ssh-host={{ .MASTER_NODE_IP }} \ + --ssh-user={{ .DEFAULT_USER }} \ + --ssh-bastion-port={{ .JUMPHOST_NODEPORT }} \ + --ssh-bastion-host={{ .JUMPHOST_EXT_IP }} \ + --ssh-bastion-user=user \ + {{.CLI_ARGS}} + - | + export end_time=$(date +%s) + difference=$((end_time - {{.start_time}})) + date -ud "@$difference" +'%H:%M:%S' + + show-connection-info: + desc: Show connection info + vars: + DOMAIN: + sh: yq eval '.discovered.domain' {{ .DISCOVERED_VALUES_FILE }} + PASSWORD: + sh: cat {{ .PASSWORD_FILE }} + JUMPHOST_EXT_IP: + sh: kubectl -n {{ .NAMESPACE }} exec deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short + JUMPHOST_NODEPORT: + sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' + MASTER_NODE_NAME: + sh: kubectl get node -l node.deckhouse.io/group=master -o jsonpath="{.items[0].metadata.name}" + + silent: true + cmds: + - echo "Connect to master task ssh-to-master" + - | + echo "Host cluster master node: {{ .MASTER_NODE_NAME }}" + echo "Namespace: {{ .NAMESPACE }}" + echo "OS User: {{ .DEFAULT_USER }}" + echo "Bastion: user@{{ .JUMPHOST_EXT_IP }}:{{ .JUMPHOST_NODEPORT }}" + echo vms: + kubectl -n {{ .NAMESPACE }} get vm + echo "Grafana URL https://grafana.{{ .NAMESPACE }}.{{ .DOMAIN }}" + echo "Default user/password admin@deckhouse.io/{{ .PASSWORD}}" + + install: + cmds: + - task: infra-deploy + - task: dhctl-bootstrap + - task: show-connection-info diff --git a/test/dvp-static-cluster/charts/cluster-config/Chart.yaml b/test/dvp-static-cluster/charts/cluster-config/Chart.yaml new file mode 100644 index 0000000000..c61a43f29a --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: cluster-config +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0" diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/_helpers.tpl b/test/dvp-static-cluster/charts/cluster-config/templates/_helpers.tpl new file mode 100644 index 0000000000..8c39b8c503 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/_helpers.tpl @@ -0,0 +1,5 @@ +{{- define "cluster-config.full-svc-address" -}} +{{- $ctx := index . 0 -}} +{{- $name := index . 1 -}} +{{ $name }}.{{ $ctx.Values.namespace }}.svc.{{ $ctx.Values.discovered.clusterDomain }} +{{- end }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml new file mode 100644 index 0000000000..4dd4fca6a7 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/cluster-config.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: deckhouse.io/v1 +kind: ClusterConfiguration +clusterType: Static +podSubnetCIDR: {{ .Values.deckhouse.podSubnetCIDR }} +podSubnetNodeCIDRPrefix: '24' +serviceSubnetCIDR: {{ .Values.deckhouse.serviceSubnetCIDR }} +kubernetesVersion: {{ .Values.deckhouse.kubernetesVersion | quote }} +clusterDomain: "internal.{{ .Values.discovered.clusterDomain }}" +defaultCRI: ContainerdV2 +{{- if .Values.deckhouse.proxy }} +proxy: + httpProxy: "{{ .Values.deckhouse.proxy.httpProxy }}" + httpsProxy: "{{ .Values.deckhouse.proxy.httpsProxy }}" + noProxy: + {{- range .Values.deckhouse.proxy.noProxy }} + - "{{ . }}" + {{- end }} +{{- end }} +--- +apiVersion: deckhouse.io/v1 +kind: InitConfiguration +deckhouse: + imagesRepo: dev-registry.deckhouse.io/sys/deckhouse-oss + registryDockerCfg: {{ .Values.deckhouse.registryDockerCfg }} + devBranch: {{ .Values.deckhouse.tag }} +--- +apiVersion: deckhouse.io/v1 +kind: StaticClusterConfiguration +internalNetworkCIDRs: + - 10.66.0.0/16 diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml new file mode 100644 index 0000000000..ea1bd9e0c6 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/disabled-modules.yaml @@ -0,0 +1,11 @@ +{{- $modules := list "upmeter" "local-path-provisioner" "pod-reloader" "secret-copier" "namespace-configurator" "observability" "dashboard" "console" "loki" "log-shipper" -}} + +{{- range $modules }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: {{ . 
}} +spec: + enabled: false +{{- end }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml new file mode 100644 index 0000000000..0f41bc3524 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/master-nodes.yaml @@ -0,0 +1,46 @@ + +{{- $totalNodes := 0 -}} +{{- range .Values.instances.additionalNodes -}} + {{- $totalNodes = add $totalNodes .count -}} +{{- end -}} + +--- +apiVersion: deckhouse.io/v1 +kind: NodeGroup +metadata: + name: master +spec: + disruptions: + approvalMode: Manual + kubelet: + containerLogMaxFiles: 4 + containerLogMaxSize: 50Mi + nodeTemplate: + {{- if eq $totalNodes 0 }} + taints: [] + {{- end }} + labels: + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + nodeType: Static + staticInstances: + count: {{ .Values.instances.masterNodes.count }} + labelSelector: + matchLabels: + role: master + +{{range $_, $i := untilStep 0 (.Values.instances.masterNodes.count | int) 1}} + {{ $vmName := printf "%s-master-%d" $.Values.storage_type $i }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: StaticInstance +metadata: + name: {{ $vmName }} + labels: + role: master +spec: + address: {{ index $.Values.discovered.vmIPs $vmName }} + credentialsRef: + kind: SSHCredentials + name: mvp-static +{{- end }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml new file mode 100644 index 0000000000..ab53cb6446 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-dvp-base.yaml @@ -0,0 +1,133 @@ +{{- $totalNodes := .Values.instances.masterNodes.count -}} +{{- range .Values.instances.additionalNodes -}} + {{- $totalNodes = add $totalNodes .count -}} +{{- end -}} + +{{- if eq .Values.deckhouse.bundle "Default" }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: user-authn +spec: + version: 2 + enabled: true + settings: + controlPlaneConfigurator: + dexCAMode: DoNotNeed + publishAPI: + enabled: true + https: + mode: SelfSigned + global: + kubeconfigGeneratorMasterCA: "" +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: user-authz +spec: + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: descheduler +spec: + enabled: {{ if eq $totalNodes 1 }}false{{ else }}true{{ end }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: prometheus +spec: + version: 1 + enabled: true + settings: + retentionDays: 7 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: prompp +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: prompp +spec: + imageTag: stable + scanInterval: 15s +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: monitoring-applications +spec: + enabled: true +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: node-local-dns +spec: + enabled: true +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: snapshot-controller +spec: + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: snapshot-controller +spec: + imageTag: main + rollback: false + scanInterval: 10m0s +--- +apiVersion: deckhouse.io/v1 +kind: IngressNginxController 
+metadata: + name: main +spec: + inlet: HostPort + enableIstioSidecar: true + ingressClass: nginx + hostPort: + httpPort: 80 + httpsPort: 443 + nodeSelector: + node-role.kubernetes.io/master: '' + tolerations: + - effect: NoSchedule + operator: Exists +--- +apiVersion: deckhouse.io/v1 +kind: ClusterAuthorizationRule +metadata: + name: admin +spec: + subjects: + - kind: User + name: admin@deckhouse.io + accessLevel: SuperAdmin + portForwarding: true +--- +apiVersion: deckhouse.io/v1 +kind: User +metadata: + name: admin +spec: + email: admin@deckhouse.io + # echo "t3chn0l0gi4" | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 + password: {{ .Values.discovered.passwordHash }} +{{ end }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml new file mode 100644 index 0000000000..e389d9bf51 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/modules-minimal.yaml @@ -0,0 +1,65 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: deckhouse +spec: + version: 1 + enabled: true + settings: +{{- if .Values.cse }} + allowExperimentalModules: true +{{- end }} + bundle: {{ .Values.deckhouse.bundle }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: global +spec: + version: 2 + settings: + modules: + publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.discovered.domain }}" +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: cni-cilium +spec: + version: 1 + enabled: true + settings: + tunnelMode: VXLAN +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: node-manager +spec: + enabled: true + version: 2 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: kube-dns +spec: + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: registry-packages-proxy +spec: + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: control-plane-manager +spec: + enabled: true + version: 2 diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml new file mode 100644 index 0000000000..81b59e4edf --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/ngc.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: qemu-guest-agent-install-ubuntu.sh +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["ubuntu-lts", "debian", "astra"] + content: | + bb-apt-install qemu-guest-agent bash-completion + systemctl enable --now qemu-guest-agent +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: astra-d8-dm-modules.conf +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["astra", "ubuntu-lts", "debian"] + content: | + bb-sync-file /etc/modules-load.d/d8-dm-modules.conf - << "EOF" + dm_snapshot + dm_thin_pool + dm_cache + EOF + + systemctl restart systemd-modules-load.service +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: install-tools.sh +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["*"] + content: | + bb-sync-file /etc/profile.d/01-kubectl-aliases.sh - << "EOF" + source <(/opt/deckhouse/bin/kubectl completion bash) + alias k=kubectl + complete -o default -F __start_kubectl k + EOF + + if [ ! 
-f /usr/local/bin/k9s ]; then + K9S_URL=$(curl -s https://api.github.com/repos/derailed/k9s/releases/latest | jq '.assets[] | select(.name=="k9s_Linux_amd64.tar.gz") | .browser_download_url' -r) + curl -L "${K9S_URL}" | tar -xz -C /usr/bin/ "k9s" + fi + + if [ ! -f /usr/local/bin/stern ]; then + STERN_URL=$(curl -s https://api.github.com/repos/stern/stern/releases/latest | jq '.assets[].browser_download_url | select(. | test("linux_amd64"))' -r) + curl -L "${STERN_URL}" | tar -xz -C /usr/bin/ "stern" + fi diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml new file mode 100644 index 0000000000..ad6845ad80 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/nodes.yaml @@ -0,0 +1,44 @@ +{{- range $_, $v := .Values.instances.additionalNodes }} + {{ if (ne ($v.count |int) 0) }} +--- +apiVersion: deckhouse.io/v1 +kind: NodeGroup +metadata: + name: {{ $v.name }} +spec: + disruptions: + approvalMode: Manual + nodeTemplate: + labels: + node-role.deckhouse.io/{{ $v.name }}: "" + {{ if ne $v.name "worker" -}} + taints: + - effect: NoExecute + key: dedicated.deckhouse.io + value: {{ $v.name }} + {{- end }} + nodeType: Static + staticInstances: + count: {{ $v.count }} + labelSelector: + matchLabels: + role: {{ $v.name }} + {{- end }} + + {{- range $_, $i := untilStep 0 ($v.count | int) 1}} + {{- $vmName := printf "%s-%s-%d" $.Values.storage_type $v.name $i }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: StaticInstance +metadata: + name: {{ $vmName }} + labels: + role: {{ $v.name }} +spec: + address: {{ index $.Values.discovered.vmIPs $vmName }} + credentialsRef: + kind: SSHCredentials + name: mvp-static + {{- end }} + +{{- end }} diff --git a/test/dvp-static-cluster/charts/cluster-config/templates/ssh-creds.yaml b/test/dvp-static-cluster/charts/cluster-config/templates/ssh-creds.yaml new file mode 100644 index 0000000000..cfb6d9cd69 --- /dev/null +++ b/test/dvp-static-cluster/charts/cluster-config/templates/ssh-creds.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: deckhouse.io/v1alpha2 +kind: SSHCredentials +metadata: + name: mvp-static +spec: + user: cloud + privateSSHKey: {{ .Values.discovered.privateSSHKeyBase64 }} diff --git a/test/dvp-static-cluster/charts/infra/.helmignore b/test/dvp-static-cluster/charts/infra/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/test/dvp-static-cluster/charts/infra/Chart.yaml b/test/dvp-static-cluster/charts/infra/Chart.yaml new file mode 100644 index 0000000000..e0ab20a245 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: infra +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. 
They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/test/dvp-static-cluster/charts/infra/templates/_helpers.tpl b/test/dvp-static-cluster/charts/infra/templates/_helpers.tpl new file mode 100644 index 0000000000..0f22da234c --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/_helpers.tpl @@ -0,0 +1,119 @@ +{{- define "infra.vm-labels" -}} +{{- $prefix := regexReplaceAll "-\\d+$" . "" -}} +vm: {{ . }} +group: {{ $prefix }} +{{- end }} + +{{- define "infra.vmclass-name" -}} +{{ .Values.namespace }}-cpu +{{- end }} + +{{- define "infra.vd-root-name" -}} +{{ . }}-root +{{- end }} + +{{- define "infra.vm" -}} +{{- $ctx := index . 0 -}} +{{- $name := index . 1 -}} +{{- $cfg := index . 2 -}} +--- +kind: Service +apiVersion: v1 +metadata: + name: {{ $name }} + namespace: {{ $ctx.Values.namespace }} + labels: + {{- include "infra.vm-labels" $name | nindent 4 }} +spec: + clusterIP: None + selector: + {{- include "infra.vm-labels" $name | nindent 4 }} +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualMachine +metadata: + name: {{ $name }} + namespace: {{ $ctx.Values.namespace }} + labels: + {{- include "infra.vm-labels" $name | nindent 4 }} +spec: + blockDeviceRefs: + - kind: VirtualDisk + name: {{ include "infra.vd-root-name" $name }} +{{- range $i, $v := $cfg.additionalDisks }} + - kind: VirtualDisk + name: {{ printf "%s-%d" $name $i }} +{{- end }} + bootloader: {{ $ctx.Values.image.bootloader }} + liveMigrationPolicy: PreferForced + cpu: + coreFraction: {{ $cfg.cpu.coreFraction }} + cores: {{ $cfg.cpu.cores }} + disruptions: + restartApprovalMode: Automatic + enableParavirtualization: true + memory: + size: {{ $cfg.memory.size }} + osType: Generic + provisioning: + type: UserData + userData: | + #cloud-config + ssh_pwauth: true + package_update: true + packages: + - qemu-guest-agent + - jq + - rsync + - bind9-dnsutils + users: + - default + - name: cloud + passwd: $6$rounds=4096$vln/.aPHBOI7BMYR$bBMkqQvuGs5Gyd/1H5DP4m9HjQSy.kgrxpaGEHwkX7KEFV8BS.HZWPitAtZ2Vd8ZqIZRqmlykRCagTgPejt1i. 
+ shell: /bin/bash + sudo: ALL=(ALL) NOPASSWD:ALL + chpasswd: {expire: False} + lock_passwd: false + ssh_authorized_keys: + - {{ $ctx.Values.discovered.publicSSHKey }} + + runcmd: + - systemctl enable --now qemu-guest-agent.service + final_message: "\U0001F525\U0001F525\U0001F525 The system is finally up, after $UPTIME seconds \U0001F525\U0001F525\U0001F525" + runPolicy: AlwaysOn + virtualMachineClassName: {{ include "infra.vmclass-name" $ctx }} +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualDisk +metadata: + name: {{ include "infra.vd-root-name" $name }} + namespace: {{ $ctx.Values.namespace }} + labels: + {{- include "infra.vm-labels" $name | nindent 4 }} +spec: + dataSource: + objectRef: + kind: VirtualImage + name: base-image + type: ObjectRef + persistentVolumeClaim: + size: {{ $cfg.rootDiskSize | default "50Gi" }} + {{- if $ctx.Values.storageClass }} + storageClassName: {{ $ctx.Values.storageClass }} + {{- end }} + + {{range $i, $v := $cfg.additionalDisks }} +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualDisk +metadata: + name: {{ printf "%s-%d" $name $i }} + namespace: {{ $ctx.Values.namespace }} +spec: + persistentVolumeClaim: + size: {{ $v.size }} + {{- if $ctx.Values.storageClass }} + storageClassName: {{ $ctx.Values.storageClass }} + {{- end }} + {{- end }} +{{- end }} diff --git a/test/dvp-static-cluster/charts/infra/templates/ingress.yaml b/test/dvp-static-cluster/charts/infra/templates/ingress.yaml new file mode 100644 index 0000000000..ccffd4b232 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/ingress.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: dvp-over-dvp-80 + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + group: {{ printf "%s-%s" $.Values.storage_type "master" }} +--- +apiVersion: v1 +kind: Service +metadata: + name: dvp-over-dvp-443 + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + group: {{ printf "%s-%s" $.Values.storage_type "master" }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: wildcard-https + namespace: {{ .Values.namespace }} + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" +spec: + ingressClassName: nginx + rules: + {{- range .Values.ingressHosts }} + - host: "{{ . 
}}.{{ $.Values.namespace }}.{{ $.Values.discovered.domain }}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: dvp-over-dvp-443 + port: + number: 443 + {{- end }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: wildcard-http + namespace: {{ .Values.namespace }} + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + ingressClassName: nginx + rules: + - host: "*.{{ .Values.namespace }}.{{ .Values.discovered.domain }}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: dvp-over-dvp-80 + port: + number: 80 diff --git a/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml b/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml new file mode 100644 index 0000000000..4c2f742e10 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/jump-host/deploy.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jump-host + namespace: {{ .Values.namespace }} + labels: + infra: jump-host +spec: + replicas: 1 + selector: + matchLabels: + app: jump-host + template: + metadata: + labels: + app: jump-host + spec: + containers: + - name: jump-host + image: registry-dvp.dev.flant.dev/tools/jump-host:v0.1.2 + imagePullPolicy: Always + resources: + limits: + cpu: "200m" + memory: "200Mi" + requests: + cpu: "200m" + memory: "200Mi" + ports: + - containerPort: 2222 + env: + - name: SSH_KEY + value: "{{ .Values.discovered.publicSSHKey }}" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + securityContext: + runAsNonRoot: true + runAsUser: 1000 diff --git a/test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml b/test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml new file mode 100644 index 0000000000..e795b2aa6f --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/jump-host/svc.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: jump-host + namespace: {{ .Values.namespace }} + labels: + infra: jump-host +spec: + type: NodePort + selector: + app: jump-host + ports: + - name: ssh + protocol: TCP + port: 2222 + targetPort: 2222 + diff --git a/test/dvp-static-cluster/charts/infra/templates/ns.yaml b/test/dvp-static-cluster/charts/infra/templates/ns.yaml new file mode 100644 index 0000000000..77db5f9f65 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.namespace }} diff --git a/test/dvp-static-cluster/charts/infra/templates/vi.yaml b/test/dvp-static-cluster/charts/infra/templates/vi.yaml new file mode 100644 index 0000000000..541d23cbc7 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/vi.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualImage +metadata: + name: base-image + namespace: {{ .Values.namespace }} +spec: + storage: ContainerRegistry + dataSource: + type: HTTP + http: + url: {{ .Values.image.url }} diff --git a/test/dvp-static-cluster/charts/infra/templates/vmc.yaml b/test/dvp-static-cluster/charts/infra/templates/vmc.yaml new file mode 100644 index 0000000000..c91d76f6c0 --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/vmc.yaml @@ -0,0 +1,7 @@ +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualMachineClass +metadata: + name: {{ include "infra.vmclass-name" . 
}} +spec: + cpu: + type: Discovery diff --git a/test/dvp-static-cluster/charts/infra/templates/vms.yaml b/test/dvp-static-cluster/charts/infra/templates/vms.yaml new file mode 100644 index 0000000000..a55beaac3d --- /dev/null +++ b/test/dvp-static-cluster/charts/infra/templates/vms.yaml @@ -0,0 +1,12 @@ + +{{- range $_, $i := untilStep 0 (.Values.instances.masterNodes.count | int) 1}} + {{- $vmName := printf "%s-master-%d" $.Values.storage_type $i -}} + {{ include "infra.vm" (list $ $vmName $.Values.instances.masterNodes.cfg) | nindent 0 }} +{{- end }} + +{{- range $_, $v := .Values.instances.additionalNodes }} + {{range $_, $i := untilStep 0 ($v.count | int) 1}} + {{- $vmName := printf "%s-%s-%d" $.Values.storage_type $v.name $i -}} + {{ include "infra.vm" (list $ $vmName $v.cfg) | nindent 0}} + {{- end }} +{{- end }} diff --git a/test/dvp-static-cluster/storage/ceph/00-ms.yaml b/test/dvp-static-cluster/storage/ceph/00-ms.yaml new file mode 100644 index 0000000000..64caf5c439 --- /dev/null +++ b/test/dvp-static-cluster/storage/ceph/00-ms.yaml @@ -0,0 +1,10 @@ +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleSource +metadata: + name: deckhouse-prod +spec: + registry: + ca: "" + dockerCfg: "" + repo: registry.deckhouse.io/deckhouse/ee/modules + scheme: HTTPS diff --git a/test/dvp-static-cluster/storage/ceph/01-mc.yaml b/test/dvp-static-cluster/storage/ceph/01-mc.yaml new file mode 100644 index 0000000000..1f711e905e --- /dev/null +++ b/test/dvp-static-cluster/storage/ceph/01-mc.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: operator-ceph +spec: + enabled: true + source: deckhouse-prod + version: 1 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: csi-ceph +spec: + enabled: true + source: deckhouse + version: 1 +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: csi-ceph +spec: + imageTag: main + scanInterval: 10m diff --git a/test/dvp-static-cluster/storage/ceph/02-sa.yaml b/test/dvp-static-cluster/storage/ceph/02-sa.yaml new file mode 100644 index 0000000000..caa601cd99 --- /dev/null +++ b/test/dvp-static-cluster/storage/ceph/02-sa.yaml @@ -0,0 +1,74 @@ +--- +# Service account for the job that reports the Ceph version in an image +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-cmd-reporter + namespace: d8-operator-ceph # namespace:cluster + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator +imagePullSecrets: + - name: operator-ceph-registrysecret +--- +# Service account for Ceph mgrs +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-mgr + namespace: d8-operator-ceph # namespace:cluster + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator +imagePullSecrets: + - name: operator-ceph-registrysecret +--- +# Service account for Ceph OSDs +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-osd + namespace: d8-operator-ceph # namespace:cluster + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator +imagePullSecrets: + - name: operator-ceph-registrysecret +--- +# Service account for job that purges OSDs from a Rook-Ceph cluster +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-purge-osd + namespace: d8-operator-ceph # namespace:cluster +imagePullSecrets: + - name: operator-ceph-registrysecret +--- +# Service account for RGW server +apiVersion: v1 +kind: ServiceAccount +metadata: + 
name: rook-ceph-rgw + namespace: d8-operator-ceph # namespace:cluster + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator +imagePullSecrets: + - name: operator-ceph-registrysecret +--- +# Service account for RGW server +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-default + namespace: d8-operator-ceph # namespace:cluster + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator +imagePullSecrets: + - name: operator-ceph-registrysecret diff --git a/test/dvp-static-cluster/storage/ceph/03-cm.yaml b/test/dvp-static-cluster/storage/ceph/03-cm.yaml new file mode 100644 index 0000000000..3ce8e04923 --- /dev/null +++ b/test/dvp-static-cluster/storage/ceph/03-cm.yaml @@ -0,0 +1,245 @@ +apiVersion: v1 +data: + CSI_CEPHFS_FSGROUPPOLICY: File + CSI_CEPHFS_PLUGIN_RESOURCE: | + - name : driver-registrar + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + cpu: 100m + - name : csi-cephfsplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + cpu: 500m + - name : liveness-prometheus + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + cpu: 100m + CSI_CEPHFS_PROVISIONER_RESOURCE: | + - name : csi-provisioner + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 200m + - name : csi-resizer + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 200m + - name : csi-attacher + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 200m + - name : csi-snapshotter + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 200m + - name : csi-cephfsplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + cpu: 500m + - name : liveness-prometheus + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + cpu: 100m + CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true" + CSI_ENABLE_CSIADDONS: "false" + CSI_ENABLE_ENCRYPTION: "false" + CSI_ENABLE_HOST_NETWORK: "true" + CSI_ENABLE_METADATA: "false" + CSI_ENABLE_NFS_SNAPSHOTTER: "true" + CSI_ENABLE_OMAP_GENERATOR: "false" + CSI_ENABLE_RBD_SNAPSHOTTER: "true" + CSI_ENABLE_READ_AFFINITY: "false" + CSI_ENABLE_TOPOLOGY: "false" + CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true" + CSI_GRPC_TIMEOUT_SECONDS: "150" + CSI_NFS_FSGROUPPOLICY: File + CSI_NFS_PLUGIN_RESOURCE: | + - name : driver-registrar + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + cpu: 100m + - name : csi-nfsplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + cpu: 500m + CSI_NFS_PROVISIONER_RESOURCE: | + - name : csi-provisioner + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 200m + - name : csi-nfsplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + cpu: 500m + CSI_PLUGIN_ENABLE_SELINUX_HOST_MOUNT: "false" + CSI_PLUGIN_PRIORITY_CLASSNAME: system-node-critical + CSI_PROVISIONER_PRIORITY_CLASSNAME: system-cluster-critical + CSI_PROVISIONER_REPLICAS: "2" + CSI_RBD_ATTACH_REQUIRED: "true" + CSI_RBD_FSGROUPPOLICY: File + CSI_RBD_PLUGIN_RESOURCE: | + - name : driver-registrar + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + cpu: 100m + - name : csi-rbdplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + cpu: 500m + - name : liveness-prometheus + resource: + requests: + memory: 128Mi + cpu: 50m + 
limits: + memory: 256Mi + cpu: 100m + CSI_RBD_PROVISIONER_RESOURCE: | + - name : csi-provisioner + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 200m + - name : csi-resizer + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 200m + - name : csi-attacher + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 200m + - name : csi-snapshotter + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 200m + - name : csi-rbdplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + cpu: 500m + - name : csi-omap-generator + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + cpu: 500m + - name : liveness-prometheus + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + cpu: 100m + DISCOVER_DAEMON_RESOURCES: | + - name: DISCOVER_DAEMON_RESOURCES + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + ROOK_CEPH_ALLOW_LOOP_DEVICES: "true" + ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15" + ROOK_CSI_ENABLE_CEPHFS: "false" + ROOK_CSI_ENABLE_GRPC_METRICS: "true" + ROOK_CSI_ENABLE_NFS: "false" + ROOK_CSI_ENABLE_RBD: "false" + ROOK_CSI_IMAGE_PULL_POLICY: IfNotPresent + ROOK_CSIADDONS_IMAGE: quay.io/csiaddons/k8s-sidecar:v0.5.0 + ROOK_DISABLE_ADMISSION_CONTROLLER: "true" + ROOK_DISABLE_DEVICE_HOTPLUG: "false" + ROOK_DISCOVER_DEVICES_INTERVAL: 60m + ROOK_ENABLE_DISCOVERY_DAEMON: "false" + ROOK_LOG_LEVEL: INFO + ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true" + ROOK_WATCH_FOR_NODE_FAILURE: "true" +kind: ConfigMap +metadata: + labels: + app: operator-ceph + app.kubernetes.io/managed-by: Helm + heritage: deckhouse + module: operator-ceph + name: rook-ceph-operator-config + namespace: d8-operator-ceph diff --git a/test/dvp-static-cluster/storage/ceph/04-cluster.yaml b/test/dvp-static-cluster/storage/ceph/04-cluster.yaml new file mode 100644 index 0000000000..acfc4155d3 --- /dev/null +++ b/test/dvp-static-cluster/storage/ceph/04-cluster.yaml @@ -0,0 +1,111 @@ +--- +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: rook-ceph-cluster + namespace: d8-operator-ceph +spec: + cephVersion: + image: quay.io/ceph/ceph:v18.2.2 + allowUnsupported: false + dataDirHostPath: /var/lib/rook + skipUpgradeChecks: false + continueUpgradeAfterChecksEvenIfNotHealthy: false + waitTimeoutForHealthyOSDInMinutes: 10 + mon: + count: 3 + allowMultiplePerNode: false + mgr: + count: 2 + allowMultiplePerNode: false + modules: + - name: pg_autoscaler + enabled: false + placement: + all: + tolerations: + - operator: Exists + dashboard: + enabled: true + ssl: true + annotations: + mgr: + prometheus.deckhouse.io/sample-limit: "10000" + network: + connections: + encryption: + enabled: false + compression: + enabled: false + requireMsgr2: false + crashCollector: + disable: false + logCollector: + enabled: true + periodicity: daily # one of: hourly, daily, weekly, monthly + maxLogSize: 100M # SUFFIX may be 'M' or 'G'. Must be at least 1M. 
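+  # cleanupPolicy controls whether Rook wipes host data when this CephCluster is deleted;
+  # cleanup stays disabled until confirmation is set to "yes-really-destroy-data".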
+ cleanupPolicy: + confirmation: "" + sanitizeDisks: + method: quick + dataSource: zero + iteration: 1 + allowUninstallWithVolumes: false + labels: + mon: + ceph-component: mon + prepareosd: + ceph-component: osd-prepare + osd: + ceph-component: osd + mgr: + ceph-component: mgr + prometheus.deckhouse.io/custom-target: ceph + prometheus.deckhouse.io/port: "9283" + resources: + osd: + limits: + memory: "4096Mi" + requests: + memory: "1024Mi" + removeOSDsIfOutAndSafeToRemove: false + priorityClassNames: + mon: system-node-critical + osd: system-node-critical + mgr: system-cluster-critical + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: true + deviceFilter: "^sd[c-f]" + onlyApplyOSDPlacement: false + disruptionManagement: + managePodBudgets: true + osdMaintenanceTimeout: 30 + pgHealthCheckTimeout: 0 + healthCheck: + daemonHealth: + mon: + disabled: false + interval: 45s + osd: + disabled: false + interval: 60s + status: + disabled: false + interval: 60s + # Change pod liveness probe timing or threshold values. Works for all mon,mgr,osd daemons. + livenessProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false + # Change pod startup probe timing or threshold values. Works for all mon,mgr,osd daemons. + startupProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false diff --git a/test/dvp-static-cluster/storage/ceph/05-blockpool.yaml b/test/dvp-static-cluster/storage/ceph/05-blockpool.yaml new file mode 100644 index 0000000000..d018703992 --- /dev/null +++ b/test/dvp-static-cluster/storage/ceph/05-blockpool.yaml @@ -0,0 +1,13 @@ +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool +metadata: + name: ceph-rbd-pool-r2 + namespace: d8-operator-ceph # namespace:cluster +spec: + parameters: + pg_num: "128" + pgp_num: "128" + failureDomain: host + replicated: + size: 2 + requireSafeReplicaSize: false diff --git a/test/dvp-static-cluster/storage/ceph/06-toolbox.yaml b/test/dvp-static-cluster/storage/ceph/06-toolbox.yaml new file mode 100644 index 0000000000..c4eed53149 --- /dev/null +++ b/test/dvp-static-cluster/storage/ceph/06-toolbox.yaml @@ -0,0 +1,83 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rook-ceph-tools + namespace: d8-operator-ceph +spec: + replicas: 1 + selector: + matchLabels: + app: ceph-tools + template: + metadata: + labels: + app: ceph-tools + spec: + imagePullSecrets: + - name: operator-ceph-registrysecret + initContainers: + - name: configure + image: quay.io/ceph/ceph:v18.2.2 + command: + - /bin/bash + - -c + - | + set -euo pipefail + + cat << EOF > /etc/ceph/ceph.conf + [global] + mon_host = $(sed 's/[a-z]=//g' /etc/rook/mon-endpoints) + EOF + + cat << EOF > /etc/ceph/ceph.client.admin.keyring + [$ROOK_CEPH_USERNAME] + key = $ROOK_CEPH_SECRET + EOF + env: + - name: ROOK_CEPH_USERNAME + valueFrom: + secretKeyRef: + key: ceph-username + name: rook-ceph-mon + - name: ROOK_CEPH_SECRET + valueFrom: + secretKeyRef: + key: ceph-secret + name: rook-ceph-mon + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - mountPath: /etc/rook + name: mon-endpoint-volume + containers: + - name: ceph-tools + command: + - sleep + - infinity + image: quay.io/ceph/ceph:v18.2.2 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + tty: true + workingDir: /var/lib/ceph + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - mountPath: /var/lib/ceph + name: homedir + securityContext: + runAsGroup: 167 + runAsNonRoot: true + runAsUser: 167 + 
volumes: + - name: mon-endpoint-volume + configMap: + defaultMode: 420 + items: + - key: data + path: mon-endpoints + name: rook-ceph-mon-endpoints + - name: ceph-config + emptyDir: {} + - name: homedir + emptyDir: {} diff --git a/test/dvp-static-cluster/storage/ceph/ceph-configure.sh b/test/dvp-static-cluster/storage/ceph/ceph-configure.sh new file mode 100644 index 0000000000..aad18a1bf5 --- /dev/null +++ b/test/dvp-static-cluster/storage/ceph/ceph-configure.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ceph_user_pool=ceph-rbd-pool-r2 +echo "Use user $ceph_user_pool" +echo "Set permissions for user $ceph_user_pool (mgr 'allow *' mon 'allow *' osd 'allow *' mds 'allow *')" +usr=$(kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- \ + ceph auth get-or-create client.$ceph_user_pool mon 'allow *' mgr 'allow *' osd "allow *") +echo "Get fsid" +fsid=$(kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph fsid) + +userKey="${usr#*key = }" +ceph_monitors_ip=$(kubectl -n d8-operator-ceph get svc | grep mon | awk '{print $3}') +monitors_yaml=$( + for monitor_ip in $ceph_monitors_ip; do + echo " - $monitor_ip:6789" + done +) + +# Verify we have monitors +if [ -z "$monitors_yaml" ]; then + echo "ERROR: No Ceph monitors found" + exit 1 +fi + +echo "Create CephClusterConnection" +kubectl apply -f - <> "${manifest}" +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: LVMVolumeGroup +metadata: + name: vg-data-${node_name}-${dev_path} +spec: + actualVGNameOnTheNode: vg-thin-data + type: Local + local: + nodeName: ${dev_node} + blockDeviceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - ${dev_name} + thinPools: + - name: thin-data + size: ${LVMVG_SIZE} + allocationLimit: 100% +EOF + +done + +kubectl apply -f "${manifest}" diff --git a/test/dvp-static-cluster/storage/sds-replicated/mc.yaml b/test/dvp-static-cluster/storage/sds-replicated/mc.yaml new file mode 100644 index 0000000000..b7d6abda99 --- /dev/null +++ b/test/dvp-static-cluster/storage/sds-replicated/mc.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-node-configurator +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-replicated-volume +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-node-configurator +spec: + imageTag: main + scanInterval: 15s +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-replicated-volume +spec: + imageTag: main + scanInterval: 15s diff --git a/test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh b/test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh new file mode 100644 index 0000000000..7d93443620 --- /dev/null +++ b/test/dvp-static-cluster/storage/sds-replicated/rsc-gen.sh @@ -0,0 +1,87 @@ 
+#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +manifest=sds-rsp-rsc.yaml +replicatedStoragePoolName=thin-data + +pools=$(kubectl get lvmvolumegroup -o json | jq '.items[] | {name: .metadata.name, thinPoolName: .spec.thinPools[0].name}' -rc) + +cat << EOF > "${manifest}" +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStoragePool +metadata: + name: $replicatedStoragePoolName +spec: + type: LVMThin + lvmVolumeGroups: +EOF + +for pool in ${pools}; do + vg_name=$(echo $pool | jq -r '.name'); + pool_node=$(echo $pool | jq -r '.thinPoolName'); + echo "${pool_node} ${vg_name}" +cat << EOF >> "${manifest}" + - name: ${vg_name} + thinPoolName: ${pool_node} +EOF +done + +cat << EOF >> "${manifest}" +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStorageClass +metadata: + name: nested-thin-r2 +spec: + replication: Availability + storagePool: $replicatedStoragePoolName + reclaimPolicy: Delete + volumeAccess: PreferablyLocal + topology: Ignored +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStorageClass +metadata: + name: nested-thin-r1 +spec: + replication: None + storagePool: $replicatedStoragePoolName + reclaimPolicy: Delete + volumeAccess: PreferablyLocal + topology: Ignored +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStorageClass +metadata: + name: nested-thin-r1-immediate +spec: + replication: None + storagePool: $replicatedStoragePoolName + reclaimPolicy: Delete + volumeAccess: Any + topology: Ignored +EOF + +kubectl apply -f ${manifest} + +DEFAULT_STORAGE_CLASS=nested-thin-r1 +kubectl patch mc global --type='json' -p='[{"op": "replace", "path": "/spec/settings/defaultClusterStorageClass", "value": "'"$DEFAULT_STORAGE_CLASS"'"}]' + +sleep 2 +echo "Showing Storage Classes" +kubectl get storageclass +echo " " diff --git a/test/dvp-static-cluster/tools/deckhouse-queue.sh b/test/dvp-static-cluster/tools/deckhouse-queue.sh new file mode 100644 index 0000000000..6492aeac0e --- /dev/null +++ b/test/dvp-static-cluster/tools/deckhouse-queue.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
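+
+# deckhouse-queue.sh: waits for the deckhouse Deployment in d8-system to become
+# Available (up to 60 attempts) and then for the Deckhouse task queues to drain,
+# printing pod and queue state along the way; exits non-zero if the operator
+# never becomes ready.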
+ +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +get_current_date() { + date +"%H:%M:%S %d-%m-%Y" +} + +get_timestamp() { + date +%s +} + +log_info() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${BLUE}[INFO]${NC} $message" +} + +log_success() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${GREEN}[SUCCESS]${NC} $message" +} + +log_warning() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${YELLOW}[WARNING]${NC} $message" +} + +log_error() { + local message="$1" + local timestamp=$(get_current_date) + echo -e "[$timestamp] ${RED}[ERROR]${NC} $message" +} + +kubectl() { + /opt/deckhouse/bin/kubectl $@ +} + +d8() { + /opt/deckhouse/bin/d8 $@ +} + + +d8_queue_main() { + echo "$( d8 p queue main | grep -Po '(?<=length )([0-9]+)' )" +} + +d8_queue_list() { + d8 p queue list | grep -Po '([0-9]+)(?= active)' +} + +d8_queue() { + local count=90 + + for i in $(seq 1 $count) ; do + if [ $(d8_queue_list) == "0" ]; then + log_success "Queue is clear" + break + else + log_info "Show queue first 25 lines" + d8 p queue list | head -n25 || echo "Failed to retrieve queue" + fi + log_info "Wait until queues are empty ${i}/${count}" + sleep 10 + done +} + +d8_ready() { + local ready=false + local count=60 + common_start_time=$(get_timestamp) + for i in $(seq 1 $count) ; do + start_time=$(get_timestamp) + if kubectl -n d8-system wait deploy/deckhouse --for condition=available --timeout=20s 2>/dev/null; then + ready=true + break + fi + end_time=$(get_timestamp) + difference=$((end_time - start_time)) + log_info "Wait until deckhouse is ready ${i}/${count} after ${difference}s" + if (( i % 5 == 0 )); then + kubectl -n d8-system get pods + d8 p queue list | head -n25 || echo "Failed to retrieve queue" + fi + done + + if [ "$ready" = true ]; then + log_success "Deckhouse is Ready!" + log_info "Checking queues" + d8_queue + else + common_end_time=$(get_timestamp) + common_difference=$((common_end_time - common_start_time)) + common_formatted_difference=$(date -u +'%H:%M:%S' -d "@$common_difference") + log_error "Deckhouse is not ready after ${count} attempts and ${common_formatted_difference} time, check its queue for errors:" + d8 p queue main | head -n25 + exit 1 + fi +} + +start_time=$(get_timestamp) +log_info "Checking that deckhouse is ready" +d8_ready +end_time=$(get_timestamp) +difference=$((end_time - start_time)) +log_success "Deckhouse is ready after $(date -ud "@$difference" +'%H:%M:%S')" diff --git a/test/dvp-static-cluster/tools/gen-kubeconfig.sh b/test/dvp-static-cluster/tools/gen-kubeconfig.sh new file mode 100644 index 0000000000..02e01b5e55 --- /dev/null +++ b/test/dvp-static-cluster/tools/gen-kubeconfig.sh @@ -0,0 +1,184 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
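+
+# gen-kubeconfig.sh: generates a kubeconfig for a ServiceAccount-based user on the
+# nested cluster. It applies the SA, token Secret and ClusterAuthorizationRule and is
+# expected to write the resulting credentials to FILE_NAME (default: /tmp/kube.config).
+# Usage: gen-kubeconfig.sh <SA_NAME> <CLUSTER_PREFIX> <CLUSTER_NAME> [FILE_NAME]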
+
+get_current_date() {
+  date +"%H:%M:%S %d-%m-%Y"
+}
+
+get_timestamp() {
+  date +%s
+}
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+NC='\033[0m' # No Color
+
+log_info() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "${BLUE}[INFO]${NC} $message"
+  if [ -n "$LOG_FILE" ]; then
+    echo "[$timestamp] [INFO] $message" >> "$LOG_FILE"
+  fi
+}
+
+log_success() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "${GREEN}[SUCCESS]${NC} $message"
+  if [ -n "$LOG_FILE" ]; then
+    echo "[$timestamp] [SUCCESS] $message" >> "$LOG_FILE"
+  fi
+}
+
+log_warning() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "${YELLOW}[WARNING]${NC} $message"
+  if [ -n "$LOG_FILE" ]; then
+    echo "[$timestamp] [WARNING] $message" >> "$LOG_FILE"
+  fi
+}
+
+log_error() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "${RED}[ERROR]${NC} $message"
+  if [ -n "$LOG_FILE" ]; then
+    echo "[$timestamp] [ERROR] $message" >> "$LOG_FILE"
+  fi
+}
+
+exit_trap() {
+  echo ""
+  log_info "Exiting..."
+  echo ""
+  exit 0
+}
+
+kubectl() {
+  sudo /opt/deckhouse/bin/kubectl "$@"
+}
+
+trap exit_trap SIGINT SIGTERM
+
+
+SA_NAME=$1
+CLUSTER_PREFIX=$2
+CLUSTER_NAME=$3
+FILE_NAME=$4
+
+if [[ -z "$SA_NAME" ]] || [[ -z "$CLUSTER_PREFIX" ]] || [[ -z "$CLUSTER_NAME" ]]; then
+  log_error "Usage: gen-kubeconfig.sh <SA_NAME> <CLUSTER_PREFIX> <CLUSTER_NAME> [FILE_NAME]"
+  exit 1
+fi
+
+if [[ -z "$FILE_NAME" ]]; then
+  FILE_NAME=/tmp/kube.config
+fi
+
+SA_TOKEN=virt-${CLUSTER_PREFIX}-${SA_NAME}-token
+SA_CAR_NAME=virt-${CLUSTER_PREFIX}-${SA_NAME}
+
+USER_NAME=${SA_NAME}
+CONTEXT_NAME=${CLUSTER_NAME}-${USER_NAME}
+
+if kubectl cluster-info > /dev/null 2>&1; then
+  log_success "Access to Kubernetes cluster exists."
+else
+  log_error "No access to Kubernetes cluster or configuration issue."
+  exit 1
+fi
+
+sleep 2
+log_info "===="
+log_info "The kubeconfig can only be generated while you are connected to the k8s cluster, either directly or via an SSH tunnel"
+log_info "===="
+sleep 2
+
+
+log_info "Apply SA, Secrets and ClusterAuthorizationRule"
+kubectl apply -f -<> $GITHUB_ENV
 START_TIME=$(date +"%H:%M:%S")
 echo "START_TIME=$START_TIME" >> $GITHUB_ENV
-go tool ginkgo -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT
+if [[ -n $FOCUS ]];then
+  go tool ginkgo --focus "$FOCUS" -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT
+else
+  go tool ginkgo -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT
+fi
+
 EXIT_CODE="${PIPESTATUS[0]}"
 RESULT=$(sed -e "s/\x1b\[[0-9;]*m//g" $GINKGO_RESULT | grep --color=never -E "FAIL!|SUCCESS!")
 if [[ $RESULT == FAIL!* || $EXIT_CODE -ne "0" ]]; then