From cda5403528e0e49f721e2c4f087d0e4d3c8d0a80 Mon Sep 17 00:00:00 2001 From: Gustavo Lira Date: Wed, 21 Jan 2026 10:43:59 -0300 Subject: [PATCH] feat(ci): add /test rerun-failed-tests command Add a new CI command that re-executes only the tests that failed in the previous e2e-ocp-helm run, optimizing time and resources. New files: - retest-failed-utils.sh: Utility functions for fetching JUnit artifacts from GCS, parsing failed tests, and running specific test files - jobs/ocp-rerun-failed-tests.sh: Main job handler that orchestrates fetching previous results, deploying only needed namespaces, and running failed tests The command: - Fetches JUnit results from the previous e2e-ocp-helm run via GCS - Parses which tests failed for showcase and showcase-rbac namespaces - Deploys only the namespaces that had failures - Runs only the tests that previously failed using Playwright - Returns success if no previous run exists or no tests failed Co-Authored-By: Claude Opus 4.5 --- .ibm/pipelines/jobs/ocp-rerun-failed-tests.sh | 388 ++++++++++++++++ .ibm/pipelines/openshift-ci-tests.sh | 7 + .ibm/pipelines/retest-failed-utils.sh | 419 ++++++++++++++++++ 3 files changed, 814 insertions(+) create mode 100644 .ibm/pipelines/jobs/ocp-rerun-failed-tests.sh create mode 100644 .ibm/pipelines/retest-failed-utils.sh diff --git a/.ibm/pipelines/jobs/ocp-rerun-failed-tests.sh b/.ibm/pipelines/jobs/ocp-rerun-failed-tests.sh new file mode 100644 index 0000000000..e002429528 --- /dev/null +++ b/.ibm/pipelines/jobs/ocp-rerun-failed-tests.sh @@ -0,0 +1,388 @@ +#!/bin/bash +# +# Job handler for re-running only the failed tests from a previous e2e-ocp-helm execution. +# +# This job: +# 1. Fetches JUnit results from the previous e2e-ocp-helm run +# 2. Parses which tests failed for each namespace (showcase, showcase-rbac) +# 3. Deploys only the namespaces that had failures +# 4. 
Runs only the tests that previously failed +# + +# shellcheck source=.ibm/pipelines/lib/log.sh +source "${DIR}/lib/log.sh" +# shellcheck source=.ibm/pipelines/playwright-projects.sh +source "${DIR}/playwright-projects.sh" +# shellcheck source=.ibm/pipelines/retest-failed-utils.sh +source "${DIR}/retest-failed-utils.sh" + +####################################### +# Main handler for the rerun-failed-tests job +####################################### +handle_ocp_rerun_failed_tests() { + export NAME_SPACE="${NAME_SPACE:-showcase}" + export NAME_SPACE_RBAC="${NAME_SPACE_RBAC:-showcase-rbac}" + export NAME_SPACE_POSTGRES_DB="${NAME_SPACE_POSTGRES_DB:-postgress-external-db}" + + log::section "Rerun Failed Tests Job" + + # Validate required dependencies + if ! validate_dependencies; then + return 1 + fi + + # Get PR information + get_pr_info + + if [[ -z "${PULL_NUMBER:-}" ]]; then + log::error "PULL_NUMBER is not set. Cannot determine which PR to fetch results for." + log::info "This job should only run in a PR context." + return 1 + fi + + # Login to OpenShift cluster + log::info "Logging into OpenShift cluster..." + oc_login + log::info "OCP version: $(oc version)" + + # Get cluster router base + K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') + export K8S_CLUSTER_ROUTER_BASE + + # Create temp directory for JUnit files + local temp_dir="/tmp/rerun-failed-tests" + rm -rf "${temp_dir}" + mkdir -p "${temp_dir}" + + # Get previous build ID + local build_id + build_id=$(get_previous_failed_build_id "${REPO_OWNER}" "${REPO_NAME}" "${PULL_NUMBER}") + + if [[ -z "${build_id}" ]]; then + log::warn "No previous build found for e2e-ocp-helm job." + log::info "Nothing to rerun. Exiting with success." 
+ return 0 + fi + + log::info "Previous build ID: ${build_id}" + + # Fetch and parse JUnit results for each namespace + local showcase_junit="${temp_dir}/showcase-junit.xml" + local showcase_rbac_junit="${temp_dir}/showcase-rbac-junit.xml" + + local showcase_url + showcase_url=$(build_previous_run_artifact_url "${REPO_OWNER}" "${REPO_NAME}" "${PULL_NUMBER}" \ + "${RERUN_TARGET_JOB}" "${build_id}" "${NAME_SPACE}") + + local showcase_rbac_url + showcase_rbac_url=$(build_previous_run_artifact_url "${REPO_OWNER}" "${REPO_NAME}" "${PULL_NUMBER}" \ + "${RERUN_TARGET_JOB}" "${build_id}" "${NAME_SPACE_RBAC}") + + # Fetch JUnit results + local has_showcase_results=false + local has_rbac_results=false + + if fetch_previous_junit_results "${showcase_url}" "${showcase_junit}"; then + has_showcase_results=true + fi + + if fetch_previous_junit_results "${showcase_rbac_url}" "${showcase_rbac_junit}"; then + has_rbac_results=true + fi + + if [[ "${has_showcase_results}" == "false" && "${has_rbac_results}" == "false" ]]; then + log::warn "Could not fetch JUnit results from previous run." + log::info "The previous run may not have completed or artifacts may have expired." + log::info "Nothing to rerun. Exiting with success." 
+ return 0 + fi + + # Parse failed tests for each namespace + local -a showcase_failed_tests=() + local -a rbac_failed_tests=() + + if [[ "${has_showcase_results}" == "true" ]]; then + local showcase_failures + showcase_failures=$(get_failed_test_count "${showcase_junit}") + log::info "Showcase namespace: ${showcase_failures} failures" + + if [[ "${showcase_failures}" -gt 0 ]]; then + mapfile -t showcase_failed_tests < <(parse_failed_tests_from_junit "${showcase_junit}") + # Filter to only existing test files + mapfile -t showcase_failed_tests < <(filter_existing_test_files "${showcase_failed_tests[@]}") + fi + fi + + if [[ "${has_rbac_results}" == "true" ]]; then + local rbac_failures + rbac_failures=$(get_failed_test_count "${showcase_rbac_junit}") + log::info "Showcase-RBAC namespace: ${rbac_failures} failures" + + if [[ "${rbac_failures}" -gt 0 ]]; then + mapfile -t rbac_failed_tests < <(parse_failed_tests_from_junit "${showcase_rbac_junit}") + # Filter to only existing test files + mapfile -t rbac_failed_tests < <(filter_existing_test_files "${rbac_failed_tests[@]}") + fi + fi + + # Check if there are any tests to rerun + if [[ ${#showcase_failed_tests[@]} -eq 0 && ${#rbac_failed_tests[@]} -eq 0 ]]; then + log::success "No failed tests found in previous run!" + log::info "Either all tests passed or the failed test files no longer exist." + return 0 + fi + + log::section "Tests to Rerun" + log::info "Showcase failed tests: ${#showcase_failed_tests[@]}" + log::info "RBAC failed tests: ${#rbac_failed_tests[@]}" + + # Setup cluster (operators, etc.) 
- needed for deployment + cluster_setup_ocp_helm + + # Deploy and test based on which namespaces had failures + local overall_result=0 + + if [[ ${#showcase_failed_tests[@]} -gt 0 ]]; then + log::section "Rerunning Showcase Failed Tests" + deploy_and_retest_namespace \ + "${NAME_SPACE}" \ + "${RELEASE_NAME}" \ + "${PW_PROJECT_SHOWCASE}" \ + showcase_failed_tests[@] || overall_result=1 + fi + + if [[ ${#rbac_failed_tests[@]} -gt 0 ]]; then + log::section "Rerunning RBAC Failed Tests" + deploy_and_retest_namespace_rbac \ + "${NAME_SPACE_RBAC}" \ + "${RELEASE_NAME_RBAC}" \ + "${PW_PROJECT_SHOWCASE_RBAC}" \ + rbac_failed_tests[@] || overall_result=1 + fi + + # Cleanup temp directory + rm -rf "${temp_dir}" + + # Report final result + if [[ ${overall_result} -eq 0 ]]; then + log::success "All rerun tests passed!" + else + log::error "Some rerun tests still failed." + save_overall_result 1 + fi + + return ${overall_result} +} + +####################################### +# Deploy showcase namespace and retest failed tests +# Arguments: +# namespace: The namespace to deploy to +# release_name: Helm release name +# playwright_project: Playwright project name +# failed_tests_ref: Name reference to array of failed test files +####################################### +deploy_and_retest_namespace() { + local namespace="${1}" + local release_name="${2}" + local playwright_project="${3}" + # shellcheck disable=SC2034 # nameref variable used via indirection + local -n failed_tests="${4}" # NOSONAR - nameref is used when passed to run_failed_tests_and_report + + log::info "Deploying to namespace: ${namespace}" + + # Configure namespace + configure_namespace "${namespace}" + deploy_redis_cache "${namespace}" + + cd "${DIR}" + + local rhdh_base_url="https://${release_name}-developer-hub-${namespace}.${K8S_CLUSTER_ROUTER_BASE}" + apply_yaml_files "${DIR}" "${namespace}" "${rhdh_base_url}" + + log::info "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}" + + # Use the 
same deployment logic as PR jobs (skip orchestrator) + local merged_pr_value_file="/tmp/merged-values_showcase_PR.yaml" + yq_merge_value_files "merge" "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" "${DIR}/value_files/diff-values_showcase_PR.yaml" "${merged_pr_value_file}" + disable_orchestrator_plugins_in_values "${merged_pr_value_file}" + + mkdir -p "${ARTIFACT_DIR}/${namespace}" + cp -a "${merged_pr_value_file}" "${ARTIFACT_DIR}/${namespace}/" || true + + # shellcheck disable=SC2046 + helm upgrade -i "${release_name}" -n "${namespace}" \ + "${HELM_CHART_URL}" --version "${CHART_VERSION}" \ + -f "${merged_pr_value_file}" \ + --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" \ + $(get_image_helm_set_params) + + deploy_test_backstage_customization_provider "${namespace}" + + # Wait for deployment and run failed tests + local url="https://${release_name}-developer-hub-${namespace}.${K8S_CLUSTER_ROUTER_BASE}" + + if check_backstage_running "${release_name}" "${namespace}" "${url}"; then + log::info "Backstage is running. Running failed tests..." + run_failed_tests_and_report "${namespace}" "${playwright_project}" "${url}" failed_tests[@] + local result=$? 
+ save_all_pod_logs "${namespace}" + return ${result} + else + log::error "Backstage deployment failed in ${namespace}" + save_all_pod_logs "${namespace}" + return 1 + fi +} + +####################################### +# Deploy showcase-rbac namespace and retest failed tests +# Arguments: +# namespace: The namespace to deploy to +# release_name: Helm release name +# playwright_project: Playwright project name +# failed_tests_ref: Name reference to array of failed test files +####################################### +deploy_and_retest_namespace_rbac() { + local namespace="${1}" + local release_name="${2}" + local playwright_project="${3}" + # shellcheck disable=SC2034 # nameref variable used via indirection + local -n failed_tests="${4}" # NOSONAR - nameref is used when passed to run_failed_tests_and_report + + log::info "Deploying RBAC to namespace: ${namespace}" + + # Configure namespaces + configure_namespace "${NAME_SPACE_POSTGRES_DB}" + configure_namespace "${namespace}" + configure_external_postgres_db "${namespace}" + + cd "${DIR}" + + local rbac_rhdh_base_url="https://${release_name}-developer-hub-${namespace}.${K8S_CLUSTER_ROUTER_BASE}" + apply_yaml_files "${DIR}" "${namespace}" "${rbac_rhdh_base_url}" + + log::info "Deploying RBAC image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}" + + # Use the same deployment logic as PR jobs (skip orchestrator) + local merged_pr_rbac_value_file="/tmp/merged-values_showcase-rbac_PR.yaml" + yq_merge_value_files "merge" "${DIR}/value_files/${HELM_CHART_RBAC_VALUE_FILE_NAME}" "${DIR}/value_files/diff-values_showcase-rbac_PR.yaml" "${merged_pr_rbac_value_file}" + disable_orchestrator_plugins_in_values "${merged_pr_rbac_value_file}" + + mkdir -p "${ARTIFACT_DIR}/${namespace}" + cp -a "${merged_pr_rbac_value_file}" "${ARTIFACT_DIR}/${namespace}/" || true + + # shellcheck disable=SC2046 + helm upgrade -i "${release_name}" -n "${namespace}" \ + "${HELM_CHART_URL}" --version "${CHART_VERSION}" \ + -f 
"${merged_pr_rbac_value_file}" \ + --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" \ + $(get_image_helm_set_params) + + # Wait for deployment and run failed tests + local url="https://${release_name}-developer-hub-${namespace}.${K8S_CLUSTER_ROUTER_BASE}" + + if check_backstage_running "${release_name}" "${namespace}" "${url}"; then + log::info "RBAC Backstage is running. Running failed tests..." + run_failed_tests_and_report "${namespace}" "${playwright_project}" "${url}" failed_tests[@] + local result=$? + save_all_pod_logs "${namespace}" + return ${result} + else + log::error "RBAC Backstage deployment failed in ${namespace}" + save_all_pod_logs "${namespace}" + return 1 + fi +} + +####################################### +# Run failed tests and save results/artifacts +# Arguments: +# namespace: Kubernetes namespace +# playwright_project: Playwright project name +# url: Backstage URL +# test_files_ref: Name reference to array of test files to run +####################################### +run_failed_tests_and_report() { + local namespace="${1}" + local playwright_project="${2}" + local url="${3}" + # shellcheck disable=SC2034 # nameref variable used via indirection + local -n test_files="${4}" + + CURRENT_DEPLOYMENT=$((CURRENT_DEPLOYMENT + 1)) + save_status_deployment_namespace "${CURRENT_DEPLOYMENT}" "${namespace}" + save_status_failed_to_deploy "${CURRENT_DEPLOYMENT}" false + + BASE_URL="${url}" + export BASE_URL + + log::info "BASE_URL: ${BASE_URL}" + log::info "Running ${#test_files[@]} previously failed tests for project '${playwright_project}'" + + cd "${DIR}/../../e2e-tests" + local e2e_tests_dir + e2e_tests_dir=$(pwd) + + yarn install --immutable > /tmp/yarn.install.log.txt 2>&1 + local install_status=$? + if [[ ${install_status} -ne 0 ]]; then + log::error "=== YARN INSTALL FAILED ===" + cat /tmp/yarn.install.log.txt + return ${install_status} + fi + log::success "Yarn install completed successfully." 
+ + yarn playwright install chromium + + Xvfb :99 & + export DISPLAY=:99 + + # Run only the specific failed test files + ( + set -e + log::info "Using PR container image: ${TAG_NAME}" + log::info "Running tests: ${test_files[*]}" + yarn playwright test --project="${playwright_project}" "${test_files[@]}" + ) 2>&1 | tee "/tmp/${LOGFILE}" + + local result=${PIPESTATUS[0]} + + pkill Xvfb || true + + # Save artifacts + mkdir -p "${ARTIFACT_DIR}/${namespace}/test-results" + mkdir -p "${ARTIFACT_DIR}/${namespace}/attachments/screenshots" + cp -a "${e2e_tests_dir}/test-results/"* "${ARTIFACT_DIR}/${namespace}/test-results" || true + cp -a "${e2e_tests_dir}/${JUNIT_RESULTS}" "${ARTIFACT_DIR}/${namespace}/${JUNIT_RESULTS}" || true + if [[ "${CI}" == "true" ]]; then + cp "${ARTIFACT_DIR}/${namespace}/${JUNIT_RESULTS}" "${SHARED_DIR}/junit-results-${namespace}.xml" || true + fi + + cp -a "${e2e_tests_dir}/screenshots/"* "${ARTIFACT_DIR}/${namespace}/attachments/screenshots/" || true + ansi2html < "/tmp/${LOGFILE}" > "/tmp/${LOGFILE}.html" + cp -a "/tmp/${LOGFILE}.html" "${ARTIFACT_DIR}/${namespace}" || true + cp -a "${e2e_tests_dir}/playwright-report/"* "${ARTIFACT_DIR}/${namespace}" || true + + log::info "Rerun tests in namespace '${namespace}' RESULT: ${result}" + + if [[ ${result} -ne 0 ]]; then + save_overall_result 1 + save_status_test_failed "${CURRENT_DEPLOYMENT}" true + else + save_status_test_failed "${CURRENT_DEPLOYMENT}" false + fi + + # Count failures from new JUnit results + if [[ -f "${e2e_tests_dir}/${JUNIT_RESULTS}" ]]; then + local failed_tests_count + failed_tests_count=$(grep -oP 'failures="\K[0-9]+' "${e2e_tests_dir}/${JUNIT_RESULTS}" | head -n 1) + log::info "Number of failed tests after rerun: ${failed_tests_count:-0}" + save_status_number_of_test_failed "${CURRENT_DEPLOYMENT}" "${failed_tests_count:-0}" + else + log::warn "JUnit results file not found: ${e2e_tests_dir}/${JUNIT_RESULTS}" + save_status_number_of_test_failed "${CURRENT_DEPLOYMENT}" 
"unknown" + fi + + return ${result} +} diff --git a/.ibm/pipelines/openshift-ci-tests.sh b/.ibm/pipelines/openshift-ci-tests.sh index 81340c2f3a..5d425728d7 100755 --- a/.ibm/pipelines/openshift-ci-tests.sh +++ b/.ibm/pipelines/openshift-ci-tests.sh @@ -133,6 +133,13 @@ main() { log::info "Calling handle_ocp_operator" handle_ocp_operator ;; + *rerun-failed-tests*) + log::info "Sourcing ocp-rerun-failed-tests.sh" + # shellcheck source=.ibm/pipelines/jobs/ocp-rerun-failed-tests.sh + source "${DIR}/jobs/ocp-rerun-failed-tests.sh" + log::info "Calling handle_ocp_rerun_failed_tests" + handle_ocp_rerun_failed_tests + ;; *pull*ocp*helm*) log::info "Sourcing ocp-pull.sh" # shellcheck source=.ibm/pipelines/jobs/ocp-pull.sh diff --git a/.ibm/pipelines/retest-failed-utils.sh b/.ibm/pipelines/retest-failed-utils.sh new file mode 100644 index 0000000000..c1f0163b5e --- /dev/null +++ b/.ibm/pipelines/retest-failed-utils.sh @@ -0,0 +1,419 @@ +#!/bin/bash +# +# Utility functions for re-running failed tests from previous CI executions. 
+# +# This script provides functions to: +# - Fetch JUnit XML results from previous GCS artifacts +# - Parse failed test names from JUnit XML +# - Build URLs to access previous run artifacts +# - Execute only the tests that failed in the previous run +# + +# shellcheck source=.ibm/pipelines/lib/log.sh +source "${DIR}/lib/log.sh" + +# GCS base URL for OpenShift CI test artifacts +readonly GCS_BASE_URL="https://gcsweb-ci.apps.ci.l2s4.p1.openshiftapps.com/gcs/test-platform-results" + +# GitHub API base URL +readonly GITHUB_API_URL="https://api.github.com" + +# The job name we want to find previous failed runs for +readonly RERUN_TARGET_JOB="pull-ci-redhat-developer-rhdh-main-e2e-ocp-helm" + +####################################### +# Validate required dependencies are available +# Returns: +# 0 if all dependencies available, 1 otherwise +####################################### +validate_dependencies() { + local missing=() + + for cmd in curl jq; do + if ! command -v "${cmd}" &> /dev/null; then + missing+=("${cmd}") + fi + done + + if [[ ${#missing[@]} -gt 0 ]]; then + log::error "Missing required dependencies: ${missing[*]}" + log::error "Please install them before running this script" + return 1 + fi + + return 0 +} + +####################################### +# Build GitHub API auth header if token available +# Outputs: +# Writes auth header arguments for curl to stdout +####################################### +get_github_auth_header() { + if [[ -n "${GITHUB_TOKEN:-}" ]]; then + echo "-H" "Authorization: Bearer ${GITHUB_TOKEN}" + fi + + return 0 +} + +####################################### +# Make authenticated GitHub API request with error handling +# Arguments: +# endpoint: API endpoint (e.g., "/repos/org/repo/pulls/123") +# Outputs: +# Writes JSON response to stdout +# Returns: +# 0 if successful, 1 if failed +####################################### +github_api_request() { + local endpoint="${1}" + local url="${GITHUB_API_URL}${endpoint}" + local response_file + 
response_file=$(mktemp) + local http_status + + # Build curl args with optional auth + local curl_args=(-sS -w "%{http_code}" -o "${response_file}") + curl_args+=(-H "Accept: application/vnd.github.v3+json") + + # Add auth header if GITHUB_TOKEN is set + if [[ -n "${GITHUB_TOKEN:-}" ]]; then + curl_args+=(-H "Authorization: Bearer ${GITHUB_TOKEN}") + fi + + http_status=$(curl "${curl_args[@]}" "${url}") + + if [[ "${http_status}" =~ ^2[0-9][0-9]$ ]]; then + cat "${response_file}" + rm -f "${response_file}" + return 0 + else + log::warn "GitHub API request failed (HTTP ${http_status}): ${endpoint}" + rm -f "${response_file}" + return 1 + fi +} + +####################################### +# Build the GCS artifact URL for a specific job run +# Arguments: +# org: GitHub organization (e.g., "redhat-developer") +# repo: Repository name (e.g., "rhdh") +# pr_number: Pull request number +# job_name: CI job name (e.g., "pull-ci-redhat-developer-rhdh-main-e2e-ocp-helm") +# build_id: Prow build ID +# namespace: Test namespace (e.g., "showcase" or "showcase-rbac") +# Outputs: +# Writes the constructed URL to stdout +####################################### +build_previous_run_artifact_url() { + local org="${1}" + local repo="${2}" + local pr_number="${3}" + local job_name="${4}" + local build_id="${5}" + local namespace="${6}" + + local url="${GCS_BASE_URL}/pr-logs/pull/${org}_${repo}/${pr_number}/${job_name}/${build_id}" + url="${url}/artifacts/e2e-ocp-helm/redhat-developer-rhdh-ocp-helm/artifacts/${namespace}/junit-results.xml" + + echo "${url}" +} + +####################################### +# Get the previous build ID for a specific job from GitHub API +# Arguments: +# org: GitHub organization +# repo: Repository name +# pr_number: Pull request number +# target_job: Job name to find (default: RERUN_TARGET_JOB) +# Outputs: +# Writes the build ID to stdout, or empty string if not found +# Returns: +# 0 if build ID found, 1 otherwise +####################################### 
+get_previous_failed_build_id() { + local org="${1}" + local repo="${2}" + local pr_number="${3}" + local target_job="${4:-${RERUN_TARGET_JOB}}" + + log::info "Fetching check runs for PR #${pr_number} in ${org}/${repo}..." + + # Get the PR's head SHA first + local pr_response + if ! pr_response=$(github_api_request "/repos/${org}/${repo}/pulls/${pr_number}"); then + log::error "Failed to fetch PR information" + return 1 + fi + + local head_sha + head_sha=$(echo "${pr_response}" | jq -r '.head.sha // empty') + + if [[ -z "${head_sha}" ]]; then + log::error "Could not get PR head SHA from response" + return 1 + fi + + log::info "PR head SHA: ${head_sha}" + + # Get check runs for this commit + local check_runs_response + if ! check_runs_response=$(github_api_request "/repos/${org}/${repo}/commits/${head_sha}/check-runs"); then + log::error "Failed to fetch check runs" + return 1 + fi + + # Find the most recent completed check run matching our target job + # Sort by completed_at descending to get the most recent + local details_url + details_url=$(echo "${check_runs_response}" | jq -r \ + --arg job "${target_job}" \ + '[.check_runs[] | select(.name == $job and .conclusion != null)] + | sort_by(.completed_at) | reverse | .[0].details_url // empty') + + if [[ -z "${details_url}" || "${details_url}" == "null" ]]; then + log::warn "No completed check run found for job: ${target_job}" + return 1 + fi + + log::info "Found details URL: ${details_url}" + + # Extract build ID from the details URL using a more precise pattern + # URL format: https://prow.ci.openshift.org/view/gs/test-platform-results/pr-logs/pull/org_repo/pr/job/BUILD_ID + local build_id + build_id=$(echo "${details_url}" | sed -n 's|.*/\([0-9]\{10,\}\)$|\1|p') + + # Fallback to grep if sed didn't match + if [[ -z "${build_id}" ]]; then + build_id=$(echo "${details_url}" | grep -oE '/[0-9]{10,}$' | tr -d '/') + fi + + if [[ -z "${build_id}" ]]; then + log::error "Could not extract build ID from details URL: 
${details_url}"
+    return 1
+  fi
+
+  log::success "Found previous build ID: ${build_id}"
+  echo "${build_id}"
+}
+
+#######################################
+# Fetch JUnit results XML from GCS for a specific namespace
+# Arguments:
+#   artifact_url: Full URL to the junit-results.xml file
+#   output_file: Local path to save the XML file
+# Returns:
+#   0 if successful, 1 if failed
+#######################################
+fetch_previous_junit_results() {
+  local artifact_url="${1}"
+  local output_file="${2}"
+
+  log::info "Fetching JUnit results from: ${artifact_url}"
+
+  local http_status
+  http_status=$(curl -sS -w "%{http_code}" -o "${output_file}" "${artifact_url}")
+
+  if [[ "${http_status}" == "200" ]]; then
+    # Validate the downloaded file is valid XML
+    # NOTE(review): the original text from here to the xmllint check below was
+    # lost to markup stripping; best-effort reconstruction — verify against the
+    # original patch.
+    if [[ -s "${output_file}" ]] && head -1 "${output_file}" | grep -q '<?xml'; then
+      log::success "Fetched JUnit results successfully"
+      return 0
+    else
+      log::warn "Downloaded file is empty or not valid XML"
+      rm -f "${output_file}"
+      return 1
+    fi
+  else
+    log::warn "Failed to fetch JUnit results (HTTP ${http_status})"
+    rm -f "${output_file}"
+    return 1
+  fi
+}
+
+#######################################
+# Parse failed test file paths from JUnit XML
+# Arguments:
+#   junit_file: Path to the JUnit XML file
+# Outputs:
+#   Writes unique failed test file paths to stdout, one per line
+#######################################
+parse_failed_tests_from_junit() {
+  local junit_file="${1}"
+
+  if [[ ! -f "${junit_file}" ]]; then
+    return 0
+  fi
+
+  # Use xmllint for accurate parsing if available
+  if command -v xmllint &> /dev/null; then
+    # Extract file paths from testcases with failures, handling multiple attributes properly
+    local xpath_result
+    xpath_result=$(xmllint --xpath '//testcase[failure]/@file' "${junit_file}" 2> /dev/null || echo "")
+
+    if [[ -n "${xpath_result}" ]]; then
+      # Parse file="..." attributes, one per line
+      echo "${xpath_result}" | grep -oP 'file="\K[^"]+' | sort -u
+    fi
+  else
+    # Fallback: use grep/sed for systems without xmllint
+    # Match testcase elements that contain a failure child element
+    grep -zoP '<testcase[^>]*file="[^"]*"[^>]*>.*?<failure' "${junit_file}" 2> /dev/null \
+      | grep -oP 'file="\K[^"]+' \
+      | sort -u
+  fi
+
+  return 0
+}
+
+#######################################
+# Get the count of failed tests from JUnit XML
+# Arguments:
+#   junit_file: Path to the JUnit XML file
+# Outputs:
+#   Writes the number of failures to stdout
+#######################################
+get_failed_test_count() {
+  local junit_file="${1}"
+
+  if [[ !
-f "${junit_file}" ]]; then + echo "0" + return + fi + + # Use xmllint for accurate count if available + if command -v xmllint &> /dev/null; then + local count + count=$(xmllint --xpath 'count(//testcase[failure])' "${junit_file}" 2> /dev/null || echo "0") + echo "${count%.*}" # Remove decimal if present + else + # Fallback: sum failures attributes from all testsuites + local total=0 + while IFS= read -r failures; do + total=$((total + failures)) + done < <(grep -oP 'failures="\K[0-9]+' "${junit_file}" 2> /dev/null) + echo "${total}" + fi + + return 0 +} + +####################################### +# Run only the specified failed tests using Playwright +# Arguments: +# playwright_project: The Playwright project name (e.g., "showcase" or "showcase-rbac") +# test_files: Array of test file paths to run +# Returns: +# Exit code from Playwright +####################################### +run_failed_tests_only() { + local playwright_project="${1}" + shift + local test_files=("$@") + + if [[ ${#test_files[@]} -eq 0 ]]; then + log::warn "No test files specified to run" + return 0 + fi + + log::section "Running Failed Tests Only" + log::info "Project: ${playwright_project}" + log::info "Test files to run:" + for file in "${test_files[@]}"; do + log::info " - ${file}" + done + + cd "${DIR}/../../e2e-tests" || return 1 + + yarn install --immutable > /tmp/yarn.install.log.txt 2>&1 + local install_status=$? + if [[ ${install_status} -ne 0 ]]; then + log::error "Yarn install failed" + cat /tmp/yarn.install.log.txt + return ${install_status} + fi + + yarn playwright install chromium + + Xvfb :99 & + export DISPLAY=:99 + + log::info "Executing Playwright tests..." + yarn playwright test --project="${playwright_project}" "${test_files[@]}" + local result=$? 
+ + pkill Xvfb || true + + return ${result} +} + +####################################### +# Get PR information from environment or git +# Outputs: +# Sets PULL_NUMBER, REPO_OWNER, REPO_NAME environment variables +####################################### +get_pr_info() { + if [[ -n "${PULL_NUMBER:-}" ]]; then + log::info "Using PR number from environment: ${PULL_NUMBER}" + else + if [[ -n "${PULL_REFS:-}" ]]; then + PULL_NUMBER=$(echo "${PULL_REFS}" | grep -oP ':\K[0-9]+' | head -n 1) + export PULL_NUMBER + fi + fi + + export REPO_OWNER="${REPO_OWNER:-redhat-developer}" + export REPO_NAME="${REPO_NAME:-rhdh}" + + log::info "PR Info: ${REPO_OWNER}/${REPO_NAME}#${PULL_NUMBER:-unknown}" + + return 0 +} + +####################################### +# Check if test file exists in the current codebase +# Arguments: +# test_file: Path to the test file (relative to repo root) +# Returns: +# 0 if file exists, 1 otherwise +####################################### +test_file_exists() { + local test_file="${1}" + local full_path="${DIR}/../../${test_file}" + + if [[ -f "${full_path}" ]]; then + return 0 + else + log::warn "Test file no longer exists: ${test_file}" + return 1 + fi +} + +####################################### +# Filter test files to only include those that still exist +# Arguments: +# test_files: Array of test file paths +# Outputs: +# Writes existing test file paths to stdout +####################################### +filter_existing_test_files() { + local test_files=("$@") + local existing_files=() + + for file in "${test_files[@]}"; do + if test_file_exists "${file}"; then + existing_files+=("${file}") + fi + done + + printf '%s\n' "${existing_files[@]}" + + return 0 +}