99 changes: 84 additions & 15 deletions .semaphore/end-to-end/pipelines/certification.yml
@@ -7,7 +7,7 @@ name: banzai-calico Openshift Certification

agent:
machine:
type: f1-standard-2
type: c1-standard-1
os_image: ubuntu2204

execution_time_limit:
@@ -31,34 +31,85 @@ global_job_config:
value: "ocp-cert"
- name: STERN_CHECK
value: "DISABLED"
- name: PROVISIONER
value: aws-openshift
- name: INSTALLER
value: operator
- name: DATAPLANE
value: "CalicoIptables"

blocks:
- name: Openshift OCP
- name: Standalone cluster tests
dependencies: []
task:
agent:
machine:
type: f1-standard-2
os_image: ubuntu2204
jobs:
- name: CNI & Virtualization tests
- name: Standalone cluster tests (cert)
execution_time_limit:
hours: 6
commands:
- ~/calico/.semaphore/end-to-end/scripts/body_standard.sh
matrix:
- env_var: OPENSHIFT_VERSION
values:
- "4.16.38"
- "4.17.25"
- "4.18.9"
values: ["4.20.1", "4.19.17", "4.18.27"]
env_vars:
- name: USE_HASH_RELEASE
value: "false"
- name: PROVISIONER
value: aws-openshift
- name: INSTALLER
value: operator
- name: OPENSHIFT_CLUSTER_TYPE
value: "Standalone"
- name: TEST_TYPE
value: "ocp-cert"
- name: ENABLE_OCP_VIRT
value: "true"
- name: DATAPLANE
value: "CalicoIptables"

- name: HCP setup hosting
dependencies: []
task:
Comment on lines +66 to +68

Copilot AI Dec 9, 2025

[nitpick] The agent configuration is missing for the "HCP setup hosting" job. While this may use defaults, for consistency with the other blocks ("Standalone cluster tests" at lines 45-48 and "HCP hosted setup and tests" at lines 89-92), consider explicitly specifying the agent machine type and OS image.
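For reference, a minimal agent stanza matching the other blocks would look like the following (the machine type and OS image are copied from the "Standalone cluster tests" and "HCP hosted setup and tests" blocks; whether that machine size actually suits the setup-hosting job is an assumption):

agent:
  machine:
    type: f1-standard-2
    os_image: ubuntu2204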
jobs:
# Only define one provisioner per job. If you wish to create multiple hosting clusters,
# use separate jobs per hosting cluster and update the `HOSTING_CLUSTER` value in the hosted block.
# Do not forget to add the `HOSTING_CLUSTER` value to the "teardown hosting" block.
- name: HCP setup hosting
execution_time_limit:
hours: 3
commands:
- ~/calico/.semaphore/end-to-end/scripts/body_standard.sh
env_vars:
- name: OPENSHIFT_VERSION
value: "4.20.1"
- name: OPENSHIFT_CLUSTER_TYPE
value: "HCP-hosting"
- name: HOSTING_CLUSTER
value: "hcp-shared-hosting"
- name: HCP_STAGE
value: "setup-hosting"

- name: HCP hosted setup and tests
dependencies: ["HCP setup hosting"]
task:
agent:
machine:
type: f1-standard-2
os_image: ubuntu2204
jobs:
- name: HCP tests - hosted (cert)
execution_time_limit:
hours: 6
commands:
- ~/calico/.semaphore/end-to-end/scripts/body_standard.sh
matrix:
- env_var: OPENSHIFT_VERSION
values: ["4.20.1", "4.19.17", "4.18.27"]
env_vars:
- name: HCP_STAGE
value: "hosted"
- name: OPENSHIFT_CLUSTER_TYPE
value: "HCP-hosted"
- name: HOSTING_CLUSTER
value: "hcp-shared-hosting"
- name: TEST_TYPE
value: "ocp-cert"

promotions:
- name: Cleanup jobs
@@ -68,7 +119,25 @@ promotions:

after_pipeline:
task:
secrets:
- name: banzai-secrets
jobs:
- name: Reports
commands:
- test-results gen-pipeline-report --force
- name: Destroy Hosting Clusters
execution_time_limit:
hours: 3
commands:
- checkout
- source ~/calico/.semaphore/end-to-end/scripts/global_prologue.sh
- unset CLUSTER_NAME
- ~/calico/.semaphore/end-to-end/scripts/body_standard.sh
- ~/calico/.semaphore/end-to-end/scripts/global_epilogue.sh
env_vars:
- name: HCP_STAGE
value: "destroy-hosting"
matrix:
- env_var: HOSTING_CLUSTER
values:
- "hcp-shared-hosting"
117 changes: 73 additions & 44 deletions .semaphore/end-to-end/scripts/body_standard.sh
@@ -8,59 +8,88 @@ else
VERBOSE=""
fi

cd "${BZ_HOME}"
if [[ "${HCP_ENABLED}" == "true" ]]; then
echo "[INFO] starting hcp job..."

echo "[INFO] starting bz provision..."
bz provision $VERBOSE | tee >(gzip --stdout > ${BZ_LOGS_DIR}/provision.log.gz)
echo "[INFO] starting hcp provision..."
hcp-provision.sh |& tee ${BZ_LOGS_DIR}/provision.log

cache delete $SEMAPHORE_JOB_ID
cache store ${SEMAPHORE_JOB_ID} ${BZ_HOME}
cache delete ${SEMAPHORE_JOB_ID}
cache store ${SEMAPHORE_JOB_ID} ${BZ_HOME}

echo "[INFO] starting bz install..."
bz install $VERBOSE | tee >(gzip --stdout > ${BZ_LOGS_DIR}/install.log.gz)
echo "[INFO] Test logs will be available here after the run: ${SEMAPHORE_ORGANIZATION_URL}/artifacts/jobs/${SEMAPHORE_JOB_ID}?path=semaphore%2Flogs"
echo "[INFO] Alternatively, you can view logs while job is running using 'sem attach ${SEMAPHORE_JOB_ID}' and then 'tail -f ${BZ_LOGS_DIR}/${TEST_TYPE}-tests.log'"

# Put the bin dir into the PATH
export PATH=$PATH:${BZ_LOCAL_DIR}/bin
echo "[INFO] starting hcp testing..."
hcp-test.sh |& tee ${BZ_LOGS_DIR}/${TEST_TYPE}-tests.log

if [[ "${ENABLE_EXTERNAL_NODE}" == "true" ]]; then
export EXT_USER=ubuntu
EXT_IP=$(cat "${BZ_LOCAL_DIR}"/external_ip)
export EXT_IP
export EXT_KEY=${BZ_LOCAL_DIR}/external_key
export K8S_E2E_DOCKER_EXTRA_FLAGS="-v $EXT_KEY:/key --env EXT_USER --env EXT_KEY=/key --env EXT_IP $K8S_E2E_DOCKER_EXTRA_FLAGS"
echo "EXT_USER=ubuntu EXT_IP=$EXT_IP, EXT_KEY=$EXT_KEY"
echo "K8S_E2E_DOCKER_EXTRA_FLAGS=$K8S_E2E_DOCKER_EXTRA_FLAGS"
fi
else
echo "[INFO] starting job..."
echo "[INFO] BZ_HOME=${BZ_HOME}"

if [ -n "${IPAM_TEST_POOL_SUBNET}" ]; then
export K8S_E2E_DOCKER_EXTRA_FLAGS="$K8S_E2E_DOCKER_EXTRA_FLAGS --env IPAM_TEST_POOL_SUBNET"
echo "IPAM_TEST_POOL_SUBNET=$IPAM_TEST_POOL_SUBNET"
fi
cd "${BZ_HOME}"
if [[ "${HCP_STAGE}" == "hosting" || "${HCP_STAGE}" == "destroy-hosting" ]]; then
: # Skip provisioning for hosting stages, as the cluster already exists
else
echo "[INFO] starting bz provision..."
bz provision $VERBOSE |& tee >(gzip --stdout > ${BZ_LOGS_DIR}/provision.log.gz)

if [ "${FAILSAFE_443}" == "true" ]; then
KUBECONFIG=${BZ_LOCAL_DIR}/kubeconfig kubectl patch felixconfiguration default --type=merge -p '{"spec":{"failsafeOutboundHostPorts": [{"protocol": "udp", "port":53},{"protocol": "udp", "port":67},{"protocol": "tcp", "port":179},{"protocol": "tcp", "port":2379},{"protocol": "tcp", "port":2380},{"protocol": "tcp", "port":5473},{"protocol": "tcp", "port":443},{"protocol": "tcp", "port":6666},{"protocol": "tcp", "port":6667}]}}'
fi
cache delete $SEMAPHORE_JOB_ID
cache store ${SEMAPHORE_JOB_ID} ${BZ_HOME}

# Perform the operator migration following the instructions here:
# https://projectcalico.docs.tigera.io/maintenance/operator-migration
if [[ -n "$OPERATOR_MIGRATE" ]]; then
${HOME}/${SEMAPHORE_GIT_DIR}/.semaphore/end-to-end/scripts/test_scripts/operator_migrate.sh
fi
echo "[INFO] starting bz install..."
bz install $VERBOSE |& tee >(gzip --stdout > ${BZ_LOGS_DIR}/install.log.gz)

# Perform the AKS migration following the instructions here:
# https://docs.tigera.io/calico/latest/getting-started/kubernetes/managed-public-cloud/aks-migrate
if [[ -n "$DESIRED_POLICY" ]]; then
echo "[INFO] starting AKS migration..."
bz addons run aks-migrate:setup
fi
if [[ "${HCP_STAGE}" == "setup-hosting" ]]; then
echo "[INFO] HCP_STAGE=${HCP_STAGE}, storing hosting cluster profile in cache"
cache store ${SEMAPHORE_WORKFLOW_ID}-hosting-${HOSTING_CLUSTER} ${BZ_HOME}
fi
fi

if [[ -n "$UPLEVEL_RELEASE_STREAM" ]]; then
echo "[INFO] starting bz upgrade..."
bz upgrade $VERBOSE | tee >(gzip --stdout > ${BZ_LOGS_DIR}/upgrade.log.gz)
fi
# Put the bin dir into the PATH
export PATH=$PATH:${BZ_LOCAL_DIR}/bin

if [[ "${ENABLE_EXTERNAL_NODE}" == "true" ]]; then
export EXT_USER=ubuntu
EXT_IP=$(cat "${BZ_LOCAL_DIR}"/external_ip)
export EXT_IP
export EXT_KEY=${BZ_LOCAL_DIR}/external_key
export K8S_E2E_DOCKER_EXTRA_FLAGS="-v $EXT_KEY:/key --env EXT_USER --env EXT_KEY=/key --env EXT_IP $K8S_E2E_DOCKER_EXTRA_FLAGS"
echo "EXT_USER=ubuntu EXT_IP=$EXT_IP, EXT_KEY=$EXT_KEY"
echo "K8S_E2E_DOCKER_EXTRA_FLAGS=$K8S_E2E_DOCKER_EXTRA_FLAGS"
fi

if [ -n "${IPAM_TEST_POOL_SUBNET}" ]; then
export K8S_E2E_DOCKER_EXTRA_FLAGS="$K8S_E2E_DOCKER_EXTRA_FLAGS --env IPAM_TEST_POOL_SUBNET"
echo "IPAM_TEST_POOL_SUBNET=$IPAM_TEST_POOL_SUBNET"
fi

echo "[INFO] Test logs will be available here after the run: ${SEMAPHORE_ORGANIZATION_URL}/artifacts/jobs/${SEMAPHORE_JOB_ID}?path=semaphore%2Flogs"
echo "[INFO] Alternatively, you can view logs while job is running using 'sem attach ${SEMAPHORE_JOB_ID}' and then 'tail -f ${BZ_LOGS_DIR}/${TEST_TYPE}-tests.log'"
if [ "${FAILSAFE_443}" == "true" ]; then
KUBECONFIG=${BZ_LOCAL_DIR}/kubeconfig kubectl patch felixconfiguration default --type=merge -p '{"spec":{"failsafeOutboundHostPorts": [{"protocol": "udp", "port":53},{"protocol": "udp", "port":67},{"protocol": "tcp", "port":179},{"protocol": "tcp", "port":2379},{"protocol": "tcp", "port":2380},{"protocol": "tcp", "port":5473},{"protocol": "tcp", "port":443},{"protocol": "tcp", "port":6666},{"protocol": "tcp", "port":6667}]}}'
fi

echo "[INFO] starting bz testing..."
bz tests $VERBOSE | tee >(gzip --stdout > ${BZ_LOGS_DIR}/${TEST_TYPE}-tests.log.gz)
# Perform the operator migration following the instructions here:
# https://projectcalico.docs.tigera.io/maintenance/operator-migration
if [[ -n "$OPERATOR_MIGRATE" ]]; then
${HOME}/${SEMAPHORE_GIT_DIR}/.semaphore/end-to-end/scripts/test_scripts/operator_migrate.sh |& tee >(gzip --stdout > ${BZ_LOGS_DIR}/operator_migrate.log.gz)
fi
# Perform the AKS migration following the instructions here:
# https://docs.tigera.io/calico/latest/getting-started/kubernetes/managed-public-cloud/aks-migrate
if [[ -n "$DESIRED_POLICY" ]]; then
echo "[INFO] starting AKS migration..."
bz addons run aks-migrate:setup
fi

if [[ -n "$UPLEVEL_RELEASE_STREAM" ]]; then
echo "[INFO] starting bz upgrade..."
bz upgrade $VERBOSE | tee >(gzip --stdout > ${BZ_LOGS_DIR}/upgrade.log.gz)
fi

if [[ ${MCM_STAGE:-} != *-mgmt* ]] && [[ ${HCP_STAGE:-} != *-hosting* ]]; then
echo "[INFO] Test logs will be available here after the run: ${SEMAPHORE_ORGANIZATION_URL}/artifacts/jobs/${SEMAPHORE_JOB_ID}?path=semaphore%2Flogs"
echo "[INFO] Alternatively, you can view logs while job is running using 'sem attach ${SEMAPHORE_JOB_ID}' and then 'tail -f ${BZ_LOGS_DIR}/${TEST_TYPE}-tests.log'"

echo "[INFO] starting bz testing..."
bz tests $VERBOSE |& tee >(gzip --stdout > ${BZ_LOGS_DIR}/${TEST_TYPE}-tests.log.gz)
fi
fi
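
A recurring change in this script is the logging pattern: plain `| tee` pipes become `|& tee >(gzip --stdout > ...)`. A minimal standalone sketch of that pattern (the command and log file name are illustrative):

#!/usr/bin/env bash
# |& is bash shorthand for 2>&1 |, so stderr is captured alongside stdout.
# >(...) is process substitution: tee keeps streaming to the console while
# gzip compresses the same stream into the .log.gz file on the fly.
some_command |& tee >(gzip --stdout > "${BZ_LOGS_DIR}/example.log.gz")

One consequence of the process substitution is that gzip runs asynchronously, so the compressed log may be finalized slightly after the command itself exits.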