# .github/workflows/test_endpoint.yml
name: Endpoint Validation Test

on:
  workflow_call:
    inputs:
      endpoint:
        description: 'API Endpoint URL to test'
        required: true
        type: string
      eval_function:
        description: 'Evaluation Function Name'
        required: true
        type: string
      sql_limit:
        description: 'Max number of records to fetch'
        required: false
        type: number
        default: 1000
    secrets:
      # Explicitly declare the secrets the reusable workflow needs
      DB_USER:
        required: true
      DB_PASSWORD:
        required: true
      DB_HOST:
        required: true
      DB_PORT:
        required: true
      DB_NAME:
        required: true

jobs:
  run_test:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout Code
        uses: actions/checkout@v4

      - name: Set up Python Environment
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Dependencies
        run: pip install -r requirements.txt

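      # NOTE (assumed contract, not confirmed by this file alone):
      # test_endpoint.py reads the event JSON from stdin and prints a single
      # JSON summary to stdout. The jq lookups below imply a shape roughly like
      #   {"pass_count": 98, "total_count": 100,
      #    "number_of_errors": 2, "csv_filename": "errors.csv"}
      # where the example values are purely illustrative.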
      - name: Run Test Script and Capture Results
        id: run_script
        env:
          # --- Database Secrets (Loaded from GitHub Secrets) ---
          DB_USER: ${{ secrets.DB_USER }}
          DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
          DB_HOST: ${{ secrets.DB_HOST }}
          DB_PORT: ${{ secrets.DB_PORT }}
          DB_NAME: ${{ secrets.DB_NAME }}

          # --- Configuration ---
          LOG_LEVEL: DEBUG
          SQL_LIMIT: ${{ inputs.sql_limit }}

          # Inputs are passed via env (not interpolated into the script) to
          # avoid shell/JSON injection from special characters in their values.
          ENDPOINT: ${{ inputs.endpoint }}
          EVAL_FUNCTION: ${{ inputs.eval_function }}

        run: |
          # 1. Build the event payload as JSON for the lambda_handler.
          # (recipient_email has been removed from the payload.)
          EVENT_PAYLOAD=$(jq -n \
            --arg endpoint "$ENDPOINT" \
            --arg eval_function_name "$EVAL_FUNCTION" \
            --argjson sql_limit "$SQL_LIMIT" \
            '{endpoint: $endpoint, eval_function_name: $eval_function_name, grade_params_json: "", sql_limit: $sql_limit}')

          # 2. Execute the script, feeding the payload on stdin and capturing
          #    the final JSON summary from stdout.
          python3 test_endpoint.py <<< "$EVENT_PAYLOAD" > report_data.json

          # 3. Extract key metrics and set them as step outputs for later steps.
          #    '// empty' keeps a missing csv_filename from becoming the string "null".
          echo "error_count=$(jq -r '.number_of_errors' report_data.json)" >> "$GITHUB_OUTPUT"
          echo "csv_filename=$(jq -r '.csv_filename // empty' report_data.json)" >> "$GITHUB_OUTPUT"

          # 4. Fail the step if errors were found, so the job status reflects
          #    the test result; later steps use always() to still run.
          ERROR_COUNT=$(jq -r '.number_of_errors' report_data.json)
          if [ "$ERROR_COUNT" -gt 0 ]; then
            echo "::error file=test_endpoint.py::Test completed with $ERROR_COUNT errors."
            exit 1
          fi

      - name: 📊 Create Job Summary Report
        # always() ensures the summary is written even when the test step
        # exits non-zero.
        if: always()
        env:
          ENDPOINT: ${{ inputs.endpoint }}
          EVAL_FUNCTION: ${{ inputs.eval_function }}
        run: |
          # Guard: if the test step crashed before writing a report, say so.
          if [ ! -s report_data.json ]; then
            echo "⚠ No report data was produced." >> "$GITHUB_STEP_SUMMARY"
            exit 0
          fi

          PASSES=$(jq -r '.pass_count' report_data.json)
          TOTAL=$(jq -r '.total_count' report_data.json)
          ERRORS=$(jq -r '.number_of_errors' report_data.json)

          # Multiply before dividing so scale=2 keeps two decimal places,
          # and guard against a zero total.
          if [ "$TOTAL" -gt 0 ]; then
            PASS_RATE=$(echo "scale=2; $PASSES * 100 / $TOTAL" | bc -l)
          else
            PASS_RATE="0"
          fi

          STATUS_EMOJI="✅"
          if [ "$ERRORS" -gt 0 ]; then
            STATUS_EMOJI="❌"
          fi

          # Write the markdown report to the special Job Summary file
          {
            echo "## ${STATUS_EMOJI} Endpoint Validation Report"
            echo "---"
            echo "**Endpoint:** ${ENDPOINT}"
            echo ""
            echo "**Evaluation Function:** ${EVAL_FUNCTION}"
            echo ""
            echo "| Metric | Value |"
            echo "| :--- | :--- |"
            echo "| **Total Tests** | ${TOTAL} |"
            echo "| **Passed** | ${PASSES} |"
            echo "| **Failed** | **${ERRORS}** |"
            echo "| **Pass Rate** | ${PASS_RATE}% |"
            echo "---"

            if [ "$ERRORS" -gt 0 ]; then
              echo "⚠ **${ERRORS} Mismatches/Errors Found.** Detailed report attached below."
            else
              echo "🎉 All tests passed successfully!"
            fi

          } >> "$GITHUB_STEP_SUMMARY"
        shell: bash

      - name: 📦 Upload Error CSV Artifact
        # always() lets this run after a failed test step; the filename check
        # still skips the upload when no CSV was produced.
        if: always() && steps.run_script.outputs.csv_filename != ''
        uses: actions/upload-artifact@v4
        with:
          name: ${{ inputs.eval_function }}_error_report
          path: ${{ steps.run_script.outputs.csv_filename }}
          retention-days: 7
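
# Example caller (all values hypothetical): another workflow in the same repo
# could invoke this reusable workflow like so:
#
#   jobs:
#     validate:
#       uses: ./.github/workflows/test_endpoint.yml
#       with:
#         endpoint: https://api.example.com/v1/predict
#         eval_function: exact_match
#         sql_limit: 500
#       secrets: inherit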