Skip to content

Commit 0f3670f

Browse files
Merge pull request #375 from gurock/fix/TRCLI-207
TRCLI-207 Fix test run cases deletion issue in update_run_in_plan_entry
2 parents 41c39fb + c6e7592 commit 0f3670f

File tree

3 files changed

+284
-39
lines changed

3 files changed

+284
-39
lines changed

.github/workflows/pr-validation.yml

Lines changed: 32 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -23,30 +23,31 @@ jobs:
2323
PR_BODY="${{ github.event.pull_request.body }}"
2424
2525
# Check if PR title or body contains issue reference
26-
# Accepts: TRCLI-### (JIRA), GIT-### (GitHub), #123 (GitHub), issues/123
27-
if echo "$PR_TITLE $PR_BODY" | grep -qE "TRCLI-[0-9]+|GIT-[0-9]+|#[0-9]+|issues/[0-9]+"; then
28-
echo "issue_found=true" >> $GITHUB_OUTPUT
26+
if echo "$PR_TITLE $PR_BODY" | grep -qE "(TRCLI-[0-9]+|GIT-[0-9]+|#[0-9]+|issues/[0-9]+)"; then
27+
echo "issue_found=true" >> "$GITHUB_OUTPUT"
2928
30-
# Extract the issue key/number
3129
if echo "$PR_TITLE $PR_BODY" | grep -qE "TRCLI-[0-9]+"; then
3230
ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "TRCLI-[0-9]+" | head -1)
33-
echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT
34-
echo "issue_type=JIRA" >> $GITHUB_OUTPUT
31+
echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT"
32+
echo "issue_type=JIRA" >> "$GITHUB_OUTPUT"
33+
3534
elif echo "$PR_TITLE $PR_BODY" | grep -qE "GIT-[0-9]+"; then
3635
ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "GIT-[0-9]+" | head -1)
37-
echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT
38-
echo "issue_type=GitHub" >> $GITHUB_OUTPUT
36+
echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT"
37+
echo "issue_type=GitHub" >> "$GITHUB_OUTPUT"
38+
3939
elif echo "$PR_TITLE $PR_BODY" | grep -qE "#[0-9]+"; then
4040
ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "#[0-9]+" | head -1)
41-
echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT
42-
echo "issue_type=GitHub" >> $GITHUB_OUTPUT
41+
echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT"
42+
echo "issue_type=GitHub" >> "$GITHUB_OUTPUT"
43+
4344
elif echo "$PR_BODY" | grep -qE "issues/[0-9]+"; then
4445
ISSUE_KEY=$(echo "$PR_BODY" | grep -oE "issues/[0-9]+" | head -1)
45-
echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT
46-
echo "issue_type=GitHub" >> $GITHUB_OUTPUT
46+
echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT"
47+
echo "issue_type=GitHub" >> "$GITHUB_OUTPUT"
4748
fi
4849
else
49-
echo "issue_found=false" >> $GITHUB_OUTPUT
50+
echo "issue_found=false" >> "$GITHUB_OUTPUT"
5051
fi
5152
5253
- name: Comment on PR if issue reference missing
@@ -61,42 +62,38 @@ jobs:
6162
repo: context.repo.repo,
6263
body: `## ⚠️ Missing Issue Reference
6364
64-
This PR does not reference an issue. Please include a reference in either:
65+
This PR does not reference an issue. Please include one.
6566
66-
**JIRA tickets:**
67-
- PR title: "feat(api): TRCLI-123 Add new endpoint"
68-
- PR description: "Resolves TRCLI-123"
67+
**JIRA example:**
68+
- TRCLI-123
6969
70-
**GitHub issues:**
71-
- PR title: "feat(api): GIT-123 Add new endpoint"
72-
- PR description: "Resolves GIT-123" or "Fixes #123"
73-
- Or link to the GitHub issue
74-
75-
This helps with tracking and project management. Thank you!`
70+
**GitHub examples:**
71+
- GIT-123
72+
- Fixes #123
73+
- issues/123`
7674
})
7775
7876
- name: Check PR Description Completeness
7977
id: check_description
8078
run: |
8179
PR_BODY="${{ github.event.pull_request.body }}"
8280
83-
# Check for required sections
8481
if echo "$PR_BODY" | grep -q "Issue being resolved"; then
85-
echo "has_issue=true" >> $GITHUB_OUTPUT
82+
echo "has_issue=true" >> "$GITHUB_OUTPUT"
8683
else
87-
echo "has_issue=false" >> $GITHUB_OUTPUT
84+
echo "has_issue=false" >> "$GITHUB_OUTPUT"
8885
fi
8986
9087
if echo "$PR_BODY" | grep -q "Solution description"; then
91-
echo "has_solution=true" >> $GITHUB_OUTPUT
88+
echo "has_solution=true" >> "$GITHUB_OUTPUT"
9289
else
93-
echo "has_solution=false" >> $GITHUB_OUTPUT
90+
echo "has_solution=false" >> "$GITHUB_OUTPUT"
9491
fi
9592
9693
if echo "$PR_BODY" | grep -q "Steps to test"; then
97-
echo "has_test_steps=true" >> $GITHUB_OUTPUT
94+
echo "has_test_steps=true" >> "$GITHUB_OUTPUT"
9895
else
99-
echo "has_test_steps=false" >> $GITHUB_OUTPUT
96+
echo "has_test_steps=false" >> "$GITHUB_OUTPUT"
10097
fi
10198
10299
- name: Generate PR Validation Summary
@@ -107,6 +104,7 @@ jobs:
107104
const issueFound = '${{ steps.check_issue.outputs.issue_found }}' === 'true';
108105
const issueKey = '${{ steps.check_issue.outputs.issue_key }}';
109106
const issueType = '${{ steps.check_issue.outputs.issue_type }}';
107+
110108
const hasIssue = '${{ steps.check_description.outputs.has_issue }}' === 'true';
111109
const hasSolution = '${{ steps.check_description.outputs.has_solution }}' === 'true';
112110
const hasTestSteps = '${{ steps.check_description.outputs.has_test_steps }}' === 'true';
@@ -124,9 +122,9 @@ jobs:
124122
| Solution Description | ${hasSolution ? '✅ Present' : '⚠️ Missing'} |
125123
| Test Steps | ${hasTestSteps ? '✅ Present' : '⚠️ Missing'} |
126124
127-
${issueFound && hasSolution && hasTestSteps ? '✅ All checks passed!' : '⚠️ Some optional sections are missing. Consider adding them for better review context.'}
125+
${issueFound && hasSolution && hasTestSteps
126+
? '✅ All checks passed!'
127+
: '⚠️ Some optional sections are missing.'}
128128
`;
129129
130-
await core.summary
131-
.addRaw(summary)
132-
.write();
130+
await core.summary.addRaw(summary).write();

tests/test_api_request_handler.py

Lines changed: 236 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -827,6 +827,242 @@ def test_delete_run(self, api_request_handler_verify: ApiRequestHandler, request
827827
resources_added, error = api_request_handler_verify.delete_run(run_id)
828828
assert error == "", "There should be no error in verification."
829829

830+
@pytest.mark.api_handler
def test_update_run_with_include_all_false_standalone(self, api_request_handler: ApiRequestHandler, requests_mock):
    """Test update_run for a standalone run (no plan) with include_all=false.

    Verifies that update_run preserves include_all=False and sends the union
    of the run's existing case IDs and the report's case IDs.
    """
    run_id = 100
    run_name = "Updated Test Run"

    # Mock get_run response - standalone run (no plan_id), include_all=false
    get_run_response = {
        "id": run_id,
        "name": "Original Run",
        "description": "Original description",
        "refs": "REF-1",
        "include_all": False,
        "plan_id": None,
        "config_ids": [],
    }

    # Mock get_tests response - existing cases in run
    get_tests_response = {
        "offset": 0,
        "limit": 250,
        "size": 2,
        "_links": {"next": None, "prev": None},
        "tests": [{"id": 1, "case_id": 1, "status_id": 1}, {"id": 2, "case_id": 2, "status_id": 1}],
    }

    # Mock update_run response
    update_run_response = {"id": run_id, "name": run_name}

    requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response)
    requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response)
    requests_mock.post(create_url(f"update_run/{run_id}"), json=update_run_response)

    # Execute update_run
    run_data, error = api_request_handler.update_run(run_id, run_name)

    # Assertions
    assert error == "", "No error should occur"
    assert run_data["id"] == run_id, "Run ID should match"

    # Verify the payload sent to update_run
    request_history = requests_mock.request_history
    update_request = [r for r in request_history if "update_run" in r.url and r.method == "POST"][0]
    payload = update_request.json()

    # E712: identity comparison is the idiomatic check for a literal bool
    assert payload["include_all"] is False, "include_all should be False"
    assert "case_ids" in payload, "case_ids should be present"
    # Should contain union of existing (1, 2) and report cases
    assert set(payload["case_ids"]) >= {1, 2}, "Should include existing case IDs"
880+
@pytest.mark.api_handler
def test_update_run_with_include_all_false_plan_with_config(
    self, api_request_handler: ApiRequestHandler, requests_mock
):
    """Test update_run for a run in a plan with configs and include_all=false.

    This is the TRCLI-207 bug scenario: the update_run_in_plan_entry payload
    must carry include_all=False and the merged case_ids, otherwise existing
    test-run cases are deleted.
    """
    run_id = 200
    run_name = "Updated Test Run in Plan"

    # Mock get_run response - run in plan with config, include_all=false
    get_run_response = {
        "id": run_id,
        "name": "Original Run",
        "description": "Original description",
        "refs": "REF-1",
        "include_all": False,
        "plan_id": 10,
        "config_ids": [5, 6],  # Has configs - will use update_run_in_plan_entry
    }

    # Mock get_tests response - existing cases
    get_tests_response = {
        "offset": 0,
        "limit": 250,
        "size": 3,
        "_links": {"next": None, "prev": None},
        "tests": [
            {"id": 1, "case_id": 188, "status_id": 1},
            {"id": 2, "case_id": 180, "status_id": 1},
            {"id": 3, "case_id": 191, "status_id": 1},
        ],
    }

    # Mock update_run_in_plan_entry response
    update_run_response = {"id": run_id, "name": run_name}

    requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response)
    requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response)
    requests_mock.post(create_url(f"update_run_in_plan_entry/{run_id}"), json=update_run_response)

    # Execute update_run
    run_data, error = api_request_handler.update_run(run_id, run_name)

    # Assertions
    assert error == "", "No error should occur"
    assert run_data["id"] == run_id, "Run ID should match"

    # Verify the payload sent to update_run_in_plan_entry
    request_history = requests_mock.request_history
    update_request = [r for r in request_history if "update_run_in_plan_entry" in r.url][0]
    payload = update_request.json()

    # THIS IS THE CRITICAL FIX - must include include_all=False
    assert payload["include_all"] is False, "include_all must be False (fixes the bug)"
    assert "case_ids" in payload, "case_ids should be present"
    # Should contain union of existing (188, 180, 191) and report cases
    assert set(payload["case_ids"]) >= {188, 180, 191}, "Should preserve existing case IDs"
937+
@pytest.mark.api_handler
def test_update_run_with_include_all_true_preserves_setting(
    self, api_request_handler: ApiRequestHandler, requests_mock
):
    """Test update_run preserves include_all=true and doesn't send case_ids.

    When the run already includes all cases, TestRail handles case selection
    automatically, so the update payload must not carry a case_ids array.
    """
    run_id = 300
    run_name = "Updated Run with Include All"

    # Mock get_run response - include_all=true
    get_run_response = {
        "id": run_id,
        "name": "Original Run",
        "description": "Original description",
        "refs": "REF-1",
        "include_all": True,  # Run includes all cases
        "plan_id": None,
        "config_ids": [],
    }

    # Mock update_run response
    update_run_response = {"id": run_id, "name": run_name, "include_all": True}

    requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response)
    requests_mock.post(create_url(f"update_run/{run_id}"), json=update_run_response)

    # Execute update_run
    run_data, error = api_request_handler.update_run(run_id, run_name)

    # Assertions
    assert error == "", "No error should occur"
    # E712: identity comparison is the idiomatic check for a literal bool
    assert run_data["include_all"] is True, "include_all should be preserved"

    # Verify the payload sent to update_run
    request_history = requests_mock.request_history
    update_request = [r for r in request_history if "update_run" in r.url and r.method == "POST"][0]
    payload = update_request.json()

    assert payload["include_all"] is True, "include_all should be True"
    assert "case_ids" not in payload, "case_ids should NOT be present when include_all=True"
977+
@pytest.mark.api_handler
def test_update_run_handles_get_tests_error(self, api_request_handler: ApiRequestHandler, requests_mock):
    """Test update_run handles errors from get_tests gracefully.

    If get_tests fails (e.g. 403), update_run must abort and surface a
    descriptive error instead of silently wiping the run's case list.
    """
    run_id = 400
    run_name = "Test Run"

    # Mock get_run response - include_all=false
    get_run_response = {
        "id": run_id,
        "name": "Original Run",
        "description": "Original description",
        "refs": "REF-1",
        "include_all": False,
        "plan_id": None,
        "config_ids": [],
    }

    # Mock get_tests to return error (403 Forbidden, for example)
    requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response)
    requests_mock.get(create_url(f"get_tests/{run_id}"), status_code=403, json={"error": "Access denied"})

    # Execute update_run - should fail gracefully
    run_data, error = api_request_handler.update_run(run_id, run_name)

    # Assertions
    assert run_data is None, "run_data should be None on error"
    # Strengthened: an empty string would have passed the old `is not None`
    # check; require a non-empty error message.
    assert error, "Error message should be present"
    assert "Failed to get tests in run" in error, "Error should indicate get_tests failure"
1006+
@pytest.mark.api_handler
def test_update_run_with_include_all_false_plan_without_config(
    self, api_request_handler: ApiRequestHandler, requests_mock
):
    """Test update_run for a run in a plan without configs uses update_plan_entry.

    Verifies the plan entry is resolved via get_plan and that the
    update_plan_entry payload carries include_all=False plus existing case IDs.
    """
    run_id = 500
    run_name = "Updated Test Run in Plan No Config"
    plan_id = 20
    entry_id = "abc-123"

    # Mock get_run response - run in plan without config
    get_run_response = {
        "id": run_id,
        "name": "Original Run",
        "description": "Original description",
        "refs": "REF-1",
        "include_all": False,
        "plan_id": plan_id,
        "config_ids": [],  # No configs - will use update_plan_entry
    }

    # Mock get_tests response
    get_tests_response = {
        "offset": 0,
        "limit": 250,
        "size": 1,
        "_links": {"next": None, "prev": None},
        "tests": [{"id": 1, "case_id": 50, "status_id": 1}],
    }

    # Mock get_plan response
    get_plan_response = {
        "id": plan_id,
        "entries": [{"id": entry_id, "runs": [{"id": run_id, "entry_id": entry_id}]}],
    }

    # Mock update_plan_entry response
    update_plan_response = {"id": run_id, "name": run_name}

    requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response)
    requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response)
    requests_mock.get(create_url(f"get_plan/{plan_id}"), json=get_plan_response)
    requests_mock.post(create_url(f"update_plan_entry/{plan_id}/{entry_id}"), json=update_plan_response)

    # Execute update_run
    run_data, error = api_request_handler.update_run(run_id, run_name)

    # Assertions
    assert error == "", "No error should occur"
    assert run_data["id"] == run_id, "Run ID should match"

    # Verify update_plan_entry was called with correct payload
    request_history = requests_mock.request_history
    update_request = [r for r in request_history if f"update_plan_entry/{plan_id}/{entry_id}" in r.url][0]
    payload = update_request.json()

    # E712: identity comparison is the idiomatic check for a literal bool
    assert payload["include_all"] is False, "include_all should be False"
    assert "case_ids" in payload, "case_ids should be present"
    assert 50 in payload["case_ids"], "Should include existing case ID"
1065+
8301066
@pytest.mark.api_handler
8311067
def test_upload_attachments_413_error(self, api_request_handler: ApiRequestHandler, requests_mock, tmp_path):
8321068
"""Test that 413 errors (file too large) are properly reported."""

trcli/api/api_request_handler.py

Lines changed: 16 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -539,11 +539,22 @@ def update_run(
539539
else:
540540
add_run_data["refs"] = existing_refs # Keep existing refs if none provided
541541

542-
run_tests, error_message = self.__get_all_tests_in_run(run_id)
543-
run_case_ids = [test["case_id"] for test in run_tests]
544-
report_case_ids = add_run_data["case_ids"]
545-
joint_case_ids = list(set(report_case_ids + run_case_ids))
546-
add_run_data["case_ids"] = joint_case_ids
542+
existing_include_all = run_response.response_text.get("include_all", False)
543+
add_run_data["include_all"] = existing_include_all
544+
545+
if not existing_include_all:
546+
# Only manage explicit case_ids when include_all=False
547+
run_tests, error_message = self.__get_all_tests_in_run(run_id)
548+
if error_message:
549+
return None, f"Failed to get tests in run: {error_message}"
550+
run_case_ids = [test["case_id"] for test in run_tests]
551+
report_case_ids = add_run_data["case_ids"]
552+
joint_case_ids = list(set(report_case_ids + run_case_ids))
553+
add_run_data["case_ids"] = joint_case_ids
554+
else:
555+
# include_all=True: TestRail includes all suite cases automatically
556+
# Do NOT send case_ids array (TestRail ignores it anyway)
557+
add_run_data.pop("case_ids", None)
547558

548559
plan_id = run_response.response_text["plan_id"]
549560
config_ids = run_response.response_text["config_ids"]

0 commit comments

Comments
 (0)