From d3739b3425a621c519091a425ab44d26128ef5f8 Mon Sep 17 00:00:00 2001 From: graysurf <10785178+graysurf@users.noreply.github.com> Date: Wed, 25 Feb 2026 18:51:35 +0800 Subject: [PATCH] feat(skills): migrate issue delivery flows to plan-issue binaries --- .../plan-issue-cli-skill-mapping.md | 38 + docs/plans/duck-issue-loop-test-plan.md | 10 +- .../plan-issue-cli-skill-migration-plan.md | 283 ++ docs/runbooks/skills/TOOLING_INDEX_V2.md | 16 +- .../automation/issue-delivery-loop/SKILL.md | 84 +- .../scripts/manage_issue_delivery_loop.sh | 1178 ------- .../test_automation_issue_delivery_loop.py | 69 +- .../plan-issue-delivery-loop/SKILL.md | 146 +- .../scripts/plan-issue-delivery-loop.sh | 2911 ----------------- ...est_automation_plan_issue_delivery_loop.py | 117 +- .../workflows/issue/issue-pr-review/SKILL.md | 2 +- .../scripts/manage_issue_pr_review.sh | 103 +- .../test_workflows_issue_issue_pr_review.py | 14 + .../issue/issue-subagent-pr/SKILL.md | 121 +- .../scripts/manage_issue_subagent_pr.sh | 1321 -------- .../test_workflows_issue_issue_subagent_pr.py | 47 +- .../manage_issue_delivery_loop.sh.json | 94 - .../scripts/manage_issue_pr_review.sh.json | 2 +- .../scripts/manage_issue_subagent_pr.sh.json | 160 - 19 files changed, 701 insertions(+), 6015 deletions(-) create mode 100644 docs/migrations/plan-issue-cli-skill-mapping.md create mode 100644 docs/plans/plan-issue-cli-skill-migration-plan.md delete mode 100755 skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh delete mode 100755 skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh delete mode 100755 skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh delete mode 100644 tests/script_specs/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh.json delete mode 100644 tests/script_specs/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh.json diff --git 
a/docs/migrations/plan-issue-cli-skill-mapping.md b/docs/migrations/plan-issue-cli-skill-mapping.md new file mode 100644 index 00000000..ae67a032 --- /dev/null +++ b/docs/migrations/plan-issue-cli-skill-mapping.md @@ -0,0 +1,38 @@ +# Plan-Issue CLI Skill Mapping + +This matrix maps the legacy `plan-issue-delivery-loop.sh` command surface to Rust CLI entrypoints. + +| Legacy Command | New Command | Scope | +| --- | --- | --- | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh start-plan` | `plan-issue start-plan` | Live GitHub-backed plan bootstrap (`1 plan = 1 issue`). | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh start-sprint` | `plan-issue start-sprint` | Live sprint kickoff and task-row sync. | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh ready-sprint` | `plan-issue ready-sprint` | Live sprint-ready review request comment. | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh accept-sprint` | `plan-issue accept-sprint` | Live sprint acceptance gate (merged PR check + `Status=done` sync). | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh ready-plan` | `plan-issue ready-plan` | Live final plan review handoff. | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh close-plan` | `plan-issue close-plan` | Live final close gate + worktree cleanup enforcement. | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh status-plan` | `plan-issue status-plan` | Live plan issue status snapshot. | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh status-sprint` | `plan-issue status-plan` | Legacy alias removal: use `status-plan` in Rust CLI. 
| +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh build-task-spec` | `plan-issue build-task-spec` | Build sprint-scoped task-spec TSV from plan. | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh build-plan-task-spec` | `plan-issue build-plan-task-spec` | Build plan-scoped task-spec TSV (all sprints). | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh cleanup-worktrees` | `plan-issue cleanup-worktrees` | Enforce cleanup of issue-assigned task worktrees. | +| `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh multi-sprint-guide` | `plan-issue multi-sprint-guide` | Print repeated multi-sprint orchestration flow. | + +`plan-issue-local` supports the same subcommands as `plan-issue` for local-first rehearsal (typically with `--dry-run`). + +## Inventory + +### Directly Impacted Skills/Docs + +- `skills/automation/plan-issue-delivery-loop/SKILL.md` +- `skills/automation/issue-delivery-loop/SKILL.md` +- `skills/workflows/issue/issue-subagent-pr/SKILL.md` +- `skills/workflows/issue/issue-pr-review/SKILL.md` +- `docs/runbooks/skills/TOOLING_INDEX_V2.md` + +### Transitive Dependencies Relevant To This Migration + +- `skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh` (legacy wrapper being replaced by Rust binaries). +- `skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh` (status/review/close orchestration contract wrapped by plan-level commands). +- `skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh` (subagent worktree/PR execution path used by sprint orchestration). +- `skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh` (main-agent review/merge decisions used before accept/close gates). +- PATH tooling used by impacted skills: `plan-tooling`, `gh`. 
diff --git a/docs/plans/duck-issue-loop-test-plan.md b/docs/plans/duck-issue-loop-test-plan.md index 891b4549..043c3577 100644 --- a/docs/plans/duck-issue-loop-test-plan.md +++ b/docs/plans/duck-issue-loop-test-plan.md @@ -1,7 +1,7 @@ # Plan: Duck plan for plan issue delivery loop ## Overview -This plan creates disposable test deliverables under `tests/issues/duck-loop/` to validate `plan-issue-delivery-loop` orchestration using three distinct execution profiles. The three sprints intentionally cover both supported grouping styles and avoid ambiguous naming between task summaries and grouping behavior. +This plan creates disposable test deliverables under `tests/issues/duck-loop/` to validate `plan-issue` / `plan-issue-local` orchestration using three distinct execution profiles. The three sprints intentionally cover both supported grouping styles and avoid ambiguous naming between task summaries and grouping behavior. ## Scope - In scope: @@ -18,7 +18,7 @@ This plan creates disposable test deliverables under `tests/issues/duck-loop/` t ## Assumptions 1. `plan-tooling` and `python3` are available on `PATH`. -2. `skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh` is executable in this repo. +2. `plan-issue` and `plan-issue-local` are available on `PATH` in this repo. 3. GitHub approval/merge gates are validated when the orchestration workflow runs, not in this planning step. 
## Success criteria @@ -34,7 +34,7 @@ This plan creates disposable test deliverables under `tests/issues/duck-loop/` t **Demo/Validation**: - Command(s): - `plan-tooling validate --file docs/plans/duck-issue-loop-test-plan.md` - - `skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh build-task-spec --plan docs/plans/duck-issue-loop-test-plan.md --sprint 1 --pr-grouping per-sprint --task-spec-out "$AGENT_HOME/out/plan-issue-delivery-loop/duck-s1-per-sprint.tsv"` + - `plan-issue-local build-task-spec --plan docs/plans/duck-issue-loop-test-plan.md --sprint 1 --pr-grouping per-sprint --task-spec-out "$AGENT_HOME/out/plan-issue-delivery-loop/duck-s1-per-sprint.tsv" --dry-run` - `python3 - <<'PY'\nimport csv\nfrom pathlib import Path\nrows=list(csv.reader(Path("$AGENT_HOME/out/plan-issue-delivery-loop/duck-s1-per-sprint.tsv").open(), delimiter="\t"))\ndata=[r for r in rows if r and not r[0].startswith("#")]\ngroups={r[6] for r in data}\nassert len(groups)==1, groups\nprint("ok")\nPY` - Verify: - Sprint 1 task-spec file exists under `$AGENT_HOME/out/plan-issue-delivery-loop/`. @@ -90,7 +90,7 @@ This plan creates disposable test deliverables under `tests/issues/duck-loop/` t **Goal**: Validate `group` mode with one isolated task plus one shared two-task group. 
**Demo/Validation**: - Command(s): - - `skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh build-task-spec --plan docs/plans/duck-issue-loop-test-plan.md --sprint 2 --pr-grouping group --pr-group S2T1=s2-isolated --pr-group S2T2=s2-shared --pr-group S2T3=s2-shared --task-spec-out "$AGENT_HOME/out/plan-issue-delivery-loop/duck-s2-group-shared.tsv"` + - `plan-issue-local build-task-spec --plan docs/plans/duck-issue-loop-test-plan.md --sprint 2 --pr-grouping group --pr-group S2T1=s2-isolated --pr-group S2T2=s2-shared --pr-group S2T3=s2-shared --task-spec-out "$AGENT_HOME/out/plan-issue-delivery-loop/duck-s2-group-shared.tsv" --dry-run` - `python3 - <<'PY'\nimport csv\nfrom pathlib import Path\nrows=list(csv.reader(Path("$AGENT_HOME/out/plan-issue-delivery-loop/duck-s2-group-shared.tsv").open(), delimiter="\t"))\ndata=[r for r in rows if r and not r[0].startswith("#")]\ngroups=[r[6] for r in data]\nassert groups.count("s2-shared")==2, groups\nassert groups.count("s2-isolated")==1, groups\nprint("ok")\nPY` - Verify: - Sprint 2 group output has one isolated group (`s2-isolated`) and one shared pair (`s2-shared`). @@ -142,7 +142,7 @@ This plan creates disposable test deliverables under `tests/issues/duck-loop/` t **Goal**: Validate `group` mode where every task is explicitly isolated (no shared pair), and finalize cleanup manifest indexing. 
**Demo/Validation**: - Command(s): - - `skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh build-task-spec --plan docs/plans/duck-issue-loop-test-plan.md --sprint 3 --pr-grouping group --pr-group S3T1=s3-a --pr-group S3T2=s3-b --pr-group S3T3=s3-c --task-spec-out "$AGENT_HOME/out/plan-issue-delivery-loop/duck-s3-group-isolated.tsv"` + - `plan-issue-local build-task-spec --plan docs/plans/duck-issue-loop-test-plan.md --sprint 3 --pr-grouping group --pr-group S3T1=s3-a --pr-group S3T2=s3-b --pr-group S3T3=s3-c --task-spec-out "$AGENT_HOME/out/plan-issue-delivery-loop/duck-s3-group-isolated.tsv" --dry-run` - `python3 - <<'PY'\nimport csv\nfrom pathlib import Path\nrows=list(csv.reader(Path("$AGENT_HOME/out/plan-issue-delivery-loop/duck-s3-group-isolated.tsv").open(), delimiter="\t"))\ndata=[r for r in rows if r and not r[0].startswith("#")]\ngroups=[r[6] for r in data]\nassert len(set(groups))==3, groups\nprint("ok")\nPY` - Verify: - Sprint 3 group output has three unique groups (`s3-a`, `s3-b`, `s3-c`). diff --git a/docs/plans/plan-issue-cli-skill-migration-plan.md b/docs/plans/plan-issue-cli-skill-migration-plan.md new file mode 100644 index 00000000..a4dbe179 --- /dev/null +++ b/docs/plans/plan-issue-cli-skill-migration-plan.md @@ -0,0 +1,283 @@ +# Plan: Plan-issue CLI migration for plan issue delivery skills + +## Overview +This plan migrates plan-issue delivery orchestration skills from legacy shell wrappers to the Rust binaries `plan-issue` and `plan-issue-local`. The target state removes legacy script entrypoints, rewrites affected skill contracts to binary-first command usage, and preserves the orchestration-only role boundary for main-agent. Sprints are sequential integration gates; parallelism is optimized only inside each sprint. + +## Scope +- In scope: + - Rewrite all `plan-issue-delivery-loop` related skill contracts to use `plan-issue` / `plan-issue-local` command surface. 
+ - Remove deprecated shell wrappers in affected skills. + - Update dependent tests and docs that currently assert legacy script paths/behavior. + - Keep command examples and validation flow aligned with the typed Rust CLI contract. +- Out of scope: + - Implement new features in `/Users/terry/Project/graysurf/nils-cli/crates/plan-issue-cli`. + - Change GitHub policy/gates beyond what `plan-issue` already enforces. + - Re-architect unrelated PR workflows outside this migration. + +## Assumptions +1. `plan-issue` and `plan-issue-local` are installed on `PATH` in this repo environment. +2. `plan-tooling`, `pytest`, `git`, and `gh` remain available for validation. +3. Deleting legacy wrappers is acceptable as a breaking change for downstream references in this repo. +4. For plan orchestration, `plan-issue` is the canonical command contract; any legacy wrapper behavior not present in the Rust CLI will be removed, not reintroduced via new shell scripts. + +## Sprint sequencing gates +1. Sprint 2 starts only after Sprint 1 is merged and accepted. +2. Sprint 3 starts only after Sprint 2 is merged and accepted. + +## Success criteria +- Skill docs no longer require these deleted entrypoints: + - `skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh` + - `skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh` + - `skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh` +- `plan-issue-delivery-loop` skill contract and command examples are fully binary-first (`plan-issue`, `plan-issue-local`). +- Related skills inventory is explicit and reflected in updated docs/tests. +- All affected tests pass after contract updates. + +## Sprint 1: Inventory and contract rewrite +**Goal**: Freeze migration surface and rewrite top-level orchestration contracts to Rust CLI usage. 
+**PR Grouping Intent**: per-sprint +**Execution Profile**: parallel-x2 (intended width: 2) +**Sprint Scorecard**: +- `TotalComplexity`: 11 +- `CriticalPathComplexity`: 7 +- `MaxBatchWidth`: 2 +- `OverlapHotspots`: `skills/automation/plan-issue-delivery-loop/SKILL.md`, `skills/automation/issue-delivery-loop/SKILL.md` +**Demo/Validation**: +- Command(s): + - `plan-tooling validate --file docs/plans/plan-issue-cli-skill-migration-plan.md` + - `plan-tooling to-json --file docs/plans/plan-issue-cli-skill-migration-plan.md --sprint 1` + - `plan-tooling batches --file docs/plans/plan-issue-cli-skill-migration-plan.md --sprint 1` + - `plan-tooling split-prs --file docs/plans/plan-issue-cli-skill-migration-plan.md --scope sprint --sprint 1 --pr-grouping per-sprint --strategy deterministic --format json` +- Verify: + - Migration inventory includes every impacted skill, test, doc, and script path. + - `plan-issue-delivery-loop` and `issue-delivery-loop` skills point to `plan-issue` command usage, not legacy wrappers. +**Parallelizable tasks**: +- `Task 1.2` and `Task 1.3` can run in parallel after `Task 1.1`. 
+ +### Task 1.1: Build migration inventory and command parity matrix +- **Location**: + - `docs/migrations/plan-issue-cli-skill-mapping.md` + - `skills/automation/plan-issue-delivery-loop/SKILL.md` + - `skills/automation/issue-delivery-loop/SKILL.md` + - `skills/workflows/issue/issue-subagent-pr/SKILL.md` + - `skills/workflows/issue/issue-pr-review/SKILL.md` + - `skills/automation/plan-issue-delivery-loop/tests/test_automation_plan_issue_delivery_loop.py` + - `skills/automation/issue-delivery-loop/tests/test_automation_issue_delivery_loop.py` + - `skills/workflows/issue/issue-subagent-pr/tests/test_workflows_issue_issue_subagent_pr.py` + - `skills/workflows/issue/issue-pr-review/tests/test_workflows_issue_issue_pr_review.py` + - `docs/runbooks/skills/TOOLING_INDEX_V2.md` +- **Description**: Produce a definitive migration matrix (`old script command -> new binary command`) and an explicit impacted-skills inventory to drive all subsequent edits. +- **Dependencies**: none +- **Complexity**: 3 +- **Acceptance criteria**: + - Inventory lists all directly impacted skills and transitive dependencies. + - Mapping covers each migrated command family (`start-plan`, `start-sprint`, `ready-sprint`, `accept-sprint`, `ready-plan`, `close-plan`, status/build commands). +- **Validation**: + - `rg -n 'manage_issue_delivery_loop\.sh|manage_issue_subagent_pr\.sh|plan-issue-delivery-loop\.sh' skills docs/runbooks/skills/TOOLING_INDEX_V2.md` + - `rg -n '^\\| Legacy Command \\| New Command \\| Scope \\|$|start-plan|start-sprint|ready-sprint|accept-sprint|ready-plan|close-plan' docs/migrations/plan-issue-cli-skill-mapping.md` + +### Task 1.2: Rewrite plan-issue-delivery-loop skill contract to binary-first +- **Location**: + - `skills/automation/plan-issue-delivery-loop/SKILL.md` +- **Description**: Replace legacy script prereqs/entrypoint sections with the Rust CLI contract (`plan-issue`, `plan-issue-local`), including dry-run/local rehearsal behavior and required grouping controls. 
+- **Dependencies**: + - Task 1.1 +- **Complexity**: 4 +- **Acceptance criteria**: + - SKILL contract references only `plan-issue`/`plan-issue-local` as orchestration entrypoints. + - Workflow and completion policy keep existing gate semantics and role boundaries. + - No references remain to `plan-issue-delivery-loop.sh` or inherited shell wrappers. +- **Validation**: + - `rg -n 'plan-issue|plan-issue-local|start-plan|start-sprint|ready-sprint|accept-sprint|ready-plan|close-plan' skills/automation/plan-issue-delivery-loop/SKILL.md` + - `rg -n 'plan-issue-delivery-loop\.sh|manage_issue_delivery_loop\.sh|manage_issue_subagent_pr\.sh' skills/automation/plan-issue-delivery-loop/SKILL.md && exit 1 || true` + +### Task 1.3: Rewrite issue-delivery-loop skill to align with plan-issue orchestration boundary +- **Location**: + - `skills/automation/issue-delivery-loop/SKILL.md` +- **Description**: Retire direct dependency on `manage_issue_delivery_loop.sh`; document orchestration through typed CLI flows and keep `issue-pr-review` as review decision path. +- **Dependencies**: + - Task 1.1 +- **Complexity**: 4 +- **Acceptance criteria**: + - SKILL no longer declares the deleted script as required entrypoint. + - Main-agent orchestration-only contract remains explicit. + - Command examples are executable with currently supported binaries/tools. +- **Validation**: + - `rg -n 'plan-issue|plan-issue-local|status-plan|ready-plan|close-plan|issue-pr-review' skills/automation/issue-delivery-loop/SKILL.md` + - `rg -n 'manage_issue_delivery_loop\.sh' skills/automation/issue-delivery-loop/SKILL.md && exit 1 || true` + +## Sprint 2: Script removal and dependency rewiring +**Goal**: Delete legacy wrappers and remove hard runtime dependencies on those files. +**PR Grouping Intent**: per-sprint +**Start gate**: Sprint 1 merged and accepted. 
+**Execution Profile**: parallel-x2 (intended width: 2) +**Sprint Scorecard**: +- `TotalComplexity`: 16 +- `CriticalPathComplexity`: 13 +- `MaxBatchWidth`: 2 +- `OverlapHotspots`: `skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh`, legacy script deletion paths, dependent tests +**Demo/Validation**: +- Command(s): + - `plan-tooling to-json --file docs/plans/plan-issue-cli-skill-migration-plan.md --sprint 2` + - `plan-tooling batches --file docs/plans/plan-issue-cli-skill-migration-plan.md --sprint 2` + - `plan-tooling split-prs --file docs/plans/plan-issue-cli-skill-migration-plan.md --scope sprint --sprint 2 --pr-grouping per-sprint --strategy deterministic --format json` +- Verify: + - Legacy script files are removed from the repository. + - No remaining runtime path resolution points to deleted scripts. + - Tests reflect the new command contract without script-content assertions. +**Parallelizable tasks**: +- `Task 2.2` and `Task 2.3` can run in parallel after `Task 2.1`. + +### Task 2.1: Delete deprecated plan/issue shell wrappers +- **Location**: + - `skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh` + - `skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh` + - `skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh` +- **Description**: Remove all legacy shell entrypoints that are superseded by the Rust `plan-issue` command contract. +- **Dependencies**: none +- **Complexity**: 4 +- **Acceptance criteria**: + - All three files are deleted from git tracking. +- **Validation**: + - `test ! -f skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh` + - `test ! -f skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh` + - `test ! 
-f skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh` + +### Task 2.2: Rewire issue-subagent-pr contract away from deleted script dependency +- **Location**: + - `skills/workflows/issue/issue-subagent-pr/SKILL.md` +- **Description**: Convert `issue-subagent-pr` to a scriptless, explicit command-contract skill (native `git`/`gh` steps + `plan-issue` artifacts where applicable), and remove legacy entrypoint requirements from the skill contract. +- **Dependencies**: + - Task 2.1 +- **Complexity**: 3 +- **Acceptance criteria**: + - `issue-subagent-pr` contract does not require a removed entrypoint. +- **Validation**: + - `rg -n 'git|gh|plan-issue' skills/workflows/issue/issue-subagent-pr/SKILL.md` + - `rg -n 'manage_issue_subagent_pr\.sh' skills/workflows/issue/issue-subagent-pr/SKILL.md && exit 1 || true` + +### Task 2.3: Rewire issue-pr-review runtime gate away from deleted subagent script +- **Location**: + - `skills/workflows/issue/issue-pr-review/SKILL.md` + - `skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh` +- **Description**: Update `issue-pr-review` so PR body hygiene validation is self-contained and no longer shells out to `manage_issue_subagent_pr.sh`. +- **Dependencies**: + - Task 2.1 +- **Complexity**: 4 +- **Acceptance criteria**: + - `issue-pr-review` skill contract reflects the new self-contained validation behavior. + - `manage_issue_pr_review.sh` does not reference deleted script paths. + - Runtime behavior for merge/close hygiene remains deterministic. 
+- **Validation**: + - `rg -n 'manage_issue_subagent_pr\.sh' skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh && exit 1 || true` + - `rg -n 'PR body|hygiene|validation' skills/workflows/issue/issue-pr-review/SKILL.md` + +### Task 2.4: Update migration-affected tests for binary-first contracts +- **Location**: + - `skills/automation/plan-issue-delivery-loop/tests/test_automation_plan_issue_delivery_loop.py` + - `skills/automation/issue-delivery-loop/tests/test_automation_issue_delivery_loop.py` + - `skills/workflows/issue/issue-subagent-pr/tests/test_workflows_issue_issue_subagent_pr.py` + - `skills/workflows/issue/issue-pr-review/tests/test_workflows_issue_issue_pr_review.py` +- **Description**: Replace script-content assertions and script-entrypoint existence checks with binary-contract assertions and SKILL contract invariants. +- **Dependencies**: + - Task 2.2 + - Task 2.3 +- **Complexity**: 5 +- **Acceptance criteria**: + - Tests no longer require removed script files. + - Tests enforce `plan-issue`/`plan-issue-local` references where expected. + - Updated tests pass locally. +- **Validation**: + - `scripts/test.sh skills/automation/plan-issue-delivery-loop/tests/test_automation_plan_issue_delivery_loop.py skills/automation/issue-delivery-loop/tests/test_automation_issue_delivery_loop.py skills/workflows/issue/issue-subagent-pr/tests/test_workflows_issue_issue_subagent_pr.py skills/workflows/issue/issue-pr-review/tests/test_workflows_issue_issue_pr_review.py` + +## Sprint 3: Documentation sync and local rehearsal hardening +**Goal**: Remove stale references, codify local-first rehearsal, and finish end-to-end validation. +**PR Grouping Intent**: per-sprint +**Start gate**: Sprint 2 merged and accepted. 
+**Execution Profile**: parallel-x2 (intended width: 2) +**Sprint Scorecard**: +- `TotalComplexity`: 11 +- `CriticalPathComplexity`: 8 +- `MaxBatchWidth`: 2 +- `OverlapHotspots`: docs references under `docs/runbooks/skills/`, sprint-flow examples in `skills/automation/plan-issue-delivery-loop/SKILL.md` +**Demo/Validation**: +- Command(s): + - `plan-tooling to-json --file docs/plans/plan-issue-cli-skill-migration-plan.md --sprint 3` + - `plan-tooling batches --file docs/plans/plan-issue-cli-skill-migration-plan.md --sprint 3` + - `plan-tooling split-prs --file docs/plans/plan-issue-cli-skill-migration-plan.md --scope sprint --sprint 3 --pr-grouping per-sprint --strategy deterministic --format json` + - `plan-tooling split-prs --file docs/plans/plan-issue-cli-skill-migration-plan.md --scope sprint --sprint 3 --pr-grouping group --strategy deterministic --pr-group S3T1=s3-docs --pr-group S3T2=s3-guidance --pr-group S3T3=s3-validation --format json` + - `plan-issue-local multi-sprint-guide --plan docs/plans/plan-issue-cli-skill-migration-plan.md --dry-run --format json` +- Verify: + - Tooling index and sample plans no longer point to deleted scripts. + - Local rehearsal flow can run without GitHub mutations. + - End-state docs accurately describe live mode (`plan-issue`) and local mode (`plan-issue-local`). +**Parallelizable tasks**: +- `Task 3.1` and `Task 3.2` can run in parallel before `Task 3.3`. + +### Task 3.1: Update tooling index and stale docs references +- **Location**: + - `docs/runbooks/skills/TOOLING_INDEX_V2.md` + - `docs/plans/duck-issue-loop-test-plan.md` +- **Description**: Replace stale script entrypoint references with binary-first command references and update command examples accordingly. +- **Dependencies**: none +- **Complexity**: 3 +- **Acceptance criteria**: + - No deleted script paths remain in targeted docs. + - Tooling index reflects `plan-issue` and `plan-issue-local` usage paths. 
+- **Validation**: + - `rg -n 'plan-issue-delivery-loop\.sh|manage_issue_delivery_loop\.sh|manage_issue_subagent_pr\.sh' docs/runbooks/skills/TOOLING_INDEX_V2.md docs/plans/duck-issue-loop-test-plan.md && exit 1 || true` + - `rg -n 'plan-issue|plan-issue-local' docs/runbooks/skills/TOOLING_INDEX_V2.md docs/plans/duck-issue-loop-test-plan.md` + +### Task 3.2: Harden local rehearsal guidance in plan-issue-delivery-loop skill +- **Location**: + - `skills/automation/plan-issue-delivery-loop/SKILL.md` +- **Description**: Make local-first practice explicit with `plan-issue-local` command sequence, required `--body-file` dry-run gates, and expected outputs for non-GitHub rehearsal. +- **Dependencies**: none +- **Complexity**: 4 +- **Acceptance criteria**: + - SKILL includes complete local rehearsal path from `start-plan` through `close-plan` dry-run semantics. + - Distinction between live and local binaries is explicit and non-ambiguous. +- **Validation**: + - `rg -n 'plan-issue-local|--body-file|--dry-run|start-plan|start-sprint|ready-sprint|accept-sprint|ready-plan|close-plan' skills/automation/plan-issue-delivery-loop/SKILL.md` + +### Task 3.3: Final validation pass and migration closure checks +- **Location**: + - `skills/automation/plan-issue-delivery-loop/SKILL.md` + - `skills/automation/issue-delivery-loop/SKILL.md` + - `skills/workflows/issue/issue-subagent-pr/SKILL.md` + - `skills/workflows/issue/issue-pr-review/SKILL.md` + - `docs/runbooks/skills/TOOLING_INDEX_V2.md` +- **Description**: Run final checks for contract validity, test pass status, and repository-wide removal of deprecated script references in migrated surface. +- **Dependencies**: + - Task 3.1 + - Task 3.2 +- **Complexity**: 4 +- **Acceptance criteria**: + - `validate_skill_contracts.sh` passes for edited skills. + - Migration-target tests are green. + - No stale references remain in migrated skill/doc scope. 
+- **Validation**: + - `skills/tools/skill-management/skill-governance/scripts/validate_skill_contracts.sh --file skills/automation/plan-issue-delivery-loop/SKILL.md --file skills/automation/issue-delivery-loop/SKILL.md --file skills/workflows/issue/issue-subagent-pr/SKILL.md --file skills/workflows/issue/issue-pr-review/SKILL.md` + - `scripts/test.sh skills/automation/plan-issue-delivery-loop/tests/test_automation_plan_issue_delivery_loop.py skills/automation/issue-delivery-loop/tests/test_automation_issue_delivery_loop.py skills/workflows/issue/issue-subagent-pr/tests/test_workflows_issue_issue_subagent_pr.py skills/workflows/issue/issue-pr-review/tests/test_workflows_issue_issue_pr_review.py` + - `rg -n 'plan-issue-delivery-loop\.sh|manage_issue_delivery_loop\.sh|manage_issue_subagent_pr\.sh' skills/automation/plan-issue-delivery-loop skills/automation/issue-delivery-loop skills/workflows/issue/issue-subagent-pr skills/workflows/issue/issue-pr-review docs/runbooks/skills/TOOLING_INDEX_V2.md && exit 1 || true` + +## Testing Strategy +- Unit: + - Run focused skill tests for the four impacted skill directories. +- Integration: + - Validate plan format and per-sprint split determinism via `plan-tooling validate|to-json|batches|split-prs`. + - Validate `plan-issue-local` dry-run orchestration command flow for offline rehearsal. +- E2E/manual: + - In a real repo, run one full sprint with `plan-issue` live mode and verify issue/task table updates, sprint gates, and review/acceptance flow. + +## Risks & gotchas +- `issue-pr-review` currently calls the deleted `manage_issue_subagent_pr.sh` for PR-body validation; this must be replaced before script deletion lands. +- Removing script entrypoints is a breaking change for any undocumented external callers. +- If docs/tests are updated before dependency rewiring, CI may fail due to dangling runtime references. 
+- Grouping behavior (`--pr-grouping group`) must keep full task coverage via explicit `--pr-group` mappings in deterministic mode. + +## Rollback plan +- Keep migration changes split by sprint/PR so each layer can be reverted independently. +- If script deletion causes unexpected runtime regressions, temporarily restore only the affected script(s) from the previous commit while preserving updated docs/tests in a separate revert PR. +- If `issue-pr-review` rewiring is unstable, disable merge/close automation changes first and keep review workflow read-only until validation is fixed. +- If the full migration must pause, keep `plan-issue-delivery-loop` skill in binary-first mode and mark other impacted skills as follow-up items in a tracked issue. diff --git a/docs/runbooks/skills/TOOLING_INDEX_V2.md b/docs/runbooks/skills/TOOLING_INDEX_V2.md index 890b3c03..b3bb2875 100644 --- a/docs/runbooks/skills/TOOLING_INDEX_V2.md +++ b/docs/runbooks/skills/TOOLING_INDEX_V2.md @@ -1,6 +1,6 @@ # Skills Tooling Index v2 -This doc lists canonical executable entrypoints (under `skills/**/scripts/` plus PATH-installed tooling). +This doc lists canonical entrypoints (skill scripts, PATH-installed tooling, and scriptless command contracts). Install `nils-cli` via `brew install nils-cli` to get `plan-tooling`, `api-*`, and `semantic-commit` on PATH. For skill directory layout/path rules, use `docs/runbooks/skills/SKILLS_ANATOMY_V2.md` as the canonical reference. For create/validate/remove workflows, see `skills/tools/skill-management/README.md`. @@ -44,13 +44,15 @@ For create/validate/remove workflows, see `skills/tools/skill-management/README. 
- Main-agent issue lifecycle: - `$AGENT_HOME/skills/workflows/issue/issue-lifecycle/scripts/manage_issue_lifecycle.sh` - Subagent worktree + PR execution: - `$AGENT_HOME/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh` + - Scriptless contract using native `git` + `gh` commands (see `skills/workflows/issue/issue-subagent-pr/SKILL.md`) - Main-agent PR review + issue sync: - `$AGENT_HOME/skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh` -## Issue delivery automation (main-agent orchestration wrapper) +## Issue delivery automation (main-agent orchestration CLI) -- End-to-end orchestration (`start` / `status` / `ready-for-review` / `close-after-review`): - - `$AGENT_HOME/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh` -- Plan-driven sprint orchestration (`start-plan` / `start-sprint` / `ready-sprint` / `accept-sprint` / `ready-plan` / `close-plan`): - - `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh` +- Live GitHub-backed orchestration (issue and plan flows): + - `plan-issue <subcommand>` +- Local rehearsal / dry-run orchestration (same subcommands, no GitHub writes): + - `plan-issue-local <subcommand> --dry-run` +- Key subcommands: + - `start-plan`, `start-sprint`, `ready-sprint`, `accept-sprint`, `status-plan`, `ready-plan`, `close-plan` diff --git a/skills/automation/issue-delivery-loop/SKILL.md b/skills/automation/issue-delivery-loop/SKILL.md index 1e1a4c3a..4671de2a 100644 --- a/skills/automation/issue-delivery-loop/SKILL.md +++ b/skills/automation/issue-delivery-loop/SKILL.md @@ -1,6 +1,6 @@ --- name: issue-delivery-loop -description: "Orchestrate end-to-end issue execution loops where main-agent owns issue flow/review only, subagents own implementation PRs, and close gates require approval + merged PRs." 
+description: "Orchestrate plan-issue review/close loops where main-agent owns orchestration and review only, subagents own implementation PRs, and close gates require approval plus merged PRs." --- # Issue Delivery Loop @@ -10,24 +10,30 @@ description: "Orchestrate end-to-end issue execution loops where main-agent owns Prereqs: - Run inside (or have access to) the target repository. -- `gh` available on `PATH`, and `gh auth status` succeeds for issue/PR reads and writes. -- Base workflow scripts exist: - - `$AGENT_HOME/skills/workflows/issue/issue-lifecycle/scripts/manage_issue_lifecycle.sh` +- `plan-issue` and `plan-issue-local` available on `PATH`. +- `gh` available on `PATH`, and `gh auth status` succeeds for live issue/PR reads and writes. +- `issue-pr-review` is the review decision workflow after handoff. Inputs: -- Main-agent issue metadata (`title`, optional body/labels/assignees/milestone). -- Optional task decomposition TSV for bootstrap comments. -- Optional review summary text. +- Plan issue number (`--issue `) created during plan orchestration. +- Optional repository override (`--repo `). +- Optional review summary text (`--summary`). - Approval comment URL (`https://github.com///(issues|pull)/#issuecomment-`) when closing. +- Local rehearsal body markdown file (`--body-file `) for dry-run handoff/close checks. +- Runtime mode: + - Live mode: `plan-issue ...` for GitHub-backed orchestration. + - Local rehearsal mode: + - `plan-issue-local ... --dry-run` for local sprint/status orchestration from body files. + - `plan-issue --dry-run --body-file ...` for plan-level review/close gate rehearsal. - Task owners must be subagent identities (must reference `subagent`); `main-agent` ownership is invalid for implementation tasks. Outputs: -- Deterministic orchestration over issue lifecycle commands with explicit gate checks. -- Status snapshot and review-request markdown blocks for traceable issue history. 
-- Issue close only when review approval and merged-PR checks pass. -- Definition of done: execution is complete only when `close-after-review` succeeds and the target issue is actually closed. +- Deterministic orchestration over typed `plan-issue`/`plan-issue-local` command flows with explicit gate checks. +- Status snapshots and review-request markdown blocks for traceable issue history. +- Issue close only when review approval and merged-PR checks pass via `close-plan`. +- Definition of done: execution is complete only when `close-plan` succeeds and the target issue is actually closed. - Error contract: if any gate/command fails, stop forward progress and report the failing command plus key stderr/stdout gate errors. - Main-agent acts as orchestrator/reviewer only; implementation branches/PRs are delegated to subagents. - Issue task table remains the single execution source of truth (`Subagent PRs` section is legacy and removed by sync). @@ -35,48 +41,51 @@ Outputs: Exit codes: - `0`: success -- non-zero: usage errors, missing tools, gh failures, or gate validation failures +- `1`: runtime failure / gate failure +- `2`: usage error Failure modes: -- Missing required options (`--title`, `--issue`, `--approved-comment-url`, etc.). +- Missing required options (`--issue`, `--approved-comment-url`, `--summary` when required by policy). +- Missing required binaries (`plan-issue`/`plan-issue-local`; `gh` for live mode). - Invalid approval URL format or repo mismatch with `--repo`. - Task rows violate close gates (status not `done`, execution metadata/PR missing, or PR not merged). -- Issue/PR metadata fetch fails via `gh`. +- Issue/PR metadata fetch fails via `gh` in live mode. +- `close-plan --dry-run` invoked without required `--body-file` in local rehearsal. - Task `Owner` is `main-agent`/non-subagent identity in `Task Decomposition`. 
-## Entrypoint - -- `$AGENT_HOME/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh` - ## Role Boundary (Mandatory) - Main-agent is limited to issue orchestration: - - open/update/snapshot/review-handoff/close gates + - status/review-handoff/close gates - dispatch and acceptance decisions - Main-agent must not implement issue tasks directly. - Even for a single-PR issue, implementation must be produced by a subagent PR and then reviewed by main-agent. -- Main-agent review/merge decisions should use `issue-pr-review`; this loop skill enforces owner and close gates. +- Main-agent review/merge decisions should use `issue-pr-review`; this loop skill enforces ownership and close gates. ## Core usage -1. Start issue execution: - - `.../manage_issue_delivery_loop.sh start --repo --title "" --label issue --task-spec <tasks.tsv>` +1. Select execution mode: + - Live mode: `plan-issue <subcommand> ...` + - Local rehearsal mode: `plan-issue-local` for local sprint/status checks; `plan-issue --dry-run --body-file ...` for `ready-plan`/`close-plan`. 2. Dispatch implementation to subagent(s): - Use `issue-subagent-pr` workflow to create task worktrees/PRs. 3. Update status snapshot (main-agent checkpoint): - - `.../manage_issue_delivery_loop.sh status --repo <owner/repo> --issue <number>` + - `plan-issue status-plan --repo <owner/repo> --issue <number>` + - Local rehearsal: `plan-issue-local status-plan --body-file <path> --dry-run` 4. Request review (main-agent review handoff): - - `.../manage_issue_delivery_loop.sh ready-for-review --repo <owner/repo> --issue <number> --summary "<review focus>"` + - `plan-issue ready-plan --repo <owner/repo> --issue <number> --summary "<review focus>"` + - Local rehearsal: `plan-issue ready-plan --summary "<review focus>" --dry-run --body-file <path>` 5. Main-agent review decision: - Use `issue-pr-review` to request follow-up or merge after checks/review are satisfied. 6. 
Close after explicit review approval: - - `.../manage_issue_delivery_loop.sh close-after-review --repo <owner/repo> --issue <number> --approved-comment-url <url>` + - `plan-issue close-plan --repo <owner/repo> --issue <number> --approved-comment-url <url>` + - Local rehearsal: `plan-issue close-plan --approved-comment-url <url> --dry-run --body-file <path>` ## Completion Policy (Mandatory) -- Do not stop at `start`, `status`, or `ready-for-review` as a final state. -- A successful run must terminate at `close-after-review` with issue state `CLOSED`. +- Do not stop at `status-plan` or `ready-plan` as a final state. +- A successful run must terminate at `close-plan` with issue state `CLOSED`. - If close gates fail, treat the run as unfinished and report: - failing command - gate errors (task status, PR merge, approval URL, or owner policy) @@ -84,21 +93,20 @@ Failure modes: ## Full Skill Flow -1. Confirm repository context and `gh auth status` are valid. -2. Prepare issue metadata (title/body/labels) and optional task decomposition TSV (`Owner` must be subagent identities). -3. Run `start` to open the issue and optionally bootstrap `Task Decomposition` from the TSV. -4. Main-agent dispatches implementation tasks to subagents (for example via `issue-subagent-pr`), while remaining orchestration/review-only. -5. As subagent PRs progress, update the issue task table and PR links so task state stays consistent. +1. Confirm repository context, runtime mode (`plan-issue` vs `plan-issue-local`), and `gh auth status` for live mode. +2. Confirm the plan issue already exists and task decomposition ownership remains subagent-only. +3. Main-agent dispatches implementation tasks to subagents (for example via `issue-subagent-pr`), while remaining orchestration/review-only. +4. As subagent PRs progress, update the issue task table and PR links so task state stays consistent. 
- Fill real `Owner` / `Branch` / `Worktree` / `Execution Mode` / `PR` values as execution happens (initial `TBD` rows are expected). - Use canonical PR references as `#<number>` for tables/comments. -6. Run `status` to generate a main-agent snapshot comment for task/PR/review state checkpoints. -7. Run `ready-for-review` when the issue is ready for main-agent review handoff (adds review comment/labels as configured). -8. Main-agent reviews subagent PRs (typically with `issue-pr-review`), requests follow-up or merges until close gates are satisfied. -9. Run `close-after-review` with an explicit approval comment URL to enforce final gates (task status + merged PR checks), re-sync/normalize the issue task table, and close the issue. +5. Run `status-plan` to generate a main-agent snapshot comment for task/PR/review state checkpoints. +6. Run `ready-plan` when the issue is ready for main-agent review handoff. +7. Main-agent reviews subagent PRs (typically with `issue-pr-review`), requests follow-up or merges until close gates are satisfied. +8. Run `close-plan` with an explicit approval comment URL to enforce final gates (task status plus merged PR checks), re-sync/normalize the issue task table, and close the issue. ## Notes -- `status` and `ready-for-review` also support `--body-file` for offline/dry-run rendering in tests. -- `close-after-review` supports `--body-file` for offline gate checks; it prints `DRY-RUN-CLOSE-SKIPPED` in body-file mode. +- Use `plan-issue-local` for local sprint/status orchestration from body files. +- Use `plan-issue --dry-run --body-file ...` for deterministic offline `ready-plan` / `close-plan` gate rehearsal. - `Execution Mode` controls branch/worktree uniqueness checks: only `pr-isolated` requires unique branch/worktree per row. - Use `--dry-run` to suppress write operations while previewing commands. 
diff --git a/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh b/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh deleted file mode 100755 index aa6d64fb..00000000 --- a/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh +++ /dev/null @@ -1,1178 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)" -skill_dir="$(cd "${script_dir}/.." && pwd -P)" -repo_root_default="$(cd "${skill_dir}/../../.." && pwd -P)" -agent_home="${AGENT_HOME:-$repo_root_default}" - -issue_lifecycle_script="${repo_root_default}/skills/workflows/issue/issue-lifecycle/scripts/manage_issue_lifecycle.sh" -if [[ ! -x "$issue_lifecycle_script" ]]; then - issue_lifecycle_script="${agent_home%/}/skills/workflows/issue/issue-lifecycle/scripts/manage_issue_lifecycle.sh" -fi - -die() { - echo "error: $*" >&2 - exit 1 -} - -require_cmd() { - local cmd="${1:-}" - command -v "$cmd" >/dev/null 2>&1 || die "$cmd is required" -} - -print_cmd() { - local out='' - local arg='' - for arg in "$@"; do - out+=" $(printf '%q' "$arg")" - done - printf '%s\n' "${out# }" -} - -run_cmd() { - if [[ "${dry_run}" == "1" ]]; then - echo "dry-run: $(print_cmd "$@")" >&2 - return 0 - fi - "$@" -} - -trim_text() { - local value="${1:-}" - value="${value#"${value%%[![:space:]]*}"}" - value="${value%"${value##*[![:space:]]}"}" - printf '%s' "$value" -} - -to_lower() { - printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]' -} - -is_pr_placeholder() { - local value='' - value="$(trim_text "${1:-}")" - local lower='' - lower="$(to_lower "$value")" - case "$lower" in - ""|"-"|"tbd"|"none"|"n/a"|"na") - return 0 - ;; - *) - return 1 - ;; - esac -} - -normalize_owner_token() { - local value='' - value="$(trim_text "${1:-}")" - value="$(to_lower "$value")" - value="${value//_/ }" - value="${value//-/ }" - value="${value//\// }" - value="${value// /}" - printf '%s' "$value" -} - -is_owner_placeholder() { - 
local normalized='' - normalized="$(normalize_owner_token "${1:-}")" - case "$normalized" in - ""|"tbd"|"none"|"na") - return 0 - ;; - *) - return 1 - ;; - esac -} - -is_main_agent_owner() { - local owner_raw='' - owner_raw="$(trim_text "${1:-}")" - local owner_lower='' - owner_lower="$(to_lower "$owner_raw")" - local normalized='' - normalized="$(normalize_owner_token "$owner_raw")" - - case "$normalized" in - "mainagent"|"main"|"codex"|"orchestrator"|"leadagent") - return 0 - ;; - esac - - case "$owner_lower" in - *"main-agent"*|*"main agent"*) - return 0 - ;; - esac - - return 1 -} - -is_subagent_owner() { - local owner_lower='' - owner_lower="$(to_lower "$(trim_text "${1:-}")")" - [[ "$owner_lower" == *"subagent"* ]] -} - -body_contains_task_decomposition() { - local body_file="${1:-}" - [[ -f "$body_file" ]] || return 1 - if command -v rg >/dev/null 2>&1; then - rg -q '^## Task Decomposition$' "$body_file" - return $? - fi - grep -Eq '^## Task Decomposition$' "$body_file" -} - -enforce_subagent_owner_policy() { - local body_file="${1:-}" - local source_label="${2:-issue body}" - [[ -f "$body_file" ]] || die "owner policy check body file not found: $body_file" - - if ! body_contains_task_decomposition "$body_file"; then - return 0 - fi - - local errors=() - while IFS=$'\t' read -r task _summary owner branch worktree execution_mode _pr status _notes; do - local task_id owner_value branch_value worktree_value mode_value status_value - task_id="$(trim_text "$task")" - owner_value="$(trim_text "$owner")" - branch_value="$(trim_text "$branch")" - worktree_value="$(trim_text "$worktree")" - mode_value="$(trim_text "$execution_mode")" - status_value="$(to_lower "$(trim_text "$status")")" - - # Planning/blocked rows can remain TBD until execution details are real. 
- if [[ "$status_value" == "planned" || "$status_value" == "blocked" ]]; then - continue - fi - if is_owner_placeholder "$owner_value"; then - errors+=("${task_id}: Owner must reference a subagent identity (got: ${owner_value:-<empty>})") - continue - fi - if is_main_agent_owner "$owner_value"; then - errors+=("${task_id}: Owner must not be main-agent; main-agent is orchestration/review-only") - continue - fi - if ! is_subagent_owner "$owner_value"; then - errors+=("${task_id}: Owner must include 'subagent' to mark delegated implementation ownership") - continue - fi - if is_owner_placeholder "$branch_value"; then - errors+=("${task_id}: Branch must not be TBD when Status is ${status_value:-<empty>}") - fi - if is_owner_placeholder "$worktree_value"; then - errors+=("${task_id}: Worktree must not be TBD when Status is ${status_value:-<empty>}") - fi - if is_owner_placeholder "$mode_value"; then - errors+=("${task_id}: Execution Mode must not be TBD when Status is ${status_value:-<empty>}") - fi - done < <(parse_issue_tasks_tsv "$body_file") - - if [[ ${#errors[@]} -gt 0 ]]; then - local err='' - for err in "${errors[@]}"; do - echo "error: ${source_label}: ${err}" >&2 - done - return 1 - fi - - return 0 -} - -normalize_pr_ref() { - local value - value="$(trim_text "${1:-}")" - if [[ "$value" =~ ^PR#([0-9]+)$ ]]; then - printf '%s\n' "${BASH_REMATCH[1]}" - return 0 - fi - if [[ "$value" =~ ^#([0-9]+)$ ]]; then - printf '%s\n' "${BASH_REMATCH[1]}" - return 0 - fi - if [[ "$value" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+#([0-9]+)$ ]]; then - printf '%s\n' "${BASH_REMATCH[1]}" - return 0 - fi - printf '%s\n' "$value" -} - -canonical_pr_display() { - local value - value="$(trim_text "${1:-}")" - if is_pr_placeholder "$value"; then - printf 'TBD\n' - return 0 - fi - - local normalized - normalized="$(normalize_pr_ref "$value")" - if [[ "$normalized" =~ ^[0-9]+$ ]]; then - printf '#%s\n' "$normalized" - return 0 - fi - printf '%s\n' "$normalized" -} - 
-extract_issue_number_from_url() { - local issue_url="${1:-}" - if [[ "$issue_url" =~ /issues/([0-9]+)$ ]]; then - printf '%s\n' "${BASH_REMATCH[1]}" - return 0 - fi - if [[ "$issue_url" == "DRY-RUN-ISSUE-URL" ]]; then - printf '999\n' - return 0 - fi - return 1 -} - -ensure_issue_lifecycle_script() { - [[ -x "$issue_lifecycle_script" ]] || die "missing executable: $issue_lifecycle_script" -} - -run_issue_lifecycle() { - local cmd=("$issue_lifecycle_script" "$@") - if [[ -n "$repo_arg" ]]; then - cmd+=(--repo "$repo_arg") - fi - if [[ "$dry_run" == "1" ]]; then - cmd+=(--dry-run) - fi - "${cmd[@]}" -} - -issue_read_cmd() { - local issue_number="${1:-}" - local out_file="${2:-}" - [[ -n "$issue_number" ]] || die "issue number is required" - [[ -n "$out_file" ]] || die "output file path is required" - - require_cmd gh - local cmd=(gh issue view "$issue_number") - if [[ -n "$repo_arg" ]]; then - cmd+=(-R "$repo_arg") - fi - cmd+=(--json body -q .body) - - "${cmd[@]}" >"$out_file" -} - -fetch_issue_state() { - local issue_number="${1:-}" - [[ -n "$issue_number" ]] || die "issue number is required" - require_cmd gh - - local cmd=(gh issue view "$issue_number") - if [[ -n "$repo_arg" ]]; then - cmd+=(-R "$repo_arg") - fi - cmd+=(--json state -q .state) - "${cmd[@]}" -} - -parse_issue_tasks_tsv() { - local body_file="${1:-}" - [[ -f "$body_file" ]] || die "issue body file not found: $body_file" - - python3 - "$body_file" <<'PY' -import pathlib -import sys - -path = pathlib.Path(sys.argv[1]) -text = path.read_text(encoding="utf-8") -lines = text.splitlines() - - -def section_bounds(heading: str) -> tuple[int, int]: - start = None - for idx, line in enumerate(lines): - if line.strip() == heading: - start = idx + 1 - break - if start is None: - raise SystemExit(f"error: missing required heading: {heading}") - - end = len(lines) - for idx in range(start, len(lines)): - if lines[idx].startswith("## "): - end = idx - break - return start, end - - -def parse_row(line: str) -> 
list[str]: - s = line.strip() - if not (s.startswith("|") and s.endswith("|")): - return [] - return [cell.strip() for cell in s[1:-1].split("|")] - - -required_columns = ["Task", "Summary", "Owner", "Branch", "Worktree", "PR", "Status", "Notes"] -start, end = section_bounds("## Task Decomposition") -section = lines[start:end] -table_lines = [line for line in section if line.strip().startswith("|")] -if len(table_lines) < 3: - raise SystemExit("error: Task Decomposition must contain a markdown table with at least one row") - -headers = parse_row(table_lines[0]) -missing = [col for col in required_columns if col not in headers] -if missing: - raise SystemExit("error: missing Task Decomposition columns: " + ", ".join(missing)) - -rows = [] -for raw in table_lines[2:]: - cells = parse_row(raw) - if not cells: - continue - if len(cells) != len(headers): - raise SystemExit("error: malformed Task Decomposition row") - row = {headers[idx]: cells[idx] for idx in range(len(headers))} - if "Execution Mode" not in row: - row["Execution Mode"] = "TBD" - if not any(v.strip() for v in cells): - continue - rows.append(row) - -if not rows: - raise SystemExit("error: Task Decomposition table must include at least one task row") - -for row in rows: - values = [ - row.get("Task", "").replace("\t", " "), - row.get("Summary", "").replace("\t", " "), - row.get("Owner", "").replace("\t", " "), - row.get("Branch", "").replace("\t", " "), - row.get("Worktree", "").replace("\t", " "), - row.get("Execution Mode", "").replace("\t", " "), - row.get("PR", "").replace("\t", " "), - row.get("Status", "").replace("\t", " "), - row.get("Notes", "").replace("\t", " "), - ] - print("\t".join(values)) -PY -} - -validate_approval_comment_url() { - local url="${1:-}" - python3 - "$url" <<'PY' -import re -import sys - -url = sys.argv[1].strip() -pat = re.compile(r"^https://github\.com/([^/]+)/([^/]+)/(issues|pull)/(\d+)#issuecomment-(\d+)$") -m = pat.match(url) -if not m: - raise SystemExit("error: 
--approved-comment-url must be a GitHub issues/pull comment URL") -print("\t".join(m.groups())) -PY -} - -fetch_pr_meta_tsv() { - local pr_ref="${1:-}" - require_cmd gh - - local cmd=(gh pr view "$pr_ref") - if [[ -n "$repo_arg" ]]; then - cmd+=(-R "$repo_arg") - fi - cmd+=( - --json - "number,url,state,isDraft,reviewDecision,mergeStateStatus,mergedAt" - -q - '[.number, .url, .state, (if .isDraft then "true" else "false" end), ((.reviewDecision // "") | if . == "" then "NONE" else . end), ((.mergeStateStatus // "") | if . == "" then "UNKNOWN" else . end), (.mergedAt // "")] | @tsv' - ) - - "${cmd[@]}" -} - -build_status_snapshot() { - local body_file="${1:-}" - - local now_utc - now_utc="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" - local nl=$'\n' - - local output="## Main-Agent Status Snapshot${nl}${nl}" - output+="- Generated at: ${now_utc}${nl}${nl}" - output+="| Task | Summary | Planned Status | PR | PR State | Review | Suggested |${nl}" - output+="| --- | --- | --- | --- | --- | --- | --- |${nl}" - - local errors=() - local has_rows="0" - - while IFS=$'\t' read -r task summary _owner _branch _worktree _execution_mode pr status _notes; do - has_rows="1" - local task_id summary_value pr_value planned_status - task_id="$(trim_text "$task")" - summary_value="$(trim_text "$summary")" - pr_value="$(trim_text "$pr")" - planned_status="$(trim_text "$status")" - - local pr_display pr_state review_state suggested - pr_display="$pr_value" - pr_state="NO_PR" - review_state="-" - suggested="planned" - - if is_pr_placeholder "$pr_value"; then - pr_display="TBD" - if [[ "$planned_status" == "done" ]]; then - suggested="blocked" - fi - else - local pr_ref - pr_ref="$(normalize_pr_ref "$pr_value")" - pr_display="$(canonical_pr_display "$pr_value")" - - if [[ "$dry_run" == "1" ]]; then - pr_state="UNKNOWN" - review_state="UNKNOWN" - suggested="in-progress" - else - local meta - set +e - meta="$(fetch_pr_meta_tsv "$pr_ref" 2>&1)" - local meta_code=$? 
- set -e - if [[ "$meta_code" -ne 0 ]]; then - errors+=("${task_id}: failed to query PR ${pr_ref}: ${meta}") - pr_state="ERROR" - review_state="ERROR" - suggested="blocked" - else - local _pr_number pr_url state _is_draft review_decision _merge_status merged_at - IFS=$'\t' read -r _pr_number pr_url state _is_draft review_decision _merge_status merged_at <<<"$meta" - pr_display="${pr_url:-$pr_ref}" - pr_state="${state:-UNKNOWN}" - review_state="${review_decision:-UNKNOWN}" - - if [[ -n "$merged_at" ]]; then - suggested="done" - elif [[ "$state" == "CLOSED" ]]; then - suggested="blocked" - elif [[ "$review_decision" == "CHANGES_REQUESTED" ]]; then - suggested="blocked" - else - suggested="in-progress" - fi - fi - fi - fi - - output+="| ${task_id} | ${summary_value:-'-'} | ${planned_status:-unknown} | ${pr_display} | ${pr_state} | ${review_state} | ${suggested} |${nl}" - done < <(parse_issue_tasks_tsv "$body_file") - - [[ "$has_rows" == "1" ]] || die "no task rows found in issue body" - - if [[ ${#errors[@]} -gt 0 ]]; then - printf '%s\n' "$output" - local err - for err in "${errors[@]+"${errors[@]}"}"; do - echo "error: $err" >&2 - done - return 1 - fi - - printf '%s\n' "$output" -} - -build_review_request_body() { - local body_file="${1:-}" - local summary_text="${2:-}" - local nl=$'\n' - - local output="## Main-Agent Review Request${nl}${nl}" - output+="- Close gate: provide an approval comment URL, then run close-after-review.${nl}${nl}" - output+="| Task | Summary | Status | PR |${nl}" - output+="| --- | --- | --- | --- |${nl}" - - local task_count=0 - local pr_count=0 - - while IFS=$'\t' read -r task summary _owner _branch _worktree _execution_mode pr status _notes; do - task_count=$((task_count + 1)) - local task_id summary_value status_value pr_value - task_id="$(trim_text "$task")" - summary_value="$(trim_text "$summary")" - status_value="$(trim_text "$status")" - pr_value="$(trim_text "$pr")" - - if ! 
is_pr_placeholder "$pr_value"; then - pr_count=$((pr_count + 1)) - pr_value="$(canonical_pr_display "$pr_value")" - else - pr_value="TBD" - fi - - output+="| ${task_id} | ${summary_value:-'-'} | ${status_value} | ${pr_value} |${nl}" - done < <(parse_issue_tasks_tsv "$body_file") - - [[ "$task_count" -gt 0 ]] || die "issue has no tasks in Task Decomposition" - [[ "$pr_count" -gt 0 ]] || die "ready-for-review requires at least one non-TBD PR" - - if [[ -n "$summary_text" ]]; then - output+="${nl}## Main-Agent Notes${nl}${nl}${summary_text}${nl}" - fi - - printf '%s\n' "$output" -} - -compose_close_comment() { - local approved_url="${1:-}" - local merged_rows="${2:-}" - local extra="${3:-}" - local nl=$'\n' - - local msg="Closed after review approval: ${approved_url}${nl}${nl}" - msg+="Merged implementation PRs:${nl}${merged_rows}${nl}" - if [[ -n "$extra" ]]; then - msg+="${nl}Additional note:${nl}${extra}${nl}" - fi - printf '%s\n' "$msg" -} - -usage() { - cat <<'USAGE' -Usage: - manage_issue_delivery_loop.sh <start|status|ready-for-review|close-after-review> [options] - -Subcommands: - start Open and bootstrap an issue execution loop - status Build a task/PR status snapshot and optionally comment on issue - ready-for-review Post main-agent review request and optionally set review labels - close-after-review Close issue only after approval URL + merged PR checks - -Owner policy: - - Task Decomposition.Owner must reference subagent ownership. - - main-agent/codex ownership is rejected for implementation tasks. 
- -Common options: - --repo <owner/repo> Target repository passed to gh via -R - --dry-run Print write operations without mutating GitHub state - -start options: - --title <text> Issue title (required) - --body <text> Issue body text - --body-file <path> Issue body file - --use-template Force issue-lifecycle built-in template - --label <name> Repeatable label - --assignee <login> Repeatable assignee - --project <title> Repeatable project title - --milestone <name> Milestone title - --task-spec <path> Optional TSV for task decomposition comment - --task-header <text> Decomposition heading (default: Task Decomposition) - --no-decompose-comment Print decomposition only (do not comment) - -status options: - --issue <number> Issue number (required unless --body-file) - --body-file <path> Local issue body markdown for offline snapshot - --comment Post snapshot to issue (default when --issue) - --no-comment Do not post snapshot comment - -ready-for-review options: - --issue <number> Issue number (required unless --body-file) - --body-file <path> Local issue body markdown for offline render - --summary <text> Additional reviewer notes - --summary-file <path> Additional reviewer notes file - --label <name> Label to add when issue mode (default: needs-review) - --remove-label <name> Repeatable labels to remove when issue mode - --no-label-update Do not mutate labels - --comment Post review request comment (default when --issue) - --no-comment Do not post review request comment - -close-after-review options: - --issue <number> Issue number to close (required unless --body-file) - --body-file <path> Local issue body markdown for offline gate checks - --approved-comment-url <url> Reviewer approval comment URL (required) - --reason <completed|not planned> - Close reason (default: completed) - --comment <text> Additional close note - --comment-file <path> Additional close note file - --allow-not-done Allow closing even when task Status is not done -USAGE -} - -subcommand="${1:-}" 
-if [[ -z "$subcommand" ]]; then - usage >&2 - exit 1 -fi -shift || true - -repo_arg="" -dry_run="0" - -ensure_issue_lifecycle_script - -case "$subcommand" in - start) - title="" - body="" - body_file="" - use_template="0" - milestone="" - task_spec="" - task_header="Task Decomposition" - decompose_comment="1" - labels=() - assignees=() - projects=() - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --title) - title="${2:-}" - shift 2 - ;; - --body) - body="${2:-}" - shift 2 - ;; - --body-file) - body_file="${2:-}" - shift 2 - ;; - --use-template) - use_template="1" - shift - ;; - --label) - labels+=("${2:-}") - shift 2 - ;; - --assignee) - assignees+=("${2:-}") - shift 2 - ;; - --project) - projects+=("${2:-}") - shift 2 - ;; - --milestone) - milestone="${2:-}" - shift 2 - ;; - --task-spec) - task_spec="${2:-}" - shift 2 - ;; - --task-header) - task_header="${2:-}" - shift 2 - ;; - --no-decompose-comment) - decompose_comment="0" - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run="1" - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - die "unknown option for start: $1" - ;; - esac - done - - [[ -n "$title" ]] || die "--title is required for start" - if [[ -n "$body" && -n "$body_file" ]]; then - die "use either --body or --body-file, not both" - fi - if [[ -n "$body_file" && ! -f "$body_file" ]]; then - die "body file not found: $body_file" - fi - if [[ -n "$task_spec" && ! 
-f "$task_spec" ]]; then - die "task spec not found: $task_spec" - fi - - if [[ -n "$body" ]]; then - temp_start_body="$(mktemp)" - printf '%s\n' "$body" >"$temp_start_body" - enforce_subagent_owner_policy "$temp_start_body" "start-body" - rm -f "$temp_start_body" - elif [[ -n "$body_file" ]]; then - enforce_subagent_owner_policy "$body_file" "start-body-file" - fi - - local_open_args=(open --title "$title") - if [[ -n "$body" ]]; then - local_open_args+=(--body "$body") - elif [[ -n "$body_file" ]]; then - local_open_args+=(--body-file "$body_file") - elif [[ "$use_template" == "1" ]]; then - local_open_args+=(--use-template) - fi - if [[ -n "$milestone" ]]; then - local_open_args+=(--milestone "$milestone") - fi - - item='' - for item in "${labels[@]+"${labels[@]}"}"; do - local_open_args+=(--label "$item") - done - for item in "${assignees[@]+"${assignees[@]}"}"; do - local_open_args+=(--assignee "$item") - done - for item in "${projects[@]+"${projects[@]}"}"; do - local_open_args+=(--project "$item") - done - - issue_url="$(run_issue_lifecycle "${local_open_args[@]}")" - issue_number="" - if ! 
issue_number="$(extract_issue_number_from_url "$issue_url")"; then - die "failed to parse issue number from URL: $issue_url" - fi - - if [[ -n "$task_spec" ]]; then - decompose_args=(decompose --issue "$issue_number" --spec "$task_spec" --header "$task_header") - if [[ "$decompose_comment" == "1" ]]; then - decompose_args+=(--comment) - fi - run_issue_lifecycle "${decompose_args[@]}" >/dev/null - fi - - if [[ "$dry_run" != "1" && "$issue_number" != "999" ]]; then - run_issue_lifecycle validate --issue "$issue_number" >/dev/null - fi - - printf 'ISSUE_URL=%s\n' "$issue_url" - printf 'ISSUE_NUMBER=%s\n' "$issue_number" - printf 'TASK_SPEC_APPLIED=%s\n' "$( [[ -n "$task_spec" ]] && echo 1 || echo 0 )" - ;; - - status) - issue_number="" - body_file="" - post_comment="" - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --issue) - issue_number="${2:-}" - shift 2 - ;; - --body-file) - body_file="${2:-}" - shift 2 - ;; - --comment) - post_comment="1" - shift - ;; - --no-comment) - post_comment="0" - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run="1" - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - die "unknown option for status: $1" - ;; - esac - done - - if [[ -n "$issue_number" && -n "$body_file" ]]; then - die "use either --issue or --body-file, not both" - fi - if [[ -z "$issue_number" && -z "$body_file" ]]; then - die "status requires --issue or --body-file" - fi - - source_label="body-file" - source_ref="$body_file" - temp_body="" - if [[ -n "$issue_number" ]]; then - source_label="issue" - source_ref="#${issue_number}" - if [[ -z "$post_comment" ]]; then - post_comment="1" - fi - run_issue_lifecycle validate --issue "$issue_number" >/dev/null - temp_body="$(mktemp)" - issue_read_cmd "$issue_number" "$temp_body" - body_file="$temp_body" - else - [[ -f "$body_file" ]] || die "body file not found: $body_file" - if [[ -z "$post_comment" ]]; then - post_comment="0" - fi - run_issue_lifecycle validate --body-file "$body_file" 
>/dev/null - fi - - enforce_subagent_owner_policy "$body_file" "status ${source_label}:${source_ref}" - - snapshot="$(build_status_snapshot "$body_file")" - printf '%s\n' "$snapshot" - - if [[ -n "$temp_body" ]]; then - rm -f "$temp_body" - fi - - if [[ "$post_comment" == "1" ]]; then - [[ -n "$issue_number" ]] || die "--comment requires --issue" - run_issue_lifecycle comment --issue "$issue_number" --body "$snapshot" >/dev/null - fi - ;; - - ready-for-review) - issue_number="" - body_file="" - summary_text="" - summary_file="" - review_label="needs-review" - label_update="1" - post_comment="" - remove_labels=() - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --issue) - issue_number="${2:-}" - shift 2 - ;; - --body-file) - body_file="${2:-}" - shift 2 - ;; - --summary) - summary_text="${2:-}" - shift 2 - ;; - --summary-file) - summary_file="${2:-}" - shift 2 - ;; - --label) - review_label="${2:-}" - shift 2 - ;; - --remove-label) - remove_labels+=("${2:-}") - shift 2 - ;; - --no-label-update) - label_update="0" - shift - ;; - --comment) - post_comment="1" - shift - ;; - --no-comment) - post_comment="0" - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run="1" - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - die "unknown option for ready-for-review: $1" - ;; - esac - done - - if [[ -n "$issue_number" && -n "$body_file" ]]; then - die "use either --issue or --body-file, not both" - fi - if [[ -z "$issue_number" && -z "$body_file" ]]; then - die "ready-for-review requires --issue or --body-file" - fi - if [[ -n "$summary_text" && -n "$summary_file" ]]; then - die "use either --summary or --summary-file, not both" - fi - if [[ -n "$summary_file" ]]; then - [[ -f "$summary_file" ]] || die "summary file not found: $summary_file" - summary_text="$(cat "$summary_file")" - fi - - issue_ref="local-body" - temp_body="" - if [[ -n "$issue_number" ]]; then - issue_ref="#${issue_number}" - if [[ -z "$post_comment" ]]; then - post_comment="1" - 
fi - run_issue_lifecycle validate --issue "$issue_number" >/dev/null - temp_body="$(mktemp)" - issue_read_cmd "$issue_number" "$temp_body" - body_file="$temp_body" - else - [[ -f "$body_file" ]] || die "body file not found: $body_file" - if [[ -z "$post_comment" ]]; then - post_comment="0" - fi - run_issue_lifecycle validate --body-file "$body_file" >/dev/null - fi - - enforce_subagent_owner_policy "$body_file" "ready-for-review ${issue_ref}" - - review_body="$(build_review_request_body "$body_file" "$summary_text")" - printf '%s\n' "$review_body" - - if [[ -n "$temp_body" ]]; then - rm -f "$temp_body" - fi - - if [[ "$label_update" == "1" && -n "$issue_number" ]]; then - update_args=(update --issue "$issue_number") - if [[ -n "$review_label" ]]; then - update_args+=(--add-label "$review_label") - fi - lbl='' - for lbl in "${remove_labels[@]+"${remove_labels[@]}"}"; do - update_args+=(--remove-label "$lbl") - done - if [[ ${#update_args[@]} -gt 2 ]]; then - run_issue_lifecycle "${update_args[@]}" >/dev/null - fi - fi - - if [[ "$post_comment" == "1" ]]; then - [[ -n "$issue_number" ]] || die "--comment requires --issue" - run_issue_lifecycle comment --issue "$issue_number" --body "$review_body" >/dev/null - fi - ;; - - close-after-review) - issue_number="" - body_file="" - approved_comment_url="" - close_reason="completed" - close_comment="" - close_comment_file="" - allow_not_done="0" - issue_state="" - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --issue) - issue_number="${2:-}" - shift 2 - ;; - --body-file) - body_file="${2:-}" - shift 2 - ;; - --approved-comment-url) - approved_comment_url="${2:-}" - shift 2 - ;; - --reason) - close_reason="${2:-}" - shift 2 - ;; - --comment) - close_comment="${2:-}" - shift 2 - ;; - --comment-file) - close_comment_file="${2:-}" - shift 2 - ;; - --allow-not-done) - allow_not_done="1" - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run="1" - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - die 
"unknown option for close-after-review: $1" - ;; - esac - done - - if [[ -n "$issue_number" && -n "$body_file" ]]; then - die "use either --issue or --body-file, not both" - fi - if [[ -z "$issue_number" && -z "$body_file" ]]; then - die "close-after-review requires --issue or --body-file" - fi - [[ -n "$approved_comment_url" ]] || die "--approved-comment-url is required" - if [[ "$close_reason" != "completed" && "$close_reason" != "not planned" ]]; then - die "--reason must be one of: completed, not planned" - fi - if [[ -n "$close_comment" && -n "$close_comment_file" ]]; then - die "use either --comment or --comment-file, not both" - fi - if [[ -n "$close_comment_file" ]]; then - [[ -f "$close_comment_file" ]] || die "comment file not found: $close_comment_file" - close_comment="$(cat "$close_comment_file")" - fi - - approval_meta="$(validate_approval_comment_url "$approved_comment_url")" - IFS=$'\t' read -r approval_owner approval_repo _ _ approval_comment_id <<<"$approval_meta" - - if [[ -n "$repo_arg" ]]; then - if [[ "$repo_arg" != "${approval_owner}/${approval_repo}" ]]; then - die "approved comment URL repo (${approval_owner}/${approval_repo}) does not match --repo ${repo_arg}" - fi - fi - - if [[ "$dry_run" != "1" ]]; then - require_cmd gh - gh api "repos/${approval_owner}/${approval_repo}/issues/comments/${approval_comment_id}" >/dev/null - fi - - temp_body="" - if [[ -n "$issue_number" ]]; then - # Re-normalize the issue body before the final gate so main-agent closes against - # the latest corrected Task Decomposition shape (including legacy section cleanup). 
- run_issue_lifecycle sync --issue "$issue_number" >/dev/null - run_issue_lifecycle validate --issue "$issue_number" >/dev/null - temp_body="$(mktemp)" - issue_read_cmd "$issue_number" "$temp_body" - body_file="$temp_body" - else - [[ -f "$body_file" ]] || die "body file not found: $body_file" - run_issue_lifecycle validate --body-file "$body_file" >/dev/null - fi - - enforce_subagent_owner_policy "$body_file" "close-after-review" - - merged_rows="" - gate_errors=() - nl=$'\n' - pr_refs=() - pr_tasks=() - - while IFS=$'\t' read -r task _summary _owner _branch _worktree _execution_mode pr status _notes; do - task_id="$(trim_text "$task")" - pr_value="$(trim_text "$pr")" - status_value="$(to_lower "$status")" - - if [[ "$allow_not_done" != "1" && "$status_value" != "done" ]]; then - gate_errors+=("${task_id}: Status must be done before close (got: ${status})") - fi - if is_pr_placeholder "$pr_value"; then - gate_errors+=("${task_id}: PR must not be TBD before close") - continue - fi - - pr_ref="$(normalize_pr_ref "$pr_value")" - pr_index='-1' - for i in "${!pr_refs[@]}"; do - if [[ "${pr_refs[$i]}" == "$pr_ref" ]]; then - pr_index="$i" - break - fi - done - if [[ "$pr_index" == '-1' ]]; then - pr_refs+=("$pr_ref") - pr_tasks+=("$task_id") - else - pr_tasks[$pr_index]+=", ${task_id}" - fi - done < <(parse_issue_tasks_tsv "$body_file") - - for i in "${!pr_refs[@]}"; do - pr_ref="${pr_refs[$i]}" - task_list="${pr_tasks[$i]}" - - if [[ "$dry_run" == "1" && -z "$issue_number" ]]; then - merged_rows+="- ${pr_ref} (tasks: ${task_list}; merge check skipped in dry-run body-file mode)${nl}" - continue - fi - - set +e - pr_meta="$(fetch_pr_meta_tsv "$pr_ref" 2>&1)" - pr_meta_code=$? 
- set -e - if [[ "$pr_meta_code" -ne 0 ]]; then - pr_meta="${pr_meta//$'\n'/ }" - gate_errors+=("Tasks [${task_list}]: failed to query PR ${pr_ref}: ${pr_meta}") - continue - fi - - IFS=$'\t' read -r _pr_number pr_url pr_state _is_draft _review_decision _merge_state merged_at <<<"$pr_meta" - if [[ -z "$merged_at" ]]; then - gate_errors+=("Tasks [${task_list}]: PR is not merged (${pr_url:-$pr_ref}, state=${pr_state})") - else - merged_rows+="- ${pr_url:-$pr_ref} (tasks: ${task_list})${nl}" - fi - done - - if [[ -n "$temp_body" ]]; then - rm -f "$temp_body" - fi - - if [[ ${#gate_errors[@]} -gt 0 ]]; then - for err in "${gate_errors[@]+"${gate_errors[@]}"}"; do - echo "error: $err" >&2 - done - exit 1 - fi - - final_close_comment="$(compose_close_comment "$approved_comment_url" "$merged_rows" "$close_comment")" - printf '%s\n' "$final_close_comment" - - if [[ -n "$issue_number" ]]; then - run_issue_lifecycle close --issue "$issue_number" --reason "$close_reason" --comment "$final_close_comment" >/dev/null - if [[ "$dry_run" == "1" ]]; then - printf 'ISSUE_CLOSE_STATUS=DRY_RUN\n' - else - issue_state="$(fetch_issue_state "$issue_number")" - if [[ "$issue_state" != "CLOSED" ]]; then - die "close-after-review did not close issue #${issue_number} (state=${issue_state})" - fi - printf 'ISSUE_CLOSE_STATUS=SUCCESS\n' - printf 'ISSUE_NUMBER=%s\n' "$issue_number" - printf 'ISSUE_STATE=%s\n' "$issue_state" - printf 'DONE_CRITERIA=ISSUE_CLOSED\n' - fi - else - echo "DRY-RUN-CLOSE-SKIPPED" - printf 'ISSUE_CLOSE_STATUS=DRY_RUN\n' - fi - ;; - - -h|--help) - usage - ;; - - *) - die "unknown subcommand: $subcommand" - ;; -esac diff --git a/skills/automation/issue-delivery-loop/tests/test_automation_issue_delivery_loop.py b/skills/automation/issue-delivery-loop/tests/test_automation_issue_delivery_loop.py index 24d0613f..a54b538b 100644 --- a/skills/automation/issue-delivery-loop/tests/test_automation_issue_delivery_loop.py +++ 
b/skills/automation/issue-delivery-loop/tests/test_automation_issue_delivery_loop.py @@ -2,7 +2,7 @@ from pathlib import Path -from skills._shared.python.skill_testing import assert_entrypoints_exist, assert_skill_contract +from skills._shared.python.skill_testing import assert_skill_contract def test_automation_issue_delivery_loop_contract() -> None: @@ -10,16 +10,6 @@ def test_automation_issue_delivery_loop_contract() -> None: assert_skill_contract(skill_root) -def test_automation_issue_delivery_loop_entrypoints_exist() -> None: - skill_root = Path(__file__).resolve().parents[1] - assert_entrypoints_exist( - skill_root, - [ - "scripts/manage_issue_delivery_loop.sh", - ], - ) - - def test_issue_delivery_loop_skill_enforces_main_agent_role_boundary() -> None: skill_root = Path(__file__).resolve().parents[1] text = (skill_root / "SKILL.md").read_text(encoding="utf-8") @@ -31,55 +21,22 @@ def test_issue_delivery_loop_skill_enforces_main_agent_role_boundary() -> None: def test_issue_delivery_loop_skill_requires_close_for_done() -> None: skill_root = Path(__file__).resolve().parents[1] text = (skill_root / "SKILL.md").read_text(encoding="utf-8") - assert "Definition of done: execution is complete only when `close-after-review` succeeds and the target issue is actually closed." in text - assert "A successful run must terminate at `close-after-review` with issue state `CLOSED`." in text + assert "Definition of done: execution is complete only when `close-plan` succeeds and the target issue is actually closed." in text + assert "A successful run must terminate at `close-plan` with issue state `CLOSED`." 
in text assert "If close gates fail, treat the run as unfinished" in text -def test_issue_delivery_loop_script_enforces_subagent_owner_policy() -> None: - skill_root = Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "manage_issue_delivery_loop.sh").read_text(encoding="utf-8") - assert "enforce_subagent_owner_policy" in text - assert "Owner must not be main-agent" in text - assert "Owner must include 'subagent'" in text - assert "canonical_pr_display" in text - assert "pr_refs=()" in text - assert "Tasks [" in text - assert "(tasks: " in text - - -def test_issue_delivery_loop_close_emits_done_markers() -> None: - skill_root = Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "manage_issue_delivery_loop.sh").read_text(encoding="utf-8") - assert "ISSUE_CLOSE_STATUS=SUCCESS" in text - assert "DONE_CRITERIA=ISSUE_CLOSED" in text - assert "close-after-review did not close issue" in text - - -def test_issue_delivery_loop_review_request_omits_issue_line() -> None: - skill_root = Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "manage_issue_delivery_loop.sh").read_text(encoding="utf-8") - assert "## Main-Agent Review Request" in text - assert 'output+="- Issue: ${issue_ref}${nl}"' not in text - - -def test_issue_delivery_loop_ready_for_review_updates_labels_before_comment() -> None: +def test_issue_delivery_loop_skill_uses_binary_first_command_contract() -> None: skill_root = Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "manage_issue_delivery_loop.sh").read_text(encoding="utf-8") - - ready_for_review_body = text.split("ready-for-review)", maxsplit=1)[1].split("close-after-review)", maxsplit=1)[0] - comment_line = 'run_issue_lifecycle comment --issue "$issue_number" --body "$review_body" >/dev/null' - update_line = 'run_issue_lifecycle "${update_args[@]}" >/dev/null' - - assert update_line in ready_for_review_body - assert comment_line in ready_for_review_body - assert 
ready_for_review_body.index(update_line) < ready_for_review_body.index(comment_line) + text = (skill_root / "SKILL.md").read_text(encoding="utf-8") + assert "plan-issue" in text + assert "plan-issue-local" in text + assert "status-plan" in text + assert "ready-plan" in text + assert "close-plan" in text -def test_issue_delivery_loop_status_snapshot_omits_source_line() -> None: +def test_issue_delivery_loop_skill_excludes_deleted_wrapper_script() -> None: skill_root = Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "manage_issue_delivery_loop.sh").read_text(encoding="utf-8") - assert "## Main-Agent Status Snapshot" in text - assert 'output+="- Source: ${source_label} ${issue_ref}${nl}"' not in text - assert "| Task | Summary | Planned Status | PR | PR State | Review | Suggested |" in text - assert "Merge State" not in text + text = (skill_root / "SKILL.md").read_text(encoding="utf-8") + assert ("manage_issue_delivery_loop" + ".sh") not in text diff --git a/skills/automation/plan-issue-delivery-loop/SKILL.md b/skills/automation/plan-issue-delivery-loop/SKILL.md index a9bfc541..6b9648a5 100644 --- a/skills/automation/plan-issue-delivery-loop/SKILL.md +++ b/skills/automation/plan-issue-delivery-loop/SKILL.md @@ -11,38 +11,44 @@ Prereqs: - Run inside (or have access to) the target git repository. - `plan-tooling` available on `PATH` for plan parsing/linting. -- `gh` available on `PATH`, and `gh auth status` succeeds. -- Base orchestration scripts exist: - - `$AGENT_HOME/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh` - - `$AGENT_HOME/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh` +- `plan-issue` available on `PATH` for live GitHub orchestration mode. +- `plan-issue-local` available on `PATH` for local rehearsal mode. +- `gh` available on `PATH`, and `gh auth status` succeeds only when using live mode (`plan-issue`). Inputs: - Plan file path (`docs/plans/...-plan.md`). 
-- Plan issue number (after `start-plan` creates the single issue). +- Plan issue number (`--issue <number>`) after `start-plan` creates the single issue. - Sprint number for sprint orchestration commands. - Approval comment URL (`https://github.com/<owner>/<repo>/(issues|pull)/<n>#issuecomment-<id>`) for: - sprint acceptance record comments - final plan issue close gate -- Optional repository override (`--repo <owner/repo>`). -- Local rehearsal mode (`--dry-run`) for full flow testing without GitHub API calls: - - `start-plan` emits a synthetic plan-issue token (`DRY_RUN_PLAN_ISSUE`). - - sprint commands default to no comment posting in dry-run. - - `ready-plan` should use `--body-file`. - - `close-plan --dry-run` requires `--body-file`. -- Required PR grouping controls (no defaults): - - `--pr-grouping per-sprint|group` (`per-spring` alias accepted) - - `--pr-group <task-or-plan-id>=<group>` (repeatable; required for `group`, and must cover every task in scope) +- Optional repository override (`--repo <owner/repo>`) in live mode. +- Typed subcommands: `start-plan`, `start-sprint`, `ready-sprint`, `accept-sprint`, `ready-plan`, `close-plan`. +- Typed local rehearsal behavior: + - `plan-issue-local` runs without GitHub API usage for local sprint orchestration rehearsal. + - `plan-issue --dry-run` provides live-binary rehearsal behavior without mutating GitHub. + - local rehearsal for sprint commands still requires `--issue <number>` input; use a local placeholder number (for example `999`) when no live issue exists. + - sprint commands default to no comment posting during dry-run/local rehearsal. + - `ready-plan` requires one of `--issue` or `--body-file`; dry-run/local rehearsal should use `--body-file <path>`. + - `close-plan` requires `--approved-comment-url`; dry-run/local rehearsal also requires `--body-file <path>`. 
+- Required PR grouping controls: + - Always pass `--pr-grouping per-sprint|group` (`per-spring` alias accepted) and `--strategy deterministic|auto` on split-dependent commands. + - Skill default: use `--pr-grouping group --strategy auto`. + - `--pr-group <task-or-plan-id>=<group>` is: + - optional for `group + auto` (supports partial pinning; remaining tasks are auto-assigned) + - required for `group + deterministic` (must cover every task in scope) + - unused for `per-sprint` Outputs: - Plan-scoped task-spec TSV generated from all plan tasks (all sprints) for one issue. - Sprint-scoped task-spec TSV generated per sprint for subagent dispatch hints, including `pr_group`. - Sprint-scoped rendered subagent prompt files + a prompt manifest (`task_id -> prompt_path -> execution_mode`) generated at `start-sprint`. -- Exactly one GitHub Issue for the whole plan (`1 plan = 1 issue`). -- Sprint progress tracked on that issue via comments + task decomposition rows/PR links. +- Live mode (`plan-issue`) creates/updates exactly one GitHub Issue for the whole plan (`1 plan = 1 issue`). +- Local rehearsal (`plan-issue-local` or `plan-issue --dry-run`) emits equivalent orchestration artifacts without GitHub mutations. - `start-sprint`/`ready-sprint`/`accept-sprint` sync sprint task rows (`Owner/Branch/Worktree/Execution Mode/Notes`) from the sprint task-spec. -- `accept-sprint` additionally enforces sprint PRs are merged and syncs sprint task `Status` to `done`. +- `accept-sprint` additionally enforces sprint PRs are merged and syncs sprint task `Status` to `done` in live mode. - `start-sprint` for sprint `N>1` is blocked until sprint `N-1` is merged and all its task rows are `done`. - PR references in sprint comments and review tables use canonical `#<number>` format. - Sprint start comments may still show `TBD` PR placeholders until subagents open PRs and rows are linked. 
@@ -51,8 +57,7 @@ Outputs: - Main-agent must launch subagents from rendered `TASK_PROMPT_PATH` artifacts (no ad-hoc dispatch prompt bypass). - Final issue close only after plan-level acceptance and merged-PR close gate. - `close-plan` enforces cleanup of all issue-assigned task worktrees before completion. -- `multi-sprint-guide --dry-run` emits a local-only command sequence that avoids GitHub calls. -- Definition of done: execution is complete only when `close-plan` succeeds, the plan issue is closed, and worktree cleanup passes. +- Definition of done: execution is complete only when `close-plan` succeeds, the plan issue is closed (live mode), and worktree cleanup passes. - Error contract: if any gate/command fails, stop forward progress and report the failing command plus key stderr/stdout gate errors. Exit codes: @@ -64,29 +69,51 @@ Exit codes: Failure modes: - Plan file missing, sprint missing, or selected sprint has zero tasks. -- Required commands missing (`plan-tooling`, `python3`; `gh` required for live GitHub mode). -- Approval URL invalid. -- Final plan close gate fails (task status/PR merge not satisfied). +- Required commands missing (`plan-tooling`, `plan-issue`, `plan-issue-local`; `gh` only required for live GitHub mode). +- Typed argument validation fails (unknown subcommand, invalid flag, malformed `--pr-group`, invalid `--pr-grouping`). +- Live mode approval URL invalid. +- Dry-run/local `ready-plan` invoked without `--issue` or `--body-file`. +- Dry-run/local `close-plan` invoked without required `--approved-comment-url` and `--body-file`. +- Final plan close gate fails (task status/PR merge not satisfied in live mode). - Worktree cleanup gate fails (any issue-assigned task worktree still exists after cleanup). - Attempted transition to a next sprint that does not exist. 
-## Scripts (only entrypoints) +## Binaries (only entrypoints) -- `$AGENT_HOME/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh` +- `plan-issue` (live GitHub orchestration) +- `plan-issue-local` (local rehearsal) ## Workflow -1. Plan issue bootstrap (one-time) - - `start-plan`: parse the full plan, generate one task decomposition covering all sprints, and open one plan issue. -2. Sprint execution loop (repeat on the same plan issue) - - `start-sprint`: generate sprint task TSV, render per-task subagent prompts, sync sprint task rows in issue body, post sprint-start comment, emit subagent dispatch hints (supports grouped PR dispatch). For sprint `N>1`, this command requires sprint `N-1` merged+done gate to pass first. - - `ready-sprint`: post sprint-ready comment to request main-agent review before merge. +1. Live mode (GitHub mutations): use `plan-issue`. +2. Local rehearsal mode (no GitHub mutations): use `plan-issue-local` (or `plan-issue --dry-run` when matching live CLI ergonomics is required). +3. Plan issue bootstrap (one-time): + - `start-plan`: parse the full plan, generate one task decomposition covering all sprints, and open one plan issue in live mode. +4. Sprint execution loop (repeat on the same plan issue): + - `start-sprint`: generate sprint task TSV, render per-task subagent prompts, sync sprint task rows in issue body, post sprint-start comment in live mode, and emit subagent dispatch hints (supports grouped PR dispatch). For sprint `N>1`, this command requires sprint `N-1` merged+done gate to pass first. + - `ready-sprint`: post sprint-ready comment in live mode to request main-agent review before merge. - After review approval, merge sprint PRs. - - `accept-sprint`: validate sprint PRs are merged, sync sprint task statuses to `done`, and record sprint acceptance comment on the same issue (issue stays open). 
+ - `accept-sprint`: validate sprint PRs are merged, sync sprint task statuses to `done`, and record sprint acceptance comment on the same issue in live mode (issue stays open). - If another sprint exists, run `start-sprint` for the next sprint on the same issue. -3. Plan close (one-time) - - `ready-plan`: request final plan review using issue-delivery-loop review helper. - - `close-plan`: run the plan-level close gate, close the single plan issue, and enforce task worktree cleanup. +5. Plan close (one-time): + - `ready-plan`: request final plan review. For dry-run/local rehearsal, provide `--body-file`. + - `close-plan`: run the plan-level close gate, close the single plan issue in live mode, and enforce task worktree cleanup. For dry-run/local rehearsal, `--body-file` is required. + +## PR Grouping Steps (Mandatory) + +1. Choose grouping profile before any split command: + - Default/recommended: `group + auto` + - Deterministic/manual split: `group + deterministic` + - One shared PR per sprint: `per-sprint` +2. Keep the same grouping flags across the same sprint flow (`start-plan`, `start-sprint`, `ready-sprint`, `accept-sprint`) to avoid row/spec drift. +3. If using `group + auto`: + - You may omit `--pr-group` completely. + - You may pass partial `--pr-group` mappings to pin selected tasks; unmapped tasks remain auto-assigned. +4. If using `group + deterministic`: + - You must pass `--pr-group` and cover every task in scope. + - Missing mappings are a hard validation error; stop and fix before proceeding. +5. If using `per-sprint`: + - Do not pass `--pr-group`; all tasks in the sprint share one PR group anchor. ## Completion Policy (Mandatory) @@ -103,23 +130,52 @@ Failure modes: ## Full Skill Flow 1. Confirm the plan file exists and passes `plan-tooling validate`. -2. Run `start-plan` to open exactly one GitHub issue for the whole plan (`1 plan = 1 issue`). -3. 
Run `start-sprint` for Sprint 1 on that same issue: - - main-agent posts sprint kickoff comment - - main-agent chooses PR grouping (`per-sprint` or `group`) and emits dispatch hints +2. Choose execution mode: + - Live mode: `plan-issue ...` + - Local rehearsal: `plan-issue-local ...` (or `plan-issue --dry-run ...`) +3. Choose PR grouping profile (default `group + auto`): + - `group + auto`: no mapping requirement; optional partial pinning with `--pr-group`. + - `group + deterministic`: full `--pr-group` coverage required. + - `per-sprint`: one shared PR group per sprint. +4. Run `start-plan` to initialize plan orchestration (`1 plan = 1 issue` in live mode). +5. Run `start-sprint` for Sprint 1 on the same plan issue token/number: + - main-agent chooses PR grouping + strategy (`per-sprint`, `group + auto`, or `group + deterministic`) and emits dispatch hints - main-agent starts subagents using rendered `TASK_PROMPT_PATH` prompt artifacts from dispatch hints - subagents create worktrees/PRs and implement tasks -4. While sprint work is active, keep issue task rows + PR links traceable: +6. While sprint work is active, keep issue task rows + PR links traceable: - sprint row metadata is synced from task-spec by sprint commands - unresolved PRs remain `TBD` - linked PRs should be recorded as `#<number>` - - optionally run `status-plan` for snapshots -5. When sprint work is ready, run `ready-sprint` to record a sprint review/acceptance request comment. -6. Main-agent reviews sprint PR content, records approval, and merges the sprint PRs. -7. Run `accept-sprint` with the approval comment URL to enforce merged-PR gate and sync sprint task status rows to `done` (issue stays open). -8. If another sprint exists, run `start-sprint` for the next sprint on the same issue; this is blocked until prior sprint is merged+done. -9. After the final sprint is implemented and accepted, run `ready-plan` for the final plan-level review. -10. 
Run `close-plan` with the final approval comment URL to enforce merged-PR/task gates, close the single plan issue, and force cleanup of task worktrees. +7. When sprint work is ready, run `ready-sprint` to record a sprint review/acceptance request (live comment in live mode). +8. Main-agent reviews sprint PR content, records approval, and merges the sprint PRs. +9. Run `accept-sprint` with the approval comment URL in live mode to enforce merged-PR gate and sync sprint task status rows to `done` (issue stays open). +10. If another sprint exists, run `start-sprint` for the next sprint on the same issue; this is blocked until prior sprint is merged+done. +11. After the final sprint is implemented and accepted, run `ready-plan` for final review: + - live mode: `plan-issue ready-plan --issue <number> [--repo <owner/repo>]` + - dry-run/local rehearsal: `plan-issue ready-plan --dry-run --body-file <ready-plan-comment.md>` +12. Run `close-plan` with the final approval comment URL in live mode to enforce merged-PR/task gates, close the single plan issue, and force cleanup of task worktrees: + - live mode: `plan-issue close-plan --issue <number> --approved-comment-url <comment-url> [--repo <owner/repo>]` + - dry-run/local rehearsal: `plan-issue close-plan --dry-run --approved-comment-url <comment-url> --body-file <close-plan-comment.md>` + +## Command-Oriented Flow + +1. Live mode (`plan-issue`) + - Validate: `plan-tooling validate --file <plan.md>` + - Start plan: `plan-issue start-plan --plan <plan.md> --pr-grouping <per-sprint|group> --strategy <auto|deterministic> [--pr-group <task-id>=<group> ...] [--repo <owner/repo>]` + - Start sprint: `plan-issue start-sprint --plan <plan.md> --issue <number> --sprint <n> --pr-grouping <per-sprint|group> --strategy <auto|deterministic> [--pr-group <task-id>=<group> ...] 
[--repo <owner/repo>]` + - Ready sprint: `plan-issue ready-sprint --plan <plan.md> --issue <number> --sprint <n> --pr-grouping <per-sprint|group> --strategy <auto|deterministic> [--pr-group <task-id>=<group> ...] [--repo <owner/repo>]` + - Accept sprint: `plan-issue accept-sprint --plan <plan.md> --issue <number> --sprint <n> --pr-grouping <per-sprint|group> --strategy <auto|deterministic> --approved-comment-url <comment-url> [--pr-group <task-id>=<group> ...] [--repo <owner/repo>]` + - Ready plan: `plan-issue ready-plan --issue <number> [--repo <owner/repo>]` + - Close plan: `plan-issue close-plan --issue <number> --approved-comment-url <comment-url> [--repo <owner/repo>]` +2. Local rehearsal (`plan-issue-local`) + - Validate: `plan-tooling validate --file <plan.md>` + - Start plan: `plan-issue-local start-plan --plan <plan.md> --pr-grouping <per-sprint|group> --strategy <auto|deterministic> [--pr-group <task-id>=<group> ...]` + - Start sprint: `plan-issue-local start-sprint --plan <plan.md> --issue <local-placeholder-number> --sprint <n> --pr-grouping <per-sprint|group> --strategy <auto|deterministic> [--pr-group <task-id>=<group> ...]` + - Ready sprint: `plan-issue-local ready-sprint --plan <plan.md> --issue <local-placeholder-number> --sprint <n> --pr-grouping <per-sprint|group> --strategy <auto|deterministic> [--pr-group <task-id>=<group> ...]` + - Accept sprint: `plan-issue-local accept-sprint --plan <plan.md> --issue <local-placeholder-number> --sprint <n> --pr-grouping <per-sprint|group> --strategy <auto|deterministic> --approved-comment-url <comment-url> [--pr-group <task-id>=<group> ...]` +3. 
Plan-level local/offline rehearsal (`plan-issue --dry-run`) + - Ready plan: `plan-issue ready-plan --dry-run --body-file <ready-plan-comment.md>` + - Close plan: `plan-issue close-plan --dry-run --approved-comment-url <comment-url> --body-file <close-plan-comment.md>` ## Role boundary (mandatory) diff --git a/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh b/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh deleted file mode 100755 index b12a41f9..00000000 --- a/skills/automation/plan-issue-delivery-loop/scripts/plan-issue-delivery-loop.sh +++ /dev/null @@ -1,2911 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)" -skill_dir="$(cd "${script_dir}/.." && pwd -P)" -repo_root_default="$(cd "${skill_dir}/../../.." && pwd -P)" -agent_home="${AGENT_HOME:-$repo_root_default}" - -issue_delivery_script="${repo_root_default}/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh" -if [[ ! -x "$issue_delivery_script" ]]; then - issue_delivery_script="${agent_home%/}/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh" -fi - -issue_lifecycle_script="${repo_root_default}/skills/workflows/issue/issue-lifecycle/scripts/manage_issue_lifecycle.sh" -if [[ ! -x "$issue_lifecycle_script" ]]; then - issue_lifecycle_script="${agent_home%/}/skills/workflows/issue/issue-lifecycle/scripts/manage_issue_lifecycle.sh" -fi - -issue_lifecycle_template="${repo_root_default}/skills/workflows/issue/issue-lifecycle/references/ISSUE_TEMPLATE.md" -if [[ ! -f "$issue_lifecycle_template" ]]; then - issue_lifecycle_template="${agent_home%/}/skills/workflows/issue/issue-lifecycle/references/ISSUE_TEMPLATE.md" -fi - -issue_subagent_script="${repo_root_default}/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh" -if [[ ! 
-x "$issue_subagent_script" ]]; then - issue_subagent_script="${agent_home%/}/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh" -fi - -die() { - echo "error: $*" >&2 - exit 1 -} - -usage_die() { - echo "error: $*" >&2 - exit 2 -} - -require_cmd() { - local cmd="${1:-}" - command -v "$cmd" >/dev/null 2>&1 || die "$cmd is required" -} - -is_positive_int() { - [[ "${1:-}" =~ ^[1-9][0-9]*$ ]] -} - -validate_pr_grouping_args() { - local mode="${1:-}" - [[ -n "$mode" ]] || usage_die "--pr-grouping is required (per-sprint|group)" - # Delegate grouping/mapping validation and error contract to plan-tooling split-prs. -} - -join_lines() { - local joined='' - local item='' - for item in "$@"; do - if [[ -n "$joined" ]]; then - joined+=$'\n' - fi - joined+="$item" - done - printf '%s' "$joined" -} - -print_cmd() { - local out='' - local arg='' - for arg in "$@"; do - out+=" $(printf '%q' "$arg")" - done - printf '%s\n' "${out# }" -} - -ensure_entrypoints() { - [[ -x "$issue_delivery_script" ]] || die "missing executable: $issue_delivery_script" - [[ -x "$issue_lifecycle_script" ]] || die "missing executable: $issue_lifecycle_script" - [[ -x "$issue_subagent_script" ]] || die "missing executable: $issue_subagent_script" - [[ -f "$issue_lifecycle_template" ]] || die "missing file: $issue_lifecycle_template" -} - -validate_plan() { - local plan_file="${1:-}" - [[ -n "$plan_file" ]] || die "plan file path is required" - [[ -f "$plan_file" ]] || die "plan file not found: $plan_file" - require_cmd plan-tooling - plan-tooling validate --file "$plan_file" >/dev/null -} - -validate_approval_comment_url_format() { - local url="${1:-}" - python3 - "$url" <<'PY' -import re -import sys - -url = sys.argv[1].strip() -pat = re.compile(r"^https://github\.com/[^/]+/[^/]+/(issues|pull)/\d+#issuecomment-\d+$") -if not pat.match(url): - raise SystemExit("error: invalid approval comment URL format") -print(url) -PY -} - -plan_summary_tsv() { - local plan_file="${1:-}" - 
python3 - "$plan_file" <<'PY' -import json -import pathlib -import subprocess -import sys - -plan = pathlib.Path(sys.argv[1]) -if not plan.is_file(): - raise SystemExit(f"error: plan file not found: {plan}") - -parsed = subprocess.run( - ["plan-tooling", "to-json", "--file", str(plan)], - check=True, - capture_output=True, - text=True, -) -data = json.loads(parsed.stdout) - -plan_title = (data.get("title") or plan.stem).strip() or plan.stem -total_tasks = 0 -max_sprint = 0 -for sprint in data.get("sprints", []): - num = int(sprint.get("number", 0)) - max_sprint = max(max_sprint, num) - total_tasks += len(sprint.get("tasks", [])) - -print(f"{plan_title}\t{max_sprint}\t{total_tasks}") -PY -} - -plan_sprint_meta_tsv() { - local plan_file="${1:-}" - local sprint="${2:-}" - python3 - "$plan_file" "$sprint" <<'PY' -import json -import pathlib -import subprocess -import sys - -plan = pathlib.Path(sys.argv[1]) -sprint_raw = sys.argv[2].strip() - -if not plan.is_file(): - raise SystemExit(f"error: plan file not found: {plan}") -if not sprint_raw.isdigit() or int(sprint_raw) <= 0: - raise SystemExit(f"error: sprint must be a positive integer (got: {sprint_raw})") - -sprint_num = int(sprint_raw) -parsed = subprocess.run( - ["plan-tooling", "to-json", "--file", str(plan)], - check=True, - capture_output=True, - text=True, -) -data = json.loads(parsed.stdout) - -max_sprint = 0 -target = None -for sprint in data.get("sprints", []): - number = int(sprint.get("number", 0)) - max_sprint = max(max_sprint, number) - if number == sprint_num: - target = sprint - -if target is None: - raise SystemExit(f"error: sprint {sprint_num} not found (max sprint: {max_sprint})") - -name = (target.get("name") or "").strip() or f"Sprint {sprint_num}" -task_count = len(target.get("tasks", [])) -print(f"{name}\t{task_count}\t{max_sprint}") -PY -} - -default_plan_task_spec_path() { - local plan_file="${1:-}" - local plan_base plan_stem - plan_base="$(basename "$plan_file")" - 
plan_stem="${plan_base%.md}" - printf '%s/out/plan-issue-delivery-loop/%s-plan-tasks.tsv\n' \ - "${agent_home%/}" \ - "$plan_stem" -} - -default_plan_issue_body_path() { - local plan_file="${1:-}" - local plan_base plan_stem - plan_base="$(basename "$plan_file")" - plan_stem="${plan_base%.md}" - printf '%s/out/plan-issue-delivery-loop/%s-plan-issue-body.md\n' \ - "${agent_home%/}" \ - "$plan_stem" -} - -default_sprint_task_spec_path() { - local plan_file="${1:-}" - local sprint="${2:-}" - local plan_base plan_stem - plan_base="$(basename "$plan_file")" - plan_stem="${plan_base%.md}" - printf '%s/out/plan-issue-delivery-loop/%s-sprint-%s-tasks.tsv\n' \ - "${agent_home%/}" \ - "$plan_stem" \ - "$sprint" -} - -default_sprint_prompt_dir() { - local plan_file="${1:-}" - local sprint="${2:-}" - local plan_base plan_stem - plan_base="$(basename "$plan_file")" - plan_stem="${plan_base%.md}" - printf '%s/out/plan-issue-delivery-loop/%s-sprint-%s-subagent-prompts\n' \ - "${agent_home%/}" \ - "$plan_stem" \ - "$sprint" -} - -default_dry_run_issue_number() { - printf 'DRY_RUN_PLAN_ISSUE\n' -} - -issue_read_body_cmd() { - local issue_number="${1:-}" - local out_file="${2:-}" - local repo_arg="${3:-}" - [[ -n "$issue_number" ]] || die "issue number is required" - [[ -n "$out_file" ]] || die "output file path is required" - - require_cmd gh - local cmd=(gh issue view "$issue_number") - if [[ -n "$repo_arg" ]]; then - cmd+=(-R "$repo_arg") - fi - cmd+=(--json body -q .body) - "${cmd[@]}" >"$out_file" -} - -cleanup_plan_issue_worktrees() { - local issue_number="${1:-}" - local repo_arg="${2:-}" - local dry_run="${3:-0}" - local body_file_override="${4:-}" - - require_cmd git - require_cmd python3 - - local body_file='' - local cleanup_body_file='0' - if [[ -n "$body_file_override" ]]; then - [[ -f "$body_file_override" ]] || die "body file not found: $body_file_override" - body_file="$body_file_override" - else - [[ -n "$issue_number" ]] || usage_die "--issue is required for 
worktree cleanup" - body_file="$(mktemp)" - cleanup_body_file='1' - issue_read_body_cmd "$issue_number" "$body_file" "$repo_arg" - fi - - set +e - python3 - "$body_file" "$dry_run" <<'PY' -import os -import pathlib -import subprocess -import sys - -body_file = pathlib.Path(sys.argv[1]) -dry_run = sys.argv[2].strip() == "1" - - -def is_placeholder(value: str) -> bool: - token = (value or "").strip().lower() - if token in {"", "-", "tbd", "none", "n/a", "na", "..."}: - return True - if token.startswith("<") and token.endswith(">"): - return True - if "task ids" in token: - return True - return False - - -def parse_row(line: str) -> list[str]: - s = line.strip() - if not (s.startswith("|") and s.endswith("|")): - return [] - return [cell.strip() for cell in s[1:-1].split("|")] - - -def section_bounds(lines: list[str], heading: str) -> tuple[int, int]: - start = None - for idx, line in enumerate(lines): - if line.strip() == heading: - start = idx + 1 - break - if start is None: - raise SystemExit(f"error: missing required heading: {heading}") - end = len(lines) - for idx in range(start, len(lines)): - if lines[idx].startswith("## "): - end = idx - break - return start, end - - -def run(cmd: list[str]) -> subprocess.CompletedProcess[str]: - return subprocess.run(cmd, check=True, capture_output=True, text=True) - - -def list_worktrees() -> list[tuple[str, str]]: - output = run(["git", "worktree", "list", "--porcelain"]).stdout.splitlines() - rows: list[tuple[str, str]] = [] - current_path = "" - current_branch = "" - for line in output + [""]: - if not line.strip(): - if current_path: - rows.append((current_path, current_branch)) - current_path = "" - current_branch = "" - continue - if line.startswith("worktree "): - current_path = line[len("worktree ") :].strip() - continue - if line.startswith("branch "): - ref = line[len("branch ") :].strip() - if ref.startswith("refs/heads/"): - ref = ref[len("refs/heads/") :] - current_branch = ref - return rows - - -text = 
body_file.read_text(encoding="utf-8") -lines = text.splitlines() -start, end = section_bounds(lines, "## Task Decomposition") -table_lines = [line for line in lines[start:end] if line.strip().startswith("|")] -if len(table_lines) < 3: - raise SystemExit("error: Task Decomposition must contain a markdown table with at least one task row") - -headers = parse_row(table_lines[0]) -required_columns = ["Task", "Branch", "Worktree"] -missing = [col for col in required_columns if col not in headers] -if missing: - raise SystemExit("error: missing Task Decomposition columns: " + ", ".join(missing)) - -records: list[tuple[str, str, str]] = [] -for raw in table_lines[2:]: - cells = parse_row(raw) - if not cells: - continue - if len(cells) != len(headers): - raise SystemExit("error: malformed Task Decomposition row") - row = {headers[idx]: cells[idx] for idx in range(len(headers))} - task = row.get("Task", "").strip() - if not task: - continue - records.append((task, row.get("Branch", "").strip(), row.get("Worktree", "").strip())) - -if not records: - raise SystemExit("error: Task Decomposition table must include at least one task row") - -repo_root = pathlib.Path(run(["git", "rev-parse", "--show-toplevel"]).stdout.strip()).resolve() -main_worktree = str(repo_root) -default_worktrees_root = (repo_root / ".." 
/ ".worktrees" / repo_root.name / "issue").resolve() - -expected_branches: set[str] = set() -expected_worktree_names: set[str] = set() -expected_paths: set[str] = set() - -for _task, branch, worktree in records: - if not is_placeholder(branch): - expected_branches.add(branch) - if is_placeholder(worktree): - continue - token = worktree.strip() - expected_worktree_names.add(pathlib.Path(token).name) - token_path = pathlib.Path(token) - if token_path.is_absolute(): - expected_paths.add(str(token_path.resolve())) - else: - if "/" in token or token.startswith("."): - expected_paths.add(str((repo_root / token).resolve())) - expected_paths.add(str((default_worktrees_root / token).resolve())) - -if not expected_branches and not expected_worktree_names and not expected_paths: - print("WORKTREE_CLEANUP_STATUS=SKIP_NO_TARGETS") - raise SystemExit(0) - -targets: dict[str, list[str]] = {} -for path_raw, branch in list_worktrees(): - path = str(pathlib.Path(path_raw).resolve()) - if path == main_worktree: - continue - reasons: list[str] = [] - if branch and branch in expected_branches: - reasons.append(f"branch:{branch}") - if path in expected_paths: - reasons.append("path") - if pathlib.Path(path).name in expected_worktree_names: - reasons.append(f"name:{pathlib.Path(path).name}") - if reasons: - targets[path] = sorted(set(reasons)) - -errors: list[str] = [] -removed = 0 - -for path in sorted(targets): - reason_text = ",".join(targets[path]) - if dry_run: - print(f"DRY_RUN_WORKTREE_REMOVE={path} ({reason_text})") - continue - proc = subprocess.run(["git", "worktree", "remove", "--force", path], capture_output=True, text=True) - if proc.returncode != 0: - message = (proc.stderr or proc.stdout or "").strip() or f"exit {proc.returncode}" - errors.append(f"{path}: {message}") - else: - removed += 1 - print(f"WORKTREE_REMOVED={path} ({reason_text})") - -if dry_run: - print(f"WORKTREE_CLEANUP_DRY_RUN_TARGETS={len(targets)}") - raise SystemExit(0) - -prune_proc = 
subprocess.run(["git", "worktree", "prune"], capture_output=True, text=True) -if prune_proc.returncode != 0: - message = (prune_proc.stderr or prune_proc.stdout or "").strip() or f"exit {prune_proc.returncode}" - errors.append(f"git worktree prune failed: {message}") - -remaining: list[str] = [] -for path_raw, branch in list_worktrees(): - path = str(pathlib.Path(path_raw).resolve()) - if path == main_worktree: - continue - reasons: list[str] = [] - if branch and branch in expected_branches: - reasons.append(f"branch:{branch}") - if path in expected_paths: - reasons.append("path") - if pathlib.Path(path).name in expected_worktree_names: - reasons.append(f"name:{pathlib.Path(path).name}") - if reasons: - remaining.append(f"{path} ({','.join(sorted(set(reasons)))})") - -lingering_paths = [] -for path in sorted(expected_paths): - if path != main_worktree and os.path.exists(path): - lingering_paths.append(path) - -for message in errors: - print(f"error: worktree cleanup remove failed: {message}", file=sys.stderr) -for message in remaining: - print(f"error: worktree cleanup residual git worktree: {message}", file=sys.stderr) -for path in lingering_paths: - print(f"error: worktree cleanup residual path exists: {path}", file=sys.stderr) - -if errors or remaining or lingering_paths: - raise SystemExit(1) - -print(f"WORKTREE_CLEANUP_REMOVED={removed}") -print("WORKTREE_CLEANUP_STATUS=PASS") -PY - local cleanup_rc=$? 
- set -e - - if [[ "$cleanup_body_file" == '1' ]]; then - rm -f "$body_file" - fi - return "$cleanup_rc" -} - -render_plan_issue_body_from_task_spec() { - local template_file="${1:-}" - local plan_file="${2:-}" - local plan_title="${3:-}" - local task_spec_file="${4:-}" - local out_file="${5:-}" - - python3 - "$template_file" "$plan_file" "$plan_title" "$task_spec_file" "$out_file" <<'PY' -import csv -import pathlib -import sys - -template_path = pathlib.Path(sys.argv[1]) -plan_file = sys.argv[2].strip() -plan_title = sys.argv[3].strip() or pathlib.Path(plan_file).stem -task_spec_path = pathlib.Path(sys.argv[4]) -out_path = pathlib.Path(sys.argv[5]) - -if not template_path.is_file(): - raise SystemExit(f"error: template file not found: {template_path}") -if not task_spec_path.is_file(): - raise SystemExit(f"error: task spec file not found: {task_spec_path}") - -rows = [] -with task_spec_path.open("r", encoding="utf-8") as handle: - reader = csv.reader(handle, delimiter="\t") - for raw in reader: - if not raw: - continue - if raw[0].strip().startswith("#"): - continue - if len(raw) < 6: - raise SystemExit("error: malformed task spec row") - task_id = raw[0].strip() - summary = raw[1].strip() - branch = raw[2].strip() - worktree = raw[3].strip() - owner = raw[4].strip() - notes = raw[5].strip() if len(raw) >= 6 else "" - rows.append((task_id, summary, owner, branch, worktree, notes)) - -if not rows: - raise SystemExit("error: task spec contains no rows") - -text = template_path.read_text(encoding="utf-8") -lines = text.splitlines() - -if lines and lines[0].startswith("# "): - lines[0] = f"# {plan_title}" - - -def replace_section(heading: str, body_lines: list[str]) -> None: - start = None - for idx, line in enumerate(lines): - if line.strip() == heading: - start = idx - break - if start is None: - raise SystemExit(f"error: missing heading in template: {heading}") - end = len(lines) - for idx in range(start + 1, len(lines)): - if lines[idx].startswith("## "): - end = 
idx - break - new_block = [lines[start], ""] + body_lines + [""] - lines[:] = lines[:start] + new_block + lines[end:] - - -goal_lines = [ - f"- Execute plan `{plan_file}` end-to-end using one GitHub issue and subagent-owned PRs.", - f"- Track sprint progress via issue comments while keeping task/PR state in the issue body.", -] -acceptance_lines = [ - "- All in-scope plan tasks are implemented via subagent PRs and linked in the issue task table.", - "- Final plan review approval comment URL is recorded.", - "- The single plan issue closes after close-gate checks pass.", -] -scope_lines = [ - f"- In-scope: tasks defined in `{plan_file}`", - "- Out-of-scope: work not represented in the plan task list", -] -risk_lines = [ - "- Sprint approvals may be recorded before final close; issue stays open until final plan acceptance.", - "- Close gate fails if task statuses or PR merge states in the issue body are incomplete.", -] -evidence_lines = [ - f"- Plan source: `{plan_file}`", - "- Sprint approvals: issue comments (one comment per accepted sprint)", - "- Final approval: issue/pull comment URL passed to `close-plan`", -] - -task_table_lines = [ - "| Task | Summary | Owner | Branch | Worktree | Execution Mode | PR | Status | Notes |", - "| --- | --- | --- | --- | --- | --- | --- | --- | --- |", -] -for task_id, summary, owner, branch, worktree, notes in rows: - note_val = notes if notes else "-" - task_table_lines.append( - f"| {task_id} | {summary} | TBD | TBD | TBD | TBD | TBD | planned | {note_val} |" - ) - -replace_section("## Goal", goal_lines) -replace_section("## Acceptance Criteria", acceptance_lines) -replace_section("## Scope", scope_lines) -replace_section("## Task Decomposition", task_table_lines) -replace_section("## Risks / Uncertainties", risk_lines) -replace_section("## Evidence", evidence_lines) - -out_path.parent.mkdir(parents=True, exist_ok=True) -out_path.write_text("\n".join(lines) + "\n", encoding="utf-8") -print(out_path) -PY -} - 
-render_task_spec_from_plan_scope() { - local plan_file="${1:-}" - local scope_kind="${2:-}" # plan | sprint - local scope_value="${3:-}" # ignored for plan - local task_spec_out="${4:-}" - local owner_prefix="${5:-subagent}" - local branch_prefix="${6:-issue}" - local worktree_prefix="${7:-issue__}" - local pr_grouping="${8:-}" - local pr_group_entries="${9:-}" - [[ -n "$plan_file" ]] || die "plan file path is required" - [[ -f "$plan_file" ]] || die "plan file not found: $plan_file" - [[ "$scope_kind" == "plan" || "$scope_kind" == "sprint" ]] || die "unsupported scope scope: $scope_kind" - if [[ "$scope_kind" == "sprint" ]]; then - is_positive_int "$scope_value" || die "sprint must be a positive integer (got: ${scope_value:-<empty>})" - fi - [[ -n "$pr_grouping" ]] || die "pr-grouping is required" - [[ -n "$task_spec_out" ]] || die "task-spec output path is required" - - local cmd=( - plan-tooling split-prs - --file "$plan_file" - --scope "$scope_kind" - --pr-grouping "$pr_grouping" - --strategy deterministic - --owner-prefix "$owner_prefix" - --branch-prefix "$branch_prefix" - --worktree-prefix "$worktree_prefix" - --format tsv - ) - if [[ "$scope_kind" == "sprint" ]]; then - cmd+=(--sprint "$scope_value") - fi - - while IFS= read -r entry; do - [[ -n "$entry" ]] || continue - cmd+=(--pr-group "$entry") - done <<<"$pr_group_entries" - - mkdir -p "$(dirname "$task_spec_out")" - "${cmd[@]}" > "$task_spec_out" - printf '%s\n' "$task_spec_out" -} - -run_issue_delivery() { - local dry_run="${1:-0}" - local repo_arg="${2:-}" - shift 2 - - local cmd=("$issue_delivery_script" "$@") - if [[ -n "$repo_arg" ]]; then - cmd+=(--repo "$repo_arg") - fi - if [[ "$dry_run" == "1" ]]; then - cmd+=(--dry-run) - fi - "${cmd[@]}" -} - -run_issue_lifecycle() { - local dry_run="${1:-0}" - local repo_arg="${2:-}" - shift 2 - - local cmd=("$issue_lifecycle_script" "$@") - if [[ -n "$repo_arg" ]]; then - cmd+=(--repo "$repo_arg") - fi - if [[ "$dry_run" == "1" ]]; then - cmd+=(--dry-run) 
- fi - "${cmd[@]}" -} - -read_optional_text() { - local inline_text="${1:-}" - local file_path="${2:-}" - if [[ -n "$inline_text" && -n "$file_path" ]]; then - die "use either inline text or file path, not both" - fi - if [[ -n "$file_path" ]]; then - [[ -f "$file_path" ]] || die "file not found: $file_path" - cat "$file_path" - return 0 - fi - printf '%s' "$inline_text" -} - -emit_dispatch_hints() { - local task_spec_file="${1:-}" - local issue_number="${2:-}" - local issue_subagent_entrypoint="${3:-}" - local prompt_manifest_file="${4:-}" - - python3 - "$task_spec_file" "$issue_number" "$issue_subagent_entrypoint" "$prompt_manifest_file" <<'PY' -import csv -import pathlib -import shlex -import sys - -spec_path = pathlib.Path(sys.argv[1]) -issue_number = sys.argv[2].strip() -subagent_entrypoint = sys.argv[3].strip() -manifest_path = pathlib.Path(sys.argv[4]) - -if not spec_path.is_file(): - raise SystemExit(f"error: task spec file not found: {spec_path}") -if not manifest_path.is_file(): - raise SystemExit(f"error: subagent prompt manifest not found: {manifest_path}") - -prompt_map: dict[str, dict[str, str]] = {} -with manifest_path.open("r", encoding="utf-8") as handle: - reader = csv.reader(handle, delimiter="\t") - for raw in reader: - if not raw: - continue - if raw[0].strip().startswith("#"): - continue - if len(raw) < 6: - raise SystemExit("error: malformed prompt manifest row") - task_id = raw[0].strip() - prompt_path = raw[1].strip() if len(raw) >= 2 else "" - execution_mode = raw[2].strip() if len(raw) >= 3 else "" - owner = raw[3].strip() if len(raw) >= 4 else "" - pr_group = raw[4].strip() if len(raw) >= 5 else "" - branch = raw[5].strip() if len(raw) >= 6 else "" - if not task_id: - continue - if not prompt_path: - raise SystemExit(f"error: prompt manifest missing prompt path for task {task_id}") - prompt_file = pathlib.Path(prompt_path) - if not prompt_file.is_file(): - raise SystemExit(f"error: rendered prompt file missing for task {task_id}: 
{prompt_file}") - prompt_map[task_id] = { - "prompt_path": prompt_path, - "execution_mode": execution_mode, - "owner": owner, - "pr_group": pr_group, - "branch": branch, - } - -rows = [] -with spec_path.open("r", encoding="utf-8") as handle: - reader = csv.reader(handle, delimiter="\t") - for raw in reader: - if not raw: - continue - if raw[0].strip().startswith("#"): - continue - if len(raw) < 5: - raise SystemExit("error: malformed task spec row") - task_id = raw[0].strip() - summary = raw[1].strip() if len(raw) >= 2 else "" - branch = raw[2].strip() if len(raw) >= 3 else "" - worktree = raw[3].strip() if len(raw) >= 4 else "" - owner = raw[4].strip() if len(raw) >= 5 else "subagent" - pr_group = raw[6].strip() if len(raw) >= 7 else task_id - rows.append( - { - "task_id": task_id, - "summary": summary, - "branch": branch, - "worktree": worktree, - "owner": owner, - "pr_group": pr_group or task_id, - } - ) - -groups = {} -group_order = [] -for row in rows: - key = row["pr_group"] or row["task_id"] - if key not in groups: - groups[key] = [] - group_order.append(key) - groups[key].append(row) - -print("DISPATCH_HINTS_BEGIN") -print("SUBAGENT_PROMPT_POLICY=MANDATORY_RENDERED_PROMPT") -print("START_SUBAGENT_RULE=USE_TASK_PROMPT_PATH_AS_INIT_PROMPT") -for group_key in group_order: - group_rows = groups[group_key] - leader = group_rows[0] - summary = leader["summary"] or leader["task_id"] - if len(group_rows) > 1: - summary = f"{summary} (+{len(group_rows) - 1} tasks)" - pr_title = f"feat: {summary}" - open_cmd = ( - f"{shlex.quote(subagent_entrypoint)} open-pr " - f"--issue {shlex.quote(issue_number or '<issue-number>')} " - f"--title {shlex.quote(pr_title)} " - f"--head {shlex.quote(leader['branch'])} --use-template" - ) - task_list = ",".join(row["task_id"] for row in group_rows) - print(f"PR_GROUP={group_key} HEAD={leader['branch']} TASK_COUNT={len(group_rows)} TASKS={task_list}") - for idx, row in enumerate(group_rows): - task_id = row["task_id"] - if task_id not 
in prompt_map: - raise SystemExit(f"error: missing rendered prompt in manifest for task {task_id}") - prompt_row = prompt_map[task_id] - execution_mode = prompt_row.get("execution_mode", "").strip() or "unknown" - prompt_path = prompt_row.get("prompt_path", "").strip() - if not prompt_path: - raise SystemExit(f"error: prompt manifest missing prompt path for task {task_id}") - print(f"TASK={task_id} OWNER={row['owner']} PR_GROUP={group_key} EXECUTION_MODE={execution_mode}") - print(f"TASK_PROMPT_PATH={prompt_path}") - print("START_SUBAGENT_INPUT=TASK_PROMPT_PATH") - if idx == 0: - print(f"OPEN_PR_CMD={open_cmd}") - else: - print("OPEN_PR_CMD=SHARED_WITH_GROUP") -print("DISPATCH_HINTS_END") -PY -} - -render_subagent_task_prompts() { - local task_spec_file="${1:-}" - local issue_number="${2:-}" - local issue_subagent_entrypoint="${3:-}" - local prompts_out_dir="${4:-}" - local repo_arg="${5:-}" - local prompt_manifest_out="${6:-}" - - [[ -f "$task_spec_file" ]] || die "task spec file not found: $task_spec_file" - [[ -n "$issue_number" ]] || die "issue number is required for subagent prompt rendering" - [[ -n "$issue_subagent_entrypoint" ]] || die "issue-subagent entrypoint is required" - [[ -x "$issue_subagent_entrypoint" ]] || die "missing executable: $issue_subagent_entrypoint" - [[ -n "$prompts_out_dir" ]] || die "subagent prompts output directory is required" - [[ -n "$prompt_manifest_out" ]] || die "subagent prompt manifest output path is required" - - python3 - "$task_spec_file" "$issue_number" "$issue_subagent_entrypoint" "$prompts_out_dir" "$repo_arg" "$prompt_manifest_out" <<'PY' -import csv -import pathlib -import re -import subprocess -import sys - -task_spec_path = pathlib.Path(sys.argv[1]) -issue_number = sys.argv[2].strip() -subagent_entrypoint = sys.argv[3].strip() -prompts_out_dir = pathlib.Path(sys.argv[4]) -repo_arg = sys.argv[5].strip() -manifest_out = pathlib.Path(sys.argv[6]) - -if not task_spec_path.is_file(): - raise SystemExit(f"error: task 
spec file not found: {task_spec_path}") -if not issue_number: - raise SystemExit("error: issue number is required for subagent prompt rendering") -if not subagent_entrypoint: - raise SystemExit("error: issue-subagent entrypoint is required") -if not pathlib.Path(subagent_entrypoint).is_file(): - raise SystemExit(f"error: issue-subagent entrypoint not found: {subagent_entrypoint}") - -prompts_out_dir.mkdir(parents=True, exist_ok=True) -manifest_out.parent.mkdir(parents=True, exist_ok=True) - - -def extract_note_value(notes: str, key: str) -> str: - prefix = f"{key}=" - prefix_lower = prefix.lower() - for part in (notes or "").split(";"): - token = part.strip() - if token.lower().startswith(prefix_lower): - return token[len(prefix) :].strip() - return "" - - -def safe_name(value: str) -> str: - token = re.sub(r"[^A-Za-z0-9._-]+", "-", value or "").strip("-") - return token or "task" - - -rows: list[dict[str, str]] = [] -with task_spec_path.open("r", encoding="utf-8") as handle: - reader = csv.reader(handle, delimiter="\t") - for raw in reader: - if not raw: - continue - if raw[0].strip().startswith("#"): - continue - if len(raw) < 5: - raise SystemExit("error: malformed task spec row") - task_id = raw[0].strip() - if not task_id: - continue - summary = raw[1].strip() if len(raw) >= 2 else "" - branch = raw[2].strip() if len(raw) >= 3 else "" - worktree = raw[3].strip() if len(raw) >= 4 else "" - owner = raw[4].strip() if len(raw) >= 5 else "subagent" - notes = raw[5].strip() if len(raw) >= 6 else "" - pr_group = raw[6].strip() if len(raw) >= 7 else task_id - grouping_mode = extract_note_value(notes, "pr-grouping").lower() - if grouping_mode not in {"per-sprint", "group"}: - raise SystemExit( - f"error: unsupported pr-grouping in task spec for {task_id}: {grouping_mode or '<empty>'}" - ) - if not branch: - raise SystemExit(f"error: task spec missing branch for {task_id}") - if not worktree: - raise SystemExit(f"error: task spec missing worktree for {task_id}") - 
rows.append( - { - "task_id": task_id, - "summary": summary, - "branch": branch, - "worktree": worktree, - "owner": owner or "subagent", - "notes": notes, - "pr_group": pr_group or task_id, - "grouping_mode": grouping_mode, - } - ) - -if not rows: - raise SystemExit("error: no sprint tasks found for subagent prompt rendering") - -groups: dict[str, list[dict[str, str]]] = {} -group_order: list[str] = [] -for row in rows: - key = row["pr_group"] or row["task_id"] - if key not in groups: - groups[key] = [] - group_order.append(key) - groups[key].append(row) - -manifest_rows: list[tuple[str, str, str, str, str, str]] = [] -for group_key in group_order: - group_rows = groups[group_key] - leader = group_rows[0] - summary = leader["summary"] or leader["task_id"] - if len(group_rows) > 1: - summary = f"{summary} (+{len(group_rows) - 1} tasks)" - pr_title = f"feat: {summary}" - - for row in group_rows: - task_id = row["task_id"] - if row["grouping_mode"] == "per-sprint": - execution_mode = "per-sprint" - elif len(group_rows) > 1: - execution_mode = "pr-shared" - else: - execution_mode = "pr-isolated" - - prompt_path = prompts_out_dir / f"{safe_name(task_id)}-subagent-prompt.md" - cmd = [ - subagent_entrypoint, - "render-task-prompt", - "--issue", - issue_number, - "--task-id", - task_id, - "--summary", - row["summary"] or task_id, - "--owner", - row["owner"], - "--branch", - row["branch"], - "--worktree", - row["worktree"], - "--execution-mode", - execution_mode, - "--pr-title", - pr_title, - "--output", - str(prompt_path), - ] - if row["notes"]: - cmd.extend(["--notes", row["notes"]]) - if repo_arg: - cmd.extend(["--repo", repo_arg]) - - try: - subprocess.run(cmd, check=True, capture_output=True, text=True) - except subprocess.CalledProcessError as exc: - details = (exc.stderr or "").strip() or (exc.stdout or "").strip() - raise SystemExit( - f"error: failed to render subagent prompt for {task_id}: {details or exc}" - ) from exc - - if not prompt_path.is_file() or 
prompt_path.stat().st_size == 0: - raise SystemExit(f"error: rendered prompt file missing or empty for {task_id}: {prompt_path}") - - manifest_rows.append( - ( - task_id, - str(prompt_path), - execution_mode, - row["owner"], - group_key, - row["branch"], - ) - ) - -with manifest_out.open("w", encoding="utf-8") as handle: - handle.write("# task_id\tprompt_path\texecution_mode\towner\tpr_group\tbranch\n") - for row in manifest_rows: - handle.write("\t".join(row) + "\n") - -print(manifest_out) -PY -} - -sync_issue_sprint_task_rows() { - local issue_number="${1:-}" - local task_spec_file="${2:-}" - local repo_arg="${3:-}" - local dry_run="${4:-0}" - - [[ -n "$issue_number" ]] || die "issue number is required for sprint task sync" - [[ -f "$task_spec_file" ]] || die "task spec file not found: $task_spec_file" - if [[ "$dry_run" == '1' ]]; then - return 0 - fi - - local issue_body_file='' - issue_body_file="$(mktemp)" - issue_read_body_cmd "$issue_number" "$issue_body_file" "$repo_arg" - - local synced_body_file='' - synced_body_file="$(mktemp)" - - python3 - "$issue_body_file" "$task_spec_file" "$synced_body_file" <<'PY' -import csv -import pathlib -import re -import sys - -body_path = pathlib.Path(sys.argv[1]) -task_spec_path = pathlib.Path(sys.argv[2]) -output_path = pathlib.Path(sys.argv[3]) - -if not body_path.is_file(): - raise SystemExit(f"error: issue body file not found: {body_path}") -if not task_spec_path.is_file(): - raise SystemExit(f"error: task spec file not found: {task_spec_path}") - -lines = body_path.read_text(encoding="utf-8").splitlines() - - -def section_bounds(heading: str) -> tuple[int, int]: - start = None - for idx, line in enumerate(lines): - if line.strip() == heading: - start = idx + 1 - break - if start is None: - raise SystemExit(f"error: missing required heading: {heading}") - end = len(lines) - for idx in range(start, len(lines)): - if lines[idx].startswith("## "): - end = idx - break - return start, end - - -def parse_row(line: str) -> 
list[str]: - s = line.strip() - if not (s.startswith("|") and s.endswith("|")): - return [] - return [cell.strip() for cell in s[1:-1].split("|")] - - -def is_placeholder(value: str) -> bool: - token = (value or "").strip().lower().strip("`") - if token in {"", "-", "tbd", "none", "n/a", "na", "..."}: - return True - if token.startswith("tbd"): - return True - if token.startswith("<") and token.endswith(">"): - return True - if "task ids" in token: - return True - return False - - -def normalize_pr_display(value: str) -> str: - token = (value or "").strip() - if is_placeholder(token): - return "TBD" - if m := re.fullmatch(r"PR#(\d+)", token, flags=re.IGNORECASE): - return f"#{m.group(1)}" - if m := re.fullmatch(r"#(\d+)", token): - return f"#{m.group(1)}" - if m := re.fullmatch(r"[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+#(\d+)", token): - return f"#{m.group(1)}" - if m := re.fullmatch( - r"https://github\.com/[^/\s]+/[^/\s]+/pull/(\d+)(?:[/?#].*)?", - token, - flags=re.IGNORECASE, - ): - return f"#{m.group(1)}" - return token - - -def extract_note_value(notes: str, key: str) -> str: - prefix = f"{key}=" - for part in notes.split(";"): - token = part.strip() - if token.lower().startswith(prefix): - return token[len(prefix) :].strip() - return "" - - -spec_rows: dict[str, dict[str, str]] = {} -group_sizes: dict[str, int] = {} -group_anchor: dict[str, dict[str, str]] = {} -with task_spec_path.open("r", encoding="utf-8") as handle: - reader = csv.reader(handle, delimiter="\t") - for raw in reader: - if not raw: - continue - if raw[0].strip().startswith("#"): - continue - if len(raw) < 7: - raise SystemExit("error: malformed task spec row") - task_id = raw[0].strip() - summary = raw[1].strip() - branch = raw[2].strip() - worktree = raw[3].strip() - owner = raw[4].strip() - notes = raw[5].strip() - pr_group = raw[6].strip() or task_id - grouping_mode = extract_note_value(notes, "pr-grouping").lower() - if not task_id: - continue - if grouping_mode not in {"per-sprint", "group"}: - 
raise SystemExit( - f"error: unsupported pr-grouping in task spec for {task_id}: {grouping_mode or '<empty>'}" - ) - spec_rows[task_id] = { - "summary": summary, - "branch": branch, - "worktree": worktree, - "owner": owner, - "notes": notes, - "pr_group": pr_group, - "grouping_mode": grouping_mode, - } - group_sizes[pr_group] = group_sizes.get(pr_group, 0) + 1 - if pr_group not in group_anchor: - group_anchor[pr_group] = { - "branch": branch, - "worktree": worktree, - "owner": owner, - } - -if not spec_rows: - raise SystemExit("error: sprint task spec has no rows") - -start, end = section_bounds("## Task Decomposition") -table_rows = [idx for idx in range(start, end) if lines[idx].strip().startswith("|")] -if len(table_rows) < 3: - raise SystemExit("error: Task Decomposition must contain a markdown table with at least one task row") - -header_line_index = table_rows[0] -headers = parse_row(lines[header_line_index]) -required_columns = ["Task", "Owner", "Branch", "Worktree", "Execution Mode", "PR", "Status", "Notes"] -missing = [name for name in required_columns if name not in headers] -if missing: - raise SystemExit("error: missing Task Decomposition columns: " + ", ".join(missing)) -header_index = {name: idx for idx, name in enumerate(headers)} - -for idx in table_rows[2:]: - cells = parse_row(lines[idx]) - if not cells or len(cells) != len(headers): - continue - row_changed = False - existing_pr = cells[header_index["PR"]] - normalized_pr = normalize_pr_display(existing_pr) - if existing_pr.strip() != normalized_pr: - cells[header_index["PR"]] = normalized_pr - row_changed = True - - task_id = cells[header_index["Task"]].strip() - spec = spec_rows.get(task_id) - if spec is not None: - pr_group = spec["pr_group"] - grouping_mode = spec.get("grouping_mode", "") - if grouping_mode == "per-sprint": - mode = "per-sprint" - execution_source = group_anchor.get(pr_group, spec) - elif grouping_mode == "group": - mode = "pr-shared" if group_sizes.get(pr_group, 0) > 1 else 
"pr-isolated" - execution_source = group_anchor.get(pr_group, spec) - else: - raise SystemExit( - f"error: unsupported pr-grouping in task spec for {task_id}: {grouping_mode or '<empty>'}" - ) - - cells[header_index["Owner"]] = execution_source["owner"] or cells[header_index["Owner"]] - cells[header_index["Branch"]] = f"`{execution_source['branch']}`" - cells[header_index["Worktree"]] = f"`{execution_source['worktree']}`" - cells[header_index["Execution Mode"]] = mode - if spec["notes"]: - cells[header_index["Notes"]] = spec["notes"] - row_changed = True - - if row_changed: - lines[idx] = "| " + " | ".join(cells) + " |" - -output_path.write_text("\n".join(lines) + "\n", encoding="utf-8") -print(output_path) -PY - - run_issue_lifecycle "$dry_run" "$repo_arg" update --issue "$issue_number" --body-file "$synced_body_file" >/dev/null - rm -f "$issue_body_file" "$synced_body_file" -} - -enforce_sprint_merge_gate() { - local issue_number="${1:-}" - local sprint="${2:-}" - local repo_arg="${3:-}" - local dry_run="${4:-0}" - local mark_done="${5:-0}" # 0=check only, 1=check and mark sprint task rows done - - [[ -n "$issue_number" ]] || die "issue number is required for sprint merge gate" - [[ -n "$sprint" ]] || die "sprint number is required for sprint merge gate" - is_positive_int "$sprint" || die "sprint must be a positive integer for sprint merge gate" - - if [[ "$dry_run" == '1' ]]; then - printf 'SPRINT_MERGE_GATE=SKIP_DRY_RUN\n' - printf 'SPRINT=%s\n' "$sprint" - printf 'SPRINT_STATUS_MARK_DONE=%s\n' "$mark_done" - return 0 - fi - - require_cmd gh - require_cmd python3 - - local issue_body_file='' - issue_body_file="$(mktemp)" - issue_read_body_cmd "$issue_number" "$issue_body_file" "$repo_arg" - - local updated_body_file='' - updated_body_file="$(mktemp)" - - set +e - local gate_output='' - gate_output="$(python3 - "$issue_body_file" "$sprint" "$repo_arg" "$mark_done" "$updated_body_file" <<'PY' -import json -import pathlib -import re -import subprocess -import sys 
- -body_path = pathlib.Path(sys.argv[1]) -sprint = sys.argv[2].strip() -repo_arg = sys.argv[3].strip() -mark_done = sys.argv[4].strip() == "1" -out_path = pathlib.Path(sys.argv[5]) - -if not body_path.is_file(): - raise SystemExit(f"error: issue body file not found: {body_path}") -if not sprint.isdigit() or int(sprint) <= 0: - raise SystemExit(f"error: invalid sprint number: {sprint}") - -lines = body_path.read_text(encoding="utf-8").splitlines() - - -def parse_row(line: str) -> list[str]: - s = line.strip() - if not (s.startswith("|") and s.endswith("|")): - return [] - return [cell.strip() for cell in s[1:-1].split("|")] - - -def section_bounds(heading: str) -> tuple[int, int]: - start = None - for idx, line in enumerate(lines): - if line.strip() == heading: - start = idx + 1 - break - if start is None: - raise SystemExit(f"error: missing required heading: {heading}") - end = len(lines) - for idx in range(start, len(lines)): - if lines[idx].startswith("## "): - end = idx - break - return start, end - - -def extract_note_value(notes: str, key: str) -> str: - prefix = f"{key}=" - for part in notes.split(";"): - token = part.strip() - if token.lower().startswith(prefix): - return token[len(prefix) :].strip() - return "" - - -def is_placeholder(value: str) -> bool: - token = (value or "").strip().lower() - if token in {"", "-", "tbd", "none", "n/a", "na", "..."}: - return True - if token.startswith("tbd"): - return True - if token.startswith("<") and token.endswith(">"): - return True - if "task ids" in token: - return True - return False - - -def normalize_pr_number(value: str) -> str: - token = (value or "").strip() - if is_placeholder(token): - return "" - if m := re.fullmatch(r"#(\d+)", token): - return m.group(1) - if m := re.fullmatch(r"PR#(\d+)", token, flags=re.IGNORECASE): - return m.group(1) - if m := re.fullmatch(r"[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+#(\d+)", token): - return m.group(1) - if m := re.fullmatch( - 
r"https://github\.com/[^/\s]+/[^/\s]+/pull/(\d+)(?:[/?#].*)?", - token, - flags=re.IGNORECASE, - ): - return m.group(1) - return "" - - -start, end = section_bounds("## Task Decomposition") -table_rows = [idx for idx in range(start, end) if lines[idx].strip().startswith("|")] -if len(table_rows) < 3: - raise SystemExit("error: Task Decomposition must contain a markdown table with at least one task row") - -headers = parse_row(lines[table_rows[0]]) -required_cols = ["Task", "PR", "Status", "Notes"] -missing = [col for col in required_cols if col not in headers] -if missing: - raise SystemExit("error: missing Task Decomposition columns: " + ", ".join(missing)) - -idx_task = headers.index("Task") -idx_pr = headers.index("PR") -idx_status = headers.index("Status") -idx_notes = headers.index("Notes") - -sprint_rows: list[tuple[int, list[str]]] = [] -target_sprint_tag = f"S{sprint}" -task_pat = re.compile(rf"^S{sprint}T\d+$", flags=re.IGNORECASE) - -for line_idx in table_rows[2:]: - cells = parse_row(lines[line_idx]) - if not cells or len(cells) != len(headers): - continue - task_id = cells[idx_task].strip() - notes = cells[idx_notes].strip() - note_sprint = extract_note_value(notes, "sprint").upper() - in_sprint = note_sprint == target_sprint_tag or bool(task_pat.fullmatch(task_id)) - if in_sprint: - sprint_rows.append((line_idx, cells)) - -if not sprint_rows: - raise SystemExit(f"error: no Task Decomposition rows found for sprint {sprint}") - -errors: list[str] = [] -pr_to_tasks: dict[str, list[str]] = {} - -for _line_idx, cells in sprint_rows: - task_id = cells[idx_task].strip() or "<unknown-task>" - status = cells[idx_status].strip() - pr_value = cells[idx_pr].strip() - - if not mark_done and status.strip().lower() != "done": - errors.append(f"{task_id}: Status must be done before next sprint (got: {status or '<empty>'})") - - pr_number = normalize_pr_number(pr_value) - if not pr_number: - errors.append(f"{task_id}: PR must be a concrete merged PR reference before 
sprint close gate (got: {pr_value or '<empty>'})") - continue - pr_to_tasks.setdefault(pr_number, []).append(task_id) - -for pr_number, tasks in sorted(pr_to_tasks.items(), key=lambda item: int(item[0])): - cmd = ["gh", "pr", "view", pr_number, "--json", "number,state,mergedAt,url"] - if repo_arg: - cmd.extend(["-R", repo_arg]) - proc = subprocess.run(cmd, capture_output=True, text=True) - if proc.returncode != 0: - detail = (proc.stderr or proc.stdout or "").strip() or f"exit {proc.returncode}" - errors.append(f"tasks [{', '.join(tasks)}]: failed to query PR #{pr_number}: {detail}") - continue - data = json.loads(proc.stdout or "{}") - merged_at = (data.get("mergedAt") or "").strip() - if not merged_at: - pr_url = data.get("url") or f"#{pr_number}" - pr_state = data.get("state") or "UNKNOWN" - errors.append(f"tasks [{', '.join(tasks)}]: PR is not merged ({pr_url}, state={pr_state})") - -if errors: - for err in errors: - print(f"error: {err}", file=sys.stderr) - raise SystemExit(1) - -updated_rows = 0 -if mark_done: - for line_idx, cells in sprint_rows: - if cells[idx_status].strip().lower() != "done": - cells[idx_status] = "done" - lines[line_idx] = "| " + " | ".join(cells) + " |" - updated_rows += 1 - -out_path.write_text("\n".join(lines) + "\n", encoding="utf-8") -print(f"SPRINT={sprint}") -print(f"SPRINT_TASK_COUNT={len(sprint_rows)}") -print(f"SPRINT_PR_COUNT={len(pr_to_tasks)}") -print(f"SPRINT_STATUS_ROWS_MARKED_DONE={updated_rows}") -print("SPRINT_MERGE_GATE=PASS") -PY -)" - local gate_rc=$? 
- set -e - - if [[ "$gate_rc" -ne 0 ]]; then - printf '%s\n' "$gate_output" >&2 - rm -f "$issue_body_file" "$updated_body_file" - return "$gate_rc" - fi - - if [[ -n "$gate_output" ]]; then - printf '%s\n' "$gate_output" - fi - - if [[ "$mark_done" == '1' ]]; then - run_issue_lifecycle "$dry_run" "$repo_arg" update --issue "$issue_number" --body-file "$updated_body_file" >/dev/null - printf 'SPRINT_STATUS_SYNC=UPDATED_TO_DONE\n' - fi - - rm -f "$issue_body_file" "$updated_body_file" -} - -render_sprint_comment_body() { - local mode="${1:-}" # start|ready|accepted - local plan_file="${2:-}" - local issue_number="${3:-}" - local sprint="${4:-}" - local sprint_name="${5:-}" - local task_spec_file="${6:-}" - local note_text="${7:-}" - local approval_comment_url="${8:-}" - local issue_body_file="${9:-}" - - python3 - "$mode" "$plan_file" "$issue_number" "$sprint" "$sprint_name" "$task_spec_file" "$note_text" "$approval_comment_url" "$issue_body_file" <<'PY' -import csv -import pathlib -import re -import sys - -mode = sys.argv[1].strip() -plan_file = sys.argv[2].strip() -issue_number = sys.argv[3].strip() -sprint = sys.argv[4].strip() -sprint_name = sys.argv[5].strip() -spec_path = pathlib.Path(sys.argv[6]) -note_text = sys.argv[7] -approval_url = sys.argv[8].strip() -issue_body_path_raw = sys.argv[9].strip() -issue_body_path = pathlib.Path(issue_body_path_raw) if issue_body_path_raw else None - -if mode not in {"start", "ready", "accepted"}: - raise SystemExit(f"error: unsupported sprint comment mode: {mode}") -if not spec_path.is_file(): - raise SystemExit(f"error: task spec file not found: {spec_path}") - - -def is_placeholder(value: str) -> bool: - token = (value or "").strip().lower() - if token in {"", "-", "tbd", "none", "n/a", "na", "..."}: - return True - if token.startswith("tbd"): - return True - if token.startswith("<") and token.endswith(">"): - return True - if "task ids" in token: - return True - return False - - -def normalize_pr_display(value: str) -> 
str: - token = (value or "").strip() - if is_placeholder(token): - return "" - if m := re.fullmatch(r"PR#(\d+)", token, flags=re.IGNORECASE): - return f"#{m.group(1)}" - if m := re.fullmatch(r"#(\d+)", token): - return f"#{m.group(1)}" - if m := re.fullmatch(r"[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+#(\d+)", token): - return f"#{m.group(1)}" - if m := re.fullmatch( - r"https://github\.com/[^/\s]+/[^/\s]+/pull/(\d+)(?:[/?#].*)?", - token, - flags=re.IGNORECASE, - ): - return f"#{m.group(1)}" - return token - - -def parse_row(line: str) -> list[str]: - s = line.strip() - if not (s.startswith("|") and s.endswith("|")): - return [] - return [cell.strip() for cell in s[1:-1].split("|")] - - -def extract_sprint_section(plan_path: pathlib.Path, sprint_number: str) -> str: - if not plan_path.is_file(): - raise SystemExit(f"error: plan file not found: {plan_path}") - - lines = plan_path.read_text(encoding="utf-8").splitlines() - target_re = re.compile(rf"^##\s+Sprint\s+{re.escape(sprint_number)}\b") - - start = None - for idx, line in enumerate(lines): - if target_re.match(line.strip()): - start = idx - break - - if start is None: - return "" - - end = len(lines) - for idx in range(start + 1, len(lines)): - if lines[idx].startswith("## "): - end = idx - break - - return "\n".join(lines[start:end]).strip() - - -def section_bounds(lines: list[str], heading: str) -> tuple[int, int]: - start = None - for idx, line in enumerate(lines): - if line.strip() == heading: - start = idx + 1 - break - if start is None: - raise SystemExit(f"error: missing required heading: {heading}") - end = len(lines) - for idx in range(start, len(lines)): - if lines[idx].startswith("## "): - end = idx - break - return start, end - - -def load_issue_pr_values(path: pathlib.Path | None) -> dict[str, str]: - if path is None or not path.is_file(): - return {} - text = path.read_text(encoding="utf-8") - lines = text.splitlines() - try: - start, end = section_bounds(lines, "## Task Decomposition") - except 
SystemExit: - return {} - table_lines = [line for line in lines[start:end] if line.strip().startswith("|")] - if len(table_lines) < 3: - return {} - headers = parse_row(table_lines[0]) - if "Task" not in headers or "PR" not in headers: - return {} - - pr_map: dict[str, str] = {} - for raw in table_lines[2:]: - cells = parse_row(raw) - if not cells or len(cells) != len(headers): - continue - row = {headers[idx]: cells[idx] for idx in range(len(headers))} - task = row.get("Task", "").strip() - pr_value = row.get("PR", "").strip() - if not task or is_placeholder(pr_value): - continue - pr_map[task] = pr_value - return pr_map - - -def extract_note_value(notes: str, key: str) -> str: - prefix = f"{key}=" - for part in notes.split(";"): - token = part.strip() - if token.lower().startswith(prefix): - return token[len(prefix) :].strip() - return "" - -rows = [] -with spec_path.open("r", encoding="utf-8") as handle: - reader = csv.reader(handle, delimiter="\t") - for raw in reader: - if not raw: - continue - if raw[0].strip().startswith("#"): - continue - if len(raw) < 5: - raise SystemExit("error: malformed task spec row") - task_id = raw[0].strip() - summary = raw[1].strip() if len(raw) >= 2 else "" - pr_group = raw[6].strip() if len(raw) >= 7 else task_id - notes = raw[5].strip() if len(raw) >= 6 else "" - grouping_mode = extract_note_value(notes, "pr-grouping").lower() - if not task_id: - continue - if grouping_mode not in {"per-sprint", "group"}: - raise SystemExit( - f"error: unsupported pr-grouping in task spec for {task_id}: {grouping_mode or '<empty>'}" - ) - rows.append((task_id, summary, pr_group or task_id, notes, grouping_mode)) - -group_sizes = {} -for _task_id, _summary, pr_group, _notes, _grouping_mode in rows: - group_sizes[pr_group] = group_sizes.get(pr_group, 0) + 1 -issue_pr_values = load_issue_pr_values(issue_body_path) -plan_path = pathlib.Path(plan_file) - -if mode == "start": - heading = f"## Sprint {sprint} Start" - lead = "Main-agent starts this 
sprint on the plan issue and dispatches implementation to subagents." -elif mode == "ready": - heading = f"## Sprint {sprint} Ready for Review" - lead = "Main-agent requests sprint-level review before merge/acceptance on the plan issue (the issue remains open)." -else: - heading = f"## Sprint {sprint} Accepted" - lead = "Main-agent records sprint acceptance after merge gate passes and sprint rows are synced to done (issue remains open for remaining sprints)." - -print(heading) -print("") -print(f"- Sprint: {sprint} ({sprint_name})") -print(f"- Tasks in sprint: {len(rows)}") -print(f"- Note: {lead}") -if mode == "start": - print("- Execution Mode comes from current Task Decomposition for each sprint task.") -else: - print("- PR values come from current Task Decomposition; unresolved tasks remain `TBD` until PRs are linked.") -if approval_url: - print(f"- Approval comment URL: {approval_url}") -print("") -if mode == "start": - print("| Task | Summary | Execution Mode |") - print("| --- | --- | --- |") - for task_id, summary, pr_group, _notes, grouping_mode in rows: - if grouping_mode == "per-sprint": - execution_mode = "per-sprint" - elif group_sizes.get(pr_group, 0) > 1: - execution_mode = "pr-shared" - else: - execution_mode = "pr-isolated" - print(f"| {task_id} | {summary or '-'} | {execution_mode} |") -else: - print("| Task | Summary | PR |") - print("| --- | --- | --- |") - for task_id, summary, pr_group, _notes, grouping_mode in rows: - pr_value = normalize_pr_display(issue_pr_values.get(task_id, "")) - if is_placeholder(pr_value): - if grouping_mode == "per-sprint": - pr_value = "TBD (per-sprint)" - elif grouping_mode == "group": - pr_value = f"TBD (group:{pr_group})" - elif group_sizes.get(pr_group, 0) > 1: - pr_value = f"TBD (shared:{pr_group})" - else: - pr_value = "TBD" - print(f"| {task_id} | {summary or '-'} | {pr_value} |") - -if mode == "start": - sprint_section = extract_sprint_section(plan_path, sprint) - if sprint_section: - print("") - 
print(sprint_section) - -if note_text.strip(): - print("") - print("## Main-Agent Notes") - print("") - print(note_text.strip()) -PY -} - -usage() { - cat <<'USAGE' -Usage: - plan-issue-delivery-loop.sh <subcommand> [options] - -Subcommands: - build-task-spec Build sprint-scoped task-spec TSV from a plan - build-plan-task-spec Build plan-scoped task-spec TSV (all sprints) for the single plan issue - start-plan Open one plan issue with all plan tasks in Task Decomposition - status-plan Wrapper of issue-delivery-loop status for the plan issue - ready-plan Wrapper of issue-delivery-loop ready-for-review for final plan review - close-plan Close the single plan issue after final approval + merged PR gates, then enforce worktree cleanup - cleanup-worktrees Enforce cleanup of all issue-assigned task worktrees - start-sprint Start sprint only after previous sprint merge+done gate passes - ready-sprint Post sprint-ready comment for main-agent review before merge - accept-sprint Enforce merged-PR gate, sync sprint status=done, then post accepted comment - multi-sprint-guide Print the full repeated command flow for a plan (1 plan = 1 issue) - -Main-agent role boundary: - - main-agent is orchestration/review-only - - implementation must be subagent-owned PR work - - the plan issue closes only after the final plan acceptance gate - -Common options: - --repo <owner/repo> Pass-through repository target for GitHub operations - --dry-run Print write actions without mutating GitHub state - -build-task-spec options (sprint scope): - --plan <path> Plan markdown path (required) - --sprint <number> Sprint number (required) - --task-spec-out <path> Output TSV path (default: $AGENT_HOME/out/plan-issue-delivery-loop/...) 
- --owner-prefix <text> Default: subagent - --branch-prefix <text> Default: issue - --worktree-prefix <text> Default: issue__ - --pr-grouping <mode> per-sprint | group (required; `per-spring` alias accepted) - --pr-group <task=group> Repeatable; group mode only; task can be SxTy or plan task id - -build-plan-task-spec options: - --plan <path> Plan markdown path (required) - --task-spec-out <path> Output TSV path (default: $AGENT_HOME/out/plan-issue-delivery-loop/...) - --owner-prefix <text> Default: subagent - --branch-prefix <text> Default: issue - --worktree-prefix <text> Default: issue__ - --pr-grouping <mode> per-sprint | group (required; `per-spring` alias accepted) - --pr-group <task=group> Repeatable; group mode only; task can be SxTy or plan task id - -start-plan options: - --plan <path> Plan markdown path (required) - --title <text> Override plan issue title - --task-spec-out <path> Plan task-spec output path override - --issue-body-out <path> Rendered plan issue body output path override - --owner-prefix <text> Default: subagent - --branch-prefix <text> Default: issue - --worktree-prefix <text> Default: issue__ - --pr-grouping <mode> per-sprint | group (required; `per-spring` alias accepted) - --pr-group <task=group> Repeatable; group mode only; task can be SxTy or plan task id - --label <name> Repeatable; default labels: issue, plan - -status-plan options: - --issue <number> Plan issue number (required unless --body-file) - --body-file <path> Offline issue body - --comment | --no-comment - -ready-plan options: - --issue <number> Plan issue number (required unless --body-file) - --body-file <path> Offline issue body - --summary <text> | --summary-file <path> - --label <name> Review label (default: needs-review) - --remove-label <name> Repeatable - --comment | --no-comment - --no-label-update - -close-plan options: - --issue <number> Plan issue number (required unless --body-file in --dry-run mode) - --body-file <path> Local issue body (dry-run only; no 
GitHub dependency) - --approved-comment-url <url> Final approval comment URL (required) - --reason <completed|not planned> - --comment <text> | --comment-file <path> - --allow-not-done - Note: after close gate succeeds, close-plan always runs strict worktree cleanup for issue task rows. - -cleanup-worktrees options: - --issue <number> Plan issue number (required) - --repo <owner/repo> Optional repository override - --dry-run Print matching worktrees without removing - -start-sprint / ready-sprint / accept-sprint options: - --plan <path> Plan markdown path (required) - --issue <number> Plan issue number (required) - --sprint <number> Sprint number (required) - --task-spec-out <path> Sprint task-spec output path override - --subagent-prompts-out <dir> start-sprint only; rendered per-task prompt output dir - --owner-prefix <text> Default: subagent - --branch-prefix <text> Default: issue - --worktree-prefix <text> Default: issue__ - --pr-grouping <mode> per-sprint | group (required; `per-spring` alias accepted) - --pr-group <task=group> Repeatable; group mode only; task can be SxTy or plan task id - --summary <text> | --summary-file <path> - --comment | --no-comment Default: comment when --issue is provided - --approved-comment-url <url> accept-sprint only (required; review approval record) - -multi-sprint-guide options: - --plan <path> Plan markdown path (required) - --from-sprint <number> Default: 1 - --to-sprint <number> Default: max sprint in plan - --dry-run Print a local-only rehearsal flow (no GitHub calls) -USAGE -} - -build_task_spec_cmd() { - local plan_file='' - local sprint='' - local task_spec_out='' - local owner_prefix='subagent' - local branch_prefix='issue' - local worktree_prefix='issue__' - local pr_grouping='' - local pr_group_entries=() - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --plan) - plan_file="${2:-}" - shift 2 - ;; - --sprint) - sprint="${2:-}" - shift 2 - ;; - --task-spec-out) - task_spec_out="${2:-}" - shift 2 - ;; - --owner-prefix) 
- owner_prefix="${2:-}" - shift 2 - ;; - --branch-prefix) - branch_prefix="${2:-}" - shift 2 - ;; - --worktree-prefix) - worktree_prefix="${2:-}" - shift 2 - ;; - --pr-grouping) - pr_grouping="${2:-}" - shift 2 - ;; - --pr-group) - pr_group_entries+=("${2:-}") - shift 2 - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for build-task-spec: $1" - ;; - esac - done - - [[ -n "$plan_file" ]] || usage_die "--plan is required for build-task-spec" - [[ -n "$sprint" ]] || usage_die "--sprint is required for build-task-spec" - is_positive_int "$sprint" || usage_die "--sprint must be a positive integer" - validate_pr_grouping_args "$pr_grouping" "${#pr_group_entries[@]}" - - validate_plan "$plan_file" - - local sprint_meta sprint_name sprint_task_count max_sprint - sprint_meta="$(plan_sprint_meta_tsv "$plan_file" "$sprint")" - IFS=$'\t' read -r sprint_name sprint_task_count max_sprint <<<"$sprint_meta" - [[ "$sprint_task_count" -gt 0 ]] || die "sprint ${sprint} has no tasks" - - if [[ -z "$task_spec_out" ]]; then - task_spec_out="$(default_sprint_task_spec_path "$plan_file" "$sprint")" - fi - - local pr_group_config='' - pr_group_config="$(join_lines "${pr_group_entries[@]}")" - render_task_spec_from_plan_scope "$plan_file" sprint "$sprint" "$task_spec_out" "$owner_prefix" "$branch_prefix" "$worktree_prefix" "$pr_grouping" "$pr_group_config" >/dev/null - - printf 'PLAN_FILE=%s\n' "$plan_file" - printf 'SCOPE=sprint\n' - printf 'SPRINT=%s\n' "$sprint" - printf 'SPRINT_NAME=%s\n' "$sprint_name" - printf 'SPRINT_TASK_COUNT=%s\n' "$sprint_task_count" - printf 'MAX_SPRINT=%s\n' "$max_sprint" - printf 'PR_GROUPING=%s\n' "$pr_grouping" - printf 'TASK_SPEC_PATH=%s\n' "$task_spec_out" -} - -build_plan_task_spec_cmd() { - local plan_file='' - local task_spec_out='' - local owner_prefix='subagent' - local branch_prefix='issue' - local worktree_prefix='issue__' - local pr_grouping='' - local pr_group_entries=() - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - 
--plan) - plan_file="${2:-}" - shift 2 - ;; - --task-spec-out) - task_spec_out="${2:-}" - shift 2 - ;; - --owner-prefix) - owner_prefix="${2:-}" - shift 2 - ;; - --branch-prefix) - branch_prefix="${2:-}" - shift 2 - ;; - --worktree-prefix) - worktree_prefix="${2:-}" - shift 2 - ;; - --pr-grouping) - pr_grouping="${2:-}" - shift 2 - ;; - --pr-group) - pr_group_entries+=("${2:-}") - shift 2 - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for build-plan-task-spec: $1" - ;; - esac - done - - [[ -n "$plan_file" ]] || usage_die "--plan is required for build-plan-task-spec" - validate_pr_grouping_args "$pr_grouping" "${#pr_group_entries[@]}" - validate_plan "$plan_file" - - local plan_summary plan_title max_sprint total_tasks - plan_summary="$(plan_summary_tsv "$plan_file")" - IFS=$'\t' read -r plan_title max_sprint total_tasks <<<"$plan_summary" - [[ "$total_tasks" -gt 0 ]] || die "plan has no tasks" - - if [[ -z "$task_spec_out" ]]; then - task_spec_out="$(default_plan_task_spec_path "$plan_file")" - fi - - local pr_group_config='' - pr_group_config="$(join_lines "${pr_group_entries[@]}")" - render_task_spec_from_plan_scope "$plan_file" plan '' "$task_spec_out" "$owner_prefix" "$branch_prefix" "$worktree_prefix" "$pr_grouping" "$pr_group_config" >/dev/null - - printf 'PLAN_FILE=%s\n' "$plan_file" - printf 'SCOPE=plan\n' - printf 'PLAN_TITLE=%s\n' "$plan_title" - printf 'MAX_SPRINT=%s\n' "$max_sprint" - printf 'TOTAL_TASK_COUNT=%s\n' "$total_tasks" - printf 'PR_GROUPING=%s\n' "$pr_grouping" - printf 'TASK_SPEC_PATH=%s\n' "$task_spec_out" -} - -start_plan_cmd() { - local plan_file='' - local issue_title='' - local task_spec_out='' - local issue_body_out='' - local owner_prefix='subagent' - local branch_prefix='issue' - local worktree_prefix='issue__' - local pr_grouping='' - local pr_group_entries=() - local repo_arg='' - local dry_run='0' - local labels=() - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --plan) - plan_file="${2:-}" - shift 2 - 
;; - --title) - issue_title="${2:-}" - shift 2 - ;; - --task-spec-out) - task_spec_out="${2:-}" - shift 2 - ;; - --issue-body-out) - issue_body_out="${2:-}" - shift 2 - ;; - --owner-prefix) - owner_prefix="${2:-}" - shift 2 - ;; - --branch-prefix) - branch_prefix="${2:-}" - shift 2 - ;; - --worktree-prefix) - worktree_prefix="${2:-}" - shift 2 - ;; - --pr-grouping) - pr_grouping="${2:-}" - shift 2 - ;; - --pr-group) - pr_group_entries+=("${2:-}") - shift 2 - ;; - --label) - labels+=("${2:-}") - shift 2 - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run='1' - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for start-plan: $1" - ;; - esac - done - - [[ -n "$plan_file" ]] || usage_die "--plan is required for start-plan" - validate_pr_grouping_args "$pr_grouping" "${#pr_group_entries[@]}" - validate_plan "$plan_file" - - local plan_summary plan_title max_sprint total_tasks - plan_summary="$(plan_summary_tsv "$plan_file")" - IFS=$'\t' read -r plan_title max_sprint total_tasks <<<"$plan_summary" - [[ "$total_tasks" -gt 0 ]] || die "plan has no tasks" - - if [[ -z "$task_spec_out" ]]; then - task_spec_out="$(default_plan_task_spec_path "$plan_file")" - fi - local pr_group_config='' - pr_group_config="$(join_lines "${pr_group_entries[@]}")" - render_task_spec_from_plan_scope "$plan_file" plan '' "$task_spec_out" "$owner_prefix" "$branch_prefix" "$worktree_prefix" "$pr_grouping" "$pr_group_config" >/dev/null - - if [[ -z "$issue_body_out" ]]; then - issue_body_out="$(default_plan_issue_body_path "$plan_file")" - fi - render_plan_issue_body_from_task_spec "$issue_lifecycle_template" "$plan_file" "$plan_title" "$task_spec_out" "$issue_body_out" >/dev/null - - if [[ -z "$issue_title" ]]; then - issue_title="${plan_title}" - fi - - if [[ ${#labels[@]} -eq 0 ]]; then - labels=("issue" "plan") - fi - - local start_args=(start --title "$issue_title" --body-file "$issue_body_out") - local label='' - for label in "${labels[@]}"; 
do - start_args+=(--label "$label") - done - - local start_output issue_number - if [[ "$dry_run" == '1' ]]; then - issue_number="$(default_dry_run_issue_number)" - start_output="$(join_lines \ - "ISSUE_URL=DRY-RUN-ISSUE-URL" \ - "ISSUE_NUMBER=${issue_number}" \ - "TASK_SPEC_APPLIED=0")" - else - start_output="$(run_issue_delivery "$dry_run" "$repo_arg" "${start_args[@]}")" - issue_number="$(printf '%s\n' "$start_output" | awk -F= '/^ISSUE_NUMBER=/{print $2; exit}')" - fi - printf '%s\n' "$start_output" - - if [[ -z "$issue_number" ]]; then - issue_number="$(default_dry_run_issue_number)" - fi - printf 'PLAN_FILE=%s\n' "$plan_file" - printf 'PLAN_ISSUE_NUMBER=%s\n' "$issue_number" - printf 'PLAN_TITLE=%s\n' "$plan_title" - printf 'MAX_SPRINT=%s\n' "$max_sprint" - printf 'TOTAL_TASK_COUNT=%s\n' "$total_tasks" - printf 'PR_GROUPING=%s\n' "$pr_grouping" - printf 'TASK_SPEC_PATH=%s\n' "$task_spec_out" - printf 'ISSUE_BODY_PATH=%s\n' "$issue_body_out" - printf 'DESIGN=ONE_PLAN_ONE_ISSUE\n' -} - -status_plan_cmd() { - local repo_arg='' - local dry_run='0' - local passthrough=() - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --issue|--body-file) - passthrough+=("${1:-}" "${2:-}") - shift 2 - ;; - --comment|--no-comment) - passthrough+=("${1:-}") - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run='1' - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for status-plan: $1" - ;; - esac - done - - run_issue_delivery "$dry_run" "$repo_arg" status "${passthrough[@]}" -} - -ready_plan_cmd() { - local repo_arg='' - local dry_run='0' - local passthrough=() - local issue_number='' - local body_file='' - local summary_text='' - local summary_file='' - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --issue|--body-file|--summary|--summary-file|--label|--remove-label) - if [[ "${1:-}" == "--issue" ]]; then - issue_number="${2:-}" - fi - if [[ "${1:-}" == "--body-file" ]]; then - body_file="${2:-}" - fi - if [[ "${1:-}" == 
"--summary" ]]; then - summary_text="${2:-}" - fi - if [[ "${1:-}" == "--summary-file" ]]; then - summary_file="${2:-}" - fi - passthrough+=("${1:-}" "${2:-}") - shift 2 - ;; - --comment|--no-comment|--no-label-update) - passthrough+=("${1:-}") - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run='1' - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for ready-plan: $1" - ;; - esac - done - - if [[ "$dry_run" == '1' ]]; then - if [[ -n "$issue_number" && -n "$body_file" ]]; then - usage_die "use either --issue or --body-file for ready-plan, not both" - fi - if [[ -z "$issue_number" && -z "$body_file" ]]; then - usage_die "ready-plan requires --issue or --body-file" - fi - [[ -n "$body_file" ]] || usage_die "--body-file is required for ready-plan --dry-run" - [[ -f "$body_file" ]] || usage_die "body file not found: $body_file" - summary_text="$(read_optional_text "$summary_text" "$summary_file")" - printf 'READY_PLAN_STATUS=DRY_RUN\n' - printf 'READY_PLAN_SCOPE=LOCAL_BODY_FILE\n' - printf 'READY_PLAN_BODY_FILE=%s\n' "$body_file" - if [[ -n "$summary_text" ]]; then - printf 'READY_PLAN_SUMMARY=%s\n' "$summary_text" - fi - return 0 - fi - - run_issue_delivery "$dry_run" "$repo_arg" ready-for-review "${passthrough[@]}" -} - -close_plan_cmd() { - local repo_arg='' - local dry_run='0' - local passthrough=() - local issue_number='' - local body_file='' - local approved_comment_url='' - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --issue|--body-file|--approved-comment-url|--reason|--comment|--comment-file) - if [[ "${1:-}" == "--issue" ]]; then - issue_number="${2:-}" - fi - if [[ "${1:-}" == "--body-file" ]]; then - body_file="${2:-}" - fi - if [[ "${1:-}" == "--approved-comment-url" ]]; then - approved_comment_url="${2:-}" - fi - passthrough+=("${1:-}" "${2:-}") - shift 2 - ;; - --allow-not-done) - passthrough+=("${1:-}") - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run='1' - 
shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for close-plan: $1" - ;; - esac - done - - [[ -n "$approved_comment_url" ]] || usage_die "--approved-comment-url is required for close-plan" - validate_approval_comment_url_format "$approved_comment_url" >/dev/null - - if [[ -n "$issue_number" && -n "$body_file" ]]; then - usage_die "use either --issue or --body-file for close-plan, not both" - fi - - if [[ "$dry_run" == '1' ]]; then - [[ -n "$body_file" ]] || usage_die "--body-file is required for close-plan --dry-run" - [[ -f "$body_file" ]] || usage_die "body file not found: $body_file" - cleanup_plan_issue_worktrees '' "$repo_arg" "$dry_run" "$body_file" - printf 'PLAN_CLOSE_STATUS=DRY_RUN\n' - printf 'PLAN_CLOSE_SCOPE=LOCAL_BODY_FILE\n' - printf 'PLAN_CLOSE_BODY_FILE=%s\n' "$body_file" - else - [[ -n "$issue_number" ]] || usage_die "--issue is required for close-plan" - [[ -z "$body_file" ]] || usage_die "--body-file is only supported with --dry-run" - run_issue_delivery "$dry_run" "$repo_arg" close-after-review "${passthrough[@]}" - cleanup_plan_issue_worktrees "$issue_number" "$repo_arg" "$dry_run" - printf 'PLAN_CLOSE_STATUS=SUCCESS\n' - printf 'PLAN_ISSUE_NUMBER=%s\n' "$issue_number" - printf 'DONE_CRITERIA=ISSUE_CLOSED_AND_WORKTREES_CLEANED\n' - fi -} - -cleanup_worktrees_cmd() { - local issue_number='' - local repo_arg='' - local dry_run='0' - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --issue) - issue_number="${2:-}" - shift 2 - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run='1' - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for cleanup-worktrees: $1" - ;; - esac - done - - [[ -n "$issue_number" ]] || usage_die "--issue is required for cleanup-worktrees" - cleanup_plan_issue_worktrees "$issue_number" "$repo_arg" "$dry_run" -} - -start_sprint_cmd() { - local plan_file='' - local issue_number='' - local sprint='' - local task_spec_out='' - local 
subagent_prompts_out='' - local owner_prefix='subagent' - local branch_prefix='issue' - local worktree_prefix='issue__' - local pr_grouping='' - local pr_group_entries=() - local summary_text='' - local summary_file='' - local post_comment='' - local repo_arg='' - local dry_run='0' - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --plan) - plan_file="${2:-}" - shift 2 - ;; - --issue) - issue_number="${2:-}" - shift 2 - ;; - --sprint) - sprint="${2:-}" - shift 2 - ;; - --task-spec-out) - task_spec_out="${2:-}" - shift 2 - ;; - --subagent-prompts-out) - subagent_prompts_out="${2:-}" - shift 2 - ;; - --owner-prefix) - owner_prefix="${2:-}" - shift 2 - ;; - --branch-prefix) - branch_prefix="${2:-}" - shift 2 - ;; - --worktree-prefix) - worktree_prefix="${2:-}" - shift 2 - ;; - --pr-grouping) - pr_grouping="${2:-}" - shift 2 - ;; - --pr-group) - pr_group_entries+=("${2:-}") - shift 2 - ;; - --summary) - summary_text="${2:-}" - shift 2 - ;; - --summary-file) - summary_file="${2:-}" - shift 2 - ;; - --comment) - post_comment='1' - shift - ;; - --no-comment) - post_comment='0' - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run='1' - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for start-sprint: $1" - ;; - esac - done - - [[ -n "$plan_file" ]] || usage_die "--plan is required for start-sprint" - [[ -n "$issue_number" ]] || usage_die "--issue is required for start-sprint" - [[ -n "$sprint" ]] || usage_die "--sprint is required for start-sprint" - is_positive_int "$sprint" || usage_die "--sprint must be a positive integer" - validate_pr_grouping_args "$pr_grouping" "${#pr_group_entries[@]}" - summary_text="$(read_optional_text "$summary_text" "$summary_file")" - - validate_plan "$plan_file" - - local sprint_meta sprint_name sprint_task_count max_sprint - sprint_meta="$(plan_sprint_meta_tsv "$plan_file" "$sprint")" - IFS=$'\t' read -r sprint_name sprint_task_count max_sprint <<<"$sprint_meta" - [[ 
"$sprint_task_count" -gt 0 ]] || die "sprint ${sprint} has no tasks" - - local previous_sprint='' - if (( sprint > 1 )); then - previous_sprint=$((sprint - 1)) - enforce_sprint_merge_gate "$issue_number" "$previous_sprint" "$repo_arg" "$dry_run" '0' - printf 'PREVIOUS_SPRINT_GATE=PASS sprint=%s\n' "$previous_sprint" - fi - - if [[ -z "$task_spec_out" ]]; then - task_spec_out="$(default_sprint_task_spec_path "$plan_file" "$sprint")" - fi - if [[ -z "$subagent_prompts_out" ]]; then - subagent_prompts_out="$(default_sprint_prompt_dir "$plan_file" "$sprint")" - fi - local pr_group_config='' - pr_group_config="$(join_lines "${pr_group_entries[@]}")" - render_task_spec_from_plan_scope "$plan_file" sprint "$sprint" "$task_spec_out" "$owner_prefix" "$branch_prefix" "$worktree_prefix" "$pr_grouping" "$pr_group_config" >/dev/null - local subagent_prompt_manifest='' - subagent_prompt_manifest="${subagent_prompts_out%/}/manifest.tsv" - render_subagent_task_prompts "$task_spec_out" "$issue_number" "$issue_subagent_script" "$subagent_prompts_out" "$repo_arg" "$subagent_prompt_manifest" >/dev/null - - if [[ -z "$post_comment" ]]; then - if [[ "$dry_run" == '1' ]]; then - post_comment='0' - else - post_comment='1' - fi - fi - - if (( sprint > 1 )); then - printf 'TRANSITION=SPRINT_%s_TO_%s\n' "$previous_sprint" "$sprint" - fi - - printf 'PLAN_FILE=%s\n' "$plan_file" - printf 'PLAN_ISSUE_NUMBER=%s\n' "$issue_number" - printf 'SPRINT=%s\n' "$sprint" - printf 'SPRINT_NAME=%s\n' "$sprint_name" - printf 'SPRINT_TASK_COUNT=%s\n' "$sprint_task_count" - printf 'PR_GROUPING=%s\n' "$pr_grouping" - printf 'TASK_SPEC_PATH=%s\n' "$task_spec_out" - printf 'SUBAGENT_PROMPTS_DIR=%s\n' "$subagent_prompts_out" - printf 'SUBAGENT_PROMPT_MANIFEST=%s\n' "$subagent_prompt_manifest" - printf 'SUBAGENT_DISPATCH_POLICY=RENDERED_TASK_PROMPT_REQUIRED\n' - - emit_dispatch_hints "$task_spec_out" "$issue_number" "$issue_subagent_script" "$subagent_prompt_manifest" - sync_issue_sprint_task_rows "$issue_number" 
"$task_spec_out" "$repo_arg" "$dry_run" - - local issue_body_file='' - if [[ "$dry_run" != '1' ]]; then - issue_body_file="$(mktemp)" - issue_read_body_cmd "$issue_number" "$issue_body_file" "$repo_arg" - fi - - local comment_body='' - comment_body="$(render_sprint_comment_body start "$plan_file" "$issue_number" "$sprint" "$sprint_name" "$task_spec_out" "$summary_text" '' "$issue_body_file")" - if [[ -n "$issue_body_file" ]]; then - rm -f "$issue_body_file" - fi - printf '%s\n' "$comment_body" - - if [[ "$post_comment" == '1' ]]; then - run_issue_lifecycle "$dry_run" "$repo_arg" comment --issue "$issue_number" --body "$comment_body" >/dev/null - # Avoid failing after comment post; retries would duplicate comments. - printf 'SPRINT_COMMENT_POSTED=1\n' || true - else - printf 'SPRINT_COMMENT_POSTED=0\n' - fi -} - -ready_sprint_cmd() { - local plan_file='' - local issue_number='' - local sprint='' - local task_spec_out='' - local owner_prefix='subagent' - local branch_prefix='issue' - local worktree_prefix='issue__' - local pr_grouping='' - local pr_group_entries=() - local summary_text='' - local summary_file='' - local post_comment='' - local repo_arg='' - local dry_run='0' - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --plan) - plan_file="${2:-}" - shift 2 - ;; - --issue) - issue_number="${2:-}" - shift 2 - ;; - --sprint) - sprint="${2:-}" - shift 2 - ;; - --task-spec-out) - task_spec_out="${2:-}" - shift 2 - ;; - --owner-prefix) - owner_prefix="${2:-}" - shift 2 - ;; - --branch-prefix) - branch_prefix="${2:-}" - shift 2 - ;; - --worktree-prefix) - worktree_prefix="${2:-}" - shift 2 - ;; - --pr-grouping) - pr_grouping="${2:-}" - shift 2 - ;; - --pr-group) - pr_group_entries+=("${2:-}") - shift 2 - ;; - --summary) - summary_text="${2:-}" - shift 2 - ;; - --summary-file) - summary_file="${2:-}" - shift 2 - ;; - --comment) - post_comment='1' - shift - ;; - --no-comment) - post_comment='0' - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - 
dry_run='1' - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for ready-sprint: $1" - ;; - esac - done - - [[ -n "$plan_file" ]] || usage_die "--plan is required for ready-sprint" - [[ -n "$issue_number" ]] || usage_die "--issue is required for ready-sprint" - [[ -n "$sprint" ]] || usage_die "--sprint is required for ready-sprint" - is_positive_int "$sprint" || usage_die "--sprint must be a positive integer" - validate_pr_grouping_args "$pr_grouping" "${#pr_group_entries[@]}" - summary_text="$(read_optional_text "$summary_text" "$summary_file")" - - validate_plan "$plan_file" - - local sprint_meta sprint_name sprint_task_count max_sprint - sprint_meta="$(plan_sprint_meta_tsv "$plan_file" "$sprint")" - IFS=$'\t' read -r sprint_name sprint_task_count max_sprint <<<"$sprint_meta" - [[ "$sprint_task_count" -gt 0 ]] || die "sprint ${sprint} has no tasks" - - if [[ -z "$task_spec_out" ]]; then - task_spec_out="$(default_sprint_task_spec_path "$plan_file" "$sprint")" - fi - local pr_group_config='' - pr_group_config="$(join_lines "${pr_group_entries[@]}")" - render_task_spec_from_plan_scope "$plan_file" sprint "$sprint" "$task_spec_out" "$owner_prefix" "$branch_prefix" "$worktree_prefix" "$pr_grouping" "$pr_group_config" >/dev/null - sync_issue_sprint_task_rows "$issue_number" "$task_spec_out" "$repo_arg" "$dry_run" - - if [[ -z "$post_comment" ]]; then - if [[ "$dry_run" == '1' ]]; then - post_comment='0' - else - post_comment='1' - fi - fi - - local issue_body_file='' - if [[ "$dry_run" != '1' ]]; then - issue_body_file="$(mktemp)" - issue_read_body_cmd "$issue_number" "$issue_body_file" "$repo_arg" - fi - - local comment_body='' - comment_body="$(render_sprint_comment_body ready "$plan_file" "$issue_number" "$sprint" "$sprint_name" "$task_spec_out" "$summary_text" '' "$issue_body_file")" - if [[ -n "$issue_body_file" ]]; then - rm -f "$issue_body_file" - fi - printf '%s\n' "$comment_body" - - if [[ "$post_comment" == '1' ]]; then - 
run_issue_lifecycle "$dry_run" "$repo_arg" comment --issue "$issue_number" --body "$comment_body" >/dev/null - # Avoid failing after comment post; retries would duplicate comments. - printf 'SPRINT_READY_COMMENT_POSTED=1\n' || true - else - printf 'SPRINT_READY_COMMENT_POSTED=0\n' - fi -} - -accept_sprint_cmd() { - local plan_file='' - local issue_number='' - local sprint='' - local task_spec_out='' - local owner_prefix='subagent' - local branch_prefix='issue' - local worktree_prefix='issue__' - local pr_grouping='' - local pr_group_entries=() - local summary_text='' - local summary_file='' - local approved_comment_url='' - local post_comment='' - local repo_arg='' - local dry_run='0' - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --plan) - plan_file="${2:-}" - shift 2 - ;; - --issue) - issue_number="${2:-}" - shift 2 - ;; - --sprint) - sprint="${2:-}" - shift 2 - ;; - --task-spec-out) - task_spec_out="${2:-}" - shift 2 - ;; - --owner-prefix) - owner_prefix="${2:-}" - shift 2 - ;; - --branch-prefix) - branch_prefix="${2:-}" - shift 2 - ;; - --worktree-prefix) - worktree_prefix="${2:-}" - shift 2 - ;; - --pr-grouping) - pr_grouping="${2:-}" - shift 2 - ;; - --pr-group) - pr_group_entries+=("${2:-}") - shift 2 - ;; - --summary) - summary_text="${2:-}" - shift 2 - ;; - --summary-file) - summary_file="${2:-}" - shift 2 - ;; - --approved-comment-url) - approved_comment_url="${2:-}" - shift 2 - ;; - --comment) - post_comment='1' - shift - ;; - --no-comment) - post_comment='0' - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run='1' - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for accept-sprint: $1" - ;; - esac - done - - [[ -n "$plan_file" ]] || usage_die "--plan is required for accept-sprint" - [[ -n "$issue_number" ]] || usage_die "--issue is required for accept-sprint" - [[ -n "$sprint" ]] || usage_die "--sprint is required for accept-sprint" - [[ -n "$approved_comment_url" ]] || usage_die 
"--approved-comment-url is required for accept-sprint" - is_positive_int "$sprint" || usage_die "--sprint must be a positive integer" - validate_pr_grouping_args "$pr_grouping" "${#pr_group_entries[@]}" - summary_text="$(read_optional_text "$summary_text" "$summary_file")" - validate_approval_comment_url_format "$approved_comment_url" >/dev/null - - validate_plan "$plan_file" - - local sprint_meta sprint_name sprint_task_count max_sprint - sprint_meta="$(plan_sprint_meta_tsv "$plan_file" "$sprint")" - IFS=$'\t' read -r sprint_name sprint_task_count max_sprint <<<"$sprint_meta" - [[ "$sprint_task_count" -gt 0 ]] || die "sprint ${sprint} has no tasks" - - if [[ -z "$task_spec_out" ]]; then - task_spec_out="$(default_sprint_task_spec_path "$plan_file" "$sprint")" - fi - local pr_group_config='' - pr_group_config="$(join_lines "${pr_group_entries[@]}")" - render_task_spec_from_plan_scope "$plan_file" sprint "$sprint" "$task_spec_out" "$owner_prefix" "$branch_prefix" "$worktree_prefix" "$pr_grouping" "$pr_group_config" >/dev/null - sync_issue_sprint_task_rows "$issue_number" "$task_spec_out" "$repo_arg" "$dry_run" - enforce_sprint_merge_gate "$issue_number" "$sprint" "$repo_arg" "$dry_run" '1' - - if [[ -z "$post_comment" ]]; then - if [[ "$dry_run" == '1' ]]; then - post_comment='0' - else - post_comment='1' - fi - fi - - local issue_body_file='' - if [[ "$dry_run" != '1' ]]; then - issue_body_file="$(mktemp)" - issue_read_body_cmd "$issue_number" "$issue_body_file" "$repo_arg" - fi - - local comment_body='' - comment_body="$(render_sprint_comment_body accepted "$plan_file" "$issue_number" "$sprint" "$sprint_name" "$task_spec_out" "$summary_text" "$approved_comment_url" "$issue_body_file")" - if [[ -n "$issue_body_file" ]]; then - rm -f "$issue_body_file" - fi - printf '%s\n' "$comment_body" - - if [[ "$post_comment" == '1' ]]; then - run_issue_lifecycle "$dry_run" "$repo_arg" comment --issue "$issue_number" --body "$comment_body" >/dev/null - # Avoid failing after 
comment post; retries would duplicate comments. - printf 'SPRINT_ACCEPT_COMMENT_POSTED=1\n' || true - else - printf 'SPRINT_ACCEPT_COMMENT_POSTED=0\n' - fi - - printf 'PLAN_ISSUE_REMAINS_OPEN=1\n' || true -} - -multi_sprint_guide_cmd() { - local plan_file='' - local from_sprint='1' - local to_sprint='' - local guide_dry_run='0' - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --plan) - plan_file="${2:-}" - shift 2 - ;; - --from-sprint) - from_sprint="${2:-}" - shift 2 - ;; - --to-sprint) - to_sprint="${2:-}" - shift 2 - ;; - --dry-run) - guide_dry_run='1' - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - usage_die "unknown option for multi-sprint-guide: $1" - ;; - esac - done - - [[ -n "$plan_file" ]] || usage_die "--plan is required for multi-sprint-guide" - is_positive_int "$from_sprint" || usage_die "--from-sprint must be a positive integer" - - validate_plan "$plan_file" - - local plan_summary plan_title max_sprint total_tasks - plan_summary="$(plan_summary_tsv "$plan_file")" - IFS=$'\t' read -r plan_title max_sprint total_tasks <<<"$plan_summary" - if [[ -z "$to_sprint" ]]; then - to_sprint="$max_sprint" - fi - is_positive_int "$to_sprint" || usage_die "--to-sprint must be a positive integer" - if [[ "$from_sprint" -gt "$to_sprint" ]]; then - usage_die "--from-sprint must be <= --to-sprint" - fi - - local dry_run_issue_number='' - local dry_run_issue_body='' - if [[ "$guide_dry_run" == '1' ]]; then - dry_run_issue_number="$(default_dry_run_issue_number)" - dry_run_issue_body="$(default_plan_issue_body_path "$plan_file")" - fi - - printf 'MULTI_SPRINT_GUIDE_BEGIN\n' - printf 'DESIGN=ONE_PLAN_ONE_ISSUE\n' - if [[ "$guide_dry_run" == '1' ]]; then - printf 'MODE=DRY_RUN_LOCAL\n' - else - printf 'MODE=LIVE\n' - fi - printf 'PLAN_FILE=%s\n' "$plan_file" - printf 'PLAN_TITLE=%s\n' "$plan_title" - printf 'FROM_SPRINT=%s\n' "$from_sprint" - printf 'TO_SPRINT=%s\n' "$to_sprint" - if [[ "$guide_dry_run" == '1' ]]; then - printf 'DRY_RUN_PLAN_ISSUE=%s\n' 
"$dry_run_issue_number" - printf 'DRY_RUN_ISSUE_BODY=%s\n' "$dry_run_issue_body" - printf 'STEP_1=%s\n' "$(print_cmd "$0" start-plan --plan "$plan_file" --pr-grouping "<per-sprint|group>" --dry-run)" - printf 'STEP_2=%s\n' "$(print_cmd "$0" start-sprint --plan "$plan_file" --issue "$dry_run_issue_number" --sprint "$from_sprint" --pr-grouping "<per-sprint|group>" --no-comment --dry-run)" - else - printf 'STEP_1=%s\n' "$(print_cmd "$0" start-plan --plan "$plan_file" --pr-grouping "<per-sprint|group>" --repo "<owner/repo>")" - printf 'STEP_2=%s\n' "$(print_cmd "$0" start-sprint --plan "$plan_file" --issue "<plan-issue>" --sprint "$from_sprint" --pr-grouping "<per-sprint|group>" --repo "<owner/repo>")" - fi - - local step_index=3 - local sprint='' - for (( sprint=from_sprint; sprint<to_sprint; sprint++ )); do - local next_sprint - next_sprint=$((sprint + 1)) - if [[ "$guide_dry_run" == '1' ]]; then - printf 'STEP_%s=%s\n' "$step_index" "$(print_cmd "$0" accept-sprint --plan "$plan_file" --issue "$dry_run_issue_number" --sprint "$sprint" --approved-comment-url "<approval-comment-url-sprint-${sprint}>" --pr-grouping "<per-sprint|group>" --no-comment --dry-run)" - else - printf 'STEP_%s=%s\n' "$step_index" "$(print_cmd "$0" accept-sprint --plan "$plan_file" --issue "<plan-issue>" --sprint "$sprint" --approved-comment-url "<approval-comment-url-sprint-${sprint}>" --pr-grouping "<per-sprint|group>" --repo "<owner/repo>")" - fi - step_index=$((step_index + 1)) - if [[ "$guide_dry_run" == '1' ]]; then - printf 'STEP_%s=%s\n' "$step_index" "$(print_cmd "$0" start-sprint --plan "$plan_file" --issue "$dry_run_issue_number" --sprint "$next_sprint" --pr-grouping "<per-sprint|group>" --no-comment --dry-run)" - else - printf 'STEP_%s=%s\n' "$step_index" "$(print_cmd "$0" start-sprint --plan "$plan_file" --issue "<plan-issue>" --sprint "$next_sprint" --pr-grouping "<per-sprint|group>" --repo "<owner/repo>")" - fi - step_index=$((step_index + 1)) - done - - if [[ "$guide_dry_run" == 
'1' ]]; then - printf 'STEP_%s=%s\n' "$step_index" "$(print_cmd "$0" ready-plan --body-file "$dry_run_issue_body" --summary "Final plan review" --no-comment --no-label-update --dry-run)" - else - printf 'STEP_%s=%s\n' "$step_index" "$(print_cmd "$0" ready-plan --issue "<plan-issue>" --summary "Final plan review" --repo "<owner/repo>")" - fi - step_index=$((step_index + 1)) - if [[ "$guide_dry_run" == '1' ]]; then - printf 'STEP_%s=%s\n' "$step_index" "$(print_cmd "$0" close-plan --body-file "$dry_run_issue_body" --approved-comment-url "<final-plan-approval-comment-url>" --dry-run)" - printf 'NOTE_DRY_RUN=%s\n' "Dry-run guide is local-only and does not call GitHub." - else - printf 'STEP_%s=%s\n' "$step_index" "$(print_cmd "$0" close-plan --issue "<plan-issue>" --approved-comment-url "<final-plan-approval-comment-url>" --repo "<owner/repo>")" - fi - printf 'NOTE_GROUP_MODE=%s\n' "When using --pr-grouping group, pass --pr-group for every task in the selected scope." - printf 'NOTE_SPRINT_GATE=%s\n' "Before starting sprint N+1, sprint N must be reviewed, merged, and accepted." - printf 'NOTE_ACCEPT_SYNC=%s\n' "accept-sprint enforces merged PRs for the sprint and syncs sprint task Status to done." 
- printf 'MULTI_SPRINT_GUIDE_END\n' -} - -subcommand="${1:-}" -if [[ -z "$subcommand" ]]; then - usage >&2 - exit 2 -fi -shift || true - -ensure_entrypoints - -case "$subcommand" in - build-task-spec) - build_task_spec_cmd "$@" - ;; - build-plan-task-spec) - build_plan_task_spec_cmd "$@" - ;; - start-plan) - start_plan_cmd "$@" - ;; - status-plan|status-sprint) - status_plan_cmd "$@" - ;; - ready-plan) - ready_plan_cmd "$@" - ;; - close-plan) - close_plan_cmd "$@" - ;; - cleanup-worktrees) - cleanup_worktrees_cmd "$@" - ;; - start-sprint) - start_sprint_cmd "$@" - ;; - ready-sprint) - ready_sprint_cmd "$@" - ;; - accept-sprint) - accept_sprint_cmd "$@" - ;; - multi-sprint-guide) - multi_sprint_guide_cmd "$@" - ;; - -h|--help) - usage - ;; - *) - echo "error: unknown subcommand: ${subcommand}" >&2 - usage >&2 - exit 2 - ;; -esac - -exit 0 diff --git a/skills/automation/plan-issue-delivery-loop/tests/test_automation_plan_issue_delivery_loop.py b/skills/automation/plan-issue-delivery-loop/tests/test_automation_plan_issue_delivery_loop.py index 87c87373..fa0992de 100644 --- a/skills/automation/plan-issue-delivery-loop/tests/test_automation_plan_issue_delivery_loop.py +++ b/skills/automation/plan-issue-delivery-loop/tests/test_automation_plan_issue_delivery_loop.py @@ -2,7 +2,7 @@ from pathlib import Path -from skills._shared.python.skill_testing import assert_entrypoints_exist, assert_skill_contract +from skills._shared.python.skill_testing import assert_skill_contract def test_automation_plan_issue_delivery_loop_contract() -> None: @@ -10,11 +10,6 @@ def test_automation_plan_issue_delivery_loop_contract() -> None: assert_skill_contract(skill_root) -def test_automation_plan_issue_delivery_loop_entrypoints_exist() -> None: - skill_root = Path(__file__).resolve().parents[1] - assert_entrypoints_exist(skill_root, ["scripts/plan-issue-delivery-loop.sh"]) - - def test_plan_issue_delivery_loop_skill_enforces_main_agent_role_boundary() -> None: skill_root = 
Path(__file__).resolve().parents[1] text = (skill_root / "SKILL.md").read_text(encoding="utf-8") @@ -23,113 +18,37 @@ def test_plan_issue_delivery_loop_skill_enforces_main_agent_role_boundary() -> N assert "subagent-owned PRs" in text assert "1 plan = 1 issue" in text assert "PR grouping controls" in text + assert "PR Grouping Steps (Mandatory)" in text + assert "group + auto" in text + assert "group + deterministic" in text assert "## Full Skill Flow" in text def test_plan_issue_delivery_loop_skill_requires_close_for_done() -> None: skill_root = Path(__file__).resolve().parents[1] text = (skill_root / "SKILL.md").read_text(encoding="utf-8") - assert "Definition of done: execution is complete only when `close-plan` succeeds, the plan issue is closed, and worktree cleanup passes." in text + assert "Definition of done: execution is complete only when `close-plan` succeeds, the plan issue is closed" in text + assert "worktree cleanup passes." in text assert "A successful run must terminate at `close-plan` with:" in text assert "If any close gate fails, treat the run as unfinished" in text -def test_plan_issue_delivery_loop_script_supports_sprint_progression_flow() -> None: +def test_plan_issue_delivery_loop_skill_uses_binary_first_command_contract() -> None: skill_root = Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "plan-issue-delivery-loop.sh").read_text(encoding="utf-8") + text = (skill_root / "SKILL.md").read_text(encoding="utf-8") + assert "plan-issue" in text + assert "plan-issue-local" in text assert "start-plan" in text - assert "close-plan" in text - assert "build-plan-task-spec" in text assert "start-sprint" in text + assert "ready-sprint" in text assert "accept-sprint" in text - assert "next-sprint" not in text - assert "multi-sprint-guide" in text - assert "cleanup-worktrees" in text - assert "close-after-review" in text - assert "issue_lifecycle_script" in text - assert "render_plan_issue_body_from_task_spec" in text - assert 
'"to-json"' in text - assert "validate_pr_grouping_args" in text - assert "--pr-grouping <mode>" in text - assert "--pr-group <task=group>" in text - assert "--subagent-prompts-out <dir>" in text - assert "per-sprint | group (required; `per-spring` alias accepted)" in text - assert "--pr-grouping is required (per-sprint|group)" in text - assert "per-task (default)" not in text - assert "--pr-grouping manual" not in text - assert "--pr-grouping auto" not in text - assert "render_subagent_task_prompts" in text - assert "SUBAGENT_PROMPT_POLICY=MANDATORY_RENDERED_PROMPT" in text - assert "START_SUBAGENT_INPUT=TASK_PROMPT_PATH" in text - assert "SUBAGENT_DISPATCH_POLICY=RENDERED_TASK_PROMPT_REQUIRED" in text - assert "PR_GROUP=" in text - assert "OPEN_PR_CMD=SHARED_WITH_GROUP" in text - assert "sync_issue_sprint_task_rows" in text - assert "enforce_sprint_merge_gate" in text - assert "PREVIOUS_SPRINT_GATE=PASS" in text - assert "SPRINT_STATUS_SYNC=UPDATED_TO_DONE" in text - assert "PR values come from current Task Decomposition" in text - assert "group_anchor" in text - assert "MODE=DRY_RUN_LOCAL" in text - assert "NOTE_DRY_RUN=" in text - assert "default_dry_run_issue_number" in text - - -def test_plan_issue_delivery_loop_close_plan_enforces_worktree_cleanup() -> None: - skill_root = Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "plan-issue-delivery-loop.sh").read_text(encoding="utf-8") - assert "cleanup_plan_issue_worktrees" in text - assert "--issue is required for close-plan" in text - assert "--body-file is required for close-plan --dry-run" in text - assert "PLAN_CLOSE_SCOPE=LOCAL_BODY_FILE" in text - assert "close-plan always runs strict worktree cleanup" in text - assert "WORKTREE_CLEANUP_STATUS=PASS" in text - assert "PLAN_CLOSE_STATUS=SUCCESS" in text - assert "DONE_CRITERIA=ISSUE_CLOSED_AND_WORKTREES_CLEANED" in text - - -def test_plan_issue_delivery_loop_sprint_comment_omits_redundant_plan_metadata() -> None: - skill_root = 
Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "plan-issue-delivery-loop.sh").read_text(encoding="utf-8") - assert 'print(f"- Plan issue: #{issue_number}")' not in text - assert 'print(f"- Plan file: `{plan_file}`")' not in text - - -def test_plan_issue_delivery_loop_sprint_comment_prefers_issue_pr_values() -> None: - skill_root = Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "plan-issue-delivery-loop.sh").read_text(encoding="utf-8") - assert "load_issue_pr_values" in text - assert "normalize_pr_display" in text - assert "Execution Mode comes from current Task Decomposition for each sprint task." in text - assert "| Task | Summary | Execution Mode |" in text - assert "PR values come from current Task Decomposition; unresolved tasks remain `TBD` until PRs are linked." in text - assert "extract_sprint_section" in text - assert 'if mode == "start":' in text - assert "pr-shared" in text - assert "pr-isolated" in text - assert "single-pr" not in text - - -def test_plan_issue_delivery_loop_sprint_comments_are_posted_after_sync() -> None: - skill_root = Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "plan-issue-delivery-loop.sh").read_text(encoding="utf-8") - - start_block = text.split("start_sprint_cmd() {", maxsplit=1)[1].split("ready_sprint_cmd() {", maxsplit=1)[0] - ready_block = text.split("ready_sprint_cmd() {", maxsplit=1)[1].split("accept_sprint_cmd() {", maxsplit=1)[0] - accept_block = text.split("accept_sprint_cmd() {", maxsplit=1)[1].split("multi_sprint_guide_cmd() {", maxsplit=1)[0] - - sync_call = 'sync_issue_sprint_task_rows "$issue_number" "$task_spec_out" "$repo_arg" "$dry_run"' - comment_call = 'run_issue_lifecycle "$dry_run" "$repo_arg" comment --issue "$issue_number" --body "$comment_body" >/dev/null' - - assert start_block.index(sync_call) < start_block.index(comment_call) - assert ready_block.index(sync_call) < ready_block.index(comment_call) - assert 
accept_block.index(sync_call) < accept_block.index(comment_call) + assert "ready-plan" in text + assert "close-plan" in text -def test_plan_issue_delivery_loop_sprint_comment_markers_do_not_fail_after_post() -> None: +def test_plan_issue_delivery_loop_skill_excludes_deleted_wrapper_scripts() -> None: skill_root = Path(__file__).resolve().parents[1] - text = (skill_root / "scripts" / "plan-issue-delivery-loop.sh").read_text(encoding="utf-8") - assert "printf 'SPRINT_COMMENT_POSTED=1\\n' || true" in text - assert "printf 'SPRINT_READY_COMMENT_POSTED=1\\n' || true" in text - assert "printf 'SPRINT_ACCEPT_COMMENT_POSTED=1\\n' || true" in text - assert "printf 'PLAN_ISSUE_REMAINS_OPEN=1\\n' || true" in text + text = (skill_root / "SKILL.md").read_text(encoding="utf-8") + assert ("plan-issue-delivery-loop" + ".sh") not in text + assert ("manage_issue_delivery_loop" + ".sh") not in text + assert ("manage_issue_subagent_pr" + ".sh") not in text diff --git a/skills/workflows/issue/issue-pr-review/SKILL.md b/skills/workflows/issue/issue-pr-review/SKILL.md index 4dd1cf28..e453fd9d 100644 --- a/skills/workflows/issue/issue-pr-review/SKILL.md +++ b/skills/workflows/issue/issue-pr-review/SKILL.md @@ -66,6 +66,6 @@ Failure modes: - Important review instructions should remain in PR comments; always mirror the exact comment URL into the issue to direct subagents unambiguously. - Use `--dry-run` in workflow simulations before touching live GitHub state. -- Before `merge`/`close-pr`, main-agent re-validates the current PR body using the `issue-subagent-pr` PR-body validator and must correct invalid placeholder content. +- Before `merge`/`close-pr`, main-agent runs internal/self-contained PR body hygiene validation (required headings, placeholder rejection, issue bullet check) and must correct invalid content. - Main-agent performs review/acceptance only; implementation changes belong to subagent-owned task branches/PRs. 
- This skill is the canonical path for main-agent review decisions in issue-delivery loops. diff --git a/skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh b/skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh index d52e9bb3..a4d29962 100755 --- a/skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh +++ b/skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh @@ -1,16 +1,6 @@ #!/usr/bin/env bash set -euo pipefail -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)" -skill_dir="$(cd "${script_dir}/.." && pwd -P)" -repo_root_default="$(cd "${skill_dir}/../../../.." && pwd -P)" -agent_home="${AGENT_HOME:-$repo_root_default}" - -issue_subagent_pr_script="${repo_root_default}/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh" -if [[ ! -x "$issue_subagent_pr_script" ]]; then - issue_subagent_pr_script="${agent_home%/}/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh" -fi - die() { echo "error: $*" >&2 exit 1 @@ -98,33 +88,78 @@ load_body() { printf '%s' "$body_text" } -ensure_issue_subagent_pr_script() { - [[ -x "$issue_subagent_pr_script" ]] || die "missing executable: $issue_subagent_pr_script" +validate_pr_body_hygiene_text() { + local body_text="${1:-}" + local issue_number="${2:-}" + local source_label="${3:-pr-body-check}" + + if [[ -z "${body_text//[[:space:]]/}" ]]; then + echo "error: ${source_label}: PR body cannot be empty" >&2 + return 1 + fi + + local required_heading_regexes=( + '^[[:space:]]*##[[:space:]]+Summary[[:space:]]*$' + '^[[:space:]]*##[[:space:]]+Scope[[:space:]]*$' + '^[[:space:]]*##[[:space:]]+Testing[[:space:]]*$' + '^[[:space:]]*##[[:space:]]+Issue[[:space:]]*$' + ) + local required_heading_labels=( + '## Summary' + '## Scope' + '## Testing' + '## Issue' + ) + local i=0 + for i in "${!required_heading_regexes[@]}"; do + if ! 
printf '%s\n' "$body_text" | grep -Eq "${required_heading_regexes[$i]}"; then + echo "error: ${source_label}: missing required heading '${required_heading_labels[$i]}'" >&2 + return 1 + fi + done + + local placeholder_regexes=( + '<[^>]+>' + '(^|[^[:alnum:]_])TODO([^[:alnum:]_]|$)' + '(^|[^[:alnum:]_])TBD([^[:alnum:]_]|$)' + '#<number>' + 'not[[:space:]]+run[[:space:]]*\(reason\)' + '<command>[[:space:]]*\(pass\)' + ) + local placeholder_labels=( + '<...>' + 'TODO' + 'TBD' + '#<number>' + 'not run (reason)' + '<command> (pass)' + ) + for i in "${!placeholder_regexes[@]}"; do + if printf '%s\n' "$body_text" | grep -Eiq "${placeholder_regexes[$i]}"; then + echo "error: ${source_label}: disallowed placeholder found: ${placeholder_labels[$i]}" >&2 + return 1 + fi + done + + if [[ -n "$issue_number" ]]; then + if ! printf '%s\n' "$body_text" | grep -Eq "^[[:space:]]*-[[:space:]]*#${issue_number}([^0-9]|$)"; then + echo "error: ${source_label}: missing required issue bullet '- #${issue_number}'" >&2 + return 1 + fi + fi + + return 0 } -validate_pr_body_via_subagent_script() { +validate_pr_body_hygiene_input() { local body_text="${1:-}" local body_file="${2:-}" local issue_number="${3:-}" local source_label="${4:-pr-body-check}" - ensure_issue_subagent_pr_script - local cmd=("$issue_subagent_pr_script" validate-pr-body) - if [[ -n "$body_text" && -n "$body_file" ]]; then - die "use either --pr-body or --pr-body-file, not both" - fi - if [[ -n "$body_file" ]]; then - cmd+=(--body-file "$body_file") - else - cmd+=(--body "$body_text") - fi - if [[ -n "$issue_number" ]]; then - cmd+=(--issue "$issue_number") - fi - if ! "${cmd[@]}" >/dev/null 2>&1; then - # Re-run once without redirect to preserve the underlying validation error. 
- "${cmd[@]}" >/dev/null - fi + local normalized_body='' + normalized_body="$(load_body "$body_text" "$body_file")" + validate_pr_body_hygiene_text "$normalized_body" "$issue_number" "$source_label" } ensure_pr_body_hygiene_for_close() { @@ -138,7 +173,7 @@ ensure_pr_body_hygiene_for_close() { if [[ "$dry_run" == "1" ]]; then if [[ -n "$override_body" || -n "$override_body_file" ]]; then - validate_pr_body_via_subagent_script "$override_body" "$override_body_file" "$issue_number" "${action_label}-override" + validate_pr_body_hygiene_input "$override_body" "$override_body_file" "$issue_number" "${action_label}-override" fi return 0 fi @@ -153,7 +188,7 @@ ensure_pr_body_hygiene_for_close() { local current_body='' current_body="$(run_cmd "${view_cmd[@]}")" - if validate_pr_body_via_subagent_script "$current_body" "" "$issue_number" "${action_label}-current"; then + if validate_pr_body_hygiene_input "$current_body" "" "$issue_number" "${action_label}-current"; then return 0 fi @@ -161,7 +196,7 @@ ensure_pr_body_hygiene_for_close() { die "PR #$pr_number body failed validation before ${action_label}; provide --pr-body or --pr-body-file to correct it" fi - validate_pr_body_via_subagent_script "$override_body" "$override_body_file" "$issue_number" "${action_label}-override" + validate_pr_body_hygiene_input "$override_body" "$override_body_file" "$issue_number" "${action_label}-override" local edit_cmd=(gh pr edit "$pr_number") if [[ -n "$repo_arg" ]]; then @@ -175,7 +210,7 @@ ensure_pr_body_hygiene_for_close() { run_cmd "${edit_cmd[@]}" >/dev/null current_body="$(run_cmd "${view_cmd[@]}")" - validate_pr_body_via_subagent_script "$current_body" "" "$issue_number" "${action_label}-updated" + validate_pr_body_hygiene_input "$current_body" "" "$issue_number" "${action_label}-updated" } subcommand="${1:-}" diff --git a/skills/workflows/issue/issue-pr-review/tests/test_workflows_issue_issue_pr_review.py 
b/skills/workflows/issue/issue-pr-review/tests/test_workflows_issue_issue_pr_review.py index f831d8c4..d59770b7 100644 --- a/skills/workflows/issue/issue-pr-review/tests/test_workflows_issue_issue_pr_review.py +++ b/skills/workflows/issue/issue-pr-review/tests/test_workflows_issue_issue_pr_review.py @@ -25,3 +25,17 @@ def test_issue_pr_review_skill_requires_comment_link_traceability() -> None: text = skill_md.read_text(encoding="utf-8") assert "comment URL" in text assert "issue" in text.lower() + + +def test_issue_pr_review_script_has_internal_pr_body_validator() -> None: + script_path = Path(__file__).resolve().parents[1] / "scripts" / "manage_issue_pr_review.sh" + text = script_path.read_text(encoding="utf-8") + assert "validate_pr_body_hygiene_text" in text + assert "validate_pr_body_hygiene_input" in text + assert "ensure_pr_body_hygiene_for_close" in text + + +def test_issue_pr_review_script_has_no_subagent_wrapper_dependency() -> None: + script_path = Path(__file__).resolve().parents[1] / "scripts" / "manage_issue_pr_review.sh" + text = script_path.read_text(encoding="utf-8") + assert ("manage_issue_subagent_pr" + ".sh") not in text diff --git a/skills/workflows/issue/issue-subagent-pr/SKILL.md b/skills/workflows/issue/issue-subagent-pr/SKILL.md index dc1fa143..81e9d46e 100644 --- a/skills/workflows/issue/issue-subagent-pr/SKILL.md +++ b/skills/workflows/issue/issue-subagent-pr/SKILL.md @@ -5,7 +5,7 @@ description: Subagent workflow for isolated worktree implementation, draft PR cr # Issue Subagent PR -Subagents implement only in dedicated worktrees, open/update PRs, and mirror key updates back to the owning issue. +Subagent owns implementation execution in assigned branches/worktrees and keeps PR/issue artifacts synchronized. ## Contract @@ -13,56 +13,102 @@ Prereqs: - Run inside the target git repo. - `git` and `gh` available on `PATH`, and `gh auth status` succeeds. -- Worktree branch strategy defined by the main agent. 
+- Worktree/branch ownership assigned by main-agent (or the issue Task Decomposition table when using `plan-issue` flows). Inputs: -- Issue number and subagent task scope. -- Branch/base/worktree naming inputs. -- PR title/body metadata and optional review-comment URL for follow-up updates. +- Issue number, task ID/scope, and assigned owner/branch/worktree facts. +- Base branch, PR title, and PR body markdown file path. +- Optional review comment URL + response body markdown for follow-up comments. +- Optional repository override (`owner/repo`) for `gh` commands when not running in the default remote context. Outputs: -- Dedicated worktree path for the task. -- Parameterized subagent task prompt rendered from assigned execution facts (issue/task/owner/branch/worktree/execution mode). +- Dedicated task worktree checked out to the assigned branch. - Draft PR URL for the implementation branch. -- Automatic writeback of `Task Decomposition.PR` (and related sprint comment table rows) for tasks matched by the opened PR head branch / shared `pr-group`. -- PR body validation gate that rejects unfilled templates/placeholders before PR open. -- PR follow-up comments referencing main-agent review comment URLs. -- Optional mirrored issue comments for traceability. +- PR/body validation evidence (required sections present; placeholders removed). +- Review response comments on the PR that reference the main-agent review comment URL. +- Optional issue sync comments (`gh issue comment`) that mirror task status and PR linkage. +- `plan-issue` artifact compatibility: canonical issue/PR references (`#<number>`) suitable for Task Decomposition sync. Exit codes: - `0`: success -- non-zero: invalid args, missing repo context, or `git`/`gh` failures +- non-zero: invalid inputs, failed validation checks, repo context issues, or `git`/`gh` failures Failure modes: -- Missing required flags (`--branch`, `--issue`, `--title`, `--review-comment-url`). -- Worktree path collision. 
-- PR body source conflicts (`--body` and `--body-file`). -- Missing/empty PR body for `open-pr`. -- Placeholder/template PR body content (`TBD`, `TODO`, `<...>`, `#<number>`, stub testing lines). -- Invalid subagent prompt render inputs (placeholder `Owner/Branch/Worktree`, invalid `Execution Mode`, non-subagent owner). -- `gh` auth/permissions insufficient to open or comment on PRs. +- Missing assigned execution facts (issue/task/owner/branch/worktree). +- Worktree path collision or branch already bound to another worktree. +- Empty PR body file or unresolved template placeholders (`TBD`, `TODO`, `<...>`, `#<number>`, template stub lines). +- Missing required PR body sections (`## Summary`, `## Scope`, `## Testing`, `## Issue`). +- `gh` auth/permission failures for PR/issue reads or writes. -## Entrypoint +## Command Contract (Scriptless) -- `$AGENT_HOME/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh` +- Use native `git` for worktree and branch lifecycle. +- Use native `gh` for draft PR creation and PR/issue comments. +- Use `rg`-based checks (or equivalent) for PR body section/placeholder validation before PR open and before final review updates. ## Core usage -1. Create isolated worktree: - - `.../manage_issue_subagent_pr.sh create-worktree --branch feat/issue-123-api --base main` -2. Render a subagent task prompt from assigned task facts (recommended before implementation handoff): - - `.../manage_issue_subagent_pr.sh render-task-prompt --issue 123 --task-id T1 --summary "Implement API task" --owner subagent-api --branch issue/123/t1-api --worktree .worktrees/issue/123-t1-api --execution-mode pr-isolated --pr-title "feat(issue): implement API task"` -3. Open draft PR and sync PR URL to issue: - - `cp references/PR_BODY_TEMPLATE.md /tmp/pr-123.md && <edit file>` - - `.../manage_issue_subagent_pr.sh open-pr --issue 123 --title "feat: api task" --body-file /tmp/pr-123.md` -4. 
Validate PR body before submitting (optional explicit precheck):
-   - `.../manage_issue_subagent_pr.sh validate-pr-body --issue 123 --body-file /tmp/pr-123.md`
-5. Respond to main-agent review comment with explicit link:
-   - `.../manage_issue_subagent_pr.sh respond-review --pr 456 --review-comment-url <url> --body-file references/REVIEW_RESPONSE_TEMPLATE.md --issue 123`
+1. Create isolated worktree/branch with `git worktree`:
+   - ```bash
+     ISSUE=123
+     TASK_ID=T1
+     BASE=main
+     BRANCH="issue/${ISSUE}/${TASK_ID}-api"
+     WORKTREE=".worktrees/issue-${ISSUE}-${TASK_ID}-api"
+
+     git fetch origin --prune
+     git worktree add -b "$BRANCH" "$WORKTREE" "origin/$BASE"
+     cd "$WORKTREE"
+     git branch --show-current
+     git worktree list
+     ```
+2. Prepare and validate PR body (required sections + placeholder checks):
+   - ```bash
+     BODY_FILE="$WORKTREE/.tmp/pr-${ISSUE}-${TASK_ID}.md"
+     mkdir -p "$(dirname "$BODY_FILE")"
+     cp "$AGENT_HOME/skills/workflows/issue/issue-subagent-pr/references/PR_BODY_TEMPLATE.md" "$BODY_FILE"
+     # Edit BODY_FILE and replace all template placeholders before continuing.
+
+     for section in "## Summary" "## Scope" "## Testing" "## Issue"; do
+       rg -q "^${section}$" "$BODY_FILE" || { echo "Missing section: ${section}" >&2; exit 1; }
+     done
+
+     rg -n 'TBD|TODO|<[^>]+>|#<number>|<implemented scope>|<explicitly excluded scope>|<command> \(pass\)|not run \(reason\)' "$BODY_FILE" \
+       && { echo "Placeholder content found in PR body" >&2; exit 1; } || true
+     ```
+3. Open draft PR with `gh pr create`:
+   - ```bash
+     gh pr create \
+       --draft \
+       --base "$BASE" \
+       --head "$BRANCH" \
+       --title "feat(issue-${ISSUE}): implement ${TASK_ID} API changes" \
+       --body-file "$BODY_FILE"
+
+     PR_NUMBER="$(gh pr view --json number --jq '.number')"
+     PR_URL="$(gh pr view --json url --jq '.url')"
+     echo "Opened ${PR_URL}"
+     ```
+4. 
Post review response comment with `gh pr comment`:
+   - ```bash
+     REVIEW_COMMENT_URL="https://github.com/<owner>/<repo>/pull/<pr>#issuecomment-<id>"
+     RESPONSE_FILE="$WORKTREE/.tmp/review-response-${PR_NUMBER}.md"
+     cp "$AGENT_HOME/skills/workflows/issue/issue-subagent-pr/references/REVIEW_RESPONSE_TEMPLATE.md" "$RESPONSE_FILE"
+     # Edit RESPONSE_FILE: include REVIEW_COMMENT_URL and concrete change/testing notes.
+
+     gh pr comment "$PR_NUMBER" --body-file "$RESPONSE_FILE"
+     ```
+5. Optional issue sync comment with `gh issue comment` (traceability):
+   - ```bash
+     gh issue comment "$ISSUE" \
+       --body "Task ${TASK_ID} in progress by subagent. Branch: \`${BRANCH}\`. Worktree: \`${WORKTREE}\`. PR: #${PR_NUMBER}. Review response: ${REVIEW_COMMENT_URL}"
+     ```
+6. Optional plan-issue artifact sync note:
+   - Keep Task Decomposition row fields (`Owner`, `Branch`, `Worktree`, `Execution Mode`, `PR`) aligned with actual execution facts; use canonical PR references like `#${PR_NUMBER}` so `plan-issue status-plan` / `ready-plan` snapshots remain consistent.
 
 ## References
 
@@ -72,11 +118,8 @@ Failure modes:
 
 ## Notes
 
-- Use `--dry-run` in orchestration/testing contexts.
-- `open-pr` now syncs the issue task table PR references using canonical `#<number>` format and marks matched `planned` rows as `in-progress`.
-- `render-task-prompt` is intended to freeze real execution facts (`Owner/Branch/Worktree/Execution Mode/PR title`) into a reusable subagent handoff prompt and reduce manual dispatch mistakes.
-- `render-task-prompt --issue DRY_RUN_PLAN_ISSUE` is allowed for local orchestration rehearsal flows that do not call GitHub APIs.
-- `open-pr --use-template` is not a substitute for filling the PR body; subagent must submit a fully edited body that passes validation.
+- Subagent may pre-fill `references/SUBAGENT_TASK_PROMPT_TEMPLATE.md` from assigned execution facts to avoid owner/branch/worktree drift during implementation.
+- Treat PR body validation as a required gate, not an optional cleanup step. - Keep implementation details and evidence in PR comments; issue comments should summarize status and link back to PR artifacts. -- Subagents own implementation execution; main-agent does not implement issue task code directly. -- Even when an issue has a single implementation PR, that PR remains subagent-owned. +- Subagent owns implementation execution; main-agent remains orchestration/review-only. +- Even for single-PR issues, implementation PR authorship/ownership stays with subagent. diff --git a/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh b/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh deleted file mode 100755 index 0fc8926b..00000000 --- a/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh +++ /dev/null @@ -1,1321 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -skill_dir="$(cd "${script_dir}/.." 
&& pwd)" -default_pr_template="${skill_dir}/references/PR_BODY_TEMPLATE.md" -default_task_prompt_template="${skill_dir}/references/SUBAGENT_TASK_PROMPT_TEMPLATE.md" - -die() { - echo "error: $*" >&2 - exit 1 -} - -require_cmd() { - local cmd="${1:-}" - command -v "$cmd" >/dev/null 2>&1 || die "$cmd is required" -} - -print_cmd() { - local out='' - local arg='' - for arg in "$@"; do - out+=" $(printf '%q' "$arg")" - done - printf '%s\n' "${out# }" -} - -run_cmd() { - if [[ "${dry_run}" == "1" ]]; then - echo "dry-run: $(print_cmd "$@")" >&2 - return 0 - fi - "$@" -} - -usage() { - cat <<'USAGE' -Usage: - manage_issue_subagent_pr.sh <create-worktree|open-pr|respond-review|validate-pr-body|render-task-prompt> [options] - -Subcommands: - create-worktree Create a dedicated worktree/branch for subagent implementation - open-pr Open a draft PR for an issue branch and sync PR URL back to issue - respond-review Post a PR follow-up comment referencing a main-agent review comment link - validate-pr-body Validate a PR body (placeholders/template stubs are rejected) - render-task-prompt Render a parameterized prompt template for a subagent task - -Common options: - --repo <owner/repo> Target repository (passed to gh with -R) - --dry-run Print actions without executing commands - -create-worktree options: - --branch <name> Branch to create (required) - --base <ref> Start-point reference (default: main) - --worktree-name <name> Worktree folder name override (default from branch) - --worktrees-root <dir> Worktrees root override (default: <repo>/../.worktrees/<repo>/issue) - -open-pr options: - --issue <number> Issue number (required) - --title <text> PR title (required) - --base <branch> Base branch (default: main) - --head <branch> PR head branch (default: current branch) - --body <text> PR body text - --body-file <path> PR body file - --use-template Use references/PR_BODY_TEMPLATE.md when body not set - --ready Open non-draft PR (default: draft) - --no-issue-comment Do not 
comment the issue with PR URL - -respond-review options: - --pr <number> PR number (required) - --review-comment-url <url> Main-agent PR comment URL (required) - --body <text> Additional response details - --body-file <path> Additional response details file - --issue <number> Optional issue number to mirror response status - -validate-pr-body options: - --body <text> PR body text - --body-file <path> PR body file - --issue <number> Expected issue number (optional, validates `## Issue` section when present) - -render-task-prompt options: - --issue <number|DRY_RUN_PLAN_ISSUE> Issue number (required; dry-run token allowed) - --task-id <id> Task id (required, e.g. T1) - --summary <text> Task summary from Task Decomposition (required) - --owner <subagent-id> Assigned subagent owner (required; must include 'subagent') - --branch <name> Assigned branch (required; non-TBD) - --worktree <path> Assigned worktree path (required; non-TBD) - --execution-mode <mode> per-sprint|pr-isolated|pr-shared (required) - --pr-title <text> PR title to use (required) - --base <branch> Base branch for PR/worktree (default: main) - --notes <text> Task Notes column text (optional) - --acceptance <text> Acceptance bullet (repeatable) - --acceptance-file <path> Acceptance bullets file (one item per non-empty line) - --template <path> Prompt template file (default: references/SUBAGENT_TASK_PROMPT_TEMPLATE.md) - --output <path> Write rendered prompt to file (otherwise stdout) -USAGE -} - -repo_root() { - git rev-parse --show-toplevel 2>/dev/null || die "must run inside a git repository" -} - -current_branch() { - local branch='' - branch="$(git symbolic-ref --quiet --short HEAD 2>/dev/null || true)" - [[ -n "$branch" ]] || die "cannot resolve current branch" - printf '%s\n' "$branch" -} - -validate_pr_body_text() { - local body_text="${1:-}" - local issue_number="${2:-}" - local source_label="${3:-pr body}" - - python3 - "$issue_number" "$source_label" "$body_text" <<'PY' -import re -import sys - 
-issue_number = (sys.argv[1] or "").strip() -source_label = (sys.argv[2] or "pr body").strip() -text = sys.argv[3] - -if not text.strip(): - raise SystemExit(f"error: {source_label}: PR body must not be empty") - -required_headings = ["## Summary", "## Scope", "## Testing", "## Issue"] -missing = [h for h in required_headings if h not in text] -if missing: - raise SystemExit(f"error: {source_label}: missing required PR body sections: {', '.join(missing)}") - -placeholder_patterns = [ - (r"<[^>\n]+>", "angle-bracket placeholder"), - (r"\bTODO\b", "TODO placeholder"), - (r"\bTBD\b", "TBD placeholder"), - (r"#<number>", "issue-number placeholder"), - (r"not run \(reason\)", "testing placeholder"), - (r"<command> \(pass\)", "testing command placeholder"), -] -hits = [] -for pattern, label in placeholder_patterns: - m = re.search(pattern, text, flags=re.IGNORECASE) - if m: - hits.append(f"{label}: {m.group(0)}") - -if issue_number: - issue_pat = re.compile(rf"(?m)^\s*-\s*#\s*{re.escape(issue_number)}\s*$") - if not issue_pat.search(text): - hits.append(f"missing issue link bullet for #{issue_number} in ## Issue section") - -if hits: - joined = "; ".join(hits) - raise SystemExit(f"error: {source_label}: invalid PR body content ({joined})") - -print("ok: PR body validation passed") -PY -} - -validate_pr_body_input() { - local body_text="${1:-}" - local body_file="${2:-}" - local issue_number="${3:-}" - local source_label="${4:-pr body}" - - if [[ -n "$body_text" && -n "$body_file" ]]; then - die "use either --body or --body-file, not both" - fi - if [[ -n "$body_file" ]]; then - [[ -f "$body_file" ]] || die "body file not found: $body_file" - validate_pr_body_text "$(cat "$body_file")" "$issue_number" "$source_label" - return 0 - fi - validate_pr_body_text "$body_text" "$issue_number" "$source_label" -} - -to_lower() { - printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]' -} - -is_placeholder_value() { - local value lowered - value="${1:-}" - lowered="$(to_lower "$value")" 
- case "$lowered" in - ""|"-"|tbd|none|n/a|na) - return 0 - ;; - *) - return 1 - ;; - esac -} - -normalize_execution_mode_value() { - local raw lowered - raw="${1:-}" - lowered="$(to_lower "$raw")" - lowered="${lowered//_/-}" - lowered="${lowered// /-}" - case "$lowered" in - per-sprint|pr-isolated|pr-shared) - printf '%s\n' "$lowered" - ;; - persprint) - printf '%s\n' "per-sprint" - ;; - prisolated) - printf '%s\n' "pr-isolated" - ;; - prshared) - printf '%s\n' "pr-shared" - ;; - *) - printf '%s\n' "$raw" - ;; - esac -} - -is_main_agent_owner() { - local owner lowered token - owner="${1:-}" - lowered="$(to_lower "$owner")" - token="${lowered//_/ }" - token="${token//-/ }" - token="${token// /}" - - case "$token" in - mainagent|main|codex|orchestrator|leadagent) - return 0 - ;; - esac - if [[ "$lowered" == *"main-agent"* || "$lowered" == *"main agent"* ]]; then - return 0 - fi - return 1 -} - -shell_quote() { - printf '%q' "${1:-}" -} - -normalize_pr_announcement_ref() { - local value='' - value="$(echo "${1:-}" | tr -d '\r')" - value="${value#"${value%%[![:space:]]*}"}" - value="${value%"${value##*[![:space:]]}"}" - - if [[ "$value" =~ ^#([0-9]+)$ ]]; then - printf '#%s\n' "${BASH_REMATCH[1]}" - return 0 - fi - if [[ "$value" =~ ^PR#([0-9]+)$ ]]; then - printf '#%s\n' "${BASH_REMATCH[1]}" - return 0 - fi - if [[ "$value" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+#([0-9]+)$ ]]; then - printf '#%s\n' "${BASH_REMATCH[1]}" - return 0 - fi - if [[ "$value" =~ ^https://github\.com/[^/[:space:]]+/[^/[:space:]]+/pull/([0-9]+)([/?#].*)?$ ]]; then - printf '#%s\n' "${BASH_REMATCH[1]}" - return 0 - fi - - printf '%s\n' "$value" -} - -issue_read_body_cmd() { - local issue_number="${1:-}" - local out_file="${2:-}" - local repo="${3:-}" - [[ -n "$issue_number" ]] || die "issue number is required" - [[ -n "$out_file" ]] || die "output file path is required" - - require_cmd gh - local cmd=(gh issue view "$issue_number") - if [[ -n "$repo" ]]; then - cmd+=(-R "$repo") - fi - cmd+=(--json 
body -q .body) - "${cmd[@]}" >"$out_file" -} - -issue_update_body_cmd() { - local issue_number="${1:-}" - local body_file="${2:-}" - local repo="${3:-}" - [[ -n "$issue_number" ]] || die "issue number is required" - [[ -f "$body_file" ]] || die "body file not found: $body_file" - - require_cmd gh - local cmd=(gh issue edit "$issue_number" --body-file "$body_file") - if [[ -n "$repo" ]]; then - cmd+=(--repo "$repo") - fi - "${cmd[@]}" >/dev/null -} - -resolve_repo_name_with_owner() { - local repo="${1:-}" - if [[ -n "$repo" ]]; then - printf '%s\n' "$repo" - return 0 - fi - require_cmd gh - gh repo view --json nameWithOwner -q .nameWithOwner -} - -refresh_sprint_start_comments_pr_values() { - local issue_number="${1:-}" - local task_ids_csv="${2:-}" - local pr_ref="${3:-}" - local repo="${4:-}" - local is_dry_run="${5:-0}" - [[ -n "$issue_number" ]] || die "issue number is required for sprint comment sync" - - if [[ -z "$task_ids_csv" ]]; then - return 0 - fi - if [[ "$is_dry_run" == "1" ]]; then - echo "dry-run: refresh sprint start comments PR values for issue #${issue_number}" >&2 - return 0 - fi - - local resolved_repo='' - resolved_repo="$(resolve_repo_name_with_owner "$repo")" - - local refresh_out='' - refresh_out="$(python3 - "$issue_number" "$task_ids_csv" "$pr_ref" "$resolved_repo" <<'PY' -import json -import pathlib -import re -import subprocess -import sys -import tempfile - -issue_number = sys.argv[1].strip() -task_ids_csv = sys.argv[2].strip() -pr_ref = sys.argv[3].strip() -repo = sys.argv[4].strip() - -if not issue_number: - raise SystemExit("error: issue number is required") -if not repo: - raise SystemExit("error: repo is required") - -target_tasks = {token.strip() for token in task_ids_csv.split(",") if token.strip()} -if not target_tasks: - print("UPDATED_START_COMMENTS=0") - raise SystemExit(0) - - -def parse_row(line: str) -> list[str]: - s = line.strip() - if not (s.startswith("|") and s.endswith("|")): - return [] - return [cell.strip() for 
cell in s[1:-1].split("|")] - - -def update_start_comment_body(body: str) -> tuple[str, int]: - lines = body.splitlines() - changed_rows = 0 - - idx = 0 - while idx < len(lines): - if lines[idx].strip() != "| Task | Summary | PR |": - idx += 1 - continue - header = parse_row(lines[idx]) - if idx + 1 >= len(lines): - break - sep = parse_row(lines[idx + 1]) - if not header or not sep or len(header) != len(sep): - idx += 1 - continue - header_idx = {name: col for col, name in enumerate(header)} - if "Task" not in header_idx or "PR" not in header_idx: - idx += 1 - continue - - row_i = idx + 2 - while row_i < len(lines): - row = parse_row(lines[row_i]) - if not row or len(row) != len(header): - break - task_id = row[header_idx["Task"]].strip() - if task_id in target_tasks: - if row[header_idx["PR"]].strip() != pr_ref: - row[header_idx["PR"]] = pr_ref - lines[row_i] = "| " + " | ".join(row) + " |" - changed_rows += 1 - row_i += 1 - idx = row_i + 1 - - if changed_rows == 0: - return body, 0 - return "\n".join(lines) + "\n", changed_rows - - -view_cmd = ["gh", "issue", "view", issue_number, "--repo", repo, "--json", "comments"] -view = subprocess.run(view_cmd, capture_output=True, text=True, check=True) -comments = json.loads(view.stdout).get("comments", []) - -updated_comments = 0 -for comment in comments: - body = comment.get("body", "") - new_body, changed_rows = update_start_comment_body(body) - if changed_rows == 0: - continue - with tempfile.NamedTemporaryFile("w", encoding="utf-8", delete=False) as tmp: - payload_path = pathlib.Path(tmp.name) - json.dump({"body": new_body}, tmp) - try: - patch_cmd = [ - "gh", - "api", - f"repos/{repo}/issues/comments/{comment['id']}", - "--method", - "PATCH", - "--input", - str(payload_path), - ] - subprocess.run(patch_cmd, capture_output=True, text=True, check=True) - updated_comments += 1 - finally: - payload_path.unlink(missing_ok=True) - -print(f"UPDATED_START_COMMENTS={updated_comments}") -PY -)" - - if [[ -n "$refresh_out" ]]; 
then - echo "$refresh_out" >&2 - fi -} - -sync_issue_task_pr_by_branch() { - local issue_number="${1:-}" - local head_branch="${2:-}" - local pr_ref="${3:-}" - local repo="${4:-}" - local is_dry_run="${5:-0}" - [[ -n "$issue_number" ]] || die "issue number is required for task PR sync" - [[ -n "$head_branch" ]] || die "head branch is required for task PR sync" - - if [[ "$is_dry_run" == "1" ]]; then - echo "dry-run: sync Task Decomposition PR by branch ${head_branch} -> ${pr_ref}" >&2 - return 0 - fi - - local issue_body_file='' - issue_body_file="$(mktemp)" - issue_read_body_cmd "$issue_number" "$issue_body_file" "$repo" - - local synced_body_file='' - synced_body_file="$(mktemp)" - - local sync_out='' - sync_out="$(python3 - "$issue_body_file" "$head_branch" "$pr_ref" "$synced_body_file" <<'PY' -import pathlib -import re -import sys - -body_path = pathlib.Path(sys.argv[1]) -head_branch = sys.argv[2].strip() -pr_ref = sys.argv[3].strip() -output_path = pathlib.Path(sys.argv[4]) - -if not body_path.is_file(): - raise SystemExit(f"error: issue body file not found: {body_path}") -if not head_branch: - raise SystemExit("error: head branch is required") - -lines = body_path.read_text(encoding="utf-8").splitlines() - - -def section_bounds(heading: str) -> tuple[int, int]: - start = None - for idx, line in enumerate(lines): - if line.strip() == heading: - start = idx + 1 - break - if start is None: - raise SystemExit(f"error: missing required heading: {heading}") - end = len(lines) - for idx in range(start, len(lines)): - if lines[idx].startswith("## "): - end = idx - break - return start, end - - -def parse_row(line: str) -> list[str]: - s = line.strip() - if not (s.startswith("|") and s.endswith("|")): - return [] - return [cell.strip() for cell in s[1:-1].split("|")] - - -def normalize_branch(value: str) -> str: - token = (value or "").strip() - if token.startswith("`") and token.endswith("`") and len(token) >= 2: - token = token[1:-1].strip() - return token - - -def 
extract_pr_group(notes: str) -> str: - m = re.search(r"(?:^|;)\s*pr-group=([^;]+)", notes or "", flags=re.IGNORECASE) - if not m: - return "" - return m.group(1).strip() - - -def is_placeholder(value: str) -> bool: - token = (value or "").strip().lower() - if token in {"", "-", "tbd", "none", "n/a", "na", "..."}: - return True - if token.startswith("tbd"): - return True - if token.startswith("<") and token.endswith(">"): - return True - if "task ids" in token: - return True - return False - - -def normalize_pr_display(value: str) -> str: - token = (value or "").strip() - if is_placeholder(token): - return "TBD" - if m := re.fullmatch(r"PR#(\d+)", token, flags=re.IGNORECASE): - return f"#{m.group(1)}" - if m := re.fullmatch(r"#(\d+)", token): - return f"#{m.group(1)}" - if m := re.fullmatch(r"[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+#(\d+)", token): - return f"#{m.group(1)}" - if m := re.fullmatch( - r"https://github\.com/[^/\s]+/[^/\s]+/pull/(\d+)(?:[/?#].*)?", - token, - flags=re.IGNORECASE, - ): - return f"#{m.group(1)}" - return token - - -start, end = section_bounds("## Task Decomposition") -table_rows = [idx for idx in range(start, end) if lines[idx].strip().startswith("|")] -if len(table_rows) < 3: - raise SystemExit("error: Task Decomposition must contain a markdown table with at least one task row") - -headers = parse_row(lines[table_rows[0]]) -required = ["Task", "Branch", "PR"] -missing = [name for name in required if name not in headers] -if missing: - raise SystemExit("error: missing Task Decomposition columns: " + ", ".join(missing)) -header_idx = {name: idx for idx, name in enumerate(headers)} -status_idx = header_idx.get("Status") -notes_idx = header_idx.get("Notes") - -entries = [] -normalized_pr_rows = 0 -for idx in table_rows[2:]: - cells = parse_row(lines[idx]) - if not cells or len(cells) != len(headers): - continue - - normalized_pr = normalize_pr_display(cells[header_idx["PR"]].strip()) - if cells[header_idx["PR"]].strip() != normalized_pr: - 
cells[header_idx["PR"]] = normalized_pr - lines[idx] = "| " + " | ".join(cells) + " |" - normalized_pr_rows += 1 - - task_id = cells[header_idx["Task"]].strip() - branch = normalize_branch(cells[header_idx["Branch"]]) - notes = cells[notes_idx].strip() if notes_idx is not None else "" - pr_group = extract_pr_group(notes) - entries.append( - { - "line_idx": idx, - "cells": cells, - "task": task_id, - "branch": branch, - "pr_group": pr_group, - } - ) - -target_groups = set() -target_indices = set() -for entry in entries: - if entry["branch"] != head_branch: - continue - target_indices.add(entry["line_idx"]) - if entry["pr_group"]: - target_groups.add(entry["pr_group"]) - -for entry in entries: - if entry["pr_group"] and entry["pr_group"] in target_groups: - target_indices.add(entry["line_idx"]) - -updated_tasks = [] -for entry in entries: - if entry["line_idx"] not in target_indices: - continue - cells = entry["cells"] - changed = False - if cells[header_idx["PR"]].strip() != pr_ref: - cells[header_idx["PR"]] = pr_ref - changed = True - if status_idx is not None: - status_value = (cells[status_idx] or "").strip().lower() - if status_value == "planned": - cells[status_idx] = "in-progress" - changed = True - if changed: - lines[entry["line_idx"]] = "| " + " | ".join(cells) + " |" - updated_tasks.append(entry["task"]) - -output_path.write_text("\n".join(lines) + "\n", encoding="utf-8") -print(f"UPDATED_TASK_COUNT={len(updated_tasks)}") -print("UPDATED_TASK_IDS=" + ",".join(updated_tasks)) -print(f"NORMALIZED_PR_ROWS={normalized_pr_rows}") -PY -)" - - local updated_count='' - local updated_task_ids='' - local normalized_pr_rows='' - updated_count="$(printf '%s\n' "$sync_out" | awk -F= '/^UPDATED_TASK_COUNT=/{print $2; exit}')" - updated_task_ids="$(printf '%s\n' "$sync_out" | awk -F= '/^UPDATED_TASK_IDS=/{print $2; exit}')" - normalized_pr_rows="$(printf '%s\n' "$sync_out" | awk -F= '/^NORMALIZED_PR_ROWS=/{print $2; exit}')" - updated_count="${updated_count:-0}" - 
normalized_pr_rows="${normalized_pr_rows:-0}" - - local should_update='0' - if [[ "$updated_count" =~ ^[0-9]+$ ]] && [[ "$updated_count" -gt 0 ]]; then - should_update='1' - fi - if [[ "$normalized_pr_rows" =~ ^[0-9]+$ ]] && [[ "$normalized_pr_rows" -gt 0 ]]; then - should_update='1' - fi - - if [[ "$should_update" == '1' ]]; then - issue_update_body_cmd "$issue_number" "$synced_body_file" "$repo" - echo "TASK_DECOMPOSITION_PR_SYNC=updated tasks=${updated_task_ids} normalized_pr_rows=${normalized_pr_rows}" >&2 - if [[ -n "$updated_task_ids" ]]; then - refresh_sprint_start_comments_pr_values "$issue_number" "$updated_task_ids" "$pr_ref" "$repo" "$is_dry_run" - fi - else - echo "TASK_DECOMPOSITION_PR_SYNC=no matching task rows for branch ${head_branch}" >&2 - fi - - rm -f "$issue_body_file" "$synced_body_file" -} - -best_effort_sync_issue_task_pr_by_branch() { - local issue_number="${1:-}" - local head_branch="${2:-}" - local pr_ref="${3:-}" - local repo="${4:-}" - local is_dry_run="${5:-0}" - - set +e - sync_issue_task_pr_by_branch "$issue_number" "$head_branch" "$pr_ref" "$repo" "$is_dry_run" - local sync_rc=$? 
- set -e - - if [[ "$sync_rc" -ne 0 ]]; then - echo "warn: unable to sync issue task PR fields automatically (issue #${issue_number}, branch=${head_branch})" >&2 - fi - return 0 -} - -render_task_prompt_template_text() { - local template_file="${1:-}" - [[ -f "$template_file" ]] || die "template file not found: $template_file" - - python3 - "$template_file" <<'PY' -import os -import re -import sys -from pathlib import Path - -template_file = Path(sys.argv[1]) -text = template_file.read_text(encoding="utf-8") - -keys = [ - "ISSUE_NUMBER", - "TASK_ID", - "TASK_SUMMARY", - "TASK_OWNER", - "BRANCH", - "WORKTREE", - "EXECUTION_MODE", - "BASE_BRANCH", - "PR_TITLE", - "REPO_DISPLAY", - "REPO_FLAG", - "TASK_NOTES_BULLETS", - "ACCEPTANCE_BULLETS", - "PR_BODY_TEMPLATE_PATH", - "PR_BODY_DRAFT_PATH", - "ISSUE_SUBAGENT_PR_SCRIPT", - "CREATE_WORKTREE_HINT", - "OPEN_PR_COMMAND", - "VALIDATE_PR_BODY_COMMAND", -] -values = {k: os.environ.get(k, "") for k in keys} - -for key, value in values.items(): - text = text.replace(f"{{{{{key}}}}}", value) - -unresolved = sorted(set(re.findall(r"\{\{[A-Z0-9_]+\}\}", text))) -if unresolved: - raise SystemExit( - "error: unresolved prompt template placeholders: " + ", ".join(unresolved) - ) - -sys.stdout.write(text.rstrip("\n") + "\n") -PY -} - -subcommand="${1:-}" -if [[ -z "$subcommand" ]]; then - usage >&2 - exit 1 -fi -shift || true - -dry_run="0" -repo_arg="" - -case "$subcommand" in - create-worktree) - branch="" - base_ref="main" - worktree_name="" - worktrees_root="" - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --branch) - branch="${2:-}" - shift 2 - ;; - --base) - base_ref="${2:-}" - shift 2 - ;; - --worktree-name) - worktree_name="${2:-}" - shift 2 - ;; - --worktrees-root) - worktrees_root="${2:-}" - shift 2 - ;; - --dry-run) - dry_run="1" - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - die "unknown option for create-worktree: $1" - ;; - esac - done - - [[ -n "$branch" ]] || die "--branch is required for create-worktree" - 
- require_cmd git - - root="$(repo_root)" - repo_name="$(basename "$root")" - - if [[ -z "$worktrees_root" ]]; then - worktrees_root="${root}/../.worktrees/${repo_name}/issue" - fi - - if [[ -z "$worktree_name" ]]; then - worktree_name="${branch//\//__}" - fi - - worktree_path="${worktrees_root%/}/${worktree_name}" - - if [[ -e "$worktree_path" ]]; then - die "worktree path already exists: $worktree_path" - fi - - mkdir -p "$worktrees_root" - - cmd=(git worktree add -b "$branch" "$worktree_path" "$base_ref") - - if [[ "$dry_run" == "1" ]]; then - run_cmd "${cmd[@]}" - echo "$worktree_path" - exit 0 - fi - - run_cmd "${cmd[@]}" - echo "$worktree_path" - ;; - - open-pr) - issue_number="" - pr_title="" - base_branch="main" - head_branch="" - body="" - body_file="" - use_template="0" - is_draft="1" - comment_issue="1" - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --issue) - issue_number="${2:-}" - shift 2 - ;; - --title) - pr_title="${2:-}" - shift 2 - ;; - --base) - base_branch="${2:-}" - shift 2 - ;; - --head) - head_branch="${2:-}" - shift 2 - ;; - --body) - body="${2:-}" - shift 2 - ;; - --body-file) - body_file="${2:-}" - shift 2 - ;; - --use-template) - use_template="1" - shift - ;; - --ready) - is_draft="0" - shift - ;; - --no-issue-comment) - comment_issue="0" - shift - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run="1" - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - die "unknown option for open-pr: $1" - ;; - esac - done - - [[ -n "$issue_number" ]] || die "--issue is required for open-pr" - [[ -n "$pr_title" ]] || die "--title is required for open-pr" - - if [[ -z "$head_branch" ]]; then - require_cmd git - head_branch="$(current_branch)" - fi - - if [[ "$use_template" == "1" && -z "$body" && -z "$body_file" ]]; then - body_file="$default_pr_template" - fi - - if [[ -n "$body_file" && ! -f "$body_file" ]]; then - die "body file not found: $body_file" - fi - - # Subagent must submit a filled PR body. 
`--use-template` is allowed, but the - # template must be copied/edited first so placeholder tokens are gone. - if [[ -n "$body" || -n "$body_file" ]]; then - validate_pr_body_input "$body" "$body_file" "$issue_number" "open-pr" - else - die "PR body is required; provide --body/--body-file (or a filled template via --use-template)" - fi - - require_cmd gh - - cmd=(gh pr create --title "$pr_title" --base "$base_branch" --head "$head_branch") - if [[ -n "$repo_arg" ]]; then - cmd+=(-R "$repo_arg") - fi - if [[ "$is_draft" == "1" ]]; then - cmd+=(--draft) - fi - - if [[ -n "$body" ]]; then - cmd+=(--body "$body") - else - cmd+=(--body-file "$body_file") - fi - - if [[ "$dry_run" == "1" ]]; then - run_cmd "${cmd[@]}" - pr_url="DRY-RUN-PR-URL" - pr_comment_ref="$(normalize_pr_announcement_ref "$pr_url")" - best_effort_sync_issue_task_pr_by_branch "$issue_number" "$head_branch" "$pr_comment_ref" "$repo_arg" "$dry_run" - if [[ "$comment_issue" == "1" ]]; then - issue_cmd=(gh issue comment "$issue_number") - if [[ -n "$repo_arg" ]]; then - issue_cmd+=(-R "$repo_arg") - fi - issue_cmd+=(--body "Subagent opened PR: ${pr_comment_ref}") - run_cmd "${issue_cmd[@]}" - fi - echo "$pr_url" - exit 0 - fi - - pr_url="$(run_cmd "${cmd[@]}")" - pr_comment_ref="$(normalize_pr_announcement_ref "$pr_url")" - best_effort_sync_issue_task_pr_by_branch "$issue_number" "$head_branch" "$pr_comment_ref" "$repo_arg" "$dry_run" - if [[ "$comment_issue" == "1" ]]; then - issue_cmd=(gh issue comment "$issue_number") - if [[ -n "$repo_arg" ]]; then - issue_cmd+=(-R "$repo_arg") - fi - issue_cmd+=(--body "Subagent opened PR: ${pr_comment_ref}") - run_cmd "${issue_cmd[@]}" - fi - - echo "$pr_url" - ;; - - render-task-prompt) - issue_number="" - task_id="" - task_summary="" - task_owner="" - branch="" - worktree="" - execution_mode="" - pr_title="" - base_branch="main" - task_notes="" - acceptance_file="" - prompt_template_file="$default_task_prompt_template" - output_file="" - acceptance_items=() - - 
while [[ $# -gt 0 ]]; do - case "${1:-}" in - --issue) - issue_number="${2:-}" - shift 2 - ;; - --task-id) - task_id="${2:-}" - shift 2 - ;; - --summary) - task_summary="${2:-}" - shift 2 - ;; - --owner) - task_owner="${2:-}" - shift 2 - ;; - --branch) - branch="${2:-}" - shift 2 - ;; - --worktree) - worktree="${2:-}" - shift 2 - ;; - --execution-mode) - execution_mode="${2:-}" - shift 2 - ;; - --pr-title) - pr_title="${2:-}" - shift 2 - ;; - --base) - base_branch="${2:-}" - shift 2 - ;; - --notes) - task_notes="${2:-}" - shift 2 - ;; - --acceptance) - acceptance_items+=("${2:-}") - shift 2 - ;; - --acceptance-file) - acceptance_file="${2:-}" - shift 2 - ;; - --template) - prompt_template_file="${2:-}" - shift 2 - ;; - --output) - output_file="${2:-}" - shift 2 - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run="1" - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - die "unknown option for render-task-prompt: $1" - ;; - esac - done - - if [[ ! "$issue_number" =~ ^[0-9]+$ && "$issue_number" != "DRY_RUN_PLAN_ISSUE" ]]; then - die "--issue must be a numeric issue number (or DRY_RUN_PLAN_ISSUE for dry-run)" - fi - [[ -n "$task_id" ]] || die "--task-id is required for render-task-prompt" - [[ -n "$task_summary" ]] || die "--summary is required for render-task-prompt" - [[ -n "$task_owner" ]] || die "--owner is required for render-task-prompt" - [[ -n "$branch" ]] || die "--branch is required for render-task-prompt" - [[ -n "$worktree" ]] || die "--worktree is required for render-task-prompt" - [[ -n "$execution_mode" ]] || die "--execution-mode is required for render-task-prompt" - [[ -n "$pr_title" ]] || die "--pr-title is required for render-task-prompt" - - is_placeholder_value "$task_id" && die "--task-id must not be a placeholder" - is_placeholder_value "$task_summary" && die "--summary must not be a placeholder" - is_placeholder_value "$task_owner" && die "--owner must not be TBD" - is_placeholder_value "$branch" && die "--branch must not 
be TBD" - is_placeholder_value "$worktree" && die "--worktree must not be TBD" - is_placeholder_value "$pr_title" && die "--pr-title must not be TBD" - - if is_main_agent_owner "$task_owner"; then - die "--owner must not be main-agent; subagent-only ownership is required" - fi - if [[ "$(to_lower "$task_owner")" != *"subagent"* ]]; then - die "--owner must include 'subagent' to reflect delegated implementation ownership" - fi - - execution_mode="$(normalize_execution_mode_value "$execution_mode")" - case "$execution_mode" in - per-sprint|pr-isolated|pr-shared) - ;; - *) - die "--execution-mode must be one of: per-sprint, pr-isolated, pr-shared" - ;; - esac - - if [[ -n "$acceptance_file" ]]; then - [[ -f "$acceptance_file" ]] || die "acceptance file not found: $acceptance_file" - while IFS= read -r line || [[ -n "$line" ]]; do - [[ -n "${line// }" ]] || continue - acceptance_items+=("$line") - done <"$acceptance_file" - fi - - acceptance_bullets="" - if [[ ${#acceptance_items[@]} -eq 0 ]]; then - acceptance_bullets='- (Use issue/plan acceptance criteria for this task if no task-specific criteria were supplied.)' - else - for item in "${acceptance_items[@]}"; do - acceptance_bullets+="- ${item}"$'\n' - done - acceptance_bullets="${acceptance_bullets%$'\n'}" - fi - - if is_placeholder_value "$task_notes"; then - task_notes="" - fi - if [[ -z "$task_notes" ]]; then - task_notes_bullets='- (No additional task notes.)' - else - task_notes_bullets="- ${task_notes}" - fi - - task_id_safe="$(printf '%s' "$task_id" | tr -cs 'A-Za-z0-9._-' '-')" - task_id_safe="${task_id_safe#-}" - task_id_safe="${task_id_safe%-}" - [[ -n "$task_id_safe" ]] || task_id_safe="task" - - issue_subagent_pr_script_ref='$AGENT_HOME/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh' - pr_body_draft_path="/tmp/pr-${issue_number}-${task_id_safe}.md" - if [[ -n "$repo_arg" ]]; then - repo_display="$repo_arg" - repo_flag=" --repo $(shell_quote "$repo_arg")" - else - 
repo_display="(current repo context)" - repo_flag="" - fi - - validate_pr_body_command="${issue_subagent_pr_script_ref} validate-pr-body${repo_flag} --issue $(shell_quote "$issue_number") --body-file $(shell_quote "$pr_body_draft_path")" - open_pr_command="${issue_subagent_pr_script_ref} open-pr${repo_flag} --issue $(shell_quote "$issue_number") --title $(shell_quote "$pr_title") --base $(shell_quote "$base_branch") --head $(shell_quote "$branch") --body-file $(shell_quote "$pr_body_draft_path")" - create_worktree_hint="Use assigned worktree path $(shell_quote "$worktree"). If the worktree does not exist yet, create it with branch $(shell_quote "$branch") from base $(shell_quote "$base_branch"), then verify the resulting path matches the assignment before editing." - - rendered_prompt="$( - ISSUE_NUMBER="$issue_number" \ - TASK_ID="$task_id" \ - TASK_SUMMARY="$task_summary" \ - TASK_OWNER="$task_owner" \ - BRANCH="$branch" \ - WORKTREE="$worktree" \ - EXECUTION_MODE="$execution_mode" \ - BASE_BRANCH="$base_branch" \ - PR_TITLE="$pr_title" \ - REPO_DISPLAY="$repo_display" \ - REPO_FLAG="$repo_flag" \ - TASK_NOTES_BULLETS="$task_notes_bullets" \ - ACCEPTANCE_BULLETS="$acceptance_bullets" \ - PR_BODY_TEMPLATE_PATH="$default_pr_template" \ - PR_BODY_DRAFT_PATH="$pr_body_draft_path" \ - ISSUE_SUBAGENT_PR_SCRIPT="$issue_subagent_pr_script_ref" \ - CREATE_WORKTREE_HINT="$create_worktree_hint" \ - OPEN_PR_COMMAND="$open_pr_command" \ - VALIDATE_PR_BODY_COMMAND="$validate_pr_body_command" \ - render_task_prompt_template_text "$prompt_template_file" - )" - - if [[ -n "$output_file" ]]; then - output_dir="$(dirname "$output_file")" - if [[ "$dry_run" == "1" ]]; then - echo "dry-run: $(print_cmd mkdir -p "$output_dir")" >&2 - echo "dry-run: $(print_cmd write "$output_file")" >&2 - printf '%s\n' "$rendered_prompt" - exit 0 - fi - mkdir -p "$output_dir" - printf '%s\n' "$rendered_prompt" >"$output_file" - echo "$output_file" - exit 0 - fi - - printf '%s\n' "$rendered_prompt" - 
;; - - validate-pr-body) - issue_number="" - body="" - body_file="" - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --body) - body="${2:-}" - shift 2 - ;; - --body-file) - body_file="${2:-}" - shift 2 - ;; - --issue) - issue_number="${2:-}" - shift 2 - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run="1" - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - die "unknown option for validate-pr-body: $1" - ;; - esac - done - - if [[ -z "$body" && -z "$body_file" ]]; then - die "validate-pr-body requires --body or --body-file" - fi - validate_pr_body_input "$body" "$body_file" "$issue_number" "validate-pr-body" - ;; - - respond-review) - pr_number="" - review_comment_url="" - body="" - body_file="" - issue_number="" - - while [[ $# -gt 0 ]]; do - case "${1:-}" in - --pr) - pr_number="${2:-}" - shift 2 - ;; - --review-comment-url) - review_comment_url="${2:-}" - shift 2 - ;; - --body) - body="${2:-}" - shift 2 - ;; - --body-file) - body_file="${2:-}" - shift 2 - ;; - --issue) - issue_number="${2:-}" - shift 2 - ;; - --repo) - repo_arg="${2:-}" - shift 2 - ;; - --dry-run) - dry_run="1" - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - die "unknown option for respond-review: $1" - ;; - esac - done - - [[ -n "$pr_number" ]] || die "--pr is required for respond-review" - [[ -n "$review_comment_url" ]] || die "--review-comment-url is required for respond-review" - - if [[ -n "$body" && -n "$body_file" ]]; then - die "use either --body or --body-file, not both" - fi - - if [[ -n "$body_file" && ! 
-f "$body_file" ]]; then - die "body file not found: $body_file" - fi - - require_cmd gh - - additional="" - if [[ -n "$body" ]]; then - additional="$body" - elif [[ -n "$body_file" ]]; then - additional="$(cat "$body_file")" - fi - - response_body="Addressing main-agent review comment: ${review_comment_url}" - if [[ -n "$additional" ]]; then - response_body+=$'\n\n' - response_body+="$additional" - fi - - comment_cmd=(gh pr comment "$pr_number") - if [[ -n "$repo_arg" ]]; then - comment_cmd+=(-R "$repo_arg") - fi - comment_cmd+=(--body "$response_body") - - if [[ "$dry_run" == "1" ]]; then - run_cmd "${comment_cmd[@]}" - pr_comment_url="DRY-RUN-PR-COMMENT-URL" - if [[ -n "$issue_number" ]]; then - issue_cmd=(gh issue comment "$issue_number") - if [[ -n "$repo_arg" ]]; then - issue_cmd+=(-R "$repo_arg") - fi - issue_cmd+=(--body "Subagent posted an update in PR #${pr_number}: ${pr_comment_url}") - run_cmd "${issue_cmd[@]}" - fi - echo "$pr_comment_url" - exit 0 - fi - - run_cmd "${comment_cmd[@]}" - - view_cmd=(gh pr view "$pr_number") - if [[ -n "$repo_arg" ]]; then - view_cmd+=(-R "$repo_arg") - fi - view_cmd+=(--json comments -q '.comments[-1].url') - pr_comment_url="$(run_cmd "${view_cmd[@]}")" - - if [[ -n "$issue_number" ]]; then - issue_cmd=(gh issue comment "$issue_number") - if [[ -n "$repo_arg" ]]; then - issue_cmd+=(-R "$repo_arg") - fi - issue_cmd+=(--body "Subagent posted an update in PR #${pr_number}: ${pr_comment_url}") - run_cmd "${issue_cmd[@]}" - fi - - echo "$pr_comment_url" - ;; - - -h|--help) - usage - ;; - - *) - die "unknown subcommand: $subcommand" - ;; -esac diff --git a/skills/workflows/issue/issue-subagent-pr/tests/test_workflows_issue_issue_subagent_pr.py b/skills/workflows/issue/issue-subagent-pr/tests/test_workflows_issue_issue_subagent_pr.py index f5068ca3..d7c35834 100644 --- a/skills/workflows/issue/issue-subagent-pr/tests/test_workflows_issue_issue_subagent_pr.py +++ 
b/skills/workflows/issue/issue-subagent-pr/tests/test_workflows_issue_issue_subagent_pr.py @@ -2,7 +2,7 @@ from pathlib import Path -from skills._shared.python.skill_testing import assert_entrypoints_exist, assert_skill_contract +from skills._shared.python.skill_testing import assert_skill_contract def test_workflows_issue_issue_subagent_pr_contract() -> None: @@ -10,38 +10,33 @@ def test_workflows_issue_issue_subagent_pr_contract() -> None: assert_skill_contract(skill_root) -def test_workflows_issue_issue_subagent_pr_entrypoints_exist() -> None: - skill_root = Path(__file__).resolve().parents[1] - assert_entrypoints_exist( - skill_root, - [ - "scripts/manage_issue_subagent_pr.sh", - ], - ) - - def test_issue_subagent_pr_skill_mentions_worktree_isolation() -> None: skill_md = Path(__file__).resolve().parents[1] / "SKILL.md" text = skill_md.read_text(encoding="utf-8") assert "worktree" in text.lower() - assert "Subagents" in text - assert "Task Decomposition.PR" in text + assert "Subagent" in text + assert "Task Decomposition" in text + + +def test_issue_subagent_pr_skill_requires_native_git_gh_commands() -> None: + skill_md = Path(__file__).resolve().parents[1] / "SKILL.md" + text = skill_md.read_text(encoding="utf-8") + assert "git worktree" in text + assert "gh pr create" in text + assert "gh pr comment" in text -def test_issue_subagent_pr_script_syncs_issue_pr_fields_on_open() -> None: - script_path = Path(__file__).resolve().parents[1] / "scripts" / "manage_issue_subagent_pr.sh" - text = script_path.read_text(encoding="utf-8") - assert "sync_issue_task_pr_by_branch" in text - assert "refresh_sprint_start_comments_pr_values" in text - assert "UPDATED_TASK_IDS=" in text +def test_issue_subagent_pr_skill_excludes_deleted_wrapper_script() -> None: + skill_md = Path(__file__).resolve().parents[1] / "SKILL.md" + text = skill_md.read_text(encoding="utf-8") + assert ("manage_issue_subagent_pr" + ".sh") not in text -def 
test_issue_subagent_pr_execution_modes_are_explicit_and_non_legacy() -> None: - skill_root = Path(__file__).resolve().parents[1] - script_text = (skill_root / "scripts" / "manage_issue_subagent_pr.sh").read_text(encoding="utf-8") - prompt_template = (skill_root / "references" / "SUBAGENT_TASK_PROMPT_TEMPLATE.md").read_text(encoding="utf-8") +def test_issue_subagent_prompt_template_excludes_legacy_single_pr_mode() -> None: + prompt_template = ( + Path(__file__).resolve().parents[1] / "references" / "SUBAGENT_TASK_PROMPT_TEMPLATE.md" + ).read_text(encoding="utf-8") - assert "pr-isolated" in script_text - assert "pr-shared" in script_text - assert "single-pr" not in script_text + assert "pr-shared" in prompt_template + assert "pr-isolated" in prompt_template assert "single-pr" not in prompt_template diff --git a/tests/script_specs/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh.json b/tests/script_specs/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh.json deleted file mode 100644 index ba474faa..00000000 --- a/tests/script_specs/skills/automation/issue-delivery-loop/scripts/manage_issue_delivery_loop.sh.json +++ /dev/null @@ -1,94 +0,0 @@ -{ - "smoke": [ - { - "name": "start-dry-run", - "args": [ - "start", - "--title", - "Fixture issue automation", - "--dry-run" - ], - "timeout_sec": 15, - "expect": { - "exit_codes": [0], - "stdout_regex": "ISSUE_URL=DRY-RUN-ISSUE-URL" - } - }, - { - "name": "status-body-file", - "args": [ - "status", - "--body-file", - "tests/fixtures/issue/issue_body_valid.md", - "--dry-run" - ], - "timeout_sec": 15, - "expect": { - "exit_codes": [0], - "stdout_regex": "## Main-Agent Status Snapshot" - } - }, - { - "name": "status-body-file-fail-main-agent-owner", - "args": [ - "status", - "--body-file", - "tests/fixtures/issue/issue_body_main_agent_owner.md", - "--dry-run" - ], - "timeout_sec": 15, - "expect": { - "exit_codes": [1], - "stderr_regex": "Owner must not be main-agent" - } - }, - 
{ - "name": "ready-body-file", - "args": [ - "ready-for-review", - "--body-file", - "tests/fixtures/issue/issue_body_valid.md", - "--summary", - "Please focus on API contracts.", - "--dry-run" - ], - "timeout_sec": 15, - "expect": { - "exit_codes": [0], - "stdout_regex": "(?s)## Main-Agent Review Request.*\\| Task \\| Summary \\| Status \\| PR \\|" - } - }, - { - "name": "close-body-file-dry-run", - "args": [ - "close-after-review", - "--body-file", - "tests/fixtures/issue/issue_body_done.md", - "--approved-comment-url", - "https://github.com/graysurf/agent-kit/issues/999#issuecomment-123", - "--dry-run" - ], - "timeout_sec": 15, - "expect": { - "exit_codes": [0], - "stdout_regex": "DRY-RUN-CLOSE-SKIPPED" - } - }, - { - "name": "close-body-file-fail-not-done", - "args": [ - "close-after-review", - "--body-file", - "tests/fixtures/issue/issue_body_valid.md", - "--approved-comment-url", - "https://github.com/graysurf/agent-kit/issues/999#issuecomment-123", - "--dry-run" - ], - "timeout_sec": 15, - "expect": { - "exit_codes": [1], - "stderr_regex": "Status must be done before close" - } - } - ] -} diff --git a/tests/script_specs/skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh.json b/tests/script_specs/skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh.json index 97af1efa..681f9484 100644 --- a/tests/script_specs/skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh.json +++ b/tests/script_specs/skills/workflows/issue/issue-pr-review/scripts/manage_issue_pr_review.sh.json @@ -55,7 +55,7 @@ "timeout_sec": 10, "expect": { "exit_codes": [1], - "stderr_regex": "invalid PR body content" + "stderr_regex": "disallowed placeholder found" } } ] diff --git a/tests/script_specs/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh.json b/tests/script_specs/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh.json deleted file mode 100644 index 75e5311f..00000000 --- 
a/tests/script_specs/skills/workflows/issue/issue-subagent-pr/scripts/manage_issue_subagent_pr.sh.json +++ /dev/null @@ -1,160 +0,0 @@ -{ - "smoke": [ - { - "name": "create-worktree-dry-run", - "args": [ - "create-worktree", - "--branch", - "feat/issue-999-api", - "--base", - "main", - "--dry-run" - ], - "timeout_sec": 10, - "expect": { - "exit_codes": [0], - "stdout_regex": "feat__issue-999-api" - } - }, - { - "name": "render-task-prompt", - "args": [ - "render-task-prompt", - "--issue", - "999", - "--task-id", - "T1", - "--summary", - "Implement API validation", - "--owner", - "subagent-api", - "--branch", - "issue/999/t1-api", - "--worktree", - ".worktrees/issue/999-t1-api", - "--execution-mode", - "pr-isolated", - "--pr-title", - "feat(issue): implement API validation", - "--acceptance", - "Return stable error codes for invalid input" - ], - "timeout_sec": 10, - "expect": { - "exit_codes": [0], - "stdout_regex": "(?s)## Assigned Execution Facts.*Assigned worktree: `\\.worktrees/issue/999-t1-api`.*Execution mode: `pr-isolated`.*validate-pr-body.*open-pr" - } - }, - { - "name": "render-task-prompt-dry-run-issue-token", - "args": [ - "render-task-prompt", - "--issue", - "DRY_RUN_PLAN_ISSUE", - "--task-id", - "T1", - "--summary", - "Render prompt for dry-run orchestration", - "--owner", - "subagent-api", - "--branch", - "issue/dry-run/t1-api", - "--worktree", - ".worktrees/issue/dry-run-t1-api", - "--execution-mode", - "per-sprint", - "--pr-title", - "feat(issue): dry run prompt" - ], - "timeout_sec": 10, - "expect": { - "exit_codes": [0], - "stdout_regex": "(?s)Issue: `#DRY_RUN_PLAN_ISSUE`.*Execution mode: `per-sprint`" - } - }, - { - "name": "render-task-prompt-fail-main-agent-owner", - "args": [ - "render-task-prompt", - "--issue", - "999", - "--task-id", - "T1", - "--summary", - "Implement API validation", - "--owner", - "main-agent", - "--branch", - "issue/999/t1-api", - "--worktree", - ".worktrees/issue/999-t1-api", - "--execution-mode", - "pr-isolated", - 
"--pr-title", - "feat(issue): implement API validation" - ], - "timeout_sec": 10, - "expect": { - "exit_codes": [1], - "stderr_regex": "owner must not be main-agent" - } - }, - { - "name": "open-pr-dry-run", - "args": [ - "open-pr", - "--issue", - "999", - "--title", - "feat: fixture", - "--base", - "main", - "--head", - "feat/issue-999-api", - "--body-file", - "tests/fixtures/issue/pr_body_valid.md", - "--dry-run" - ], - "timeout_sec": 10, - "expect": { - "exit_codes": [0], - "stdout_regex": "DRY-RUN-PR-URL" - } - }, - { - "name": "validate-pr-body-fail-placeholder", - "args": [ - "validate-pr-body", - "--issue", - "999", - "--body-file", - "tests/fixtures/issue/pr_body_invalid_placeholder.md" - ], - "timeout_sec": 10, - "expect": { - "exit_codes": [1], - "stderr_regex": "invalid PR body content" - } - }, - { - "name": "respond-review-dry-run", - "args": [ - "respond-review", - "--pr", - "456", - "--review-comment-url", - "https://github.com/org/repo/pull/456#issuecomment-1", - "--issue", - "999", - "--body", - "Addressed feedback", - "--dry-run" - ], - "timeout_sec": 10, - "expect": { - "exit_codes": [0], - "stdout_regex": "DRY-RUN-PR-COMMENT-URL" - } - } - ] -}