From bc69fd4b0a5e063594045a2371b5a191912a2607 Mon Sep 17 00:00:00 2001 From: Oleksandr Bezdieniezhnykh Date: Wed, 25 Mar 2026 06:40:30 +0200 Subject: [PATCH] Update decomposition skill documentation and templates; remove obsolete feature specification and initial structure templates. Enhance task decomposition descriptions and adjust directory paths for project documentation. --- .DS_Store | Bin 6148 -> 6148 bytes .cursor/skills/.DS_Store | Bin 0 -> 6148 bytes .cursor/skills/autopilot/SKILL.md | 107 ++ .../skills/autopilot/flows/existing-code.md | 234 ++++ .cursor/skills/autopilot/flows/greenfield.md | 235 ++++ .cursor/skills/autopilot/protocols.md | 314 ++++++ .cursor/skills/autopilot/state.md | 122 ++ .cursor/skills/code-review/SKILL.md | 193 ++++ .cursor/skills/decompose/SKILL.md | 388 ++++--- .../decompose/templates/dependencies-table.md | 31 + ...structure.md => initial-structure-task.md} | 44 +- .cursor/skills/decompose/templates/summary.md | 59 - .../templates/{feature-spec.md => task.md} | 23 +- .../templates/test-infrastructure-task.md | 129 +++ .cursor/skills/deploy/SKILL.md | 491 ++++++++ .../skills/deploy/templates/ci_cd_pipeline.md | 87 ++ .../deploy/templates/containerization.md | 94 ++ .../skills/deploy/templates/deploy_scripts.md | 114 ++ .../deploy/templates/deploy_status_report.md | 73 ++ .../deploy/templates/deployment_procedures.md | 103 ++ .../deploy/templates/environment_strategy.md | 61 + .../skills/deploy/templates/observability.md | 132 +++ .cursor/skills/document/SKILL.md | 515 +++++++++ .cursor/skills/implement/SKILL.md | 194 ++++ .../references/batching-algorithm.md | 31 + .../implement/templates/batch-report.md | 36 + .cursor/skills/new-task/SKILL.md | 302 +++++ .cursor/skills/new-task/templates/task.md | 2 + .cursor/skills/plan/SKILL.md | 348 +----- .cursor/skills/plan/steps/00_prerequisites.md | 27 + .../plan/steps/01_artifact-management.md | 87 ++ .../skills/plan/steps/02_solution-analysis.md | 74 ++ .../plan/steps/03_component-decomposition.md | 29 + .cursor/skills/plan/steps/04_review-risk.md | 38 + .../plan/steps/05_test-specifications.md | 20 + .cursor/skills/plan/steps/06_jira-epics.md | 48 + .../skills/plan/steps/07_quality-checklist.md | 57 + .cursor/skills/plan/templates/architecture.md | 2 +- .../skills/plan/templates/blackbox-tests.md | 78 ++ .cursor/skills/plan/templates/epic-spec.md | 12 +- .cursor/skills/plan/templates/final-report.md | 2 +- .../plan/templates/performance-tests.md | 35 + .../skills/plan/templates/resilience-tests.md | 37 + .../plan/templates/resource-limit-tests.md | 31 + .../skills/plan/templates/risk-register.md | 2 +- .../skills/plan/templates/security-tests.md | 30 + .cursor/skills/plan/templates/system-flows.md | 4 +- .cursor/skills/plan/templates/test-data.md | 55 + ...-infrastructure.md => test-environment.md} | 63 +- .cursor/skills/plan/templates/test-spec.md | 4 +- .../plan/templates/traceability-matrix.md | 47 + .cursor/skills/problem/SKILL.md | 241 ++++ .cursor/skills/refactor/SKILL.md | 23 +- .cursor/skills/research/SKILL.md | 1001 +---------------- .../references/comparison-frameworks.md | 34 + .../references/novelty-sensitivity.md | 75 ++ .../research/references/quality-checklists.md | 72 ++ .../research/references/source-tiering.md | 121 ++ .../research/references/usage-examples.md | 56 + .../research/steps/00_project-integration.md | 103 ++ .../steps/01_mode-a-initial-research.md | 127 +++ .../steps/02_mode-b-solution-assessment.md | 27 + .../research/steps/03_engine-investigation.md | 227 ++++ 
.../research/steps/04_engine-analysis.md | 146 +++ .cursor/skills/retrospective/SKILL.md | 174 +++ .../templates/retrospective-report.md | 93 ++ .cursor/skills/security/SKILL.md | 544 ++++----- .../security/evals/security-testing.yaml | 789 ------------- .cursor/skills/security/schemas/output.json | 879 --------------- .../security/scripts/validate-config.json | 45 - .cursor/skills/test-run/SKILL.md | 75 ++ .cursor/skills/test-spec/SKILL.md | 469 ++++++++ .../test-spec/templates/expected-results.md | 135 +++ .../test-spec/templates/run-tests-script.md | 88 ++ .cursor/skills/ui-design/SKILL.md | 254 +++++ .../ui-design/references/anti-patterns.md | 69 ++ .../skills/ui-design/references/components.md | 307 +++++ .../ui-design/references/design-vocabulary.md | 139 +++ .../ui-design/references/quality-checklist.md | 109 ++ .../ui-design/templates/design-system.md | 199 ++++ 80 files changed, 8339 insertions(+), 3526 deletions(-) create mode 100644 .cursor/skills/.DS_Store create mode 100644 .cursor/skills/autopilot/SKILL.md create mode 100644 .cursor/skills/autopilot/flows/existing-code.md create mode 100644 .cursor/skills/autopilot/flows/greenfield.md create mode 100644 .cursor/skills/autopilot/protocols.md create mode 100644 .cursor/skills/autopilot/state.md create mode 100644 .cursor/skills/code-review/SKILL.md create mode 100644 .cursor/skills/decompose/templates/dependencies-table.md rename .cursor/skills/decompose/templates/{initial-structure.md => initial-structure-task.md} (68%) delete mode 100644 .cursor/skills/decompose/templates/summary.md rename .cursor/skills/decompose/templates/{feature-spec.md => task.md} (78%) create mode 100644 .cursor/skills/decompose/templates/test-infrastructure-task.md create mode 100644 .cursor/skills/deploy/SKILL.md create mode 100644 .cursor/skills/deploy/templates/ci_cd_pipeline.md create mode 100644 .cursor/skills/deploy/templates/containerization.md create mode 100644 .cursor/skills/deploy/templates/deploy_scripts.md create mode 100644 .cursor/skills/deploy/templates/deploy_status_report.md create mode 100644 .cursor/skills/deploy/templates/deployment_procedures.md create mode 100644 .cursor/skills/deploy/templates/environment_strategy.md create mode 100644 .cursor/skills/deploy/templates/observability.md create mode 100644 .cursor/skills/document/SKILL.md create mode 100644 .cursor/skills/implement/SKILL.md create mode 100644 .cursor/skills/implement/references/batching-algorithm.md create mode 100644 .cursor/skills/implement/templates/batch-report.md create mode 100644 .cursor/skills/new-task/SKILL.md create mode 100644 .cursor/skills/new-task/templates/task.md create mode 100644 .cursor/skills/plan/steps/00_prerequisites.md create mode 100644 .cursor/skills/plan/steps/01_artifact-management.md create mode 100644 .cursor/skills/plan/steps/02_solution-analysis.md create mode 100644 .cursor/skills/plan/steps/03_component-decomposition.md create mode 100644 .cursor/skills/plan/steps/04_review-risk.md create mode 100644 .cursor/skills/plan/steps/05_test-specifications.md create mode 100644 .cursor/skills/plan/steps/06_jira-epics.md create mode 100644 .cursor/skills/plan/steps/07_quality-checklist.md create mode 100644 .cursor/skills/plan/templates/blackbox-tests.md create mode 100644 .cursor/skills/plan/templates/performance-tests.md create mode 100644 .cursor/skills/plan/templates/resilience-tests.md create mode 100644 .cursor/skills/plan/templates/resource-limit-tests.md create mode 100644 .cursor/skills/plan/templates/security-tests.md create mode 
100644 .cursor/skills/plan/templates/test-data.md rename .cursor/skills/plan/templates/{e2e-test-infrastructure.md => test-environment.md} (56%) create mode 100644 .cursor/skills/plan/templates/traceability-matrix.md create mode 100644 .cursor/skills/problem/SKILL.md create mode 100644 .cursor/skills/research/references/comparison-frameworks.md create mode 100644 .cursor/skills/research/references/novelty-sensitivity.md create mode 100644 .cursor/skills/research/references/quality-checklists.md create mode 100644 .cursor/skills/research/references/source-tiering.md create mode 100644 .cursor/skills/research/references/usage-examples.md create mode 100644 .cursor/skills/research/steps/00_project-integration.md create mode 100644 .cursor/skills/research/steps/01_mode-a-initial-research.md create mode 100644 .cursor/skills/research/steps/02_mode-b-solution-assessment.md create mode 100644 .cursor/skills/research/steps/03_engine-investigation.md create mode 100644 .cursor/skills/research/steps/04_engine-analysis.md create mode 100644 .cursor/skills/retrospective/SKILL.md create mode 100644 .cursor/skills/retrospective/templates/retrospective-report.md delete mode 100644 .cursor/skills/security/evals/security-testing.yaml delete mode 100644 .cursor/skills/security/schemas/output.json delete mode 100644 .cursor/skills/security/scripts/validate-config.json create mode 100644 .cursor/skills/test-run/SKILL.md create mode 100644 .cursor/skills/test-spec/SKILL.md create mode 100644 .cursor/skills/test-spec/templates/expected-results.md create mode 100644 .cursor/skills/test-spec/templates/run-tests-script.md create mode 100644 .cursor/skills/ui-design/SKILL.md create mode 100644 .cursor/skills/ui-design/references/anti-patterns.md create mode 100644 .cursor/skills/ui-design/references/components.md create mode 100644 .cursor/skills/ui-design/references/design-vocabulary.md create mode 100644 .cursor/skills/ui-design/references/quality-checklist.md create mode 100644 .cursor/skills/ui-design/templates/design-system.md diff --git a/.DS_Store b/.DS_Store index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..f1ad969bc4d205bf80e0437a7a853ec8676dcf63 100644 GIT binary patch delta 335 zcmZoMXfc=|#>B`mF;Q%yo}wrd0|Nsi1A_nqLoq`>Ls3$BaY0hf#Kh(GAPIH`J%(h4 zQidXA$^0as%#NgjoXp}91A}XfOw25-Z0ziu?A+|J!5R7G!6k_$rNvH(MbRK$NPd1! z5{#Xg6qcD<9xou`oS#>cn3QQbfW znzzb=i}G^v^U{GbFkK7`8?*kgPBsu>+04zs%>fLqjfvlxC-aLavVy`zVX~o!@MaH@ GJAjHu~2NHo+1YW5HK<@2yA9#Vq@DZz_f~SGdl-A2T%b} -- _docs//` +4. **State file rollback**: when rolling back artifacts, also update `_docs/_autopilot_state.md` to reflect the rolled-back step (set it to `in_progress`, clear completed date) + +## Status Summary + +On every invocation, before executing any skill, present a status summary built from the state file (with folder scan fallback). Use the Status Summary Template from the active flow file (`flows/greenfield.md` or `flows/existing-code.md`). + +For re-entry (state file exists), also include: +- Key decisions from the state file's `Key Decisions` section +- Last session context from the `Last Session` section +- Any blockers from the `Blockers` section diff --git a/.cursor/skills/autopilot/state.md b/.cursor/skills/autopilot/state.md new file mode 100644 index 0000000..57e6444 --- /dev/null +++ b/.cursor/skills/autopilot/state.md @@ -0,0 +1,122 @@ +# Autopilot State Management + +## State File: `_docs/_autopilot_state.md` + +The autopilot persists its state to `_docs/_autopilot_state.md`. This file is the primary source of truth for re-entry. 
Folder scanning is the fallback when the state file doesn't exist. + +### Format + +```markdown +# Autopilot State + +## Current Step +flow: [greenfield | existing-code] +step: [1-10 for greenfield, 1-12 for existing-code, or "done"] +name: [step name from the active flow's Step Reference Table] +status: [not_started / in_progress / completed / skipped / failed] +sub_step: [optional — sub-skill internal step number + name if interrupted mid-step] +retry_count: [0-3 — number of consecutive auto-retry attempts for current step, reset to 0 on success] + +When updating `Current Step`, always write it as: + flow: existing-code ← active flow + step: N ← autopilot step (sequential integer) + sub_step: M ← sub-skill's own internal step/phase number + name + retry_count: 0 ← reset on new step or success; increment on each failed retry +Example: + flow: greenfield + step: 3 + name: Plan + status: in_progress + sub_step: 4 — Architecture Review & Risk Assessment + retry_count: 0 +Example (failed after 3 retries): + flow: existing-code + step: 2 + name: Test Spec + status: failed + sub_step: 1b — Test Case Generation + retry_count: 3 + +## Completed Steps + +| Step | Name | Completed | Key Outcome | +|------|------|-----------|-------------| +| 1 | [name] | [date] | [one-line summary] | +| 2 | [name] | [date] | [one-line summary] | +| ... | ... | ... | ... | + +## Key Decisions +- [decision 1: e.g. "Tech stack: Python + Rust for perf-critical, Postgres DB"] +- [decision N] + +## Last Session +date: [date] +ended_at: Step [N] [Name] — SubStep [M] [sub-step name] +reason: [completed step / session boundary / user paused / context limit] +notes: [any context for next session] + +## Retry Log +| Attempt | Step | Name | SubStep | Failure Reason | Timestamp | +|---------|------|------|---------|----------------|-----------| +| 1 | [step] | [name] | [sub_step] | [reason] | [date-time] | +| ... | ... | ... | ... | ... | ... | + +(Clear this table when the step succeeds or user resets. Append a row on each failed auto-retry.) + +## Blockers +- [blocker 1, if any] +- [none] +``` + +### State File Rules + +1. **Create** the state file on the very first autopilot invocation (after state detection determines Step 1) +2. **Update** the state file after every step completion, every session boundary, every BLOCKING gate confirmation, and every failed retry attempt +3. **Read** the state file as the first action on every invocation — before folder scanning +4. **Cross-check**: after reading the state file, verify against actual `_docs/` folder contents. If they disagree (e.g., state file says Step 3 but `_docs/02_document/architecture.md` already exists), trust the folder structure and update the state file to match +5. **Never delete** the state file. It accumulates history across the entire project lifecycle +6. **Retry tracking**: increment `retry_count` on each failed auto-retry; reset to `0` when the step succeeds or the user manually resets. If `retry_count` reaches 3, set `status: failed` and add an entry to `Blockers` +7. **Failed state on re-entry**: if the state file shows `status: failed` with `retry_count: 3`, do NOT auto-retry — present the blocker to the user and wait for their decision before proceeding + +## State Detection + +Read `_docs/_autopilot_state.md` first. If it exists and is consistent with the folder structure, use the `Current Step` from the state file. If the state file doesn't exist or is inconsistent, fall back to folder scanning. 
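A minimal sketch of this precedence, for illustration only — the helper callables and the simple key/value parsing are assumptions, not part of the autopilot contract:

```python
from pathlib import Path
from typing import Callable

STATE_FILE = Path("_docs/_autopilot_state.md")

def read_current_step(state_text: str) -> dict:
    """Parse the 'key: value' lines under '## Current Step'."""
    step, in_section = {}, False
    for line in state_text.splitlines():
        if line.startswith("## "):
            in_section = line.strip() == "## Current Step"
        elif in_section and ":" in line:
            key, _, value = line.partition(":")
            step[key.strip()] = value.strip()
    return step

def detect_state(is_consistent: Callable[[dict], bool],
                 scan_folders: Callable[[], dict]) -> dict:
    """State file wins when present and consistent; folder scan is the fallback."""
    if STATE_FILE.exists():
        step = read_current_step(STATE_FILE.read_text())
        if step and is_consistent(step):   # cross-check against actual _docs/ contents
            return step
    return scan_folders()
```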
+ +### Folder Scan Rules (fallback) + +Scan `_docs/` to determine the current workflow position. The detection rules are defined in each flow file (`flows/greenfield.md` and `flows/existing-code.md`). Check the existing-code flow first (Step 1 detection), then greenfield flow rules. First match wins. + +## Re-Entry Protocol + +When the user invokes `/autopilot` and work already exists: + +1. Read `_docs/_autopilot_state.md` +2. Cross-check against `_docs/` folder structure +3. Present Status Summary with context from state file (key decisions, last session, blockers) +4. If the detected step has a sub-skill with built-in resumability (plan, decompose, implement, deploy all do), the sub-skill handles mid-step recovery +5. Continue execution from detected state + +## Session Boundaries + +After any decompose/planning step completes, **do not auto-chain to implement**. Instead: + +1. Update state file: mark the step as completed, set current step to the next implement step with status `not_started` + - Existing-code flow: After Step 3 (Decompose Tests) → set current step to 4 (Implement Tests) + - Existing-code flow: After Step 7 (New Task) → set current step to 8 (Implement) + - Greenfield flow: After Step 5 (Decompose) → set current step to 6 (Implement) +2. Write `Last Session` section: `reason: session boundary`, `notes: Decompose complete, implementation ready` +3. Present a summary: number of tasks, estimated batches, total complexity points +4. Use Choose format: + +``` +══════════════════════════════════════ + DECISION REQUIRED: Decompose complete — start implementation? +══════════════════════════════════════ + A) Start a new conversation for implementation (recommended for context freshness) + B) Continue implementation in this conversation +══════════════════════════════════════ + Recommendation: A — implementation is the longest phase, fresh context helps +══════════════════════════════════════ +``` + +These are the only hard session boundaries. All other transitions auto-chain. diff --git a/.cursor/skills/code-review/SKILL.md b/.cursor/skills/code-review/SKILL.md new file mode 100644 index 0000000..041013a --- /dev/null +++ b/.cursor/skills/code-review/SKILL.md @@ -0,0 +1,193 @@ +--- +name: code-review +description: | + Multi-phase code review against task specs with structured findings output. + 6-phase workflow: context loading, spec compliance, code quality, security quick-scan, performance scan, cross-task consistency. + Produces a structured report with severity-ranked findings and a PASS/FAIL/PASS_WITH_WARNINGS verdict. + Invoked by /implement skill after each batch, or manually. + Trigger phrases: + - "code review", "review code", "review implementation" + - "check code quality", "review against specs" +category: review +tags: [code-review, quality, security-scan, performance, SOLID] +disable-model-invocation: true +--- + +# Code Review + +Multi-phase code review that verifies implementation against task specs, checks code quality, and produces structured findings. 
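The dedup key and severity ranking defined in the principles below fit in a few lines. A minimal sketch, assuming findings arrive as dicts with `severity`, `file`, `line`, and `title` fields (the field names are illustrative):

```python
SEVERITY_ORDER = {"Critical": 0, "High": 1, "Medium": 2, "Low": 3}

def dedupe_and_rank(findings: list[dict]) -> list[dict]:
    """Report each {file}:{line}:{title} once, sorted Critical > High > Medium > Low."""
    unique: dict[str, dict] = {}
    for f in findings:
        key = f"{f['file']}:{f['line']}:{f['title']}"
        unique.setdefault(key, f)          # first occurrence wins
    return sorted(unique.values(), key=lambda f: SEVERITY_ORDER[f["severity"]])

def verdict(findings: list[dict]) -> str:
    """FAIL on any Critical/High finding; warnings otherwise; PASS when clean."""
    if any(f["severity"] in ("Critical", "High") for f in findings):
        return "FAIL"
    return "PASS_WITH_WARNINGS" if findings else "PASS"
```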
+ +## Core Principles + +- **Understand intent first**: read the task specs before reviewing code — know what it should do before judging how +- **Structured output**: every finding has severity, category, location, description, and suggestion +- **Deduplicate**: same issue at the same location is reported once using `{file}:{line}:{title}` as key +- **Severity-ranked**: findings sorted Critical > High > Medium > Low +- **Verdict-driven**: clear PASS/FAIL/PASS_WITH_WARNINGS drives automation decisions + +## Input + +- List of task spec files that were just implemented (paths to `[JIRA-ID]_[short_name].md`) +- Changed files (detected via `git diff` or provided by the `/implement` skill) +- Project context: `_docs/00_problem/restrictions.md`, `_docs/01_solution/solution.md` + +## Phase 1: Context Loading + +Before reviewing code, build understanding of intent: + +1. Read each task spec — acceptance criteria, scope, constraints, dependencies +2. Read project restrictions and solution overview +3. Map which changed files correspond to which task specs +4. Understand what the code is supposed to do before judging how it does it + +## Phase 2: Spec Compliance Review + +For each task, verify implementation satisfies every acceptance criterion: + +- Walk through each AC (Given/When/Then) and trace it in the code +- Check that unit tests cover each AC +- Check that blackbox tests exist where specified in the task spec +- Flag any AC that is not demonstrably satisfied as a **Spec-Gap** finding (severity: High) +- Flag any scope creep (implementation beyond what the spec asked for) as a **Scope** finding (severity: Low) + +## Phase 3: Code Quality Review + +Check implemented code against quality standards: + +- **SOLID principles** — single responsibility, open/closed, Liskov, interface segregation, dependency inversion +- **Error handling** — consistent strategy, no bare catch/except, meaningful error messages +- **Naming** — clear intent, follows project conventions +- **Complexity** — functions longer than 50 lines or cyclomatic complexity > 10 +- **DRY** — duplicated logic across files +- **Test quality** — tests assert meaningful behavior, not just "no error thrown" +- **Dead code** — unused imports, unreachable branches + +## Phase 4: Security Quick-Scan + +Lightweight security checks (defer deep analysis to the `/security` skill): + +- SQL injection via string interpolation +- Command injection (subprocess with shell=True, exec, eval) +- Hardcoded secrets, API keys, passwords +- Missing input validation on external inputs +- Sensitive data in logs or error messages +- Insecure deserialization + +## Phase 5: Performance Scan + +Check for common performance anti-patterns: + +- O(n^2) or worse algorithms where O(n) is possible +- N+1 query patterns +- Unbounded data fetching (missing pagination/limits) +- Blocking I/O in async contexts +- Unnecessary memory copies or allocations in hot paths + +## Phase 6: Cross-Task Consistency + +When multiple tasks were implemented in the same batch: + +- Interfaces between tasks are compatible (method signatures, DTOs match) +- No conflicting patterns (e.g., one task uses repository pattern, another does raw SQL) +- Shared code is not duplicated across task implementations +- Dependencies declared in task specs are properly wired + +## Output Format + +Produce a structured report with findings deduplicated and sorted by severity: + +```markdown +# Code Review Report + +**Batch**: [task list] +**Date**: [YYYY-MM-DD] +**Verdict**: PASS | PASS_WITH_WARNINGS | 
FAIL + +## Findings + +| # | Severity | Category | File:Line | Title | +|---|----------|----------|-----------|-------| +| 1 | Critical | Security | src/api/auth.py:42 | SQL injection via f-string | +| 2 | High | Spec-Gap | src/service/orders.py | AC-3 not satisfied | + +### Finding Details + +**F1: SQL injection via f-string** (Critical / Security) +- Location: `src/api/auth.py:42` +- Description: User input interpolated directly into SQL query +- Suggestion: Use parameterized query via bind parameters +- Task: 04_auth_service + +**F2: AC-3 not satisfied** (High / Spec-Gap) +- Location: `src/service/orders.py` +- Description: AC-3 requires order total recalculation on item removal, but no such logic exists +- Suggestion: Add recalculation in remove_item() method +- Task: 07_order_processing +``` + +## Severity Definitions + +| Severity | Meaning | Blocks? | +|----------|---------|---------| +| Critical | Security vulnerability, data loss, crash | Yes — verdict FAIL | +| High | Spec gap, logic bug, broken test | Yes — verdict FAIL | +| Medium | Performance issue, maintainability concern, missing validation | No — verdict PASS_WITH_WARNINGS | +| Low | Style, minor improvement, scope creep | No — verdict PASS_WITH_WARNINGS | + +## Category Values + +Bug, Spec-Gap, Security, Performance, Maintainability, Style, Scope + +## Verdict Logic + +- **FAIL**: any Critical or High finding exists +- **PASS_WITH_WARNINGS**: only Medium or Low findings +- **PASS**: no findings + +## Integration with /implement + +The `/implement` skill invokes this skill after each batch completes: + +1. Collects changed files from all implementer agents in the batch +2. Passes task spec paths + changed files to this skill +3. If verdict is FAIL — presents findings to user (BLOCKING), user fixes or confirms +4. If verdict is PASS or PASS_WITH_WARNINGS — proceeds automatically (findings shown as info) + +## Integration Contract + +### Inputs (provided by the implement skill) + +| Input | Type | Source | Required | +|-------|------|--------|----------| +| `task_specs` | list of file paths | Task `.md` files from `_docs/02_tasks/` for the current batch | Yes | +| `changed_files` | list of file paths | Files modified by implementer agents (from `git diff` or agent reports) | Yes | +| `batch_number` | integer | Current batch number (for report naming) | Yes | +| `project_restrictions` | file path | `_docs/00_problem/restrictions.md` | If exists | +| `solution_overview` | file path | `_docs/01_solution/solution.md` | If exists | + +### Invocation Pattern + +The implement skill invokes code-review by: + +1. Reading `.cursor/skills/code-review/SKILL.md` +2. Providing the inputs above as context (read the files, pass content to the review phases) +3. Executing all 6 phases sequentially +4. 
Consuming the verdict from the output + +### Outputs (returned to the implement skill) + +| Output | Type | Description | +|--------|------|-------------| +| `verdict` | `PASS` / `PASS_WITH_WARNINGS` / `FAIL` | Drives the implement skill's auto-fix gate | +| `findings` | structured list | Each finding has: severity, category, file:line, title, description, suggestion, task reference | +| `critical_count` | integer | Number of Critical findings | +| `high_count` | integer | Number of High findings | +| `report_path` | file path | `_docs/03_implementation/reviews/batch_[NN]_review.md` | + +### Report Persistence + +Save the review report to `_docs/03_implementation/reviews/batch_[NN]_review.md` (create the `reviews/` directory if it does not exist). The report uses the Output Format defined above. + +The implement skill uses `verdict` to decide: +- `PASS` / `PASS_WITH_WARNINGS` → proceed to commit +- `FAIL` → enter auto-fix loop (up to 2 attempts), then escalate to user diff --git a/.cursor/skills/decompose/SKILL.md b/.cursor/skills/decompose/SKILL.md index b312e6d..ac1cb2c 100644 --- a/.cursor/skills/decompose/SKILL.md +++ b/.cursor/skills/decompose/SKILL.md @@ -1,25 +1,30 @@ --- name: decompose description: | - Decompose planned components into atomic implementable features with bootstrap structure plan. - 4-step workflow: bootstrap structure plan, feature decomposition, cross-component verification, and Jira task creation. - Supports project mode (_docs/ structure), single component mode, and standalone mode (@file.md). + Decompose planned components into atomic implementable tasks with bootstrap structure plan. + 4-step workflow: bootstrap structure plan, component task decomposition, blackbox test task decomposition, and cross-task verification. + Supports full decomposition (_docs/ structure), single component mode, and tests-only mode. Trigger phrases: - "decompose", "decompose features", "feature decomposition" - "task decomposition", "break down components" - "prepare for implementation" + - "decompose tests", "test decomposition" +category: build +tags: [decomposition, tasks, dependencies, jira, implementation-prep] disable-model-invocation: true --- -# Feature Decomposition +# Task Decomposition -Decompose planned components into atomic, implementable feature specs with a bootstrap structure plan through a systematic workflow. +Decompose planned components into atomic, implementable task specs with a bootstrap structure plan through a systematic workflow. All tasks are named with their Jira ticket ID prefix in a flat directory. 
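+The Jira-ID rename flow is mechanical. A minimal sketch, assuming the temporary file uses a `[##]_` prefix (the Jira ID here is an example):
+
+```python
+from pathlib import Path
+
+def rename_to_jira_id(task_file: Path, jira_id: str) -> Path:
+    """'01_initial_structure.md' + 'AZ-42' -> 'AZ-42_initial_structure.md'."""
+    short_name = task_file.name.split("_", 1)[1]   # drop the temporary numeric prefix
+    target = task_file.with_name(f"{jira_id}_{short_name}")
+    task_file.rename(target)
+    return target
+```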
## Core Principles -- **Atomic features**: each feature does one thing; if it exceeds 5 complexity points, split it +- **Atomic tasks**: each task does one thing; if it exceeds 5 complexity points, split it - **Behavioral specs, not implementation plans**: describe what the system should do, not how to build it -- **Save immediately**: write artifacts to disk after each component; never accumulate unsaved work +- **Flat structure**: all tasks are Jira-ID-prefixed files in TASKS_DIR — no component subdirectories +- **Save immediately**: write artifacts to disk after each task; never accumulate unsaved work +- **Jira inline**: create Jira ticket immediately after writing each task file - **Ask, don't assume**: when requirements are ambiguous, ask the user before proceeding - **Plan, don't code**: this workflow produces documents and Jira tasks, never implementation code @@ -27,27 +32,26 @@ Decompose planned components into atomic, implementable feature specs with a boo Determine the operating mode based on invocation before any other logic runs. -**Full project mode** (no explicit input file provided): -- PLANS_DIR: `_docs/02_plans/` +**Default** (no explicit input file provided): +- DOCUMENT_DIR: `_docs/02_document/` - TASKS_DIR: `_docs/02_tasks/` -- Reads from: `_docs/00_problem/`, `_docs/01_solution/`, PLANS_DIR -- Runs Step 1 (bootstrap) + Step 2 (all components) + Step 3 (cross-verification) + Step 4 (Jira) +- Reads from: `_docs/00_problem/`, `_docs/01_solution/`, DOCUMENT_DIR +- Runs Step 1 (bootstrap) + Step 2 (all components) + Step 3 (blackbox tests) + Step 4 (cross-verification) -**Single component mode** (provided file is within `_docs/02_plans/` and inside a `components/` subdirectory): -- PLANS_DIR: `_docs/02_plans/` +**Single component mode** (provided file is within `_docs/02_document/` and inside a `components/` subdirectory): +- DOCUMENT_DIR: `_docs/02_document/` - TASKS_DIR: `_docs/02_tasks/` -- Derive ``, component number, and component name from the file path +- Derive component number and component name from the file path - Ask user for the parent Epic ID -- Runs Step 2 (that component only) + Step 4 (Jira) -- Overwrites existing feature files in that component's TASKS_DIR subdirectory +- Runs Step 2 (that component only, appending to existing task numbering) -**Standalone mode** (explicit input file provided, not within `_docs/02_plans/`): -- INPUT_FILE: the provided file (treated as a component spec) -- Derive `` from the input filename (without extension) -- TASKS_DIR: `_standalone//tasks/` -- Guardrails relaxed: only INPUT_FILE must exist and be non-empty -- Ask user for the parent Epic ID -- Runs Step 2 (that component only) + Step 4 (Jira) +**Tests-only mode** (provided file/directory is within `tests/`, or `DOCUMENT_DIR/tests/` exists and input explicitly requests test decomposition): +- DOCUMENT_DIR: `_docs/02_document/` +- TASKS_DIR: `_docs/02_tasks/` +- TESTS_DIR: `DOCUMENT_DIR/tests/` +- Reads from: `_docs/00_problem/`, `_docs/01_solution/`, TESTS_DIR +- Runs Step 1t (test infrastructure bootstrap) + Step 3 (blackbox test decomposition) + Step 4 (cross-verification against test coverage) +- Skips Step 1 (project bootstrap) and Step 2 (component decomposition) — the codebase already exists Announce the detected mode and resolved paths to the user before proceeding. @@ -55,17 +59,18 @@ Announce the detected mode and resolved paths to the user before proceeding. 
### Required Files -**Full project mode:** +**Default:** | File | Purpose | |------|---------| | `_docs/00_problem/problem.md` | Problem description and context | -| `_docs/00_problem/restrictions.md` | Constraints and limitations (if available) | -| `_docs/00_problem/acceptance_criteria.md` | Measurable acceptance criteria (if available) | +| `_docs/00_problem/restrictions.md` | Constraints and limitations | +| `_docs/00_problem/acceptance_criteria.md` | Measurable acceptance criteria | | `_docs/01_solution/solution.md` | Finalized solution | -| `PLANS_DIR//architecture.md` | Architecture from plan skill | -| `PLANS_DIR//system-flows.md` | System flows from plan skill | -| `PLANS_DIR//components/[##]_[name]/description.md` | Component specs from plan skill | +| `DOCUMENT_DIR/architecture.md` | Architecture from plan skill | +| `DOCUMENT_DIR/system-flows.md` | System flows from plan skill | +| `DOCUMENT_DIR/components/[##]_[name]/description.md` | Component specs from plan skill | +| `DOCUMENT_DIR/tests/` | Blackbox test specs from plan skill | **Single component mode:** @@ -74,64 +79,70 @@ Announce the detected mode and resolved paths to the user before proceeding. | The provided component `description.md` | Component spec to decompose | | Corresponding `tests.md` in the same directory (if available) | Test specs for context | -**Standalone mode:** +**Tests-only mode:** | File | Purpose | |------|---------| -| INPUT_FILE (the provided file) | Component spec to decompose | +| `TESTS_DIR/environment.md` | Test environment specification (Docker services, networks, volumes) | +| `TESTS_DIR/test-data.md` | Test data management (seed data, mocks, isolation) | +| `TESTS_DIR/blackbox-tests.md` | Blackbox functional scenarios (positive + negative) | +| `TESTS_DIR/performance-tests.md` | Performance test scenarios | +| `TESTS_DIR/resilience-tests.md` | Resilience test scenarios | +| `TESTS_DIR/security-tests.md` | Security test scenarios | +| `TESTS_DIR/resource-limit-tests.md` | Resource limit test scenarios | +| `TESTS_DIR/traceability-matrix.md` | AC/restriction coverage mapping | +| `_docs/00_problem/problem.md` | Problem context | +| `_docs/00_problem/restrictions.md` | Constraints for test design | +| `_docs/00_problem/acceptance_criteria.md` | Acceptance criteria being verified | ### Prerequisite Checks (BLOCKING) -**Full project mode:** -1. At least one `/` directory exists under PLANS_DIR with `architecture.md` and `components/` — **STOP if missing** -2. If multiple topics exist, ask user which one to decompose -3. Create TASKS_DIR if it does not exist -4. If `TASKS_DIR//` already exists, ask user: **resume from last checkpoint or start fresh?** +**Default:** +1. DOCUMENT_DIR contains `architecture.md` and `components/` — **STOP if missing** +2. Create TASKS_DIR if it does not exist +3. If TASKS_DIR already contains task files, ask user: **resume from last checkpoint or start fresh?** **Single component mode:** 1. The provided component file exists and is non-empty — **STOP if missing** -2. Create the component's subdirectory under TASKS_DIR if it does not exist -**Standalone mode:** -1. INPUT_FILE exists and is non-empty — **STOP if missing** -2. Create TASKS_DIR if it does not exist +**Tests-only mode:** +1. `TESTS_DIR/blackbox-tests.md` exists and is non-empty — **STOP if missing** +2. `TESTS_DIR/environment.md` exists — **STOP if missing** +3. Create TASKS_DIR if it does not exist +4. 
If TASKS_DIR already contains task files, ask user: **resume from last checkpoint or start fresh?** ## Artifact Management ### Directory Structure ``` -TASKS_DIR// -├── initial_structure.md (Step 1, full mode only) -├── cross_dependencies.md (Step 3, full mode only) -├── SUMMARY.md (final) -├── [##]_[component_name]/ -│ ├── [##].[##]_feature_[feature_name].md -│ ├── [##].[##]_feature_[feature_name].md -│ └── ... -├── [##]_[component_name]/ -│ └── ... -└── ... +TASKS_DIR/ +├── [JIRA-ID]_initial_structure.md +├── [JIRA-ID]_[short_name].md +├── [JIRA-ID]_[short_name].md +├── ... +└── _dependencies_table.md ``` +**Naming convention**: Each task file is initially saved with a temporary numeric prefix (`[##]_[short_name].md`). After creating the Jira ticket, rename the file to use the Jira ticket ID as prefix (`[JIRA-ID]_[short_name].md`). For example: `01_initial_structure.md` → `AZ-42_initial_structure.md`. + ### Save Timing | Step | Save immediately after | Filename | |------|------------------------|----------| -| Step 1 | Bootstrap structure plan complete | `initial_structure.md` | -| Step 2 | Each component decomposed | `[##]_[name]/[##].[##]_feature_[feature_name].md` | -| Step 3 | Cross-component verification complete | `cross_dependencies.md` | -| Step 4 | Jira tasks created | Jira via MCP | -| Final | All steps complete | `SUMMARY.md` | +| Step 1 | Bootstrap structure plan complete + Jira ticket created + file renamed | `[JIRA-ID]_initial_structure.md` | +| Step 1t | Test infrastructure bootstrap complete + Jira ticket created + file renamed | `[JIRA-ID]_test_infrastructure.md` | +| Step 2 | Each component task decomposed + Jira ticket created + file renamed | `[JIRA-ID]_[short_name].md` | +| Step 3 | Each blackbox test task decomposed + Jira ticket created + file renamed | `[JIRA-ID]_[short_name].md` | +| Step 4 | Cross-task verification complete | `_dependencies_table.md` | ### Resumability -If `TASKS_DIR//` already contains artifacts: +If TASKS_DIR already contains task files: -1. List existing files and match them to the save timing table -2. Identify the last completed component based on which feature files exist -3. Resume from the next incomplete component -4. Inform the user which components are being skipped +1. List existing `*_*.md` files (excluding `_dependencies_table.md`) and count them +2. Resume numbering from the next number (for temporary numeric prefix before Jira rename) +3. Inform the user which tasks already exist and are being skipped ## Progress Tracking @@ -139,143 +150,240 @@ At the start of execution, create a TodoWrite with all applicable steps. Update ## Workflow -### Step 1: Bootstrap Structure Plan (full project mode only) +### Step 1t: Test Infrastructure Bootstrap (tests-only mode only) + +**Role**: Professional Quality Assurance Engineer +**Goal**: Produce `01_test_infrastructure.md` — the first task describing the test project scaffold +**Constraints**: This is a plan document, not code. The `/implement` skill executes it. + +1. Read `TESTS_DIR/environment.md` and `TESTS_DIR/test-data.md` +2. Read problem.md, restrictions.md, acceptance_criteria.md for domain context +3. 
Document the test infrastructure plan using `templates/test-infrastructure-task.md` + +The test infrastructure bootstrap must include: +- Test project folder layout (`e2e/` directory structure) +- Mock/stub service definitions for each external dependency +- `docker-compose.test.yml` structure from environment.md +- Test runner configuration (framework, plugins, fixtures) +- Test data fixture setup from test-data.md seed data sets +- Test reporting configuration (format, output path) +- Data isolation strategy + +**Self-verification**: +- [ ] Every external dependency from environment.md has a mock service defined +- [ ] Docker Compose structure covers all services from environment.md +- [ ] Test data fixtures cover all seed data sets from test-data.md +- [ ] Test runner configuration matches the consumer app tech stack from environment.md +- [ ] Data isolation strategy is defined + +**Save action**: Write `01_test_infrastructure.md` (temporary numeric name) + +**Jira action**: Create a Jira ticket for this task under the "Blackbox Tests" epic. Write the Jira ticket ID and Epic ID back into the task header. + +**Rename action**: Rename the file from `01_test_infrastructure.md` to `[JIRA-ID]_test_infrastructure.md`. Update the **Task** field inside the file to match the new filename. + +**BLOCKING**: Present test infrastructure plan summary to user. Do NOT proceed until user confirms. + +--- + +### Step 1: Bootstrap Structure Plan (default mode only) **Role**: Professional software architect -**Goal**: Produce `initial_structure.md` describing the project skeleton for implementation -**Constraints**: This is a plan document, not code. The `implement-initial` command executes it. +**Goal**: Produce `01_initial_structure.md` — the first task describing the project skeleton +**Constraints**: This is a plan document, not code. The `/implement` skill executes it. -1. Read architecture.md, all component specs, and system-flows.md from PLANS_DIR +1. Read architecture.md, all component specs, system-flows.md, data_model.md, and `deployment/` from DOCUMENT_DIR 2. Read problem, solution, and restrictions from `_docs/00_problem/` and `_docs/01_solution/` 3. Research best implementation patterns for the identified tech stack -4. Document the structure plan using `templates/initial-structure.md` +4. 
Document the structure plan using `templates/initial-structure-task.md` + +The bootstrap structure plan must include: +- Project folder layout with all component directories +- Shared models, interfaces, and DTOs +- Dockerfile per component (multi-stage, non-root, health checks, pinned base images) +- `docker-compose.yml` for local development (all components + database + dependencies) +- `docker-compose.test.yml` for blackbox test environment (blackbox test runner) +- `.dockerignore` +- CI/CD pipeline file (`.github/workflows/ci.yml` or `azure-pipelines.yml`) with stages from `deployment/ci_cd_pipeline.md` +- Database migration setup and initial seed data scripts +- Observability configuration: structured logging setup, health check endpoints (`/health/live`, `/health/ready`), metrics endpoint (`/metrics`) +- Environment variable documentation (`.env.example`) +- Test structure with unit and blackbox test locations **Self-verification**: - [ ] All components have corresponding folders in the layout - [ ] All inter-component interfaces have DTOs defined -- [ ] CI/CD stages cover build, lint, test, security, deploy +- [ ] Dockerfile defined for each component +- [ ] `docker-compose.yml` covers all components and dependencies +- [ ] `docker-compose.test.yml` enables blackbox testing +- [ ] CI/CD pipeline file defined with lint, test, security, build, deploy stages +- [ ] Database migration setup included +- [ ] Health check endpoints specified for each service +- [ ] Structured logging configuration included +- [ ] `.env.example` with all required environment variables - [ ] Environment strategy covers dev, staging, production -- [ ] Test structure includes unit and integration test locations +- [ ] Test structure includes unit and blackbox test locations -**Save action**: Write `initial_structure.md` +**Save action**: Write `01_initial_structure.md` (temporary numeric name) + +**Jira action**: Create a Jira ticket for this task under the "Bootstrap & Initial Structure" epic. Write the Jira ticket ID and Epic ID back into the task header. + +**Rename action**: Rename the file from `01_initial_structure.md` to `[JIRA-ID]_initial_structure.md` (e.g., `AZ-42_initial_structure.md`). Update the **Task** field inside the file to match the new filename. **BLOCKING**: Present structure plan summary to user. Do NOT proceed until user confirms. --- -### Step 2: Feature Decomposition (all modes) +### Step 2: Task Decomposition (default and single component modes) **Role**: Professional software architect -**Goal**: Decompose each component into atomic, implementable feature specs +**Goal**: Decompose each component into atomic, implementable task specs — numbered sequentially starting from 02 **Constraints**: Behavioral specs only — describe what, not how. No implementation code. +**Numbering**: Tasks are numbered sequentially across all components in dependency order. Start from 02 (01 is initial_structure). In single component mode, start from the next available number in TASKS_DIR. + +**Component ordering**: Process components in dependency order — foundational components first (shared models, database), then components that depend on them. + For each component (or the single provided component): 1. Read the component's `description.md` and `tests.md` (if available) -2. Decompose into atomic features; create only 1 feature if the component is simple or atomic -3. Split into multiple features only when it is necessary and would be easier to implement -4. 
Do not create features of other components — only features of the current component -5. Each feature should be atomic, containing 0 APIs or a list of semantically connected APIs -6. Write each feature spec using `templates/feature-spec.md` -7. Estimate complexity per feature (1, 2, 3, 5 points); no feature should exceed 5 points — split if it does -8. Note feature dependencies (within component and cross-component) +2. Decompose into atomic tasks; create only 1 task if the component is simple or atomic +3. Split into multiple tasks only when it is necessary and would be easier to implement +4. Do not create tasks for other components — only tasks for the current component +5. Each task should be atomic, containing 0 APIs or a list of semantically connected APIs +6. Write each task spec using `templates/task.md` +7. Estimate complexity per task (1, 2, 3, 5 points); no task should exceed 5 points — split if it does +8. Note task dependencies (referencing Jira IDs of already-created dependency tasks, e.g., `AZ-42_initial_structure`) +9. **Immediately after writing each task file**: create a Jira ticket, link it to the component's epic, write the Jira ticket ID and Epic ID back into the task header, then rename the file from `[##]_[short_name].md` to `[JIRA-ID]_[short_name].md`. **Self-verification** (per component): -- [ ] Every feature is atomic (single concern) -- [ ] No feature exceeds 5 complexity points -- [ ] Feature dependencies are noted -- [ ] Features cover all interfaces defined in the component spec -- [ ] No features duplicate work from other components +- [ ] Every task is atomic (single concern) +- [ ] No task exceeds 5 complexity points +- [ ] Task dependencies reference correct Jira IDs +- [ ] Tasks cover all interfaces defined in the component spec +- [ ] No tasks duplicate work from other components +- [ ] Every task has a Jira ticket linked to the correct epic -**Save action**: Write each `[##]_[name]/[##].[##]_feature_[feature_name].md` +**Save action**: Write each `[##]_[short_name].md` (temporary numeric name), create Jira ticket inline, then rename the file to `[JIRA-ID]_[short_name].md`. Update the **Task** field inside the file to match the new filename. Update **Dependencies** references in the file to use Jira IDs of the dependency tasks. --- -### Step 3: Cross-Component Verification (full project mode only) +### Step 3: Blackbox Test Task Decomposition (default and tests-only modes) + +**Role**: Professional Quality Assurance Engineer +**Goal**: Decompose blackbox test specs into atomic, implementable task specs +**Constraints**: Behavioral specs only — describe what, not how. No test code. + +**Numbering**: +- In default mode: continue sequential numbering from where Step 2 left off. +- In tests-only mode: start from 02 (01 is the test infrastructure bootstrap from Step 1t). + +1. Read all test specs from `DOCUMENT_DIR/tests/` (`blackbox-tests.md`, `performance-tests.md`, `resilience-tests.md`, `security-tests.md`, `resource-limit-tests.md`) +2. Group related test scenarios into atomic tasks (e.g., one task per test category or per component under test) +3. Each task should reference the specific test scenarios it implements and the environment/test-data specs +4. Dependencies: + - In default mode: blackbox test tasks depend on the component implementation tasks they exercise + - In tests-only mode: blackbox test tasks depend on the test infrastructure bootstrap task (Step 1t) +5. Write each task spec using `templates/task.md` +6. 
Estimate complexity per task (1, 2, 3, 5 points); no task should exceed 5 points — split if it does +7. Note task dependencies (referencing Jira IDs of already-created dependency tasks) +8. **Immediately after writing each task file**: create a Jira ticket under the "Blackbox Tests" epic, write the Jira ticket ID and Epic ID back into the task header, then rename the file from `[##]_[short_name].md` to `[JIRA-ID]_[short_name].md`. + +**Self-verification**: +- [ ] Every scenario from `tests/blackbox-tests.md` is covered by a task +- [ ] Every scenario from `tests/performance-tests.md`, `tests/resilience-tests.md`, `tests/security-tests.md`, and `tests/resource-limit-tests.md` is covered by a task +- [ ] No task exceeds 5 complexity points +- [ ] Dependencies correctly reference the dependency tasks (component tasks in default mode, test infrastructure in tests-only mode) +- [ ] Every task has a Jira ticket linked to the "Blackbox Tests" epic + +**Save action**: Write each `[##]_[short_name].md` (temporary numeric name), create Jira ticket inline, then rename to `[JIRA-ID]_[short_name].md`. + +--- + +### Step 4: Cross-Task Verification (default and tests-only modes) **Role**: Professional software architect and analyst -**Goal**: Verify feature consistency across all components -**Constraints**: Review step — fix gaps found, do not add new features +**Goal**: Verify task consistency and produce `_dependencies_table.md` +**Constraints**: Review step — fix gaps found, do not add new tasks -1. Verify feature dependencies across all components are consistent -2. Check no gaps: every interface in architecture.md has features covering it -3. Check no overlaps: features don't duplicate work across components -4. Produce dependency matrix showing cross-component feature dependencies -5. Determine recommended implementation order based on dependencies +1. Verify task dependencies across all tasks are consistent +2. Check no gaps: + - In default mode: every interface in architecture.md has tasks covering it + - In tests-only mode: every test scenario in `traceability-matrix.md` is covered by a task +3. Check no overlaps: tasks don't duplicate work +4. Check no circular dependencies in the task graph +5. Produce `_dependencies_table.md` using `templates/dependencies-table.md` **Self-verification**: -- [ ] Every architecture interface is covered by at least one feature -- [ ] No circular feature dependencies across components -- [ ] Cross-component dependencies are explicitly noted in affected feature specs -**Save action**: Write `cross_dependencies.md` +Default mode: +- [ ] Every architecture interface is covered by at least one task +- [ ] No circular dependencies in the task graph +- [ ] Cross-component dependencies are explicitly noted in affected task specs +- [ ] `_dependencies_table.md` contains every task with correct dependencies -**BLOCKING**: Present cross-component summary to user. Do NOT proceed until user confirms. +Tests-only mode: +- [ ] Every test scenario from traceability-matrix.md "Covered" entries has a corresponding task +- [ ] No circular dependencies in the task graph +- [ ] Test task dependencies reference the test infrastructure bootstrap +- [ ] `_dependencies_table.md` contains every task with correct dependencies + +**Save action**: Write `_dependencies_table.md` + +**BLOCKING**: Present dependency summary to user. Do NOT proceed until user confirms. 
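+A minimal sketch of the cycle check and batch derivation, assuming the table rows are already parsed into a task-to-dependencies mapping (the parsing itself is omitted):
+
+```python
+def dependency_batches(deps: dict[str, set[str]]) -> list[list[str]]:
+    """Kahn-style layering: each batch holds tasks whose dependencies are all met.
+    Raises on a cycle — exactly the condition Step 4 must reject."""
+    remaining = {task: set(d) for task, d in deps.items()}
+    batches: list[list[str]] = []
+    while remaining:
+        ready = sorted(t for t, d in remaining.items() if not d)
+        if not ready:
+            raise ValueError(f"Circular dependency among: {sorted(remaining)}")
+        batches.append(ready)
+        for t in ready:
+            del remaining[t]
+        for d in remaining.values():
+            d.difference_update(ready)
+    return batches
+```
+
+The `/implement` skill can consume the same layering when it computes parallel batches from `_dependencies_table.md`.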
--- -### Step 4: Jira Tasks (all modes) - -**Role**: Professional product manager -**Goal**: Create Jira tasks from feature specs under the appropriate parent epics -**Constraints**: Be concise — fewer words with the same meaning is better - -1. For each feature spec, create a Jira task following the parsing rules and field mapping from `gen_jira_task_and_branch.md` (skip branch creation and file renaming — those happen during implementation) -2. In full mode: search Jira for epics matching component names/labels to find parent epic IDs -3. In single component mode: use the Epic ID obtained during context resolution -4. In standalone mode: use the Epic ID obtained during context resolution -5. Do NOT create git branches or rename files — that happens during implementation - -**Self-verification**: -- [ ] Every feature has a corresponding Jira task -- [ ] Every task is linked to the correct parent epic -- [ ] Task descriptions match feature spec content - -**Save action**: Jira tasks created via MCP - ---- - -## Summary Report - -After all steps complete, write `SUMMARY.md` using `templates/summary.md` as structure. - ## Common Mistakes - **Coding during decomposition**: this workflow produces specs, never code -- **Over-splitting**: don't create many features if the component is simple — 1 feature is fine -- **Features exceeding 5 points**: split them; no feature should be too complex for a single task -- **Cross-component features**: each feature belongs to exactly one component +- **Over-splitting**: don't create many tasks if the component is simple — 1 task is fine +- **Tasks exceeding 5 points**: split them; no task should be too complex for a single implementer +- **Cross-component tasks**: each task belongs to exactly one component - **Skipping BLOCKING gates**: never proceed past a BLOCKING marker without user confirmation - **Creating git branches**: branch creation is an implementation concern, not a decomposition one +- **Creating component subdirectories**: all tasks go flat in TASKS_DIR +- **Forgetting Jira**: every task must have a Jira ticket created inline — do not defer to a separate step +- **Forgetting to rename**: after Jira ticket creation, always rename the file from numeric prefix to Jira ID prefix ## Escalation Rules | Situation | Action | |-----------|--------| | Ambiguous component boundaries | ASK user | -| Feature complexity exceeds 5 points after splitting | ASK user | -| Missing component specs in PLANS_DIR | ASK user | +| Task complexity exceeds 5 points after splitting | ASK user | +| Missing component specs in DOCUMENT_DIR | ASK user | | Cross-component dependency conflict | ASK user | | Jira epic not found for a component | ASK user for Epic ID | -| Component naming | PROCEED, confirm at next BLOCKING gate | +| Task naming | PROCEED, confirm at next BLOCKING gate | ## Methodology Quick Reference ``` ┌────────────────────────────────────────────────────────────────┐ -│ Feature Decomposition (4-Step Method) │ +│ Task Decomposition (Multi-Mode) │ ├────────────────────────────────────────────────────────────────┤ -│ CONTEXT: Resolve mode (full / single component / standalone) │ -│ 1. Bootstrap Structure → initial_structure.md (full only) │ -│ [BLOCKING: user confirms structure] │ -│ 2. Feature Decompose → [##]_[name]/[##].[##]_feature_* │ -│ 3. Cross-Verification → cross_dependencies.md (full only) │ -│ [BLOCKING: user confirms dependencies] │ -│ 4. 
Jira Tasks → Jira via MCP │ -│ ───────────────────────────────────────────────── │ -│ Summary → SUMMARY.md │ +│ CONTEXT: Resolve mode (default / single component / tests-only)│ +│ │ +│ DEFAULT MODE: │ +│ 1. Bootstrap Structure → [JIRA-ID]_initial_structure.md │ +│ [BLOCKING: user confirms structure] │ +│ 2. Component Tasks → [JIRA-ID]_[short_name].md each │ +│ 3. Blackbox Tests → [JIRA-ID]_[short_name].md each │ +│ 4. Cross-Verification → _dependencies_table.md │ +│ [BLOCKING: user confirms dependencies] │ +│ │ +│ TESTS-ONLY MODE: │ +│ 1t. Test Infrastructure → [JIRA-ID]_test_infrastructure.md │ +│ [BLOCKING: user confirms test scaffold] │ +│ 3. Blackbox Tests → [JIRA-ID]_[short_name].md each │ +│ 4. Cross-Verification → _dependencies_table.md │ +│ [BLOCKING: user confirms dependencies] │ +│ │ +│ SINGLE COMPONENT MODE: │ +│ 2. Component Tasks → [JIRA-ID]_[short_name].md each │ ├────────────────────────────────────────────────────────────────┤ -│ Principles: Atomic features · Behavioral specs · Save now │ -│ Ask don't assume · Plan don't code │ +│ Principles: Atomic tasks · Behavioral specs · Flat structure │ +│ Jira inline · Rename to Jira ID · Save now · Ask don't assume│ └────────────────────────────────────────────────────────────────┘ ``` diff --git a/.cursor/skills/decompose/templates/dependencies-table.md b/.cursor/skills/decompose/templates/dependencies-table.md new file mode 100644 index 0000000..65612ba --- /dev/null +++ b/.cursor/skills/decompose/templates/dependencies-table.md @@ -0,0 +1,31 @@ +# Dependencies Table Template + +Use this template after cross-task verification. Save as `TASKS_DIR/_dependencies_table.md`. + +--- + +```markdown +# Dependencies Table + +**Date**: [YYYY-MM-DD] +**Total Tasks**: [N] +**Total Complexity Points**: [N] + +| Task | Name | Complexity | Dependencies | Epic | +|------|------|-----------|-------------|------| +| [JIRA-ID] | initial_structure | [points] | None | [EPIC-ID] | +| [JIRA-ID] | [short_name] | [points] | [JIRA-ID] | [EPIC-ID] | +| [JIRA-ID] | [short_name] | [points] | [JIRA-ID] | [EPIC-ID] | +| [JIRA-ID] | [short_name] | [points] | [JIRA-ID], [JIRA-ID] | [EPIC-ID] | +| ... | ... | ... | ... | ... | +``` + +--- + +## Guidelines + +- Every task from TASKS_DIR must appear in this table +- Dependencies column lists Jira IDs (e.g., "AZ-43, AZ-44") or "None" +- No circular dependencies allowed +- Tasks should be listed in recommended execution order +- The `/implement` skill reads this table to compute parallel batches diff --git a/.cursor/skills/decompose/templates/initial-structure.md b/.cursor/skills/decompose/templates/initial-structure-task.md similarity index 68% rename from .cursor/skills/decompose/templates/initial-structure.md rename to .cursor/skills/decompose/templates/initial-structure-task.md index 92f124b..371e5e0 100644 --- a/.cursor/skills/decompose/templates/initial-structure.md +++ b/.cursor/skills/decompose/templates/initial-structure-task.md @@ -1,15 +1,20 @@ -# Initial Structure Plan Template +# Initial Structure Task Template -Use this template for the bootstrap structure plan. Save as `TASKS_DIR//initial_structure.md`. +Use this template for the bootstrap structure plan. Save as `TASKS_DIR/01_initial_structure.md` initially, then rename to `TASKS_DIR/[JIRA-ID]_initial_structure.md` after Jira ticket creation. --- ```markdown -# Initial Project Structure Plan +# Initial Project Structure -**Date**: [YYYY-MM-DD] -**Tech Stack**: [language, framework, database, etc.] 
-**Source**: architecture.md, component specs from _docs/02_plans// +**Task**: [JIRA-ID]_initial_structure +**Name**: Initial Structure +**Description**: Scaffold the project skeleton — folders, shared models, interfaces, stubs, CI/CD, DB migrations, test structure +**Complexity**: [3|5] points +**Dependencies**: None +**Component**: Bootstrap +**Jira**: [TASK-ID] +**Epic**: [EPIC-ID] ## Project Folder Layout @@ -35,7 +40,7 @@ project-root/ | Component | Interface | Methods | Exposed To | |-----------|-----------|---------|-----------| -| [##]_[name] | [InterfaceName] | [method list] | [consumers] | +| [name] | [InterfaceName] | [method list] | [consumers] | ## CI/CD Pipeline @@ -44,7 +49,7 @@ project-root/ | Build | Compile/bundle the application | Every push | | Lint / Static Analysis | Code quality and style checks | Every push | | Unit Tests | Run unit test suite | Every push | -| Integration Tests | Run integration test suite | Every push | +| Blackbox Tests | Run blackbox test suite | Every push | | Security Scan | SAST / dependency check | Every push | | Deploy to Staging | Deploy to staging environment | Merge to staging branch | @@ -97,16 +102,33 @@ tests/ | Order | Component | Reason | |-------|-----------|--------| -| 1 | [##]_[name] | [why first — foundational, no dependencies] | -| 2 | [##]_[name] | [depends on #1] | +| 1 | [name] | [why first — foundational, no dependencies] | +| 2 | [name] | [depends on #1] | | ... | ... | ... | + +## Acceptance Criteria + +**AC-1: Project scaffolded** +Given the structure plan above +When the implementer executes this task +Then all folders, stubs, and configuration files exist + +**AC-2: Tests runnable** +Given the scaffolded project +When the test suite is executed +Then all stub tests pass (even if they only assert true) + +**AC-3: CI/CD configured** +Given the scaffolded project +When CI pipeline runs +Then build, lint, and test stages complete successfully ``` --- ## Guidance Notes -- This is a PLAN document, not code. The `3.05_implement_initial_structure` command executes it. +- This is a PLAN document, not code. The `/implement` skill executes it. - Focus on structure and organization decisions, not implementation details. - Reference component specs for interface and DTO details — don't repeat everything. - The folder layout should follow conventions of the identified tech stack. diff --git a/.cursor/skills/decompose/templates/summary.md b/.cursor/skills/decompose/templates/summary.md deleted file mode 100644 index 9241e74..0000000 --- a/.cursor/skills/decompose/templates/summary.md +++ /dev/null @@ -1,59 +0,0 @@ -# Decomposition Summary Template - -Use this template after all steps complete. Save as `TASKS_DIR//SUMMARY.md`. - ---- - -```markdown -# Decomposition Summary - -**Date**: [YYYY-MM-DD] -**Topic**: [topic name] -**Total Components**: [N] -**Total Features**: [N] -**Total Complexity Points**: [N] - -## Component Breakdown - -| # | Component | Features | Total Points | Jira Epic | -|---|-----------|----------|-------------|-----------| -| 01 | [name] | [count] | [sum] | [EPIC-ID] | -| 02 | [name] | [count] | [sum] | [EPIC-ID] | -| ... | ... | ... | ... | ... | - -## Feature List - -| Component | Feature | Complexity | Jira Task | Dependencies | -|-----------|---------|-----------|-----------|-------------| -| [##]_[name] | [##].[##]_feature_[name] | [points] | [TASK-ID] | [deps or "None"] | -| ... | ... | ... | ... | ... 
| - -## Implementation Order - -Recommended sequence based on dependency analysis: - -| Phase | Components / Features | Rationale | -|-------|----------------------|-----------| -| 1 | [list] | [foundational, no dependencies] | -| 2 | [list] | [depends on phase 1] | -| 3 | [list] | [depends on phase 1-2] | -| ... | ... | ... | - -### Parallelization Opportunities - -[Features/components that can be implemented concurrently within each phase] - -## Cross-Component Dependencies - -| From (Feature) | To (Feature) | Dependency Type | -|----------------|-------------|-----------------| -| [comp.feature] | [comp.feature] | [data / API / event] | -| ... | ... | ... | - -## Artifacts Produced - -- `initial_structure.md` — project skeleton plan -- `cross_dependencies.md` — dependency matrix -- `[##]_[name]/[##].[##]_feature_*.md` — feature specs per component -- Jira tasks created under respective epics -``` diff --git a/.cursor/skills/decompose/templates/feature-spec.md b/.cursor/skills/decompose/templates/task.md similarity index 78% rename from .cursor/skills/decompose/templates/feature-spec.md rename to .cursor/skills/decompose/templates/task.md index bc0ef6e..f36ea38 100644 --- a/.cursor/skills/decompose/templates/feature-spec.md +++ b/.cursor/skills/decompose/templates/task.md @@ -1,17 +1,21 @@ -# Feature Specification Template +# Task Specification Template Create a focused behavioral specification that describes **what** the system should do, not **how** it should be built. -Save as `TASKS_DIR//[##]_[component_name]/[##].[##]_feature_[feature_name].md`. +Save as `TASKS_DIR/[##]_[short_name].md` initially, then rename to `TASKS_DIR/[JIRA-ID]_[short_name].md` after Jira ticket creation. --- ```markdown # [Feature Name] -**Status**: Draft | **Date**: [YYYY-MM-DD] | **Feature**: [Brief Feature Description] +**Task**: [JIRA-ID]_[short_name] +**Name**: [short human name] +**Description**: [one-line description of what this task delivers] **Complexity**: [1|2|3|5] points -**Dependencies**: [List dependent features or "None"] -**Component**: [##]_[component_name] +**Dependencies**: [AZ-43_shared_models, AZ-44_db_migrations] or "None" +**Component**: [component name for context] +**Jira**: [TASK-ID] +**Epic**: [EPIC-ID] ## Problem @@ -21,11 +25,12 @@ Clear, concise statement of the problem users are facing. - Measurable or observable goal 1 - Measurable or observable goal 2 +- ... 
## Scope ### Included -- What's in scope for this feature +- What's in scope for this task ### Excluded - Explicitly what's NOT in scope @@ -59,7 +64,7 @@ Then [expected result] |--------|-------------|-----------------| | AC-1 | [test subject] | [expected result] | -## Integration Tests +## Blackbox Tests | AC Ref | Initial Data/Conditions | What to Test | Expected Behavior | NFR References | |--------|------------------------|-------------|-------------------|----------------| @@ -86,7 +91,7 @@ Then [expected result] - 2 points: Non-trivial, low complexity, minimal coordination - 3 points: Multi-step, moderate complexity, potential alignment needed - 5 points: Difficult, interconnected logic, medium-high risk -- 8 points: Too complex — split into smaller features +- 8 points: Too complex — split into smaller tasks ## Output Guidelines @@ -97,7 +102,7 @@ Then [expected result] - Include realistic scope boundaries - Write from the user's perspective - Include complexity estimation -- Note dependencies on other features +- Reference dependencies by Jira ID (e.g., AZ-43_shared_models) **DON'T:** - Include implementation details (file paths, classes, methods) diff --git a/.cursor/skills/decompose/templates/test-infrastructure-task.md b/.cursor/skills/decompose/templates/test-infrastructure-task.md new file mode 100644 index 0000000..a07cb42 --- /dev/null +++ b/.cursor/skills/decompose/templates/test-infrastructure-task.md @@ -0,0 +1,129 @@ +# Test Infrastructure Task Template + +Use this template for the test infrastructure bootstrap (Step 1t in tests-only mode). Save as `TASKS_DIR/01_test_infrastructure.md` initially, then rename to `TASKS_DIR/[JIRA-ID]_test_infrastructure.md` after Jira ticket creation. + +--- + +```markdown +# Test Infrastructure + +**Task**: [JIRA-ID]_test_infrastructure +**Name**: Test Infrastructure +**Description**: Scaffold the Blackbox test project — test runner, mock services, Docker test environment, test data fixtures, reporting +**Complexity**: [3|5] points +**Dependencies**: None +**Component**: Blackbox Tests +**Jira**: [TASK-ID] +**Epic**: [EPIC-ID] + +## Test Project Folder Layout + +``` +e2e/ +├── conftest.py +├── requirements.txt +├── Dockerfile +├── mocks/ +│ ├── [mock_service_1]/ +│ │ ├── Dockerfile +│ │ └── [entrypoint file] +│ └── [mock_service_2]/ +│ ├── Dockerfile +│ └── [entrypoint file] +├── fixtures/ +│ └── [test data files] +├── tests/ +│ ├── test_[category_1].py +│ ├── test_[category_2].py +│ └── ... +└── docker-compose.test.yml +``` + +### Layout Rationale + +[Brief explanation of directory structure choices — framework conventions, separation of mocks from tests, fixture management] + +## Mock Services + +| Mock Service | Replaces | Endpoints | Behavior | +|-------------|----------|-----------|----------| +| [name] | [external service] | [endpoints it serves] | [response behavior, configurable via control API] | + +### Mock Control API + +Each mock service exposes a `POST /mock/config` endpoint for test-time behavior control (e.g., simulate downtime, inject errors). A `GET /mock/[resource]` endpoint returns recorded interactions for assertion. 
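A minimal sketch of such a mock, using only the Python standard library; the control endpoints match the description above, while the business route, port, and recorded-interaction shape are illustrative assumptions, not part of the template:

```python
# mock service sketch: POST /mock/config sets behavior, GET /mock/requests returns recordings
import json
from http.server import BaseHTTPRequestHandler, HTTPServer

STATE = {"config": {"fail_next": False}, "requests": []}

class MockHandler(BaseHTTPRequestHandler):
    def _send(self, code, body):
        payload = json.dumps(body).encode()
        self.send_response(code)
        self.send_header("Content-Type", "application/json")
        self.end_headers()
        self.wfile.write(payload)

    def do_POST(self):
        length = int(self.headers.get("Content-Length", 0))
        body = json.loads(self.rfile.read(length) or b"{}")
        if self.path == "/mock/config":
            # control API: reconfigure behavior at test time (e.g., inject one failure)
            STATE["config"].update(body)
            self._send(200, {"ok": True})
        else:
            # business endpoint: record the interaction, then respond deterministically
            STATE["requests"].append({"path": self.path, "body": body})
            if STATE["config"].get("fail_next"):
                STATE["config"]["fail_next"] = False
                self._send(503, {"error": "simulated downtime"})
            else:
                self._send(200, {"echo": body})

    def do_GET(self):
        if self.path == "/mock/requests":
            # assertion API: recorded interactions for the test runner
            self._send(200, STATE["requests"])
        else:
            self._send(404, {"error": "not found"})

if __name__ == "__main__":
    HTTPServer(("0.0.0.0", 8080), MockHandler).serve_forever()
```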
+ +## Docker Test Environment + +### docker-compose.test.yml Structure + +| Service | Image / Build | Purpose | Depends On | +|---------|--------------|---------|------------| +| [system-under-test] | [build context] | Main system being tested | [mock services] | +| [mock-1] | [build context] | Mock for [external service] | — | +| [e2e-consumer] | [build from e2e/] | Test runner | [system-under-test] | + +### Networks and Volumes + +[Isolated test network, volume mounts for test data, model files, results output] + +## Test Runner Configuration + +**Framework**: [e.g., pytest] +**Plugins**: [e.g., pytest-csv, sseclient-py, requests] +**Entry point**: [e.g., pytest --csv=/results/report.csv] + +### Fixture Strategy + +| Fixture | Scope | Purpose | +|---------|-------|---------| +| [name] | [session/module/function] | [what it provides] | + +## Test Data Fixtures + +| Data Set | Source | Format | Used By | +|----------|--------|--------|---------| +| [name] | [volume mount / generated / API seed] | [format] | [test categories] | + +### Data Isolation + +[Strategy: fresh containers per run, volume cleanup, mock state reset] + +## Test Reporting + +**Format**: [e.g., CSV] +**Columns**: [e.g., Test ID, Test Name, Execution Time (ms), Result, Error Message] +**Output path**: [e.g., /results/report.csv → mounted to host] + +## Acceptance Criteria + +**AC-1: Test environment starts** +Given the docker-compose.test.yml +When `docker compose -f docker-compose.test.yml up` is executed +Then all services start and the system-under-test is reachable + +**AC-2: Mock services respond** +Given the test environment is running +When the e2e-consumer sends requests to mock services +Then mock services respond with configured behavior + +**AC-3: Test runner executes** +Given the test environment is running +When the e2e-consumer starts +Then the test runner discovers and executes test files + +**AC-4: Test report generated** +Given tests have been executed +When the test run completes +Then a report file exists at the configured output path with correct columns +``` + +--- + +## Guidance Notes + +- This is a PLAN document, not code. The `/implement` skill executes it. +- Focus on test infrastructure decisions, not individual test implementations. +- Reference environment.md and test-data.md from the test specs — don't repeat everything. +- Mock services must be deterministic: same input always produces same output. +- The Docker environment must be self-contained: `docker compose up` sufficient. diff --git a/.cursor/skills/deploy/SKILL.md b/.cursor/skills/deploy/SKILL.md new file mode 100644 index 0000000..d325667 --- /dev/null +++ b/.cursor/skills/deploy/SKILL.md @@ -0,0 +1,491 @@ +--- +name: deploy +description: | + Comprehensive deployment skill covering status check, env setup, containerization, CI/CD pipeline, environment strategy, observability, deployment procedures, and deployment scripts. + 7-step workflow: Status & env check, Docker containerization, CI/CD pipeline definition, environment strategy, observability planning, deployment procedures, deployment scripts. + Uses _docs/04_deploy/ structure. 
+ Trigger phrases: + - "deploy", "deployment", "deployment strategy" + - "CI/CD", "pipeline", "containerize" + - "observability", "monitoring", "logging" + - "dockerize", "docker compose" +category: ship +tags: [deployment, docker, ci-cd, observability, monitoring, containerization, scripts] +disable-model-invocation: true +--- + +# Deployment Planning + +Plan and document the full deployment lifecycle: check deployment status and environment requirements, containerize the application, define CI/CD pipelines, configure environments, set up observability, document deployment procedures, and generate deployment scripts. + +## Core Principles + +- **Docker-first**: every component runs in a container; local dev, blackbox tests, and production all use Docker +- **Infrastructure as code**: all deployment configuration is version-controlled +- **Observability built-in**: logging, metrics, and tracing are part of the deployment plan, not afterthoughts +- **Environment parity**: dev, staging, and production environments mirror each other as closely as possible +- **Save immediately**: write artifacts to disk after each step; never accumulate unsaved work +- **Ask, don't assume**: when infrastructure constraints or preferences are unclear, ask the user +- **Plan, don't code**: this workflow produces deployment documents and specifications, not implementation code (except deployment scripts in Step 7) + +## Context Resolution + +Fixed paths: + +- DOCUMENT_DIR: `_docs/02_document/` +- DEPLOY_DIR: `_docs/04_deploy/` +- REPORTS_DIR: `_docs/04_deploy/reports/` +- SCRIPTS_DIR: `scripts/` +- ARCHITECTURE: `_docs/02_document/architecture.md` +- COMPONENTS_DIR: `_docs/02_document/components/` + +Announce the resolved paths to the user before proceeding. + +## Input Specification + +### Required Files + +| File | Purpose | Required | +|------|---------|----------| +| `_docs/00_problem/problem.md` | Problem description and context | Greenfield only | +| `_docs/00_problem/restrictions.md` | Constraints and limitations | Greenfield only | +| `_docs/01_solution/solution.md` | Finalized solution | Greenfield only | +| `DOCUMENT_DIR/architecture.md` | Architecture (from plan or document skill) | Always | +| `DOCUMENT_DIR/components/` | Component specs | Always | + +### Prerequisite Checks (BLOCKING) + +1. `architecture.md` exists — **STOP if missing**, run `/plan` first +2. At least one component spec exists in `DOCUMENT_DIR/components/` — **STOP if missing** +3. Create DEPLOY_DIR, REPORTS_DIR, and SCRIPTS_DIR if they do not exist +4. 
If DEPLOY_DIR already contains artifacts, ask user: **resume from last checkpoint or start fresh?** + +## Artifact Management + +### Directory Structure + +``` +DEPLOY_DIR/ +├── containerization.md +├── ci_cd_pipeline.md +├── environment_strategy.md +├── observability.md +├── deployment_procedures.md +├── deploy_scripts.md +└── reports/ + └── deploy_status_report.md + +SCRIPTS_DIR/ (project root) +├── deploy.sh +├── pull-images.sh +├── start-services.sh +├── stop-services.sh +└── health-check.sh + +.env (project root, git-ignored) +.env.example (project root, committed) +``` + +### Save Timing + +| Step | Save immediately after | Filename | +|------|------------------------|----------| +| Step 1 | Status check & env setup complete | `reports/deploy_status_report.md` + `.env` + `.env.example` | +| Step 2 | Containerization plan complete | `containerization.md` | +| Step 3 | CI/CD pipeline defined | `ci_cd_pipeline.md` | +| Step 4 | Environment strategy documented | `environment_strategy.md` | +| Step 5 | Observability plan complete | `observability.md` | +| Step 6 | Deployment procedures documented | `deployment_procedures.md` | +| Step 7 | Deployment scripts created | `deploy_scripts.md` + scripts in `SCRIPTS_DIR/` | + +### Resumability + +If DEPLOY_DIR already contains artifacts: + +1. List existing files and match to the save timing table +2. Identify the last completed step +3. Resume from the next incomplete step +4. Inform the user which steps are being skipped + +## Progress Tracking + +At the start of execution, create a TodoWrite with all steps (1 through 7). Update status as each step completes. + +## Workflow + +### Step 1: Deployment Status & Environment Setup + +**Role**: DevOps / Platform engineer +**Goal**: Assess current deployment readiness, identify all required environment variables, and create `.env` files +**Constraints**: Must complete before any other step + +1. Read architecture.md, all component specs, and restrictions.md +2. Assess deployment readiness: + - List all components and their current state (planned / implemented / tested) + - Identify external dependencies (databases, APIs, message queues, cloud services) + - Identify infrastructure prerequisites (container registry, cloud accounts, DNS, SSL certificates) + - Check if any deployment blockers exist +3. Identify all required environment variables by scanning: + - Component specs for configuration needs + - Database connection requirements + - External API endpoints and credentials + - Feature flags and runtime configuration + - Container registry credentials + - Cloud provider credentials + - Monitoring/logging service endpoints +4. Generate `.env.example` in project root with all variables and placeholder values (committed to VCS) +5. Generate `.env` in project root with development defaults filled in where safe (git-ignored) +6. Ensure `.gitignore` includes `.env` (but NOT `.env.example`) +7. 
Produce a deployment status report summarizing readiness, blockers, and required setup + +**Self-verification**: +- [ ] All components assessed for deployment readiness +- [ ] External dependencies catalogued +- [ ] Infrastructure prerequisites identified +- [ ] All required environment variables discovered +- [ ] `.env.example` created with placeholder values +- [ ] `.env` created with safe development defaults +- [ ] `.gitignore` updated to exclude `.env` +- [ ] Status report written to `reports/deploy_status_report.md` + +**Save action**: Write `reports/deploy_status_report.md` using `templates/deploy_status_report.md`, create `.env` and `.env.example` in project root + +**BLOCKING**: Present status report and environment variables to user. Do NOT proceed until confirmed. + +--- + +### Step 2: Containerization + +**Role**: DevOps / Platform engineer +**Goal**: Define Docker configuration for every component, local development, and blackbox test environments +**Constraints**: Plan only — no Dockerfile creation. Describe what each Dockerfile should contain. + +1. Read architecture.md and all component specs +2. Read restrictions.md for infrastructure constraints +3. Research best Docker practices for the project's tech stack (multi-stage builds, base image selection, layer optimization) +4. For each component, define: + - Base image (pinned version, prefer alpine/distroless for production) + - Build stages (dependency install, build, production) + - Non-root user configuration + - Health check endpoint and command + - Exposed ports + - `.dockerignore` contents +5. Define `docker-compose.yml` for local development: + - All application components + - Database (Postgres) with named volume + - Any message queues, caches, or external service mocks + - Shared network + - Environment variable files (`.env`) +6. Define `docker-compose.test.yml` for blackbox tests: + - Application components under test + - Test runner container (black-box, no internal imports) + - Isolated database with seed data + - All tests runnable via `docker compose -f docker-compose.test.yml up --abort-on-container-exit` +7. Define image tagging strategy: `[registry]/[org]/[image]:[git-sha]` for CI, `latest` for local dev only + +**Self-verification**: +- [ ] Every component has a Dockerfile specification +- [ ] Multi-stage builds specified for all production images +- [ ] Non-root user for all containers +- [ ] Health checks defined for every service +- [ ] docker-compose.yml covers all components + dependencies +- [ ] docker-compose.test.yml enables black-box testing +- [ ] `.dockerignore` defined + +**Save action**: Write `containerization.md` using `templates/containerization.md` + +**BLOCKING**: Present containerization plan to user. Do NOT proceed until confirmed. + +--- + +### Step 3: CI/CD Pipeline + +**Role**: DevOps engineer +**Goal**: Define the CI/CD pipeline with quality gates, security scanning, and multi-environment deployment +**Constraints**: Pipeline definition only — produce YAML specification, not implementation + +1. Read architecture.md for tech stack and deployment targets +2. Read restrictions.md for CI/CD constraints (cloud provider, registry, etc.) +3. Research CI/CD best practices for the project's platform (GitHub Actions / Azure Pipelines) +4.
Define pipeline stages: + +| Stage | Trigger | Steps | Quality Gate | +|-------|---------|-------|-------------| +| **Lint** | Every push | Run linters per language (black, rustfmt, prettier, dotnet format) | Zero errors | +| **Test** | Every push | Unit tests, blackbox tests, coverage report | 75%+ coverage (see `.cursor/rules/cursor-meta.mdc` Quality Thresholds) | +| **Security** | Every push | Dependency audit, SAST scan (Semgrep/SonarQube), image scan (Trivy) | Zero critical/high CVEs | +| **Build** | PR merge to dev | Build Docker images, tag with git SHA | Build succeeds | +| **Push** | After build | Push to container registry | Push succeeds | +| **Deploy Staging** | After push | Deploy to staging environment | Health checks pass | +| **Smoke Tests** | After staging deploy | Run critical path tests against staging | All pass | +| **Deploy Production** | Manual approval | Deploy to production | Health checks pass | + +5. Define caching strategy: dependency caches, Docker layer caches, build artifact caches +6. Define parallelization: which stages can run concurrently +7. Define notifications: build failures, deployment status, security alerts + +**Self-verification**: +- [ ] All pipeline stages defined with triggers and gates +- [ ] Coverage threshold enforced (75%+) +- [ ] Security scanning included (dependencies + images + SAST) +- [ ] Caching configured for dependencies and Docker layers +- [ ] Multi-environment deployment (staging → production) +- [ ] Rollback procedure referenced +- [ ] Notifications configured + +**Save action**: Write `ci_cd_pipeline.md` using `templates/ci_cd_pipeline.md` + +--- + +### Step 4: Environment Strategy + +**Role**: Platform engineer +**Goal**: Define environment configuration, secrets management, and environment parity +**Constraints**: Strategy document — no secrets or credentials in output + +1. Define environments: + +| Environment | Purpose | Infrastructure | Data | +|-------------|---------|---------------|------| +| **Development** | Local developer workflow | docker-compose, local volumes | Seed data, mocks for external APIs | +| **Staging** | Pre-production validation | Mirrors production topology | Anonymized production-like data | +| **Production** | Live system | Full infrastructure | Real data | + +2. Define environment variable management: + - Reference `.env.example` created in Step 1 + - Per-environment variable sources (`.env` for dev, secret manager for staging/prod) + - Validation: fail fast on missing required variables at startup +3. Define secrets management: + - Never commit secrets to version control + - Development: `.env` files (git-ignored) + - Staging/Production: secret manager (AWS Secrets Manager / Azure Key Vault / Vault) + - Rotation policy +4. 
Define database management per environment: + - Development: Docker Postgres with named volume, seed data + - Staging: managed Postgres, migrations applied via CI/CD + - Production: managed Postgres, migrations require approval + +**Self-verification**: +- [ ] All three environments defined with clear purpose +- [ ] Environment variable documentation complete (references `.env.example` from Step 1) +- [ ] No secrets in any output document +- [ ] Secret manager specified for staging/production +- [ ] Database strategy per environment + +**Save action**: Write `environment_strategy.md` using `templates/environment_strategy.md` + +--- + +### Step 5: Observability + +**Role**: Site Reliability Engineer (SRE) +**Goal**: Define logging, metrics, tracing, and alerting strategy +**Constraints**: Strategy document — describe what to implement, not how to wire it + +1. Read architecture.md and component specs for service boundaries +2. Research observability best practices for the tech stack + +**Logging**: +- Structured JSON to stdout/stderr (no file logging in containers) +- Fields: `timestamp` (ISO 8601), `level`, `service`, `correlation_id`, `message`, `context` +- Levels: ERROR (exceptions), WARN (degraded), INFO (business events), DEBUG (diagnostics, dev only) +- No PII in logs +- Retention: dev = console, staging = 7 days, production = 30 days + +**Metrics**: +- Expose Prometheus-compatible `/metrics` endpoint per service +- System metrics: CPU, memory, disk, network +- Application metrics: `request_count`, `request_duration` (histogram), `error_count`, `active_connections` +- Business metrics: derived from acceptance criteria +- Collection interval: 15s + +**Distributed Tracing**: +- OpenTelemetry SDK integration +- Trace context propagation via HTTP headers and message queue metadata +- Span naming: `[service].[operation]` +- Sampling: 100% in dev/staging, 10% in production (adjust based on volume) + +**Alerting**: + +| Severity | Response Time | Condition Examples | +|----------|---------------|-------------------| +| Critical | 5 min | Service down, data loss, health check failed | +| High | 30 min | Error rate > 5%, P95 latency > 2x baseline | +| Medium | 4 hours | Disk > 80%, elevated latency | +| Low | Next business day | Non-critical warnings | + +**Dashboards**: +- Operations: service health, request rate, error rate, response time percentiles, resource utilization +- Business: key business metrics from acceptance criteria + +**Self-verification**: +- [ ] Structured logging format defined with required fields +- [ ] Metrics endpoint specified per service +- [ ] OpenTelemetry tracing configured +- [ ] Alert severities with response times defined +- [ ] Dashboards cover operations and business metrics +- [ ] PII exclusion from logs addressed + +**Save action**: Write `observability.md` using `templates/observability.md` + +--- + +### Step 6: Deployment Procedures + +**Role**: DevOps / Platform engineer +**Goal**: Define deployment strategy, rollback procedures, health checks, and deployment checklist +**Constraints**: Procedures document — no implementation + +1. Define deployment strategy: + - Preferred pattern: blue-green / rolling / canary (choose based on architecture) + - Zero-downtime requirement for production + - Graceful shutdown: 30-second grace period for in-flight requests + - Database migration ordering: migrate before deploy, backward-compatible only + +2.
Define health checks: + +| Check | Type | Endpoint | Interval | Threshold | +|-------|------|----------|----------|-----------| +| Liveness | HTTP GET | `/health/live` | 10s | 3 failures → restart | +| Readiness | HTTP GET | `/health/ready` | 5s | 3 failures → remove from LB | +| Startup | HTTP GET | `/health/ready` | 5s | 30 attempts max | + +3. Define rollback procedures: + - Trigger criteria: health check failures, error rate spike, critical alert + - Rollback steps: redeploy previous image tag, verify health, rollback database if needed + - Communication: notify stakeholders during rollback + - Post-mortem: required after every production rollback + +4. Define deployment checklist: + - [ ] All tests pass in CI + - [ ] Security scan clean (zero critical/high CVEs) + - [ ] Database migrations reviewed and tested + - [ ] Environment variables configured + - [ ] Health check endpoints responding + - [ ] Monitoring alerts configured + - [ ] Rollback plan documented and tested + - [ ] Stakeholders notified + +**Self-verification**: +- [ ] Deployment strategy chosen and justified +- [ ] Zero-downtime approach specified +- [ ] Health checks defined (liveness, readiness, startup) +- [ ] Rollback trigger criteria and steps documented +- [ ] Deployment checklist complete + +**Save action**: Write `deployment_procedures.md` using `templates/deployment_procedures.md` + +**BLOCKING**: Present deployment procedures to user. Do NOT proceed until confirmed. + +--- + +### Step 7: Deployment Scripts + +**Role**: DevOps / Platform engineer +**Goal**: Create executable deployment scripts for pulling Docker images and running services on the remote target machine +**Constraints**: Produce real, executable shell scripts. This is the ONLY step that creates implementation artifacts. + +1. Read containerization.md and deployment_procedures.md from previous steps +2. Read `.env.example` for required variables +3. Create the following scripts in `SCRIPTS_DIR/`: + +**`deploy.sh`** — Main deployment orchestrator: + - Validates that required environment variables are set (sources `.env` if present) + - Calls `pull-images.sh`, then `stop-services.sh`, then `start-services.sh`, then `health-check.sh` + - Exits with non-zero code on any failure + - Supports `--rollback` flag to redeploy previous image tags + +**`pull-images.sh`** — Pull Docker images to target machine: + - Reads image list and tags from environment or config + - Authenticates with container registry + - Pulls all required images + - Verifies image integrity (digest check) + +**`start-services.sh`** — Start services on target machine: + - Runs `docker compose up -d` or individual `docker run` commands + - Applies environment variables from `.env` + - Configures networks and volumes + - Waits for containers to reach healthy state + +**`stop-services.sh`** — Graceful shutdown: + - Stops services with graceful shutdown period + - Saves current image tags for rollback reference + - Cleans up orphaned containers/networks + +**`health-check.sh`** — Verify deployment health: + - Checks all health endpoints + - Reports status per service + - Returns non-zero if any service is unhealthy + +4. All scripts must: + - Use `#!/bin/bash` with `set -euo pipefail` (fail fast on errors, unset variables, and pipeline failures) + - Source `.env` from project root or accept env vars from the environment + - Include usage/help output (`--help` flag) + - Be idempotent where possible + - Handle SSH connection to remote target (configurable via `DEPLOY_HOST` env var) + +5.
Document all scripts in `deploy_scripts.md` + +**Self-verification**: +- [ ] All five scripts created and executable +- [ ] Scripts source environment variables correctly +- [ ] `deploy.sh` orchestrates the full flow +- [ ] `pull-images.sh` handles registry auth and image pull +- [ ] `start-services.sh` starts containers with correct config +- [ ] `stop-services.sh` handles graceful shutdown +- [ ] `health-check.sh` validates all endpoints +- [ ] Rollback supported via `deploy.sh --rollback` +- [ ] Scripts work for remote deployment via SSH (DEPLOY_HOST) +- [ ] `deploy_scripts.md` documents all scripts + +**Save action**: Write scripts to `SCRIPTS_DIR/`, write `deploy_scripts.md` using `templates/deploy_scripts.md` + +--- + +## Escalation Rules + +| Situation | Action | +|-----------|--------| +| Unknown cloud provider or hosting | **ASK user** | +| Container registry not specified | **ASK user** | +| CI/CD platform preference unclear | **ASK user** — default to GitHub Actions | +| Secret manager not chosen | **ASK user** | +| Deployment pattern trade-offs | **ASK user** with recommendation | +| Missing architecture.md | **STOP** — run `/plan` first | +| Remote target machine details unknown | **ASK user** for SSH access, OS, and specs | + +## Common Mistakes + +- **Implementing during planning**: Steps 1–6 produce documents, not code (Step 7 is the exception — it creates scripts) +- **Hardcoding secrets**: never include real credentials in deployment documents or scripts +- **Ignoring blackbox test containerization**: the test environment must be containerized alongside the app +- **Skipping BLOCKING gates**: never proceed past a BLOCKING marker without user confirmation +- **Using `:latest` tags**: always pin base image versions +- **Forgetting observability**: logging, metrics, and tracing are deployment concerns, not post-deployment additions +- **Committing `.env`**: only `.env.example` goes to version control; `.env` must be in `.gitignore` +- **Non-portable scripts**: deployment scripts must work across environments; avoid hardcoded paths + +## Methodology Quick Reference + +``` +┌────────────────────────────────────────────────────────────────┐ +│ Deployment Planning (7-Step Method) │ +├────────────────────────────────────────────────────────────────┤ +│ PREREQ: architecture.md + component specs exist │ +│ │ +│ 1. Status & Env → reports/deploy_status_report.md │ +│ + .env + .env.example │ +│ [BLOCKING: user confirms status & env vars] │ +│ 2. Containerization → containerization.md │ +│ [BLOCKING: user confirms Docker plan] │ +│ 3. CI/CD Pipeline → ci_cd_pipeline.md │ +│ 4. Environment → environment_strategy.md │ +│ 5. Observability → observability.md │ +│ 6. Procedures → deployment_procedures.md │ +│ [BLOCKING: user confirms deployment plan] │ +│ 7. Scripts → deploy_scripts.md + scripts/ │ +├────────────────────────────────────────────────────────────────┤ +│ Principles: Docker-first · IaC · Observability built-in │ +│ Environment parity · Save immediately │ +└────────────────────────────────────────────────────────────────┘ +``` diff --git a/.cursor/skills/deploy/templates/ci_cd_pipeline.md b/.cursor/skills/deploy/templates/ci_cd_pipeline.md new file mode 100644 index 0000000..16102e3 --- /dev/null +++ b/.cursor/skills/deploy/templates/ci_cd_pipeline.md @@ -0,0 +1,87 @@ +# CI/CD Pipeline Template + +Save as `_docs/04_deploy/ci_cd_pipeline.md`. 
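
The Test stage's 75% coverage gate can be enforced with a small script in CI. A minimal sketch, assuming a Cobertura-style `coverage.xml` is produced by the test run (the path and threshold are illustrative):

```python
# coverage_gate.py: fail the pipeline if line coverage is below the threshold
import sys
import xml.etree.ElementTree as ET

THRESHOLD = 0.75  # matches the Test stage quality gate

def main(report_path: str = "coverage.xml") -> None:
    root = ET.parse(report_path).getroot()
    rate = float(root.get("line-rate", "0"))  # Cobertura stores coverage as 0.0-1.0
    print(f"line coverage: {rate:.1%} (required: {THRESHOLD:.0%})")
    if rate < THRESHOLD:
        sys.exit(1)  # non-zero exit fails the CI stage

if __name__ == "__main__":
    main(sys.argv[1] if len(sys.argv) > 1 else "coverage.xml")
```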
+ +--- + +```markdown +# [System Name] — CI/CD Pipeline + +## Pipeline Overview + +| Stage | Trigger | Quality Gate | +|-------|---------|-------------| +| Lint | Every push | Zero lint errors | +| Test | Every push | 75%+ coverage, all tests pass | +| Security | Every push | Zero critical/high CVEs | +| Build | PR merge to dev | Docker build succeeds | +| Push | After build | Images pushed to registry | +| Deploy Staging | After push | Health checks pass | +| Smoke Tests | After staging deploy | Critical paths pass | +| Deploy Production | Manual approval | Health checks pass | + +## Stage Details + +### Lint +- [Language-specific linters and formatters] +- Runs in parallel per language + +### Test +- Unit tests: [framework and command] +- Blackbox tests: [framework and command, uses docker-compose.test.yml] +- Coverage threshold: 75% overall, 90% critical paths +- Coverage report published as pipeline artifact + +### Security +- Dependency audit: [tool, e.g., npm audit / pip-audit / dotnet list package --vulnerable] +- SAST scan: [tool, e.g., Semgrep / SonarQube] +- Image scan: Trivy on built Docker images +- Block on: critical or high severity findings + +### Build +- Docker images built using multi-stage Dockerfiles +- Tagged with git SHA: `[registry]/[image]:[git-sha]` +- Build cache: Docker layer cache via CI cache action + +### Push +- Registry: [container registry URL] +- Authentication: [method] + +### Deploy Staging +- Deployment method: [docker compose / Kubernetes / cloud service] +- Pre-deploy: run database migrations +- Post-deploy: verify health check endpoints +- Automated rollback on health check failure + +### Smoke Tests +- Subset of blackbox tests targeting staging environment +- Validates critical user flows +- Timeout: [maximum duration] + +### Deploy Production +- Requires manual approval via [mechanism] +- Deployment strategy: [blue-green / rolling / canary] +- Pre-deploy: database migration review +- Post-deploy: health checks + monitoring for 15 min + +## Caching Strategy + +| Cache | Key | Restore Keys | +|-------|-----|-------------| +| Dependencies | [lockfile hash] | [partial match] | +| Docker layers | [Dockerfile hash] | [partial match] | +| Build artifacts | [source hash] | [partial match] | + +## Parallelization + +[Diagram or description of which stages run concurrently] + +## Notifications + +| Event | Channel | Recipients | +|-------|---------|-----------| +| Build failure | [Slack/email] | [team] | +| Security alert | [Slack/email] | [team + security] | +| Deploy success | [Slack] | [team] | +| Deploy failure | [Slack/email + PagerDuty] | [on-call] | +``` diff --git a/.cursor/skills/deploy/templates/containerization.md b/.cursor/skills/deploy/templates/containerization.md new file mode 100644 index 0000000..d6c7073 --- /dev/null +++ b/.cursor/skills/deploy/templates/containerization.md @@ -0,0 +1,94 @@ +# Containerization Plan Template + +Save as `_docs/04_deploy/containerization.md`.
+ +--- + +```markdown +# [System Name] — Containerization + +## Component Dockerfiles + +### [Component Name] + +| Property | Value | +|----------|-------| +| Base image | [e.g., mcr.microsoft.com/dotnet/aspnet:8.0-alpine] | +| Build image | [e.g., mcr.microsoft.com/dotnet/sdk:8.0-alpine] | +| Stages | [dependency install → build → production] | +| User | [non-root user name] | +| Health check | [endpoint and command] | +| Exposed ports | [port list] | +| Key build args | [if any] | + +### [Repeat for each component] + +## Docker Compose — Local Development + +```yaml +# docker-compose.yml structure +services: + [component]: + build: ./[path] + ports: ["host:container"] + environment: [reference .env.dev] + depends_on: [dependencies with health condition] + healthcheck: [command, interval, timeout, retries] + + db: + image: [postgres:version-alpine] + volumes: [named volume] + environment: [credentials from .env.dev] + healthcheck: [pg_isready] + +volumes: + [named volumes] + +networks: + [shared network] +``` + +## Docker Compose — Blackbox Tests + +```yaml +# docker-compose.test.yml structure +services: + [app components under test] + + test-runner: + build: ./tests/integration + depends_on: [app components with health condition] + environment: [test configuration] + # Exit code determines test pass/fail + + db: + image: [postgres:version-alpine] + volumes: [seed data mount] +``` + +Run: `docker compose -f docker-compose.test.yml up --abort-on-container-exit` + +## Image Tagging Strategy + +| Context | Tag Format | Example | +|---------|-----------|---------| +| CI build | `[registry]/[org]/[image]:[git-sha]` | `ghcr.io/org/api:a1b2c3d` | +| Release | `[registry]/[org]/[image]:[version]` | `ghcr.io/org/api:1.2.0` | +| Local dev | `[image]:latest` | `api:latest` | + +## .dockerignore + +``` +.git +.cursor +_docs +_standalone +node_modules +**/bin +**/obj +**/__pycache__ +*.md +.env* +docker-compose*.yml +``` +``` diff --git a/.cursor/skills/deploy/templates/deploy_scripts.md b/.cursor/skills/deploy/templates/deploy_scripts.md new file mode 100644 index 0000000..24e915c --- /dev/null +++ b/.cursor/skills/deploy/templates/deploy_scripts.md @@ -0,0 +1,114 @@ +# Deployment Scripts Documentation Template + +Save as `_docs/04_deploy/deploy_scripts.md`. + +--- + +```markdown +# [System Name] — Deployment Scripts + +## Overview + +| Script | Purpose | Location | +|--------|---------|----------| +| `deploy.sh` | Main deployment orchestrator | `scripts/deploy.sh` | +| `pull-images.sh` | Pull Docker images from registry | `scripts/pull-images.sh` | +| `start-services.sh` | Start all services | `scripts/start-services.sh` | +| `stop-services.sh` | Graceful shutdown | `scripts/stop-services.sh` | +| `health-check.sh` | Verify deployment health | `scripts/health-check.sh` | + +## Prerequisites + +- Docker and Docker Compose installed on target machine +- SSH access to target machine (configured via `DEPLOY_HOST`) +- Container registry credentials configured +- `.env` file with required environment variables (see `.env.example`) + +## Environment Variables + +All scripts source `.env` from the project root or accept variables from the environment.
+ +| Variable | Required By | Purpose | +|----------|------------|---------| +| `DEPLOY_HOST` | All (remote mode) | SSH target for remote deployment | +| `REGISTRY_URL` | `pull-images.sh` | Container registry URL | +| `REGISTRY_USER` | `pull-images.sh` | Registry authentication | +| `REGISTRY_PASS` | `pull-images.sh` | Registry authentication | +| `IMAGE_TAG` | `pull-images.sh`, `start-services.sh` | Image version to deploy (default: latest git SHA) | +| [add project-specific variables] | | | + +## Script Details + +### deploy.sh + +Main orchestrator that runs the full deployment flow. + +**Usage**: +- `./scripts/deploy.sh` — Deploy latest version +- `./scripts/deploy.sh --rollback` — Rollback to previous version +- `./scripts/deploy.sh --help` — Show usage + +**Flow**: +1. Validate required environment variables +2. Call `pull-images.sh` +3. Call `stop-services.sh` +4. Call `start-services.sh` +5. Call `health-check.sh` +6. Report success or failure + +**Rollback**: When `--rollback` is passed, reads the previous image tags saved by `stop-services.sh` and redeploys those versions. + +### pull-images.sh + +**Usage**: `./scripts/pull-images.sh [--help]` + +**Steps**: +1. Authenticate with container registry (`REGISTRY_URL`) +2. Pull all required images with specified `IMAGE_TAG` +3. Verify image integrity via digest check +4. Report pull results per image + +### start-services.sh + +**Usage**: `./scripts/start-services.sh [--help]` + +**Steps**: +1. Run `docker compose up -d` with the correct env file +2. Configure networks and volumes +3. Wait for all containers to report healthy state +4. Report startup status per service + +### stop-services.sh + +**Usage**: `./scripts/stop-services.sh [--help]` + +**Steps**: +1. Save current image tags to `previous_tags.env` (for rollback) +2. Stop services with graceful shutdown period (30s) +3. Clean up orphaned containers and networks + +### health-check.sh + +**Usage**: `./scripts/health-check.sh [--help]` + +**Checks**: + +| Service | Endpoint | Expected | +|---------|----------|----------| +| [Component 1] | `http://localhost:[port]/health/live` | HTTP 200 | +| [Component 2] | `http://localhost:[port]/health/ready` | HTTP 200 | +| [add all services] | | | + +**Exit codes**: +- `0` — All services healthy +- `1` — One or more services unhealthy + +## Common Script Properties + +All scripts: +- Use `#!/bin/bash` with `set -euo pipefail` +- Support `--help` flag for usage information +- Source `.env` from project root if present +- Are idempotent where possible +- Support remote execution via SSH when `DEPLOY_HOST` is set +``` diff --git a/.cursor/skills/deploy/templates/deploy_status_report.md b/.cursor/skills/deploy/templates/deploy_status_report.md new file mode 100644 index 0000000..9482ad7 --- /dev/null +++ b/.cursor/skills/deploy/templates/deploy_status_report.md @@ -0,0 +1,73 @@ +# Deployment Status Report Template + +Save as `_docs/04_deploy/reports/deploy_status_report.md`. 
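
Step 1 fills the environment-variable table below partly by scanning code for configuration reads. A rough sketch of that scan, assuming a Python codebase (the pattern, source root, and helper name are illustrative):

```python
# env_scan.py: list environment variables referenced in source files
import pathlib
import re

# matches os.environ["NAME"] and os.environ.get("NAME")
PATTERN = re.compile(r"os\.environ(?:\.get\(|\[)\s*['\"]([A-Z0-9_]+)['\"]")

def scan(root: str = "src") -> set[str]:
    found: set[str] = set()
    for path in pathlib.Path(root).rglob("*.py"):
        found |= set(PATTERN.findall(path.read_text(errors="ignore")))
    return found

if __name__ == "__main__":
    for name in sorted(scan()):
        print(name)
```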
+ +--- + +```markdown +# [System Name] — Deployment Status Report + +## Deployment Readiness Summary + +| Aspect | Status | Notes | +|--------|--------|-------| +| Architecture defined | ✅ / ❌ | | +| Component specs complete | ✅ / ❌ | | +| Infrastructure prerequisites met | ✅ / ❌ | | +| External dependencies identified | ✅ / ❌ | | +| Blockers | [count] | [summary] | + +## Component Status + +| Component | State | Docker-ready | Notes | +|-----------|-------|-------------|-------| +| [Component 1] | planned / implemented / tested | yes / no | | +| [Component 2] | planned / implemented / tested | yes / no | | + +## External Dependencies + +| Dependency | Type | Required For | Status | +|------------|------|-------------|--------| +| [e.g., PostgreSQL] | Database | Data persistence | [available / needs setup] | +| [e.g., Redis] | Cache | Session management | [available / needs setup] | +| [e.g., External API] | API | [purpose] | [available / needs setup] | + +## Infrastructure Prerequisites + +| Prerequisite | Status | Action Needed | +|-------------|--------|--------------| +| Container registry | [ready / not set up] | [action] | +| Cloud account | [ready / not set up] | [action] | +| DNS configuration | [ready / not set up] | [action] | +| SSL certificates | [ready / not set up] | [action] | +| CI/CD platform | [ready / not set up] | [action] | +| Secret manager | [ready / not set up] | [action] | + +## Deployment Blockers + +| Blocker | Severity | Resolution | +|---------|----------|-----------| +| [blocker description] | critical / high / medium | [resolution steps] | + +## Required Environment Variables + +| Variable | Purpose | Required In | Default (Dev) | Source (Staging/Prod) | +|----------|---------|------------|---------------|----------------------| +| `DATABASE_URL` | Postgres connection string | All components | `postgres://dev:dev@db:5432/app` | Secret manager | +| `DEPLOY_HOST` | Remote target machine | Deployment scripts | `localhost` | Environment | +| `REGISTRY_URL` | Container registry URL | CI/CD, deploy scripts | `localhost:5000` | Environment | +| `REGISTRY_USER` | Registry username | CI/CD, deploy scripts | — | Secret manager | +| `REGISTRY_PASS` | Registry password | CI/CD, deploy scripts | — | Secret manager | +| [add all required variables] | | | | | + +## .env Files Created + +- `.env.example` — committed to VCS, contains all variable names with placeholder values +- `.env` — git-ignored, contains development defaults + +## Next Steps + +1. [Resolve any blockers listed above] +2. [Set up missing infrastructure prerequisites] +3. [Proceed to containerization planning] +``` diff --git a/.cursor/skills/deploy/templates/deployment_procedures.md b/.cursor/skills/deploy/templates/deployment_procedures.md new file mode 100644 index 0000000..8bb5f0e --- /dev/null +++ b/.cursor/skills/deploy/templates/deployment_procedures.md @@ -0,0 +1,103 @@ +# Deployment Procedures Template + +Save as `_docs/04_deploy/deployment_procedures.md`. 
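
The procedures below gate every deploy on health checks. A minimal sketch of that gate logic, using only the Python standard library for illustration (endpoints and thresholds are assumptions taken from the health-check table; the real deployment scripts are bash, per the deploy skill):

```python
# health_gate.py: poll readiness endpoints until healthy or the attempt budget runs out
import sys
import time
import urllib.request

SERVICES = {  # hypothetical endpoints; fill from the health checks table
    "api": "http://localhost:8080/health/ready",
}

def healthy(url: str) -> bool:
    try:
        with urllib.request.urlopen(url, timeout=2) as resp:
            return resp.status == 200
    except OSError:  # connection refused, timeout, or non-2xx (HTTPError)
        return False

def wait_for(url: str, attempts: int = 30, interval: float = 5.0) -> bool:
    for _ in range(attempts):  # mirrors the startup probe: 30 attempts, 5s apart
        if healthy(url):
            return True
        time.sleep(interval)
    return False

if __name__ == "__main__":
    failed = [name for name, url in SERVICES.items() if not wait_for(url)]
    for name in failed:
        print(f"UNHEALTHY: {name}", file=sys.stderr)
    sys.exit(1 if failed else 0)
```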
+ +--- + +```markdown +# [System Name] — Deployment Procedures + +## Deployment Strategy + +**Pattern**: [blue-green / rolling / canary] +**Rationale**: [why this pattern fits the architecture] +**Zero-downtime**: required for production deployments + +### Graceful Shutdown + +- Grace period: 30 seconds for in-flight requests +- Sequence: stop accepting new requests → drain connections → shutdown +- Container orchestrator: `terminationGracePeriodSeconds: 40` + +### Database Migration Ordering + +- Migrations run **before** new code deploys +- All migrations must be backward-compatible (old code works with new schema) +- Irreversible migrations require explicit approval + +## Health Checks + +| Check | Type | Endpoint | Interval | Failure Threshold | Action | +|-------|------|----------|----------|-------------------|--------| +| Liveness | HTTP GET | `/health/live` | 10s | 3 failures | Restart container | +| Readiness | HTTP GET | `/health/ready` | 5s | 3 failures | Remove from load balancer | +| Startup | HTTP GET | `/health/ready` | 5s | 30 attempts | Kill and recreate | + +### Health Check Responses + +- `/health/live`: returns 200 if process is running (no dependency checks) +- `/health/ready`: returns 200 if all dependencies (DB, cache, queues) are reachable + +## Staging Deployment + +1. CI/CD builds and pushes Docker images tagged with git SHA +2. Run database migrations against staging +3. Deploy new images to staging environment +4. Wait for health checks to pass (readiness probe) +5. Run smoke tests against staging +6. If smoke tests fail: automatic rollback to previous image + +## Production Deployment + +1. **Approval**: manual approval required via [mechanism] +2. **Pre-deploy checks**: + - [ ] Staging smoke tests passed + - [ ] Security scan clean + - [ ] Database migration reviewed + - [ ] Monitoring alerts configured + - [ ] Rollback plan confirmed +3. **Deploy**: apply deployment strategy (blue-green / rolling / canary) +4. **Verify**: health checks pass, error rate stable, latency within baseline +5. **Monitor**: observe dashboards for 15 minutes post-deploy +6. **Finalize**: mark deployment as successful or trigger rollback + +## Rollback Procedures + +### Trigger Criteria + +- Health check failures persist after deploy +- Error rate exceeds 5% for more than 5 minutes +- Critical alert fires within 15 minutes of deploy +- Manual decision by on-call engineer + +### Rollback Steps + +1. Redeploy previous Docker image tag (from CI/CD artifact) +2. Verify health checks pass +3. If database migration was applied: + - Run DOWN migration if reversible + - If irreversible: assess data impact, escalate if needed +4. Notify stakeholders +5. 
Schedule post-mortem within 24 hours + +### Post-Mortem + +Required after every production rollback: +- Timeline of events +- Root cause +- What went wrong +- Prevention measures + +## Deployment Checklist + +- [ ] All tests pass in CI +- [ ] Security scan clean (zero critical/high CVEs) +- [ ] Docker images built and pushed +- [ ] Database migrations reviewed and tested +- [ ] Environment variables configured for target environment +- [ ] Health check endpoints verified +- [ ] Monitoring alerts configured +- [ ] Rollback plan documented and tested +- [ ] Stakeholders notified of deployment window +- [ ] On-call engineer available during deployment +``` diff --git a/.cursor/skills/deploy/templates/environment_strategy.md b/.cursor/skills/deploy/templates/environment_strategy.md new file mode 100644 index 0000000..a257698 --- /dev/null +++ b/.cursor/skills/deploy/templates/environment_strategy.md @@ -0,0 +1,61 @@ +# Environment Strategy Template + +Save as `_docs/04_deploy/environment_strategy.md`. + +--- + +```markdown +# [System Name] — Environment Strategy + +## Environments + +| Environment | Purpose | Infrastructure | Data Source | +|-------------|---------|---------------|-------------| +| Development | Local developer workflow | docker-compose | Seed data, mocked externals | +| Staging | Pre-production validation | [mirrors production] | Anonymized production-like data | +| Production | Live system | [full infrastructure] | Real data | + +## Environment Variables + +### Required Variables + +| Variable | Purpose | Dev Default | Staging/Prod Source | +|----------|---------|-------------|-------------------| +| `DATABASE_URL` | Postgres connection | `postgres://dev:dev@db:5432/app` | Secret manager | +| [add all required variables] | | | | + +### `.env.example` + +```env +# Copy to .env and fill in values +DATABASE_URL=postgres://user:pass@host:5432/dbname +# [all required variables with placeholder values] +``` + +### Variable Validation + +All services validate required environment variables at startup and fail fast with a clear error message if any are missing. + +## Secrets Management + +| Environment | Method | Tool | +|-------------|--------|------| +| Development | `.env` file (git-ignored) | dotenv | +| Staging | Secret manager | [AWS Secrets Manager / Azure Key Vault / Vault] | +| Production | Secret manager | [AWS Secrets Manager / Azure Key Vault / Vault] | + +Rotation policy: [frequency and procedure] + +## Database Management + +| Environment | Type | Migrations | Data | +|-------------|------|-----------|------| +| Development | Docker Postgres, named volume | Applied on container start | Seed data via init script | +| Staging | Managed Postgres | Applied via CI/CD pipeline | Anonymized production snapshot | +| Production | Managed Postgres | Applied via CI/CD with approval | Live data | + +Migration rules: +- All migrations must be backward-compatible (support old and new code simultaneously) +- Reversible migrations required (DOWN/rollback script) +- Production migrations require review before apply +``` diff --git a/.cursor/skills/deploy/templates/observability.md b/.cursor/skills/deploy/templates/observability.md new file mode 100644 index 0000000..d34a517 --- /dev/null +++ b/.cursor/skills/deploy/templates/observability.md @@ -0,0 +1,132 @@ +# Observability Template + +Save as `_docs/04_deploy/observability.md`. + +--- + +```markdown +# [System Name] — Observability + +## Logging + +### Format + +Structured JSON to stdout/stderr. No file-based logging in containers. 
+ +```json +{ + "timestamp": "ISO8601", + "level": "INFO", + "service": "service-name", + "correlation_id": "uuid", + "message": "Event description", + "context": {} +} +``` + +### Log Levels + +| Level | Usage | Example | +|-------|-------|---------| +| ERROR | Exceptions, failures requiring attention | Database connection failed | +| WARN | Potential issues, degraded performance | Retry attempt 2/3 | +| INFO | Significant business events | User registered, Order placed | +| DEBUG | Detailed diagnostics (dev/staging only) | Request payload, Query params | + +### Retention + +| Environment | Destination | Retention | +|-------------|-------------|-----------| +| Development | Console | Session | +| Staging | [log aggregator] | 7 days | +| Production | [log aggregator] | 30 days | + +### PII Rules + +- Never log passwords, tokens, or session IDs +- Mask email addresses and personal identifiers +- Log user IDs (opaque) instead of usernames + +## Metrics + +### Endpoints + +Every service exposes Prometheus-compatible metrics at `/metrics`. + +### Application Metrics + +| Metric | Type | Description | +|--------|------|-------------| +| `request_count` | Counter | Total HTTP requests by method, path, status | +| `request_duration_seconds` | Histogram | Response time by method, path | +| `error_count` | Counter | Failed requests by type | +| `active_connections` | Gauge | Current open connections | + +### System Metrics + +- CPU usage, Memory usage, Disk I/O, Network I/O + +### Business Metrics + +| Metric | Type | Description | Source | +|--------|------|-------------|--------| +| [from acceptance criteria] | | | | + +Collection interval: 15 seconds + +## Distributed Tracing + +### Configuration + +- SDK: OpenTelemetry +- Propagation: W3C Trace Context via HTTP headers +- Span naming: `[service].[operation]` + +### Sampling + +| Environment | Rate | Rationale | +|-------------|------|-----------| +| Development | 100% | Full visibility | +| Staging | 100% | Full visibility | +| Production | 10% | Balance cost vs observability | + +### Integration Points + +- HTTP requests: automatic instrumentation +- Database queries: automatic instrumentation +- Message queues: manual span creation on publish/consume + +## Alerting + +| Severity | Response Time | Conditions | +|----------|---------------|-----------| +| Critical | 5 min | Service unreachable, health check failed for 1 min, data loss detected | +| High | 30 min | Error rate > 5% for 5 min, P95 latency > 2x baseline for 10 min | +| Medium | 4 hours | Disk usage > 80%, elevated latency, connection pool exhaustion | +| Low | Next business day | Non-critical warnings, deprecated API usage | + +### Notification Channels + +| Severity | Channel | +|----------|---------| +| Critical | [PagerDuty / phone] | +| High | [Slack + email] | +| Medium | [Slack] | +| Low | [Dashboard only] | + +## Dashboards + +### Operations Dashboard + +- Service health status (up/down per component) +- Request rate and error rate +- Response time percentiles (P50, P95, P99) +- Resource utilization (CPU, memory per container) +- Active alerts + +### Business Dashboard + +- [Key business metrics from acceptance criteria] +- [User activity indicators] +- [Transaction volumes] +``` diff --git a/.cursor/skills/document/SKILL.md b/.cursor/skills/document/SKILL.md new file mode 100644 index 0000000..c920555 --- /dev/null +++ b/.cursor/skills/document/SKILL.md @@ -0,0 +1,515 @@ +--- +name: document +description: | + Bottom-up codebase documentation skill.
Analyzes existing code from modules up through components + to architecture, then retrospectively derives problem/restrictions/acceptance criteria. + Produces the same _docs/ artifacts as the problem, research, and plan skills, but from code + analysis instead of user interview. + Trigger phrases: + - "document", "document codebase", "document this project" + - "documentation", "generate documentation", "create documentation" + - "reverse-engineer docs", "code to docs" + - "analyze and document" +category: build +tags: [documentation, code-analysis, reverse-engineering, architecture, bottom-up] +disable-model-invocation: true +--- + +# Bottom-Up Codebase Documentation + +Analyze an existing codebase from the bottom up — individual modules first, then components, then system-level architecture — and produce the same `_docs/` artifacts that the `problem` and `plan` skills generate, without requiring user interview. + +## Core Principles + +- **Bottom-up always**: module docs -> component specs -> architecture/flows -> solution -> problem extraction. Every higher level is synthesized from the level below. +- **Dependencies first**: process modules in topological order (leaves first). When documenting module X, all of X's dependencies already have docs. +- **Incremental context**: each module's doc uses already-written dependency docs as context — no ever-growing chain. +- **Verify against code**: cross-reference every entity in generated docs against actual codebase. Catch hallucinations. +- **Save immediately**: write each artifact as soon as its step completes. Enable resume from any checkpoint. +- **Ask, don't assume**: when code intent is ambiguous, ASK the user before proceeding. + +## Context Resolution + +Fixed paths: + +- DOCUMENT_DIR: `_docs/02_document/` +- SOLUTION_DIR: `_docs/01_solution/` +- PROBLEM_DIR: `_docs/00_problem/` + +Optional input: + +- FOCUS_DIR: a specific directory subtree provided by the user (e.g., `/document @src/api/`). When set, only this subtree and its transitive dependencies are analyzed. + +Announce resolved paths (and FOCUS_DIR if set) to user before proceeding. + +## Mode Detection + +Determine the execution mode before any other logic: + +| Mode | Trigger | Scope | +|------|---------|-------| +| **Full** | No input file, no existing state | Entire codebase | +| **Focus Area** | User provides a directory path (e.g., `@src/api/`) | Only the specified subtree + transitive dependencies | +| **Resume** | `state.json` exists in DOCUMENT_DIR | Continue from last checkpoint | + +Focus Area mode produces module + component docs for the targeted area only. It can be run repeatedly for different areas — each run appends to the existing module and component docs without overwriting other areas. + +## Prerequisite Checks + +1. If `_docs/` already exists and contains files AND mode is **Full**, ASK user: **overwrite, merge, or write to `_docs_generated/` instead?** +2. Create DOCUMENT_DIR, SOLUTION_DIR, and PROBLEM_DIR if they don't exist +3. If DOCUMENT_DIR contains a `state.json`, offer to **resume from last checkpoint or start fresh** +4. If FOCUS_DIR is set, verify the directory exists and contains source files — **STOP if missing** + +## Progress Tracking + +Create a TodoWrite with all steps (0 through 7). Update status as each step completes. + +## Workflow + +### Step 0: Codebase Discovery + +**Role**: Code analyst +**Goal**: Build a complete map of the codebase (or targeted subtree) before analyzing any code. 
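
As a concrete illustration of the dependency-graph scan (item 8 in the list below), a rough sketch for a Python codebase; the regex-based import detection is an approximation of real import resolution, and module names are taken from file stems:

```python
# import_graph.py: approximate module dependency graph from import statements
import pathlib
import re

IMPORT_RE = re.compile(r"^\s*(?:from|import)\s+([\w.]+)", re.MULTILINE)

def build_graph(root: str = ".") -> dict[str, set[str]]:
    modules = {p.stem: p for p in pathlib.Path(root).rglob("*.py")}
    graph: dict[str, set[str]] = {name: set() for name in modules}
    for name, path in modules.items():
        for target in IMPORT_RE.findall(path.read_text(errors="ignore")):
            top = target.split(".")[0]
            if top in modules and top != name:  # keep internal edges only
                graph[name].add(top)
    return graph

if __name__ == "__main__":
    graph = build_graph()
    leaves = sorted(m for m, deps in graph.items() if not deps)
    print("leaf modules:", leaves)
```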
+ +**Focus Area scoping**: if FOCUS_DIR is set, limit the scan to that directory subtree. Still identify transitive dependencies outside FOCUS_DIR (modules that FOCUS_DIR imports) and include them in the processing order, but skip modules that are neither inside FOCUS_DIR nor dependencies of it. + +Scan and catalog: + +1. Directory tree (ignore `node_modules`, `.git`, `__pycache__`, `bin/`, `obj/`, build artifacts) +2. Language detection from file extensions and config files +3. Package manifests: `package.json`, `requirements.txt`, `pyproject.toml`, `*.csproj`, `Cargo.toml`, `go.mod` +4. Config files: `Dockerfile`, `docker-compose.yml`, `.env.example`, CI/CD configs (`.github/workflows/`, `.gitlab-ci.yml`, `azure-pipelines.yml`) +5. Entry points: `main.*`, `app.*`, `index.*`, `Program.*`, startup scripts +6. Test structure: test directories, test frameworks, test runner configs +7. Existing documentation: README, `docs/`, wiki references, inline doc coverage +8. **Dependency graph**: build a module-level dependency graph by analyzing imports/references. Identify: + - Leaf modules (no internal dependencies) + - Entry points (no internal dependents) + - Cycles (mark for grouped analysis) + - Topological processing order + - If FOCUS_DIR: mark which modules are in-scope vs dependency-only + +**Save**: `DOCUMENT_DIR/00_discovery.md` containing: +- Directory tree (concise, relevant directories only) +- Tech stack summary table (language, framework, database, infra) +- Dependency graph (textual list + Mermaid diagram) +- Topological processing order +- Entry points and leaf modules + +**Save**: `DOCUMENT_DIR/state.json` with initial state: +```json +{ + "current_step": "module-analysis", + "completed_steps": ["discovery"], + "focus_dir": null, + "modules_total": 0, + "modules_documented": [], + "modules_remaining": [], + "module_batch": 0, + "components_written": [], + "last_updated": "" +} +``` + +Set `focus_dir` to the FOCUS_DIR path if in Focus Area mode, or `null` for Full mode. + +--- + +### Step 1: Module-Level Documentation + +**Role**: Code analyst +**Goal**: Document every identified module individually, processing in topological order (leaves first). + +**Batched processing**: process modules in batches of ~5 (sorted by topological order). After each batch: save all module docs, update `state.json`, present a progress summary. Between batches, evaluate whether to suggest a session break. + +For each module in topological order: + +1. **Read**: read the module's source code. Assess complexity and what context is needed. +2. **Gather context**: collect already-written docs of this module's dependencies (available because of bottom-up order). Note external library usage. +3. **Write module doc** with these sections: + - **Purpose**: one-sentence responsibility + - **Public interface**: exported functions/classes/methods with signatures, input/output types + - **Internal logic**: key algorithms, patterns, non-obvious behavior + - **Dependencies**: what it imports internally and why + - **Consumers**: what uses this module (from the dependency graph) + - **Data models**: entities/types defined in this module + - **Configuration**: env vars, config keys consumed + - **External integrations**: HTTP calls, DB queries, queue operations, file I/O + - **Security**: auth checks, encryption, input validation, secrets access + - **Tests**: what tests exist for this module, what they cover +4. **Verify**: cross-check that every entity referenced in the doc exists in the codebase. Flag uncertainties. 
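
A sketch of the batch scheduling this step implies, using the dependency graph from Step 0; the stdlib `graphlib` resolves the topological order (leaves first), and the batch size of ~5 matches the text:

```python
# batches.py: group modules into dependency-ordered batches of ~5
from graphlib import TopologicalSorter

def topo_batches(graph: dict[str, set[str]], size: int = 5) -> list[list[str]]:
    # graph maps module -> set of modules it depends on; output lists leaves first
    order = list(TopologicalSorter(graph).static_order())
    return [order[i:i + size] for i in range(0, len(order), size)]

# usage: each batch is documented, saved, and checkpointed before the next
if __name__ == "__main__":
    graph = {"app": {"db", "auth"}, "auth": {"db"}, "db": set()}
    for i, batch in enumerate(topo_batches(graph), start=1):
        print(f"batch {i}: {batch}")
```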
+ +**Cycle handling**: modules in a dependency cycle are analyzed together as a group, producing a single combined doc. + +**Large modules**: if a module exceeds comfortable analysis size, split into logical sub-sections and analyze each part, then combine. + +**Save**: `DOCUMENT_DIR/modules/[module_name].md` for each module. +**State**: update `state.json` after each module completes (move from `modules_remaining` to `modules_documented`). Increment `module_batch` after each batch of ~5. + +**Session break heuristic**: after each batch, if more than 10 modules remain AND 2+ batches have already completed in this session, suggest a session break: + +``` +══════════════════════════════════════ + SESSION BREAK SUGGESTED +══════════════════════════════════════ + Modules documented: [X] of [Y] + Batches completed this session: [N] +══════════════════════════════════════ + A) Continue in this conversation + B) Save and continue in a fresh conversation (recommended) +══════════════════════════════════════ + Recommendation: B — fresh context improves + analysis quality for remaining modules +══════════════════════════════════════ +``` + +Re-entry is seamless: `state.json` tracks exactly which modules are done. + +--- + +### Step 2: Component Assembly + +**Role**: Software architect +**Goal**: Group related modules into logical components and produce component specs. + +1. Analyze module docs from Step 1 to identify natural groupings: + - By directory structure (most common) + - By shared data models or common purpose + - By dependency clusters (tightly coupled modules) +2. For each identified component, synthesize its module docs into a single component specification using `templates/component-spec.md` as structure: + - High-level overview: purpose, pattern, upstream/downstream + - Internal interfaces: method signatures, DTOs (from actual module code) + - External API specification (if the component exposes HTTP/gRPC endpoints) + - Data access patterns: queries, caching, storage estimates + - Implementation details: algorithmic complexity, state management, key libraries + - Extensions and helpers: shared utilities needed + - Caveats and edge cases: limitations, race conditions, bottlenecks + - Dependency graph: implementation order relative to other components + - Logging strategy +3. Identify common helpers shared across multiple components -> document in `common-helpers/` +4. Generate component relationship diagram (Mermaid) + +**Self-verification**: +- [ ] Every module from Step 1 is covered by exactly one component +- [ ] No component has overlapping responsibility with another +- [ ] Inter-component interfaces are explicit (who calls whom, with what) +- [ ] Component dependency graph has no circular dependencies + +**Save**: +- `DOCUMENT_DIR/components/[##]_[name]/description.md` per component +- `DOCUMENT_DIR/common-helpers/[##]_helper_[name].md` per shared helper +- `DOCUMENT_DIR/diagrams/components.md` (Mermaid component diagram) + +**BLOCKING**: Present component list with one-line summaries to user. Do NOT proceed until user confirms the component breakdown is correct. + +--- + +### Step 3: System-Level Synthesis + +**Role**: Software architect +**Goal**: From component docs, synthesize system-level documents. + +All documents here are derived from component docs (Step 2) + module docs (Step 1). No new code reading should be needed. If it is, that indicates a gap in Steps 1-2 — go back and fill it. + +#### 3a. 
Architecture + +Using `templates/architecture.md` as structure: + +- System context and boundaries from entry points and external integrations +- Tech stack table from discovery (Step 0) + component specs +- Deployment model from Dockerfiles, CI configs, environment strategies +- Data model overview from per-component data access sections +- Integration points from inter-component interfaces +- NFRs from test thresholds, config limits, health checks +- Security architecture from per-module security observations +- Key ADRs inferred from technology choices and patterns + +**Save**: `DOCUMENT_DIR/architecture.md` + +#### 3b. System Flows + +Using `templates/system-flows.md` as structure: + +- Trace main flows through the component interaction graph +- Entry point -> component chain -> output for each major flow +- Mermaid sequence diagrams and flowcharts +- Error scenarios from exception handling patterns +- Data flow tables per flow + +**Save**: `DOCUMENT_DIR/system-flows.md` and `DOCUMENT_DIR/diagrams/flows/flow_[name].md` + +#### 3c. Data Model + +- Consolidate all data models from module docs +- Entity-relationship diagram (Mermaid ERD) +- Migration strategy (if ORM/migration tooling detected) +- Seed data observations +- Backward compatibility approach (if versioning found) + +**Save**: `DOCUMENT_DIR/data_model.md` + +#### 3d. Deployment (if Dockerfile/CI configs exist) + +- Containerization summary +- CI/CD pipeline structure +- Environment strategy (dev, staging, production) +- Observability (logging patterns, metrics, health checks found in code) + +**Save**: `DOCUMENT_DIR/deployment/` (containerization.md, ci_cd_pipeline.md, environment_strategy.md, observability.md — only files for which sufficient code evidence exists) + +--- + +### Step 4: Verification Pass + +**Role**: Quality verifier +**Goal**: Compare every generated document against actual code. Fix hallucinations, fill gaps, correct inaccuracies. + +For each document generated in Steps 1-3: + +1. **Entity verification**: extract all code entities (class names, function names, module names, endpoints) mentioned in the doc. Cross-reference each against the actual codebase. Flag any that don't exist. +2. **Interface accuracy**: for every method signature, DTO, or API endpoint in component specs, verify it matches actual code. +3. **Flow correctness**: for each system flow diagram, trace the actual code path and verify the sequence matches. +4. **Completeness check**: are there modules or components discovered in Step 0 that aren't covered by any document? Flag gaps. +5. **Consistency check**: do component docs agree with architecture doc? Do flow diagrams match component interfaces? + +Apply corrections inline to the documents that need them. + +**Save**: `DOCUMENT_DIR/04_verification_log.md` with: +- Total entities verified vs flagged +- Corrections applied (which document, what changed) +- Remaining gaps or uncertainties +- Completeness score (modules covered / total modules) + +**BLOCKING**: Present verification summary to user. Do NOT proceed until user confirms corrections are acceptable or requests additional fixes. + +**Session boundary**: After verification is confirmed, suggest a session break before proceeding to the synthesis steps (5–7). These steps produce different artifact types and benefit from fresh context: + +``` +══════════════════════════════════════ + VERIFICATION COMPLETE — session break? +══════════════════════════════════════ + Steps 0–4 (analysis + verification) are done. 
+ Steps 5–7 (solution + problem extraction + report) + can run in a fresh conversation. +══════════════════════════════════════ + A) Continue in this conversation + B) Save and continue in a new conversation (recommended) +══════════════════════════════════════ +``` + +If **Focus Area mode**: Steps 5–7 are skipped (they require full codebase coverage). Present a summary of modules and components documented for this area. The user can run `/document` again for another area, or run without FOCUS_DIR once all areas are covered to produce the full synthesis. + +--- + +### Step 5: Solution Extraction (Retrospective) + +**Role**: Software architect +**Goal**: From all verified technical documentation, retrospectively create `solution.md` — the same artifact the research skill produces. This makes downstream skills (`plan`, `deploy`, `decompose`) compatible with the documented codebase. + +Synthesize from architecture (Step 3) + component specs (Step 2) + system flows (Step 3) + verification findings (Step 4): + +1. **Product Solution Description**: what the system is, brief component interaction diagram (Mermaid) +2. **Architecture**: the architecture that is implemented, with per-component solution tables: + +| Solution | Tools | Advantages | Limitations | Requirements | Security | Cost | Fit | +|----------|-------|-----------|-------------|-------------|----------|------|-----| +| [actual implementation] | [libs/platforms used] | [observed strengths] | [observed limitations] | [requirements met] | [security approach] | [cost indicators] | [fitness assessment] | + +3. **Testing Strategy**: summarize integration/functional tests and non-functional tests found in the codebase +4. **References**: links to key config files, Dockerfiles, CI configs that evidence the solution choices + +**Save**: `SOLUTION_DIR/solution.md` (`_docs/01_solution/solution.md`) + +--- + +### Step 6: Problem Extraction (Retrospective) + +**Role**: Business analyst +**Goal**: From all verified technical docs, retrospectively derive the high-level problem definition — producing the same documents the `problem` skill creates through interview. + +This is the inverse of normal workflow: instead of problem -> solution -> code, we go code -> technical docs -> problem understanding. + +#### 6a. `problem.md` + +- Synthesize from architecture overview + component purposes + system flows +- What is this system? What problem does it solve? Who are the users? How does it work at a high level? +- Cross-reference with README if one exists +- Free-form text, concise, readable by someone unfamiliar with the project + +#### 6b. `restrictions.md` + +- Extract from: tech stack choices, Dockerfile specs (OS, base images), CI configs (platform constraints), dependency versions, environment configs +- Categorize with headers: Hardware, Software, Environment, Operational +- Each restriction should be specific and testable + +#### 6c. `acceptance_criteria.md` + +- Derive from: test assertions (expected values, thresholds), performance configs (timeouts, rate limits, batch sizes), health check endpoints, validation rules in code +- Categorize with headers by domain +- Every criterion must have a measurable value — if only implied, note the source + +#### 6d. `input_data/` + +- Document data schemas found (DB schemas, API request/response types, config file formats) +- Create `data_parameters.md` describing what data the system consumes, formats, volumes, update patterns + +#### 6e. 
`security_approach.md` (only if security code found) + +- Authentication mechanisms, authorization patterns, encryption, secrets handling, CORS, rate limiting, input sanitization — all from code observations +- If no security-relevant code found, skip this file + +**Save**: all files to `PROBLEM_DIR/` (`_docs/00_problem/`) + +**BLOCKING**: Present all problem documents to user. These are the most abstracted and therefore most prone to interpretation error. Do NOT proceed until user confirms or requests corrections. + +--- + +### Step 7: Final Report + +**Role**: Technical writer +**Goal**: Produce `FINAL_report.md` integrating all generated documentation. + +Using `templates/final-report.md` as structure: + +- Executive summary from architecture + problem docs +- Problem statement (transformed from problem.md, not copy-pasted) +- Architecture overview with tech stack one-liner +- Component summary table (number, name, purpose, dependencies) +- System flows summary table +- Risk observations from verification log (Step 4) +- Open questions (uncertainties flagged during analysis) +- Artifact index listing all generated documents with paths + +**Save**: `DOCUMENT_DIR/FINAL_report.md` + +**State**: update `state.json` with `current_step: "complete"`. + +--- + +## Artifact Management + +### Directory Structure + +``` +_docs/ +├── 00_problem/ # Step 6 (retrospective) +│ ├── problem.md +│ ├── restrictions.md +│ ├── acceptance_criteria.md +│ ├── input_data/ +│ │ └── data_parameters.md +│ └── security_approach.md +├── 01_solution/ # Step 5 (retrospective) +│ └── solution.md +└── 02_document/ # DOCUMENT_DIR + ├── 00_discovery.md # Step 0 + ├── modules/ # Step 1 + │ ├── [module_name].md + │ └── ... + ├── components/ # Step 2 + │ ├── 01_[name]/description.md + │ ├── 02_[name]/description.md + │ └── ... + ├── common-helpers/ # Step 2 + ├── architecture.md # Step 3 + ├── system-flows.md # Step 3 + ├── data_model.md # Step 3 + ├── deployment/ # Step 3 + ├── diagrams/ # Steps 2-3 + │ ├── components.md + │ └── flows/ + ├── 04_verification_log.md # Step 4 + ├── FINAL_report.md # Step 7 + └── state.json # Resumability +``` + +### Resumability + +Maintain `DOCUMENT_DIR/state.json`: + +```json +{ + "current_step": "module-analysis", + "completed_steps": ["discovery"], + "focus_dir": null, + "modules_total": 12, + "modules_documented": ["utils/helpers", "models/user"], + "modules_remaining": ["services/auth", "api/endpoints"], + "module_batch": 1, + "components_written": [], + "last_updated": "2026-03-21T14:00:00Z" +} +``` + +Update after each module/component completes. If interrupted, resume from next undocumented module. + +When resuming: +1. Read `state.json` +2. Cross-check against actual files in DOCUMENT_DIR (trust files over state if they disagree) +3. Continue from the next incomplete item +4. Inform user which steps are being skipped + +### Save Principles + +1. **Save immediately**: write each module doc as soon as analysis completes +2. **Incremental context**: each subsequent module uses already-written docs as context +3. **Preserve intermediates**: keep all module docs even after synthesis into component docs +4. 
**Enable recovery**: state file tracks exact progress for resume + +## Escalation Rules + +| Situation | Action | +|-----------|--------| +| Minified/obfuscated code detected | WARN user, skip module, note in verification log | +| Module too large for context window | Split into sub-sections, analyze parts separately, combine | +| Cycle in dependency graph | Group cycled modules, analyze together as one doc | +| Generated code (protobuf, swagger-gen) | Note as generated, document the source spec instead | +| No tests found in codebase | Note gap in acceptance_criteria.md, derive AC from validation rules and config limits only | +| Contradictions between code and README | Flag in verification log, ASK user | +| Binary files or non-code assets | Skip, note in discovery | +| `_docs/` already exists | ASK user: overwrite, merge, or use `_docs_generated/` | +| Code intent is ambiguous | ASK user, do not guess | + +## Common Mistakes + +- **Top-down guessing**: never infer architecture before documenting modules. Build up, don't assume down. +- **Hallucinating entities**: always verify that referenced classes/functions/endpoints actually exist in code. +- **Skipping modules**: every source module must appear in exactly one module doc and one component. +- **Monolithic analysis**: don't try to analyze the entire codebase in one pass. Module by module, in order. +- **Inventing restrictions**: only document constraints actually evidenced in code, configs, or Dockerfiles. +- **Vague acceptance criteria**: "should be fast" is not a criterion. Extract actual numeric thresholds from code. +- **Writing code**: this skill produces documents, never implementation code. + +## Methodology Quick Reference + +``` +┌──────────────────────────────────────────────────────────────────┐ +│ Bottom-Up Codebase Documentation (8-Step) │ +├──────────────────────────────────────────────────────────────────┤ +│ MODE: Full / Focus Area (@dir) / Resume (state.json) │ +│ PREREQ: Check _docs/ exists (overwrite/merge/new?) │ +│ PREREQ: Check state.json for resume │ +│ │ +│ 0. Discovery → dependency graph, tech stack, topo order │ +│ (Focus Area: scoped to FOCUS_DIR + transitive deps) │ +│ 1. Module Docs → per-module analysis (leaves first) │ +│ (batched ~5 modules; session break between batches) │ +│ 2. Component Assembly → group modules, write component specs │ +│ [BLOCKING: user confirms components] │ +│ 3. System Synthesis → architecture, flows, data model, deploy │ +│ 4. Verification → compare all docs vs code, fix errors │ +│ [BLOCKING: user reviews corrections] │ +│ [SESSION BREAK suggested before Steps 5–7] │ +│ ── Focus Area mode stops here ── │ +│ 5. Solution Extraction → retrospective solution.md │ +│ 6. Problem Extraction → retrospective problem, restrictions, AC │ +│ [BLOCKING: user confirms problem docs] │ +│ 7. 
Final Report → FINAL_report.md │ +├──────────────────────────────────────────────────────────────────┤ +│ Principles: Bottom-up always · Dependencies first │ +│ Incremental context · Verify against code │ +│ Save immediately · Resume from checkpoint │ +│ Batch modules · Session breaks for large codebases │ +└──────────────────────────────────────────────────────────────────┘ +``` diff --git a/.cursor/skills/implement/SKILL.md b/.cursor/skills/implement/SKILL.md new file mode 100644 index 0000000..cf44a57 --- /dev/null +++ b/.cursor/skills/implement/SKILL.md @@ -0,0 +1,194 @@ +--- +name: implement +description: | + Orchestrate task implementation with dependency-aware batching, parallel subagents, and integrated code review. + Reads flat task files and _dependencies_table.md from TASKS_DIR, computes execution batches via topological sort, + launches up to 4 implementer subagents in parallel, runs code-review skill after each batch, and loops until done. + Use after /decompose has produced task files. + Trigger phrases: + - "implement", "start implementation", "implement tasks" + - "run implementers", "execute tasks" +category: build +tags: [implementation, orchestration, batching, parallel, code-review] +disable-model-invocation: true +--- + +# Implementation Orchestrator + +Orchestrate the implementation of all tasks produced by the `/decompose` skill. This skill is a **pure orchestrator** — it does NOT write implementation code itself. It reads task specs, computes execution order, delegates to `implementer` subagents, validates results via the `/code-review` skill, and escalates issues. + +The `implementer` agent is the specialist that writes all the code — it receives a task spec, analyzes the codebase, implements the feature, writes tests, and verifies acceptance criteria. + +## Core Principles + +- **Orchestrate, don't implement**: this skill delegates all coding to `implementer` subagents +- **Dependency-aware batching**: tasks run only when all their dependencies are satisfied +- **Max 4 parallel agents**: never launch more than 4 implementer subagents simultaneously +- **File isolation**: no two parallel agents may write to the same file +- **Integrated review**: `/code-review` skill runs automatically after each batch +- **Auto-start**: batches launch immediately — no user confirmation before a batch +- **Gate on failure**: user confirmation is required only when code review returns FAIL +- **Commit and push per batch**: after each batch is confirmed, commit and push to remote + +## Context Resolution + +- TASKS_DIR: `_docs/02_tasks/` +- Task files: all `*.md` files in TASKS_DIR (excluding files starting with `_`) +- Dependency table: `TASKS_DIR/_dependencies_table.md` + +## Prerequisite Checks (BLOCKING) + +1. TASKS_DIR exists and contains at least one task file — **STOP if missing** +2. `_dependencies_table.md` exists — **STOP if missing** +3. At least one task is not yet completed — **STOP if all done** + +## Algorithm + +### 1. Parse + +- Read all task `*.md` files from TASKS_DIR (excluding files starting with `_`) +- Read `_dependencies_table.md` — parse into a dependency graph (DAG) +- Validate: no circular dependencies, all referenced dependencies exist + +### 2. Detect Progress + +- Scan the codebase to determine which tasks are already completed +- Match implemented code against task acceptance criteria +- Mark completed tasks as done in the DAG +- Report progress to user: "X of Y tasks completed" + +### 3. 
Compute Next Batch + +- Topological sort remaining tasks +- Select tasks whose dependencies are ALL satisfied (completed) +- If a ready task depends on any task currently being worked on in this batch, it must wait for the next batch +- Cap the batch at 4 parallel agents +- If the batch would exceed 20 total complexity points, suggest splitting and let the user decide + +### 4. Assign File Ownership + +For each task in the batch: +- Parse the task spec's Component field and Scope section +- Map the component to directories/files in the project +- Determine: files OWNED (exclusive write), files READ-ONLY (shared interfaces, types), files FORBIDDEN (other agents' owned files) +- If two tasks in the same batch would modify the same file, schedule them sequentially instead of in parallel + +### 5. Update Tracker Status → In Progress + +For each task in the batch, transition its ticket status to **In Progress** via the configured work item tracker (Jira MCP or Azure DevOps MCP — see `protocols.md` for detection) before launching the implementer. If `tracker: local`, skip this step. + +### 6. Launch Implementer Subagents + +For each task in the batch, launch an `implementer` subagent with: +- Path to the task spec file +- List of files OWNED (exclusive write access) +- List of files READ-ONLY +- List of files FORBIDDEN + +Launch all subagents immediately — no user confirmation. + +### 7. Monitor + +- Wait for all subagents to complete +- Collect structured status reports from each implementer +- If any implementer reports "Blocked", log the blocker and continue with others + +**Stuck detection** — while monitoring, watch for these signals per subagent: +- Same file modified 3+ times without test pass rate improving → flag as stuck, stop the subagent, report as Blocked +- Subagent has not produced new output for an extended period → flag as potentially hung +- If a subagent is flagged as stuck, do NOT let it continue looping — stop it and record the blocker in the batch report + +### 8. Code Review + +- Run `/code-review` skill on the batch's changed files + corresponding task specs +- The code-review skill produces a verdict: PASS, PASS_WITH_WARNINGS, or FAIL + +### 9. Auto-Fix Gate + +Auto-fix loop with bounded retries (max 2 attempts) before escalating to user: + +1. If verdict is **PASS** or **PASS_WITH_WARNINGS**: show findings as info, continue automatically to step 10 +2. If verdict is **FAIL** (attempt 1 or 2): + - Parse the code review findings (Critical and High severity items) + - For each finding, attempt an automated fix using the finding's location, description, and suggestion + - Re-run `/code-review` on the modified files + - If now PASS or PASS_WITH_WARNINGS → continue to step 10 + - If still FAIL → increment retry counter, repeat from (2) up to max 2 attempts +3. If still **FAIL** after 2 auto-fix attempts: present all findings to user (**BLOCKING**). User must confirm fixes or accept before proceeding. + +Track `auto_fix_attempts` count in the batch report for retrospective analysis. + +### 10. Test + +- Run the full test suite +- If failures: report to user with details + +### 11. Commit and Push + +- After user confirms the batch (explicitly for FAIL, implicitly for PASS/PASS_WITH_WARNINGS): + - `git add` all changed files from the batch + - `git commit` with a message that includes ALL task IDs (Jira IDs, ADO IDs, or numeric prefixes) of tasks implemented in the batch, followed by a summary of what was implemented. Format: `[TASK-ID-1] [TASK-ID-2] ... 
Summary of changes`
+  - `git push` to the remote branch
+
+### 12. Update Tracker Status → In Testing
+
+After the batch is committed and pushed, transition the ticket status of each task in the batch to **In Testing** via the configured work item tracker. If `tracker: local`, skip this step.
+
+### 13. Loop
+
+- Go back to step 2 until all tasks are done
+- When all tasks are complete, report final summary
+
+## Batch Report Persistence
+
+After each batch completes, save the batch report to `_docs/03_implementation/batch_[NN]_report.md`. Create the directory if it doesn't exist. When all tasks are complete, produce `_docs/03_implementation/FINAL_implementation_report.md` with a summary of all batches.
+
+## Batch Report
+
+After each batch, produce a structured report:
+
+```markdown
+# Batch Report
+
+**Batch**: [N]
+**Tasks**: [list]
+**Date**: [YYYY-MM-DD]
+
+## Task Results
+
+| Task | Status | Files Modified | Tests | Issues |
+|------|--------|---------------|-------|--------|
+| [JIRA-ID]_[name] | Done | [count] files | [pass/fail] | [count or None] |
+
+## Code Review Verdict: [PASS/FAIL/PASS_WITH_WARNINGS]
+## Auto-Fix Attempts: [0/1/2]
+## Stuck Agents: [count or None]
+
+## Next Batch: [task list] or "All tasks complete"
+```
+
+## Stop Conditions and Escalation
+
+| Situation | Action |
+|-----------|--------|
+| Implementer fails same approach 3+ times | Stop it, escalate to user |
+| Task blocked on external dependency (not in task list) | Report and skip |
+| File ownership conflict unresolvable | ASK user |
+| Test failures exceed 50% of suite after a batch | Stop and escalate |
+| All tasks complete | Report final summary, suggest final commit |
+| `_dependencies_table.md` missing | STOP — run `/decompose` first |
+
+## Recovery
+
+Each batch commit serves as a rollback checkpoint. If recovery is needed:
+
+- **Tests fail after a batch commit**: `git revert <commit-hash>` using the hash from the batch report in `_docs/03_implementation/`
+- **Resuming after interruption**: Read `_docs/03_implementation/batch_*_report.md` files to determine which batches completed, then continue from the next batch
+- **Multiple consecutive batches fail**: Stop and escalate to user with links to batch reports and commit hashes
+
+## Safety Rules
+
+- Never launch tasks whose dependencies are not yet completed
+- Never allow two parallel agents to write to the same file
+- If a subagent fails or is flagged as stuck, stop it and report — do not let it loop indefinitely
+- Always run tests after each batch completes
diff --git a/.cursor/skills/implement/references/batching-algorithm.md b/.cursor/skills/implement/references/batching-algorithm.md
new file mode 100644
index 0000000..74a1c29
--- /dev/null
+++ b/.cursor/skills/implement/references/batching-algorithm.md
@@ -0,0 +1,31 @@
+# Batching Algorithm Reference
+
+## Topological Sort with Batch Grouping
+
+The `/implement` skill uses a topological sort to determine execution order,
+then groups tasks into batches for parallel execution.
+
+## Algorithm
+
+1. Build adjacency list from `_dependencies_table.md`
+2. Compute in-degree for each task node
+3. Initialize batch 0 with all nodes that have in-degree 0
+4. For each batch:
+   a. Select up to 4 tasks from the ready set
+   b. Check file ownership — if two tasks would write the same file, defer one to the next batch
+   c. Launch selected tasks as parallel implementer subagents
+   d. When all complete, remove them from the graph and decrement in-degrees of dependents
+   e. 
Add newly zero-in-degree nodes to the next batch's ready set +5. Repeat until the graph is empty + +## File Ownership Conflict Resolution + +When two tasks in the same batch map to overlapping files: +- Prefer to run the lower-numbered task first (it's more foundational) +- Defer the higher-numbered task to the next batch +- If both have equal priority, ask the user + +## Complexity Budget + +Each batch should not exceed 20 total complexity points. +If it does, split the batch and let the user choose which tasks to include. diff --git a/.cursor/skills/implement/templates/batch-report.md b/.cursor/skills/implement/templates/batch-report.md new file mode 100644 index 0000000..33e2616 --- /dev/null +++ b/.cursor/skills/implement/templates/batch-report.md @@ -0,0 +1,36 @@ +# Batch Report Template + +Use this template after each implementation batch completes. + +--- + +```markdown +# Batch Report + +**Batch**: [N] +**Tasks**: [list of task names] +**Date**: [YYYY-MM-DD] + +## Task Results + +| Task | Status | Files Modified | Tests | Issues | +|------|--------|---------------|-------|--------| +| [JIRA-ID]_[name] | Done/Blocked/Partial | [count] files | [X/Y pass] | [count or None] | + +## Code Review Verdict: [PASS / FAIL / PASS_WITH_WARNINGS] + +[Link to code review report if FAIL or PASS_WITH_WARNINGS] + +## Test Suite + +- Total: [N] tests +- Passed: [N] +- Failed: [N] +- Skipped: [N] + +## Commit + +[Suggested commit message] + +## Next Batch: [task list] or "All tasks complete" +``` diff --git a/.cursor/skills/new-task/SKILL.md b/.cursor/skills/new-task/SKILL.md new file mode 100644 index 0000000..e68ff4c --- /dev/null +++ b/.cursor/skills/new-task/SKILL.md @@ -0,0 +1,302 @@ +--- +name: new-task +description: | + Interactive skill for adding new functionality to an existing codebase. + Guides the user through describing the feature, assessing complexity, + optionally running research, analyzing the codebase for insertion points, + validating assumptions with the user, and producing a task spec with Jira ticket. + Supports a loop — the user can add multiple tasks in one session. + Trigger phrases: + - "new task", "add feature", "new functionality" + - "I want to add", "new component", "extend" +category: build +tags: [task, feature, interactive, planning, jira] +disable-model-invocation: true +--- + +# New Task (Interactive Feature Planning) + +Guide the user through defining new functionality for an existing codebase. Produces one or more task specifications with Jira tickets, optionally running deep research for complex features. + +## Core Principles + +- **User-driven**: every task starts with the user's description; never invent requirements +- **Right-size research**: only invoke the research skill when the change is big enough to warrant it +- **Validate before committing**: surface all assumptions and uncertainties to the user before writing the task file +- **Save immediately**: write task files to disk as soon as they are ready; never accumulate unsaved work +- **Ask, don't assume**: when scope, insertion point, or approach is unclear, STOP and ask the user + +## Context Resolution + +Fixed paths: + +- TASKS_DIR: `_docs/02_tasks/` +- PLANS_DIR: `_docs/02_task_plans/` +- DOCUMENT_DIR: `_docs/02_document/` +- DEPENDENCIES_TABLE: `_docs/02_tasks/_dependencies_table.md` + +Create TASKS_DIR and PLANS_DIR if they don't exist. + +If TASKS_DIR already contains task files, scan them to determine the next numeric prefix for temporary file naming. 
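+
+A minimal sketch of that prefix scan (Python, illustrative; assumes temporary files carry two-digit zero-padded prefixes such as `07_rate-limiting.md`):
+
+```python
+import re
+from pathlib import Path
+
+TASKS_DIR = Path("_docs/02_tasks")
+
+def next_numeric_prefix() -> str:
+    """Return the next zero-padded numeric prefix for a temporary task file."""
+    used = [
+        int(m.group(1))
+        for f in TASKS_DIR.glob("*.md")
+        if not f.name.startswith("_")               # skip _dependencies_table.md
+        and (m := re.match(r"(\d+)_", f.name))      # numeric prefixes only
+    ]
+    return f"{max(used, default=0) + 1:02d}"        # e.g. "08" after 07_*.md
+```
+
+Files already renamed to ticket IDs (e.g. `PROJ-12_auth.md`) carry no numeric prefix and are simply ignored by the scan.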
+
+## Workflow
+
+The skill runs as a loop. Each iteration produces one task. After each task the user chooses to add another or finish.
+
+---
+
+### Step 1: Gather Feature Description
+
+**Role**: Product analyst
+**Goal**: Get a clear, detailed description of the new functionality from the user.
+
+Ask the user:
+
+```
+══════════════════════════════════════
+ NEW TASK: Describe the functionality
+══════════════════════════════════════
+ Please describe in detail the new functionality you want to add:
+ - What should it do?
+ - Who is it for?
+ - Any specific requirements or constraints?
+══════════════════════════════════════
+```
+
+**BLOCKING**: Do NOT proceed until the user provides a description.
+
+Record the description verbatim for use in subsequent steps.
+
+---
+
+### Step 2: Analyze Complexity
+
+**Role**: Technical analyst
+**Goal**: Determine whether deep research is needed.
+
+Read the user's description and the existing codebase documentation from DOCUMENT_DIR (architecture.md, components/, system-flows.md).
+
+Assess the change along these dimensions:
+- **Scope**: how many components/files are affected?
+- **Novelty**: does it involve libraries, protocols, or patterns not already in the codebase?
+- **Risk**: could it break existing functionality or require architectural changes?
+
+Classification:
+
+| Category | Criteria | Action |
+|----------|----------|--------|
+| **Needs research** | New libraries/frameworks, unfamiliar protocols, significant architectural change, multiple unknowns | Proceed to Step 3 (Research) |
+| **Skip research** | Extends existing functionality, uses patterns already in codebase, straightforward new component with known tech | Skip to Step 4 (Codebase Analysis) |
+
+Present the assessment to the user:
+
+```
+══════════════════════════════════════
+ COMPLEXITY ASSESSMENT
+══════════════════════════════════════
+ Scope: [low / medium / high]
+ Novelty: [low / medium / high]
+ Risk: [low / medium / high]
+══════════════════════════════════════
+ Recommendation: [Research needed / Skip research]
+ Reason: [one-line justification]
+══════════════════════════════════════
+```
+
+**BLOCKING**: Ask the user to confirm or override the recommendation before proceeding.
+
+---
+
+### Step 3: Research (conditional)
+
+**Role**: Researcher
+**Goal**: Investigate unknowns before task specification.
+
+This step only runs if Step 2 determined research is needed.
+
+1. Create a problem description file at `PLANS_DIR/<topic>/problem.md` summarizing the feature request and the specific unknowns to investigate
+2. Invoke `.cursor/skills/research/SKILL.md` in standalone mode:
+   - INPUT_FILE: `PLANS_DIR/<topic>/problem.md`
+   - BASE_DIR: `PLANS_DIR/<topic>/`
+3. After research completes, read the solution draft from `PLANS_DIR/<topic>/01_solution/solution_draft01.md`
+4. Extract the key findings relevant to the task specification
+
+The `<topic>` is a short kebab-case name derived from the feature description (e.g., `auth-provider-integration`, `real-time-notifications`).
+
+---
+
+### Step 4: Codebase Analysis
+
+**Role**: Software architect
+**Goal**: Determine where and how to insert the new functionality.
+
+1. Read the codebase documentation from DOCUMENT_DIR:
+   - `architecture.md` — overall structure
+   - `components/` — component specs
+   - `system-flows.md` — data flows (if exists)
+   - `data_model.md` — data model (if exists)
+2. If research was performed (Step 3), incorporate findings
+3. 
Analyze and determine: + - Which existing components are affected + - Where new code should be inserted (which layers, modules, files) + - What interfaces need to change + - What new interfaces or models are needed + - How data flows through the change +4. If the change is complex enough, read the actual source files (not just docs) to verify insertion points + +Present the analysis: + +``` +══════════════════════════════════════ + CODEBASE ANALYSIS +══════════════════════════════════════ + Affected components: [list] + Insertion points: [list of modules/layers] + Interface changes: [list or "None"] + New interfaces: [list or "None"] + Data flow impact: [summary] +══════════════════════════════════════ +``` + +--- + +### Step 5: Validate Assumptions + +**Role**: Quality gate +**Goal**: Surface every uncertainty and get user confirmation. + +Review all decisions and assumptions made in Steps 2–4. For each uncertainty: +1. State the assumption clearly +2. Propose a solution or approach +3. List alternatives if they exist + +Present using the Choose format for each decision that has meaningful alternatives: + +``` +══════════════════════════════════════ + ASSUMPTION VALIDATION +══════════════════════════════════════ + 1. [Assumption]: [proposed approach] + Alternative: [other option, if any] + 2. [Assumption]: [proposed approach] + Alternative: [other option, if any] + ... +══════════════════════════════════════ + Please confirm or correct these assumptions. +══════════════════════════════════════ +``` + +**BLOCKING**: Do NOT proceed until the user confirms or corrects all assumptions. + +--- + +### Step 6: Create Task + +**Role**: Technical writer +**Goal**: Produce the task specification file. + +1. Determine the next numeric prefix by scanning TASKS_DIR for existing files +2. Write the task file using `.cursor/skills/decompose/templates/task.md`: + - Fill all fields from the gathered information + - Set **Complexity** based on the assessment from Step 2 + - Set **Dependencies** by cross-referencing existing tasks in TASKS_DIR + - Set **Jira** and **Epic** to `pending` (filled in Step 7) +3. Save as `TASKS_DIR/[##]_[short_name].md` + +**Self-verification**: +- [ ] Problem section clearly describes the user need +- [ ] Acceptance criteria are testable (Gherkin format) +- [ ] Scope boundaries are explicit +- [ ] Complexity points match the assessment +- [ ] Dependencies reference existing task Jira IDs where applicable +- [ ] No implementation details leaked into the spec + +--- + +### Step 7: Work Item Ticket + +**Role**: Project coordinator +**Goal**: Create a work item ticket and link it to the task file. + +1. Create a ticket via the configured work item tracker (Jira MCP or Azure DevOps MCP — see `autopilot/protocols.md` for detection): + - Summary: the task's **Name** field + - Description: the task's **Problem** and **Acceptance Criteria** sections + - Story points: the task's **Complexity** value + - Link to the appropriate epic (ask user if unclear which epic) +2. Write the ticket ID and Epic ID back into the task file header: + - Update **Task** field: `[TICKET-ID]_[short_name]` + - Update **Jira** field: `[TICKET-ID]` + - Update **Epic** field: `[EPIC-ID]` +3. 
Rename the file from `[##]_[short_name].md` to `[TICKET-ID]_[short_name].md` + +If the work item tracker is not authenticated or unavailable (`tracker: local`): +- Keep the numeric prefix +- Set **Jira** to `pending` +- Set **Epic** to `pending` +- The task is still valid and can be implemented; tracker sync happens later + +--- + +### Step 8: Loop Gate + +Ask the user: + +``` +══════════════════════════════════════ + Task created: [JIRA-ID or ##] — [task name] +══════════════════════════════════════ + A) Add another task + B) Done — finish and update dependencies +══════════════════════════════════════ +``` + +- If **A** → loop back to Step 1 +- If **B** → proceed to Finalize + +--- + +### Finalize + +After the user chooses **Done**: + +1. Update (or create) `TASKS_DIR/_dependencies_table.md` — add all newly created tasks to the dependencies table +2. Present a summary of all tasks created in this session: + +``` +══════════════════════════════════════ + NEW TASK SUMMARY +══════════════════════════════════════ + Tasks created: N + Total complexity: M points + ───────────────────────────────────── + [JIRA-ID] [name] ([complexity] pts) + [JIRA-ID] [name] ([complexity] pts) + ... +══════════════════════════════════════ +``` + +## Escalation Rules + +| Situation | Action | +|-----------|--------| +| User description is vague or incomplete | **ASK** for more detail — do not guess | +| Unclear which epic to link to | **ASK** user for the epic | +| Research skill hits a blocker | Follow research skill's own escalation rules | +| Codebase analysis reveals conflicting architectures | **ASK** user which pattern to follow | +| Complexity exceeds 5 points | **WARN** user and suggest splitting into multiple tasks | +| Jira MCP unavailable | **WARN**, continue with local-only task files | + +## Trigger Conditions + +When the user wants to: +- Add new functionality to an existing codebase +- Plan a new feature or component +- Create task specifications for upcoming work + +**Keywords**: "new task", "add feature", "new functionality", "extend", "I want to add" + +**Differentiation**: +- User wants to decompose an existing plan into tasks → use `/decompose` +- User wants to research a topic without creating tasks → use `/research` +- User wants to refactor existing code → use `/refactor` +- User wants to define and plan a new feature → use this skill diff --git a/.cursor/skills/new-task/templates/task.md b/.cursor/skills/new-task/templates/task.md new file mode 100644 index 0000000..3a52cf9 --- /dev/null +++ b/.cursor/skills/new-task/templates/task.md @@ -0,0 +1,2 @@ + + diff --git a/.cursor/skills/plan/SKILL.md b/.cursor/skills/plan/SKILL.md index 36cea21..b1cc48d 100644 --- a/.cursor/skills/plan/SKILL.md +++ b/.cursor/skills/plan/SKILL.md @@ -1,19 +1,21 @@ --- name: plan description: | - Decompose a solution into architecture, system flows, components, tests, and Jira epics. - Systematic 5-step planning workflow with BLOCKING gates, self-verification, and structured artifact management. - Supports project mode (_docs/ + _docs/02_plans/ structure) and standalone mode (@file.md). + Decompose a solution into architecture, data model, deployment plan, system flows, components, tests, and Jira epics. + Systematic 6-step planning workflow with BLOCKING gates, self-verification, and structured artifact management. + Uses _docs/ + _docs/02_document/ structure. 
Trigger phrases: - "plan", "decompose solution", "architecture planning" - "break down the solution", "create planning documents" - "component decomposition", "solution analysis" +category: build +tags: [planning, architecture, components, testing, jira, epics] disable-model-invocation: true --- # Solution Planning -Decompose a problem and solution into architecture, system flows, components, tests, and Jira epics through a systematic 5-step workflow. +Decompose a problem and solution into architecture, data model, deployment plan, system flows, components, tests, and Jira epics through a systematic 6-step workflow. ## Core Principles @@ -25,329 +27,83 @@ Decompose a problem and solution into architecture, system flows, components, te ## Context Resolution -Determine the operating mode based on invocation before any other logic runs. +Fixed paths — no mode detection needed: -**Project mode** (no explicit input file provided): - PROBLEM_FILE: `_docs/00_problem/problem.md` - SOLUTION_FILE: `_docs/01_solution/solution.md` -- PLANS_DIR: `_docs/02_plans/` -- All existing guardrails apply as-is. +- DOCUMENT_DIR: `_docs/02_document/` -**Standalone mode** (explicit input file provided, e.g. `/plan @some_doc.md`): -- INPUT_FILE: the provided file (treated as combined problem + solution context) -- Derive `` from the input filename (without extension) -- PLANS_DIR: `_standalone//plans/` -- Guardrails relaxed: only INPUT_FILE must exist and be non-empty -- `acceptance_criteria.md` and `restrictions.md` are optional — warn if absent +Announce the resolved paths to the user before proceeding. -Announce the detected mode and resolved paths to the user before proceeding. - -## Input Specification - -### Required Files - -**Project mode:** +## Required Files | File | Purpose | |------|---------| -| PROBLEM_FILE (`_docs/00_problem/problem.md`) | Problem description and context | -| `_docs/00_problem/input_data/` | Reference data examples (if available) | -| `_docs/00_problem/restrictions.md` | Constraints and limitations (if available) | -| `_docs/00_problem/acceptance_criteria.md` | Measurable acceptance criteria (if available) | -| SOLUTION_FILE (`_docs/01_solution/solution.md`) | Solution draft to decompose | +| `_docs/00_problem/problem.md` | Problem description and context | +| `_docs/00_problem/acceptance_criteria.md` | Measurable acceptance criteria | +| `_docs/00_problem/restrictions.md` | Constraints and limitations | +| `_docs/00_problem/input_data/` | Reference data examples | +| `_docs/01_solution/solution.md` | Finalized solution to decompose | -**Standalone mode:** +## Prerequisites -| File | Purpose | -|------|---------| -| INPUT_FILE (the provided file) | Combined problem + solution context | - -### Prerequisite Checks (BLOCKING) - -**Project mode:** -1. PROBLEM_FILE exists and is non-empty — **STOP if missing** -2. SOLUTION_FILE exists and is non-empty — **STOP if missing** -3. Create PLANS_DIR if it does not exist -4. If `PLANS_DIR//` already exists, ask user: **resume from last checkpoint or start fresh?** - -**Standalone mode:** -1. INPUT_FILE exists and is non-empty — **STOP if missing** -2. Warn if no `restrictions.md` or `acceptance_criteria.md` provided alongside INPUT_FILE -3. Create PLANS_DIR if it does not exist -4. If `PLANS_DIR//` already exists, ask user: **resume from last checkpoint or start fresh?** +Read and follow `steps/00_prerequisites.md`. All three prerequisite checks are **BLOCKING** — do not start the workflow until they pass. 
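+
+A condensed sketch of the gate logic from `steps/00_prerequisites.md` (Python, illustrative only; the skill performs these checks interactively rather than via a script):
+
+```python
+import re
+from pathlib import Path
+
+PROBLEM = Path("_docs/00_problem")
+SOLUTION = Path("_docs/01_solution")
+
+def data_gate() -> None:
+    """Prereq 1: all four problem inputs must exist and be non-empty."""
+    for name in ("problem.md", "acceptance_criteria.md", "restrictions.md"):
+        f = PROBLEM / name
+        assert f.is_file() and f.stat().st_size > 0, f"STOP: {f} missing or empty"
+    assert any((PROBLEM / "input_data").glob("*")), "STOP: input_data/ has no data files"
+
+def finalize_solution_draft() -> Path:
+    """Prereq 2: promote the highest-numbered draft to solution.md."""
+    # Assumes drafts are numbered solution_draft01.md, solution_draft02.md, ...
+    drafts = sorted(SOLUTION.glob("solution_draft*.md"),
+                    key=lambda p: int(re.search(r"(\d+)$", p.stem).group(1)))
+    assert drafts, "STOP: no solution draft found"
+    target = SOLUTION / "solution.md"
+    if target.exists():
+        raise RuntimeError("solution.md already exists; ask the user to overwrite or keep")
+    drafts[-1].rename(target)
+    return target
+```
+
+The order matters: the data gate must pass before any draft is promoted, mirroring the sequential prerequisite checks.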
## Artifact Management -### Directory Structure - -At the start of planning, create a topic-named working directory under PLANS_DIR: - -``` -PLANS_DIR// -├── architecture.md -├── system-flows.md -├── risk_mitigations.md -├── risk_mitigations_02.md (iterative, ## as sequence) -├── components/ -│ ├── 01_[name]/ -│ │ ├── description.md -│ │ └── tests.md -│ ├── 02_[name]/ -│ │ ├── description.md -│ │ └── tests.md -│ └── ... -├── common-helpers/ -│ ├── 01_helper_[name]/ -│ ├── 02_helper_[name]/ -│ └── ... -├── e2e_test_infrastructure.md -├── diagrams/ -│ ├── components.drawio -│ └── flows/ -│ ├── flow_[name].md (Mermaid) -│ └── ... -└── FINAL_report.md -``` - -### Save Timing - -| Step | Save immediately after | Filename | -|------|------------------------|----------| -| Step 1 | Architecture analysis complete | `architecture.md` | -| Step 1 | System flows documented | `system-flows.md` | -| Step 2 | Each component analyzed | `components/[##]_[name]/description.md` | -| Step 2 | Common helpers generated | `common-helpers/[##]_helper_[name].md` | -| Step 2 | Diagrams generated | `diagrams/` | -| Step 3 | Risk assessment complete | `risk_mitigations.md` | -| Step 4 | Tests written per component | `components/[##]_[name]/tests.md` | -| Step 4b | E2E test infrastructure spec | `e2e_test_infrastructure.md` | -| Step 5 | Epics created in Jira | Jira via MCP | -| Final | All steps complete | `FINAL_report.md` | - -### Save Principles - -1. **Save immediately**: write to disk as soon as a step completes; do not wait until the end -2. **Incremental updates**: same file can be updated multiple times; append or replace -3. **Preserve process**: keep all intermediate files even after integration into final report -4. **Enable recovery**: if interrupted, resume from the last saved artifact (see Resumability) - -### Resumability - -If `PLANS_DIR//` already contains artifacts: - -1. List existing files and match them to the save timing table above -2. Identify the last completed step based on which artifacts exist -3. Resume from the next incomplete step -4. Inform the user which steps are being skipped +Read `steps/01_artifact-management.md` for directory structure, save timing, save principles, and resumability rules. Refer to it throughout the workflow. ## Progress Tracking -At the start of execution, create a TodoWrite with all steps (1 through 5, including 4b). Update status as each step completes. +At the start of execution, create a TodoWrite with all steps (1 through 6 plus Final). Update status as each step completes. ## Workflow -### Step 1: Solution Analysis +### Step 1: Blackbox Tests -**Role**: Professional software architect -**Goal**: Produce `architecture.md` and `system-flows.md` from the solution draft -**Constraints**: No code, no component-level detail yet; focus on system-level view +Read and execute `.cursor/skills/test-spec/SKILL.md`. -1. Read all input files thoroughly -2. Research unknown or questionable topics via internet; ask user about ambiguities -3. Document architecture using `templates/architecture.md` as structure -4. Document system flows using `templates/system-flows.md` as structure - -**Self-verification**: -- [ ] Architecture covers all capabilities mentioned in solution.md -- [ ] System flows cover all main user/system interactions -- [ ] No contradictions with problem.md or restrictions.md -- [ ] Technology choices are justified - -**Save action**: Write `architecture.md` and `system-flows.md` - -**BLOCKING**: Present architecture summary to user. 
Do NOT proceed until user confirms. +Capture any new questions, findings, or insights that arise during test specification — these feed forward into Steps 2 and 3. --- -### Step 2: Component Decomposition +### Step 2: Solution Analysis -**Role**: Professional software architect -**Goal**: Decompose the architecture into components with detailed specs -**Constraints**: No code; only names, interfaces, inputs/outputs. Follow SRP strictly. - -1. Identify components from the architecture; think about separation, reusability, and communication patterns -2. If additional components are needed (data preparation, shared helpers), create them -3. For each component, write a spec using `templates/component-spec.md` as structure -4. Generate diagrams: - - draw.io component diagram showing relations (minimize line intersections, group semantically coherent components, place external users near their components) - - Mermaid flowchart per main control flow -5. Components can share and reuse common logic, same for multiple components. Hence for such occurences common-helpers folder is specified. - -**Self-verification**: -- [ ] Each component has a single, clear responsibility -- [ ] No functionality is spread across multiple components -- [ ] All inter-component interfaces are defined (who calls whom, with what) -- [ ] Component dependency graph has no circular dependencies -- [ ] All components from architecture.md are accounted for - -**Save action**: Write: - - each component `components/[##]_[name]/description.md` - - comomon helper `common-helpers/[##]_helper_[name].md` - - diagrams `diagrams/` - -**BLOCKING**: Present component list with one-line summaries to user. Do NOT proceed until user confirms. +Read and follow `steps/02_solution-analysis.md`. --- -### Step 3: Architecture Review & Risk Assessment +### Step 3: Component Decomposition -**Role**: Professional software architect and analyst -**Goal**: Validate all artifacts for consistency, then identify and mitigate risks -**Constraints**: This is a review step — fix problems found, do not add new features - -#### 3a. Evaluator Pass (re-read ALL artifacts) - -Review checklist: -- [ ] All components follow Single Responsibility Principle -- [ ] All components follow dumb code / smart data principle -- [ ] Inter-component interfaces are consistent (caller's output matches callee's input) -- [ ] No circular dependencies in the dependency graph -- [ ] No missing interactions between components -- [ ] No over-engineering — is there a simpler decomposition? -- [ ] Security considerations addressed in component design -- [ ] Performance bottlenecks identified -- [ ] API contracts are consistent across components - -Fix any issues found before proceeding to risk identification. - -#### 3b. Risk Identification - -1. Identify technical and project risks -2. Assess probability and impact using `templates/risk-register.md` -3. Define mitigation strategies -4. Apply mitigations to architecture, flows, and component documents where applicable - -**Self-verification**: -- [ ] Every High/Critical risk has a concrete mitigation strategy -- [ ] Mitigations are reflected in the relevant component or architecture docs -- [ ] No new risks introduced by the mitigations themselves - -**Save action**: Write `risk_mitigations.md` - -**BLOCKING**: Present risk summary to user. Ask whether assessment is sufficient. - -**Iterative**: If user requests another round, repeat Step 3 and write `risk_mitigations_##.md` (## as sequence number). Continue until user confirms. 
+Read and follow `steps/03_component-decomposition.md`. --- -### Step 4: Test Specifications +### Step 4: Architecture Review & Risk Assessment -**Role**: Professional Quality Assurance Engineer -**Goal**: Write test specs for each component achieving minimum 75% acceptance criteria coverage -**Constraints**: Test specs only — no test code. Each test must trace to an acceptance criterion. - -1. For each component, write tests using `templates/test-spec.md` as structure -2. Cover all 4 types: integration, performance, security, acceptance -3. Include test data management (setup, teardown, isolation) -4. Verify traceability: every acceptance criterion from `acceptance_criteria.md` must be covered by at least one test - -**Self-verification**: -- [ ] Every acceptance criterion has at least one test covering it -- [ ] Test inputs are realistic and well-defined -- [ ] Expected results are specific and measurable -- [ ] No component is left without tests - -**Save action**: Write each `components/[##]_[name]/tests.md` +Read and follow `steps/04_review-risk.md`. --- -### Step 4b: E2E Black-Box Test Infrastructure +### Step 5: Test Specifications -**Role**: Professional Quality Assurance Engineer -**Goal**: Specify a separate consumer application and Docker environment for black-box end-to-end testing of the main system -**Constraints**: Spec only — no test code. Consumer must treat the main system as a black box (no internal imports, no direct DB access). - -1. Define Docker environment: services (system under test, test DB, consumer app, dependencies), networks, volumes -2. Specify consumer application: tech stack, entry point, communication interfaces with the main system -3. Define E2E test scenarios from acceptance criteria — focus on critical end-to-end use cases that cross component boundaries -4. Specify test data management: seed data, isolation strategy, external dependency mocks -5. Define CI/CD integration: when to run, gate behavior, timeout -6. Define reporting format (CSV: test ID, name, execution time, result, error message) - -Use `templates/e2e-test-infrastructure.md` as structure. - -**Self-verification**: -- [ ] Critical acceptance criteria are covered by at least one E2E scenario -- [ ] Consumer app has no direct access to system internals -- [ ] Docker environment is self-contained (`docker compose up` sufficient) -- [ ] External dependencies have mock/stub services defined - -**Save action**: Write `e2e_test_infrastructure.md` +Read and follow `steps/05_test-specifications.md`. --- -### Step 5: Jira Epics +### Step 6: Jira Epics -**Role**: Professional product manager -**Goal**: Create Jira epics from components, ordered by dependency -**Constraints**: Be concise — fewer words with the same meaning is better - -1. Generate Jira Epics from components using Jira MCP, structured per `templates/epic-spec.md` -2. Order epics by dependency (which must be done first) -3. Include effort estimation per epic (T-shirt size or story points range) -4. Ensure each epic has clear acceptance criteria cross-referenced with component specs -5. Generate updated draw.io diagram showing component-to-epic mapping - -**Self-verification**: -- [ ] Every component maps to exactly one epic -- [ ] Dependency order is respected (no epic depends on a later one) -- [ ] Acceptance criteria are measurable -- [ ] Effort estimates are realistic - -**Save action**: Epics created in Jira via MCP +Read and follow `steps/06_jira-epics.md`. 
--- -## Quality Checklist (before FINAL_report.md) +### Final: Quality Checklist -Before writing the final report, verify ALL of the following: - -### Architecture -- [ ] Covers all capabilities from solution.md -- [ ] Technology choices are justified -- [ ] Deployment model is defined - -### Components -- [ ] Every component follows SRP -- [ ] No circular dependencies -- [ ] All inter-component interfaces are defined and consistent -- [ ] No orphan components (unused by any flow) - -### Risks -- [ ] All High/Critical risks have mitigations -- [ ] Mitigations are reflected in component/architecture docs -- [ ] User has confirmed risk assessment is sufficient - -### Tests -- [ ] Every acceptance criterion is covered by at least one test -- [ ] All 4 test types are represented per component (where applicable) -- [ ] Test data management is defined - -### E2E Test Infrastructure -- [ ] Critical use cases covered by E2E scenarios -- [ ] Docker environment is self-contained -- [ ] Consumer app treats main system as black box -- [ ] CI/CD integration and reporting defined - -### Epics -- [ ] Every component maps to an epic -- [ ] Dependency order is correct -- [ ] Acceptance criteria are measurable - -**Save action**: Write `FINAL_report.md` using `templates/final-report.md` as structure +Read and follow `steps/07_quality-checklist.md`. ## Common Mistakes +- **Proceeding without input data**: all three data gate items (acceptance_criteria, restrictions, input_data) must be present before any planning begins - **Coding during planning**: this workflow produces documents, never code - **Multi-responsibility components**: if a component does two things, split it - **Skipping BLOCKING gates**: never proceed past a BLOCKING marker without user confirmation @@ -355,13 +111,15 @@ Before writing the final report, verify ALL of the following: - **Copy-pasting problem.md**: the architecture doc should analyze and transform, not repeat the input - **Vague interfaces**: "component A talks to component B" is not enough; define the method, input, output - **Ignoring restrictions.md**: every constraint must be traceable in the architecture or risk register +- **Ignoring blackbox test findings**: insights from Step 1 must feed into architecture (Step 2) and component decomposition (Step 3) ## Escalation Rules | Situation | Action | |-----------|--------| +| Missing acceptance_criteria.md, restrictions.md, or input_data/ | **STOP** — planning cannot proceed | | Ambiguous requirements | ASK user | -| Missing acceptance criteria | ASK user | +| Input data coverage below 70% | Search internet for supplementary data, ASK user to validate | | Technology choice with multiple valid options | ASK user | | Component naming | PROCEED, confirm at next BLOCKING gate | | File structure within templates | PROCEED | @@ -372,22 +130,26 @@ Before writing the final report, verify ALL of the following: ``` ┌────────────────────────────────────────────────────────────────┐ -│ Solution Planning (5-Step Method) │ +│ Solution Planning (6-Step + Final) │ ├────────────────────────────────────────────────────────────────┤ -│ CONTEXT: Resolve mode (project vs standalone) + set paths │ -│ 1. Solution Analysis → architecture.md, system-flows.md │ +│ PREREQ: Data Gate (BLOCKING) │ +│ → verify AC, restrictions, input_data, solution exist │ +│ │ +│ 1. Blackbox Tests → test-spec/SKILL.md │ +│ [BLOCKING: user confirms test coverage] │ +│ 2. 
│     [BLOCKING: user confirms architecture]                    │
-│ 2. Component Decompose → components/[##]_[name]/description   │
-│    [BLOCKING: user confirms decomposition]                    │
-│ 3. Review & Risk Assess → risk_mitigations.md                 │
-│    [BLOCKING: user confirms risks, iterative]                 │
-│ 4. Test Specifications → components/[##]_[name]/tests.md      │
-│ 4b.E2E Test Infra → e2e_test_infrastructure.md                │
-│ 5. Jira Epics → Jira via MCP                                  │
+│  3. Component Decomp → component specs + interfaces           │
+│     [BLOCKING: user confirms components]                      │
+│  4. Review & Risk → risk register, iterations                 │
+│     [BLOCKING: user confirms mitigations]                     │
+│  5. Test Specifications → per-component test specs            │
+│  6. Jira Epics → epic per component + bootstrap               │
│  ─────────────────────────────────────────────────            │
-│ Quality Checklist → FINAL_report.md                           │
+│  Final: Quality Checklist → FINAL_report.md                   │
├────────────────────────────────────────────────────────────────┤
-│ Principles: SRP · Dumb code/smart data · Save immediately     │
-│             Ask don't assume · Plan don't code                │
+│ Principles: Single Responsibility · Dumb code, smart data     │
+│             Save immediately · Ask don't assume               │
+│             Plan don't code                                   │
└────────────────────────────────────────────────────────────────┘
```
diff --git a/.cursor/skills/plan/steps/00_prerequisites.md b/.cursor/skills/plan/steps/00_prerequisites.md
new file mode 100644
index 0000000..3eccbc8
--- /dev/null
+++ b/.cursor/skills/plan/steps/00_prerequisites.md
@@ -0,0 +1,27 @@
+## Prerequisite Checks (BLOCKING)
+
+Run sequentially before any planning step:
+
+### Prereq 1: Data Gate
+
+1. `_docs/00_problem/acceptance_criteria.md` exists and is non-empty — **STOP if missing**
+2. `_docs/00_problem/restrictions.md` exists and is non-empty — **STOP if missing**
+3. `_docs/00_problem/input_data/` exists and contains at least one data file — **STOP if missing**
+4. `_docs/00_problem/problem.md` exists and is non-empty — **STOP if missing**
+
+All four are mandatory. If any is missing or empty, STOP and ask the user to provide them. If the user cannot provide the required data, planning cannot proceed — just stop.
+
+### Prereq 2: Finalize Solution Draft
+
+Only runs after the Data Gate passes:
+
+1. Scan `_docs/01_solution/` for files matching `solution_draft*.md`
+2. Identify the highest-numbered draft (e.g. `solution_draft06.md`)
+3. If `solution.md` already exists, ask the user whether to overwrite it or keep the existing file
+4. **Rename** the highest-numbered draft to `_docs/01_solution/solution.md`
+5. Verify `solution.md` is non-empty — **STOP if missing or empty**
+
+### Prereq 3: Workspace Setup
+
+1. Create DOCUMENT_DIR if it does not exist
+2. If DOCUMENT_DIR already contains artifacts, ask user: **resume from last checkpoint or start fresh?**
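+
+A condensed sketch of the prerequisite flow above (the overwrite prompt from Prereq 2 is omitted):
+
+```mermaid
+flowchart TD
+    G{"problem.md, acceptance_criteria.md,<br/>restrictions.md, input_data/ all present?"} -->|no| S["STOP and ask the user"]
+    G -->|yes| D["Rename highest solution_draft##.md to solution.md"]
+    D --> V{"solution.md exists and is non-empty?"}
+    V -->|no| S
+    V -->|yes| W["Create DOCUMENT_DIR and check for existing artifacts"]
+    W --> P["Proceed to planning"]
+```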
diff --git a/.cursor/skills/plan/steps/01_artifact-management.md b/.cursor/skills/plan/steps/01_artifact-management.md
new file mode 100644
index 0000000..95af1d0
--- /dev/null
+++ b/.cursor/skills/plan/steps/01_artifact-management.md
@@ -0,0 +1,87 @@
+## Artifact Management
+
+### Directory Structure
+
+All artifacts are written directly under DOCUMENT_DIR:
+
+```
+DOCUMENT_DIR/
+├── tests/
+│   ├── environment.md
+│   ├── test-data.md
+│   ├── blackbox-tests.md
+│   ├── performance-tests.md
+│   ├── resilience-tests.md
+│   ├── security-tests.md
+│   ├── resource-limit-tests.md
+│   └── traceability-matrix.md
+├── architecture.md
+├── system-flows.md
+├── data_model.md
+├── deployment/
+│   ├── containerization.md
+│   ├── ci_cd_pipeline.md
+│   ├── environment_strategy.md
+│   ├── observability.md
+│   └── deployment_procedures.md
+├── risk_mitigations.md
+├── risk_mitigations_02.md (iterative, ## as sequence)
+├── components/
+│   ├── 01_[name]/
+│   │   ├── description.md
+│   │   └── tests.md
+│   ├── 02_[name]/
+│   │   ├── description.md
+│   │   └── tests.md
+│   └── ...
+├── common-helpers/
+│   ├── 01_helper_[name].md
+│   ├── 02_helper_[name].md
+│   └── ...
+├── diagrams/
+│   ├── components.drawio
+│   └── flows/
+│       ├── flow_[name].md (Mermaid)
+│       └── ...
+└── FINAL_report.md
+```
+
+### Save Timing
+
+| Step | Save immediately after | Filename |
+|------|------------------------|----------|
+| Step 1 | Blackbox test environment spec | `tests/environment.md` |
+| Step 1 | Blackbox test data spec | `tests/test-data.md` |
+| Step 1 | Blackbox tests | `tests/blackbox-tests.md` |
+| Step 1 | Blackbox performance tests | `tests/performance-tests.md` |
+| Step 1 | Blackbox resilience tests | `tests/resilience-tests.md` |
+| Step 1 | Blackbox security tests | `tests/security-tests.md` |
+| Step 1 | Blackbox resource limit tests | `tests/resource-limit-tests.md` |
+| Step 1 | Blackbox traceability matrix | `tests/traceability-matrix.md` |
+| Step 2 | Architecture analysis complete | `architecture.md` |
+| Step 2 | System flows documented | `system-flows.md` |
+| Step 2 | Data model documented | `data_model.md` |
+| Step 2 | Deployment plan complete | `deployment/` (5 files) |
+| Step 3 | Each component analyzed | `components/[##]_[name]/description.md` |
+| Step 3 | Common helpers generated | `common-helpers/[##]_helper_[name].md` |
+| Step 3 | Diagrams generated | `diagrams/` |
+| Step 4 | Risk assessment complete | `risk_mitigations.md` |
+| Step 5 | Tests written per component | `components/[##]_[name]/tests.md` |
+| Step 6 | Epics created in Jira | Jira via MCP |
+| Final | All steps complete | `FINAL_report.md` |
+
+### Save Principles
+
+1. **Save immediately**: write to disk as soon as a step completes; do not wait until the end
+2. **Incremental updates**: same file can be updated multiple times; append or replace
+3. **Preserve process**: keep all intermediate files even after integration into final report
+4. **Enable recovery**: if interrupted, resume from the last saved artifact (see Resumability)
+
+### Resumability
+
+If DOCUMENT_DIR already contains artifacts (see the sketch below):
+
+1. List existing files and match them to the save timing table above
+2. Identify the last completed step based on which artifacts exist
+3. Resume from the next incomplete step
+4. Inform the user which steps are being skipped
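+
+The resume decision, sketched as a flowchart (step identification relies on the save timing table above):
+
+```mermaid
+flowchart TD
+    A["List files in DOCUMENT_DIR"] --> B{"Any artifacts present?"}
+    B -->|no| C["Start from Step 1"]
+    B -->|yes| D["Match files to the save timing table"]
+    D --> E["Identify last completed step"]
+    E --> F["Resume from next incomplete step"]
+    F --> G["Inform user which steps were skipped"]
+```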
diff --git a/.cursor/skills/plan/steps/02_solution-analysis.md b/.cursor/skills/plan/steps/02_solution-analysis.md
new file mode 100644
index 0000000..701f409
--- /dev/null
+++ b/.cursor/skills/plan/steps/02_solution-analysis.md
@@ -0,0 +1,74 @@
+## Step 2: Solution Analysis
+
+**Role**: Professional software architect
+**Goal**: Produce `architecture.md`, `system-flows.md`, `data_model.md`, and `deployment/` from the solution draft
+**Constraints**: No code, no component-level detail yet; focus on system-level view
+
+### Phase 2a: Architecture & Flows
+
+1. Read all input files thoroughly
+2. Incorporate findings, questions, and insights discovered during Step 1 (blackbox tests)
+3. Research unknown or questionable topics on the internet; ask the user about ambiguities
+4. Document architecture using `templates/architecture.md` as structure
+5. Document system flows using `templates/system-flows.md` as structure
+
+**Self-verification**:
+- [ ] Architecture covers all capabilities mentioned in solution.md
+- [ ] System flows cover all main user/system interactions
+- [ ] No contradictions with problem.md or restrictions.md
+- [ ] Technology choices are justified
+- [ ] Blackbox test findings are reflected in architecture decisions
+
+**Save action**: Write `architecture.md` and `system-flows.md`
+
+**BLOCKING**: Present architecture summary to user. Do NOT proceed until user confirms.
+
+### Phase 2b: Data Model
+
+**Role**: Professional software architect
+**Goal**: Produce a detailed data model document covering entities, relationships, and migration strategy
+
+1. Extract core entities from architecture.md and solution.md
+2. Define entity attributes, types, and constraints
+3. Define relationships between entities (Mermaid ERD; see the sketch below)
+4. Define migration strategy: versioning tool (EF Core migrations / Alembic / sql-migrate), reversibility requirement, naming convention
+5. Define seed data requirements per environment (dev, staging)
+6. Define backward compatibility approach for schema changes (additive-only by default)
+
+**Self-verification**:
+- [ ] Every entity mentioned in architecture.md is defined
+- [ ] Relationships are explicit with cardinality
+- [ ] Migration strategy specifies reversibility requirement
+- [ ] Seed data requirements defined
+- [ ] Backward compatibility approach documented
+
+**Save action**: Write `data_model.md`
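+
+A minimal Mermaid ERD sketch for item 3 above; the entities, fields, and cardinalities are placeholders, not taken from any project:
+
+```mermaid
+erDiagram
+    USER ||--o{ ORDER : places
+    ORDER ||--|{ ORDER_ITEM : contains
+    USER {
+        uuid id PK
+        string email "unique, not null"
+    }
+    ORDER {
+        uuid id PK
+        uuid user_id FK
+        datetime created_at
+    }
+    ORDER_ITEM {
+        uuid id PK
+        uuid order_id FK
+        int quantity
+    }
+```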
+
+### Phase 2c: Deployment Planning
+
+**Role**: DevOps / Platform engineer
+**Goal**: Produce deployment plan covering containerization, CI/CD, environment strategy, observability, and deployment procedures
+
+Use the `/deploy` skill's templates as structure for each artifact:
+
+1. Read architecture.md and restrictions.md for infrastructure constraints
+2. Research Docker best practices for the project's tech stack
+3. Define containerization plan: Dockerfile per component, docker-compose for dev and tests
+4. Define CI/CD pipeline: stages, quality gates, caching, parallelization
+5. Define environment strategy: dev, staging, production with secrets management
+6. Define observability: structured logging, metrics, tracing, alerting
+7. Define deployment procedures: strategy, health checks, rollback, checklist
+
+**Self-verification**:
+- [ ] Every component has a Docker specification
+- [ ] CI/CD pipeline covers lint, test, security, build, deploy
+- [ ] Environment strategy covers dev, staging, production
+- [ ] Observability covers logging, metrics, tracing, alerting
+- [ ] Deployment procedures include rollback and health checks
+
+**Save action**: Write all 5 files under `deployment/`:
+- `containerization.md`
+- `ci_cd_pipeline.md`
+- `environment_strategy.md`
+- `observability.md`
+- `deployment_procedures.md`
diff --git a/.cursor/skills/plan/steps/03_component-decomposition.md b/.cursor/skills/plan/steps/03_component-decomposition.md
new file mode 100644
index 0000000..c026e65
--- /dev/null
+++ b/.cursor/skills/plan/steps/03_component-decomposition.md
@@ -0,0 +1,29 @@
+## Step 3: Component Decomposition
+
+**Role**: Professional software architect
+**Goal**: Decompose the architecture into components with detailed specs
+**Constraints**: No code; only names, interfaces, inputs/outputs. Follow SRP strictly.
+
+1. Identify components from the architecture; think about separation, reusability, and communication patterns
+2. Use blackbox test scenarios from Step 1 to validate component boundaries
+3. If additional components are needed (data preparation, shared helpers), create them
+4. For each component, write a spec using `templates/component-spec.md` as structure
+5. Generate diagrams:
+   - draw.io component diagram showing relations (minimize line intersections, group semantically coherent components, place external users near their components)
+   - Mermaid flowchart per main control flow
+6. Multiple components may share and reuse the same common logic; extract each such shared piece into the `common-helpers/` folder
+
+**Self-verification**:
+- [ ] Each component has a single, clear responsibility
+- [ ] No functionality is spread across multiple components
+- [ ] All inter-component interfaces are defined (who calls whom, with what)
+- [ ] Component dependency graph has no circular dependencies
+- [ ] All components from architecture.md are accounted for
+- [ ] Every blackbox test scenario can be traced through component interactions
+
+**Save action**: Write:
+  - each component `components/[##]_[name]/description.md`
+  - common helper `common-helpers/[##]_helper_[name].md`
+  - diagrams `diagrams/`
+
+**BLOCKING**: Present component list with one-line summaries to user. Do NOT proceed until user confirms.
diff --git a/.cursor/skills/plan/steps/04_review-risk.md b/.cursor/skills/plan/steps/04_review-risk.md
new file mode 100644
index 0000000..747b7cf
--- /dev/null
+++ b/.cursor/skills/plan/steps/04_review-risk.md
@@ -0,0 +1,38 @@
+## Step 4: Architecture Review & Risk Assessment
+
+**Role**: Professional software architect and analyst
+**Goal**: Validate all artifacts for consistency, then identify and mitigate risks
+**Constraints**: This is a review step — fix problems found, do not add new features
+
+### 4a. Evaluator Pass (re-read ALL artifacts)
+
+Review checklist:
+- [ ] All components follow Single Responsibility Principle
+- [ ] All components follow dumb code / smart data principle
+- [ ] Inter-component interfaces are consistent (caller's output matches callee's input)
+- [ ] No circular dependencies in the dependency graph
+- [ ] No missing interactions between components
+- [ ] No over-engineering — is there a simpler decomposition?
+- [ ] Security considerations addressed in component design +- [ ] Performance bottlenecks identified +- [ ] API contracts are consistent across components + +Fix any issues found before proceeding to risk identification. + +### 4b. Risk Identification + +1. Identify technical and project risks +2. Assess probability and impact using `templates/risk-register.md` +3. Define mitigation strategies +4. Apply mitigations to architecture, flows, and component documents where applicable + +**Self-verification**: +- [ ] Every High/Critical risk has a concrete mitigation strategy +- [ ] Mitigations are reflected in the relevant component or architecture docs +- [ ] No new risks introduced by the mitigations themselves + +**Save action**: Write `risk_mitigations.md` + +**BLOCKING**: Present risk summary to user. Ask whether assessment is sufficient. + +**Iterative**: If user requests another round, repeat Step 4 and write `risk_mitigations_##.md` (## as sequence number). Continue until user confirms. diff --git a/.cursor/skills/plan/steps/05_test-specifications.md b/.cursor/skills/plan/steps/05_test-specifications.md new file mode 100644 index 0000000..9657359 --- /dev/null +++ b/.cursor/skills/plan/steps/05_test-specifications.md @@ -0,0 +1,20 @@ +## Step 5: Test Specifications + +**Role**: Professional Quality Assurance Engineer + +**Goal**: Write test specs for each component achieving minimum 75% acceptance criteria coverage + +**Constraints**: Test specs only — no test code. Each test must trace to an acceptance criterion. + +1. For each component, write tests using `templates/test-spec.md` as structure +2. Cover all 4 types: integration, performance, security, acceptance +3. Include test data management (setup, teardown, isolation) +4. Verify traceability: every acceptance criterion from `acceptance_criteria.md` must be covered by at least one test + +**Self-verification**: +- [ ] Every acceptance criterion has at least one test covering it +- [ ] Test inputs are realistic and well-defined +- [ ] Expected results are specific and measurable +- [ ] No component is left without tests + +**Save action**: Write each `components/[##]_[name]/tests.md` diff --git a/.cursor/skills/plan/steps/06_jira-epics.md b/.cursor/skills/plan/steps/06_jira-epics.md new file mode 100644 index 0000000..e93d95e --- /dev/null +++ b/.cursor/skills/plan/steps/06_jira-epics.md @@ -0,0 +1,48 @@ +## Step 6: Work Item Epics + +**Role**: Professional product manager + +**Goal**: Create epics from components, ordered by dependency + +**Constraints**: Epic descriptions must be **comprehensive and self-contained** — a developer reading only the epic should understand the full context without needing to open separate files. + +1. **Create "Bootstrap & Initial Structure" epic first** — this epic will parent the `01_initial_structure` task created by the decompose skill. It covers project scaffolding: folder structure, shared models, interfaces, stubs, CI/CD config, DB migrations setup, test structure. +2. Generate epics for each component using the configured work item tracker (Jira MCP or Azure DevOps MCP — see `autopilot/protocols.md`), structured per `templates/epic-spec.md` +3. Order epics by dependency (Bootstrap epic is always first, then components based on their dependency graph) +4. Include effort estimation per epic (T-shirt size or story points range) +5. Ensure each epic has clear acceptance criteria cross-referenced with component specs +6. 
Generate Mermaid diagrams showing component-to-epic mapping and component relationships (an illustrative sketch appears at the end of this file)
+7. **Create "Blackbox Tests" epic** — this epic will parent the blackbox test tasks created by the `/decompose` skill. It covers implementing the test scenarios defined in `tests/`.
+
+**CRITICAL — Epic description richness requirements**:
+
+Each epic description MUST include ALL of the following sections with substantial content:
+- **System context**: where this component fits in the overall architecture (include Mermaid diagram showing this component's position and connections)
+- **Problem / Context**: what problem this component solves, why it exists, current pain points
+- **Scope**: detailed in-scope and out-of-scope lists
+- **Architecture notes**: relevant ADRs, technology choices, patterns used, key design decisions
+- **Interface specification**: full method signatures, input/output types, error types (from component description.md)
+- **Data flow**: how data enters and exits this component (include Mermaid sequence or flowchart diagram)
+- **Dependencies**: epic dependencies (with Jira IDs) and external dependencies (libraries, hardware, services)
+- **Acceptance criteria**: measurable criteria with specific thresholds (from component tests.md)
+- **Non-functional requirements**: latency, memory, throughput targets with failure thresholds
+- **Risks & mitigations**: relevant risks from risk_mitigations.md with concrete mitigation strategies
+- **Effort estimation**: T-shirt size and story points range
+- **Child issues**: planned task breakdown with complexity points
+- **Key constraints**: from restrictions.md that affect this component
+- **Testing strategy**: summary of test types and coverage from tests.md
+
+Do NOT create minimal epics with just a summary and short description. The epic is the primary reference document for the implementation team.
+
+**Self-verification**:
+- [ ] "Bootstrap & Initial Structure" epic exists and is first in order
+- [ ] "Blackbox Tests" epic exists
+- [ ] Every component maps to exactly one epic
+- [ ] Dependency order is respected (no epic depends on a later one)
+- [ ] Acceptance criteria are measurable
+- [ ] Effort estimates are realistic
+- [ ] Every epic description includes architecture diagram, interface spec, data flow, risks, and NFRs
+- [ ] Epic descriptions are self-contained — readable without opening other files
+
+**Save action**: Epics created via the configured tracker MCP. Also saved locally in `epics.md` with ticket IDs. If `tracker: local`, save locally only.
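+
+An illustrative component-to-epic mapping diagram for item 6; component and epic names are placeholders:
+
+```mermaid
+flowchart LR
+    %% dotted = component-to-epic mapping, solid = epic dependency order
+    C1["component 01_ingestion"] -. maps to .-> E1["EPIC-01 Ingestion"]
+    C2["component 02_processing"] -. maps to .-> E2["EPIC-02 Processing"]
+    E0["EPIC-00 Bootstrap and Initial Structure"] --> E1
+    E1 --> E2
+    E2 --> EB["EPIC-XX Blackbox Tests"]
+```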
diff --git a/.cursor/skills/plan/steps/07_quality-checklist.md b/.cursor/skills/plan/steps/07_quality-checklist.md new file mode 100644 index 0000000..f883e88 --- /dev/null +++ b/.cursor/skills/plan/steps/07_quality-checklist.md @@ -0,0 +1,57 @@ +## Quality Checklist (before FINAL_report.md) + +Before writing the final report, verify ALL of the following: + +### Blackbox Tests +- [ ] Every acceptance criterion is covered in traceability-matrix.md +- [ ] Every restriction is verified by at least one test +- [ ] Positive and negative scenarios are balanced +- [ ] Docker environment is self-contained +- [ ] Consumer app treats main system as black box +- [ ] CI/CD integration and reporting defined + +### Architecture +- [ ] Covers all capabilities from solution.md +- [ ] Technology choices are justified +- [ ] Deployment model is defined +- [ ] Blackbox test findings are reflected in architecture decisions + +### Data Model +- [ ] Every entity from architecture.md is defined +- [ ] Relationships have explicit cardinality +- [ ] Migration strategy with reversibility requirement +- [ ] Seed data requirements defined +- [ ] Backward compatibility approach documented + +### Deployment +- [ ] Containerization plan covers all components +- [ ] CI/CD pipeline includes lint, test, security, build, deploy stages +- [ ] Environment strategy covers dev, staging, production +- [ ] Observability covers logging, metrics, tracing, alerting +- [ ] Deployment procedures include rollback and health checks + +### Components +- [ ] Every component follows SRP +- [ ] No circular dependencies +- [ ] All inter-component interfaces are defined and consistent +- [ ] No orphan components (unused by any flow) +- [ ] Every blackbox test scenario can be traced through component interactions + +### Risks +- [ ] All High/Critical risks have mitigations +- [ ] Mitigations are reflected in component/architecture docs +- [ ] User has confirmed risk assessment is sufficient + +### Tests +- [ ] Every acceptance criterion is covered by at least one test +- [ ] All 4 test types are represented per component (where applicable) +- [ ] Test data management is defined + +### Epics +- [ ] "Bootstrap & Initial Structure" epic exists +- [ ] "Blackbox Tests" epic exists +- [ ] Every component maps to an epic +- [ ] Dependency order is correct +- [ ] Acceptance criteria are measurable + +**Save action**: Write `FINAL_report.md` using `templates/final-report.md` as structure diff --git a/.cursor/skills/plan/templates/architecture.md b/.cursor/skills/plan/templates/architecture.md index 0f05dc0..1d381cc 100644 --- a/.cursor/skills/plan/templates/architecture.md +++ b/.cursor/skills/plan/templates/architecture.md @@ -1,6 +1,6 @@ # Architecture Document Template -Use this template for the architecture document. Save as `_docs/02_plans//architecture.md`. +Use this template for the architecture document. Save as `_docs/02_document/architecture.md`. --- diff --git a/.cursor/skills/plan/templates/blackbox-tests.md b/.cursor/skills/plan/templates/blackbox-tests.md new file mode 100644 index 0000000..d522698 --- /dev/null +++ b/.cursor/skills/plan/templates/blackbox-tests.md @@ -0,0 +1,78 @@ +# Blackbox Tests Template + +Save as `DOCUMENT_DIR/tests/blackbox-tests.md`. + +--- + +```markdown +# Blackbox Tests + +## Positive Scenarios + +### FT-P-01: [Scenario Name] + +**Summary**: [One sentence: what black-box use case this validates] +**Traces to**: AC-[ID], AC-[ID] +**Category**: [which AC category — e.g., Position Accuracy, Image Processing, etc.] 
+ +**Preconditions**: +- [System state required before test] + +**Input data**: [reference to specific data set or file from test-data.md] + +**Steps**: + +| Step | Consumer Action | Expected System Response | +|------|----------------|------------------------| +| 1 | [call / send / provide input] | [response / event / output] | +| 2 | [call / send / provide input] | [response / event / output] | + +**Expected outcome**: [specific, measurable result] +**Max execution time**: [e.g., 10s] + +--- + +### FT-P-02: [Scenario Name] + +(repeat structure) + +--- + +## Negative Scenarios + +### FT-N-01: [Scenario Name] + +**Summary**: [One sentence: what invalid/edge input this tests] +**Traces to**: AC-[ID] (negative case), RESTRICT-[ID] +**Category**: [which AC/restriction category] + +**Preconditions**: +- [System state required before test] + +**Input data**: [reference to specific invalid data or edge case] + +**Steps**: + +| Step | Consumer Action | Expected System Response | +|------|----------------|------------------------| +| 1 | [provide invalid input / trigger edge case] | [error response / graceful degradation / fallback behavior] | + +**Expected outcome**: [system rejects gracefully / falls back to X / returns error Y] +**Max execution time**: [e.g., 5s] + +--- + +### FT-N-02: [Scenario Name] + +(repeat structure) +``` + +--- + +## Guidance Notes + +- Blackbox tests should typically trace to at least one acceptance criterion or restriction. Tests without a trace are allowed but should have a clear justification. +- Positive scenarios validate the system does what it should. +- Negative scenarios validate the system rejects or handles gracefully what it shouldn't accept. +- Expected outcomes must be specific and measurable — not "works correctly" but "returns position within 50m of ground truth." +- Input data references should point to specific entries in test-data.md. diff --git a/.cursor/skills/plan/templates/epic-spec.md b/.cursor/skills/plan/templates/epic-spec.md index 26bb953..6cb60e6 100644 --- a/.cursor/skills/plan/templates/epic-spec.md +++ b/.cursor/skills/plan/templates/epic-spec.md @@ -1,6 +1,6 @@ -# Jira Epic Template +# Epic Template -Use this template for each Jira epic. Create epics via Jira MCP. +Use this template for each epic. Create epics via the configured work item tracker (Jira MCP or Azure DevOps MCP). --- @@ -73,14 +73,14 @@ Link to architecture.md and relevant component spec.] ### Design & Architecture -- Architecture doc: `_docs/02_plans//architecture.md` -- Component spec: `_docs/02_plans//components/[##]_[name]/description.md` -- System flows: `_docs/02_plans//system-flows.md` +- Architecture doc: `_docs/02_document/architecture.md` +- Component spec: `_docs/02_document/components/[##]_[name]/description.md` +- System flows: `_docs/02_document/system-flows.md` ### Definition of Done - [ ] All in-scope capabilities implemented -- [ ] Automated tests pass (unit + integration + e2e) +- [ ] Automated tests pass (unit + blackbox) - [ ] Minimum coverage threshold met (75%) - [ ] Runbooks written (if applicable) - [ ] Documentation updated diff --git a/.cursor/skills/plan/templates/final-report.md b/.cursor/skills/plan/templates/final-report.md index b809d65..0e27016 100644 --- a/.cursor/skills/plan/templates/final-report.md +++ b/.cursor/skills/plan/templates/final-report.md @@ -1,6 +1,6 @@ # Final Planning Report Template -Use this template after completing all 5 steps and the quality checklist. Save as `_docs/02_plans//FINAL_report.md`. 
+Use this template after completing all 6 steps and the quality checklist. Save as `_docs/02_document/FINAL_report.md`. --- diff --git a/.cursor/skills/plan/templates/performance-tests.md b/.cursor/skills/plan/templates/performance-tests.md new file mode 100644 index 0000000..dfbcd14 --- /dev/null +++ b/.cursor/skills/plan/templates/performance-tests.md @@ -0,0 +1,35 @@ +# Performance Tests Template + +Save as `DOCUMENT_DIR/tests/performance-tests.md`. + +--- + +```markdown +# Performance Tests + +### NFT-PERF-01: [Test Name] + +**Summary**: [What performance characteristic this validates] +**Traces to**: AC-[ID] +**Metric**: [what is measured — latency, throughput, frame rate, etc.] + +**Preconditions**: +- [System state, load profile, data volume] + +**Steps**: + +| Step | Consumer Action | Measurement | +|------|----------------|-------------| +| 1 | [action] | [what to measure and how] | + +**Pass criteria**: [specific threshold — e.g., p95 latency < 400ms] +**Duration**: [how long the test runs] +``` + +--- + +## Guidance Notes + +- Performance tests should run long enough to capture steady-state behavior, not just cold-start. +- Define clear pass/fail thresholds with specific metrics (p50, p95, p99 latency, throughput, etc.). +- Include warm-up preconditions to separate initialization cost from steady-state performance. diff --git a/.cursor/skills/plan/templates/resilience-tests.md b/.cursor/skills/plan/templates/resilience-tests.md new file mode 100644 index 0000000..72890ae --- /dev/null +++ b/.cursor/skills/plan/templates/resilience-tests.md @@ -0,0 +1,37 @@ +# Resilience Tests Template + +Save as `DOCUMENT_DIR/tests/resilience-tests.md`. + +--- + +```markdown +# Resilience Tests + +### NFT-RES-01: [Test Name] + +**Summary**: [What failure/recovery scenario this validates] +**Traces to**: AC-[ID] + +**Preconditions**: +- [System state before fault injection] + +**Fault injection**: +- [What fault is introduced — process kill, network partition, invalid input sequence, etc.] + +**Steps**: + +| Step | Action | Expected Behavior | +|------|--------|------------------| +| 1 | [inject fault] | [system behavior during fault] | +| 2 | [observe recovery] | [system behavior after recovery] | + +**Pass criteria**: [recovery time, data integrity, continued operation] +``` + +--- + +## Guidance Notes + +- Resilience tests must define both the fault and the expected recovery — not just "system should recover." +- Include specific recovery time expectations and data integrity checks. +- Test both graceful degradation (partial failure) and full recovery scenarios. diff --git a/.cursor/skills/plan/templates/resource-limit-tests.md b/.cursor/skills/plan/templates/resource-limit-tests.md new file mode 100644 index 0000000..53779e3 --- /dev/null +++ b/.cursor/skills/plan/templates/resource-limit-tests.md @@ -0,0 +1,31 @@ +# Resource Limit Tests Template + +Save as `DOCUMENT_DIR/tests/resource-limit-tests.md`. + +--- + +```markdown +# Resource Limit Tests + +### NFT-RES-LIM-01: [Test Name] + +**Summary**: [What resource constraint this validates] +**Traces to**: AC-[ID], RESTRICT-[ID] + +**Preconditions**: +- [System running under specified constraints] + +**Monitoring**: +- [What resources to monitor — memory, CPU, GPU, disk, temperature] + +**Duration**: [how long to run] +**Pass criteria**: [resource stays within limit — e.g., memory < 8GB throughout] +``` + +--- + +## Guidance Notes + +- Resource limit tests must specify monitoring duration — short bursts don't prove sustained compliance. 
+- Define specific numeric limits that can be programmatically checked. +- Include both the monitoring method and the threshold in the pass criteria. diff --git a/.cursor/skills/plan/templates/risk-register.md b/.cursor/skills/plan/templates/risk-register.md index 71fec69..786aec9 100644 --- a/.cursor/skills/plan/templates/risk-register.md +++ b/.cursor/skills/plan/templates/risk-register.md @@ -1,6 +1,6 @@ # Risk Register Template -Use this template for risk assessment. Save as `_docs/02_plans//risk_mitigations.md`. +Use this template for risk assessment. Save as `_docs/02_document/risk_mitigations.md`. Subsequent iterations: `risk_mitigations_02.md`, `risk_mitigations_03.md`, etc. --- diff --git a/.cursor/skills/plan/templates/security-tests.md b/.cursor/skills/plan/templates/security-tests.md new file mode 100644 index 0000000..b243404 --- /dev/null +++ b/.cursor/skills/plan/templates/security-tests.md @@ -0,0 +1,30 @@ +# Security Tests Template + +Save as `DOCUMENT_DIR/tests/security-tests.md`. + +--- + +```markdown +# Security Tests + +### NFT-SEC-01: [Test Name] + +**Summary**: [What security property this validates] +**Traces to**: AC-[ID], RESTRICT-[ID] + +**Steps**: + +| Step | Consumer Action | Expected Response | +|------|----------------|------------------| +| 1 | [attempt unauthorized access / injection / etc.] | [rejection / no data leak / etc.] | + +**Pass criteria**: [specific security outcome] +``` + +--- + +## Guidance Notes + +- Security tests at blackbox level focus on black-box attacks (unauthorized API calls, malformed input), not code-level vulnerabilities. +- Verify the system remains operational after security-related edge cases (no crash, no hang). +- Test authentication/authorization boundaries from the consumer's perspective. diff --git a/.cursor/skills/plan/templates/system-flows.md b/.cursor/skills/plan/templates/system-flows.md index 9b22bf1..6c887a8 100644 --- a/.cursor/skills/plan/templates/system-flows.md +++ b/.cursor/skills/plan/templates/system-flows.md @@ -1,7 +1,7 @@ # System Flows Template -Use this template for the system flows document. Save as `_docs/02_plans//system-flows.md`. -Individual flow diagrams go in `_docs/02_plans//diagrams/flows/flow_[name].md`. +Use this template for the system flows document. Save as `_docs/02_document/system-flows.md`. +Individual flow diagrams go in `_docs/02_document/diagrams/flows/flow_[name].md`. --- diff --git a/.cursor/skills/plan/templates/test-data.md b/.cursor/skills/plan/templates/test-data.md new file mode 100644 index 0000000..0cee7fa --- /dev/null +++ b/.cursor/skills/plan/templates/test-data.md @@ -0,0 +1,55 @@ +# Test Data Template + +Save as `DOCUMENT_DIR/tests/test-data.md`. 
+ +--- + +```markdown +# Test Data Management + +## Seed Data Sets + +| Data Set | Description | Used by Tests | How Loaded | Cleanup | +|----------|-------------|---------------|-----------|---------| +| [name] | [what it contains] | [test IDs] | [SQL script / API call / fixture file / volume mount] | [how removed after test] | + +## Data Isolation Strategy + +[e.g., each test run gets a fresh container restart, or transactions are rolled back, or namespaced data, or separate DB per test group] + +## Input Data Mapping + +| Input Data File | Source Location | Description | Covers Scenarios | +|-----------------|----------------|-------------|-----------------| +| [filename] | `_docs/00_problem/input_data/[filename]` | [what it contains] | [test IDs that use this data] | + +## Expected Results Mapping + +| Test Scenario ID | Input Data | Expected Result | Comparison Method | Tolerance | Expected Result Source | +|-----------------|------------|-----------------|-------------------|-----------|----------------------| +| [test ID] | `input_data/[filename]` | [quantifiable expected output] | [exact / tolerance / pattern / threshold / file-diff] | [± value or N/A] | `input_data/expected_results/[filename]` or inline | + +## External Dependency Mocks + +| External Service | Mock/Stub | How Provided | Behavior | +|-----------------|-----------|-------------|----------| +| [service name] | [mock type] | [Docker service / in-process stub / recorded responses] | [what it returns / simulates] | + +## Data Validation Rules + +| Data Type | Validation | Invalid Examples | Expected System Behavior | +|-----------|-----------|-----------------|------------------------| +| [type] | [rules] | [invalid input examples] | [how system should respond] | +``` + +--- + +## Guidance Notes + +- Every seed data set should be traceable to specific test scenarios. +- Input data from `_docs/00_problem/input_data/` should be mapped to test scenarios that use it. +- Every input data item MUST have a corresponding expected result in the Expected Results Mapping table. +- Expected results MUST be quantifiable: exact values, numeric tolerances, pattern matches, thresholds, or reference files. "Works correctly" is never acceptable. +- For complex expected outputs, provide machine-readable reference files (JSON, CSV) in `_docs/00_problem/input_data/expected_results/` and reference them in the mapping. +- External mocks must be deterministic — same input always produces same output. +- Data isolation must guarantee no test can affect another test's outcome. diff --git a/.cursor/skills/plan/templates/e2e-test-infrastructure.md b/.cursor/skills/plan/templates/test-environment.md similarity index 56% rename from .cursor/skills/plan/templates/e2e-test-infrastructure.md rename to .cursor/skills/plan/templates/test-environment.md index 0ba96f5..b5d74fa 100644 --- a/.cursor/skills/plan/templates/e2e-test-infrastructure.md +++ b/.cursor/skills/plan/templates/test-environment.md @@ -1,17 +1,16 @@ -# E2E Black-Box Test Infrastructure Template +# Test Environment Template -Describes a separate consumer application that tests the main system as a black box. -Save as `PLANS_DIR//e2e_test_infrastructure.md`. +Save as `DOCUMENT_DIR/tests/environment.md`. --- ```markdown -# E2E Test Infrastructure +# Test Environment ## Overview -**System under test**: [main system name and entry points — API URLs, message queues, etc.] 
-**Consumer app purpose**: Standalone application that exercises the main system through its public interfaces, validating end-to-end use cases without access to internals. +**System under test**: [main system name and entry points — API URLs, message queues, serial ports, etc.] +**Consumer app purpose**: Standalone application that exercises the main system through its public interfaces, validating black-box use cases without access to internals. ## Docker Environment @@ -22,7 +21,7 @@ Save as `PLANS_DIR//e2e_test_infrastructure.md`. | system-under-test | [main app image or build context] | The main system being tested | [ports] | | test-db | [postgres/mysql/etc.] | Database for the main system | [ports] | | e2e-consumer | [build context for consumer app] | Black-box test runner | — | -| [dependency] | [image] | [purpose — cache, queue, etc.] | [ports] | +| [dependency] | [image] | [purpose — cache, queue, mock, etc.] | [ports] | ### Networks @@ -68,54 +67,6 @@ services: - No internal module imports - No shared memory or file system with the main system -## E2E Test Scenarios - -### Acceptance Criteria Traceability - -| AC ID | Acceptance Criterion | E2E Test IDs | Coverage | -|-------|---------------------|-------------|----------| -| AC-01 | [criterion] | E2E-01 | Covered | -| AC-02 | [criterion] | E2E-02, E2E-03 | Covered | -| AC-03 | [criterion] | — | NOT COVERED — [reason] | - -### E2E-01: [Scenario Name] - -**Summary**: [One sentence: what end-to-end use case this validates] - -**Traces to**: AC-01 - -**Preconditions**: -- [System state required before test] - -**Steps**: - -| Step | Consumer Action | Expected System Response | -|------|----------------|------------------------| -| 1 | [call / send] | [response / event] | -| 2 | [call / send] | [response / event] | - -**Max execution time**: [e.g., 10s] - ---- - -### E2E-02: [Scenario Name] - -(repeat structure) - ---- - -## Test Data Management - -**Seed data**: - -| Data Set | Description | How Loaded | Cleanup | -|----------|-------------|-----------|---------| -| [name] | [what it contains] | [SQL script / API call / fixture file] | [how removed after test] | - -**Isolation strategy**: [e.g., each test run gets a fresh DB via container restart, or transactions are rolled back, or namespaced data] - -**External dependencies**: [any external APIs that need mocking or sandbox environments] - ## CI/CD Integration **When to run**: [e.g., on PR merge to dev, nightly, before production deploy] @@ -134,8 +85,6 @@ services: ## Guidance Notes -- Every E2E test MUST trace to at least one acceptance criterion. If it doesn't, question whether it's needed. - The consumer app must treat the main system as a true black box — no internal imports, no direct DB queries against the main system's database. -- Keep the number of E2E tests focused on critical use cases. Exhaustive testing belongs in per-component tests (Step 4). - Docker environment should be self-contained — `docker compose up` must be sufficient to run the full suite. - If the main system requires external services (payment gateways, third-party APIs), define mock/stub services in the Docker environment. diff --git a/.cursor/skills/plan/templates/test-spec.md b/.cursor/skills/plan/templates/test-spec.md index 2b6ee44..5b7b83e 100644 --- a/.cursor/skills/plan/templates/test-spec.md +++ b/.cursor/skills/plan/templates/test-spec.md @@ -17,7 +17,7 @@ Use this template for each component's test spec. 
Save as `components/[##]_[name --- -## Integration Tests +## Blackbox Tests ### IT-01: [Test Name] @@ -169,4 +169,4 @@ Use this template for each component's test spec. Save as `components/[##]_[name - If an acceptance criterion has no test covering it, mark it as NOT COVERED and explain why (e.g., "requires manual verification", "deferred to phase 2"). - Performance test targets should come from the NFR section in `architecture.md`. - Security tests should cover at minimum: authentication bypass, authorization escalation, injection attacks relevant to this component. -- Not every component needs all 4 test types. A stateless utility component may only need integration tests. +- Not every component needs all 4 test types. A stateless utility component may only need blackbox tests. diff --git a/.cursor/skills/plan/templates/traceability-matrix.md b/.cursor/skills/plan/templates/traceability-matrix.md new file mode 100644 index 0000000..e0192ac --- /dev/null +++ b/.cursor/skills/plan/templates/traceability-matrix.md @@ -0,0 +1,47 @@ +# Traceability Matrix Template + +Save as `DOCUMENT_DIR/tests/traceability-matrix.md`. + +--- + +```markdown +# Traceability Matrix + +## Acceptance Criteria Coverage + +| AC ID | Acceptance Criterion | Test IDs | Coverage | +|-------|---------------------|----------|----------| +| AC-01 | [criterion text] | FT-P-01, NFT-PERF-01 | Covered | +| AC-02 | [criterion text] | FT-P-02, FT-N-01 | Covered | +| AC-03 | [criterion text] | — | NOT COVERED — [reason and mitigation] | + +## Restrictions Coverage + +| Restriction ID | Restriction | Test IDs | Coverage | +|---------------|-------------|----------|----------| +| RESTRICT-01 | [restriction text] | FT-N-02, NFT-RES-LIM-01 | Covered | +| RESTRICT-02 | [restriction text] | — | NOT COVERED — [reason and mitigation] | + +## Coverage Summary + +| Category | Total Items | Covered | Not Covered | Coverage % | +|----------|-----------|---------|-------------|-----------| +| Acceptance Criteria | [N] | [N] | [N] | [%] | +| Restrictions | [N] | [N] | [N] | [%] | +| **Total** | [N] | [N] | [N] | [%] | + +## Uncovered Items Analysis + +| Item | Reason Not Covered | Risk | Mitigation | +|------|-------------------|------|-----------| +| [AC/Restriction ID] | [why it cannot be tested at blackbox level] | [what could go wrong] | [how risk is addressed — e.g., covered by component tests in Step 5] | +``` + +--- + +## Guidance Notes + +- Every acceptance criterion must appear in the matrix — either covered or explicitly marked as not covered with a reason. +- Every restriction must appear in the matrix. +- NOT COVERED items must have a reason and a mitigation strategy (e.g., "covered at component test level" or "requires real hardware"). +- Coverage percentage should be at least 75% for acceptance criteria at the blackbox test level. diff --git a/.cursor/skills/problem/SKILL.md b/.cursor/skills/problem/SKILL.md new file mode 100644 index 0000000..570fa1e --- /dev/null +++ b/.cursor/skills/problem/SKILL.md @@ -0,0 +1,241 @@ +--- +name: problem +description: | + Interactive problem gathering skill that builds _docs/00_problem/ through structured interview. + Iteratively asks probing questions until the problem, restrictions, acceptance criteria, and input data + are fully understood. Produces all required files for downstream skills (research, plan, etc.). 
+ Trigger phrases: + - "problem", "define problem", "problem gathering" + - "what am I building", "describe problem" + - "start project", "new project" +category: build +tags: [problem, gathering, interview, requirements, acceptance-criteria] +disable-model-invocation: true +--- + +# Problem Gathering + +Build a complete problem definition through structured, interactive interview with the user. Produces all required files in `_docs/00_problem/` that downstream skills (research, plan, decompose, implement, deploy) depend on. + +## Core Principles + +- **Ask, don't assume**: never infer requirements the user hasn't stated +- **Exhaust before writing**: keep asking until all dimensions are covered; do not write files prematurely +- **Concrete over vague**: push for measurable values, specific constraints, real numbers +- **Save immediately**: once the user confirms, write all files at once +- **User is the authority**: the AI suggests, the user decides + +## Context Resolution + +Fixed paths: + +- OUTPUT_DIR: `_docs/00_problem/` +- INPUT_DATA_DIR: `_docs/00_problem/input_data/` + +## Prerequisite Checks + +1. If OUTPUT_DIR already exists and contains files, present what exists and ask user: **resume and fill gaps, overwrite, or skip?** +2. If overwrite or fresh start, create OUTPUT_DIR and INPUT_DATA_DIR + +## Completeness Criteria + +The interview is complete when the AI can write ALL of these: + +| File | Complete when | +|------|--------------| +| `problem.md` | Clear problem statement: what is being built, why, for whom, what it does | +| `restrictions.md` | All constraints identified: hardware, software, environment, operational, regulatory, budget, timeline | +| `acceptance_criteria.md` | Measurable success criteria with specific numeric targets grouped by category | +| `input_data/` | At least one reference data file or detailed data description document. Must include `expected_results.md` with input→output pairs for downstream test specification | +| `security_approach.md` | (optional) Security requirements identified, or explicitly marked as not applicable | + +## Interview Protocol + +### Phase 1: Open Discovery + +Start with broad, open questions. Let the user describe the problem in their own words. + +**Opening**: Ask the user to describe what they are building and what problem it solves. Do not interrupt or narrow down yet. + +After the user responds, summarize what you understood and ask: "Did I get this right? What did I miss?" + +### Phase 2: Structured Probing + +Work through each dimension systematically. For each dimension, ask only what the user hasn't already covered. Skip dimensions that were fully answered in Phase 1. + +**Dimension checklist:** + +1. **Problem & Goals** + - What exactly does the system do? + - What problem does it solve? Why does it need to exist? + - Who are the users / operators / stakeholders? + - What is the expected usage pattern (frequency, load, environment)? + +2. **Scope & Boundaries** + - What is explicitly IN scope? + - What is explicitly OUT of scope? + - Are there related systems this integrates with? + - What does the system NOT do (common misconceptions)? + +3. **Hardware & Environment** + - What hardware does it run on? (CPU, GPU, memory, storage) + - What operating system / platform? + - What is the deployment environment? (cloud, edge, embedded, on-prem) + - Any physical constraints? (power, thermal, size, connectivity) + +4. **Software & Tech Constraints** + - Required programming languages or frameworks? 
+ - Required protocols or interfaces? + - Existing systems it must integrate with? + - Libraries or tools that must or must not be used? + +5. **Acceptance Criteria** + - What does "done" look like? + - Performance targets: latency, throughput, accuracy, error rates? + - Quality bars: reliability, availability, recovery time? + - Push for specific numbers: "less than Xms", "above Y%", "within Z meters" + - Edge cases: what happens when things go wrong? + - Startup and shutdown behavior? + +6. **Input Data** + - What data does the system consume? + - Formats, schemas, volumes, update frequency? + - Does the user have sample/reference data to provide? + - If no data exists yet, what would representative data look like? + +7. **Security** (optional, probe gently) + - Authentication / authorization requirements? + - Data sensitivity (PII, classified, proprietary)? + - Communication security (encryption, TLS)? + - If the user says "not a concern", mark as N/A and move on + +8. **Operational Constraints** + - Budget constraints? + - Timeline constraints? + - Team size / expertise constraints? + - Regulatory or compliance requirements? + - Geographic restrictions? + +### Phase 3: Gap Analysis + +After all dimensions are covered: + +1. Internally assess completeness against the Completeness Criteria table +2. Present a completeness summary to the user: + +``` +Completeness Check: +- problem.md: READY / GAPS: [list missing aspects] +- restrictions.md: READY / GAPS: [list missing aspects] +- acceptance_criteria.md: READY / GAPS: [list missing aspects] +- input_data/: READY / GAPS: [list missing aspects] +- security_approach.md: READY / N/A / GAPS: [list missing aspects] +``` + +3. If gaps exist, ask targeted follow-up questions for each gap +4. Repeat until all required files show READY + +### Phase 4: Draft & Confirm + +1. Draft all files in the conversation (show the user what will be written) +2. Present each file's content for review +3. Ask: "Should I save these files? Any changes needed?" +4. Apply any requested changes +5. Save all files to OUTPUT_DIR + +## Output File Formats + +### problem.md + +Free-form text. Clear, concise description of: +- What is being built +- What problem it solves +- How it works at a high level +- Key context the reader needs to understand the problem + +No headers required. Paragraph format. Should be readable by someone unfamiliar with the project. + +### restrictions.md + +Categorized constraints with markdown headers and bullet points: + +```markdown +# [Category Name] + +- Constraint description with specific values where applicable +- Another constraint +``` + +Categories are derived from the interview (hardware, software, environment, operational, etc.). Each restriction should be specific and testable. + +### acceptance_criteria.md + +Categorized measurable criteria with markdown headers and bullet points: + +```markdown +# [Category Name] + +- Criterion with specific numeric target +- Another criterion with measurable threshold +``` + +Every criterion must have a measurable value. Vague criteria like "should be fast" are not acceptable — push for "less than 400ms end-to-end". + +### input_data/ + +At least one file. Options: +- User provides actual data files (CSV, JSON, images, etc.) — save as-is +- User describes data parameters — save as `data_parameters.md` +- User provides URLs to data — save as `data_sources.md` with links and descriptions +- `expected_results.md` — expected outputs for given inputs (required by downstream test-spec skill). 
During the Acceptance Criteria dimension, probe for concrete input→output pairs and save them here. Format: use the template from `.cursor/skills/test-spec/templates/expected-results.md`. + +### security_approach.md (optional) + +If security requirements exist, document them. If the user says security is not a concern for this project, skip this file entirely. + +## Progress Tracking + +Create a TodoWrite with phases 1-4. Update as each phase completes. + +## Escalation Rules + +| Situation | Action | +|-----------|--------| +| User cannot provide acceptance criteria numbers | Suggest industry benchmarks, ASK user to confirm or adjust | +| User has no input data at all | ASK what representative data would look like, create a `data_parameters.md` describing expected data | +| User says "I don't know" to a critical dimension | Research the domain briefly, suggest reasonable defaults, ASK user to confirm | +| Conflicting requirements discovered | Present the conflict, ASK user which takes priority | +| User wants to skip a required file | Explain why downstream skills need it, ASK if they want a minimal placeholder | + +## Common Mistakes + +- **Writing files before the interview is complete**: gather everything first, then write +- **Accepting vague criteria**: "fast", "accurate", "reliable" are not acceptance criteria without numbers +- **Assuming technical choices**: do not suggest specific technologies unless the user constrains them +- **Over-engineering the problem statement**: problem.md should be concise, not a dissertation +- **Inventing restrictions**: only document what the user actually states as a constraint +- **Skipping input data**: downstream skills (especially research and plan) need concrete data context + +## Methodology Quick Reference + +``` +┌────────────────────────────────────────────────────────────────┐ +│ Problem Gathering (4-Phase Interview) │ +├────────────────────────────────────────────────────────────────┤ +│ PREREQ: Check if _docs/00_problem/ exists (resume/overwrite?) │ +│ │ +│ Phase 1: Open Discovery │ +│ → "What are you building?" → summarize → confirm │ +│ Phase 2: Structured Probing │ +│ → 8 dimensions: problem, scope, hardware, software, │ +│ acceptance criteria, input data, security, operations │ +│ → skip what Phase 1 already covered │ +│ Phase 3: Gap Analysis │ +│ → assess completeness per file → fill gaps iteratively │ +│ Phase 4: Draft & Confirm │ +│ → show all files → user confirms → save to _docs/00_problem/ │ +├────────────────────────────────────────────────────────────────┤ +│ Principles: Ask don't assume · Concrete over vague │ +│ Exhaust before writing · User is authority │ +└────────────────────────────────────────────────────────────────┘ +``` diff --git a/.cursor/skills/refactor/SKILL.md b/.cursor/skills/refactor/SKILL.md index d05c779..3acea10 100644 --- a/.cursor/skills/refactor/SKILL.md +++ b/.cursor/skills/refactor/SKILL.md @@ -10,6 +10,8 @@ description: | - "refactor", "refactoring", "improve code" - "analyze coupling", "decoupling", "technical debt" - "refactoring assessment", "code quality improvement" +category: evolve +tags: [refactoring, coupling, technical-debt, performance, hardening] disable-model-invocation: true --- @@ -32,15 +34,14 @@ Determine the operating mode based on invocation before any other logic runs. 
**Project mode** (no explicit input file provided): - PROBLEM_DIR: `_docs/00_problem/` - SOLUTION_DIR: `_docs/01_solution/` -- COMPONENTS_DIR: `_docs/02_components/` -- TESTS_DIR: `_docs/02_tests/` +- COMPONENTS_DIR: `_docs/02_document/components/` +- DOCUMENT_DIR: `_docs/02_document/` - REFACTOR_DIR: `_docs/04_refactoring/` - All existing guardrails apply. **Standalone mode** (explicit input file provided, e.g. `/refactor @some_component.md`): - INPUT_FILE: the provided file (treated as component/area description) -- Derive `` from the input filename (without extension) -- REFACTOR_DIR: `_standalone//refactoring/` +- REFACTOR_DIR: `_standalone/refactoring/` - Guardrails relaxed: only INPUT_FILE must exist and be non-empty - `acceptance_criteria.md` is optional — warn if absent @@ -154,7 +155,7 @@ Store in PROBLEM_DIR. | Metric Category | What to Capture | |----------------|-----------------| -| **Coverage** | Overall, unit, integration, critical paths | +| **Coverage** | Overall, unit, blackbox, critical paths | | **Complexity** | Cyclomatic complexity (avg + top 5 functions), LOC, tech debt ratio | | **Code Smells** | Total, critical, major | | **Performance** | Response times (P50/P95/P99), CPU/memory, throughput | @@ -209,7 +210,7 @@ Write: Also copy to project standard locations if in project mode: - `SOLUTION_DIR/solution.md` -- `COMPONENTS_DIR/system_flows.md` +- `DOCUMENT_DIR/system_flows.md` **Self-verification**: - [ ] Every component in the codebase is documented @@ -275,14 +276,14 @@ Write `REFACTOR_DIR/analysis/refactoring_roadmap.md`: #### 3a. Design Test Specs -Coverage requirements (must meet before refactoring): +Coverage requirements (must meet before refactoring — see `.cursor/rules/cursor-meta.mdc` Quality Thresholds): - Minimum overall coverage: 75% - Critical path coverage: 90% -- All public APIs must have integration tests +- All public APIs must have blackbox tests - All error handling paths must be tested For each critical area, write test specs to `REFACTOR_DIR/test_specs/[##]_[test_name].md`: -- Integration tests: summary, current behavior, input data, expected result, max expected time +- Blackbox tests: summary, current behavior, input data, expected result, max expected time - Acceptance tests: summary, preconditions, steps with expected results - Coverage analysis: current %, target %, uncovered critical paths @@ -296,7 +297,7 @@ For each critical area, write test specs to `REFACTOR_DIR/test_specs/[##]_[test_ **Self-verification**: - [ ] Coverage requirements met (75% overall, 90% critical paths) - [ ] All tests pass on current codebase -- [ ] All public APIs have integration tests +- [ ] All public APIs have blackbox tests - [ ] Test data fixtures are configured **Save action**: Write test specs; implemented tests go into the project's test folder @@ -331,7 +332,7 @@ Write `REFACTOR_DIR/coupling_analysis.md`: For each change in the decoupling strategy: 1. Implement the change -2. Run integration tests +2. Run blackbox tests 3. Fix any failures 4. 
Commit with descriptive message diff --git a/.cursor/skills/research/SKILL.md b/.cursor/skills/research/SKILL.md index 3ae1aca..85fd5d7 100644 --- a/.cursor/skills/research/SKILL.md +++ b/.cursor/skills/research/SKILL.md @@ -1,5 +1,5 @@ --- -name: deep-research +name: research description: | Deep Research Methodology (8-Step Method) with two execution modes: - Mode A (Initial Research): Assess acceptance criteria, then research problem and produce solution draft @@ -11,6 +11,9 @@ description: | - "research this", "investigate", "look into" - "assess solution", "review solution draft" - "comparative analysis", "concept comparison", "technical comparison" +category: build +tags: [research, analysis, solution-design, comparison, decision-support] +disable-model-invocation: true --- # Deep Research (8-Step Method) @@ -24,6 +27,9 @@ Transform vague topics raised by users into high-quality, deliverable research r - **Prioritize authoritative sources: L1 > L2 > L3 > L4** - **Intermediate results must be saved for traceability and reuse** - **Ask, don't assume** — when any aspect of the problem, criteria, or restrictions is unclear, STOP and ask the user before proceeding +- **Internet-first investigation** — do not rely on training data for factual claims; search the web extensively for every sub-question, rephrase queries when results are thin, and keep searching until you have converging evidence from multiple independent sources +- **Multi-perspective analysis** — examine every problem from at least 3 different viewpoints (e.g., end-user, implementer, business decision-maker, contrarian, domain expert, field practitioner); each perspective should generate its own search queries +- **Question multiplication** — for each sub-question, generate multiple reformulated search queries (synonyms, related terms, negations, "what can go wrong" variants, practitioner-focused variants) to maximize coverage and uncover blind spots ## Context Resolution @@ -37,248 +43,51 @@ Determine the operating mode based on invocation before any other logic runs. **Standalone mode** (explicit input file provided, e.g. `/research @some_doc.md`): - INPUT_FILE: the provided file (treated as problem description) -- Derive `` from the input filename (without extension) -- OUTPUT_DIR: `_standalone//01_solution/` -- RESEARCH_DIR: `_standalone//00_research/` +- BASE_DIR: if specified by the caller, use it; otherwise default to `_standalone/` +- OUTPUT_DIR: `BASE_DIR/01_solution/` +- RESEARCH_DIR: `BASE_DIR/00_research/` - Guardrails relaxed: only INPUT_FILE must exist and be non-empty - `restrictions.md` and `acceptance_criteria.md` are optional — warn if absent, proceed if user confirms - Mode detection uses OUTPUT_DIR for `solution_draft*.md` scanning - Draft numbering works the same, scoped to OUTPUT_DIR +- **Final step**: after all research is complete, move INPUT_FILE into BASE_DIR Announce the detected mode and resolved paths to the user before proceeding. ## Project Integration -### Prerequisite Guardrails (BLOCKING) - -Before any research begins, verify the input context exists. **Do not proceed if guardrails fail.** - -**Project mode:** -1. Check INPUT_DIR exists — **STOP if missing**, ask user to create it and provide problem files -2. Check `problem.md` in INPUT_DIR exists and is non-empty — **STOP if missing** -3. 
Check for `restrictions.md` and `acceptance_criteria.md` in INPUT_DIR:
-  - If missing: **warn user** and ask whether to proceed without them or provide them first
-  - If present: read and validate they are non-empty
-4. Read **all** files in INPUT_DIR to ground the investigation in the project context
-5. Create OUTPUT_DIR and RESEARCH_DIR if they don't exist
-
-**Standalone mode:**
-1. Check INPUT_FILE exists and is non-empty — **STOP if missing**
-2. Warn if no `restrictions.md` or `acceptance_criteria.md` were provided alongside INPUT_FILE — proceed if user confirms
-3. Create OUTPUT_DIR and RESEARCH_DIR if they don't exist
-
-### Mode Detection
-
-After guardrails pass, determine the execution mode:
-
-1. Scan OUTPUT_DIR for files matching `solution_draft*.md`
-2. **No matches found** → **Mode A: Initial Research**
-3. **Matches found** → **Mode B: Solution Assessment** (use the highest-numbered draft as input)
-4. **User override**: if the user explicitly says "research from scratch" or "initial research", force Mode A regardless of existing drafts
-
-Inform the user which mode was detected and confirm before proceeding.
-
-### Solution Draft Numbering
-
-All final output is saved as `OUTPUT_DIR/solution_draft##.md` with a 2-digit zero-padded number:
-
-1. Scan existing files in OUTPUT_DIR matching `solution_draft*.md`
-2. Extract the highest existing number
-3. Increment by 1
-4. Zero-pad to 2 digits (e.g., `01`, `02`, ..., `10`, `11`)
-
-Example: if `solution_draft01.md` through `solution_draft10.md` exist, the next output is `solution_draft11.md`.
-
-### Working Directory & Intermediate Artifact Management
-
-#### Directory Structure
-
-At the start of research, **must** create a topic-named working directory under RESEARCH_DIR:
-
-```
-RESEARCH_DIR/<topic>/
-├── 00_ac_assessment.md           # Mode A Phase 1 output: AC & restrictions assessment
-├── 00_question_decomposition.md  # Step 0-1 output
-├── 01_source_registry.md         # Step 2 output: all consulted source links
-├── 02_fact_cards.md              # Step 3 output: extracted facts
-├── 03_comparison_framework.md    # Step 4 output: selected framework and populated data
-├── 04_reasoning_chain.md         # Step 6 output: fact → conclusion reasoning
-├── 05_validation_log.md          # Step 7 output: use-case validation results
-└── raw/                          # Raw source archive (optional)
-    ├── source_1.md
-    └── source_2.md
-```
-
-### Save Timing & Content
-
-| Step | Save immediately after completion | Filename |
-|------|-----------------------------------|----------|
-| Mode A Phase 1 | AC & restrictions assessment tables | `00_ac_assessment.md` |
-| Step 0-1 | Question type classification + sub-question list | `00_question_decomposition.md` |
-| Step 2 | Each consulted source link, tier, summary | `01_source_registry.md` |
-| Step 3 | Each fact card (statement + source + confidence) | `02_fact_cards.md` |
-| Step 4 | Selected comparison framework + initial population | `03_comparison_framework.md` |
-| Step 6 | Reasoning process for each dimension | `04_reasoning_chain.md` |
-| Step 7 | Validation scenarios + results + review checklist | `05_validation_log.md` |
-| Step 8 | Complete solution draft | `OUTPUT_DIR/solution_draft##.md` |
-
-### Save Principles
-
-1. **Save immediately**: Write to the corresponding file as soon as a step is completed; don't wait until the end
-2. **Incremental updates**: Same file can be updated multiple times; append or replace new content
-3. **Preserve process**: Keep intermediate files even after their content is integrated into the final report
-4.
**Enable recovery**: If research is interrupted, progress can be recovered from intermediate files

+Read and follow `steps/00_project-integration.md` for prerequisite guardrails, mode detection, draft numbering, working directory setup, save timing, and output file inventory.

## Execution Flow

### Mode A: Initial Research

-Triggered when no `solution_draft*.md` files exist in OUTPUT_DIR, or when the user explicitly requests initial research.
+Read and follow `steps/01_mode-a-initial-research.md`.

-#### Phase 1: AC & Restrictions Assessment (BLOCKING)
-
-**Role**: Professional software architect
-
-A focused preliminary research pass **before** the main solution research. The goal is to validate that the acceptance criteria and restrictions are realistic before designing a solution around them.
-
-**Input**: All files from INPUT_DIR (or INPUT_FILE in standalone mode)
-
-**Task**:
-1. Read all problem context files thoroughly
-2. **ASK the user about every unclear aspect** — do not assume:
-   - Unclear problem boundaries → ask
-   - Ambiguous acceptance criteria values → ask
-   - Missing context (no `security_approach.md`, no `input_data/`) → ask what they have
-   - Conflicting restrictions → ask which takes priority
-3. Research on the internet:
-   - How realistic are the acceptance criteria for this specific domain?
-   - How critical is each criterion?
-   - What domain-specific acceptance criteria are we missing?
-   - Impact of each criterion value on the whole system quality
-   - Cost/budget implications of each criterion
-   - Timeline implications — how long would it take to meet each criterion
-4. Research restrictions:
-   - Are the restrictions realistic?
-   - Should any be tightened or relaxed?
-   - Are there additional restrictions we should add?
-5. Verify findings with authoritative sources (official docs, papers, benchmarks)
-
-**Uses Steps 0-3 of the 8-step engine** (question classification, decomposition, source tiering, fact extraction) scoped to AC and restrictions assessment.
-
-**📁 Save action**: Write `RESEARCH_DIR/<topic>/00_ac_assessment.md` with format:
-
-```markdown
-# Acceptance Criteria Assessment
-
-## Acceptance Criteria
-
-| Criterion | Our Values | Researched Values | Cost/Timeline Impact | Status |
-|-----------|-----------|-------------------|---------------------|--------|
-| [name] | [current] | [researched range] | [impact] | Added / Modified / Removed |
-
-## Restrictions Assessment
-
-| Restriction | Our Values | Researched Values | Cost/Timeline Impact | Status |
-|-------------|-----------|-------------------|---------------------|--------|
-| [name] | [current] | [researched range] | [impact] | Added / Modified / Removed |
-
-## Key Findings
-[Summary of critical findings]
-
-## Sources
-[Key references used]
-```
-
-**BLOCKING**: Present the AC assessment tables to the user. Wait for confirmation or adjustments before proceeding to Phase 2. The user may update `acceptance_criteria.md` or `restrictions.md` based on findings.
-
----
-
-#### Phase 2: Problem Research & Solution Draft
-
-**Role**: Professional researcher and software architect
-
-Full 8-step research methodology. Produces the first solution draft.
-
-**Input**: All files from INPUT_DIR (possibly updated after Phase 1) + Phase 1 artifacts
-
-**Task** (drives the 8-step engine):
-1. Research existing/competitor solutions for similar problems
-2. Research the problem thoroughly — all possible ways to solve it, split into components
-3.
For each component, research all possible solutions and find the most efficient state-of-the-art approaches
-4. Verify that suggested tools/libraries actually exist and work as described
-5. Include security considerations in each component analysis
-6. Provide rough cost estimates for proposed solutions
-
-Be concise in formulation: the fewer words, the better, but do not miss any important details.
-
-**📁 Save action**: Write `OUTPUT_DIR/solution_draft##.md` using template: `templates/solution_draft_mode_a.md`
-
----
-
-#### Phase 3: Tech Stack Consolidation (OPTIONAL)
-
-**Role**: Software architect evaluating technology choices
-
-Focused synthesis step — no new 8-step cycle. Uses research already gathered in Phase 2 to make concrete technology decisions.
-
-**Input**: Latest `solution_draft##.md` from OUTPUT_DIR + all files from INPUT_DIR
-
-**Task**:
-1. Extract technology options from the solution draft's component comparison tables
-2. Score each option against: fitness for purpose, maturity, security track record, team expertise, cost, scalability
-3. Produce a tech stack summary with selection rationale
-4. Assess risks and learning requirements per technology choice
-
-**📁 Save action**: Write `OUTPUT_DIR/tech_stack.md` with:
-- Requirements analysis (functional, non-functional, constraints)
-- Technology evaluation tables (language, framework, database, infrastructure, key libraries) with scores
-- Tech stack summary block
-- Risk assessment and learning requirements tables
-
----
-
-#### Phase 4: Security Deep Dive (OPTIONAL)
-
-**Role**: Security architect
-
-Focused analysis step — deepens the security column from the solution draft into a proper threat model and controls specification.
-
-**Input**: Latest `solution_draft##.md` from OUTPUT_DIR + `security_approach.md` from INPUT_DIR + problem context
-
-**Task**:
-1. Build threat model: asset inventory, threat actors, attack vectors
-2. Define security requirements and proposed controls per component (with risk level)
-3. Summarize authentication/authorization, data protection, secure communication, and logging/monitoring approach
-
-**📁 Save action**: Write `OUTPUT_DIR/security_analysis.md` with:
-- Threat model (assets, actors, vectors)
-- Per-component security requirements and controls table
-- Security controls summary
+Phases: AC Assessment (BLOCKING) → Problem Research → Tech Stack (optional) → Security (optional).

---

### Mode B: Solution Assessment

-Triggered when `solution_draft*.md` files exist in OUTPUT_DIR.
+Read and follow `steps/02_mode-b-solution-assessment.md`.

-**Role**: Professional software architect
+---

-Full 8-step research methodology applied to assessing and improving an existing solution draft.
+## Research Engine (8-Step Method)

-**Input**: All files from INPUT_DIR + the latest (highest-numbered) `solution_draft##.md` from OUTPUT_DIR
+The 8-step method is the core research engine used by both modes. Steps 0-1 and Step 8 have mode-specific behavior; Steps 2-7 are identical regardless of mode.

-**Task** (drives the 8-step engine):
-1. Read the existing solution draft thoroughly
-2. Research on the internet — identify all potential weak points and problems
-3. Identify security weak points and vulnerabilities
-4. Identify performance bottlenecks
-5. Address these problems and find ways to solve them
-6. Based on findings, form a new solution draft in the same format
-7. During the comparison, try to find the best solution, the one that produces the highest-quality result within the boundaries of the restrictions.
In case of uncertainties, or if a solution comes close to or exceeds those boundaries, ask the user

+**Investigation phase** (Steps 0–3.5): Read and follow `steps/03_engine-investigation.md`.
+Covers: question classification, novelty sensitivity, question decomposition, perspective rotation, exhaustive web search, fact extraction, iterative deepening.

-**📁 Save action**: Write `OUTPUT_DIR/solution_draft##.md` (incremented) using template: `templates/solution_draft_mode_b.md`

+**Analysis phase** (Steps 4–8): Read and follow `steps/04_engine-analysis.md`.
+Covers: comparison framework, baseline alignment, reasoning chain, use-case validation, deliverable formatting.

-**Optional follow-up**: After Mode B completes, the user can request Phase 3 (Tech Stack Consolidation) or Phase 4 (Security Deep Dive) using the revised draft. These phases work identically to their Mode A descriptions above.

+## Solution Draft Output Templates
+
+- Mode A: `templates/solution_draft_mode_a.md`
+- Mode B: `templates/solution_draft_mode_b.md`

## Escalation Rules

@@ -302,526 +111,12 @@ When the user wants to:
- Gather information and evidence for a decision
- Assess or improve an existing solution draft

-**Keywords**:
-- "deep research", "deep dive", "in-depth analysis"
-- "research this", "investigate", "look into"
-- "assess solution", "review draft", "improve solution"
-- "comparative analysis", "concept comparison", "technical comparison"
-
**Differentiation from other Skills**:
- Needs a **visual knowledge graph** → use `research-to-diagram`
- Needs **written output** (articles/tutorials) → use `wsy-writer`
- Needs **material organization** → use `material-to-markdown`
- Needs **research + solution draft** → use this Skill

-## Research Engine (8-Step Method)
-
-The 8-step method is the core research engine used by both modes. Steps 0-1 and Step 8 have mode-specific behavior; Steps 2-7 are identical regardless of mode.
-
-### Step 0: Question Type Classification
-
-First, classify the research question type and select the corresponding strategy:
-
-| Question Type | Core Task | Focus Dimensions |
-|---------------|-----------|------------------|
-| **Concept Comparison** | Build comparison framework | Mechanism differences, applicability boundaries |
-| **Decision Support** | Weigh trade-offs | Cost, risk, benefit |
-| **Trend Analysis** | Map evolution trajectory | History, driving factors, predictions |
-| **Problem Diagnosis** | Root cause analysis | Symptoms, causes, evidence chain |
-| **Knowledge Organization** | Systematic structuring | Definitions, classifications, relationships |
-
-**Mode-specific classification**:
-
-| Mode / Phase | Typical Question Type |
-|--------------|----------------------|
-| Mode A Phase 1 | Knowledge Organization + Decision Support |
-| Mode A Phase 2 | Decision Support |
-| Mode B | Problem Diagnosis + Decision Support |
-
-### Step 0.5: Novelty Sensitivity Assessment (BLOCKING)
-
-**Before starting research, you must assess the novelty sensitivity of the question.
This determines the source filtering strategy.** - -#### Novelty Sensitivity Classification - -| Sensitivity Level | Typical Domains | Source Time Window | Description | -|-------------------|-----------------|-------------------|-------------| -| **🔴 Critical** | AI/LLMs, blockchain, cryptocurrency | 3-6 months | Technology iterates extremely fast; info from months ago may be completely outdated | -| **🟠 High** | Cloud services, frontend frameworks, API interfaces | 6-12 months | Frequent version updates; must confirm current version | -| **🟡 Medium** | Programming languages, databases, operating systems | 1-2 years | Relatively stable but still evolving | -| **🟢 Low** | Algorithm fundamentals, design patterns, theoretical concepts | No limit | Core principles change slowly | - -#### 🔴 Critical Sensitivity Domain Special Rules - -When the research topic involves the following domains, **special rules must be enforced**: - -**Trigger word identification**: -- AI-related: LLM, GPT, Claude, Gemini, AI Agent, RAG, vector database, prompt engineering -- Cloud-native: Kubernetes new versions, Serverless, container runtimes -- Cutting-edge tech: Web3, quantum computing, AR/VR - -**Mandatory rules**: - -1. **Search with time constraints**: - - Use `time_range: "month"` or `time_range: "week"` to limit search results - - Prefer `start_date: "YYYY-MM-DD"` set to within the last 3 months - -2. **Elevate official source priority**: - - **Must first consult** official documentation, official blogs, official Changelogs - - GitHub Release Notes, official X/Twitter announcements - - Academic papers (arXiv and other preprint platforms) - -3. **Mandatory version number annotation**: - - Any technical description must annotate the **current version number** - - Example: "Claude 3.5 Sonnet (claude-3-5-sonnet-20241022) supports..." - - Prohibit vague statements like "the latest version supports..." - -4. **Outdated information handling**: - - Technical blogs/tutorials older than 6 months → historical reference only, **cannot serve as factual evidence** - - Version inconsistency found → must **verify current version** before using - - Obviously outdated descriptions (e.g., "will support in the future" but now already supported) → **discard directly** - -5. **Cross-validation**: - - Highly sensitive information must be confirmed from **at least 2 independent sources** - - Priority: Official docs > Official blogs > Authoritative tech media > Personal blogs - -6. **Official download/release page direct verification (BLOCKING)**: - - **Must directly visit** official download pages to verify platform support (don't rely on search engine caches) - - Use `mcp__tavily-mcp__tavily-extract` or `WebFetch` to directly extract download page content - - Example: `https://product.com/download` or `https://github.com/xxx/releases` - - Search results about "coming soon" or "planned support" may be outdated; must verify in real time - - **Platform support is frequently changing information**; cannot infer from old sources - -7. 
**Product-specific protocol/feature name search (BLOCKING)**: - - Beyond searching the product name, **must additionally search protocol/standard names the product supports** - - Common protocols/standards to search: - - AI tools: MCP, ACP (Agent Client Protocol), LSP, DAP - - Cloud services: OAuth, OIDC, SAML - - Data exchange: GraphQL, gRPC, REST - - Search format: `" support"` or `" integration"` - - These protocol integrations are often differentiating features, easily missed in main docs but documented in specialized pages - -#### Timeliness Assessment Output Template - -```markdown -## Timeliness Sensitivity Assessment - -- **Research Topic**: [topic] -- **Sensitivity Level**: 🔴 Critical / 🟠 High / 🟡 Medium / 🟢 Low -- **Rationale**: [why this level] -- **Source Time Window**: [X months/years] -- **Priority official sources to consult**: - 1. [Official source 1] - 2. [Official source 2] -- **Key version information to verify**: - - [Product/technology 1]: Current version ____ - - [Product/technology 2]: Current version ____ -``` - -**📁 Save action**: Append timeliness assessment to the end of `00_question_decomposition.md` - ---- - -### Step 1: Question Decomposition & Boundary Definition - -**Mode-specific sub-questions**: - -**Mode A Phase 2** (Initial Research — Problem & Solution): -- "What existing/competitor solutions address this problem?" -- "What are the component parts of this problem?" -- "For each component, what are the state-of-the-art solutions?" -- "What are the security considerations per component?" -- "What are the cost implications of each approach?" - -**Mode B** (Solution Assessment): -- "What are the weak points and potential problems in the existing draft?" -- "What are the security vulnerabilities in the proposed architecture?" -- "Where are the performance bottlenecks?" -- "What solutions exist for each identified issue?" - -**General sub-question patterns** (use when applicable): -- **Sub-question A**: "What is X and how does it work?" (Definition & mechanism) -- **Sub-question B**: "What are the dimensions of relationship/difference between X and Y?" (Comparative analysis) -- **Sub-question C**: "In what scenarios is X applicable/inapplicable?" (Boundary conditions) -- **Sub-question D**: "What are X's development trends/best practices?" (Extended analysis) - -**⚠️ Research Subject Boundary Definition (BLOCKING - must be explicit)**: - -When decomposing questions, you must explicitly define the **boundaries of the research subject**: - -| Dimension | Boundary to define | Example | -|-----------|--------------------|---------| -| **Population** | Which group is being studied? | University students vs K-12 vs vocational students vs all students | -| **Geography** | Which region is being studied? | Chinese universities vs US universities vs global | -| **Timeframe** | Which period is being studied? | Post-2020 vs full historical picture | -| **Level** | Which level is being studied? | Undergraduate vs graduate vs vocational | - -**Common mistake**: User asks about "university classroom issues" but sources include policies targeting "K-12 students" — mismatched target populations will invalidate the entire research. - -**📁 Save action**: -1. Read all files from INPUT_DIR to ground the research in the project context -2. Create working directory `RESEARCH_DIR//` -3. 
Write `00_question_decomposition.md`, including: - - Original question - - Active mode (A Phase 2 or B) and rationale - - Summary of relevant problem context from INPUT_DIR - - Classified question type and rationale - - **Research subject boundary definition** (population, geography, timeframe, level) - - List of decomposed sub-questions -4. Write TodoWrite to track progress - -### Step 2: Source Tiering & Authority Anchoring - -Tier sources by authority, **prioritize primary sources**: - -| Tier | Source Type | Purpose | Credibility | -|------|------------|---------|-------------| -| **L1** | Official docs, papers, specs, RFCs | Definitions, mechanisms, verifiable facts | ✅ High | -| **L2** | Official blogs, tech talks, white papers | Design intent, architectural thinking | ✅ High | -| **L3** | Authoritative media, expert commentary, tutorials | Supplementary intuition, case studies | ⚠️ Medium | -| **L4** | Community discussions, personal blogs, forums | Discover blind spots, validate understanding | ❓ Low | - -**L4 Community Source Specifics** (mandatory for product comparison research): - -| Source Type | Access Method | Value | -|------------|---------------|-------| -| **GitHub Issues** | Visit `github.com///issues` | Real user pain points, feature requests, bug reports | -| **GitHub Discussions** | Visit `github.com///discussions` | Feature discussions, usage insights, community consensus | -| **Reddit** | Search `site:reddit.com ""` | Authentic user reviews, comparison discussions | -| **Hacker News** | Search `site:news.ycombinator.com ""` | In-depth technical community discussions | -| **Discord/Telegram** | Product's official community channels | Active user feedback (must annotate [limited source]) | - -**Principles**: -- Conclusions must be traceable to L1/L2 -- L3/L4 serve only as supplementary and validation -- **L4 community discussions are used to discover "what users truly care about"** -- Record all information sources - -**⏰ Timeliness Filtering Rules (execute based on Step 0.5 sensitivity level)**: - -| Sensitivity Level | Source Filtering Rule | Suggested Search Parameters | -|-------------------|----------------------|-----------------------------| -| 🔴 Critical | Only accept sources within 6 months as factual evidence | `time_range: "month"` or `start_date` set to last 3 months | -| 🟠 High | Prefer sources within 1 year; annotate if older than 1 year | `time_range: "year"` | -| 🟡 Medium | Sources within 2 years used normally; older ones need validity check | Default search | -| 🟢 Low | No time limit | Default search | - -**High-Sensitivity Domain Search Strategy**: - -``` -1. Round 1: Targeted official source search - - Use include_domains to restrict to official domains - - Example: include_domains: ["anthropic.com", "openai.com", "docs.xxx.com"] - -2. Round 2: Official download/release page direct verification (BLOCKING) - - Directly visit official download pages; don't rely on search caches - - Use tavily-extract or WebFetch to extract page content - - Verify: platform support, current version number, release date - - This step is mandatory; search engines may cache outdated "Coming soon" info - -3. Round 3: Product-specific protocol/feature search (BLOCKING) - - Search protocol names the product supports (MCP, ACP, LSP, etc.) - - Format: `" " site:official_domain` - - These integration features are often not displayed on the main page but documented in specialized pages - -4. 
Round 4: Time-limited broad search - - time_range: "month" or start_date set to recent - - Exclude obviously outdated sources - -5. Round 5: Version verification - - Cross-validate version numbers from search results - - If inconsistency found, immediately consult official Changelog - -6. Round 6: Community voice mining (BLOCKING - mandatory for product comparison research) - - Visit the product's GitHub Issues page, review popular/pinned issues - - Search Issues for key feature terms (e.g., "MCP", "plugin", "integration") - - Review discussion trends from the last 3-6 months - - Identify the feature points and differentiating characteristics users care most about - - Value of this step: Official docs rarely emphasize "features we have that others don't", but community discussions do -``` - -**Community Voice Mining Detailed Steps**: - -``` -GitHub Issues Mining Steps: -1. Visit github.com///issues -2. Sort by "Most commented" to view popular discussions -3. Search keywords: - - Feature-related: feature request, enhancement, MCP, plugin, API - - Comparison-related: vs, compared to, alternative, migrate from -4. Review issue labels: enhancement, feature, discussion -5. Record frequently occurring feature demands and user pain points - -Value Translation: -- Frequently discussed features → likely differentiating highlights -- User complaints/requests → likely product weaknesses -- Comparison discussions → directly obtain user-perspective difference analysis -``` - -**Source Timeliness Annotation Template** (append to source registry): - -```markdown -- **Publication Date**: [YYYY-MM-DD] -- **Timeliness Status**: ✅ Currently valid / ⚠️ Needs verification / ❌ Outdated -- **Version Info**: [If applicable, annotate the relevant version number] -``` - -**Tool Usage**: -- Prefer `mcp__plugin_context7_context7__query-docs` for technical documentation -- Use `WebSearch` or `mcp__tavily-mcp__tavily-search` for broad searches -- Use `mcp__tavily-mcp__tavily-extract` to extract specific page content - -**⚠️ Target Audience Verification (BLOCKING - must check before inclusion)**: - -Before including each source, verify that its **target audience matches the research boundary**: - -| Source Type | Target audience to verify | Verification method | -|------------|---------------------------|---------------------| -| **Policy/Regulation** | Who is it for? (K-12/university/all) | Check document title, scope clauses | -| **Academic Research** | Who are the subjects? (vocational/undergraduate/graduate) | Check methodology/sample description sections | -| **Statistical Data** | Which population is measured? | Check data source description | -| **Case Reports** | What type of institution is involved? 
| Confirm institution type (university/high school/vocational) | - -**Handling mismatched sources**: -- Target audience completely mismatched → **do not include** -- Partially overlapping (e.g., "students" includes university students) → include but **annotate applicable scope** -- Usable as analogous reference (e.g., K-12 policy as a trend reference) → include but **explicitly annotate "reference only"** - -**📁 Save action**: -For each source consulted, **immediately** append to `01_source_registry.md`: -```markdown -## Source #[number] -- **Title**: [source title] -- **Link**: [URL] -- **Tier**: L1/L2/L3/L4 -- **Publication Date**: [YYYY-MM-DD] -- **Timeliness Status**: ✅ Currently valid / ⚠️ Needs verification / ❌ Outdated (reference only) -- **Version Info**: [If involving a specific version, must annotate] -- **Target Audience**: [Explicitly annotate the group/geography/level this source targets] -- **Research Boundary Match**: ✅ Full match / ⚠️ Partial overlap / 📎 Reference only -- **Summary**: [1-2 sentence key content] -- **Related Sub-question**: [which sub-question this corresponds to] -``` - -### Step 3: Fact Extraction & Evidence Cards - -Transform sources into **verifiable fact cards**: - -```markdown -## Fact Cards - -### Fact 1 -- **Statement**: [specific fact description] -- **Source**: [link/document section] -- **Confidence**: High/Medium/Low - -### Fact 2 -... -``` - -**Key discipline**: -- Pin down facts first, then reason -- Distinguish "what officials said" from "what I infer" -- When conflicting information is found, annotate and preserve both sides -- Annotate confidence level: - - ✅ High: Explicitly stated in official documentation - - ⚠️ Medium: Mentioned in official blog but not formally documented - - ❓ Low: Inference or from unofficial sources - -**📁 Save action**: -For each extracted fact, **immediately** append to `02_fact_cards.md`: -```markdown -## Fact #[number] -- **Statement**: [specific fact description] -- **Source**: [Source #number] [link] -- **Phase**: [Phase 1 / Phase 2 / Assessment] -- **Target Audience**: [which group this fact applies to, inherited from source or further refined] -- **Confidence**: ✅/⚠️/❓ -- **Related Dimension**: [corresponding comparison dimension] -``` - -**⚠️ Target audience in fact statements**: -- If a fact comes from a "partially overlapping" or "reference only" source, the statement **must explicitly annotate the applicable scope** -- Wrong: "The Ministry of Education banned phones in classrooms" (doesn't specify who) -- Correct: "The Ministry of Education banned K-12 students from bringing phones into classrooms (does not apply to university students)" - -### Step 4: Build Comparison/Analysis Framework - -Based on the question type, select fixed analysis dimensions: - -**General Dimensions** (select as needed): -1. Goal / What problem does it solve -2. Working mechanism / Process -3. Input / Output / Boundaries -4. Advantages / Disadvantages / Trade-offs -5. Applicable scenarios / Boundary conditions -6. Cost / Benefit / Risk -7. Historical evolution / Future trends -8. Security / Permissions / Controllability - -**Concept Comparison Specific Dimensions**: -1. Definition & essence -2. Trigger / invocation method -3. Execution agent -4. Input/output & type constraints -5. Determinism & repeatability -6. Resource & context management -7. Composition & reuse patterns -8. Security boundaries & permission control - -**Decision Support Specific Dimensions**: -1. Solution overview -2. Implementation cost -3. 
Maintenance cost -4. Risk assessment -5. Expected benefit -6. Applicable scenarios -7. Team capability requirements -8. Migration difficulty - -**📁 Save action**: -Write to `03_comparison_framework.md`: -```markdown -# Comparison Framework - -## Selected Framework Type -[Concept Comparison / Decision Support / ...] - -## Selected Dimensions -1. [Dimension 1] -2. [Dimension 2] -... - -## Initial Population -| Dimension | X | Y | Factual Basis | -|-----------|---|---|---------------| -| [Dimension 1] | [description] | [description] | Fact #1, #3 | -| ... | | | | -``` - -### Step 5: Reference Point Baseline Alignment - -Ensure all compared parties have clear, consistent definitions: - -**Checklist**: -- [ ] Is the reference point's definition stable/widely accepted? -- [ ] Does it need verification, or can domain common knowledge be used? -- [ ] Does the reader's understanding of the reference point match mine? -- [ ] Are there ambiguities that need to be clarified first? - -### Step 6: Fact-to-Conclusion Reasoning Chain - -Explicitly write out the "fact → comparison → conclusion" reasoning process: - -```markdown -## Reasoning Process - -### Regarding [Dimension Name] - -1. **Fact confirmation**: According to [source], X's mechanism is... -2. **Compare with reference**: While Y's mechanism is... -3. **Conclusion**: Therefore, the difference between X and Y on this dimension is... -``` - -**Key discipline**: -- Conclusions come from mechanism comparison, not "gut feelings" -- Every conclusion must be traceable to specific facts -- Uncertain conclusions must be annotated - -**📁 Save action**: -Write to `04_reasoning_chain.md`: -```markdown -# Reasoning Chain - -## Dimension 1: [Dimension Name] - -### Fact Confirmation -According to [Fact #X], X's mechanism is... - -### Reference Comparison -While Y's mechanism is... (Source: [Fact #Y]) - -### Conclusion -Therefore, the difference between X and Y on this dimension is... - -### Confidence -✅/⚠️/❓ + rationale - ---- -## Dimension 2: [Dimension Name] -... -``` - -### Step 7: Use-Case Validation (Sanity Check) - -Validate conclusions against a typical scenario: - -**Validation questions**: -- Based on my conclusions, how should this scenario be handled? -- Is that actually the case? -- Are there counterexamples that need to be addressed? - -**Review checklist**: -- [ ] Are draft conclusions consistent with Step 3 fact cards? -- [ ] Are there any important dimensions missed? -- [ ] Is there any over-extrapolation? -- [ ] Are conclusions actionable/verifiable? - -**📁 Save action**: -Write to `05_validation_log.md`: -```markdown -# Validation Log - -## Validation Scenario -[Scenario description] - -## Expected Based on Conclusions -If using X: [expected behavior] -If using Y: [expected behavior] - -## Actual Validation Results -[actual situation] - -## Counterexamples -[yes/no, describe if yes] - -## Review Checklist -- [x] Draft conclusions consistent with fact cards -- [x] No important dimensions missed -- [x] No over-extrapolation -- [ ] Issue found: [if any] - -## Conclusions Requiring Revision -[if any] -``` - -### Step 8: Deliverable Formatting - -Make the output **readable, traceable, and actionable**. - -**📁 Save action**: -Integrate all intermediate artifacts. 
Write to `OUTPUT_DIR/solution_draft##.md` using the appropriate output template based on active mode: -- Mode A: `templates/solution_draft_mode_a.md` -- Mode B: `templates/solution_draft_mode_b.md` - -Sources to integrate: -- Extract background from `00_question_decomposition.md` -- Reference key facts from `02_fact_cards.md` -- Organize conclusions from `04_reasoning_chain.md` -- Generate references from `01_source_registry.md` -- Supplement with use cases from `05_validation_log.md` -- For Mode A: include AC assessment from `00_ac_assessment.md` - -## Solution Draft Output Templates - -### Mode A: Initial Research Output - -Use template: `templates/solution_draft_mode_a.md` - -### Mode B: Solution Assessment Output - -Use template: `templates/solution_draft_mode_b.md` - ## Stakeholder Perspectives Adjust content depth based on audience: @@ -832,250 +127,24 @@ Adjust content depth based on audience: | **Implementers** | Specific mechanisms, how-to | Detailed, emphasize how to do it | | **Technical experts** | Details, boundary conditions, limitations | In-depth, emphasize accuracy | -## Output Files - -Default intermediate artifacts location: `RESEARCH_DIR//` - -**Required files** (automatically generated through the process): - -| File | Content | When Generated | -|------|---------|----------------| -| `00_ac_assessment.md` | AC & restrictions assessment (Mode A only) | After Phase 1 completion | -| `00_question_decomposition.md` | Question type, sub-question list | After Step 0-1 completion | -| `01_source_registry.md` | All source links and summaries | Continuously updated during Step 2 | -| `02_fact_cards.md` | Extracted facts and sources | Continuously updated during Step 3 | -| `03_comparison_framework.md` | Selected framework and populated data | After Step 4 completion | -| `04_reasoning_chain.md` | Fact → conclusion reasoning | After Step 6 completion | -| `05_validation_log.md` | Use-case validation and review | After Step 7 completion | -| `OUTPUT_DIR/solution_draft##.md` | Complete solution draft | After Step 8 completion | -| `OUTPUT_DIR/tech_stack.md` | Tech stack evaluation and decisions | After Phase 3 (optional) | -| `OUTPUT_DIR/security_analysis.md` | Threat model and security controls | After Phase 4 (optional) | - -**Optional files**: -- `raw/*.md` - Raw source archives (saved when content is lengthy) - -## Methodology Quick Reference Card - -``` -┌──────────────────────────────────────────────────────────────────┐ -│ Deep Research — Mode-Aware 8-Step Method │ -├──────────────────────────────────────────────────────────────────┤ -│ CONTEXT: Resolve mode (project vs standalone) + set paths │ -│ GUARDRAILS: Check INPUT_DIR/INPUT_FILE exists + required files │ -│ MODE DETECT: solution_draft*.md in 01_solution? → A or B │ -│ │ -│ MODE A: Initial Research │ -│ Phase 1: AC & Restrictions Assessment (BLOCKING) │ -│ Phase 2: Full 8-step → solution_draft##.md │ -│ Phase 3: Tech Stack Consolidation (OPTIONAL) → tech_stack.md │ -│ Phase 4: Security Deep Dive (OPTIONAL) → security_analysis.md │ -│ │ -│ MODE B: Solution Assessment │ -│ Read latest draft → Full 8-step → solution_draft##.md (N+1) │ -│ Optional: Phase 3 / Phase 4 on revised draft │ -│ │ -│ 8-STEP ENGINE: │ -│ 0. Classify question type → Select framework template │ -│ 1. Decompose question → mode-specific sub-questions │ -│ 2. Tier sources → L1 Official > L2 Blog > L3 Media > L4 │ -│ 3. Extract facts → Each with source, confidence level │ -│ 4. Build framework → Fixed dimensions, structured compare │ -│ 5. 
Align references → Ensure unified definitions │ -│ 6. Reasoning chain → Fact→Compare→Conclude, explicit │ -│ 7. Use-case validation → Sanity check, prevent armchairing │ -│ 8. Deliverable → solution_draft##.md (mode-specific format) │ -├──────────────────────────────────────────────────────────────────┤ -│ Key discipline: Ask don't assume · Facts before reasoning │ -│ Conclusions from mechanism, not gut feelings │ -└──────────────────────────────────────────────────────────────────┘ -``` - -## Usage Examples - -### Example 1: Initial Research (Mode A) - -``` -User: Research this problem and find the best solution -``` - -Execution flow: -1. Context resolution: no explicit file → project mode (INPUT_DIR=`_docs/00_problem/`, OUTPUT_DIR=`_docs/01_solution/`) -2. Guardrails: verify INPUT_DIR exists with required files -3. Mode detection: no `solution_draft*.md` → Mode A -4. Phase 1: Assess acceptance criteria and restrictions, ask user about unclear parts -5. BLOCKING: present AC assessment, wait for user confirmation -6. Phase 2: Full 8-step research — competitors, components, state-of-the-art solutions -7. Output: `OUTPUT_DIR/solution_draft01.md` -8. (Optional) Phase 3: Tech stack consolidation → `tech_stack.md` -9. (Optional) Phase 4: Security deep dive → `security_analysis.md` - -### Example 2: Solution Assessment (Mode B) - -``` -User: Assess the current solution draft -``` - -Execution flow: -1. Context resolution: no explicit file → project mode -2. Guardrails: verify INPUT_DIR exists -3. Mode detection: `solution_draft03.md` found in OUTPUT_DIR → Mode B, read it as input -4. Full 8-step research — weak points, security, performance, solutions -5. Output: `OUTPUT_DIR/solution_draft04.md` with findings table + revised draft - -### Example 3: Standalone Research - -``` -User: /research @my_problem.md -``` - -Execution flow: -1. Context resolution: explicit file → standalone mode (INPUT_FILE=`my_problem.md`, OUTPUT_DIR=`_standalone/my_problem/01_solution/`) -2. Guardrails: verify INPUT_FILE exists and is non-empty, warn about missing restrictions/AC -3. Mode detection + full research flow as in Example 1, scoped to standalone paths -4. Output: `_standalone/my_problem/01_solution/solution_draft01.md` - -### Example 4: Force Initial Research (Override) - -``` -User: Research from scratch, ignore existing drafts -``` - -Execution flow: -1. Context resolution: no explicit file → project mode -2. Mode detection: drafts exist, but user explicitly requested initial research → Mode A -3. Phase 1 + Phase 2 as in Example 1 -4. Output: `OUTPUT_DIR/solution_draft##.md` (incremented from highest existing) - ## Source Verifiability Requirements -**Core principle**: Every piece of external information cited in the report must be directly verifiable by the user. - -**Mandatory rules**: - -1. **URL Accessibility**: - - All cited links must be publicly accessible (no login/paywall required) - - If citing content that requires login, must annotate `[login required]` - - If citing academic papers, prefer publicly available versions (arXiv/DOI) - -2. **Citation Precision**: - - For long documents, must specify exact section/page/timestamp - - Example: `[Source: OpenAI Blog, 2024-03-15, "GPT-4 Technical Report", §3.2 Safety]` - - Video/audio citations need timestamps - -3. 
**Content Correspondence**: - - Cited facts must have corresponding statements in the original text - - Prohibit over-interpretation of original text presented as "citations" - - If there's interpretation/inference, must explicitly annotate "inferred based on [source]" - -4. **Timeliness Annotation**: - - Annotate source publication/update date - - For technical docs, annotate version number - - Sources older than 2 years need validity assessment - -5. **Handling Unverifiable Information**: - - If the information source cannot be publicly verified (e.g., private communication, paywalled report excerpts), must annotate `[limited source]` in confidence level - - Unverifiable information cannot be the sole support for core conclusions +Every cited piece of external information must be directly verifiable by the user. All links must be publicly accessible (annotate `[login required]` if not), citations must include exact section/page/timestamp, and unverifiable information must be annotated `[limited source]`. Full checklist in `references/quality-checklists.md`. ## Quality Checklist -Before completing the solution draft, check the following items: - -### General Quality - -- [ ] All core conclusions have L1/L2 tier factual support -- [ ] No use of vague words like "possibly", "probably" without annotating uncertainty -- [ ] Comparison dimensions are complete with no key differences missed -- [ ] At least one real use case validates conclusions -- [ ] References are complete with accessible links -- [ ] **Every citation can be directly verified by the user (source verifiability)** -- [ ] Structure hierarchy is clear; executives can quickly locate information - -### Mode A Specific - -- [ ] **Phase 1 completed**: AC assessment was presented to and confirmed by user -- [ ] **AC assessment consistent**: Solution draft respects the (possibly adjusted) acceptance criteria and restrictions -- [ ] **Competitor analysis included**: Existing solutions were researched -- [ ] **All components have comparison tables**: Each component lists alternatives with tools, advantages, limitations, security, cost -- [ ] **Tools/libraries verified**: Suggested tools actually exist and work as described -- [ ] **Testing strategy covers AC**: Tests map to acceptance criteria -- [ ] **Tech stack documented** (if Phase 3 ran): `tech_stack.md` has evaluation tables, risk assessment, and learning requirements -- [ ] **Security analysis documented** (if Phase 4 ran): `security_analysis.md` has threat model and per-component controls - -### Mode B Specific - -- [ ] **Findings table complete**: All identified weak points documented with solutions -- [ ] **Weak point categories covered**: Functional, security, and performance assessed -- [ ] **New draft is self-contained**: Written as if from scratch, no "updated" markers -- [ ] **Performance column included**: Mode B comparison tables include performance characteristics -- [ ] **Previous draft issues addressed**: Every finding in the table is resolved in the new draft - -### ⏰ Timeliness Check (High-Sensitivity Domain BLOCKING) - -When the research topic has 🔴 Critical or 🟠 High sensitivity level, **the following checks must be completed**: - -- [ ] **Timeliness sensitivity assessment completed**: `00_question_decomposition.md` contains a timeliness assessment section -- [ ] **Source timeliness annotated**: Every source has publication date, timeliness status, version info -- [ ] **No outdated sources used as factual evidence**: - - 🔴 Critical: Core fact sources are all 
within 6 months - - 🟠 High: Core fact sources are all within 1 year -- [ ] **Version numbers explicitly annotated**: - - Technical product/API/SDK descriptions all annotate specific version numbers - - No vague time expressions like "latest version" or "currently" -- [ ] **Official sources prioritized**: Core conclusions have support from official documentation/blogs -- [ ] **Cross-validation completed**: Key technical information confirmed from at least 2 independent sources -- [ ] **Download page directly verified**: Platform support info comes from real-time extraction of official download pages, not search caches -- [ ] **Protocol/feature names searched**: Searched for product-supported protocol names (MCP, ACP, etc.) -- [ ] **GitHub Issues mined**: Reviewed product's GitHub Issues popular discussions -- [ ] **Community hotspots identified**: Identified and recorded feature points users care most about - -**Typical community voice oversight error cases**: - -> Wrong: Relying solely on official docs, MCP briefly mentioned as a regular feature in the report -> Correct: Discovered through GitHub Issues that MCP is the most hotly discussed feature in the community, expanded analysis of its value in the report - -> Wrong: "Both Alma and Cherry Studio support MCP" (no difference analysis) -> Correct: Discovered through community discussion that "Alma's MCP implementation is highly consistent with Claude Code — this is its core competitive advantage" - -**Typical platform support/protocol oversight error cases**: - -> Wrong: "Alma only supports macOS" (based on search engine cached "Coming soon" info) -> Correct: Directly visited alma.now/download page to verify currently supported platforms - -> Wrong: "Alma supports MCP" (only searched MCP, missed ACP) -> Correct: Searched both "Alma MCP" and "Alma ACP", discovered Alma also supports ACP protocol integration for CLI tools - -**Typical timeliness error cases**: - -> Wrong: "Claude supports function calling" (no version annotated, may refer to old version capabilities) -> Correct: "Claude 3.5 Sonnet (claude-3-5-sonnet-20241022) supports function calling via Tool Use API, with a maximum of 8192 tokens for tool definitions" - -> Wrong: "According to a 2023 blog post, GPT-4's context length is 8K" -> Correct: "As of January 2024, GPT-4 Turbo supports 128K context (Source: OpenAI official documentation, updated 2024-01-25)" - -### ⚠️ Target Audience Consistency Check (BLOCKING) - -This is the most easily overlooked and most critical check item: - -- [ ] **Research boundary clearly defined**: `00_question_decomposition.md` has clear population/geography/timeframe/level boundaries -- [ ] **Every source has target audience annotated**: `01_source_registry.md` has "Target Audience" and "Research Boundary Match" fields for each source -- [ ] **Mismatched sources properly handled**: - - Completely mismatched sources were not included - - Partially overlapping sources have annotated applicable scope - - Reference-only sources are explicitly annotated -- [ ] **No audience confusion in fact cards**: Every fact in `02_fact_cards.md` has a target audience consistent with the research boundary -- [ ] **No audience confusion in the report**: Policies/research/data cited in the solution draft have target audiences consistent with the research topic - -**Typical error case**: -> Research topic: "University students not paying attention in class" -> Wrong citation: "In October 2025, the Ministry of Education banned phones in classrooms" -> Problem: That policy 
targets K-12 students, not university students
-> Consequence: Readers mistakenly believe the Ministry of Education banned university students from carrying phones — severely misleading
+Before completing the solution draft, run through the checklists in `references/quality-checklists.md`. This covers:
+- General quality (L1/L2 support, verifiability, actionability)
+- Mode A specific (AC assessment, competitor analysis, component tables, tech stack)
+- Mode B specific (findings table, self-contained draft, performance column)
+- Timeliness check for high-sensitivity domains (version annotations, cross-validation, community mining)
+- Target audience consistency (boundary definition, source matching, fact card audience)

## Final Reply Guidelines

When replying to the user after research is complete:

-**✅ Should include**:
+**Should include**:
- Active mode used (A or B) and which optional phases were executed
- One-sentence core conclusion
- Key findings summary (3-5 points)
@@ -1083,7 +152,7 @@ When replying to the user after research is complete:
- Paths to optional artifacts if produced: `tech_stack.md`, `security_analysis.md`
- If there are significant uncertainties, annotate points requiring further verification

-**❌ Must not include**:
+**Must not include**:
- Process file listings (e.g., `00_question_decomposition.md`, `01_source_registry.md`, etc.)
- Detailed research step descriptions
- Working directory structure display

diff --git a/.cursor/skills/research/references/comparison-frameworks.md b/.cursor/skills/research/references/comparison-frameworks.md
new file mode 100644
index 0000000..da1c42c
--- /dev/null
+++ b/.cursor/skills/research/references/comparison-frameworks.md
@@ -0,0 +1,34 @@
+# Comparison & Analysis Frameworks — Reference
+
+## General Dimensions (select as needed)
+
+1. Goal / What problem does it solve
+2. Working mechanism / Process
+3. Input / Output / Boundaries
+4. Advantages / Disadvantages / Trade-offs
+5. Applicable scenarios / Boundary conditions
+6. Cost / Benefit / Risk
+7. Historical evolution / Future trends
+8. Security / Permissions / Controllability
+
+## Concept Comparison Specific Dimensions
+
+1. Definition & essence
+2. Trigger / invocation method
+3. Execution agent
+4. Input/output & type constraints
+5. Determinism & repeatability
+6. Resource & context management
+7. Composition & reuse patterns
+8. Security boundaries & permission control
+
+## Decision Support Specific Dimensions
+
+1. Solution overview
+2. Implementation cost
+3. Maintenance cost
+4. Risk assessment
+5. Expected benefit
+6. Applicable scenarios
+7. Team capability requirements
+8. Migration difficulty
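+
+A minimal sketch of how the scoreable Decision Support dimensions can be combined into a weighted decision matrix. The weights, the 1-5 scores, and the option names are illustrative assumptions, not part of the methodology; calibrate them per research topic:
+
+```python
+# Hypothetical weighted decision matrix over the Decision Support dimensions
+# above ("Solution overview" and "Applicable scenarios" are qualitative and
+# kept out of the score). Weights are assumptions and must sum to 1.0.
+WEIGHTS = {
+    "implementation_cost": 0.20,   # higher score = cheaper to build
+    "maintenance_cost": 0.15,      # higher score = cheaper to run
+    "risk": 0.20,                  # higher score = lower risk
+    "expected_benefit": 0.25,
+    "team_capability_fit": 0.10,
+    "migration_difficulty": 0.10,  # higher score = easier migration
+}
+
+options = {
+    "Option A": {"implementation_cost": 4, "maintenance_cost": 3, "risk": 4,
+                 "expected_benefit": 5, "team_capability_fit": 2,
+                 "migration_difficulty": 3},
+    "Option B": {"implementation_cost": 3, "maintenance_cost": 4, "risk": 3,
+                 "expected_benefit": 4, "team_capability_fit": 4,
+                 "migration_difficulty": 4},
+}
+
+def weighted_score(scores: dict[str, int]) -> float:
+    """Weighted sum of per-dimension scores."""
+    return sum(WEIGHTS[dim] * scores[dim] for dim in WEIGHTS)
+
+for name, scores in sorted(options.items(),
+                           key=lambda kv: weighted_score(kv[1]), reverse=True):
+    print(f"{name}: {weighted_score(scores):.2f}")
+```
+
+The printed scores only rank options; the reasoning chain (Step 6) must still justify each individual score with fact cards.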
diff --git a/.cursor/skills/research/references/novelty-sensitivity.md b/.cursor/skills/research/references/novelty-sensitivity.md
new file mode 100644
index 0000000..815245d
--- /dev/null
+++ b/.cursor/skills/research/references/novelty-sensitivity.md
@@ -0,0 +1,75 @@
+# Novelty Sensitivity Assessment — Reference
+
+## Novelty Sensitivity Classification
+
+| Sensitivity Level | Typical Domains | Source Time Window | Description |
+|-------------------|-----------------|-------------------|-------------|
+| **Critical** | AI/LLMs, blockchain, cryptocurrency | 3-6 months | Technology iterates extremely fast; info from months ago may be completely outdated |
+| **High** | Cloud services, frontend frameworks, API interfaces | 6-12 months | Frequent version updates; must confirm current version |
+| **Medium** | Programming languages, databases, operating systems | 1-2 years | Relatively stable but still evolving |
+| **Low** | Algorithm fundamentals, design patterns, theoretical concepts | No limit | Core principles change slowly |
+
+## Critical Sensitivity Domain Special Rules
+
+When the research topic involves the following domains, special rules must be enforced:
+
+**Trigger word identification**:
+- AI-related: LLM, GPT, Claude, Gemini, AI Agent, RAG, vector database, prompt engineering
+- Cloud-native: Kubernetes new versions, Serverless, container runtimes
+- Cutting-edge tech: Web3, quantum computing, AR/VR
+
+**Mandatory rules**:
+
+1. **Search with time constraints**:
+   - Use `time_range: "month"` or `time_range: "week"` to limit search results
+   - Prefer `start_date: "YYYY-MM-DD"` set to within the last 3 months
+
+2. **Elevate official source priority**:
+   - Must first consult official documentation, official blogs, official Changelogs
+   - GitHub Release Notes, official X/Twitter announcements
+   - Academic papers (arXiv and other preprint platforms)
+
+3. **Mandatory version number annotation**:
+   - Any technical description must annotate the current version number
+   - Example: "Claude 3.5 Sonnet (claude-3-5-sonnet-20241022) supports..."
+   - Prohibit vague statements like "the latest version supports..."
+
+4. **Outdated information handling**:
+   - Technical blogs/tutorials older than 6 months -> historical reference only, cannot serve as factual evidence
+   - Version inconsistency found -> must verify current version before using
+   - Obviously outdated descriptions (e.g., "will support in the future" but now already supported) -> discard directly
+
+5. **Cross-validation**:
+   - Highly sensitive information must be confirmed from at least 2 independent sources
+   - Priority: Official docs > Official blogs > Authoritative tech media > Personal blogs
+
+6. **Official download/release page direct verification (BLOCKING)**:
+   - Must directly visit official download pages to verify platform support (don't rely on search engine caches)
+   - Use `WebFetch` to directly extract download page content
+   - Search results about "coming soon" or "planned support" may be outdated; must verify in real time
+   - Platform support is frequently changing information; cannot infer from old sources
+
+7. **Product-specific protocol/feature name search (BLOCKING)**:
+   - Beyond searching the product name, must additionally search protocol/standard names the product supports
+   - Common protocols/standards to search:
+     - AI tools: MCP, ACP (Agent Client Protocol), LSP, DAP
+     - Cloud services: OAuth, OIDC, SAML
+     - Data exchange: GraphQL, gRPC, REST
+   - Search format: `"<product> <protocol> support"` or `"<product> <protocol> integration"`
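+
+A minimal sketch of rules 1 and 2 in practice. `web_search` stands in for whatever search tool is available; the parameter names (`time_range`, `start_date`, `include_domains`) mirror the ones used in this document and are assumptions about the tool's interface:
+
+```python
+from datetime import date, timedelta
+
+def critical_domain_search(web_search, product: str, official_domains: list[str]):
+    """Apply Critical-sensitivity rules: official sources first,
+    then a broad search limited to the last 3 months."""
+    start = (date.today() - timedelta(days=90)).isoformat()
+
+    # Rule 2: consult official docs/blogs/changelogs before anything else.
+    official = web_search(f"{product} changelog release notes",
+                          include_domains=official_domains)
+
+    # Rule 1: time-constrain the broad search to the last 3 months.
+    recent = web_search(f"{product} current version platform support",
+                        time_range="month", start_date=start)
+
+    return official + recent
+```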
+## Timeliness Assessment Output Template
+
+```markdown
+## Timeliness Sensitivity Assessment
+
+- **Research Topic**: [topic]
+- **Sensitivity Level**: Critical / High / Medium / Low
+- **Rationale**: [why this level]
+- **Source Time Window**: [X months/years]
+- **Priority official sources to consult**:
+  1. [Official source 1]
+  2. [Official source 2]
+- **Key version information to verify**:
+  - [Product/technology 1]: Current version ____
+  - [Product/technology 2]: Current version ____
+```

diff --git a/.cursor/skills/research/references/quality-checklists.md b/.cursor/skills/research/references/quality-checklists.md
new file mode 100644
index 0000000..9a4717a
--- /dev/null
+++ b/.cursor/skills/research/references/quality-checklists.md
@@ -0,0 +1,72 @@
+# Quality Checklists — Reference
+
+## General Quality
+
+- [ ] All core conclusions have L1/L2 tier factual support
+- [ ] No use of vague words like "possibly", "probably" without annotating uncertainty
+- [ ] Comparison dimensions are complete with no key differences missed
+- [ ] At least one real use case validates conclusions
+- [ ] References are complete with accessible links
+- [ ] Every citation can be directly verified by the user (source verifiability)
+- [ ] Structure hierarchy is clear; executives can quickly locate information
+
+## Internet Search Depth
+
+- [ ] Every sub-question was searched with at least 3-5 different query variants
+- [ ] At least 3 perspectives from the Perspective Rotation were applied and searched
+- [ ] Search saturation reached: last searches stopped producing new substantive information
+- [ ] Adjacent fields and analogous problems were searched, not just direct matches
+- [ ] Contrarian viewpoints were actively sought ("why not X", "X criticism", "X failure")
+- [ ] Practitioner experience was searched (production use, real-world results, lessons learned)
+- [ ] Iterative deepening completed: follow-up questions from initial findings were searched
+- [ ] No sub-question relies solely on training data without web verification
+
+## Mode A Specific
+
+- [ ] Phase 1 completed: AC assessment was presented to and confirmed by user
+- [ ] AC assessment consistent: Solution draft respects the (possibly adjusted) acceptance criteria and restrictions
+- [ ] Competitor analysis included: Existing solutions were researched
+- [ ] All components have comparison tables: Each component lists alternatives with tools, advantages, limitations, security, cost
+- [ ] Tools/libraries verified: Suggested tools actually exist and work as described
+- [ ] Testing strategy covers AC: Tests map to acceptance criteria
+- [ ] Tech stack documented (if Phase 3 ran): `tech_stack.md` has evaluation tables, risk assessment, and learning requirements
+- [ ] Security analysis documented (if Phase 4 ran): `security_analysis.md` has threat model and per-component controls
+
+## Mode B Specific
+
+- [ ] Findings table complete: All identified weak points documented with solutions
+- [ ] Weak point categories covered: Functional, security, and performance assessed
+- [ ] New draft is self-contained: Written as if from scratch, no
"updated" markers +- [ ] Performance column included: Mode B comparison tables include performance characteristics +- [ ] Previous draft issues addressed: Every finding in the table is resolved in the new draft + +## Timeliness Check (High-Sensitivity Domain BLOCKING) + +When the research topic has Critical or High sensitivity level: + +- [ ] Timeliness sensitivity assessment completed: `00_question_decomposition.md` contains a timeliness assessment section +- [ ] Source timeliness annotated: Every source has publication date, timeliness status, version info +- [ ] No outdated sources used as factual evidence (Critical: within 6 months; High: within 1 year) +- [ ] Version numbers explicitly annotated for all technical products/APIs/SDKs +- [ ] Official sources prioritized: Core conclusions have support from official documentation/blogs +- [ ] Cross-validation completed: Key technical information confirmed from at least 2 independent sources +- [ ] Download page directly verified: Platform support info comes from real-time extraction of official download pages +- [ ] Protocol/feature names searched: Searched for product-supported protocol names (MCP, ACP, etc.) +- [ ] GitHub Issues mined: Reviewed product's GitHub Issues popular discussions +- [ ] Community hotspots identified: Identified and recorded feature points users care most about + +## Target Audience Consistency Check (BLOCKING) + +- [ ] Research boundary clearly defined: `00_question_decomposition.md` has clear population/geography/timeframe/level boundaries +- [ ] Every source has target audience annotated in `01_source_registry.md` +- [ ] Mismatched sources properly handled (excluded, annotated, or marked reference-only) +- [ ] No audience confusion in fact cards: Every fact has target audience consistent with research boundary +- [ ] No audience confusion in the report: Policies/research/data cited have consistent target audiences + +## Source Verifiability + +- [ ] All cited links are publicly accessible (annotate `[login required]` if not) +- [ ] Citations include exact section/page/timestamp for long documents +- [ ] Cited facts have corresponding statements in the original text (no over-interpretation) +- [ ] Source publication/update dates annotated; technical docs include version numbers +- [ ] Unverifiable information annotated `[limited source]` and not sole support for core conclusions diff --git a/.cursor/skills/research/references/source-tiering.md b/.cursor/skills/research/references/source-tiering.md new file mode 100644 index 0000000..ce59c4f --- /dev/null +++ b/.cursor/skills/research/references/source-tiering.md @@ -0,0 +1,121 @@ +# Source Tiering & Authority Anchoring — Reference + +## Source Tiers + +| Tier | Source Type | Purpose | Credibility | +|------|------------|---------|-------------| +| **L1** | Official docs, papers, specs, RFCs | Definitions, mechanisms, verifiable facts | High | +| **L2** | Official blogs, tech talks, white papers | Design intent, architectural thinking | High | +| **L3** | Authoritative media, expert commentary, tutorials | Supplementary intuition, case studies | Medium | +| **L4** | Community discussions, personal blogs, forums | Discover blind spots, validate understanding | Low | + +## L4 Community Source Specifics (mandatory for product comparison research) + +| Source Type | Access Method | Value | +|------------|---------------|-------| +| **GitHub Issues** | Visit `github.com///issues` | Real user pain points, feature requests, bug reports | +| **GitHub Discussions** | 
Visit `github.com///discussions` | Feature discussions, usage insights, community consensus | +| **Reddit** | Search `site:reddit.com ""` | Authentic user reviews, comparison discussions | +| **Hacker News** | Search `site:news.ycombinator.com ""` | In-depth technical community discussions | +| **Discord/Telegram** | Product's official community channels | Active user feedback (must annotate [limited source]) | + +## Principles + +- Conclusions must be traceable to L1/L2 +- L3/L4 serve only as supplementary and validation +- L4 community discussions are used to discover "what users truly care about" +- Record all information sources +- **Search broadly before searching deeply** — cast a wide net with multiple query variants before diving deep into any single source +- **Cross-domain search** — when direct results are sparse, search adjacent fields, analogous problems, and related industries +- **Never rely on a single search** — each sub-question requires multiple searches from different angles (synonyms, negations, practitioner language, academic language) + +## Timeliness Filtering Rules (execute based on Step 0.5 sensitivity level) + +| Sensitivity Level | Source Filtering Rule | Suggested Search Parameters | +|-------------------|----------------------|-----------------------------| +| Critical | Only accept sources within 6 months as factual evidence | `time_range: "month"` or `start_date` set to last 3 months | +| High | Prefer sources within 1 year; annotate if older than 1 year | `time_range: "year"` | +| Medium | Sources within 2 years used normally; older ones need validity check | Default search | +| Low | No time limit | Default search | + +## High-Sensitivity Domain Search Strategy + +``` +1. Round 1: Targeted official source search + - Use include_domains to restrict to official domains + - Example: include_domains: ["anthropic.com", "openai.com", "docs.xxx.com"] + +2. Round 2: Official download/release page direct verification (BLOCKING) + - Directly visit official download pages; don't rely on search caches + - Use tavily-extract or WebFetch to extract page content + - Verify: platform support, current version number, release date + +3. Round 3: Product-specific protocol/feature search (BLOCKING) + - Search protocol names the product supports (MCP, ACP, LSP, etc.) + - Format: " " site:official_domain + +4. Round 4: Time-limited broad search + - time_range: "month" or start_date set to recent + - Exclude obviously outdated sources + +5. Round 5: Version verification + - Cross-validate version numbers from search results + - If inconsistency found, immediately consult official Changelog + +6. Round 6: Community voice mining (BLOCKING - mandatory for product comparison research) + - Visit the product's GitHub Issues page, review popular/pinned issues + - Search Issues for key feature terms (e.g., "MCP", "plugin", "integration") + - Review discussion trends from the last 3-6 months + - Identify the feature points and differentiating characteristics users care most about +``` + +## Community Voice Mining Detailed Steps + +``` +GitHub Issues Mining Steps: +1. Visit github.com///issues +2. Sort by "Most commented" to view popular discussions +3. Search keywords: + - Feature-related: feature request, enhancement, MCP, plugin, API + - Comparison-related: vs, compared to, alternative, migrate from +4. Review issue labels: enhancement, feature, discussion +5. 
Record frequently occurring feature demands and user pain points + +Value Translation: +- Frequently discussed features -> likely differentiating highlights +- User complaints/requests -> likely product weaknesses +- Comparison discussions -> directly obtain user-perspective difference analysis +``` + +## Source Registry Entry Template + +For each source consulted, immediately append to `01_source_registry.md`: +```markdown +## Source #[number] +- **Title**: [source title] +- **Link**: [URL] +- **Tier**: L1/L2/L3/L4 +- **Publication Date**: [YYYY-MM-DD] +- **Timeliness Status**: Currently valid / Needs verification / Outdated (reference only) +- **Version Info**: [If involving a specific version, must annotate] +- **Target Audience**: [Explicitly annotate the group/geography/level this source targets] +- **Research Boundary Match**: Full match / Partial overlap / Reference only +- **Summary**: [1-2 sentence key content] +- **Related Sub-question**: [which sub-question this corresponds to] +``` + +## Target Audience Verification (BLOCKING) + +Before including each source, verify that its target audience matches the research boundary: + +| Source Type | Target audience to verify | Verification method | +|------------|---------------------------|---------------------| +| **Policy/Regulation** | Who is it for? (K-12/university/all) | Check document title, scope clauses | +| **Academic Research** | Who are the subjects? (vocational/undergraduate/graduate) | Check methodology/sample description sections | +| **Statistical Data** | Which population is measured? | Check data source description | +| **Case Reports** | What type of institution is involved? | Confirm institution type | + +Handling mismatched sources: +- Target audience completely mismatched -> do not include +- Partially overlapping -> include but annotate applicable scope +- Usable as analogous reference -> include but explicitly annotate "reference only" diff --git a/.cursor/skills/research/references/usage-examples.md b/.cursor/skills/research/references/usage-examples.md new file mode 100644 index 0000000..a401ff8 --- /dev/null +++ b/.cursor/skills/research/references/usage-examples.md @@ -0,0 +1,56 @@ +# Usage Examples — Reference + +## Example 1: Initial Research (Mode A) + +``` +User: Research this problem and find the best solution +``` + +Execution flow: +1. Context resolution: no explicit file -> project mode (INPUT_DIR=`_docs/00_problem/`, OUTPUT_DIR=`_docs/01_solution/`) +2. Guardrails: verify INPUT_DIR exists with required files +3. Mode detection: no `solution_draft*.md` -> Mode A +4. Phase 1: Assess acceptance criteria and restrictions, ask user about unclear parts +5. BLOCKING: present AC assessment, wait for user confirmation +6. Phase 2: Full 8-step research — competitors, components, state-of-the-art solutions +7. Output: `OUTPUT_DIR/solution_draft01.md` +8. (Optional) Phase 3: Tech stack consolidation -> `tech_stack.md` +9. (Optional) Phase 4: Security deep dive -> `security_analysis.md` + +## Example 2: Solution Assessment (Mode B) + +``` +User: Assess the current solution draft +``` + +Execution flow: +1. Context resolution: no explicit file -> project mode +2. Guardrails: verify INPUT_DIR exists +3. Mode detection: `solution_draft03.md` found in OUTPUT_DIR -> Mode B, read it as input +4. Full 8-step research — weak points, security, performance, solutions +5. 
Output: `OUTPUT_DIR/solution_draft04.md` with findings table + revised draft + +## Example 3: Standalone Research + +``` +User: /research @my_problem.md +``` + +Execution flow: +1. Context resolution: explicit file -> standalone mode (INPUT_FILE=`my_problem.md`, OUTPUT_DIR=`_standalone/my_problem/01_solution/`) +2. Guardrails: verify INPUT_FILE exists and is non-empty, warn about missing restrictions/AC +3. Mode detection + full research flow as in Example 1, scoped to standalone paths +4. Output: `_standalone/my_problem/01_solution/solution_draft01.md` +5. Move `my_problem.md` into `_standalone/my_problem/` + +## Example 4: Force Initial Research (Override) + +``` +User: Research from scratch, ignore existing drafts +``` + +Execution flow: +1. Context resolution: no explicit file -> project mode +2. Mode detection: drafts exist, but user explicitly requested initial research -> Mode A +3. Phase 1 + Phase 2 as in Example 1 +4. Output: `OUTPUT_DIR/solution_draft##.md` (incremented from highest existing) diff --git a/.cursor/skills/research/steps/00_project-integration.md b/.cursor/skills/research/steps/00_project-integration.md new file mode 100644 index 0000000..f94ef4f --- /dev/null +++ b/.cursor/skills/research/steps/00_project-integration.md @@ -0,0 +1,103 @@ +## Project Integration + +### Prerequisite Guardrails (BLOCKING) + +Before any research begins, verify the input context exists. **Do not proceed if guardrails fail.** + +**Project mode:** +1. Check INPUT_DIR exists — **STOP if missing**, ask user to create it and provide problem files +2. Check `problem.md` in INPUT_DIR exists and is non-empty — **STOP if missing** +3. Check `restrictions.md` in INPUT_DIR exists and is non-empty — **STOP if missing** +4. Check `acceptance_criteria.md` in INPUT_DIR exists and is non-empty — **STOP if missing** +5. Check `input_data/` in INPUT_DIR exists and contains at least one file — **STOP if missing** +6. Read **all** files in INPUT_DIR to ground the investigation in the project context +7. Create OUTPUT_DIR and RESEARCH_DIR if they don't exist + +**Standalone mode:** +1. Check INPUT_FILE exists and is non-empty — **STOP if missing** +2. Resolve BASE_DIR: use the caller-specified directory if provided; otherwise default to `_standalone/` +3. Resolve OUTPUT_DIR (`BASE_DIR/01_solution/`) and RESEARCH_DIR (`BASE_DIR/00_research/`) +4. Warn if no `restrictions.md` or `acceptance_criteria.md` were provided alongside INPUT_FILE — proceed if user confirms +5. Create BASE_DIR, OUTPUT_DIR, and RESEARCH_DIR if they don't exist + +### Mode Detection + +After guardrails pass, determine the execution mode: + +1. Scan OUTPUT_DIR for files matching `solution_draft*.md` +2. **No matches found** → **Mode A: Initial Research** +3. **Matches found** → **Mode B: Solution Assessment** (use the highest-numbered draft as input) +4. **User override**: if the user explicitly says "research from scratch" or "initial research", force Mode A regardless of existing drafts + +Inform the user which mode was detected and confirm before proceeding. + +### Solution Draft Numbering + +All final output is saved as `OUTPUT_DIR/solution_draft##.md` with a 2-digit zero-padded number: + +1. Scan existing files in OUTPUT_DIR matching `solution_draft*.md` +2. Extract the highest existing number +3. Increment by 1 +4. Zero-pad to 2 digits (e.g., `01`, `02`, ..., `10`, `11`) + +Example: if `solution_draft01.md` through `solution_draft10.md` exist, the next output is `solution_draft11.md`. 
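+
+A minimal sketch of the mode detection and draft numbering logic above, as a hypothetical Python helper (the function name and signature are illustrative and not part of the skill; the `solution_draft` prefix and 2-digit zero-padding come from this document):
+
+```python
+# Hypothetical helper sketching the mode detection and numbering rules above.
+import re
+from pathlib import Path
+
+def next_draft(output_dir):
+    out = Path(output_dir)
+    numbers = [
+        int(m.group(1))
+        for p in out.glob("solution_draft*.md")
+        if (m := re.fullmatch(r"solution_draft(\d+)\.md", p.name))
+    ]
+    mode = "B" if numbers else "A"  # any existing draft triggers Mode B
+    nxt = (max(numbers) if numbers else 0) + 1
+    return mode, out / f"solution_draft{nxt:02d}.md"  # 2-digit zero-padding
+
+# With solution_draft01.md..solution_draft10.md present:
+# next_draft("OUTPUT_DIR") -> ("B", OUTPUT_DIR/solution_draft11.md)
+```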
+ +### Working Directory & Intermediate Artifact Management + +#### Directory Structure + +At the start of research, **must** create a working directory under RESEARCH_DIR: + +``` +RESEARCH_DIR/ +├── 00_ac_assessment.md # Mode A Phase 1 output: AC & restrictions assessment +├── 00_question_decomposition.md # Step 0-1 output +├── 01_source_registry.md # Step 2 output: all consulted source links +├── 02_fact_cards.md # Step 3 output: extracted facts +├── 03_comparison_framework.md # Step 4 output: selected framework and populated data +├── 04_reasoning_chain.md # Step 6 output: fact → conclusion reasoning +├── 05_validation_log.md # Step 7 output: use-case validation results +└── raw/ # Raw source archive (optional) + ├── source_1.md + └── source_2.md +``` + +### Save Timing & Content + +| Step | Save immediately after completion | Filename | +|------|-----------------------------------|----------| +| Mode A Phase 1 | AC & restrictions assessment tables | `00_ac_assessment.md` | +| Step 0-1 | Question type classification + sub-question list | `00_question_decomposition.md` | +| Step 2 | Each consulted source link, tier, summary | `01_source_registry.md` | +| Step 3 | Each fact card (statement + source + confidence) | `02_fact_cards.md` | +| Step 4 | Selected comparison framework + initial population | `03_comparison_framework.md` | +| Step 6 | Reasoning process for each dimension | `04_reasoning_chain.md` | +| Step 7 | Validation scenarios + results + review checklist | `05_validation_log.md` | +| Step 8 | Complete solution draft | `OUTPUT_DIR/solution_draft##.md` | + +### Save Principles + +1. **Save immediately**: Write to the corresponding file as soon as a step is completed; don't wait until the end +2. **Incremental updates**: Same file can be updated multiple times; append or replace new content +3. **Preserve process**: Keep intermediate files even after their content is integrated into the final report +4. 
**Enable recovery**: If research is interrupted, progress can be recovered from intermediate files + +### Output Files + +**Required files** (automatically generated through the process): + +| File | Content | When Generated | +|------|---------|----------------| +| `00_ac_assessment.md` | AC & restrictions assessment (Mode A only) | After Phase 1 completion | +| `00_question_decomposition.md` | Question type, sub-question list | After Step 0-1 completion | +| `01_source_registry.md` | All source links and summaries | Continuously updated during Step 2 | +| `02_fact_cards.md` | Extracted facts and sources | Continuously updated during Step 3 | +| `03_comparison_framework.md` | Selected framework and populated data | After Step 4 completion | +| `04_reasoning_chain.md` | Fact → conclusion reasoning | After Step 6 completion | +| `05_validation_log.md` | Use-case validation and review | After Step 7 completion | +| `OUTPUT_DIR/solution_draft##.md` | Complete solution draft | After Step 8 completion | +| `OUTPUT_DIR/tech_stack.md` | Tech stack evaluation and decisions | After Phase 3 (optional) | +| `OUTPUT_DIR/security_analysis.md` | Threat model and security controls | After Phase 4 (optional) | + +**Optional files**: +- `raw/*.md` - Raw source archives (saved when content is lengthy) diff --git a/.cursor/skills/research/steps/01_mode-a-initial-research.md b/.cursor/skills/research/steps/01_mode-a-initial-research.md new file mode 100644 index 0000000..88404cd --- /dev/null +++ b/.cursor/skills/research/steps/01_mode-a-initial-research.md @@ -0,0 +1,127 @@ +## Mode A: Initial Research + +Triggered when no `solution_draft*.md` files exist in OUTPUT_DIR, or when the user explicitly requests initial research. + +### Phase 1: AC & Restrictions Assessment (BLOCKING) + +**Role**: Professional software architect + +A focused preliminary research pass **before** the main solution research. The goal is to validate that the acceptance criteria and restrictions are realistic before designing a solution around them. + +**Input**: All files from INPUT_DIR (or INPUT_FILE in standalone mode) + +**Task**: +1. Read all problem context files thoroughly +2. **ASK the user about every unclear aspect** — do not assume: + - Unclear problem boundaries → ask + - Ambiguous acceptance criteria values → ask + - Missing context (no `security_approach.md`, no `input_data/`) → ask what they have + - Conflicting restrictions → ask which takes priority +3. Research the internet **extensively** — use multiple search queries per question, rephrase, and search from different angles: + - How realistic are the acceptance criteria for this specific domain? Search for industry benchmarks, standards, and typical values + - How critical is each criterion? Search for case studies where criteria were relaxed or tightened + - What domain-specific acceptance criteria are we missing? Search for industry standards, regulatory requirements, and best practices in the specific domain + - Impact of each criterion value on the whole system quality — search for research papers and engineering reports + - Cost/budget implications of each criterion — search for pricing, total cost of ownership analyses, and comparable project budgets + - Timeline implications — search for project timelines, development velocity reports, and comparable implementations + - What do practitioners in this domain consider the most important criteria? Search forums, conference talks, and experience reports +4. 
Research restrictions from multiple perspectives: + - Are the restrictions realistic? Search for comparable projects that operated under similar constraints + - Should any be tightened or relaxed? Search for what constraints similar projects actually ended up with + - Are there additional restrictions we should add? Search for regulatory, compliance, and safety requirements in this domain + - What restrictions do practitioners wish they had defined earlier? Search for post-mortem reports and lessons learned +5. Verify findings with authoritative sources (official docs, papers, benchmarks) — each key finding must have at least 2 independent sources + +**Uses Steps 0-3 of the 8-step engine** (question classification, decomposition, source tiering, fact extraction) scoped to AC and restrictions assessment. + +**Save action**: Write `RESEARCH_DIR/00_ac_assessment.md` with format: + +```markdown +# Acceptance Criteria Assessment + +## Acceptance Criteria + +| Criterion | Our Values | Researched Values | Cost/Timeline Impact | Status | +|-----------|-----------|-------------------|---------------------|--------| +| [name] | [current] | [researched range] | [impact] | Added / Modified / Removed | + +## Restrictions Assessment + +| Restriction | Our Values | Researched Values | Cost/Timeline Impact | Status | +|-------------|-----------|-------------------|---------------------|--------| +| [name] | [current] | [researched range] | [impact] | Added / Modified / Removed | + +## Key Findings +[Summary of critical findings] + +## Sources +[Key references used] +``` + +**BLOCKING**: Present the AC assessment tables to the user. Wait for confirmation or adjustments before proceeding to Phase 2. The user may update `acceptance_criteria.md` or `restrictions.md` based on findings. + +--- + +### Phase 2: Problem Research & Solution Draft + +**Role**: Professional researcher and software architect + +Full 8-step research methodology. Produces the first solution draft. + +**Input**: All files from INPUT_DIR (possibly updated after Phase 1) + Phase 1 artifacts + +**Task** (drives the 8-step engine): +1. Research existing/competitor solutions for similar problems — search broadly across industries and adjacent domains, not just the obvious competitors +2. Research the problem thoroughly — all possible ways to solve it, split into components; search for how different fields approach analogous problems +3. For each component, research all possible solutions and find the most efficient state-of-the-art approaches — use multiple query variants and perspectives from Step 1 +4. For each promising approach, search for real-world deployment experience: success stories, failure reports, lessons learned, and practitioner opinions +5. Search for contrarian viewpoints — who argues against the common approaches and why? What failure modes exist? +6. Verify that suggested tools/libraries actually exist and work as described — check official repos, latest releases, and community health (stars, recent commits, open issues) +7. Include security considerations in each component analysis +8. Provide rough cost estimates for proposed solutions + +Be concise: the fewer words the better, but do not omit any important details. + +**Save action**: Write `OUTPUT_DIR/solution_draft##.md` using template: `templates/solution_draft_mode_a.md` + +--- + +### Phase 3: Tech Stack Consolidation (OPTIONAL) + +**Role**: Software architect evaluating technology choices + +Focused synthesis step — no new 8-step cycle. 
Uses research already gathered in Phase 2 to make concrete technology decisions. + +**Input**: Latest `solution_draft##.md` from OUTPUT_DIR + all files from INPUT_DIR + +**Task**: +1. Extract technology options from the solution draft's component comparison tables +2. Score each option against: fitness for purpose, maturity, security track record, team expertise, cost, scalability +3. Produce a tech stack summary with selection rationale +4. Assess risks and learning requirements per technology choice + +**Save action**: Write `OUTPUT_DIR/tech_stack.md` with: +- Requirements analysis (functional, non-functional, constraints) +- Technology evaluation tables (language, framework, database, infrastructure, key libraries) with scores +- Tech stack summary block +- Risk assessment and learning requirements tables + +--- + +### Phase 4: Security Deep Dive (OPTIONAL) + +**Role**: Security architect + +Focused analysis step — deepens the security column from the solution draft into a proper threat model and controls specification. + +**Input**: Latest `solution_draft##.md` from OUTPUT_DIR + `security_approach.md` from INPUT_DIR + problem context + +**Task**: +1. Build threat model: asset inventory, threat actors, attack vectors +2. Define security requirements and proposed controls per component (with risk level) +3. Summarize authentication/authorization, data protection, secure communication, and logging/monitoring approach + +**Save action**: Write `OUTPUT_DIR/security_analysis.md` with: +- Threat model (assets, actors, vectors) +- Per-component security requirements and controls table +- Security controls summary diff --git a/.cursor/skills/research/steps/02_mode-b-solution-assessment.md b/.cursor/skills/research/steps/02_mode-b-solution-assessment.md new file mode 100644 index 0000000..d14d031 --- /dev/null +++ b/.cursor/skills/research/steps/02_mode-b-solution-assessment.md @@ -0,0 +1,27 @@ +## Mode B: Solution Assessment + +Triggered when `solution_draft*.md` files exist in OUTPUT_DIR. + +**Role**: Professional software architect + +Full 8-step research methodology applied to assessing and improving an existing solution draft. + +**Input**: All files from INPUT_DIR + the latest (highest-numbered) `solution_draft##.md` from OUTPUT_DIR + +**Task** (drives the 8-step engine): +1. Read the existing solution draft thoroughly +2. Research the internet extensively — for each component/decision in the draft, search for: + - Known problems and limitations of the chosen approach + - What practitioners say about using it in production + - Better alternatives that may have emerged recently + - Common failure modes and edge cases + - How competitors/similar projects solve the same problem differently +3. Search specifically for contrarian views: "why not [chosen approach]", "[chosen approach] criticism", "[chosen approach] failure" +4. Identify security weak points and vulnerabilities — search for CVEs, security advisories, and known attack vectors for each technology in the draft +5. Identify performance bottlenecks — search for benchmarks, load test results, and scalability reports +6. For each identified weak point, search for multiple solution approaches and compare them +7. 
Based on findings, form a new solution draft in the same format + +**Save action**: Write `OUTPUT_DIR/solution_draft##.md` (incremented) using template: `templates/solution_draft_mode_b.md` + +**Optional follow-up**: After Mode B completes, the user can request Phase 3 (Tech Stack Consolidation) or Phase 4 (Security Deep Dive) using the revised draft. These phases work identically to their Mode A descriptions in `steps/01_mode-a-initial-research.md`. diff --git a/.cursor/skills/research/steps/03_engine-investigation.md b/.cursor/skills/research/steps/03_engine-investigation.md new file mode 100644 index 0000000..733905d --- /dev/null +++ b/.cursor/skills/research/steps/03_engine-investigation.md @@ -0,0 +1,227 @@ +## Research Engine — Investigation Phase (Steps 0–3.5) + +### Step 0: Question Type Classification + +First, classify the research question type and select the corresponding strategy: + +| Question Type | Core Task | Focus Dimensions | +|---------------|-----------|------------------| +| **Concept Comparison** | Build comparison framework | Mechanism differences, applicability boundaries | +| **Decision Support** | Weigh trade-offs | Cost, risk, benefit | +| **Trend Analysis** | Map evolution trajectory | History, driving factors, predictions | +| **Problem Diagnosis** | Root cause analysis | Symptoms, causes, evidence chain | +| **Knowledge Organization** | Systematic structuring | Definitions, classifications, relationships | + +**Mode-specific classification**: + +| Mode / Phase | Typical Question Type | +|--------------|----------------------| +| Mode A Phase 1 | Knowledge Organization + Decision Support | +| Mode A Phase 2 | Decision Support | +| Mode B | Problem Diagnosis + Decision Support | + +### Step 0.5: Novelty Sensitivity Assessment (BLOCKING) + +Before starting research, assess the novelty sensitivity of the question (Critical/High/Medium/Low). This determines source time windows and filtering strategy. + +**For full classification table, critical-domain rules, trigger words, and assessment template**: Read `references/novelty-sensitivity.md` + +Key principle: Critical-sensitivity topics (AI/LLMs, blockchain) require sources within 6 months, mandatory version annotations, cross-validation from 2+ sources, and direct verification of official download pages. + +**Save action**: Append timeliness assessment to the end of `00_question_decomposition.md` + +--- + +### Step 1: Question Decomposition & Boundary Definition + +**Mode-specific sub-questions**: + +**Mode A Phase 2** (Initial Research — Problem & Solution): +- "What existing/competitor solutions address this problem?" +- "What are the component parts of this problem?" +- "For each component, what are the state-of-the-art solutions?" +- "What are the security considerations per component?" +- "What are the cost implications of each approach?" + +**Mode B** (Solution Assessment): +- "What are the weak points and potential problems in the existing draft?" +- "What are the security vulnerabilities in the proposed architecture?" +- "Where are the performance bottlenecks?" +- "What solutions exist for each identified issue?" + +**General sub-question patterns** (use when applicable): +- **Sub-question A**: "What is X and how does it work?" (Definition & mechanism) +- **Sub-question B**: "What are the dimensions of relationship/difference between X and Y?" (Comparative analysis) +- **Sub-question C**: "In what scenarios is X applicable/inapplicable?" 
(Boundary conditions) +- **Sub-question D**: "What are X's development trends/best practices?" (Extended analysis) + +#### Perspective Rotation (MANDATORY) + +For each research problem, examine it from **at least 3 different perspectives**. Each perspective generates its own sub-questions and search queries. + +| Perspective | What it asks | Example queries | +|-------------|-------------|-----------------| +| **End-user / Consumer** | What problems do real users encounter? What do they wish were different? | "X problems", "X frustrations reddit", "X user complaints" | +| **Implementer / Engineer** | What are the technical challenges, gotchas, hidden complexities? | "X implementation challenges", "X pitfalls", "X lessons learned" | +| **Business / Decision-maker** | What are the costs, ROI, strategic implications? | "X total cost of ownership", "X ROI case study", "X vs Y business comparison" | +| **Contrarian / Devil's advocate** | What could go wrong? Why might this fail? What are critics saying? | "X criticism", "why not X", "X failures", "X disadvantages real world" | +| **Domain expert / Academic** | What does peer-reviewed research say? What are theoretical limits? | "X research paper", "X systematic review", "X benchmarks academic" | +| **Practitioner / Field** | What do people who actually use this daily say? What works in practice vs theory? | "X in production", "X experience report", "X after 1 year" | + +Select at least 3 perspectives relevant to the problem. Document the chosen perspectives in `00_question_decomposition.md`. + +#### Question Explosion (MANDATORY) + +For **each sub-question**, generate **at least 3-5 search query variants** before searching. This ensures broad coverage and avoids missing relevant information due to terminology differences. + +**Query variant strategies**: +- **Specificity ladder**: broad ("indoor navigation systems") → narrow ("UWB-based indoor drone navigation accuracy") +- **Negation/failure**: "X limitations", "X failure modes", "when X doesn't work" +- **Comparison framing**: "X vs Y for Z", "X alternative for Z", "X or Y which is better for Z" +- **Practitioner voice**: "X in production experience", "X real-world results", "X lessons learned" +- **Temporal**: "X 2025", "X latest developments", "X roadmap" +- **Geographic/domain**: "X in Europe", "X for defense applications", "X in agriculture" + +Record all planned queries in `00_question_decomposition.md` alongside each sub-question. + +**Research Subject Boundary Definition (BLOCKING - must be explicit)**: + +When decomposing questions, you must explicitly define the **boundaries of the research subject**: + +| Dimension | Boundary to define | Example | +|-----------|--------------------|---------| +| **Population** | Which group is being studied? | University students vs K-12 vs vocational students vs all students | +| **Geography** | Which region is being studied? | Chinese universities vs US universities vs global | +| **Timeframe** | Which period is being studied? | Post-2020 vs full historical picture | +| **Level** | Which level is being studied? | Undergraduate vs graduate vs vocational | + +**Common mistake**: User asks about "university classroom issues" but sources include policies targeting "K-12 students" — mismatched target populations will invalidate the entire research. + +**Save action**: +1. Read all files from INPUT_DIR to ground the research in the project context +2. Create working directory `RESEARCH_DIR/` +3. 
Write `00_question_decomposition.md`, including: + - Original question + - Active mode (A Phase 2 or B) and rationale + - Summary of relevant problem context from INPUT_DIR + - Classified question type and rationale + - **Research subject boundary definition** (population, geography, timeframe, level) + - List of decomposed sub-questions + - **Chosen perspectives** (at least 3 from the Perspective Rotation table) with rationale + - **Search query variants** for each sub-question (at least 3-5 per sub-question) +4. Create a TodoWrite to track progress + +--- + +### Step 2: Source Tiering & Exhaustive Web Investigation + +Tier sources by authority, **prioritize primary sources** (L1 > L2 > L3 > L4). Conclusions must be traceable to L1/L2; L3/L4 serve as supplementary evidence and validation. + +**For full tier definitions, search strategies, community mining steps, and source registry templates**: Read `references/source-tiering.md` + +**Tool Usage**: +- Use `WebSearch` for broad searches; `WebFetch` to read specific pages +- Use the `context7` MCP server (`resolve-library-id` then `get-library-docs`) for up-to-date library/framework documentation +- Always cross-verify training data claims against live sources for facts that may have changed (versions, APIs, deprecations, security advisories) +- When citing web sources, include the URL and date accessed + +#### Exhaustive Search Requirements (MANDATORY) + +Do not stop at the first few results. The goal is to build a comprehensive evidence base. + +**Minimum search effort per sub-question**: +- Execute **all** query variants generated in Step 1's Question Explosion (at least 3-5 per sub-question) +- Consult at least **2 different source tiers** per sub-question (e.g., L1 official docs + L4 community discussion) +- If initial searches yield fewer than 3 relevant sources for a sub-question, **broaden the search** with alternative terms, related domains, or analogous problems + +**Search broadening strategies** (use when results are thin): +- Try adjacent fields: if researching "drone indoor navigation", also search "robot indoor navigation", "warehouse AGV navigation" +- Try different communities: academic papers, industry whitepapers, military/defense publications, hobbyist forums +- Try different geographies: search in English + search for European/Asian approaches if relevant +- Try historical evolution: "history of X", "evolution of X approaches", "X state of the art 2024 2025" +- Try failure analysis: "X project failure", "X post-mortem", "X recall", "X incident report" + +**Search saturation rule**: Continue searching until new queries stop producing substantially new information. If the last 3 searches only repeat previously found facts, the sub-question is saturated. + +**Save action**: +For each source consulted, **immediately** append to `01_source_registry.md` using the entry template from `references/source-tiering.md`. + +--- + +### Step 3: Fact Extraction & Evidence Cards + +Transform sources into **verifiable fact cards**: + +```markdown +## Fact Cards + +### Fact 1 +- **Statement**: [specific fact description] +- **Source**: [link/document section] +- **Confidence**: High/Medium/Low + +### Fact 2 +... 
+``` + +**Key discipline**: +- Pin down facts first, then reason +- Distinguish "what officials said" from "what I infer" +- When conflicting information is found, annotate and preserve both sides +- Annotate confidence level: + - ✅ High: Explicitly stated in official documentation + - ⚠️ Medium: Mentioned in official blog but not formally documented + - ❓ Low: Inference or from unofficial sources + +**Save action**: +For each extracted fact, **immediately** append to `02_fact_cards.md`: +```markdown +## Fact #[number] +- **Statement**: [specific fact description] +- **Source**: [Source #number] [link] +- **Phase**: [Phase 1 / Phase 2 / Assessment] +- **Target Audience**: [which group this fact applies to, inherited from source or further refined] +- **Confidence**: ✅/⚠️/❓ +- **Related Dimension**: [corresponding comparison dimension] +``` + +**Target audience in fact statements**: +- If a fact comes from a "partially overlapping" or "reference only" source, the statement **must explicitly annotate the applicable scope** +- Wrong: "The Ministry of Education banned phones in classrooms" (doesn't specify who) +- Correct: "The Ministry of Education banned K-12 students from bringing phones into classrooms (does not apply to university students)" + +--- + +### Step 3.5: Iterative Deepening — Follow-Up Investigation + +After initial fact extraction, review what you have found and identify **knowledge gaps and new questions** that emerged from the initial research. This step ensures the research doesn't stop at surface-level findings. + +**Process**: + +1. **Gap analysis**: Review fact cards and identify: + - Sub-questions with fewer than 3 high-confidence facts → need more searching + - Contradictions between sources → need tie-breaking evidence + - Perspectives (from Step 1) that have no or weak coverage → need targeted search + - Claims that rely only on L3/L4 sources → need L1/L2 verification + +2. **Follow-up question generation**: Based on initial findings, generate new questions: + - "Source X claims [fact] — is this consistent with other evidence?" + - "If [approach A] has [limitation], how do practitioners work around it?" + - "What are the second-order effects of [finding]?" + - "Who disagrees with [common finding] and why?" + - "What happened when [solution] was deployed at scale?" + +3. **Targeted deep-dive searches**: Execute follow-up searches focusing on: + - Specific claims that need verification + - Alternative viewpoints not yet represented + - Real-world case studies and experience reports + - Failure cases and edge conditions + - Recent developments that may change the picture + +4. **Update artifacts**: Append new sources to `01_source_registry.md`, new facts to `02_fact_cards.md` + +**Exit criteria**: Proceed to Step 4 when: +- Every sub-question has at least 3 facts with at least one from L1/L2 +- At least 3 perspectives from Step 1 have supporting evidence +- No unresolved contradictions remain (or they are explicitly documented as open questions) +- Follow-up searches are no longer producing new substantive information diff --git a/.cursor/skills/research/steps/04_engine-analysis.md b/.cursor/skills/research/steps/04_engine-analysis.md new file mode 100644 index 0000000..b06f7cd --- /dev/null +++ b/.cursor/skills/research/steps/04_engine-analysis.md @@ -0,0 +1,146 @@ +## Research Engine — Analysis Phase (Steps 4–8) + +### Step 4: Build Comparison/Analysis Framework + +Based on the question type, select fixed analysis dimensions. 
**For dimension lists** (General, Concept Comparison, Decision Support): Read `references/comparison-frameworks.md` + +**Save action**: +Write to `03_comparison_framework.md`: +```markdown +# Comparison Framework + +## Selected Framework Type +[Concept Comparison / Decision Support / ...] + +## Selected Dimensions +1. [Dimension 1] +2. [Dimension 2] +... + +## Initial Population +| Dimension | X | Y | Factual Basis | +|-----------|---|---|---------------| +| [Dimension 1] | [description] | [description] | Fact #1, #3 | +| ... | | | | +``` + +--- + +### Step 5: Reference Point Baseline Alignment + +Ensure all compared parties have clear, consistent definitions: + +**Checklist**: +- [ ] Is the reference point's definition stable/widely accepted? +- [ ] Does it need verification, or can domain common knowledge be used? +- [ ] Does the reader's understanding of the reference point match mine? +- [ ] Are there ambiguities that need to be clarified first? + +--- + +### Step 6: Fact-to-Conclusion Reasoning Chain + +Explicitly write out the "fact → comparison → conclusion" reasoning process: + +```markdown +## Reasoning Process + +### Regarding [Dimension Name] + +1. **Fact confirmation**: According to [source], X's mechanism is... +2. **Compare with reference**: While Y's mechanism is... +3. **Conclusion**: Therefore, the difference between X and Y on this dimension is... +``` + +**Key discipline**: +- Conclusions come from mechanism comparison, not "gut feelings" +- Every conclusion must be traceable to specific facts +- Uncertain conclusions must be annotated + +**Save action**: +Write to `04_reasoning_chain.md`: +```markdown +# Reasoning Chain + +## Dimension 1: [Dimension Name] + +### Fact Confirmation +According to [Fact #X], X's mechanism is... + +### Reference Comparison +While Y's mechanism is... (Source: [Fact #Y]) + +### Conclusion +Therefore, the difference between X and Y on this dimension is... + +### Confidence +✅/⚠️/❓ + rationale + +--- +## Dimension 2: [Dimension Name] +... +``` + +--- + +### Step 7: Use-Case Validation (Sanity Check) + +Validate conclusions against a typical scenario: + +**Validation questions**: +- Based on my conclusions, how should this scenario be handled? +- Is that actually the case? +- Are there counterexamples that need to be addressed? + +**Review checklist**: +- [ ] Are draft conclusions consistent with Step 3 fact cards? +- [ ] Are there any important dimensions missed? +- [ ] Is there any over-extrapolation? +- [ ] Are conclusions actionable/verifiable? + +**Save action**: +Write to `05_validation_log.md`: +```markdown +# Validation Log + +## Validation Scenario +[Scenario description] + +## Expected Based on Conclusions +If using X: [expected behavior] +If using Y: [expected behavior] + +## Actual Validation Results +[actual situation] + +## Counterexamples +[yes/no, describe if yes] + +## Review Checklist +- [x] Draft conclusions consistent with fact cards +- [x] No important dimensions missed +- [x] No over-extrapolation +- [ ] Issue found: [if any] + +## Conclusions Requiring Revision +[if any] +``` + +--- + +### Step 8: Deliverable Formatting + +Make the output **readable, traceable, and actionable**. + +**Save action**: +Integrate all intermediate artifacts. 
Write to `OUTPUT_DIR/solution_draft##.md` using the appropriate output template based on active mode: +- Mode A: `templates/solution_draft_mode_a.md` +- Mode B: `templates/solution_draft_mode_b.md` + +Sources to integrate: +- Extract background from `00_question_decomposition.md` +- Reference key facts from `02_fact_cards.md` +- Organize conclusions from `04_reasoning_chain.md` +- Generate references from `01_source_registry.md` +- Supplement with use cases from `05_validation_log.md` +- For Mode A: include AC assessment from `00_ac_assessment.md` diff --git a/.cursor/skills/retrospective/SKILL.md b/.cursor/skills/retrospective/SKILL.md new file mode 100644 index 0000000..3b5191a --- /dev/null +++ b/.cursor/skills/retrospective/SKILL.md @@ -0,0 +1,174 @@ +--- +name: retrospective +description: | + Collect metrics from implementation batch reports and code review findings, analyze trends across cycles, + and produce improvement reports with actionable recommendations. + 3-step workflow: collect metrics, analyze trends, produce report. + Outputs to _docs/06_metrics/. + Trigger phrases: + - "retrospective", "retro", "run retro" + - "metrics review", "feedback loop" + - "implementation metrics", "analyze trends" +category: evolve +tags: [retrospective, metrics, trends, improvement, feedback-loop] +disable-model-invocation: true +--- + +# Retrospective + +Collect metrics from implementation artifacts, analyze trends across development cycles, and produce actionable improvement reports. + +## Core Principles + +- **Data-driven**: conclusions come from metrics, not impressions +- **Actionable**: every finding must have a concrete improvement suggestion +- **Cumulative**: each retrospective compares against previous ones to track progress +- **Save immediately**: write artifacts to disk after each step +- **Non-judgmental**: focus on process improvement, not blame + +## Context Resolution + +Fixed paths: + +- IMPL_DIR: `_docs/03_implementation/` +- METRICS_DIR: `_docs/06_metrics/` +- TASKS_DIR: `_docs/02_tasks/` + +Announce the resolved paths to the user before proceeding. + +## Prerequisite Checks (BLOCKING) + +1. `IMPL_DIR` exists and contains at least one `batch_*_report.md` — **STOP if missing** (nothing to analyze) +2. Create METRICS_DIR if it does not exist +3. Check for previous retrospective reports in METRICS_DIR to enable trend comparison + +## Artifact Management + +### Directory Structure + +``` +METRICS_DIR/ +├── retro_[YYYY-MM-DD].md +├── retro_[YYYY-MM-DD].md +└── ... +``` + +## Progress Tracking + +At the start of execution, create a TodoWrite with all steps (1 through 3). Update status as each step completes. 
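+
+The blocking prerequisite checks above can be sketched as a small hypothetical Python helper (names and error handling are illustrative only; the paths are the fixed ones from Context Resolution):
+
+```python
+# Hypothetical sketch of the blocking prerequisite checks (names illustrative).
+from pathlib import Path
+
+IMPL_DIR = Path("_docs/03_implementation")
+METRICS_DIR = Path("_docs/06_metrics")
+
+def check_prerequisites():
+    if not sorted(IMPL_DIR.glob("batch_*_report.md")):
+        raise SystemExit("STOP: no batch reports found, nothing to analyze")
+    METRICS_DIR.mkdir(parents=True, exist_ok=True)
+    previous = sorted(METRICS_DIR.glob("retro_*.md"))
+    return previous[-1] if previous else None  # latest retro enables trend comparison
+```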
+ +## Workflow + +### Step 1: Collect Metrics + +**Role**: Data analyst +**Goal**: Parse all implementation artifacts and extract quantitative metrics +**Constraints**: Collection only — no interpretation yet + +#### Sources + +| Source | Metrics Extracted | +|--------|------------------| +| `batch_*_report.md` | Tasks per batch, batch count, task statuses (Done/Blocked/Partial) | +| Code review sections in batch reports | PASS/FAIL/PASS_WITH_WARNINGS ratios, finding counts by severity and category | +| Task spec files in TASKS_DIR | Complexity points per task, dependency count | +| `FINAL_implementation_report.md` | Total tasks, total batches, overall duration | +| Git log (if available) | Commits per batch, files changed per batch | + +#### Metrics to Compute + +**Implementation Metrics**: +- Total tasks implemented +- Total batches executed +- Average tasks per batch +- Average complexity points per batch +- Total complexity points delivered + +**Quality Metrics**: +- Code review pass rate (PASS / total reviews) +- Code review findings by severity: Critical, High, Medium, Low counts +- Code review findings by category: Bug, Spec-Gap, Security, Performance, Maintainability, Style, Scope +- FAIL count (batches that required user intervention) + +**Efficiency Metrics**: +- Blocked task count and reasons +- Tasks completed on first attempt vs requiring fixes +- Batch with most findings (identify problem areas) + +**Self-verification**: +- [ ] All batch reports parsed +- [ ] All metric categories computed +- [ ] No batch reports missed + +--- + +### Step 2: Analyze Trends + +**Role**: Process improvement analyst +**Goal**: Identify patterns, recurring issues, and improvement opportunities +**Constraints**: Analysis must be grounded in the metrics from Step 1 + +1. If previous retrospective reports exist in METRICS_DIR, load the most recent one for comparison +2. Identify patterns: + - **Recurring findings**: which code review categories appear most frequently? + - **Problem components**: which components/files generate the most findings? + - **Complexity accuracy**: do high-complexity tasks actually produce more issues? + - **Blocker patterns**: what types of blockers occur and can they be prevented? +3. Compare against previous retrospective (if exists): + - Which metrics improved? + - Which metrics degraded? + - Were previous improvement actions effective? +4. Identify top 3 improvement actions ranked by impact + +**Self-verification**: +- [ ] Patterns are grounded in specific metrics +- [ ] Comparison with previous retro included (if exists) +- [ ] Top 3 actions are concrete and actionable + +--- + +### Step 3: Produce Report + +**Role**: Technical writer +**Goal**: Write a structured retrospective report with metrics, trends, and recommendations +**Constraints**: Concise, data-driven, actionable + +Write `METRICS_DIR/retro_[YYYY-MM-DD].md` using `templates/retrospective-report.md` as structure. + +**Self-verification**: +- [ ] All metrics from Step 1 included +- [ ] Trend analysis from Step 2 included +- [ ] Top 3 improvement actions clearly stated +- [ ] Suggested rule/skill updates are specific + +**Save action**: Write `retro_[YYYY-MM-DD].md` + +Present the report summary to the user. 
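+
+As a concrete illustration of Step 1, a minimal collection sketch in Python (hypothetical: it assumes each batch report states its review verdict on a line like `Verdict: PASS`, which may differ from the actual report format):
+
+```python
+# Hypothetical sketch of Step 1: collect review verdicts from batch reports.
+import re
+from collections import Counter
+from pathlib import Path
+
+def collect_verdicts(impl_dir="_docs/03_implementation"):
+    verdicts = Counter()
+    for report in sorted(Path(impl_dir).glob("batch_*_report.md")):
+        # longest alternative first so PASS_WITH_WARNINGS is not counted as PASS
+        m = re.search(r"Verdict:\s*(PASS_WITH_WARNINGS|PASS|FAIL)", report.read_text())
+        if m:
+            verdicts[m.group(1)] += 1
+    return verdicts
+
+v = collect_verdicts()
+total = sum(v.values()) or 1
+print(f"Pass rate: {v['PASS'] / total:.0%}")  # feeds the Quality Metrics table
+```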
+ +--- + +## Escalation Rules + +| Situation | Action | +|-----------|--------| +| No batch reports exist | **STOP** — nothing to analyze | +| Batch reports have inconsistent format | **WARN user**, extract what is available | +| No previous retrospective for comparison | PROCEED — report baseline metrics only | +| Metrics suggest systemic issue (>50% FAIL rate) | **WARN user** — suggest immediate process review | + +## Methodology Quick Reference + +``` +┌────────────────────────────────────────────────────────────────┐ +│ Retrospective (3-Step Method) │ +├────────────────────────────────────────────────────────────────┤ +│ PREREQ: batch reports exist in _docs/03_implementation/ │ +│ │ +│ 1. Collect Metrics → parse batch reports, compute metrics │ +│ 2. Analyze Trends → patterns, comparison, improvement areas │ +│ 3. Produce Report → _docs/06_metrics/retro_[date].md │ +├────────────────────────────────────────────────────────────────┤ +│ Principles: Data-driven · Actionable · Cumulative │ +│ Non-judgmental · Save immediately │ +└────────────────────────────────────────────────────────────────┘ +``` diff --git a/.cursor/skills/retrospective/templates/retrospective-report.md b/.cursor/skills/retrospective/templates/retrospective-report.md new file mode 100644 index 0000000..629c730 --- /dev/null +++ b/.cursor/skills/retrospective/templates/retrospective-report.md @@ -0,0 +1,93 @@ +# Retrospective Report Template + +Save as `_docs/06_metrics/retro_[YYYY-MM-DD].md`. + +--- + +```markdown +# Retrospective — [YYYY-MM-DD] + +## Implementation Summary + +| Metric | Value | +|--------|-------| +| Total tasks | [count] | +| Total batches | [count] | +| Total complexity points | [sum] | +| Avg tasks per batch | [value] | +| Avg complexity per batch | [value] | + +## Quality Metrics + +### Code Review Results + +| Verdict | Count | Percentage | +|---------|-------|-----------| +| PASS | [count] | [%] | +| PASS_WITH_WARNINGS | [count] | [%] | +| FAIL | [count] | [%] | + +### Findings by Severity + +| Severity | Count | +|----------|-------| +| Critical | [count] | +| High | [count] | +| Medium | [count] | +| Low | [count] | + +### Findings by Category + +| Category | Count | Top Files | +|----------|-------|-----------| +| Bug | [count] | [most affected files] | +| Spec-Gap | [count] | [most affected files] | +| Security | [count] | [most affected files] | +| Performance | [count] | [most affected files] | +| Maintainability | [count] | [most affected files] | +| Style | [count] | [most affected files] | +| Scope | [count] | [most affected files] | + +## Efficiency + +| Metric | Value | +|--------|-------| +| Blocked tasks | [count] | +| Tasks requiring fixes after review | [count] | +| Batch with most findings | Batch [N] — [reason] | + +### Blocker Analysis + +| Blocker Type | Count | Prevention | +|-------------|-------|-----------| +| [type] | [count] | [suggested prevention] | + +## Trend Comparison + +| Metric | Previous | Current | Change | +|--------|----------|---------|--------| +| Pass rate | [%] | [%] | [+/-] | +| Avg findings per batch | [value] | [value] | [+/-] | +| Blocked tasks | [count] | [count] | [+/-] | + +*Previous retrospective: [date or "N/A — first retro"]* + +## Top 3 Improvement Actions + +1. **[Action title]**: [specific, actionable description] + - Impact: [expected improvement] + - Effort: [low/medium/high] + +2. **[Action title]**: [specific, actionable description] + - Impact: [expected improvement] + - Effort: [low/medium/high] + +3. 
**[Action title]**: [specific, actionable description] + - Impact: [expected improvement] + - Effort: [low/medium/high] + +## Suggested Rule/Skill Updates + +| File | Change | Rationale | +|------|--------|-----------| +| [.cursor/rules/... or .cursor/skills/...] | [specific change] | [based on which metric] | +``` diff --git a/.cursor/skills/security/SKILL.md b/.cursor/skills/security/SKILL.md index ceab368..1e35084 100644 --- a/.cursor/skills/security/SKILL.md +++ b/.cursor/skills/security/SKILL.md @@ -1,311 +1,347 @@ --- -name: security-testing -description: "Test for security vulnerabilities using OWASP principles. Use when conducting security audits, testing auth, or implementing security practices." -category: specialized-testing -priority: critical -tokenEstimate: 1200 -agents: [qe-security-scanner, qe-api-contract-validator, qe-quality-analyzer] -implementation_status: optimized -optimization_version: 1.0 -last_optimized: 2025-12-02 -dependencies: [] -quick_reference_card: true -tags: [security, owasp, sast, dast, vulnerabilities, auth, injection] -trust_tier: 3 -validation: - schema_path: schemas/output.json - validator_path: scripts/validate-config.json - eval_path: evals/security-testing.yaml +name: security +description: | + OWASP-based security audit skill. Analyzes codebase for vulnerabilities across dependency scanning, + static analysis, OWASP Top 10 review, and secrets detection. Produces a structured security report + with severity-ranked findings and remediation guidance. + Can be invoked standalone or as part of the autopilot flow (optional step before deploy). + Trigger phrases: + - "security audit", "security scan", "OWASP review" + - "vulnerability scan", "security check" + - "check for vulnerabilities", "pentest" +category: review +tags: [security, owasp, sast, vulnerabilities, auth, injection, secrets] +disable-model-invocation: true --- -# Security Testing +# Security Audit - -When testing security or conducting audits: -1. TEST OWASP Top 10 vulnerabilities systematically -2. VALIDATE authentication and authorization on every endpoint -3. SCAN dependencies for known vulnerabilities (npm audit) -4. CHECK for injection attacks (SQL, XSS, command) -5. VERIFY secrets aren't exposed in code/logs +Analyze the codebase for security vulnerabilities using OWASP principles. Produces a structured report with severity-ranked findings, remediation suggestions, and a security checklist verdict. 
-**Quick Security Checks:** -- Access control → Test horizontal/vertical privilege escalation -- Crypto → Verify password hashing, HTTPS, no sensitive data exposed -- Injection → Test SQL injection, XSS, command injection -- Auth → Test weak passwords, session fixation, MFA enforcement -- Config → Check error messages don't leak info +## Core Principles -**Critical Success Factors:** -- Think like an attacker, build like a defender -- Security is built in, not added at the end -- Test continuously in CI/CD, not just before release - +- **OWASP-driven**: use the current OWASP Top 10 as the primary framework — verify the latest version at https://owasp.org/www-project-top-ten/ at audit start +- **Evidence-based**: every finding must reference a specific file, line, or configuration +- **Severity-ranked**: findings sorted Critical > High > Medium > Low +- **Actionable**: every finding includes a concrete remediation suggestion +- **Save immediately**: write artifacts to disk after each phase; never accumulate unsaved work +- **Complement, don't duplicate**: the `/code-review` skill does a lightweight security quick-scan; this skill goes deeper -## Quick Reference Card +## Context Resolution -### When to Use -- Security audits and penetration testing -- Testing authentication/authorization -- Validating input sanitization -- Reviewing security configuration +**Project mode** (default): +- PROBLEM_DIR: `_docs/00_problem/` +- SOLUTION_DIR: `_docs/01_solution/` +- DOCUMENT_DIR: `_docs/02_document/` +- SECURITY_DIR: `_docs/05_security/` -### OWASP Top 10 (2021) -| # | Vulnerability | Key Test | -|---|---------------|----------| -| 1 | Broken Access Control | User A accessing User B's data | -| 2 | Cryptographic Failures | Plaintext passwords, HTTP | -| 3 | Injection | SQL/XSS/command injection | -| 4 | Insecure Design | Rate limiting, session timeout | -| 5 | Security Misconfiguration | Verbose errors, exposed /admin | -| 6 | Vulnerable Components | npm audit, outdated packages | -| 7 | Auth Failures | Weak passwords, no MFA | -| 8 | Integrity Failures | Unsigned updates, malware | -| 9 | Logging Failures | No audit trail for breaches | -| 10 | SSRF | Server fetching internal URLs | +**Standalone mode** (explicit target provided, e.g. `/security @src/api/`): +- TARGET: the provided path +- SECURITY_DIR: `_standalone/security/` -### Tools -| Type | Tool | Purpose | -|------|------|---------| -| SAST | SonarQube, Semgrep | Static code analysis | -| DAST | OWASP ZAP, Burp | Dynamic scanning | -| Deps | npm audit, Snyk | Dependency vulnerabilities | -| Secrets | git-secrets, TruffleHog | Secret scanning | +Announce the detected mode and resolved paths to the user before proceeding. -### Agent Coordination -- `qe-security-scanner`: Multi-layer SAST/DAST scanning -- `qe-api-contract-validator`: API security testing -- `qe-quality-analyzer`: Security code review +## Prerequisite Checks + +1. Codebase must contain source code files — **STOP if empty** +2. Create SECURITY_DIR if it does not exist +3. If SECURITY_DIR already contains artifacts, ask user: **resume, overwrite, or skip?** +4. If `_docs/00_problem/security_approach.md` exists, read it for project-specific security requirements + +## Progress Tracking + +At the start of execution, create a TodoWrite with all phases (1 through 5). Update status as each phase completes. 
+ +## Workflow + +### Phase 1: Dependency Scan + +**Role**: Security analyst +**Goal**: Identify known vulnerabilities in project dependencies +**Constraints**: Scan only — no code changes + +1. Detect the project's package manager(s): `requirements.txt`, `package.json`, `Cargo.toml`, `*.csproj`, `go.mod` +2. Run the appropriate audit tool: + - Python: `pip-audit` or `safety check` + - Node: `npm audit` + - Rust: `cargo audit` + - .NET: `dotnet list package --vulnerable` + - Go: `govulncheck` +3. If no audit tool is available, manually inspect dependency files for known CVEs using WebSearch +4. Record findings with CVE IDs, affected packages, severity, and recommended upgrade versions + +**Self-verification**: +- [ ] All package manifests scanned +- [ ] Each finding has a CVE ID or advisory reference +- [ ] Upgrade paths identified for Critical/High findings + +**Save action**: Write `SECURITY_DIR/dependency_scan.md` --- -## Key Vulnerability Tests +### Phase 2: Static Analysis (SAST) -### 1. Broken Access Control -```javascript -// Horizontal escalation - User A accessing User B's data -test('user cannot access another user\'s order', async () => { - const userAToken = await login('userA'); - const userBOrder = await createOrder('userB'); +**Role**: Security engineer +**Goal**: Identify code-level vulnerabilities through static analysis +**Constraints**: Analysis only — no code changes - const response = await api.get(`/orders/${userBOrder.id}`, { - headers: { Authorization: `Bearer ${userAToken}` } - }); - expect(response.status).toBe(403); -}); +Scan the codebase for these vulnerability patterns: -// Vertical escalation - Regular user accessing admin -test('regular user cannot access admin', async () => { - const userToken = await login('regularUser'); - expect((await api.get('/admin/users', { - headers: { Authorization: `Bearer ${userToken}` } - })).status).toBe(403); -}); +**Injection**: +- SQL injection via string interpolation or concatenation +- Command injection (subprocess with shell=True, exec, eval, os.system) +- XSS via unsanitized user input in HTML output +- Template injection -### 2. Injection Attacks -```javascript -// SQL Injection -test('prevents SQL injection', async () => { - const malicious = "' OR '1'='1"; - const response = await api.get(`/products?search=${malicious}`); - expect(response.body.length).toBeLessThan(100); // Not all products -}); +**Authentication & Authorization**: +- Hardcoded credentials, API keys, passwords, tokens +- Missing authentication checks on endpoints +- Missing authorization checks (horizontal/vertical escalation paths) +- Weak password validation rules -// XSS -test('sanitizes HTML output', async () => { - const xss = ''; - await api.post('/comments', { text: xss }); +**Cryptographic Failures**: +- Plaintext password storage (no hashing) +- Weak hashing algorithms (MD5, SHA1 for passwords) +- Hardcoded encryption keys or salts +- Missing TLS/HTTPS enforcement - const html = (await api.get('/comments')).body; - expect(html).toContain('<script>'); - expect(html).not.toContain('` for Tailwind +- `