فهرست منبع

Merge pull request #10973 from growilabs/master

Release v7.5.1
mergify[bot] 1 روز پیش
والد
کامیت
ea35513dcc
100 فایل‌های تغییر یافته به همراه 9084 افزوده شده و 1413 حذف شده
  1. 239 0
      .claude/commands/invest-issue.md
  2. 0 307
      .claude/commands/kiro/spec-cleanup.md
  3. 0 179
      .claude/commands/kiro/spec-design.md
  4. 0 110
      .claude/commands/kiro/spec-impl.md
  5. 0 98
      .claude/commands/kiro/spec-requirements.md
  6. 0 87
      .claude/commands/kiro/spec-status.md
  7. 0 138
      .claude/commands/kiro/spec-tasks.md
  8. 0 92
      .claude/commands/kiro/validate-design.md
  9. 0 88
      .claude/commands/kiro/validate-gap.md
  10. 0 138
      .claude/commands/kiro/validate-impl.md
  11. 94 0
      .claude/rules/coding-style.md
  12. 152 0
      .claude/skills/kiro-debug/SKILL.md
  13. 262 0
      .claude/skills/kiro-discovery/SKILL.md
  14. 246 0
      .claude/skills/kiro-impl/SKILL.md
  15. 54 0
      .claude/skills/kiro-impl/templates/debugger-prompt.md
  16. 93 0
      .claude/skills/kiro-impl/templates/implementer-prompt.md
  17. 111 0
      .claude/skills/kiro-impl/templates/reviewer-prompt.md
  18. 171 0
      .claude/skills/kiro-review/SKILL.md
  19. 171 0
      .claude/skills/kiro-spec-batch/SKILL.md
  20. 258 0
      .claude/skills/kiro-spec-cleanup/SKILL.md
  21. 202 0
      .claude/skills/kiro-spec-design/SKILL.md
  22. 93 0
      .claude/skills/kiro-spec-design/rules/design-discovery-full.md
  23. 49 0
      .claude/skills/kiro-spec-design/rules/design-discovery-light.md
  24. 198 0
      .claude/skills/kiro-spec-design/rules/design-principles.md
  25. 50 0
      .claude/skills/kiro-spec-design/rules/design-review-gate.md
  26. 29 0
      .claude/skills/kiro-spec-design/rules/design-synthesis.md
  27. 12 26
      .claude/skills/kiro-spec-init/SKILL.md
  28. 255 0
      .claude/skills/kiro-spec-quick/SKILL.md
  29. 135 0
      .claude/skills/kiro-spec-requirements/SKILL.md
  30. 49 0
      .claude/skills/kiro-spec-requirements/rules/ears-format.md
  31. 51 0
      .claude/skills/kiro-spec-requirements/rules/requirements-review-gate.md
  32. 70 0
      .claude/skills/kiro-spec-status/SKILL.md
  33. 189 0
      .claude/skills/kiro-spec-tasks/SKILL.md
  34. 222 0
      .claude/skills/kiro-spec-tasks/rules/tasks-generation.md
  35. 41 0
      .claude/skills/kiro-spec-tasks/rules/tasks-parallel-analysis.md
  36. 37 21
      .claude/skills/kiro-steering-custom/SKILL.md
  37. 90 0
      .claude/skills/kiro-steering-custom/rules/steering-principles.md
  38. 44 27
      .claude/skills/kiro-steering/SKILL.md
  39. 90 0
      .claude/skills/kiro-steering/rules/steering-principles.md
  40. 102 0
      .claude/skills/kiro-validate-design/SKILL.md
  41. 110 0
      .claude/skills/kiro-validate-design/rules/design-review.md
  42. 107 0
      .claude/skills/kiro-validate-gap/SKILL.md
  43. 144 0
      .claude/skills/kiro-validate-gap/rules/gap-analysis.md
  44. 204 0
      .claude/skills/kiro-validate-impl/SKILL.md
  45. 131 0
      .claude/skills/kiro-verify-completion/SKILL.md
  46. 3 3
      .devcontainer/app/postCreateCommand.sh
  47. 3 3
      .github/workflows/reusable-app-prod.yml
  48. 56 1
      .kiro/settings/templates/specs/design.md
  49. 1 1
      .kiro/settings/templates/specs/requirements-init.md
  50. 6 0
      .kiro/settings/templates/specs/requirements.md
  51. 3 0
      .kiro/settings/templates/specs/tasks.md
  52. 815 0
      .kiro/specs/collaborative-editor-awareness/design.md
  53. 107 0
      .kiro/specs/collaborative-editor-awareness/requirements.md
  54. 218 0
      .kiro/specs/collaborative-editor-awareness/research.md
  55. 23 0
      .kiro/specs/collaborative-editor-awareness/spec.json
  56. 144 0
      .kiro/specs/collaborative-editor-awareness/tasks.md
  57. 3 1
      .kiro/specs/collaborative-editor/design.md
  58. 4 2
      .kiro/specs/collaborative-editor/requirements.md
  59. 638 0
      .kiro/specs/editor-keymaps/design.md
  60. 189 0
      .kiro/specs/editor-keymaps/requirements.md
  61. 118 0
      .kiro/specs/editor-keymaps/research.md
  62. 22 0
      .kiro/specs/editor-keymaps/spec.json
  63. 147 0
      .kiro/specs/editor-keymaps/tasks.md
  64. 544 0
      .kiro/specs/growi-logger/design.md
  65. 79 0
      .kiro/specs/growi-logger/requirements.md
  66. 224 0
      .kiro/specs/growi-logger/research.md
  67. 23 0
      .kiro/specs/growi-logger/spec.json
  68. 18 0
      .kiro/specs/growi-logger/tasks.md
  69. 598 0
      .kiro/specs/news-inappnotification/design.md
  70. 1 1
      .kiro/specs/news-inappnotification/requirements.md
  71. 142 0
      .kiro/specs/news-inappnotification/research.md
  72. 5 5
      .kiro/specs/news-inappnotification/spec.json
  73. 150 0
      .kiro/specs/news-inappnotification/tasks.md
  74. 0 14
      .kiro/specs/suggest-path/design.md
  75. 1 1
      .kiro/specs/suggest-path/requirements.md
  76. 1 1
      .kiro/specs/suggest-path/spec.json
  77. 0 2
      .kiro/specs/suggest-path/tasks.md
  78. 8 4
      .kiro/steering/product.md
  79. 5 1
      .kiro/steering/tech.md
  80. 32 18
      CLAUDE.md
  81. 2 3
      README.md
  82. 2 3
      README_JP.md
  83. 1 4
      apps/app/.claude/skills/build-optimization/SKILL.md
  84. 2 0
      apps/app/.gitignore
  85. 14 3
      apps/app/bin/postbuild-server.ts
  86. 5 1
      apps/app/config/logger/config.dev.ts
  87. 5 1
      apps/app/config/logger/config.prod.ts
  88. 2 0
      apps/app/docker/Dockerfile.dockerignore
  89. 0 3
      apps/app/next.config.ts
  90. 3 9
      apps/app/package.json
  91. 128 0
      apps/app/playwright/23-editor/emacs-keymap.spec.ts
  92. 2 1
      apps/app/public/static/locales/en_US/translation.json
  93. 2 1
      apps/app/public/static/locales/fr_FR/translation.json
  94. 2 1
      apps/app/public/static/locales/ja_JP/translation.json
  95. 2 1
      apps/app/public/static/locales/ko_KR/translation.json
  96. 2 1
      apps/app/public/static/locales/zh_CN/translation.json
  97. 9 2
      apps/app/resource/Contributor.js
  98. 4 3
      apps/app/src/client/components/Admin/App/AppSettingsPageContents.tsx
  99. 6 7
      apps/app/src/client/components/PageCreateModal.tsx
  100. 5 0
      apps/app/src/client/components/PageEditor/EditorNavbar/EditingUserList.module.scss

+ 239 - 0
.claude/commands/invest-issue.md

@@ -0,0 +1,239 @@
+---
+name: invest-issue
+description: Investigate a GitHub issue - fetch info, update labels, analyze code/reproduce, report findings, and optionally fix. Usage: /invest-issue <issue-url-or-number>
+---
+
+# /invest-issue — Issue Investigation
+
+Investigate a GROWI GitHub issue end-to-end: fetch details, label it, analyze or reproduce the problem, report findings, and proceed to fix if approved.
+
+## Input
+
+`$ARGUMENTS` is either:
+- A full GitHub issue URL: `https://github.com/growilabs/growi/issues/99999`
+- An issue number: `99999`
+
+Parse the issue number from whichever form is provided.
+
+## Step 1: Fetch Issue Information
+
+Run the following to get full issue details:
+
+```bash
+gh issue view {ISSUE_NUMBER} --repo growilabs/growi --json number,title,body,labels,comments,createdAt,author,url
+```
+
+Extract and display:
+- Title and URL
+- Description (body)
+- Current labels
+- Reported GROWI version (look for version info in the body/comments)
+- Steps to reproduce (if any)
+- Expected vs actual behavior
+
+## Step 2: Update Labels — Mark as Under Investigation
+
+Remove `phase/new` (if present) and add `phase/under-investigation`:
+
+```bash
+# Remove phase/new
+gh issue edit {ISSUE_NUMBER} --repo growilabs/growi --remove-label "phase/new"
+
+# Add phase/under-investigation
+gh issue edit {ISSUE_NUMBER} --repo growilabs/growi --add-label "phase/under-investigation"
+```
+
+If `phase/new` is not present, skip the removal step and only add `phase/under-investigation`.
+
+## Step 3: Analyze the Issue
+
+### 3-A: Version Check
+
+1. Determine the reported GROWI version from the issue body or comments.
+2. Get the current master major version:
+   ```bash
+   cat apps/app/package.json | grep '"version"'
+   ```
+3. If the reported major version matches master's major version → proceed with master-branch analysis.
+4. If the reported major version is **older** than master's major version → **STOP analysis** and ask the user:
+
+   > Reported version is v{X}.x, but master is v{Y}.x.
+   > Would you like me to:
+   > 1. **Check out v{X}.x tag/branch** and analyze on that version
+   > 2. **Continue on master** — the issue may still be relevant
+   > 3. **Close as outdated** — skip analysis
+
+   **Wait for the user's response before continuing to Step 3-B.**
+
+### 3-B: Code Investigation
+
+Search the codebase for relevant code related to the reported symptoms:
+
+- Read error messages, stack traces, or behavioral descriptions carefully.
+- Use Grep and Glob to locate relevant files, functions, and modules.
+- Trace the data/execution flow to find the root cause.
+- Check recent commits for related changes:
+  ```bash
+  git log --oneline -20 -- {relevant-file}
+  ```
+
+### 3-C: Reproduction Attempt (if needed)
+
+If code analysis alone is insufficient to confirm the root cause, attempt reproduction:
+
+1. Start the development server:
+   ```bash
+   turbo run dev
+   ```
+2. Follow the reproduction steps from the issue.
+3. Check browser console and server logs for errors.
+
+### 3-D: Label Update on Confirmation
+
+If the problem is **confirmed** (root cause found in code OR reproduction succeeded):
+
+```bash
+gh issue edit {ISSUE_NUMBER} --repo growilabs/growi --remove-label "phase/under-investigation"
+gh issue edit {ISSUE_NUMBER} --repo growilabs/growi --add-label "phase/confirmed"
+```
+
+## Step 4: Report Findings
+
+> **CRITICAL**: Do NOT modify any source files in this step. Step 4 is analysis and planning only.
+> Implementing code changes before receiving explicit user approval is strictly forbidden.
+
+### 4-A: Report in This Session
+
+Present a clear summary:
+
+```
+## Investigation Results for #{ISSUE_NUMBER}: {TITLE}
+
+**Status**: Confirmed / Unconfirmed / Needs reproduction
+
+### Root Cause
+{Describe what was found — file paths, line numbers, logic errors, etc.}
+
+### Evidence
+{Code snippets, git log entries, or reproduction steps that confirm the finding}
+
+### Fix Plan (not yet implemented)
+{High-level description of the fix approach, if a cause was found.
+List specific files and changes needed, but do NOT apply them yet.}
+```
+
+### 4-B: Post Comment on Issue
+
+Detect the language of the issue body (from Step 1) and write the comment **in the same language**.
+For example, if the issue is written in Japanese, write the comment in Japanese.
+
+Post the findings as a GitHub issue comment:
+
+```bash
+gh issue comment {ISSUE_NUMBER} --repo growilabs/growi --body "$(cat <<'EOF'
+## Investigation Results
+
+**Status**: [Confirmed / Under investigation]
+
+### Root Cause
+{root cause description}
+
+### Evidence
+{relevant code locations, snippets, or reproduction steps}
+
+### Fix Plan
+{fix approach — files and changes needed}
+
+---
+*Investigated by Claude Code*
+EOF
+)"
+```
+
+### 4-C: STOP — Ask for Direction
+
+**STOP HERE. Do not proceed to Step 5 until the user explicitly approves.**
+
+After reporting, ask the user:
+
+> Investigation complete. Root cause [found / not yet confirmed].
+> Would you like me to:
+> 1. **Proceed with the fix** — I'll implement the fix now
+> 2. **Investigate further** — specify what additional analysis is needed
+> 3. **Stop here** — you'll handle the fix manually
+
+**Wait for the user's response before doing anything else.**
+
+## Step 5: Implement the Fix (Only if Approved)
+
+Proceed only after explicit user approval.
+
+### 5-A: Add WIP Label
+
+```bash
+gh issue edit {ISSUE_NUMBER} --repo growilabs/growi --add-label "phase/WIP"
+```
+
+### 5-B: Create a Fix Branch
+
+**Always create a dedicated fix branch before touching any source files.**
+Never commit fixes to `master` or the current branch directly.
+
+Branch naming convention: `fix/{ISSUE_NUMBER}-{short-description}`
+
+```bash
+git checkout -b fix/{ISSUE_NUMBER}-{short-description}
+```
+
+Example: `fix/12345-page-title-overflow`
+
+### 5-C: Implement the Fix
+
+- Make the minimal targeted fix
+- Run lint and tests:
+  ```bash
+  turbo run lint --filter @growi/app
+  turbo run test --filter @growi/app
+  ```
+- Commit with a meaningful message referencing the issue:
+  ```
+  fix(scope): brief description of fix
+
+  Fixes #ISSUE_NUMBER
+  ```
+
+### 5-D: Open a Pull Request
+
+```bash
+gh pr create \
+  --repo growilabs/growi \
+  --title "fix: {brief description}" \
+  --body "$(cat <<'EOF'
+## Summary
+
+{description of the fix}
+
+## Root Cause
+
+{root cause identified during investigation}
+
+## Changes
+
+- {bullet list of changes}
+
+## Test Plan
+
+- [ ] {manual test step 1}
+- [ ] {manual test step 2}
+
+Closes #{ISSUE_NUMBER}
+EOF
+)"
+```
+
+## Error Handling
+
+- If the issue number is invalid or not found: display error from `gh` and stop
+- If `gh` is not authenticated: instruct the user to run `gh auth login`
+- If a label does not exist in the repo: note it in output and skip (don't create new labels)
+- If the dev server fails to start: note this and rely on code analysis only

+ 0 - 307
.claude/commands/kiro/spec-cleanup.md

@@ -1,307 +0,0 @@
----
-description: Organize and clean up specification documents after implementation completion
-allowed-tools: Bash, Glob, Grep, Read, Write, Edit, MultiEdit, Update
-argument-hint: <feature-name>
----
-
-# Specification Cleanup
-
-<background_information>
-- **Mission**: Organize specification documents after implementation completion, removing implementation details while preserving essential context for future refactoring
-- **Success Criteria**:
-  - Implementation details (testing procedures, deployment checklists) removed
-  - Design decisions and constraints preserved in research.md and design.md
-  - Requirements simplified (Acceptance Criteria condensed to summaries)
-  - Unimplemented features removed or documented
-  - Documents remain valuable for future refactoring work
-</background_information>
-
-<instructions>
-## Core Task
-Clean up and organize specification documents for feature **$1** after implementation is complete.
-
-## Organizing Principle
-
-**"Can we read essential context from these spec documents when refactoring this feature months later?"**
-
-- **Keep**: "Why" (design decisions, architectural constraints, limitations, trade-offs)
-- **Remove**: "How" (testing procedures, deployment steps, detailed implementation examples)
-
-## Execution Steps
-
-### Step 1: Load Context
-
-**Discover all spec files**:
-- Use Glob to find all files in `.kiro/specs/$1/` directory
-- Categorize files:
-  - **Core files** (must preserve): `spec.json`, `requirements.md`, `design.md`, `tasks.md`, `research.md`
-  - **Other files** (evaluate case-by-case): validation reports, notes, prototypes, migration guides, etc.
-
-**Read all discovered files**:
-- Read all core files first
-- Read other files to understand their content and value
-
-**Determine target language**:
-- Read `spec.json` and extract the `language` field (e.g., `"ja"`, `"en"`)
-- This is the language ALL spec document content must be written in
-- Note: code comments within code blocks are exempt (must stay in English per project rules)
-
-**Verify implementation status**:
-- Check that tasks are marked complete `[x]` in tasks.md
-- If implementation incomplete, warn user and ask to confirm cleanup
-
-### Step 2: Analyze Current State
-
-**Identify cleanup opportunities**:
-
-1. **Other files** (non-core files like validation-report.md, notes.md, etc.):
-   - Read each file to understand its content and purpose
-   - Identify valuable information that should be preserved:
-     * Implementation discoveries and lessons learned
-     * Critical constraints or design decisions
-     * Historical context for future refactoring
-   - Determine salvage strategy:
-     * Migrate valuable content to research.md or design.md
-     * Keep file if it contains essential reference information
-     * Delete if content is redundant or no longer relevant
-   - **Case-by-case evaluation required** - never assume files should be deleted
-
-2. **research.md**:
-   - Should contain production discoveries and implementation lessons learned
-   - Check if implementation revealed new constraints or patterns to document
-   - Identify content from other files that should be migrated here
-
-3. **requirements.md**:
-   - Identify verbose Acceptance Criteria that can be condensed to summaries
-   - Find unimplemented requirements (compare with tasks.md)
-   - Detect duplicate or redundant content
-
-4. **design.md**:
-   - Identify implementation-specific sections that can be removed:
-     * Detailed Testing Strategy (test procedures)
-     * Security Considerations (if covered in implementation)
-     * Error Handling code examples (if implemented)
-     * Migration Strategy (after migration complete)
-     * Deployment Checklist (after deployment)
-   - Identify sections to preserve:
-     * Architecture diagrams (essential for understanding)
-     * Component interfaces (API contracts)
-     * Design decisions and rationale
-     * Critical implementation constraints
-     * Known limitations
-   - Check if content from other files should be migrated here
-
-5. **Language audit** (compare actual language vs. `spec.json.language`):
-   - For each markdown file, scan prose content (headings, paragraphs, list items) and detect the written language
-   - Flag any file or section whose language does **not** match the target language
-   - Exemptions — do NOT flag:
-     * Content inside fenced code blocks (` ``` `) — code comments must stay in English
-     * Inline code spans (`` `...` ``)
-     * Proper nouns, technical terms, and identifiers that are always written in English
-   - Collect flagged items into a **translation plan**: file name, approximate line range, detected language, and a brief excerpt
-
-### Step 3: Interactive Confirmation
-
-**Present cleanup plan to user**:
-
-For each file and section identified in Step 2, ask:
-- "Should I delete/simplify/keep/salvage this section?"
-- Provide recommendations based on organizing principle
-- Show brief preview of content to aid decision
-
-**Example questions for other files**:
-- "validation-report.md found. Contains {brief summary}. Options:"
-  - "A: Migrate valuable content to research.md, then delete"
-  - "B: Keep as historical reference"
-  - "C: Delete (content no longer needed)"
-- "notes.md found. Contains {brief summary}. Salvage to research.md before deleting? [Y/n]"
-
-**Example questions for core files**:
-- "research.md: Add 'Session N: Production Discoveries' section to document implementation lessons? [Y/n]"
-- "requirements.md: Simplify Acceptance Criteria from detailed bullet points to summary paragraphs? [Y/n]"
-- "requirements.md: Remove unimplemented requirements (e.g., Req 4.4 field masking not implemented)? [Y/n]"
-- "design.md: Delete 'Testing Strategy' section (lines X-Y)? [Y/n]"
-- "design.md: Delete 'Security Considerations' section (lines X-Y)? [Y/n]"
-- "design.md: Keep Architecture diagrams (essential for refactoring)? [Y/n]"
-
-**Translation confirmation** (if language mismatches were found in Step 2):
-- Show summary: "Found content in language(s) other than `{target_language}` in the following files:"
-  - List each flagged file with line range and a short excerpt
-- Ask: "Translate mismatched content to `{target_language}`? [Y/n]"
-  - If Y: translate all flagged sections in Step 4
-  - If n: skip translation (leave files as-is)
-- Note: code blocks are never translated
-
-**Batch similar decisions**:
-- Group related sections (e.g., all "delete implementation details" decisions)
-- Allow user to approve categories rather than individual items
-- Present file-by-file salvage decisions for other files
-
-### Step 4: Execute Cleanup
-
-**For each approved action**:
-
-1. **Salvage and cleanup other files** (if approved):
-   - For each non-core file (validation-report.md, notes.md, etc.):
-     * Extract valuable information (implementation lessons, constraints, decisions)
-     * Migrate content to appropriate core file:
-       - Technical discoveries → research.md
-       - Design constraints → design.md
-       - Requirement clarifications → requirements.md
-     * Delete file after salvage (if approved)
-   - Document salvaged content with source reference (e.g., "From validation-report.md:")
-
-2. **Update research.md** (if new discoveries or salvaged content):
-   - Add new section "Session N: Production Implementation Discoveries" (if needed)
-   - Document critical technical constraints discovered during implementation
-   - Include code examples for critical patterns (e.g., falsy checks, credential preservation)
-   - Integrate salvaged content from other files
-   - Cross-reference requirements.md and design.md where relevant
-
-3. **Simplify requirements.md** (if approved):
-   - Transform detailed Acceptance Criteria into summary paragraphs
-   - Remove unimplemented requirements entirely
-   - Preserve requirement objectives and summaries
-   - Example transformation:
-     ```
-     Before: "1. System shall X... 2. System shall Y... [7 criteria]"
-     After: "**Summary**: System provides X and Y. Configuration includes..."
-     ```
-
-4. **Clean up design.md** (if approved):
-   - Delete approved sections (Testing Strategy, Security Considerations, etc.)
-   - Add "Critical Implementation Constraints" section if implementation revealed new constraints
-   - Integrate salvaged content from other files (if relevant)
-   - Preserve architecture diagrams and component interfaces
-   - Keep design decisions and rationale sections
-
-5. **Translate language-mismatched content** (if approved):
-   - For each flagged file and section, translate prose content to the target language
-   - **Never translate**: content inside fenced code blocks or inline code spans
-   - Preserve all Markdown formatting (headings, bold, lists, links, etc.)
-   - After translation, verify the overall document reads naturally in the target language
-   - Document translated files in the cleanup summary
-
-6. **Update spec.json metadata**:
-   - Set `phase: "implementation-complete"` (if not already set)
-   - Add `cleanup_completed: true` flag
-   - Update `updated_at` timestamp
-
-### Step 5: Generate Cleanup Summary
-
-**Provide summary report**:
-- List of files modified/deleted
-- Sections removed and lines saved
-- Critical information preserved
-- Recommendations for future refactoring
-
-**Format**:
-```markdown
-## Cleanup Summary for {feature-name}
-
-### Files Modified
-- ✅ validation-report.md: Salvaged to research.md, then deleted (730 lines removed)
-- ✅ notes.md: Salvaged to design.md, then deleted (120 lines removed)
-- ✅ research.md: Added Session 2 discoveries + salvaged content (180 lines added)
-- ✅ requirements.md: Simplified 6 requirements (350 lines → 180 lines)
-- ✅ design.md: Removed 4 sections, added constraints + salvaged content (250 lines removed, 100 added)
-- ✅ requirements.md: Translated mismatched sections to {target_language}
-
-### Information Salvaged
-- Implementation discoveries from validation-report.md → research.md
-- Design notes from notes.md → design.md
-- Historical context preserved with source attribution
-
-### Information Preserved
-- Architecture diagrams and component interfaces
-- Design decisions and rationale
-- Critical implementation constraints
-- Known limitations and trade-offs
-
-### Next Steps
-- Spec documents ready for future refactoring reference
-- Consider creating knowledge base entry if pattern is reusable
-```
-
-## Critical Constraints
-
-- **User approval required**: Never delete content without explicit confirmation
-- **Language consistency**: All prose content must be written in the language specified in `spec.json.language`; translate any mismatched sections (code blocks exempt)
-- **Preserve history**: Don't delete discovery rationale or design decisions
-- **Balance brevity with completeness**: Remove redundancy but keep essential context
-- **Interactive workflow**: Pause for user input rather than making assumptions
-
-## Tool Guidance
-
-- **Glob**: Discover all files in `.kiro/specs/{feature}/` directory
-- **Read**: Load all discovered files for analysis
-- **Grep**: Search for patterns (e.g., unimplemented requirements, completed tasks)
-- **Edit/Write**: Update files based on approved changes, salvage content
-- **Bash**: Delete files after salvage (if approved)
-- **MultiEdit**: For batch edits across multiple sections
-
-## Output Description
-
-Provide cleanup plan and execution report in the language specified in spec.json.
-
-**Report Structure**:
-1. **Current State Analysis**: What needs cleanup and why
-2. **Cleanup Plan**: Proposed changes with recommendations
-3. **Confirmation Prompts**: Interactive questions for user approval
-4. **Execution Summary**: What was changed and why
-5. **Preserved Context**: What critical information remains for future refactoring
-
-**Format**: Clear, scannable format with sections and bullet points
-
-## Safety & Fallback
-
-### Error Scenarios
-
-**Implementation Incomplete**:
-- **Condition**: Less than 90% of tasks marked `[x]` in tasks.md
-- **Action**: Warn user: "Implementation appears incomplete (X/Y tasks done). Continue cleanup? [y/N]"
-- **Recommendation**: Wait until implementation complete before cleanup
-
-**Spec Not Found**:
-- **Message**: "No spec found for `$1`. Check available specs in `.kiro/specs/`"
-- **Action**: List available spec directories
-
-**Missing Critical Files**:
-- **Condition**: requirements.md or design.md missing
-- **Action**: Skip cleanup for missing files, proceed with available files
-- **Warning**: "requirements.md missing - cannot simplify requirements"
-
-### Dry Run Mode (Future Enhancement)
-
-**If `-n` or `--dry-run` flag provided**:
-- Show cleanup plan without executing changes
-- Allow user to review before committing to cleanup
-
-### Backup Recommendation
-
-**Before cleanup**:
-- Recommend user create git commit or backup
-- Warning: "This will modify spec files. Commit current state first? [Y/n]"
-
-### Undo Support
-
-**If cleanup goes wrong**:
-- Use git to restore previous state: `git checkout HEAD -- .kiro/specs/{feature}/`
-- Remind user to commit before cleanup for easy rollback
-
-## Example Usage
-
-```bash
-# Basic cleanup after implementation
-/kiro:spec-cleanup oauth2-email-support
-
-# With conversation context about implementation discoveries
-# Command will prompt for Session N discoveries to document
-/kiro:spec-cleanup user-authentication
-```
-
-## Related Commands
-
-- `/kiro:spec-impl {feature}` - Implement tasks (run before cleanup)
-- `/kiro:validate-impl {feature}` - Validate implementation (run before cleanup)
-- `/kiro:spec-status {feature}` - Check implementation status

+ 0 - 179
.claude/commands/kiro/spec-design.md

@@ -1,179 +0,0 @@
----
-description: Create comprehensive technical design for a specification
-allowed-tools: Bash, Glob, Grep, LS, Read, Write, Edit, MultiEdit, Update, WebSearch, WebFetch
-argument-hint: <feature-name> [-y]
----
-
-# Technical Design Generator
-
-<background_information>
-- **Mission**: Generate comprehensive technical design document that translates requirements (WHAT) into architectural design (HOW)
-- **Success Criteria**:
-  - All requirements mapped to technical components with clear interfaces
-  - Appropriate architecture discovery and research completed
-  - Design aligns with steering context and existing patterns
-  - Visual diagrams included for complex architectures
-</background_information>
-
-<instructions>
-## Core Task
-Generate technical design document for feature **$1** based on approved requirements.
-
-## Execution Steps
-
-### Step 1: Load Context
-
-**Read all necessary context**:
-- `.kiro/specs/$1/spec.json`, `requirements.md`, `design.md` (if exists)
-- **Entire `.kiro/steering/` directory** for complete project memory
-- `.kiro/settings/templates/specs/design.md` for document structure
-- `.kiro/settings/rules/design-principles.md` for design principles
-- `.kiro/settings/templates/specs/research.md` for discovery log structure
-
-**Validate requirements approval**:
-- If `-y` flag provided ($2 == "-y"): Auto-approve requirements in spec.json
-- Otherwise: Verify approval status (stop if unapproved, see Safety & Fallback)
-
-### Step 2: Discovery & Analysis
-
-**Critical: This phase ensures design is based on complete, accurate information.**
-
-1. **Classify Feature Type**:
-   - **New Feature** (greenfield) → Full discovery required
-   - **Extension** (existing system) → Integration-focused discovery
-   - **Simple Addition** (CRUD/UI) → Minimal or no discovery
-   - **Complex Integration** → Comprehensive analysis required
-
-2. **Execute Appropriate Discovery Process**:
-   
-   **For Complex/New Features**:
-   - Read and execute `.kiro/settings/rules/design-discovery-full.md`
-   - Conduct thorough research using WebSearch/WebFetch:
-     - Latest architectural patterns and best practices
-     - External dependency verification (APIs, libraries, versions, compatibility)
-     - Official documentation, migration guides, known issues
-     - Performance benchmarks and security considerations
-   
-   **For Extensions**:
-   - Read and execute `.kiro/settings/rules/design-discovery-light.md`
-   - Focus on integration points, existing patterns, compatibility
-   - Use Grep to analyze existing codebase patterns
-   
-   **For Simple Additions**:
-   - Skip formal discovery, quick pattern check only
-
-3. **Retain Discovery Findings for Step 3**:
-- External API contracts and constraints
-- Technology decisions with rationale
-- Existing patterns to follow or extend
-- Integration points and dependencies
-- Identified risks and mitigation strategies
-- Potential architecture patterns and boundary options (note details in `research.md`)
-- Parallelization considerations for future tasks (capture dependencies in `research.md`)
-
-4. **Persist Findings to Research Log**:
-- Create or update `.kiro/specs/$1/research.md` using the shared template
-- Summarize discovery scope and key findings (Summary section)
-- Record investigations in Research Log topics with sources and implications
-- Document architecture pattern evaluation, design decisions, and risks using the template sections
-- Use the language specified in spec.json when writing or updating `research.md`
-
-### Step 3: Generate Design Document
-
-1. **Load Design Template and Rules**:
-- Read `.kiro/settings/templates/specs/design.md` for structure
-- Read `.kiro/settings/rules/design-principles.md` for principles
-
-2. **Generate Design Document**:
-- **Follow specs/design.md template structure and generation instructions strictly**
-- **Integrate all discovery findings**: Use researched information (APIs, patterns, technologies) throughout component definitions, architecture decisions, and integration points
-- If existing design.md found in Step 1, use it as reference context (merge mode)
-- Apply design rules: Type Safety, Visual Communication, Formal Tone
-- Use language specified in spec.json
-- Ensure sections reflect updated headings ("Architecture Pattern & Boundary Map", "Technology Stack & Alignment", "Components & Interface Contracts") and reference supporting details from `research.md`
-
-3. **Update Metadata** in spec.json:
-- Set `phase: "design-generated"`
-- Set `approvals.design.generated: true, approved: false`
-- Set `approvals.requirements.approved: true`
-- Update `updated_at` timestamp
-
-## Critical Constraints
- - **Type Safety**:
-   - Enforce strong typing aligned with the project's technology stack.
-   - For statically typed languages, define explicit types/interfaces and avoid unsafe casts.
-   - For TypeScript, never use `any`; prefer precise types and generics.
-   - For dynamically typed languages, provide type hints/annotations where available (e.g., Python type hints) and validate inputs at boundaries.
-   - Document public interfaces and contracts clearly to ensure cross-component type safety.
-- **Latest Information**: Use WebSearch/WebFetch for external dependencies and best practices
-- **Steering Alignment**: Respect existing architecture patterns from steering context
-- **Template Adherence**: Follow specs/design.md template structure and generation instructions strictly
-- **Design Focus**: Architecture and interfaces ONLY, no implementation code
-- **Requirements Traceability IDs**: Use numeric requirement IDs only (e.g. "1.1", "1.2", "3.1", "3.3") exactly as defined in requirements.md. Do not invent new IDs or use alphabetic labels.
-</instructions>
-
-## Tool Guidance
-- **Read first**: Load all context before taking action (specs, steering, templates, rules)
-- **Research when uncertain**: Use WebSearch/WebFetch for external dependencies, APIs, and latest best practices
-- **Analyze existing code**: Use Grep to find patterns and integration points in codebase
-- **Write last**: Generate design.md only after all research and analysis complete
-
-## Output Description
-
-**Command execution output** (separate from design.md content):
-
-Provide brief summary in the language specified in spec.json:
-
-1. **Status**: Confirm design document generated at `.kiro/specs/$1/design.md`
-2. **Discovery Type**: Which discovery process was executed (full/light/minimal)
-3. **Key Findings**: 2-3 critical insights from `research.md` that shaped the design
-4. **Next Action**: Approval workflow guidance (see Safety & Fallback)
-5. **Research Log**: Confirm `research.md` updated with latest decisions
-
-**Format**: Concise Markdown (under 200 words) - this is the command output, NOT the design document itself
-
-**Note**: The actual design document follows `.kiro/settings/templates/specs/design.md` structure.
-
-## Safety & Fallback
-
-### Error Scenarios
-
-**Requirements Not Approved**:
-- **Stop Execution**: Cannot proceed without approved requirements
-- **User Message**: "Requirements not yet approved. Approval required before design generation."
-- **Suggested Action**: "Run `/kiro:spec-design $1 -y` to auto-approve requirements and proceed"
-
-**Missing Requirements**:
-- **Stop Execution**: Requirements document must exist
-- **User Message**: "No requirements.md found at `.kiro/specs/$1/requirements.md`"
-- **Suggested Action**: "Run `/kiro:spec-requirements $1` to generate requirements first"
-
-**Template Missing**:
-- **User Message**: "Template file missing at `.kiro/settings/templates/specs/design.md`"
-- **Suggested Action**: "Check repository setup or restore template file"
-- **Fallback**: Use inline basic structure with warning
-
-**Steering Context Missing**:
-- **Warning**: "Steering directory empty or missing - design may not align with project standards"
-- **Proceed**: Continue with generation but note limitation in output
-
-**Discovery Complexity Unclear**:
-- **Default**: Use full discovery process (`.kiro/settings/rules/design-discovery-full.md`)
-- **Rationale**: Better to over-research than miss critical context
-- **Invalid Requirement IDs**:
-  - **Stop Execution**: If requirements.md is missing numeric IDs or uses non-numeric headings (for example, "Requirement A"), stop and instruct the user to fix requirements.md before continuing.
-
-### Next Phase: Task Generation
-
-**If Design Approved**:
-- Review generated design at `.kiro/specs/$1/design.md`
-- **Optional**: Run `/kiro:validate-design $1` for interactive quality review
-- Then `/kiro:spec-tasks $1 -y` to generate implementation tasks
-
-**If Modifications Needed**:
-- Provide feedback and re-run `/kiro:spec-design $1`
-- Existing design used as reference (merge mode)
-
-**Note**: Design approval is mandatory before proceeding to task generation.
-
-think hard

+ 0 - 110
.claude/commands/kiro/spec-impl.md

@@ -1,110 +0,0 @@
----
-description: Execute spec tasks using TDD methodology
-allowed-tools: Bash, Read, Write, Edit, MultiEdit, Grep, Glob, LS, WebFetch, WebSearch
-argument-hint: <feature-name> [task-numbers]
----
-
-# Implementation Task Executor
-
-<background_information>
-- **Mission**: Execute implementation tasks using Test-Driven Development methodology based on approved specifications
-- **Success Criteria**:
-  - All tests written before implementation code
-  - Code passes all tests with no regressions
-  - Tasks marked as completed in tasks.md
-  - Implementation aligns with design and requirements
-</background_information>
-
-<instructions>
-## Core Task
-Execute implementation tasks for feature **$1** using Test-Driven Development.
-
-## Execution Steps
-
-### Step 1: Load Context
-
-**Read all necessary context**:
-- `.kiro/specs/$1/spec.json`, `requirements.md`, `design.md`, `tasks.md`
-- **Entire `.kiro/steering/` directory** for complete project memory
-
-**Validate approvals**:
-- Verify tasks are approved in spec.json (stop if not, see Safety & Fallback)
-
-### Step 2: Select Tasks
-
-**Determine which tasks to execute**:
-- If `$2` provided: Execute specified task numbers (e.g., "1.1" or "1,2,3")
-- Otherwise: Execute all pending tasks (unchecked `- [ ]` in tasks.md)
-
-### Step 3: Execute with TDD
-
-For each selected task, follow Kent Beck's TDD cycle:
-
-1. **RED - Write Failing Test**:
-   - Write test for the next small piece of functionality
-   - Test should fail (code doesn't exist yet)
-   - Use descriptive test names
-
-2. **GREEN - Write Minimal Code**:
-   - Implement simplest solution to make test pass
-   - Focus only on making THIS test pass
-   - Avoid over-engineering
-
-3. **REFACTOR - Clean Up**:
-   - Improve code structure and readability
-   - Remove duplication
-   - Apply design patterns where appropriate
-   - Ensure all tests still pass after refactoring
-
-4. **VERIFY - Validate Quality**:
-   - All tests pass (new and existing)
-   - No regressions in existing functionality
-   - Code coverage maintained or improved
-
-5. **MARK COMPLETE**:
-   - Update checkbox from `- [ ]` to `- [x]` in tasks.md
-
-## Critical Constraints
-- **TDD Mandatory**: Tests MUST be written before implementation code
-- **Task Scope**: Implement only what the specific task requires
-- **Test Coverage**: All new code must have tests
-- **No Regressions**: Existing tests must continue to pass
-- **Design Alignment**: Implementation must follow design.md specifications
-</instructions>
-
-## Tool Guidance
-- **Read first**: Load all context before implementation
-- **Test first**: Write tests before code
-- Use **WebSearch/WebFetch** for library documentation when needed
-
-## Output Description
-
-Provide brief summary in the language specified in spec.json:
-
-1. **Tasks Executed**: Task numbers and test results
-2. **Status**: Completed tasks marked in tasks.md, remaining tasks count
-
-**Format**: Concise (under 150 words)
-
-## Safety & Fallback
-
-### Error Scenarios
-
-**Tasks Not Approved or Missing Spec Files**:
-- **Stop Execution**: All spec files must exist and tasks must be approved
-- **Suggested Action**: "Complete previous phases: `/kiro:spec-requirements`, `/kiro:spec-design`, `/kiro:spec-tasks`"
-
-**Test Failures**:
-- **Stop Implementation**: Fix failing tests before continuing
-- **Action**: Debug and fix, then re-run
-
-### Task Execution
-
-**Execute specific task(s)**:
-- `/kiro:spec-impl $1 1.1` - Single task
-- `/kiro:spec-impl $1 1,2,3` - Multiple tasks
-
-**Execute all pending**:
-- `/kiro:spec-impl $1` - All unchecked tasks
-
-think

+ 0 - 98
.claude/commands/kiro/spec-requirements.md

@@ -1,98 +0,0 @@
----
-description: Generate comprehensive requirements for a specification
-allowed-tools: Bash, Glob, Grep, LS, Read, Write, Edit, MultiEdit, Update, WebSearch, WebFetch
-argument-hint: <feature-name>
----
-
-# Requirements Generation
-
-<background_information>
-- **Mission**: Generate comprehensive, testable requirements in EARS format based on the project description from spec initialization
-- **Success Criteria**:
-  - Create complete requirements document aligned with steering context
-  - Follow the project's EARS patterns and constraints for all acceptance criteria
-  - Focus on core functionality without implementation details
-  - Update metadata to track generation status
-</background_information>
-
-<instructions>
-## Core Task
-Generate complete requirements for feature **$1** based on the project description in requirements.md.
-
-## Execution Steps
-
-1. **Load Context**:
-   - Read `.kiro/specs/$1/spec.json` for language and metadata
-   - Read `.kiro/specs/$1/requirements.md` for project description
-   - **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
-     - Default files: `structure.md`, `tech.md`, `product.md`
-     - All custom steering files (regardless of mode settings)
-     - This provides complete project memory and context
-
-2. **Read Guidelines**:
-   - Read `.kiro/settings/rules/ears-format.md` for EARS syntax rules
-   - Read `.kiro/settings/templates/specs/requirements.md` for document structure
-
-3. **Generate Requirements**:
-   - Create initial requirements based on project description
-   - Group related functionality into logical requirement areas
-   - Apply EARS format to all acceptance criteria
-   - Use language specified in spec.json
-
-4. **Update Metadata**:
-   - Set `phase: "requirements-generated"`
-   - Set `approvals.requirements.generated: true`
-   - Update `updated_at` timestamp
-
-## Important Constraints
-- Focus on WHAT, not HOW (no implementation details)
-- Requirements must be testable and verifiable
-- Choose appropriate subject for EARS statements (system/service name for software)
-- Generate initial version first, then iterate with user feedback (no sequential questions upfront)
-- Requirement headings in requirements.md MUST include a leading numeric ID only (for example: "Requirement 1", "1.", "2 Feature ..."); do not use alphabetic IDs like "Requirement A".
-</instructions>
-
-## Tool Guidance
-- **Read first**: Load all context (spec, steering, rules, templates) before generation
-- **Write last**: Update requirements.md only after complete generation
-- Use **WebSearch/WebFetch** only if external domain knowledge needed
-
-## Output Description
-Provide output in the language specified in spec.json with:
-
-1. **Generated Requirements Summary**: Brief overview of major requirement areas (3-5 bullets)
-2. **Document Status**: Confirm requirements.md updated and spec.json metadata updated
-3. **Next Steps**: Guide user on how to proceed (approve and continue, or modify)
-
-**Format Requirements**:
-- Use Markdown headings for clarity
-- Include file paths in code blocks
-- Keep summary concise (under 300 words)
-
-## Safety & Fallback
-
-### Error Scenarios
-- **Missing Project Description**: If requirements.md lacks project description, ask user for feature details
-- **Ambiguous Requirements**: Propose initial version and iterate with user rather than asking many upfront questions
-- **Template Missing**: If template files don't exist, use inline fallback structure with warning
-- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
-- **Incomplete Requirements**: After generation, explicitly ask user if requirements cover all expected functionality
-- **Steering Directory Empty**: Warn user that project context is missing and may affect requirement quality
-- **Non-numeric Requirement Headings**: If existing headings do not include a leading numeric ID (for example, they use "Requirement A"), normalize them to numeric IDs and keep that mapping consistent (never mix numeric and alphabetic labels).
-
-### Next Phase: Design Generation
-
-**If Requirements Approved**:
-- Review generated requirements at `.kiro/specs/$1/requirements.md`
-- **Optional Gap Analysis** (for existing codebases):
-  - Run `/kiro:validate-gap $1` to analyze implementation gap with current code
-  - Identifies existing components, integration points, and implementation strategy
-  - Recommended for brownfield projects; skip for greenfield
-- Then `/kiro:spec-design $1 -y` to proceed to design phase
-
-**If Modifications Needed**:
-- Provide feedback and re-run `/kiro:spec-requirements $1`
-
-**Note**: Approval is mandatory before proceeding to design phase.
-
-think

+ 0 - 87
.claude/commands/kiro/spec-status.md

@@ -1,87 +0,0 @@
----
-description: Show specification status and progress
-allowed-tools: Bash, Read, Glob, Write, Edit, MultiEdit, Update
-argument-hint: <feature-name>
----
-
-# Specification Status
-
-<background_information>
-- **Mission**: Display comprehensive status and progress for a specification
-- **Success Criteria**:
-  - Show current phase and completion status
-  - Identify next actions and blockers
-  - Provide clear visibility into progress
-</background_information>
-
-<instructions>
-## Core Task
-Generate status report for feature **$1** showing progress across all phases.
-
-## Execution Steps
-
-### Step 1: Load Spec Context
-- Read `.kiro/specs/$1/spec.json` for metadata and phase status
-- Read existing files: `requirements.md`, `design.md`, `tasks.md` (if they exist)
-- Check `.kiro/specs/$1/` directory for available files
-
-### Step 2: Analyze Status
-
-**Parse each phase**:
-- **Requirements**: Count requirements and acceptance criteria
-- **Design**: Check for architecture, components, diagrams
-- **Tasks**: Count completed vs total tasks (parse `- [x]` vs `- [ ]`)
-- **Approvals**: Check approval status in spec.json
-
-### Step 3: Generate Report
-
-Create report in the language specified in spec.json covering:
-1. **Current Phase & Progress**: Where the spec is in the workflow
-2. **Completion Status**: Percentage complete for each phase
-3. **Task Breakdown**: If tasks exist, show completed/remaining counts
-4. **Next Actions**: What needs to be done next
-5. **Blockers**: Any issues preventing progress
-
-## Critical Constraints
-- Use language from spec.json
-- Calculate accurate completion percentages
-- Identify specific next action commands
-</instructions>
-
-## Tool Guidance
-- **Read**: Load spec.json first, then other spec files as needed
-- **Parse carefully**: Extract completion data from tasks.md checkboxes
-- Use **Glob** to check which spec files exist
-
-## Output Description
-
-Provide status report in the language specified in spec.json:
-
-**Report Structure**:
-1. **Feature Overview**: Name, phase, last updated
-2. **Phase Status**: Requirements, Design, Tasks with completion %
-3. **Task Progress**: If tasks exist, show X/Y completed
-4. **Next Action**: Specific command to run next
-5. **Issues**: Any blockers or missing elements
-
-**Format**: Clear, scannable format with emojis (✅/⏳/❌) for status
-
-## Safety & Fallback
-
-### Error Scenarios
-
-**Spec Not Found**:
-- **Message**: "No spec found for `$1`. Check available specs in `.kiro/specs/`"
-- **Action**: List available spec directories
-
-**Incomplete Spec**:
-- **Warning**: Identify which files are missing
-- **Suggested Action**: Point to next phase command
-
-### List All Specs
-
-To see all available specs:
-- Run with no argument or use wildcard
-- Shows all specs in `.kiro/specs/` with their status
-
-think

+ 0 - 138
.claude/commands/kiro/spec-tasks.md

@@ -1,138 +0,0 @@
----
-description: Generate implementation tasks for a specification
-allowed-tools: Read, Write, Edit, MultiEdit, Glob, Grep
-argument-hint: <feature-name> [-y] [--sequential]
----
-
-# Implementation Tasks Generator
-
-<background_information>
-- **Mission**: Generate detailed, actionable implementation tasks that translate technical design into executable work items
-- **Success Criteria**:
-  - All requirements mapped to specific tasks
-  - Tasks properly sized (1-3 hours each)
-  - Clear task progression with proper hierarchy
-  - Natural language descriptions focused on capabilities
-</background_information>
-
-<instructions>
-## Core Task
-Generate implementation tasks for feature **$1** based on approved requirements and design.
-
-## Execution Steps
-
-### Step 1: Load Context
-
-**Read all necessary context**:
-- `.kiro/specs/$1/spec.json`, `requirements.md`, `design.md`
-- `.kiro/specs/$1/tasks.md` (if exists, for merge mode)
-- **Entire `.kiro/steering/` directory** for complete project memory
-
-**Validate approvals**:
-- If `-y` flag provided ($2 == "-y"): Auto-approve requirements and design in spec.json
-- Otherwise: Verify both approved (stop if not, see Safety & Fallback)
-- Determine sequential mode based on presence of `--sequential`
-
-### Step 2: Generate Implementation Tasks
-
-**Load generation rules and template**:
-- Read `.kiro/settings/rules/tasks-generation.md` for principles
-- If `sequential` is **false**: Read `.kiro/settings/rules/tasks-parallel-analysis.md` for parallel judgement criteria
-- Read `.kiro/settings/templates/specs/tasks.md` for format (supports `(P)` markers)
-
-**Generate task list following all rules**:
-- Use language specified in spec.json
-- Map all requirements to tasks
-- When documenting requirement coverage, list numeric requirement IDs only (comma-separated) without descriptive suffixes, parentheses, translations, or free-form labels
-- Ensure all design components included
-- Verify task progression is logical and incremental
-- Collapse single-subtask structures by promoting them to major tasks and avoid duplicating details on container-only major tasks (use template patterns accordingly)
-- Apply `(P)` markers to tasks that satisfy parallel criteria (omit markers in sequential mode)
-- Mark optional test coverage subtasks with `- [ ]*` only when they strictly cover acceptance criteria already satisfied by core implementation and can be deferred post-MVP
-- If existing tasks.md found, merge with new content
-
-### Step 3: Finalize
-
-**Write and update**:
-- Create/update `.kiro/specs/$1/tasks.md`
-- Update spec.json metadata:
-  - Set `phase: "tasks-generated"`
-  - Set `approvals.tasks.generated: true, approved: false`
-  - Set `approvals.requirements.approved: true`
-  - Set `approvals.design.approved: true`
-  - Update `updated_at` timestamp
-
-## Critical Constraints
-- **Follow rules strictly**: All principles in tasks-generation.md are mandatory
-- **Natural Language**: Describe what to do, not code structure details
-- **Complete Coverage**: ALL requirements must map to tasks
-- **Maximum 2 Levels**: Major tasks and sub-tasks only (no deeper nesting)
-- **Sequential Numbering**: Major tasks increment (1, 2, 3...), never repeat
-- **Task Integration**: Every task must connect to the system (no orphaned work)
-</instructions>
-
-## Tool Guidance
-- **Read first**: Load all context, rules, and templates before generation
-- **Write last**: Generate tasks.md only after complete analysis and verification
-
-## Output Description
-
-Provide brief summary in the language specified in spec.json:
-
-1. **Status**: Confirm tasks generated at `.kiro/specs/$1/tasks.md`
-2. **Task Summary**: 
-   - Total: X major tasks, Y sub-tasks
-   - All Z requirements covered
-   - Average task size: 1-3 hours per sub-task
-3. **Quality Validation**:
-   - ✅ All requirements mapped to tasks
-   - ✅ Task dependencies verified
-   - ✅ Testing tasks included
-4. **Next Action**: Review tasks and proceed when ready
-
-**Format**: Concise (under 200 words)
-
-## Safety & Fallback
-
-### Error Scenarios
-
-**Requirements or Design Not Approved**:
-- **Stop Execution**: Cannot proceed without approved requirements and design
-- **User Message**: "Requirements and design must be approved before task generation"
-- **Suggested Action**: "Run `/kiro:spec-tasks $1 -y` to auto-approve both and proceed"
-
-**Missing Requirements or Design**:
-- **Stop Execution**: Both documents must exist
-- **User Message**: "Missing requirements.md or design.md at `.kiro/specs/$1/`"
-- **Suggested Action**: "Complete requirements and design phases first"
-
-**Incomplete Requirements Coverage**:
-- **Warning**: "Not all requirements mapped to tasks. Review coverage."
-- **User Action Required**: Confirm intentional gaps or regenerate tasks
-
-**Template/Rules Missing**:
-- **User Message**: "Template or rules files missing in `.kiro/settings/`"
-- **Fallback**: Use inline basic structure with warning
-- **Suggested Action**: "Check repository setup or restore template files"
-- **Missing Numeric Requirement IDs**:
-  - **Stop Execution**: All requirements in requirements.md MUST have numeric IDs. If any requirement lacks a numeric ID, stop and request that requirements.md be fixed before generating tasks.
-
-### Next Phase: Implementation
-
-**Before Starting Implementation**:
-- **IMPORTANT**: Clear conversation history and free up context before running `/kiro:spec-impl`
-- This applies when starting first task OR switching between tasks
-- Fresh context ensures clean state and proper task focus
-
-**If Tasks Approved**:
-- Execute specific task: `/kiro:spec-impl $1 1.1` (recommended: clear context between each task)
-- Execute multiple tasks: `/kiro:spec-impl $1 1.1,1.2` (use cautiously, clear context between tasks)
-- Without arguments: `/kiro:spec-impl $1` (executes all pending tasks - NOT recommended due to context bloat)
-
-**If Modifications Needed**:
-- Provide feedback and re-run `/kiro:spec-tasks $1`
-- Existing tasks used as reference (merge mode)
-
-**Note**: The implementation phase will guide you through executing tasks with appropriate context and validation.
-
-think

+ 0 - 92
.claude/commands/kiro/validate-design.md

@@ -1,92 +0,0 @@
----
-description: Interactive technical design quality review and validation
-allowed-tools: Read, Glob, Grep
-argument-hint: <feature-name>
----
-
-# Technical Design Validation
-
-<background_information>
-- **Mission**: Conduct interactive quality review of technical design to ensure readiness for implementation
-- **Success Criteria**:
-  - Critical issues identified (maximum 3 most important concerns)
-  - Balanced assessment with strengths recognized
-  - Clear GO/NO-GO decision with rationale
-  - Actionable feedback for improvements if needed
-</background_information>
-
-<instructions>
-## Core Task
-Interactive design quality review for feature **$1** based on approved requirements and design document.
-
-## Execution Steps
-
-1. **Load Context**:
-   - Read `.kiro/specs/$1/spec.json` for language and metadata
-   - Read `.kiro/specs/$1/requirements.md` for requirements
-   - Read `.kiro/specs/$1/design.md` for design document
-   - **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
-     - Default files: `structure.md`, `tech.md`, `product.md`
-     - All custom steering files (regardless of mode settings)
-     - This provides complete project memory and context
-
-2. **Read Review Guidelines**:
-   - Read `.kiro/settings/rules/design-review.md` for review criteria and process
-
-3. **Execute Design Review**:
-   - Follow design-review.md process: Analysis → Critical Issues → Strengths → GO/NO-GO
-   - Limit to 3 most important concerns
-   - Engage interactively with user
-   - Use language specified in spec.json for output
-
-4. **Provide Decision and Next Steps**:
-   - Clear GO/NO-GO decision with rationale
-   - Guide user on proceeding based on decision
-
-## Important Constraints
-- **Quality assurance, not perfection seeking**: Accept acceptable risk
-- **Critical focus only**: Maximum 3 issues, only those significantly impacting success
-- **Interactive approach**: Engage in dialogue, not one-way evaluation
-- **Balanced assessment**: Recognize both strengths and weaknesses
-- **Actionable feedback**: All suggestions must be implementable
-</instructions>
-
-## Tool Guidance
-- **Read first**: Load all context (spec, steering, rules) before review
-- **Grep if needed**: Search codebase for pattern validation or integration checks
-- **Interactive**: Engage with user throughout the review process
-
-## Output Description
-Provide output in the language specified in spec.json with:
-
-1. **Review Summary**: Brief overview (2-3 sentences) of design quality and readiness
-2. **Critical Issues**: Maximum 3, following design-review.md format
-3. **Design Strengths**: 1-2 positive aspects
-4. **Final Assessment**: GO/NO-GO decision with rationale and next steps
-
-**Format Requirements**:
-- Use Markdown headings for clarity
-- Follow design-review.md output format
-- Keep summary concise
-
-## Safety & Fallback
-
-### Error Scenarios
-- **Missing Design**: If design.md doesn't exist, stop with message: "Run `/kiro:spec-design $1` first to generate design document"
-- **Design Not Generated**: If design phase not marked as generated in spec.json, warn but proceed with review
-- **Empty Steering Directory**: Warn user that project context is missing and may affect review quality
-- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
-
-### Next Phase: Task Generation
-
-**If Design Passes Validation (GO Decision)**:
-- Review feedback and apply changes if needed
-- Run `/kiro:spec-tasks $1` to generate implementation tasks
-- Or `/kiro:spec-tasks $1 -y` to auto-approve and proceed directly
-
-**If Design Needs Revision (NO-GO Decision)**:
-- Address critical issues identified
-- Re-run `/kiro:spec-design $1` with improvements
-- Re-validate with `/kiro:validate-design $1`
-
-**Note**: Design validation is recommended but optional. Quality review helps catch issues early.

+ 0 - 88
.claude/commands/kiro/validate-gap.md

@@ -1,88 +0,0 @@
----
-description: Analyze implementation gap between requirements and existing codebase
-allowed-tools: Bash, Glob, Grep, Read, Write, Edit, MultiEdit, WebSearch, WebFetch
-argument-hint: <feature-name>
----
-
-# Implementation Gap Validation
-
-<background_information>
-- **Mission**: Analyze the gap between requirements and existing codebase to inform implementation strategy
-- **Success Criteria**:
-  - Comprehensive understanding of existing codebase patterns and components
-  - Clear identification of missing capabilities and integration challenges
-  - Multiple viable implementation approaches evaluated
-  - Technical research needs identified for design phase
-</background_information>
-
-<instructions>
-## Core Task
-Analyze implementation gap for feature **$1** based on approved requirements and existing codebase.
-
-## Execution Steps
-
-1. **Load Context**:
-   - Read `.kiro/specs/$1/spec.json` for language and metadata
-   - Read `.kiro/specs/$1/requirements.md` for requirements
-   - **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
-     - Default files: `structure.md`, `tech.md`, `product.md`
-     - All custom steering files (regardless of mode settings)
-     - This provides complete project memory and context
-
-2. **Read Analysis Guidelines**:
-   - Read `.kiro/settings/rules/gap-analysis.md` for comprehensive analysis framework
-
-3. **Execute Gap Analysis**:
-   - Follow gap-analysis.md framework for thorough investigation
-   - Analyze existing codebase using Grep and Read tools
-   - Use WebSearch/WebFetch for external dependency research if needed
-   - Evaluate multiple implementation approaches (extend/new/hybrid)
-   - Use language specified in spec.json for output
-
-4. **Generate Analysis Document**:
-   - Create comprehensive gap analysis following the output guidelines in gap-analysis.md
-   - Present multiple viable options with trade-offs
-   - Flag areas requiring further research
-
-## Important Constraints
-- **Information over Decisions**: Provide analysis and options, not final implementation choices
-- **Multiple Options**: Present viable alternatives when applicable
-- **Thorough Investigation**: Use tools to deeply understand existing codebase
-- **Explicit Gaps**: Clearly flag areas needing research or investigation
-</instructions>
-
-## Tool Guidance
-- **Read first**: Load all context (spec, steering, rules) before analysis
-- **Grep extensively**: Search codebase for patterns, conventions, and integration points
-- **WebSearch/WebFetch**: Research external dependencies and best practices when needed
-- **Write last**: Generate analysis only after complete investigation
-
-## Output Description
-Provide output in the language specified in spec.json with:
-
-1. **Analysis Summary**: Brief overview (3-5 bullets) of scope, challenges, and recommendations
-2. **Document Status**: Confirm analysis approach used
-3. **Next Steps**: Guide user on proceeding to design phase
-
-**Format Requirements**:
-- Use Markdown headings for clarity
-- Keep summary concise (under 300 words)
-- Detailed analysis follows gap-analysis.md output guidelines
-
-## Safety & Fallback
-
-### Error Scenarios
-- **Missing Requirements**: If requirements.md doesn't exist, stop with message: "Run `/kiro:spec-requirements $1` first to generate requirements"
-- **Requirements Not Approved**: If requirements not approved, warn user but proceed (gap analysis can inform requirement revisions)
-- **Empty Steering Directory**: Warn user that project context is missing and may affect analysis quality
-- **Complex Integration Unclear**: Flag for comprehensive research in design phase rather than blocking
-- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
-
-### Next Phase: Design Generation
-
-**If Gap Analysis Complete**:
-- Review gap analysis insights
-- Run `/kiro:spec-design $1` to create technical design document
-- Or `/kiro:spec-design $1 -y` to auto-approve requirements and proceed directly
-
-**Note**: Gap analysis is optional but recommended for brownfield projects to inform design decisions.

+ 0 - 138
.claude/commands/kiro/validate-impl.md

@@ -1,138 +0,0 @@
----
-description: Validate implementation against requirements, design, and tasks
-allowed-tools: Bash, Glob, Grep, Read, LS
-argument-hint: [feature-name] [task-numbers]
----
-
-# Implementation Validation
-
-<background_information>
-- **Mission**: Verify that implementation aligns with approved requirements, design, and tasks
-- **Success Criteria**:
-  - All specified tasks marked as completed
-  - Tests exist and pass for implemented functionality
-  - Requirements traceability confirmed (EARS requirements covered)
-  - Design structure reflected in implementation
-  - No regressions in existing functionality
-</background_information>
-
-<instructions>
-## Core Task
-Validate implementation for feature(s) and task(s) based on approved specifications.
-
-## Execution Steps
-
-### 1. Detect Validation Target
-
-**If no arguments provided** (`$1` empty):
-- Parse conversation history for `/kiro:spec-impl <feature> [tasks]` commands
-- Extract feature names and task numbers from each execution
-- Aggregate all implemented tasks by feature
-- Report detected implementations (e.g., "user-auth: 1.1, 1.2, 1.3")
-- If no history found, scan `.kiro/specs/` for features with completed tasks `[x]`
-
-**If feature provided** (`$1` present, `$2` empty):
-- Use specified feature
-- Detect all completed tasks `[x]` in `.kiro/specs/$1/tasks.md`
-
-**If both feature and tasks provided** (`$1` and `$2` present):
-- Validate specified feature and tasks only (e.g., `user-auth 1.1,1.2`)
-
-### 2. Load Context
-
-For each detected feature:
-- Read `.kiro/specs/<feature>/spec.json` for metadata
-- Read `.kiro/specs/<feature>/requirements.md` for requirements
-- Read `.kiro/specs/<feature>/design.md` for design structure
-- Read `.kiro/specs/<feature>/tasks.md` for task list
-- **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
-  - Default files: `structure.md`, `tech.md`, `product.md`
-  - All custom steering files (regardless of mode settings)
-
-### 3. Execute Validation
-
-For each task, verify:
-
-#### Task Completion Check
-- Checkbox is `[x]` in tasks.md
-- If not completed, flag as "Task not marked complete"
-
-#### Test Coverage Check
-- Tests exist for task-related functionality
-- Tests pass (no failures or errors)
-- Use Bash to run test commands (e.g., `npm test`, `pytest`)
-- If tests fail or don't exist, flag as "Test coverage issue"
-
-#### Requirements Traceability
-- Identify EARS requirements related to the task
-- Use Grep to search implementation for evidence of requirement coverage
-- If requirement not traceable to code, flag as "Requirement not implemented"
-
-#### Design Alignment
-- Check if design.md structure is reflected in implementation
-- Verify key interfaces, components, and modules exist
-- Use Grep/LS to confirm file structure matches design
-- If misalignment found, flag as "Design deviation"
-
-#### Regression Check
-- Run full test suite (if available)
-- Verify no existing tests are broken
-- If regressions detected, flag as "Regression detected"
-
-### 4. Generate Report
-
-Provide summary in the language specified in spec.json:
-- Validation summary by feature
-- Coverage report (tasks, requirements, design)
-- Issues and deviations with severity (Critical/Warning)
-- GO/NO-GO decision
-
-## Important Constraints
-- **Conversation-aware**: Prioritize conversation history for auto-detection
-- **Non-blocking warnings**: Design deviations are warnings unless critical
-- **Test-first focus**: Test coverage is mandatory for GO decision
-- **Traceability required**: All requirements must be traceable to implementation
-</instructions>
-
-## Tool Guidance
-- **Conversation parsing**: Extract `/kiro:spec-impl` patterns from history
-- **Read context**: Load all specs and steering before validation
-- **Bash for tests**: Execute test commands to verify pass status
-- **Grep for traceability**: Search codebase for requirement evidence
-- **LS/Glob for structure**: Verify file structure matches design
-
-## Output Description
-
-Provide output in the language specified in spec.json with:
-
-1. **Detected Target**: Features and tasks being validated (if auto-detected)
-2. **Validation Summary**: Brief overview per feature (pass/fail counts)
-3. **Issues**: List of validation failures with severity and location
-4. **Coverage Report**: Requirements/design/task coverage percentages
-5. **Decision**: GO (ready for next phase) / NO-GO (needs fixes)
-
-**Format Requirements**:
-- Use Markdown headings and tables for clarity
-- Flag critical issues with ⚠️ or 🔴
-- Keep summary concise (under 400 words)
-
-## Safety & Fallback
-
-### Error Scenarios
-- **No Implementation Found**: If no `/kiro:spec-impl` in history and no `[x]` tasks, report "No implementations detected"
-- **Test Command Unknown**: If test framework unclear, warn and skip test validation (manual verification required)
-- **Missing Spec Files**: If spec.json/requirements.md/design.md missing, stop with error
-- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
-
-### Next Steps Guidance
-
-**If GO Decision**:
-- Implementation validated and ready
-- Proceed to deployment or next feature
-
-**If NO-GO Decision**:
-- Address critical issues listed
-- Re-run `/kiro:spec-impl <feature> [tasks]` for fixes
-- Re-validate with `/kiro:validate-impl [feature] [tasks]`
-
-**Note**: Validation is recommended after implementation to ensure spec alignment and quality.

+ 94 - 0
.claude/rules/coding-style.md

@@ -38,6 +38,97 @@ MANY SMALL FILES > FEW LARGE FILES:
 - Extract utilities from large components
 - Organize by feature/domain, not by type
 
+## Module Design: Separation of Concerns
+
+### Pure Function Extraction
+
+When a framework-specific wrapper (React hook, Express middleware, CodeMirror extension handler, etc.) contains non-trivial logic, extract the core logic as a **pure function** and reduce the wrapper to a thin adapter. This enables direct reuse across different contexts and makes unit testing straightforward.
+
+```typescript
+// ❌ WRONG: Business logic locked inside a framework-specific wrapper
+export const useToggleSymbol = (view?: EditorView) => {
+  return useCallback((prefix, suffix) => {
+    // 30 lines of symbol-toggling logic here...
+  }, [view]);
+};
+
+// ✅ CORRECT: Pure function + thin wrappers for each context
+// services-internal/markdown-utils/toggle-markdown-symbol.ts
+export const toggleMarkdownSymbol = (view: EditorView, prefix: string, suffix: string): void => {
+  // Pure logic — testable, reusable from hooks, keymaps, shortcuts, etc.
+};
+
+// React hook wrapper
+export const useInsertMarkdownElements = (view?: EditorView) => {
+  return useCallback((prefix, suffix) => {
+    if (view == null) return;
+    toggleMarkdownSymbol(view, prefix, suffix);
+  }, [view]);
+};
+
+// Emacs command wrapper
+EmacsHandler.addCommands({
+  markdownBold(handler: { view: EditorView }) {
+    toggleMarkdownSymbol(handler.view, '**', '**');
+  },
+});
+```
+
+**Applies to**: React hooks, Express/Koa middleware, CLI command handlers, CodeMirror extension callbacks, test fixtures — any framework-specific adapter that wraps reusable logic.
+
+### Data-Driven Control over Hard-Coded Mode Checks
+
+Replace conditional branching on mode/variant names with **declared metadata** that consumers filter generically. This eliminates the need to update consumers when adding new modes.
+
+```typescript
+// ❌ WRONG: Consumer knows mode-specific behavior
+if (keymapModeName === 'emacs') {
+  return sharedKeyBindings; // exclude formatting
+}
+return [formattingBindings, ...sharedKeyBindings];
+
+// ✅ CORRECT: Module declares its overrides, consumer filters generically
+// Keymap module returns: { overrides: ['formatting', 'structural'] }
+const activeBindings = allGroups
+  .filter(group => group.category === null || !overrides?.includes(group.category))
+  .flatMap(group => group.bindings);
+```
+
+### Factory Pattern with Encapsulated Metadata
+
+When a module produces a value that requires configuration from the consumer (precedence, feature flags, etc.), **bundle the metadata alongside the value** in a structured return type. This keeps decision-making inside the module that has the knowledge.
+
+```typescript
+// ❌ WRONG: Consumer decides precedence based on mode name
+const wrapWithPrecedence = mode === 'vim' ? Prec.high : Prec.low;
+codeMirrorEditor.appendExtensions(wrapWithPrecedence(keymapExtension));
+
+// ✅ CORRECT: Factory encapsulates its own requirements
+interface KeymapResult {
+  readonly extension: Extension;
+  readonly precedence: (ext: Extension) => Extension;
+  readonly overrides: readonly ShortcutCategory[];
+}
+// Consumer applies generically:
+codeMirrorEditor.appendExtensions(result.precedence(result.extension));
+```
+
+### Responsibility-Based Submodule Decomposition
+
+When a single module grows beyond ~200 lines or accumulates multiple distinct responsibilities, split into submodules **by responsibility domain** (not by arbitrary size). Each submodule should be independently understandable.
+
+```
+// ❌ WRONG: One large file with mixed concerns
+keymaps/emacs.ts  (400+ lines: formatting + structural + navigation + save)
+
+// ✅ CORRECT: Split by responsibility
+keymaps/emacs/
+├── index.ts          ← Factory: composes submodules
+├── formatting.ts     ← Text styling commands
+├── structural.ts     ← Document structure commands
+└── navigation.ts     ← Movement and editing commands
+```
+
 ## Naming Conventions
 
 ### Variables and Functions
@@ -236,3 +327,6 @@ Before marking work complete:
 - [ ] Named exports (except Next.js pages)
 - [ ] English comments
 - [ ] Co-located tests
+- [ ] Non-trivial logic extracted as pure functions from framework wrappers
+- [ ] No hard-coded mode/variant checks in consumers (use declared metadata)
+- [ ] Modules with multiple responsibilities split by domain

+ 152 - 0
.claude/skills/kiro-debug/SKILL.md

@@ -0,0 +1,152 @@
+---
+name: kiro-debug
+description: Investigate implementation failures using root-cause-first debugging. Use when an implementer is blocked, verification fails, or repeated remediation does not converge.
+allowed-tools: Read, Bash, Grep, Glob, WebSearch, WebFetch
+argument-hint: <failure-summary>
+---
+
+# kiro-debug
+
+## Overview
+
+This skill is for fresh-context root cause investigation. It combines local evidence, runtime/config inspection, and external documentation or issue research when available. It is not a patch generator for guess-first debugging.
+
+## When to Use
+
+- Implementer reports `BLOCKED`
+- Reviewer rejection repeats after remediation
+- Validation fails unexpectedly
+- A task appears to conflict with runtime or platform reality
+- The same failure survives more than one attempted fix
+
+Do not use this skill to speculate about fixes before gathering evidence.
+
+## Inputs
+
+Provide:
+- Exact failure symptom or blocker statement
+- Error messages, stack trace, and failing command output
+- Current `git diff` or summary of uncommitted failed changes
+- Task brief: what was being built
+- Reviewer feedback, if the failure came from review rejection
+- Relevant spec file paths (`requirements.md`, `design.md`)
+- Relevant requirement/design section numbers
+- Relevant `## Implementation Notes`
+- Runtime or environment constraints already known
+
+## Outputs
+
+Return:
+- `ROOT_CAUSE`
+- `CATEGORY`
+- `FIX_PLAN`
+- `VERIFICATION`
+- `NEXT_ACTION: RETRY_TASK | BLOCK_TASK | STOP_FOR_HUMAN`
+- `CONFIDENCE: HIGH | MEDIUM | LOW`
+- `NOTES`
+
+Use the language specified in `spec.json`.
+
+## Method
+
+### 1. Read the Error Carefully
+Extract:
+- Exact error text
+- Stack trace or failure location
+- The command that produced the failure
+- Whether the failure is deterministic or intermittent
+
+### 2. Inspect Local Runtime and Repository State
+Inspect the repository for local evidence:
+- `package.json`, `pyproject.toml`, `go.mod`, `Makefile`, `README*`
+- Build config
+- `tsconfig` or equivalent language/runtime config
+- Runtime-specific config
+- Dependency versions and scripts
+- Relevant changed files from `git diff`
+
+### 3. Search the Web if Available
+If web access is available, search:
+- The exact error message
+- The technology + symptom combination
+- Official documentation
+- Version-specific issue trackers or migration notes
+
+Prefer:
+- Official docs
+- Official repos/issues
+- Version-specific references
+- Runtime-specific documentation
+
+### 4. Classify the Root Cause
+Use one category:
+- `MISSING_DEPENDENCY`
+- `RUNTIME_MISMATCH`
+- `MODULE_FORMAT`
+- `NATIVE_ABI`
+- `CONFIG_GAP`
+- `LOGIC_ERROR`
+- `TASK_ORDERING_PROBLEM`
+- `TASK_DECOMPOSITION_PROBLEM`
+- `SPEC_CONFLICT`
+- `EXTERNAL_DEPENDENCY`
+
+### 5. Determine the Smallest Safe Next Action
+Decide whether the issue can be fixed inside this repo by:
+- Editing files
+- Adjusting configuration
+- Adding or correcting dependencies
+- Restructuring code
+
+Use `NEXT_ACTION: RETRY_TASK` when the issue is repo-fixable inside the current approved task plan.
+
+### 6. Determine Whether the Task Plan Is Still Valid
+Decide whether the current approved task plan is still safe to execute as written.
+
+Prefer `NEXT_ACTION: STOP_FOR_HUMAN` when:
+- A missing prerequisite task should exist before this one
+- The current task is ordered incorrectly relative to unfinished work
+- The current task boundary is wrong and should be split or merged
+- The task is too large or ambiguous to fix safely inside the current implementation loop
+
+Use `NEXT_ACTION: BLOCK_TASK` only when the current task should stop but the rest of the queue can still proceed safely.
+
+Do not propose a brute-force code fix as a substitute for revising `tasks.md` or the approved plan.
+
+## Critical Rule
+
+Do not propose a multi-fix shotgun plan. Identify the root cause first, then produce the smallest plausible fix plan. If the true problem is a spec conflict or architecture problem, say so directly.
+
+## Stop / Escalate
+
+Use `NEXT_ACTION: STOP_FOR_HUMAN` when the blocker genuinely requires:
+- Human product/requirements decision
+- External credentials or inaccessible services
+- Hardware or unavailable external systems
+- Re-scoping due to spec/platform conflict
+
+If the issue is fixable by repo changes inside the current task plan, do not escalate prematurely.
+
+## Common Rationalizations
+
+| Rationalization | Reality |
+|---|---|
+| “This probably just needs a quick patch” | Patch-first debugging creates rework. |
+| “Let’s try a few fixes” | Multi-fix guessing hides root cause. |
+| “The spec is probably wrong, I’ll adapt it” | Spec conflicts must be surfaced explicitly. |
+| “The docs search is optional” | For runtime/dependency issues, docs and version issues often contain the shortest path to root cause. |
+
+## Output Format
+
+```md
+## Debug Report
+- ROOT_CAUSE: <1-2 sentence root cause>
+- CATEGORY: MISSING_DEPENDENCY | RUNTIME_MISMATCH | MODULE_FORMAT | NATIVE_ABI | CONFIG_GAP | LOGIC_ERROR | TASK_ORDERING_PROBLEM | TASK_DECOMPOSITION_PROBLEM | SPEC_CONFLICT | EXTERNAL_DEPENDENCY
+- FIX_PLAN:
+  1. <specific repo-fixable action>
+  2. <specific repo-fixable action>
+- VERIFICATION: <command(s) to confirm the fix>
+- NEXT_ACTION: RETRY_TASK | BLOCK_TASK | STOP_FOR_HUMAN
+- CONFIDENCE: HIGH | MEDIUM | LOW
+- NOTES: <context the next implementer should know>
+```

+ 262 - 0
.claude/skills/kiro-discovery/SKILL.md

@@ -0,0 +1,262 @@
+---
+name: kiro-discovery
+description: Entry point for new work. Determines the best action path or work decomposition (update existing spec, create new spec, mixed decomposition, or no spec needed) and refines ideas through structured dialogue.
+disable-model-invocation: true
+allowed-tools: Read, Write, Glob, Grep, Agent, WebSearch, WebFetch, AskUserQuestion
+argument-hint: <idea-or-request>
+---
+
+# kiro-discovery Skill
+
+## Core Mission
+- **Success Criteria**:
+  - Correct action path or work decomposition identified based on existing project state
+  - User's intent clarified through questions, not assumptions
+  - Output is an actionable next step (not just a description)
+
+## Execution Steps
+
+### Step 1: Lightweight Scan
+
+Gather **only metadata** to determine the action path. Do NOT read full file contents yet.
+
+- **Specs inventory**: Glob `.kiro/specs/*/spec.json`, read each spec.json for `name`, `phase` fields and `approvals` status. Note feature names and their current status.
+- **Steering existence**: Check which files exist in `.kiro/steering/` (product.md, tech.md, structure.md, roadmap.md). Do NOT read their contents yet.
+- **Roadmap check**: If `.kiro/steering/roadmap.md` exists, read it. This contains project-level context (approach, scope, constraints, spec list) from a previous discovery session. Use it to restore project context.
+- **Top-level structure**: List the project root directory to note key directories and files. Do NOT recurse into subdirectories.
+
+This step should consume minimal context. If `specs/` is empty and no steering exists, note "greenfield project" and move to Step 2.
+
+### Step 2: Determine Action Path
+
+Based on the user's request and the metadata from Step 1, determine which path applies:
+
+**Path A: Existing spec covers this**
+- The request is an extension, enhancement, or fix within an existing spec's domain
+- Every meaningful part of the request fits that same spec boundary
+- Any remaining small follow-up work can be handled directly without creating a new spec
+- Skip remaining steps
+
+**Path B: No spec needed**
+- The request is a bug fix, config change, simple refactor, or trivial addition
+- No meaningful part of the request needs a new or updated spec boundary
+- The request does not need to update an existing spec either
+- Skip remaining steps
+
+**Path C: New single-scope feature**
+- The request is new, doesn't overlap with existing specs, and fits in one spec
+
+**Path D: Multi-scope decomposition needed**
+- The request spans multiple domains or would produce 20+ tasks in a single spec
+
+**Path E: Mixed decomposition**
+- The request contains a mix of: existing spec extensions, one or more new spec candidates, and optional direct-implementation work
+- Use this path only when at least one genuinely new spec boundary is needed
+
+For Path C/D/E, present the determined path (or mixed decomposition) to the user and confirm before proceeding.
+For Path A/B, recommend the next action and stop.
+
+### Step 3: Deep Context Loading
+
+**Only for Path C, D, and E.** Now load the context needed for discovery.
+
+**In main context** (essential for dialogue with user):
+- **Steering documents**: Read product.md and tech.md (if they exist) for project goals, constraints, and tech stack
+- **Relevant specs**: If the request is adjacent to an existing spec, read that spec's requirements.md to understand boundaries and avoid overlap
+
+**Delegate to subagent via Agent tool** (keeps exploration out of main context):
+- **Codebase exploration**: Dispatch a subagent to explore the codebase and return a structured summary. Example prompt: "Explore this project's codebase. Summarize: (1) tech stack and frameworks, (2) directory structure and key modules, (3) patterns and conventions used, (4) areas relevant to [user's request]. Return a summary under 200 lines."
+- The subagent uses Read/Glob/Grep to explore, then returns findings. Only the summary enters the main context.
+- For Path D/E, also ask the subagent to identify natural domain boundaries, existing module separation, and which areas look like existing-spec extensions vs new boundaries.
+- Skip subagent dispatch for small/obvious requests where the top-level directory listing from Step 1 is sufficient.
+
+**Context budget**: Keep total content loaded into main context under ~500 lines. The subagent handles the heavy exploration.
+
+### Step 4: Understand the Idea
+
+Ask clarifying questions **sequentially** (not all at once), prioritizing boundary discovery over feature detail:
+
+1. **Who and why**: Who has the problem? What pain does it cause?
+2. **Desired outcome**: What should be true when this is done?
+3. **Boundary candidates**: What are the natural responsibility seams in this work? Where could this be split so implementation can proceed independently?
+4. **Out of boundary**: What should this spec explicitly NOT own, even if related?
+5. **Existing vs new**: Which parts seem like extensions to existing specs, and which parts look like genuinely new boundaries?
+6. **Upstream / downstream**: What existing systems, specs, or components does this depend on? What future work is likely to depend on this?
+7. **Constraints**: Are there technology, timeline, or compatibility constraints?
+
+Ask only questions whose answers you cannot infer from the context already loaded. Skip questions that steering documents already answer. If the user already provided a clear description, skip to Step 5.
+The goal is NOT to assign final owners yet. The goal is to discover the cleanest responsibility boundaries that can later become specs, tasks, and review scopes.
+
+### Step 5: Propose Approaches
+
+Propose **2-3 concrete approaches** with trade-offs:
+
+For each approach:
+- **Approach name**: One-line summary
+- **How it works**: 2-3 sentences on the technical approach
+- **Pros**: What makes this approach good
+- **Cons**: What are the risks or downsides
+- **Scope estimate**: Rough complexity (small / medium / large)
+
+If technical research is needed (unfamiliar framework, library evaluation), dispatch a subagent via Agent tool. Example prompt: "Research [topic]: compare options, check latest versions, note known issues. Return a summary of findings with recommendation." The subagent uses WebSearch/WebFetch and returns a concise summary. Raw search results never enter the main context.
+
+Recommend one approach and explain why.
+
+**After the user selects an approach**, dispatch a subagent to verify viability before proceeding to Step 6. Example prompt: "Verify the viability of this technical approach: [chosen tech stack / key libraries]. Check: (1) Are these technologies still actively maintained? (2) Any license incompatibilities (e.g., GPL contamination)? (3) Do the components actually work together for [use case]? (4) Any known showstoppers (critical bugs, security vulnerabilities, platform limitations)? Return only issues found, or 'No issues found' if everything checks out."
+
+If the viability check reveals issues, present them to the user and revisit the approach selection. If no issues, proceed to Step 6.
+
+### Step 6: Refine and Confirm
+
+- Address user's questions or concerns about the approaches
+- Narrow scope if needed: favor smaller, deliverable increments and cleaner responsibility seams
+- For Path D/E: propose work decomposition with dependency ordering
+  - Each new boundary-worthy feature = one spec
+  - Existing spec extensions are explicitly listed with their target spec
+  - Truly small direct-implementation items are listed separately instead of being forced into a spec
+  - Dependencies between specs/workstreams are explicit
+  - Consider vertical slices (end-to-end value) vs horizontal layers (one layer at a time) based on the project needs
+- Confirm the final direction
+
+### Step 7: Write Files to Disk
+
+**CRITICAL: You MUST use the Write tool to create these files BEFORE suggesting any next command. Conversation text does not survive session boundaries. If you skip this step, all discovery analysis is lost when the session ends.**
+
+**For Path C (single spec)**:
+
+Use the Write tool to create `.kiro/specs/<feature-name>/brief.md` with this structure:
+
+```
+# Brief: <feature-name>
+
+## Problem
+[who has the problem, what pain it causes]
+
+## Current State
+[what exists today, what's the gap]
+
+## Desired Outcome
+[what should be true when done]
+
+## Approach
+[chosen approach and why]
+
+## Scope
+- **In**: [what this feature includes]
+- **Out**: [what's explicitly excluded]
+
+## Boundary Candidates
+- [responsibility seam 1]
+- [responsibility seam 2]
+
+## Out of Boundary
+- [explicit non-goals this spec does not own]
+
+## Upstream / Downstream
+- **Upstream**: [existing systems/specs this depends on]
+- **Downstream**: [likely consumers or follow-on specs]
+
+## Existing Spec Touchpoints
+- **Extends**: [existing spec(s) this work updates, if any]
+- **Adjacent**: [neighbor specs or modules to avoid overlapping]
+
+## Constraints
+[technology, compatibility, or other constraints]
+```
+
+**For Path D (multi-spec decomposition)**:
+
+Use the Write tool to create:
+- `.kiro/steering/roadmap.md`
+- `.kiro/specs/<feature>/brief.md` for every feature listed under `## Specs (dependency order)`
+
+Use this roadmap structure:
+
+```
+# Roadmap
+
+## Overview
+[Project goal and chosen approach -- 1-2 paragraphs]
+
+## Approach Decision
+- **Chosen**: [approach name and summary]
+- **Why**: [key reasoning]
+- **Rejected alternatives**: [what was considered and why it was rejected]
+
+## Scope
+- **In**: [what the overall project includes]
+- **Out**: [what is explicitly excluded]
+
+## Constraints
+[technology, compatibility, timeline, or other project-wide constraints]
+
+## Boundary Strategy
+- **Why this split**: [why these spec boundaries improve independence]
+- **Shared seams to watch**: [cross-spec boundaries needing careful review]
+
+## Specs (dependency order)
+- [ ] feature-a -- [one-line description]. Dependencies: none
+- [ ] feature-b -- [one-line description]. Dependencies: feature-a
+- [ ] feature-c -- [one-line description]. Dependencies: feature-a, feature-b
+```
+
+Then create `.kiro/specs/<feature>/brief.md` for **every** feature listed under `## Specs (dependency order)` using the Path C brief format. This enables parallel spec creation via `/kiro-spec-batch`.
+
+**For Path E (mixed decomposition)**:
+
+Use the same roadmap structure as Path D, plus these additional sections:
+
+```
+## Existing Spec Updates
+- [ ] existing-feature-a -- [one-line description of the extension]. Dependencies: none
+- [ ] existing-feature-b -- [one-line description of the extension]. Dependencies: feature-a
+
+## Direct Implementation Candidates
+- [ ] small-item-a -- [why this stays direct implementation]
+- [ ] small-item-b -- [why this stays direct implementation]
+
+## Specs (dependency order)
+- [ ] new-feature-a -- [one-line description]. Dependencies: none
+- [ ] new-feature-b -- [one-line description]. Dependencies: new-feature-a
+```
+
+Path E rules:
+- Keep `## Specs (dependency order)` reserved for **new specs only** so `/kiro-spec-batch` can still parse it unchanged
+- Record existing-spec extensions under `## Existing Spec Updates`
+- Record true no-spec work under `## Direct Implementation Candidates`
+- Create `brief.md` only for the **new specs** listed under `## Specs (dependency order)`
+
+**Re-entry (roadmap.md already exists)**:
+Use the Write tool to create the next new spec's brief.md. Update roadmap.md with the Write tool if scope/ordering changed, preserving completed items and prior phases.
+
+After writing, verify the files exist by reading them back.
+
+### Step 8: Suggest Next Steps
+
+Suggest the next command and stop. Do NOT automatically run downstream spec generation from this skill.
+
+- Path A: `/kiro-spec-requirements {feature}` to update the existing spec
+- Path B: Recommend direct implementation without creating a spec
+- Path C: Default to `/kiro-spec-init <feature-name>`
+  - Optional fast path: `/kiro-spec-quick <feature-name>` when the user explicitly wants to continue immediately
+- Path D: Default to `/kiro-spec-batch` (creates all specs in parallel based on roadmap.md dependency order)
+  - Optional cautious path: `/kiro-spec-init <first-feature-name>` when the user wants to validate the first slice before batching the rest
+- Path E: Choose the next command based on the new-spec portion of the decomposition
+  - If there is exactly one new spec: `/kiro-spec-init <new-feature-name>`
+  - If there are multiple new specs: `/kiro-spec-batch`
+  - Also note which existing specs should be revisited with `/kiro-spec-requirements <feature>`
+- Re-entry: `/kiro-spec-init <next-feature-name>` or `/kiro-spec-batch` if multiple specs remain
+
+If the decomposition contains only existing-spec updates plus direct implementation candidates, do NOT use Path E. Prefer Path A when one existing spec is the clear home, or recommend the existing-spec update plus direct implementation work without creating roadmap entries.
+
+## Critical Constraints
+- **Files on disk are the source of continuity**: For Path C/D/E, create brief.md and roadmap.md as needed before suggesting the next command. Do NOT leave discovery results only in conversation text.
+
+## Safety & Fallback
+
+**Roadmap Already Exists (re-entry)**:
+- Read roadmap.md to restore project context before asking questions
+- Determine next spec based on completed specs' status
+- Write brief.md for the next spec only (just-in-time)
+- Update roadmap.md if scope/ordering changed based on implementation experience
+- Append new specs as a new phase if the request expands the project; don't overwrite existing content

+ 246 - 0
.claude/skills/kiro-impl/SKILL.md

@@ -0,0 +1,246 @@
+---
+name: kiro-impl
+description: Implement approved tasks using TDD with native subagent dispatch. Runs all pending tasks autonomously or selected tasks manually.
+disable-model-invocation: true
+allowed-tools: Read, Write, Edit, MultiEdit, Bash, Glob, Grep, Agent, WebSearch, WebFetch
+argument-hint: <feature-name> [task-numbers]
+---
+
+# kiro-impl Skill
+
+## Role
+You operate in two modes:
+- **Autonomous mode** (no task numbers): Dispatch a fresh subagent per task, with independent review after each
+- **Manual mode** (task numbers provided): Execute selected tasks directly in the main context
+
+## Core Mission
+- **Success Criteria**:
+  - All tests written before implementation code
+  - Code passes all tests with no regressions
+  - Tasks marked as completed in tasks.md
+  - Implementation aligns with design and requirements
+  - Independent reviewer approves each task before completion
+
+## Execution Steps
+
+### Step 1: Gather Context
+
+If steering/spec context is already available from conversation, skip redundant file reads.
+Otherwise, load all necessary context:
+- `.kiro/specs/{feature}/spec.json`, `requirements.md`, `design.md`, `tasks.md`
+- Core steering context: `product.md`, `tech.md`, `structure.md`
+- Additional steering files only when directly relevant to the selected task's boundary, runtime prerequisites, integrations, domain rules, security/performance constraints, or team conventions that affect implementation or validation
+- Relevant local agent skills or playbooks only when they clearly match the task's host environment or use case; read the specific artifact(s) you need, not entire directories
+
+#### Parallel Research
+
+The following research areas are independent and can be executed in parallel:
+1. **Spec context loading**: spec.json, requirements.md, design.md, tasks.md
+2. **Steering, playbooks, & patterns**: Core steering, task-relevant extra steering, matching local agent skills/playbooks, and existing code patterns
+
+After all parallel research completes, synthesize implementation brief before starting.
+
+#### Preflight
+
+**Validate approvals**:
+- Verify tasks are approved in spec.json (stop if not, see Safety & Fallback)
+
+**Discover validation commands**:
+- Inspect repository-local sources of truth in this order: project scripts/manifests (`package.json`, `pyproject.toml`, `go.mod`, `Cargo.toml`, app manifests), task runners (`Makefile`, `justfile`), CI/workflow files, existing e2e/integration configs, then `README*`
+- Derive a canonical validation set for this repo: `TEST_COMMANDS`, `BUILD_COMMANDS`, and `SMOKE_COMMANDS`
+- Prefer commands already used by repo automation over ad hoc shell pipelines
+- For `SMOKE_COMMANDS`, choose the lightest trustworthy runtime-liveness check for the app shape (for example: root URL load, Electron launch, CLI `--help`, service health endpoint, mobile simulator/e2e harness if one already exists)
+- Keep the full command set in the parent context, and pass only the task-relevant subset to implementer and reviewer subagents
+
+**Establish repo baseline**:
+- Run `git status --porcelain` and note any pre-existing uncommitted changes
+
+### Step 2: Select Tasks & Determine Mode
+
+**Parse arguments**:
+- Extract feature name from first argument
+- If task numbers provided (e.g., "1.1" or "1,2,3"): **manual mode**
+- If no task numbers: **autonomous mode** (all pending tasks)
+
+**Build task queue**:
+- Read tasks.md, identify actionable sub-tasks (X.Y numbering like 1.1, 2.3)
+- Major tasks (1., 2.) are grouping headers, not execution units
+- Skip tasks with `_Blocked:_` annotation
+- For each selected task, check `_Depends:_` annotations -- verify referenced tasks are `[x]`
+- If prerequisites incomplete, execute them first or warn the user
+- Use `_Boundary:_` annotations to understand the task's component scope
+
+### Step 3: Execute Implementation
+
+#### Autonomous Mode (subagent dispatch)
+
+**Iteration discipline**: Process exactly ONE sub-task (e.g., 1.1) per iteration. Do NOT batch multiple sub-tasks into a single subagent dispatch. Each iteration follows the full cycle: dispatch implementer → review → commit → re-read tasks.md → next.
+
+**Context management**: At the start of each iteration, re-read `tasks.md` to determine the next actionable sub-task. Do NOT rely on accumulated memory of previous iterations. After completing each iteration, retain only a one-line summary (e.g., "1.1: READY_FOR_REVIEW, 3 files changed") and discard the full status report and reviewer details.
+
+For each task (one at a time):
+
+**a) Dispatch implementer**:
+- Read `templates/implementer-prompt.md` from this skill's directory
+- Construct a prompt by combining the template with task-specific context:
+  - Task description and boundary scope
+  - Paths to spec files: requirements.md, design.md, tasks.md
+  - Exact requirement and design section numbers this task must satisfy (using source numbering, NOT invented `REQ-*` aliases)
+  - Task-relevant steering context and parent-discovered validation commands (tests/build/smoke as relevant)
+  - Whether the task is behavioral (Feature Flag Protocol) or non-behavioral
+  - **Previous learnings**: Include any `## Implementation Notes` entries from tasks.md that are relevant to this task's boundary or dependencies (e.g., "better-sqlite3 requires separate rebuild for Electron"). This prevents the same mistakes from recurring.
+- The implementer subagent will read the spec files and build its own Task Brief (acceptance criteria, completion definition, design constraints, verification method) before implementation
+- Dispatch via **Agent tool** as a fresh subagent
+
+**b) Handle implementer status**:
+- Parse implementer status only from the exact `## Status Report` block and `- STATUS:` field.
+- If `STATUS` is missing, ambiguous, or replaced with prose, re-dispatch the implementer once requesting the exact structured status block only. Do NOT proceed to review without a parseable `READY_FOR_REVIEW | BLOCKED | NEEDS_CONTEXT` value.
+- **READY_FOR_REVIEW** → proceed to review
+- **BLOCKED** → dispatch debug subagent (see section below); do NOT immediately skip
+- **NEEDS_CONTEXT** → re-dispatch once with the requested additional context; if still unresolved → dispatch debug subagent
+
+**c) Dispatch reviewer**:
+- Read `templates/reviewer-prompt.md` from this skill's directory
+- Construct a review prompt with:
+  - The task description and relevant spec section numbers
+  - Paths to spec files (requirements.md, design.md) so the reviewer can read them directly
+  - The implementer's status report (for reference only — reviewer must verify independently)
+- The reviewer must apply the `kiro-review` protocol to this task-local review.
+- Preserve the existing task-specific context: task text, spec refs, `_Boundary:_` scope, validation commands, implementer report, and the actual `git diff` as the primary source of truth.
+- The reviewer subagent will run `git diff` itself to read the actual code changes and verify against the spec
+- Dispatch via **Agent tool** as a fresh subagent
+
+**d) Handle reviewer verdict**:
+- Parse reviewer verdict only from the exact `## Review Verdict` block and `- VERDICT:` field.
+- If `VERDICT` is missing, ambiguous, or replaced with prose, re-dispatch the reviewer once requesting the exact structured verdict only. Do NOT mark the task complete, commit, or continue to the next task without a parseable `APPROVED | REJECTED` value.
+- **APPROVED** → before marking the task `[x]` or making any success claim, apply `kiro-verify-completion` using fresh evidence from the current code state; then mark task `[x]` in tasks.md and perform selective git commit
+- **REJECTED (round 1-2)** → re-dispatch implementer with review feedback
+- **REJECTED (round 3)** → dispatch debug subagent (see section below)
+
+**e) Commit** (parent-only, selective staging):
+- Stage only the files actually changed for this task, plus tasks.md
+- **NEVER** use `git add -A` or `git add .`
+- Use `git add <file1> <file2> ...` with explicit file paths
+- Commit message format: `feat(<feature-name>): <task description>`
+
+**f) Record learnings**:
+- If this task revealed cross-cutting insights, append a one-line note to the `## Implementation Notes` section at the bottom of tasks.md
+
+**g) Debug subagent** (triggered by BLOCKED, NEEDS_CONTEXT unresolved, or REJECTED after 2 remediation rounds):
+
+The debug subagent runs in a **fresh context** — it receives only the error information, not the failed implementation history. This avoids the context pollution that causes infinite retry loops.
+
+- Read `templates/debugger-prompt.md` from this skill's directory
+- Construct a debug prompt with:
+  - The error description / blocker reason / reviewer rejection findings
+  - `git diff` of the current uncommitted changes
+  - The task description and relevant spec section numbers
+  - Paths to spec files so the debugger can read them
+- The debugger must apply the `kiro-debug` protocol to this failure investigation.
+- Preserve rich failure context: error output, reviewer findings, current `git diff`, task/spec refs, and any relevant Implementation Notes.
+- When available, the debugger should inspect runtime/config state and use web or official documentation research to validate root-cause hypotheses before proposing a fix plan.
+- Dispatch via **Agent tool** as a fresh subagent
+
+**Handle debug report**:
+- Parse `NEXT_ACTION` from the debug report's exact structured field.
+- If `NEXT_ACTION: STOP_FOR_HUMAN` → append `_Blocked: <ROOT_CAUSE>_` to tasks.md, stop the feature run, and report that human review is required before continuing
+- If `NEXT_ACTION: BLOCK_TASK` → append `_Blocked: <ROOT_CAUSE>_` to tasks.md, skip to next task
+- If `NEXT_ACTION: RETRY_TASK` → preserve the current worktree; do NOT reset or discard unrelated changes. Spawn a **new** implementer subagent with the debug report's `FIX_PLAN`, `NOTES`, and the current `git diff`, and require it to repair the task with explicit edits only
+  - If the new implementer succeeds (READY_FOR_REVIEW → reviewer APPROVED) → normal flow
+  - If the new implementer also fails → repeat debug cycle (max 2 debug rounds total). After 2 failed debug rounds → append `_Blocked: debug attempted twice, still failing — <ROOT_CAUSE>_` to tasks.md, skip
+- **Max 2 debug rounds per task**. Each round: fresh debug subagent → fresh implementer. If still failing after 2 rounds, the task is blocked.
+- Record debug findings in `## Implementation Notes` (this helps subsequent tasks avoid the same issue)
+
+**`(P)` markers**: Tasks marked `(P)` in tasks.md indicate they have no inter-dependencies and could theoretically run in parallel. However, kiro-impl processes them sequentially (one at a time) to avoid git conflicts and simplify review. The `(P)` marker is informational for task planning, not an execution directive.
+
+**Completion check**: If all remaining tasks are BLOCKED, stop and report blocked tasks with reasons to the user.
+
+#### Manual Mode (main context)
+
+For each selected task:
+
+**1. Build Task Brief**:
+Before writing any code, read the relevant sections of requirements.md and design.md for this task and clarify:
+- What observable behaviors must be true when done (acceptance criteria)
+- What files/functions/tests must exist (completion definition)
+- What technical decisions to follow from design.md (design constraints)
+- How to confirm the task works (verification method)
+
+**2. Execute TDD cycle** (Kent Beck's RED → GREEN → REFACTOR):
+- **RED**: Write test for the next small piece of functionality based on the acceptance criteria. Test should fail.
+- **GREEN**: Implement simplest solution to make test pass, following the design constraints.
+- **REFACTOR**: Improve code structure, remove duplication. All tests must still pass.
+- **VERIFY**: All tests pass (new and existing), no regressions. Confirm verification method passes.
+- **REVIEW**: Apply `kiro-review` before marking the task complete. If the host supports fresh subagents in manual mode, use a fresh reviewer; otherwise perform the review in the main context using the `kiro-review` protocol. Do NOT continue until the verdict is parseably `APPROVED`.
+- **MARK COMPLETE**: Only after review returns `APPROVED`, apply `kiro-verify-completion`, then update the checkbox from `- [ ]` to `- [x]` in tasks.md.
+
+### Step 4: Final Validation
+
+**Autonomous mode**:
+- After all tasks complete, run `/kiro-validate-impl {feature}` as a GO/NO-GO gate
+- If validation returns GO → before reporting feature success, apply `kiro-verify-completion` to the feature-level claim using the validation result and fresh supporting evidence
+- If validation returns NO-GO:
+  - Fix only concrete findings from the validation report
+  - Cap remediation at 3 rounds; if still NO-GO, stop and report remaining findings
+- If validation returns MANUAL_VERIFY_REQUIRED → stop and report the missing verification step
+
+**Manual mode**:
+- Suggest running `/kiro-validate-impl {feature}` but do not auto-execute
+
+## Feature Flag Protocol
+
+For tasks that add or change behavior, enforce RED → GREEN with a feature flag:
+
+1. **Add flag** (OFF by default): Introduce a toggle appropriate to the codebase (env var, config constant, boolean, conditional -- agent chooses the mechanism)
+2. **RED -- flag OFF**: Write tests for the new behavior. Run tests → must FAIL. If tests pass with flag OFF, the tests are not testing the right thing. Rewrite.
+3. **GREEN -- flag ON + implement**: Enable the flag, write implementation. Run tests → must PASS.
+4. **Remove flag**: Make the code unconditional. Run tests → must still PASS.
+
+**Skip this protocol for**: refactoring, configuration, documentation, or tasks with no behavioral change.
+
+## Critical Constraints
+- **Strict Handoff Parsing**: Never infer implementer `STATUS` or reviewer `VERDICT` from surrounding prose; only the exact structured fields count
+- **No Destructive Reset**: Never use `git checkout .`, `git reset --hard`, or similar destructive rollback inside the implementation loop
+- **Selective Staging**: NEVER use `git add -A` or `git add .`; always stage explicit file paths
+- **Bounded Review Rounds**: Max 2 implementer re-dispatch rounds per reviewer rejection, then debug
+- **Bounded Debug**: Max 2 debug rounds per task (debug + re-implementation per round); if still failing → BLOCKED
+- **Bounded Remediation**: Cap final-validation remediation at 3 rounds
+
+## Output Description
+
+**Autonomous mode**: For each task, report:
+1. Task ID, implementer status, reviewer verdict
+2. Files changed, commit hash
+3. After all tasks: final validation result (GO/NO-GO)
+
+**Manual mode**:
+1. Tasks executed: task numbers and test results
+2. Status: completed tasks marked in tasks.md, remaining tasks count
+
+**Format**: Concise, in the language specified in spec.json.
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Tasks Not Approved or Missing Spec Files**:
+- **Stop Execution**: All spec files must exist and tasks must be approved
+- **Suggested Action**: "Complete previous phases: `/kiro-spec-requirements`, `/kiro-spec-design`, `/kiro-spec-tasks`"
+
+**Test Failures**:
+- **Stop Implementation**: Fix failing tests before continuing
+- **Action**: Debug and fix, then re-run
+
+**All Tasks Blocked**:
+- Stop and report all blocked tasks with reasons
+- Human review needed to resolve blockers
+
+**Spec Conflicts with Reality**:
+- If a requirement or design conflicts with reality (API doesn't exist, platform limitation), block the task with `_Blocked: <reason>_` -- do not silently work around it
+
+**Upstream Ownership Detected**:
+- If review, debug, or validation shows that the root cause belongs to an upstream, foundation, shared-platform, or dependency spec, do not patch around it inside the downstream feature
+- Route the fix back to the owning upstream spec, keep the downstream task blocked until that contract is repaired, and re-run validation/smoke for dependent specs after the upstream fix lands
+
+**Task Plan Invalidated During Implementation**:
+- If debug returns `NEXT_ACTION: STOP_FOR_HUMAN` because of task ordering, boundary, or decomposition problems, stop and return for human review of `tasks.md` or the approved plan instead of forcing a code workaround

+ 54 - 0
.claude/skills/kiro-impl/templates/debugger-prompt.md

@@ -0,0 +1,54 @@
+# Debug Investigator
+
+Apply the `kiro-debug` protocol for this fresh-context root-cause investigation.
+
+If the host can invoke skills directly inside subagents, use `kiro-debug` as the governing debug protocol. Otherwise, follow the full investigation procedure embedded in this prompt, including local runtime inspection and web or official docs research when available.
+
+You are a fresh debug investigator with NO prior context about implementation attempts. Your sole job is root cause analysis and producing a concrete fix plan.
+
+## You Will Receive
+- Error description and messages
+- `git diff` of the failed changes (or a summary)
+- Task brief (what was being built)
+- Reviewer feedback (if the failure came from review rejection)
+- Relevant spec file paths (requirements.md, design.md)
+
+## Method
+
+1. **Read the error carefully** — extract the exact error message, stack trace, and failure location
+2. **Search the web** if available — search the exact error message, the technology + symptom combination, and official documentation
+   - e.g., `site:electronjs.org "Cannot find module"`, `better-sqlite3 electron ABI mismatch`
+   - Check GitHub Issues for the specific package/framework version
+3. **Inspect the runtime environment** — check package.json (dependencies, scripts, main/module fields), build config, tsconfig, and any runtime-specific configuration
+4. **Classify the root cause**:
+   - **Missing dependency**: A required package is not installed or not configured
+   - **Runtime mismatch**: Code works in one runtime (e.g., Node.js) but not the target (e.g., Electron, browser, Lambda)
+   - **Module format conflict**: ESM vs CJS incompatibility
+   - **Native module ABI**: Binary compiled for wrong runtime/version
+   - **Configuration gap**: Missing entry point, build output format, or runtime flags
+   - **Logic error**: Actual bug in the implementation
+   - **Spec conflict**: Requirements or design contradicts what's technically possible
+   - **External dependency**: Requires human decision, external API access, or hardware
+5. **Determine if repo-fixable** — can this be resolved by editing files, adding dependencies, or changing configuration within this repository?
+
+## Critical Rule
+
+Do not collapse this investigation into guess-first patching; preserve category classification, repo-fixability judgment, and explicit verification commands.
+
+Use `NEXT_ACTION: STOP_FOR_HUMAN` only when the fix genuinely requires something outside the repository or the approved task plan is no longer safe to continue. If the fix is adding a dependency, changing a config file, or restructuring code inside the current task plan, prefer `NEXT_ACTION: RETRY_TASK`.
+
+## Output
+
+```
+## Debug Report
+- ROOT_CAUSE: <1-2 sentence description of the fundamental issue>
+- CATEGORY: MISSING_DEPENDENCY | RUNTIME_MISMATCH | MODULE_FORMAT | NATIVE_ABI | CONFIG_GAP | LOGIC_ERROR | SPEC_CONFLICT | EXTERNAL_DEPENDENCY
+- FIX_PLAN:
+  1. <specific action with file path>
+  2. <specific action with file path>
+  ...
+- VERIFICATION: <command(s) to run after fix to confirm resolution>
+- NEXT_ACTION: RETRY_TASK | BLOCK_TASK | STOP_FOR_HUMAN
+- CONFIDENCE: HIGH | MEDIUM | LOW
+- NOTES: <any additional context the next implementer should know>
+```

+ 93 - 0
.claude/skills/kiro-impl/templates/implementer-prompt.md

@@ -0,0 +1,93 @@
+# TDD Task Implementer
+
+## Role
+You are a specialized implementation subagent for a single task. The parent controller owns setup, task sequencing, task-state updates, and commits. You own only the implementation and validation work for the assigned task.
+
+## You Will Receive
+- Feature name and task identifier/text
+- Paths to spec files: `requirements.md`, `design.md`, `tasks.md`
+- Exact numbered sections from `requirements.md` and `design.md` that this task must satisfy (source numbering, e.g., `1.2`, `3.1`, `A.2`)
+- `_Boundary:_` scope constraints and any `_Depends:_` information already checked by the parent
+- Project steering context and parent-discovered validation commands (tests/build/smoke when available)
+- Whether the task is behavioral (Feature Flag Protocol) or non-behavioral
+
+## Execution Protocol
+
+### Step 1: Load Task-Relevant Context
+- Read the referenced sections of `requirements.md` and `design.md` for this task
+- Preserve the original section numbering; do NOT invent `REQ-*` aliases
+- Expand any file globs or path patterns before reading files
+- Inspect existing code patterns only in the declared boundary
+- Read only the provided task-relevant steering; do not bulk-load unrelated skills or playbooks
+
+### Step 2: Build Task Brief
+Before writing any code, synthesize a concrete Task Brief from the spec sections you just read:
+
+- **Acceptance criteria**: What observable behaviors must be true when done? Extract from the requirement sections. Be specific (e.g., "POST /auth/login returns JWT on valid credentials, 401 on invalid"), not vague.
+- **Completion definition**: What files, functions, tests, or artifacts must exist? Derive from design.md component structure and task boundary.
+- **Design constraints**: What specific technical decisions from design.md must be followed? (e.g., "use bcrypt for hashing", "implement as Express middleware"). If design says "use X", you must use X.
+- **Verification method**: How to confirm the task works. Derive from the requirement's testability and the parent-provided validation commands.
+
+If any of these cannot be determined from the spec — the requirements are too vague, the design doesn't specify the approach, or the task description is ambiguous — report as **NEEDS_CONTEXT** immediately with what's missing. Do not guess or fill gaps with assumptions.
+
+### Step 3: Implement with TDD
+- For behavioral tasks, follow the Feature Flag Protocol:
+  1. Add a flag defaulting OFF
+  2. RED: write/adjust tests so they fail with the flag OFF. **Run tests and capture the failing output.** You will include this in the status report as evidence.
+  3. GREEN: enable the flag and implement until tests pass
+  4. Remove the flag and confirm tests still pass
+- For non-behavioral tasks, use a standard RED → GREEN → REFACTOR cycle. **Run tests after writing them (before implementation) and capture the failing output.**
+- Use the acceptance criteria from the Task Brief to drive test design
+- Follow the design constraints exactly
+- Keep changes tightly scoped to the assigned task
+
+### Step 4: Validate
+- Run the parent-provided validation commands needed to establish confidence for this task
+- Prefer the parent-discovered canonical commands over inventing new ones; only add a task-local verification command when the parent set does not cover the task, and explain why
+- Re-read the referenced requirement and design sections and compare them against the changed code and tests
+- Confirm the verification method from the Task Brief passes
+- If a validation command fails because of a pre-existing unrelated issue, report that precisely instead of masking it
+
+### Step 5: Self-Review
+- Review your own changes before reporting back
+- Verify each acceptance criterion from the Task Brief is satisfied by concrete behavior
+- Verify each design constraint is reflected in the implementation
+- Verify the implementation is NOT a mock, stub, placeholder, fake, or TODO-only path unless the task explicitly requires one
+- Verify there are no TBD, TODO, or FIXME markers left in changed files
+- Verify the tests prove the required behavior, not just scaffolding or a happy-path shell
+- Verify that any namespace or qualified-name access used at runtime (for example `React.X`, `module.Foo`, `pkg.Bar`) has a real value import or runtime binding, not only a type-only import or ambient type reference
+- Verify that any newly introduced runtime-sensitive dependency or packaging assumption (native modules, module-format boundaries, generated assets, required env vars, boot-time config) is reflected in validation or called out explicitly in `CONCERNS`
+- If any review check fails, fix the implementation, re-run validation, and repeat this step
+
+## Critical Constraints
+- Do NOT update `tasks.md`
+- Do NOT create commits
+- Do NOT expand scope beyond the assigned task and boundary
+- Do NOT silently work around requirement or design mismatches
+- Use the exact section numbers from `requirements.md` and `design.md` in all notes and reports; do NOT invent `REQ-*` aliases
+- Do NOT stop at a mock, stub, placeholder, fake, or TODO-only implementation unless the task explicitly requires it
+- Prefer the minimal implementation that satisfies the Task Brief and tests
+
+## Status Report
+
+End your response with this structured status block:
+
+The parent controller parses the exact `- STATUS:` line. Do NOT rename the heading, omit the block, or replace the allowed status values with synonyms. Return exactly one final status block. Put extra explanation inside the defined fields, not after the block.
+
+
+```
+## Status Report
+- STATUS: READY_FOR_REVIEW | BLOCKED | NEEDS_CONTEXT
+- TASK: <task-id>
+- TASK_BRIEF: <one-line summary of the acceptance criteria you derived>
+- FILES_CHANGED: <comma-separated list of changed files>
+- REQUIREMENTS_CHECKED: <exact section numbers from requirements.md>
+- DESIGN_CHECKED: <exact section numbers from design.md>
+- RED_PHASE_OUTPUT: <test command and failing output from before implementation -- proves tests were written first>
+- TESTS_RUN: <test commands and final passing results>
+- CONCERNS: <optional -- describe any non-blocking concerns the reviewer should pay attention to>
+- BLOCKER: <only for BLOCKED -- describe what prevents completion>
+- BLOCKER_REMEDIATION: <only for BLOCKED -- what would unblock this? e.g., "design.md section 3.2 specifies API X but it doesn't exist; update design or provide alternative">
+- MISSING: <only for NEEDS_CONTEXT -- describe exactly what additional context is needed and where it might be found>
+- EVIDENCE: <concrete code paths, functions, and tests that prove the behavior>
+```

+ 111 - 0
.claude/skills/kiro-impl/templates/reviewer-prompt.md

@@ -0,0 +1,111 @@
+# Task Implementation Reviewer
+
+Apply the `kiro-review` protocol for this task-local adversarial review.
+
+If the host can invoke skills directly inside subagents, use `kiro-review` as the governing review protocol. Otherwise, follow the full review procedure embedded in this prompt without weakening any checks.
+
+## Role
+You are an independent, adversarial reviewer. Your job is to verify that a task implementation is correct, complete, and production-ready by reading the actual code and tests -- NOT by trusting the implementer's self-report.
+
+## You Will Receive
+- The task description and relevant spec section numbers
+- Paths to spec files (requirements.md, design.md) — read the relevant sections yourself
+- The implementer's status report (for reference only — do NOT trust it as source of truth)
+- The task's `_Boundary:_` scope constraints
+- Validation commands discovered by the controller
+
+## First Action
+
+Run `git diff` to see the actual code changes. This is your primary input. If the diff is large, also read the full changed files for context.
+
+## Core Principle
+
+**Do Not Trust the Report.** Run `git diff` yourself and read the actual code changes line by line. Read the spec sections yourself. The implementer may report READY_FOR_REVIEW while the code is a stub, tests are trivial, or requirements are partially met.
+
+**Taste encoded as tooling.** Where a check can be verified mechanically (grep, test execution, linter), run the command and use the result. Do not rely on visual inspection alone for checks that have mechanical equivalents.
+
+This review must preserve all existing mechanical checks, boundary checks, RED-phase checks, and structured remediation output.
+
+## Review Checklist
+
+Evaluate each item. If ANY item fails, the verdict is REJECTED.
+
+### Mechanical Checks (run commands, use results)
+
+**1. Regression Safety**
+- Run the project's test suite (e.g., `npm test`, `pytest`). Use the exit code.
+- If tests fail → REJECTED. No judgment needed.
+
+**2. Completeness — No TBD/TODO/FIXME**
+- Run: `grep -rn "TBD\|TODO\|FIXME\|HACK\|XXX" <changed-files>`
+- If matches found in changed files → REJECTED (unless the marker existed before this task).
+
+**3. No Hardcoded Secrets**
+- Run: `grep -rni "password\s*=\|api_key\s*=\|secret\s*=\|token\s*=" <changed-files>` (case-insensitive)
+- If matches found that aren't environment variable references → REJECTED.
+
+**4. Boundary Respect**
+- Run: `git diff --name-only` and compare against the task's `_Boundary:_` scope.
+- If files outside boundary are changed → REJECTED.
+
+**5. RED Phase Evidence**
+- Check the implementer's status report for `RED_PHASE_OUTPUT`.
+- If the task is behavioral and RED_PHASE_OUTPUT is missing or empty → REJECTED (tests may not have been written before implementation).
+- The output should show test failures related to the task's acceptance criteria.
+
+### Judgment Checks (read code, compare to spec)
+
+**6. Reality Check**
+- Read the `git diff`. Implementation is real production code.
+- NOT a mock, stub, placeholder, fake, or TODO-only path (unless the task explicitly requires one).
+- No "will be implemented later" or similar deferred-work patterns.
+
+**7. Acceptance Criteria**
+- Read the task description from tasks.md. All aspects are addressed, not just the primary case.
+- The Task Brief's acceptance criteria (from implementer's status report) are met.
+
+**8. Spec Alignment (Requirements)**
+- Read the referenced sections of requirements.md yourself.
+- Each referenced requirement is satisfied by concrete, observable behavior.
+- Use source section numbers (e.g., 1.2, 3.1); do NOT accept invented `REQ-*` aliases.
+
+**9. Spec Alignment (Design)**
+- Read the referenced sections of design.md yourself.
+- If design says "use X", the code uses X — not a substitute.
+- Component structure, interfaces, and data flow match the design.
+- Dependency direction follows design.md's architecture (no upward imports).
+
+**10. Test Quality**
+- Tests prove the required behavior, not just scaffolding or happy-path shells.
+- Test assertions are meaningful (not `expect(true).toBe(true)` or similar).
+- Tests would fail if the implementation were removed or broken.
+
+**11. Error Handling**
+- Error paths are handled, not just the happy path.
+- Errors are not silently swallowed.
+
+## Review Verdict
+
+End your response with this structured verdict:
+
+The parent controller parses the exact `- VERDICT:` line. Do NOT rename the heading, omit the block, or replace `APPROVED | REJECTED` with synonyms. Return exactly one final verdict block. Put extra explanation inside the defined sections, not after the block.
+
+
+```
+## Review Verdict
+- VERDICT: APPROVED | REJECTED
+- TASK: <task-id>
+- MECHANICAL_RESULTS:
+  - Tests: PASS | FAIL (command and exit code)
+  - TBD/TODO grep: CLEAN | <count> matches
+  - Secrets grep: CLEAN | <count> matches
+  - Boundary: WITHIN | <files outside boundary>
+  - RED phase: VERIFIED | MISSING | N/A (non-behavioral task)
+- FINDINGS:
+  - <numbered list of specific findings, if any>
+  - <reference exact file paths, line ranges, and spec section numbers>
+- REMEDIATION: <if REJECTED: specific, actionable steps to fix each finding>
+- SUMMARY: <one-sentence summary of the review outcome>
+```
+
+If REJECTED, REMEDIATION is mandatory — identify the exact file, the exact problem, and what the implementer should do to fix it. Vague feedback like "improve tests" is not acceptable.

+ 171 - 0
.claude/skills/kiro-review/SKILL.md

@@ -0,0 +1,171 @@
+---
+name: kiro-review
+description: Review a task implementation against approved specs, task boundaries, and verification evidence. Use after an implementer finishes a task, after remediation, or before accepting a task as complete.
+allowed-tools: Read, Bash, Grep, Glob
+argument-hint: <task-id>
+---
+
+# kiro-review
+
+## Overview
+
+This skill performs task-local adversarial review. It verifies that the implementation is real, complete, bounded, aligned with approved requirements and design, and supported by mechanical verification evidence.
+
+Boundary terminology continuity:
+- discovery identifies `Boundary Candidates`
+- design fixes `Boundary Commitments`
+- tasks constrain execution with `_Boundary:_`
+- review rejects concrete `Boundary Violations`
+
+## When to Use
+
+- After an implementer reports `READY_FOR_REVIEW`
+- After remediation for a rejected review
+- Before marking a task `[x]`
+- Before accepting a task into feature-level validation
+
+Do not use this skill to invent missing requirements or silently reinterpret the spec.
+
+## Inputs
+
+Provide:
+- Task ID and exact task text from `tasks.md`
+- Relevant requirement section numbers
+- Relevant design section numbers
+- Spec file paths (`requirements.md`, `design.md`, optionally `tasks.md`)
+- The implementer's status report
+- The task `_Boundary:_` scope constraints
+- Validation commands discovered by the controller
+- Relevant steering excerpts when applicable
+- Relevant `## Implementation Notes` entries when applicable
+
+## Outputs
+
+Return one of:
+- `APPROVED`
+- `REJECTED`
+
+Also return:
+- Mechanical results
+- Findings with severity
+- Required remediation
+- One-sentence summary
+
+Use the language specified in `spec.json`.
+
+## First Action
+
+Run `git diff` to inspect the actual code changes. If the diff is large or ambiguous, read the changed files directly. Do not trust the implementer's report as the source of truth.
+
+## Core Principle
+
+Read the spec yourself. Read the diff yourself. Verify mechanically where possible. Reject on concrete failures rather than interpretive optimism.
+The main review question is not just "does it work?" but "does it stay inside the approved responsibility boundary without hiding new coupling?"
+
+## Mechanical Checks
+
+Run these checks and use the result as primary signal.
+
+### 1. Regression Safety
+- Run the project's canonical test suite using the validation commands discovered by the controller.
+- If tests fail, reject.
+
+### 2. No Residual Placeholder Markers
+- Check changed files for `TBD`, `TODO`, `FIXME`, `HACK`, `XXX`.
+- Reject if new placeholder markers were introduced without explicit task justification.
+
+### 3. No Hardcoded Secrets
+- Check changed files for hardcoded secrets or credentials.
+- Reject if concrete secret patterns are introduced.
+
+### 4. Boundary Respect
+- Compare changed files against the task `_Boundary:_` scope.
+- Reject if the change spills outside the approved boundary without explicit justification.
+- Reject if the implementation introduces hidden cross-boundary coordination inside what should be a local task.
+
+### 5. RED Phase Evidence
+- For behavioral tasks, verify that the implementer status report includes `RED_PHASE_OUTPUT`.
+- Reject if RED evidence is missing, empty, or unrelated to the task's acceptance criteria.
+
+### 6. Runtime-Sensitive Static Checks
+- If the project already has lint or equivalent static analysis for the touched stack, run the relevant command for the task boundary.
+- Pay attention to patterns that can survive typecheck/build yet fail at runtime: type-only imports used as values, missing namespace value imports for qualified-name access, unresolved globals, and newly introduced runtime-sensitive dependencies without matching boot/runtime handling.
+- If no project lint command exists, perform a targeted diff-based spot check in the changed files for those patterns.
+- Reject on concrete findings that create a realistic boot-time or module-load failure.
+
+## Judgment Checks
+
+### 7. Reality Check
+- Confirm the implementation is real production code, not a placeholder, stub, fake path, or deferred-work shell.
+
+### 8. Acceptance Criteria Coverage
+- Read the task description and confirm all aspects are implemented, not only the primary happy path.
+
+### 9. Requirements Alignment
+- Read the referenced sections in `requirements.md`.
+- Confirm each requirement is satisfied by concrete observable behavior.
+- Use original section numbers only.
+
+### 10. Design Alignment
+- Read the referenced sections in `design.md`.
+- Confirm the implementation uses the prescribed structures, interfaces, and dependency direction.
+- Reject silent substitutions for design-mandated choices.
+
+### 10.5 Boundary Audit
+- Compare the implementation against the design's boundary commitments and out-of-boundary statements.
+- Reject if downstream-specific behavior is pushed into an upstream boundary for convenience.
+- Reject if the implementation creates new hidden dependencies, shared ownership, or undeclared coupling across adjacent boundaries.
+- Reject if a task that is not an explicit integration task now behaves like one.
+
+### 11. Test Quality
+- Confirm tests prove the required behavior rather than only scaffolding.
+- Confirm tests would fail if the implementation were removed or broken.
+
+### 12. Error Handling
+- Confirm relevant failure paths are handled and not silently swallowed.
+
+## Severity Model
+
+Use:
+- `Critical` for broken functionality, invalid verification, data loss, security risk, or major scope violation
+- `Important` for required fixes before acceptance
+- `Suggestion` for non-blocking improvements
+- `FYI` for informational notes
+
+## Stop / Escalate
+
+Escalate instead of papering over the issue when:
+- The approved spec is ambiguous in a correctness-critical way
+- The design conflicts with what is technically possible
+- Required evidence cannot be gathered
+- The implementation only works by silently deviating from approved scope
+- Boundary ownership cannot be determined cleanly from requirements, design, and task scope
+
+## Common Rationalizations
+
+| Rationalization | Reality |
+|---|---|
+| “Tests pass, so approve” | Passing tests do not prove spec compliance or boundary respect. |
+| “The extra behavior is useful” | Extra behavior outside approved scope is still drift. |
+| “The implementer said RED was done” | RED must be evidenced, not asserted. |
+| “This gap is small enough to let through” | Real gaps must be rejected or escalated. |
+
+## Output Format
+
+```md
+## Review Verdict
+- VERDICT: APPROVED | REJECTED
+- TASK: <task-id>
+- MECHANICAL_RESULTS:
+  - Tests: PASS | FAIL (command and exit code)
+  - TBD/TODO grep: CLEAN | <count> matches
+  - Secrets grep: CLEAN | <count> matches
+  - Static checks: PASS | FAIL | SPOT_CHECKED
+  - Boundary: WITHIN | <files outside boundary>
+  - Boundary audit: CLEAN | <spillover / hidden dependency findings>
+  - RED phase: VERIFIED | MISSING | N/A
+- FINDINGS:
+  1. <specific finding with exact files/spec refs>
+- REMEDIATION: <mandatory if REJECTED>
+- SUMMARY: <one sentence>
+```

+ 171 - 0
.claude/skills/kiro-spec-batch/SKILL.md

@@ -0,0 +1,171 @@
+---
+name: kiro-spec-batch
+description: Create complete specs (requirements, design, tasks) for all features in roadmap.md using parallel subagent dispatch by dependency wave.
+allowed-tools: Read, Glob, Grep, Agent
+---
+
+# kiro-spec-batch Skill
+
+## Core Mission
+- **Success Criteria**:
+  - All features have complete spec files (spec.json, requirements.md, design.md, tasks.md)
+  - Dependency ordering respected (upstream specs complete before downstream)
+  - Independent features processed in parallel via subagent dispatch
+  - Cross-spec consistency verified (data models, interfaces, naming)
+  - Mixed roadmap context understood without breaking `## Specs (dependency order)` parsing
+  - Controller context stays lightweight (subagents do the heavy work)
+
+## Execution Steps
+
+### Step 1: Read Roadmap and Validate
+
+1. Read `.kiro/steering/roadmap.md`
+2. Parse the `## Specs (dependency order)` section to extract:
+   - Feature names
+   - One-line descriptions
+   - Dependencies for each feature
+   - Completion status (`[x]` = done, `[ ]` = pending)
+3. If present, also read for context:
+   - `## Existing Spec Updates`
+   - `## Direct Implementation Candidates`
+   Do not include these in dependency-wave execution; they are awareness-only inputs for sequencing and consistency review.
+4. For each pending feature in `## Specs (dependency order)`, verify `.kiro/specs/<feature>/brief.md` exists
+5. If any brief.md is missing, stop and report: "Missing brief.md for: [list]. Run `/kiro-discovery` to generate briefs first."
+
+### Step 2: Build Dependency Waves
+
+Group pending features into waves based on dependencies:
+
+- **Wave 1**: Features with no dependencies (or all dependencies already completed `[x]`)
+- **Wave 2**: Features whose dependencies are all in Wave 1 or already completed
+- **Wave N**: Features whose dependencies are all in earlier waves or already completed
+
+Display the execution plan:
+```
+Spec Batch Plan:
+  Wave 1 (parallel): app-foundation
+  Wave 2 (parallel): block-editor, page-management
+  Wave 3 (parallel): sidebar-navigation, database-views
+  Wave 4 (parallel): cli-integration
+  Total: 6 specs across 4 waves
+```
+
+If roadmap contains `## Existing Spec Updates` or `## Direct Implementation Candidates`, mention them separately as non-batch items so the user can see the whole decomposition.
+
+### Step 3: Execute Waves
+
+For each wave, dispatch all features in the wave as **parallel subagents** via the Agent tool.
+
+**For each feature in the wave**, dispatch a subagent with this prompt:
+
+```
+Create a complete specification for feature "{feature-name}".
+
+1. Read the brief at .kiro/specs/{feature-name}/brief.md for feature context
+2. Read the roadmap at .kiro/steering/roadmap.md for project context
+3. Execute the full spec pipeline. For each phase, read the corresponding skill's SKILL.md for complete instructions (templates, rules, review gates):
+   a. Initialize: Read .claude/skills/kiro-spec-init/SKILL.md, then create spec.json and requirements.md
+   b. Generate requirements: Read .claude/skills/kiro-spec-requirements/SKILL.md, then follow its steps
+   c. Generate design: Read .claude/skills/kiro-spec-design/SKILL.md, then follow its steps
+   d. Generate tasks: Read .claude/skills/kiro-spec-tasks/SKILL.md, then follow its steps
+4. Set all approvals to true in spec.json (auto-approve mode, equivalent to the `-y` flag)
+5. Report completion with file list and task count
+```
+
+**After all subagents in the wave complete**:
+1. Verify each feature has: spec.json, requirements.md, design.md, tasks.md
+2. If any feature failed, report the error and continue with features that succeeded
+3. Display wave completion: "Wave N complete: [features]. Files verified."
+4. Proceed to next wave
+
+### Step 4: Cross-Spec Review
+
+After all waves complete, dispatch a **single subagent** for cross-spec consistency review. This is the highest-value quality gate -- it catches issues that per-spec review gates cannot.
+
+**Subagent prompt**:
+
+```
+You are a cross-spec reviewer. Read ALL generated specs and check for consistency across the entire project.
+
+Read these files for every feature in the roadmap:
+- .kiro/specs/*/design.md (primary: contains interfaces, data models, architecture)
+- .kiro/specs/*/requirements.md (for scope and acceptance criteria)
+- .kiro/specs/*/tasks.md (for boundary annotations only -- read _Boundary:_ lines, skip task descriptions)
+- .kiro/steering/roadmap.md
+
+Reading priority: Focus on design.md files (they contain interfaces, data models, architecture). For requirements.md, focus on section headings and acceptance criteria. For tasks.md, focus on _Boundary:_ annotations.
+
+Check the following:
+
+1. **Data model consistency**: Do all specs that reference the same entities (tables, types, interfaces) define them consistently? Are field names, types, and relationships aligned?
+
+2. **Interface alignment**: Where spec A produces output that spec B consumes (APIs, events, shared state), do the contracts match exactly? Are request/response shapes, event payloads, and error codes consistent?
+
+3. **No duplicate functionality**: Is any capability specified in more than one spec? Flag overlaps.
+
+4. **Dependency completeness**: Does every spec's design.md reference the correct upstream specs? Are there implicit dependencies not declared in roadmap.md?
+
+5. **Naming conventions**: Are component names, file paths, API routes, and database table names consistent across all specs?
+
+6. **Shared infrastructure**: Are shared concerns (authentication, error handling, logging, configuration) handled in one spec and correctly referenced by others?
+
+7. **Task boundary alignment**: Do task _Boundary:_ annotations across specs partition the codebase cleanly? Are there files claimed by multiple specs?
+
+8. **Roadmap boundary continuity**: If roadmap includes `Existing Spec Updates` or `Direct Implementation Candidates`, do the generated new specs avoid absorbing that work by accident?
+
+9. **Architecture boundary integrity**: Do the specs preserve clean responsibility seams, avoid shared ownership, keep dependency direction coherent, and include enough revalidation triggers to catch downstream impact?
+
+10. **Change-friendly decomposition**: Has any spec absorbed multiple independent seams that should probably be split instead of kept together?
+
+Output format:
+- CONSISTENT: [list areas that are well-aligned]
+- ISSUES: [list each issue with: which specs, what's inconsistent, suggested fix]
+- If no issues found: "All specs are consistent. Ready for implementation."
+```
+
+**After the review subagent returns**:
+- **Critical/important issues found**: Dispatch fix subagents for each affected spec to apply the suggested fixes. If the issue is really a decomposition problem (for example boundary overlap or one spec carrying multiple independent seams), stop and return to roadmap/discovery instead of papering over it locally. Re-run cross-spec review after fixes (max 3 remediation rounds).
+- **Minor issues only**: Report them for user awareness, proceed to Step 5.
+- **No issues**: Proceed to Step 5.
+
+### Step 5: Finalize
+
+1. Glob `.kiro/specs/*/tasks.md` to verify all specs exist
+2. For each completed spec, read spec.json to confirm phase and approvals
+3. Update roadmap.md: mark completed specs as `[x]`
+4. If roadmap.md includes `Existing Spec Updates` or `Direct Implementation Candidates`, leave them untouched and mention them as remaining follow-up items unless already explicitly completed elsewhere
+
+Display final summary:
+```
+Spec Batch Complete:
+  ✓ app-foundation: X requirements, Y design components, Z tasks
+  ✓ block-editor: ...
+  ✓ page-management: ...
+  ...
+  Total: N specs created, M tasks generated
+  Cross-spec review: PASSED / N issues found (M fixed)
+  Existing spec updates pending: <count or none>
+  Direct implementation candidates pending: <count or none>
+
+Next: Review generated specs, then start implementation with /kiro-impl <feature>
+```
+
+## Critical Constraints
+- **Controller stays lightweight**: Only read roadmap.md and brief.md existence checks in main context. All spec generation happens in subagents.
+- **Wave ordering is strict**: Never start a wave until all features in previous waves are complete.
+- **Parallel within waves**: All features in the same wave MUST be dispatched in parallel via Agent tool, not sequentially.
+- **No partial waves**: If a feature in a wave fails, still complete the other features in that wave before reporting.
+- **Skip completed specs**: Features with `[x]` in roadmap.md or existing tasks.md are skipped.
+- **`## Specs (dependency order)` remains authoritative for batch execution**: Other roadmap sections are context, not wave inputs.
+
+## Safety & Fallback
+
+**Subagent failure**:
+- Log the error, skip the failed feature
+- Continue with remaining features in the wave
+- Report failed features in the summary
+- Suggest: "Run `/kiro-spec-quick <feature> --auto` manually for failed features."
+
+**Circular dependencies**:
+- If dependency graph has cycles, report the cycle and stop
+- Suggest: "Fix dependency ordering in roadmap.md"
+
+**Roadmap not found**:
+- Stop and report: "No roadmap.md found. Run `/kiro-discovery` first."

+ 258 - 0
.claude/skills/kiro-spec-cleanup/SKILL.md

@@ -0,0 +1,258 @@
+---
+name: kiro-spec-cleanup
+description: Organize and clean up specification documents after implementation completion. Removes implementation details while preserving essential context for future refactoring.
+allowed-tools: Read, Write, Edit, Bash, Glob, Grep, AskUserQuestion
+argument-hint: <feature-name>
+---
+
+# kiro-spec-cleanup Skill
+
+## Role
+
+This skill fills the post-implementation gap in the spec lifecycle. After `/kiro-validate-impl` returns GO, spec documents accumulate implementation-specific content (testing procedures, deployment checklists, detailed code examples) that clutters future reading. This skill trims the HOW while preserving the WHY, so that the specs remain a useful reference when refactoring months later.
+
+Lifecycle position:
+
+```
+discovery → init → requirements → design → tasks → impl → validate-impl → **spec-cleanup**
+```
+
+## Core Mission
+- **Success Criteria**:
+  - Implementation details (testing procedures, deployment checklists) removed
+  - Design decisions, architectural constraints, and boundary metadata preserved
+  - Requirements simplified (Acceptance Criteria condensed to summaries)
+  - Unimplemented features removed or documented
+  - Documents remain valuable for future refactoring work
+  - All prose content matches the language in spec.json
+
+## Organizing Principle
+
+**"Can we read essential context from these spec documents when refactoring this feature months later?"**
+
+- **Keep**: "Why" — design decisions, architectural constraints, boundary commitments, limitations, trade-offs, Implementation Notes
+- **Remove**: "How" — testing procedures, deployment steps, detailed implementation code examples
+
+## Execution Steps
+
+### Step 1: Load Context
+
+**Discover all spec files**:
+- Glob `.kiro/specs/$ARGUMENTS/` to list every file
+- Categorize:
+  - **Core files** (must preserve): `spec.json`, `brief.md`, `requirements.md`, `design.md`, `tasks.md`, `research.md`
+  - **Other files** (evaluate case-by-case): validation reports, notes, prototypes, migration guides, etc.
+
+**Read all discovered files**:
+- Read all core files first
+- Read other files to understand their content and value
+
+**Determine target language**:
+- Read `spec.json` and extract the `language` field (e.g., `"ja"`, `"en"`)
+- All spec document prose must be in this language
+- Exempt: code inside fenced blocks, inline code spans, proper nouns, technical terms
+
+**Verify implementation status**:
+- Count `[x]` vs `[ ]` tasks in tasks.md
+- If less than 90% complete, warn user and ask to confirm cleanup
+
+### Step 2: Analyze Current State
+
+**Identify cleanup opportunities across all files**:
+
+1. **Other files** (non-core files like validation-report.md, notes.md, etc.):
+   - Read each file to understand content and purpose
+   - Identify valuable information worth preserving:
+     * Implementation discoveries and lessons learned
+     * Critical constraints or design decisions
+     * Historical context for future refactoring
+   - Determine salvage strategy:
+     * Migrate valuable content to research.md or design.md
+     * Keep file if it contains essential reference information
+     * Delete if content is redundant or no longer relevant
+   - **Case-by-case evaluation required** — never assume files should be deleted
+
+2. **brief.md** (v3 discovery output):
+   - Should be preserved as-is — it records the original problem, approach, scope, and boundary candidates from discovery
+   - No cleanup needed unless content duplicates other files
+
+3. **research.md**:
+   - Should contain discovery findings, design decisions, and implementation lessons
+   - Check if implementation revealed new constraints or patterns to document
+   - Identify content from other files that should be migrated here
+
+4. **requirements.md**:
+   - Identify verbose Acceptance Criteria that can be condensed to summaries
+   - Find unimplemented requirements (compare with tasks.md)
+   - Detect duplicate or redundant content
+
+5. **design.md**:
+   - Identify implementation-specific sections that can be removed:
+     * Detailed Testing Strategy (test procedures, not the test approach)
+     * Security Considerations (if fully addressed in implementation)
+     * Error Handling code examples (if implemented)
+     * Migration Strategy (after migration complete)
+     * Deployment Checklist (after deployment)
+   - Identify sections that MUST be preserved:
+     * Architecture diagrams and Boundary Commitments
+     * Component interfaces and API contracts
+     * File Structure Plan (drives task boundaries)
+     * Design decisions and rationale
+     * Out of Boundary declarations
+     * Allowed Dependencies
+     * Revalidation Triggers
+     * Critical implementation constraints
+     * Known limitations
+
+6. **tasks.md**:
+   - `## Implementation Notes` section MUST be preserved — it carries cross-task knowledge
+   - `_Boundary:_` and `_Depends:_` annotations MUST be preserved — they document the boundary discipline
+   - Task completion markers `[x]` should remain as historical record
+
+7. **Language audit** (compare prose language vs. `spec.json.language`):
+   - For each markdown file, scan prose content (headings, paragraphs, list items) and detect the written language
+   - Flag any file or section whose language does not match the target language
+   - Exemptions — do NOT flag:
+     * Content inside fenced code blocks — code comments must stay in English
+     * Inline code spans
+     * Proper nouns, technical terms, and identifiers always written in English
+   - Collect flagged items into a translation plan: file name, approximate line range, detected language, brief excerpt
+
+### Step 3: Interactive Confirmation
+
+**Present cleanup plan to user**:
+
+For each file and section identified in Step 2, present recommendations and ask for approval. Group related decisions to reduce interruptions.
+
+**Example questions for other files**:
+- "validation-report.md found. Contains {brief summary}. Options:"
+  - "A: Migrate valuable content to research.md, then delete"
+  - "B: Keep as historical reference"
+  - "C: Delete (content no longer needed)"
+
+**Example questions for core files**:
+- "requirements.md: Simplify Acceptance Criteria from detailed bullet points to summary paragraphs? [Y/n]"
+- "requirements.md: Remove unimplemented requirements (e.g., Req 4.4 not implemented)? [Y/n]"
+- "design.md: Delete 'Testing Strategy' section (lines X-Y)? [Y/n]"
+- "design.md: Keep Boundary Commitments and File Structure Plan (essential for refactoring)? [Y/n]"
+
+**Translation confirmation** (if language mismatches are found):
+- Show summary: "Found content in language(s) other than `{target_language}` in:"
+  - List each flagged file with line range and short excerpt
+- Ask: "Translate mismatched content to `{target_language}`? [Y/n]"
+
+**Batch similar decisions**:
+- Group related sections (e.g., all "delete implementation details" decisions)
+- Allow user to approve categories rather than individual items
+
+### Step 4: Execute Cleanup
+
+**For each approved action**:
+
+1. **Salvage and cleanup other files** (if approved):
+   - For each non-core file:
+     * Extract valuable information
+     * Migrate content to appropriate core file:
+       - Technical discoveries → research.md
+       - Design constraints → design.md
+       - Requirement clarifications → requirements.md
+     * Delete file after salvage (if approved)
+   - Document salvaged content with source reference
+
+2. **Update research.md** (if new discoveries or salvaged content):
+   - Add "Post-Implementation Discoveries" section if needed
+   - Document critical technical constraints discovered during implementation
+   - Integrate salvaged content from other files
+   - Cross-reference requirements.md and design.md where relevant
+
+3. **Simplify requirements.md** (if approved):
+   - Transform detailed Acceptance Criteria into summary paragraphs
+   - Remove unimplemented requirements entirely
+   - Preserve requirement objectives and summaries
+
+4. **Clean up design.md** (if approved):
+   - Delete approved implementation-detail sections
+   - Preserve: Architecture diagrams, Boundary Commitments, Out of Boundary, Allowed Dependencies, Revalidation Triggers, File Structure Plan, Component interfaces, Design decisions and rationale
+   - Integrate salvaged content from other files if relevant
+
+5. **Preserve tasks.md structure**:
+   - Keep `## Implementation Notes` intact
+   - Keep `_Boundary:_` and `_Depends:_` annotations intact
+   - Keep task completion markers as historical record
+
+6. **Preserve brief.md**:
+   - No modifications — discovery context is immutable
+
+7. **Translate language-mismatched content** (if approved):
+   - For each flagged section, translate prose to the target language
+   - Never translate content inside fenced code blocks or inline code spans
+   - Preserve all Markdown formatting
+
+8. **Update spec.json metadata**:
+   - Set `phase: "implementation-complete"`
+   - Set `cleaned_up_at` to current ISO 8601 timestamp (e.g., `"2026-04-16T09:30:00.000Z"`)
+   - Remove legacy `cleanup_completed` boolean if present (superseded by `cleaned_up_at`)
+   - Update `updated_at` timestamp
+
+### Step 5: Generate Cleanup Summary
+
+Provide summary report in the language specified in spec.json:
+
+```markdown
+## Cleanup Summary for {feature-name}
+
+### Files Modified
+- file: action taken (lines changed)
+
+### Information Salvaged
+- Source → destination mapping
+
+### Information Preserved
+- Architecture diagrams and boundary commitments
+- Design decisions and rationale
+- Implementation Notes and boundary annotations
+- Brief (discovery context)
+- Known limitations and trade-offs
+
+### Next Steps
+- Spec documents ready for future refactoring reference
+```
+
+## Critical Constraints
+
+- **User approval required**: Never delete or modify content without explicit confirmation
+- **Boundary metadata is sacred**: Never remove Boundary Commitments, Out of Boundary, Allowed Dependencies, Revalidation Triggers, _Boundary:_, or _Depends:_ annotations
+- **Implementation Notes are sacred**: Never remove the `## Implementation Notes` section from tasks.md
+- **brief.md is immutable**: Never modify — it records the original discovery context
+- **Language consistency**: All prose content must match `spec.json.language`; code blocks exempt
+- **Preserve history**: Don't delete discovery rationale or design decisions
+- **Interactive workflow**: Pause for user input rather than making assumptions
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Implementation Incomplete**:
+- **Condition**: Less than 90% of tasks marked `[x]` in tasks.md
+- **Action**: Warn: "Implementation appears incomplete (X/Y tasks done). Continue cleanup? [y/N]"
+- **Recommendation**: Run `/kiro-validate-impl {feature}` first
+
+**Spec Not Found**:
+- **Message**: "No spec found for `$ARGUMENTS`. Available specs:"
+- **Action**: List available spec directories in `.kiro/specs/`
+
+**Missing Critical Files**:
+- **Condition**: requirements.md or design.md missing
+- **Action**: Skip cleanup for missing files, proceed with available files
+- **Warning**: "{file} missing — cannot clean up"
+
+### Backup Recommendation
+
+Before cleanup:
+- Recommend user commit current state: "This will modify spec files. Consider committing current state for easy rollback."
+- Undo path: `git checkout HEAD -- .kiro/specs/{feature}/`
+
+### Related Commands
+
+- `/kiro-validate-impl {feature}` — run before cleanup to confirm GO
+- `/kiro-spec-status {feature}` — check implementation progress

+ 202 - 0
.claude/skills/kiro-spec-design/SKILL.md

@@ -0,0 +1,202 @@
+---
+name: kiro-spec-design
+description: Generate comprehensive technical design translating requirements (WHAT) into architecture (HOW) with discovery process. Use when creating architecture from requirements.
+allowed-tools: Read, Write, Edit, Grep, Glob, WebSearch, WebFetch, Agent
+argument-hint: <feature-name> [-y]
+metadata:
+  shared-rules: "design-principles.md, design-discovery-full.md, design-discovery-light.md, design-synthesis.md, design-review-gate.md"
+---
+
+# kiro-spec-design Skill
+
+## Core Mission
+- **Success Criteria**:
+  - All requirements mapped to technical components with clear interfaces
+  - The design makes responsibility boundaries explicit enough to guide task generation and review
+  - Appropriate architecture discovery and research completed
+  - Design aligns with steering context and existing patterns
+  - Visual diagrams included for complex architectures
+
+## Execution Steps
+
+### Step 1: Gather Context
+
+If steering/spec context is already available from conversation, skip redundant file reads.
+Otherwise, load all necessary context:
+- `.kiro/specs/{feature}/spec.json`, `requirements.md`, `design.md` (if exists)
+- `.kiro/specs/{feature}/research.md` (if exists, contains gap analysis from `/kiro-validate-gap`)
+- Core steering context: `product.md`, `tech.md`, `structure.md`
+- Additional steering files only when directly relevant to requirement coverage, architecture boundaries, integrations, runtime prerequisites, security/performance constraints, or team conventions that affect implementation readiness
+- `.kiro/settings/templates/specs/design.md` for document structure
+- Read `rules/design-principles.md` from this skill's directory for design principles
+- `.kiro/settings/templates/specs/research.md` for discovery log structure
+
+**Validate requirements approval**:
+- If auto-approve flag is true: Auto-approve requirements in spec.json
+- Otherwise: Verify approval status (stop if unapproved, see Safety & Fallback)
+
+### Step 2: Discovery & Analysis
+
+**Critical: This phase ensures design is based on complete, accurate information.**
+
+1. **Classify Feature Type**:
+   - **New Feature** (greenfield) → Full discovery required
+   - **Extension** (existing system) → Integration-focused discovery
+   - **Simple Addition** (CRUD/UI) → Minimal or no discovery
+   - **Complex Integration** → Comprehensive analysis required
+
+2. **Execute Appropriate Discovery Process**:
+
+   **For Complex/New Features**:
+   - Read and execute `rules/design-discovery-full.md` from this skill's directory
+   - Conduct thorough research using WebSearch/WebFetch:
+     - Latest architectural patterns and best practices
+     - External dependency verification (APIs, libraries, versions, compatibility)
+     - Official documentation, migration guides, known issues
+     - Performance benchmarks and security considerations
+
+   **For Extensions**:
+   - Read and execute `rules/design-discovery-light.md` from this skill's directory
+   - Focus on integration points, existing patterns, compatibility
+   - Use Grep to analyze existing codebase patterns
+
+   **For Simple Additions**:
+   - Skip formal discovery, quick pattern check only
+
+#### Parallel Research (subagent dispatch)
+
+The following research areas are independent and can be dispatched as **subagents** via the Agent tool. The agent should decide the optimal decomposition based on feature complexity — split, merge, add, or skip subagents as needed. Each subagent returns a **findings summary** (not raw data) to keep the main context clean for synthesis.
+
+**Typical research areas** (adjust as appropriate):
+- **Codebase analysis**: Existing architecture patterns, integration points, code conventions (using Grep/Glob)
+- **External research**: Dependencies, APIs, latest best practices (using WebSearch/WebFetch)
+- **Context loading** (usually main context): Steering files, design principles, discovery rules, templates
+
+For simple additions, skip subagent dispatch entirely and do a quick pattern check in main context.
+
+After all findings return, synthesize in main context before proceeding.
+
+3. **Retain Discovery Findings for Step 3**:
+   - External API contracts and constraints
+   - Technology decisions with rationale
+   - Existing patterns to follow or extend
+   - Integration points and dependencies
+   - Identified risks and mitigation strategies
+   - Boundary candidates, out-of-boundary decisions, and likely revalidation triggers
+
+4. **Persist Findings to Research Log**:
+   - Create or update `.kiro/specs/{feature}/research.md` using the shared template
+   - Summarize discovery scope and key findings
+   - Record investigations with sources and implications
+   - Document architecture pattern evaluation, design decisions, and risks
+   - Use the language specified in spec.json when writing or updating `research.md`
+
+### Step 3: Synthesis
+
+**Apply design synthesis to discovery findings before writing.**
+
+- Read and apply `rules/design-synthesis.md` from this skill's directory
+- This step requires the full picture from discovery findings — execute in main context, not in a subagent
+- Record synthesis outcomes (generalizations found, build-vs-adopt decisions, simplifications) in `research.md`
+
+### Step 4: Generate Design Draft
+
+1. **Generate Design Draft**:
+   - **Follow specs/design.md template structure and generation instructions strictly**
+   - **Boundary-first requirement**: Before expanding supporting sections, make the boundary explicit. The draft must clearly define what this spec owns, what it does not own, which dependencies are allowed, and what changes would require downstream revalidation.
+   - **Integrate all discovery findings and synthesis outcomes**: Use researched information (APIs, patterns, technologies) and synthesis decisions (generalizations, build-vs-adopt, simplifications) throughout component definitions, architecture decisions, and integration points
+   - **File Structure Plan** (required): Populate the File Structure Plan section with concrete file paths and responsibilities. Analyze the codebase to determine which files need to be created vs. modified. Each file must have one clear responsibility. This section directly drives task `_Boundary:_` annotations and implementation Task Briefs — vague file structures produce vague implementations.
+- **Testing Strategy**: Derive test items from requirements' acceptance criteria, not generic patterns. Each test item should reference specific components and behaviors from this design. E2E paths must map to the critical user flows identified in requirements. Avoid vague entries like "test login works" — instead specify what is being verified and why it matters.
+   - If existing design.md found in Step 1, use it as reference context (merge mode)
+   - Apply design rules: Type Safety, Visual Communication, Formal Tone
+   - Use language specified in spec.json
+   - Keep this as a draft until the review gate passes; do not write `design.md` yet
+
+### Step 5: Review Design Draft
+
+- Read and apply `rules/design-review-gate.md` from this skill's directory
+- Verify requirements coverage, architecture readiness, and implementation executability before finalizing the design
+- If issues are local to the draft, repair the design and review again
+- Keep the review bounded to at most 2 repair passes
+- If the draft exposes a real requirements/design gap, stop and return to requirements clarification instead of papering over it in `design.md`
+
+### Step 6: Finalize Design Document
+
+1. **Write Final Design**:
+   - Write `.kiro/specs/{feature}/design.md` only after the design review gate passes
+   - Write research.md with discovery findings and synthesis outcomes (if not already written)
+
+2. **Update Metadata** in spec.json:
+
+   - Set `phase: "design-generated"`
+   - Set `approvals.design.generated: true, approved: false`
+   - Set `approvals.requirements.approved: true`
+   - Update `updated_at` timestamp
+
+## Critical Constraints
+- **Type Safety**:
+  - Enforce strong typing aligned with the project's technology stack.
+  - For statically typed languages, define explicit types/interfaces and avoid unsafe casts.
+  - For TypeScript, never use `any`; prefer precise types and generics.
+  - For dynamically typed languages, provide type hints/annotations where available (e.g., Python type hints) and validate inputs at boundaries.
+  - Document public interfaces and contracts clearly to ensure cross-component type safety.
+- **Requirements Traceability IDs**: Use numeric requirement IDs only (e.g. "1.1", "1.2", "3.1", "3.3") exactly as defined in requirements.md. Do not invent new IDs or use alphabetic labels.
+
+## Output Description
+
+**Command execution output** (separate from design.md content):
+
+Provide brief summary in the language specified in spec.json:
+
+1. **Status**: Confirm design document generated at `.kiro/specs/{feature}/design.md`
+2. **Discovery Type**: Which discovery process was executed (full/light/minimal)
+3. **Key Findings**: 2-3 critical insights from discovery that shaped the design
+4. **Review Gate**: Confirm the design review gate passed
+5. **Next Action**: Approval workflow guidance (see Safety & Fallback)
+6. **Research Log**: Confirm `research.md` updated with latest decisions
+
+**Format**: Concise Markdown (under 200 words) - this is the command output, NOT the design document itself
+
+**Note**: The actual design document follows `.kiro/settings/templates/specs/design.md` structure.
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Requirements Not Approved**:
+- **Stop Execution**: Cannot proceed without approved requirements
+- **User Message**: "Requirements not yet approved. Approval required before design generation."
+- **Suggested Action**: "Run `/kiro-spec-design {feature} -y` to auto-approve requirements and proceed"
+
+**Missing Requirements**:
+- **Stop Execution**: Requirements document must exist
+- **User Message**: "No requirements.md found at `.kiro/specs/{feature}/requirements.md`"
+- **Suggested Action**: "Run `/kiro-spec-requirements {feature}` to generate requirements first"
+
+**Template Missing**:
+- **User Message**: "Template file missing at `.kiro/settings/templates/specs/design.md`"
+- **Suggested Action**: "Check repository setup or restore template file"
+- **Fallback**: Use inline basic structure with warning
+
+**Steering Context Missing**:
+- **Warning**: "Steering directory empty or missing - design may not align with project standards"
+- **Proceed**: Continue with generation but note limitation in output
+
+**Invalid Requirement IDs**:
+- **Stop Execution**: If requirements.md is missing numeric IDs or uses non-numeric headings (for example, "Requirement A"), stop and instruct the user to fix requirements.md before continuing.
+
+**Spec Gap Found During Design Review**:
+- **Stop Execution**: Do not write a patched-over `design.md`
+- **User Message**: "Design review found a real spec gap or ambiguity that must be resolved before design can be finalized."
+- **Suggested Action**: Clarify or fix `requirements.md`, then re-run `/kiro-spec-design {feature}`
+
+### Next Phase: Task Generation
+
+**If Design Approved**:
+- **Optional**: Run `/kiro-validate-design {feature}` for interactive quality review
+- Run `/kiro-spec-tasks {feature}` to generate implementation tasks
+- Or `/kiro-spec-tasks {feature} -y` to auto-approve and proceed directly
+
+**If Modifications Needed**:
+- Provide feedback and re-run `/kiro-spec-design {feature}`
+- Existing design used as reference (merge mode)

+ 93 - 0
.claude/skills/kiro-spec-design/rules/design-discovery-full.md

@@ -0,0 +1,93 @@
+# Full Discovery Process for Technical Design
+
+## Objective
+Conduct comprehensive research and analysis to ensure the technical design is based on complete, accurate, and up-to-date information.
+
+## Discovery Steps
+
+### 1. Requirements Analysis
+**Map Requirements to Technical Needs**
+- Extract all functional requirements from EARS format
+- Identify non-functional requirements (performance, security, scalability)
+- Determine technical constraints and dependencies
+- List core technical challenges
+
+### 2. Existing Implementation Analysis
+**Understand Current System** (if modifying/extending):
+- Analyze codebase structure and architecture patterns
+- Map reusable components, services, utilities
+- Identify domain boundaries and data flows
+- Document integration points and dependencies
+- Determine approach: extend vs refactor vs wrap
+
+### 3. Technology Research
+**Investigate Best Practices and Solutions**:
+- **Use WebSearch** to find:
+  - Latest architectural patterns for similar problems
+  - Industry best practices for the technology stack
+  - Recent updates or changes in relevant technologies
+  - Common pitfalls and solutions
+
+- **Use WebFetch** to analyze:
+  - Official documentation for frameworks/libraries
+  - API references and usage examples
+  - Migration guides and breaking changes
+  - Performance benchmarks and comparisons
+
+### 4. External Dependencies Investigation
+**For Each External Service/Library**:
+- Search for official documentation and GitHub repositories
+- Verify API signatures and authentication methods
+- Check version compatibility with existing stack
+- Investigate rate limits and usage constraints
+- Find community resources and known issues
+- Document security considerations
+- Note any gaps requiring implementation investigation
+
+### 5. Architecture Pattern & Boundary Analysis
+**Evaluate Architectural Options**:
+- Compare relevant patterns (MVC, Clean, Hexagonal, Event-driven)
+- Assess fit with existing architecture and steering principles
+- Identify domain boundaries and ownership seams required to avoid team conflicts
+- Consider scalability implications and operational concerns
+- Evaluate maintainability and team expertise
+- Document preferred pattern and rejected alternatives in `research.md`
+
+### 6. Risk Assessment
+**Identify Technical Risks**:
+- Performance bottlenecks and scaling limits
+- Security vulnerabilities and attack vectors
+- Integration complexity and coupling
+- Technical debt creation vs resolution
+- Knowledge gaps and training needs
+
+## Research Guidelines
+
+### When to Search
+**Always search for**:
+- External API documentation and updates
+- Security best practices for authentication/authorization
+- Performance optimization techniques for identified bottlenecks
+- Latest versions and migration paths for dependencies
+
+**Search if uncertain about**:
+- Architectural patterns for specific use cases
+- Industry standards for data formats/protocols
+- Compliance requirements (GDPR, HIPAA, etc.)
+- Scalability approaches for expected load
+
+### Search Strategy
+1. Start with official sources (documentation, GitHub)
+2. Check recent blog posts and articles (last 6 months)
+3. Review Stack Overflow for common issues
+4. Investigate similar open-source implementations
+
+## Output Requirements
+Capture all findings that impact design decisions in `research.md` using the shared template:
+- Key insights affecting architecture, technology alignment, and contracts
+- Constraints discovered during research
+- Recommended approaches and selected architecture pattern with rationale
+- Rejected alternatives and trade-offs (documented in the Design Decisions section)
+- Updated domain boundaries that inform Components & Interface Contracts
+- Risks and mitigation strategies
+- Gaps requiring further investigation during implementation

+ 49 - 0
.claude/skills/kiro-spec-design/rules/design-discovery-light.md

@@ -0,0 +1,49 @@
+# Light Discovery Process for Extensions
+
+## Objective
+Quickly analyze existing system and integration requirements for feature extensions.
+
+## Focused Discovery Steps
+
+### 1. Extension Point Analysis
+**Identify Integration Approach**:
+- Locate existing extension points or interfaces
+- Determine modification scope (files, components)
+- Check for existing patterns to follow
+- Identify backward compatibility requirements
+
+### 2. Dependency Check
+**Verify Compatibility**:
+- Check version compatibility of new dependencies
+- Validate API contracts haven't changed
+- Ensure no breaking changes in pipeline
+
+### 3. Quick Technology Verification
+**For New Libraries Only**:
+- Use WebSearch for official documentation
+- Verify basic usage patterns
+- Check for known compatibility issues
+- Confirm licensing compatibility
+- Record key findings in `research.md` (technology alignment section)
+
+### 4. Integration Risk Assessment
+**Quick Risk Check**:
+- Impact on existing functionality
+- Performance implications
+- Security considerations
+- Testing requirements
+
+## When to Escalate to Full Discovery
+Switch to full discovery if you find:
+- Significant architectural changes needed
+- Complex external service integrations
+- Security-sensitive implementations
+- Performance-critical components
+- Unknown or poorly documented dependencies
+
+## Output Requirements
+- Clear integration approach (note boundary impacts in `research.md`)
+- List of files/components to modify
+- New dependencies with versions
+- Integration risks and mitigations
+- Testing focus areas

+ 198 - 0
.claude/skills/kiro-spec-design/rules/design-principles.md

@@ -0,0 +1,198 @@
+# Technical Design Rules and Principles
+
+## Core Design Principles
+
+### 0. Boundary First
+- **Boundary is mandatory; owner is optional**
+- A design is not ready when it explains components but leaves responsibility seams ambiguous
+- Define what the spec owns before elaborating how it works
+- Explicitly record what is out of boundary
+- Do not leak downstream-specific behavior or assumptions into upstream boundaries
+
+### 1. Type Safety is Mandatory
+- **NEVER** use `any` type in TypeScript interfaces
+- Define explicit types for all parameters and returns
+- Use discriminated unions for error handling
+- Specify generic constraints clearly
+
+### 2. Design vs Implementation
+- **Focus on WHAT, not HOW**
+- Define interfaces and contracts, not code
+- Specify behavior through pre/post conditions
+- Document architectural decisions, not algorithms
+
+### 3. Visual Communication
+- **Simple features**: Basic component diagram or none
+- **Medium complexity**: Architecture + data flow
+- **High complexity**: Multiple diagrams (architecture, sequence, state)
+- **Always pure Mermaid**: No styling, just structure
+
+### 4. Component Design Rules
+- **Single Responsibility**: One clear purpose per component
+- **Clear Boundaries**: Explicit domain ownership
+- **Boundary Commitments First**: Before detailing components, state the responsibility boundary this design commits to
+- **Dependency Direction**: Follow architectural layers
+- **Interface Segregation**: Minimal, focused interfaces
+- **Team-safe Interfaces**: Design boundaries that allow parallel implementation without merge conflicts
+- **No Hidden Shared Ownership**: If two areas appear to co-own the same behavior or data, the design is incomplete
+- **Research Traceability**: Record boundary decisions and rationale in `research.md`
+
+### 5. Data Modeling Standards
+- **Domain First**: Start with business concepts
+- **Consistency Boundaries**: Clear aggregate roots
+- **Normalization**: Balance between performance and integrity
+- **Evolution**: Plan for schema changes
+
+### 6. Error Handling Philosophy
+- **Fail Fast**: Validate early and clearly
+- **Graceful Degradation**: Partial functionality over complete failure
+- **User Context**: Actionable error messages
+- **Observability**: Comprehensive logging and monitoring
+
+### 7. Integration Patterns
+- **Loose Coupling**: Minimize dependencies
+- **Contract First**: Define interfaces before implementation
+- **Versioning**: Plan for API evolution
+- **Idempotency**: Design for retry safety
+- **Contract Visibility**: Surface API and event contracts in design.md while linking extended details from `research.md`
+
+### 8. Dependency Direction
+- **Define and enforce the dependency direction** in the architecture section of design.md (e.g., Types → Config → Repository → Service → Runtime → UI)
+- Each layer imports only from layers to its left — never upward
+- This constraint is not a suggestion; implementation and review should treat violations as errors
+- When the File Structure Plan maps files to components, the dependency direction determines which imports are allowed
+
+## Documentation Standards
+
+### Language and Tone
+- **Declarative**: "The system authenticates users" not "The system should authenticate"
+- **Precise**: Specific technical terms over vague descriptions
+- **Concise**: Essential information only
+- **Formal**: Professional technical writing
+
+### Structure Requirements
+- **Hierarchical**: Clear section organization
+- **Traceable**: Requirements to components mapping
+- **Complete**: All aspects covered for implementation
+- **Consistent**: Uniform terminology throughout
+- **Focused**: Keep design.md centered on architecture and contracts; move investigation logs and lengthy comparisons to `research.md`
+
+## Section Authoring Guidance
+
+### Global Ordering
+- Default flow: Overview → Goals/Non-Goals → Boundary Commitments → Architecture → File Structure Plan → Components & Interfaces → Optional sections.
+- Teams may swap Traceability earlier or place Data Models nearer Architecture when it improves clarity, but keep section headings intact.
+- Within each section, follow **Summary → Scope → Decisions → Impacts/Risks** so reviewers can scan consistently.
+
+### Requirement IDs
+- Reference requirements as `2.1, 2.3` without prefixes (no “Requirement 2.1”).
+- All requirements MUST have numeric IDs. If a requirement lacks a numeric ID, stop and fix `requirements.md` before continuing.
+- Use `N.M`-style numeric IDs where `N` is the top-level requirement number from requirements.md (for example, Requirement 1 → 1.1, 1.2; Requirement 2 → 2.1, 2.2).
+- Every component, task, and traceability row must reference the same canonical numeric ID.
+
+### Technology Stack
+- Include ONLY layers impacted by this feature (frontend, backend, data, messaging, infra).
+- For each layer specify tool/library + version + the role it plays; push extended rationale, comparisons, or benchmarks to `research.md`.
+- When extending an existing system, highlight deviations from the current stack and list new dependencies.
+
+### System Flows
+- Add diagrams only when they clarify behavior:  
+  - **Sequence** for multi-step interactions  
+  - **Process/State** for branching rules or lifecycle  
+  - **Data/Event** for pipelines or async patterns
+- Always use pure Mermaid. If no complex flow exists, omit the entire section.
+
+### Requirements Traceability
+- Use the standard table (`Requirement | Summary | Components | Interfaces | Flows`) to prove coverage.
+- Collapse to bullet form only when a single requirement maps 1:1 to a component.
+- Prefer the component summary table for simple mappings; reserve the full traceability table for complex or compliance-sensitive requirements.
+- Re-run this mapping whenever requirements or components change to avoid drift.
+
+### Components & Interfaces Authoring
+- Boundary Commitments should already make the ownership seam explicit before this section begins.
+- Group components by domain/layer and provide one block per component.
+- Begin with a summary table listing Component, Domain, Intent, Requirement coverage, key dependencies, and selected contracts.
+- Table fields: Intent (one line), Requirements (`2.1, 2.3`), Owner/Reviewers (optional).
+- Dependencies table must mark each entry as Inbound/Outbound/External and assign Criticality (`P0` blocking, `P1` high-risk, `P2` informational).
+- Summaries of external dependency research stay here; detailed investigation (API signatures, rate limits, migration notes) belongs in `research.md`.
+- design.md must remain a self-contained reviewer artifact. Reference `research.md` only for background, and restate any conclusions or decisions here.
+- Contracts: tick only the relevant types (Service/API/Event/Batch/State). Unchecked types should not appear later in the component section.
+- Service interfaces must declare method signatures, inputs/outputs, and error envelopes. API/Event/Batch contracts require schema tables or bullet lists covering trigger, payload, delivery, idempotency.
+- Use **Integration & Migration Notes**, **Validation Hooks**, and **Open Questions / Risks** to document rollout strategy, observability, and unresolved decisions.
+- Detail density rules:
+  - **Full block**: components introducing new boundaries (logic hooks, shared services, external integrations, data layers).
+  - **Summary-only**: presentational/UI components with no new boundaries (plus a short Implementation Note if needed).
+- Implementation Notes must combine Integration / Validation / Risks into a single bulleted subsection to reduce repetition.
+- Prefer lists or inline descriptors for short data (dependencies, contract selections). Use tables only when comparing multiple items.
+
+### Shared Interfaces & Props
+- Define a base interface (e.g., `BaseUIPanelProps`) for recurring UI components and extend it per component to capture only the deltas.
+- Hooks, utilities, and integration adapters that introduce new contracts should still include full TypeScript signatures.
+- When reusing a base contract, reference it explicitly (e.g., “Extends `BaseUIPanelProps` with `onSubmitAnswer` callback”) instead of duplicating the code block.
+
+### Data Models
+- Domain Model covers aggregates, entities, value objects, domain events, and invariants. Add Mermaid diagrams only when relationships are non-trivial.
+- Logical Data Model should articulate structure, indexing, sharding, and storage-specific considerations (event store, KV/wide-column) relevant to the change.
+- Data Contracts & Integration section documents API payloads, event schemas, and cross-service synchronization patterns when the feature crosses boundaries.
+- Lengthy type definitions or vendor-specific option objects should be placed in the Supporting References section within design.md, linked from the relevant section. Investigation notes stay in `research.md`.
+- Supporting References usage is optional; only create it when keeping the content in the main body would reduce readability. All decisions must still appear in the main sections so design.md stands alone.
+
+### Error/Testing/Security/Performance Sections
+- Record only feature-specific decisions or deviations. Link or reference organization-wide standards (steering) for baseline practices instead of restating them.
+
+### Diagram & Text Deduplication
+- Do not restate diagram content verbatim in prose. Use the text to highlight key decisions, trade-offs, or impacts that are not obvious from the visual.
+- When a decision is fully captured in the diagram annotations, a short “Key Decisions” bullet is sufficient.
+
+### General Deduplication
+- Avoid repeating the same information across Overview, Architecture, and Components. Reference earlier sections when context is identical.
+- If a requirement/component relationship is captured in the summary table, do not rewrite it elsewhere unless extra nuance is added.
+
+## Diagram Guidelines
+
+### When to include a diagram
+- **Architecture**: Use a structural diagram when 3+ components or external systems interact.
+- **Sequence**: Draw a sequence diagram when calls/handshakes span multiple steps.
+- **State / Flow**: Capture complex state machines or business flows in a dedicated diagram.
+- **ER**: Provide an entity-relationship diagram for non-trivial data models.
+- **Skip**: Minor one-component changes generally do not need diagrams.
+
+### Mermaid requirements
+```mermaid
+graph TB
+    Client --> ApiGateway
+    ApiGateway --> ServiceA
+    ApiGateway --> ServiceB
+    ServiceA --> Database
+```
+
+- **Plain Mermaid only** – avoid custom styling or unsupported syntax.
+- **Node IDs** – alphanumeric plus underscores only (e.g., `Client`, `ServiceA`). Do not use `@`, `/`, or leading `-`.
+- **Labels** – simple words. Do not embed parentheses `()`, square brackets `[]`, quotes `"`, or slashes `/`.
+  - ❌ `DnD[@dnd-kit/core]` → invalid ID (`@`).
+  - ❌ `UI[KanbanBoard(React)]` → invalid label (`()`).
+  - ✅ `DndKit[dnd-kit core]` → use plain text in labels, keep technology details in the accompanying description.
+  - ℹ️ Mermaid strict-mode will otherwise fail with errors like `Expecting 'SQE' ... got 'PS'`; remove punctuation from labels before rendering.
+- **Edges** – show data or control flow direction.
+- **Groups** – using Mermaid subgraphs to cluster related components is allowed; use it sparingly for clarity.
+
+## Quality Metrics
+### Design Completeness Checklist
+- All requirements addressed
+- No implementation details leaked
+- Clear component boundaries
+- Explicit error handling
+- Comprehensive test strategy
+- Security considered
+- Performance targets defined
+- Migration path clear (if applicable)
+
+### Common Anti-patterns to Avoid
+❌ Mixing design with implementation
+❌ Vague interface definitions
+❌ Missing error scenarios
+❌ Ignored non-functional requirements
+❌ Overcomplicated architectures
+❌ Tight coupling between components
+❌ Missing data consistency strategy
+❌ Incomplete dependency analysis

+ 50 - 0
.claude/skills/kiro-spec-design/rules/design-review-gate.md

@@ -0,0 +1,50 @@
+# Design Review Gate
+
+Before writing `design.md`, review the draft design and repair local issues until the design passes or a true spec gap is discovered.
+
+## Requirements Coverage Review
+
+- Every numeric requirement ID from `requirements.md` must appear in the design traceability mapping and be backed by one or more concrete components, contracts, flows, data models, or operational decisions.
+- Every requirement that introduces an external dependency, integration point, runtime prerequisite, migration concern, observability need, security constraint, or performance target must be reflected explicitly in `design.md`.
+- If coverage is missing because the design draft is incomplete, repair the draft and review again.
+- If coverage cannot be completed cleanly because requirements are ambiguous, contradictory, or underspecified, stop and return to the requirements phase instead of inventing design detail.
+
+## Architecture Readiness Review
+
+- Component boundaries must be explicit enough that implementation tasks can be assigned without guessing ownership.
+- Interfaces, contracts, state transitions, and integration boundaries must be concrete enough for implementation and validation.
+- Build-vs-adopt decisions that materially affect architecture must be captured in `design.md`, with deeper investigation left in `research.md` when present.
+- Runtime prerequisites, migrations, rollout constraints, validation hooks, and failure modes must be surfaced when they materially affect implementation order or risk.
+
+## Boundary Readiness Review
+
+- The design must explicitly state what this spec owns.
+- The design must explicitly state what is out of boundary.
+- Allowed dependencies must be concrete enough that reviewers can detect boundary violations later.
+- If data, behavior, or integration responsibility appears shared across multiple areas without a clear seam, stop and repair the design.
+- If downstream assumptions are embedded in upstream components "for convenience," stop and repair the design.
+- If the boundary cannot be explained in a few direct bullets, it is probably still too vague for task generation.
+- If the design reveals multiple independent responsibility seams that could move separately, stop and split the spec or return to roadmap discovery instead of forcing them into one spec.
+
+## Executability Review
+
+- The design must be implementable as a sequence of bounded tasks without hidden prerequisites.
+- Parallel-safe boundaries should be visible where the architecture intends concurrent implementation.
+- Avoid speculative abstraction: remove components, adapters, or interfaces that exist only for hypothetical future scope.
+- If a section is too vague for tasks to reference directly, rewrite it before finalizing the design.
+
+## Mechanical Checks
+
+Before applying judgment, verify these mechanically:
+- **Requirements traceability**: Extract all numeric requirement IDs from `requirements.md`. Scan the design draft for each ID. Report any IDs not found in the design.
+- **Boundary section populated**: `Boundary Commitments`, `Out of Boundary`, `Allowed Dependencies`, and `Revalidation Triggers` must not be empty or placeholder-only.
+- **File Structure Plan populated**: The File Structure Plan section must contain concrete file paths (not just "TBD" or empty). Scan for placeholder text in that section.
+- **Boundary ↔ file structure alignment**: The File Structure Plan must reflect the stated responsibility boundary. If files imply broader ownership than the boundary section claims, report a mismatch.
+- **No orphan components**: Every component mentioned in the design must appear in the File Structure Plan with a file path. Scan for component names that have no corresponding file entry.
+
+## Review Loop
+
+- Run mechanical checks first, then judgment-based review.
+- If issues are local to the draft, repair the draft and re-run the review gate.
+- Keep the loop bounded: no more than 2 review-and-repair passes before escalating a real spec gap.
+- Write `design.md` only after the review gate passes.

+ 29 - 0
.claude/skills/kiro-spec-design/rules/design-synthesis.md

@@ -0,0 +1,29 @@
+# Design Synthesis
+
+After discovery and before writing the design document, apply these three lenses to the collected findings. This step requires the full picture — do not parallelize.
+
+## 1. Generalization
+
+Look across all requirements as a group. Identify cases where multiple requirements are variations of the same underlying problem.
+
+- If feature A is a special case of a more general capability X, design X with an interface that naturally supports A (and potentially B, C later)
+- Keep the implementation scope to what the current requirements demand — generalize the interface, not the implementation
+- Record identified generalizations in `research.md` under Design Decisions
+
+## 2. Build vs. Adopt
+
+For each major component in the emerging design, ask: is this problem already solved?
+
+- Search for established standards (RFCs, protocols), battle-tested libraries, or platform-native capabilities that address the requirement
+- Prefer adopting existing solutions over building custom ones when they fit the requirements without significant adaptation
+- If adopting: verify the solution is actively maintained, compatible with the project's stack (check steering), and meets non-functional requirements
+- If building: document why existing solutions were rejected (capture in `research.md`)
+
+## 3. Simplification
+
+For each component and abstraction layer in the emerging design, ask: is this necessary?
+
+- Remove components that exist "just in case" or for hypothetical future requirements not in the current spec
+- Flatten unnecessary abstraction layers — if an interface has only one implementation with no foreseeable second, it may not need the indirection
+- Prefer fewer, cohesive components over many fine-grained ones
+- The right design is the smallest one that satisfies all requirements and remains extensible at the interface level

+ 12 - 26
.claude/commands/kiro/spec-init.md → .claude/skills/kiro-spec-init/SKILL.md

@@ -1,56 +1,42 @@
 ---
+name: kiro-spec-init
 description: Initialize a new specification with detailed project description
-allowed-tools: Bash, Read, Write, Glob
+allowed-tools: Bash, Read, Write, Glob, AskUserQuestion
 argument-hint: <project-description>
 ---
 
 # Spec Initialization
 
-<background_information>
-- **Mission**: Initialize the first phase of spec-driven development by creating directory structure and metadata for a new specification
-- **Success Criteria**:
-  - Generate appropriate feature name from project description
-  - Create unique spec structure without conflicts
-  - Provide clear path to next phase (requirements generation)
-</background_information>
-
 <instructions>
 ## Core Task
 Generate a unique feature name from the project description ($ARGUMENTS) and initialize the specification structure.
 
 ## Execution Steps
-1. **Check Uniqueness**: Verify `.kiro/specs/` for naming conflicts (append number suffix if needed)
-2. **Create Directory**: `.kiro/specs/[feature-name]/`
-3. **Initialize Files Using Templates**:
+1. **Check for Brief**: If `.kiro/specs/{feature-name}/brief.md` exists (created by `/kiro-discovery`), read it. The brief contains problem, approach, scope, and constraints from the discovery session. Use this to pre-fill the project description and skip clarification questions that the brief already answers.
+2. **Clarify Intent**: The Project Description in requirements.md must contain three elements: (a) who has the problem, (b) current situation, (c) what should change. If a brief.md exists and covers these, skip to step 3. Otherwise, ask the user to clarify before proceeding. Ask as many questions as needed; do not fill in gaps with your own assumptions.
+3. **Check Uniqueness**: Verify `.kiro/specs/` for naming conflicts. If the directory already exists with only `brief.md` (no `spec.json`), use that directory (discovery created it).
+4. **Create Directory**: `.kiro/specs/[feature-name]/` (skip if already exists from discovery)
+5. **Initialize Files Using Templates**:
    - Read `.kiro/settings/templates/specs/init.json`
    - Read `.kiro/settings/templates/specs/requirements-init.md`
    - Replace placeholders:
      - `{{FEATURE_NAME}}` → generated feature name
      - `{{TIMESTAMP}}` → current ISO 8601 timestamp
-     - `{{PROJECT_DESCRIPTION}}` → $ARGUMENTS
+     - `{{PROJECT_DESCRIPTION}}` → from brief.md if available, otherwise $ARGUMENTS
+     - `en` (template's default language value) → detected language code (infer from the user's input language; default to `en`)
    - Write `spec.json` and `requirements.md` to spec directory
 
 ## Important Constraints
-- DO NOT generate requirements/design/tasks at this stage
-- Follow stage-by-stage development principles
-- Maintain strict phase separation
-- Only initialization is performed in this phase
+- Do NOT generate requirements, design, or tasks. This skill only creates spec.json and requirements.md.
 </instructions>
 
-## Tool Guidance
-- Use **Glob** to check existing spec directories for name uniqueness
-- Use **Read** to fetch templates: `init.json` and `requirements-init.md`
-- Use **Write** to create spec.json and requirements.md after placeholder replacement
-- Perform validation before any file write operation
-
 ## Output Description
 Provide output in the language specified in `spec.json` with the following structure:
 
 1. **Generated Feature Name**: `feature-name` format with 1-2 sentence rationale
 2. **Project Summary**: Brief summary (1 sentence)
 3. **Created Files**: Bullet list with full paths
-4. **Next Step**: Command block showing `/kiro:spec-requirements <feature-name>`
-5. **Notes**: Explain why only initialization was performed (2-3 sentences on phase separation)
+4. **Next Step**: Command block showing `/kiro-spec-requirements <feature-name>`
 
 **Format Requirements**:
 - Use Markdown headings (##, ###)
@@ -62,4 +48,4 @@ Provide output in the language specified in `spec.json` with the following struc
 - **Ambiguous Feature Name**: If feature name generation is unclear, propose 2-3 options and ask user to select
 - **Template Missing**: If template files don't exist in `.kiro/settings/templates/specs/`, report error with specific missing file path and suggest checking repository setup
 - **Directory Conflict**: If feature name already exists, append numeric suffix (e.g., `feature-name-2`) and notify user of automatic conflict resolution
-- **Write Failure**: Report error with specific path and suggest checking permissions or disk space
+- **Write Failure**: Report error with specific path and suggest checking permissions or disk space

+ 255 - 0
.claude/skills/kiro-spec-quick/SKILL.md

@@ -0,0 +1,255 @@
+---
+name: kiro-spec-quick
+description: Quick spec generation with interactive or automatic mode
+allowed-tools: Read, Skill, Bash, Write, Glob, Agent
+argument-hint: <project-description> [--auto]
+---
+
+# Quick Spec Generator
+
+<instructions>
+## CRITICAL: Automatic Mode Execution Rules
+
+**If `--auto` flag is present in `$ARGUMENTS`, you are in AUTOMATIC MODE.**
+
+In Automatic Mode:
+- Execute ALL 4 phases in a continuous loop without stopping
+- Display progress after each phase (e.g., "Phase 1/4 complete: spec initialized")
+- IGNORE any "Next Step" messages from Phase 2-4 (they are for standalone usage)
+- After Phase 4, run the final sanity review before exiting
+- Stop ONLY after the sanity review completes or if error occurs
+
+---
+
+## Core Task
+Execute 4 spec phases sequentially. In automatic mode, execute all phases without stopping. In interactive mode, prompt user for approval between phases.
+
+Before claiming quick generation is complete, run one lightweight sanity review over the generated requirements, design, and tasks. If the host supports fresh subagents, use one. Otherwise run the sanity review inline.
+
+## Execution Steps
+
+### Step 1: Parse Arguments and Initialize
+
+Parse `$ARGUMENTS`:
+- If contains `--auto`: **Automatic Mode** (execute all 4 phases)
+- Otherwise: **Interactive Mode** (prompt at each phase)
+- Extract description (remove `--auto` flag if present)
+
+Example:
+```
+"User profile with avatar upload --auto" → mode=automatic, description="User profile with avatar upload"
+"User profile feature" → mode=interactive, description="User profile feature"
+```
+
+Display mode banner and proceed to Step 2.
+
+### Step 2: Execute Phase Loop
+
+Execute these 4 phases in order:
+
+---
+
+#### Phase 1: Initialize Spec (Direct Implementation)
+
+**Core Logic**:
+
+1. **Check for Brief**:
+   - If `.kiro/specs/{feature-name}/brief.md` exists (created by `/kiro-discovery`), read it for discovery context (problem, approach, scope, constraints)
+   - Use brief content as the project description instead of `$ARGUMENTS`
+
+2. **Generate Feature Name**:
+   - Convert description to kebab-case
+   - Example: "User profile with avatar upload" → "user-profile-avatar-upload"
+   - Keep name concise (2-4 words ideally)
+
+3. **Check Uniqueness**:
+   - Use Glob to check `.kiro/specs/*/`
+   - If directory exists with only `brief.md` (no `spec.json`), use that directory (discovery created it)
+   - Otherwise if feature name exists, append `-2`, `-3`, etc.
+
+4. **Create Directory**:
+   - Use Bash: `mkdir -p .kiro/specs/{feature-name}` (skip if already exists from discovery)
+
+5. **Initialize Files from Templates**:
+
+   a. Read templates:
+   ```
+   - .kiro/settings/templates/specs/init.json
+   - .kiro/settings/templates/specs/requirements-init.md
+   ```
+
+   b. Replace placeholders:
+   ```
+   {{FEATURE_NAME}} → feature-name
+   {{TIMESTAMP}} → current ISO 8601 timestamp (use `date -u +"%Y-%m-%dT%H:%M:%SZ"`)
+   {{PROJECT_DESCRIPTION}} → description
+   en (template's default language value) → detected language code (infer from the user's input language; default to en)
+   ```
+
+   c. Write files using Write tool:
+   ```
+   - .kiro/specs/{feature-name}/spec.json
+   - .kiro/specs/{feature-name}/requirements.md
+   ```
+
+6. **Output Progress**: "Phase 1/4 complete: Spec initialized at .kiro/specs/{feature-name}/"
+
+**Automatic Mode**: IMMEDIATELY continue to Phase 2.
+
+**Interactive Mode**: Prompt "Continue to requirements generation? (yes/no)"
+- If "no": Stop, show current state
+- If "yes": Continue to Phase 2
+
+---
+
+#### Phase 2: Generate Requirements
+
+Invoke `/kiro-spec-requirements {feature-name}` via the Skill tool.
+
+Wait for completion. IGNORE any "Next Step" message (it is for standalone usage).
+
+**Output Progress**: "Phase 2/4 complete: Requirements generated"
+
+**Automatic Mode**: IMMEDIATELY continue to Phase 3.
+
+**Interactive Mode**: Prompt "Continue to design generation? (yes/no)"
+- If "no": Stop, show current state
+- If "yes": Continue to Phase 3
+
+---
+
+#### Phase 3: Generate Design
+
+Invoke `/kiro-spec-design {feature-name} -y` via the Skill tool. The `-y` flag auto-approves requirements.
+
+Wait for completion. IGNORE any "Next Step" message.
+
+**Output Progress**: "Phase 3/4 complete: Design generated"
+
+**Automatic Mode**: IMMEDIATELY continue to Phase 4.
+
+**Interactive Mode**: Prompt "Continue to tasks generation? (yes/no)"
+- If "no": Stop, show current state
+- If "yes": Continue to Phase 4
+
+---
+
+#### Phase 4: Generate Tasks
+
+Invoke `/kiro-spec-tasks {feature-name} -y` via the Skill tool.
+
+Note: `-y` flag auto-approves requirements, design, and tasks.
+
+Wait for completion.
+
+**Output Progress**: "Phase 4/4 complete: Tasks generated"
+
+#### Final Sanity Review
+
+After Phase 4, run a lightweight sanity review before claiming completion.
+
+- Review `requirements.md`, `design.md`, and `tasks.md` directly from disk. If `brief.md` exists, use it only as supporting context.
+- Prefer a fresh review subagent when the host supports it. Pass only file paths and the review objective; the reviewer should read the generated files itself.
+- Review focus:
+  - Do requirements, design, and tasks tell a coherent story?
+  - Are there obvious contradictions, missing prerequisites, or missing task coverage for required design work?
+  - Are `_Depends:_`, `_Boundary:_`, and `(P)` markers plausible for implementation?
+- If the review finds only task-plan-local issues, repair or update the generated `tasks.md` once, then re-run the sanity review.
+- If the review finds a real requirements/design gap or contradiction, stop and report follow-up instead of claiming the quick spec is implementation-ready.
+
+**All 4 phases plus sanity review complete.**
+
+Output final completion summary (see Output Description section) and exit.
+
+---
+
+## Important Constraints
+
+### Error Handling
+- Any phase failure stops the workflow
+- Display error and current state
+- Suggest manual recovery command
+
+</instructions>
+
+## Output Description
+
+### Mode Banners
+
+**Interactive Mode**:
+```
+Quick Spec Generation (Interactive Mode)
+
+You will be prompted at each phase.
+Note: Skips gap analysis and design validation.
+```
+
+**Automatic Mode**:
+```
+Quick Spec Generation (Automatic Mode)
+
+All phases execute automatically without prompts.
+Note: Skips optional validations (gap analysis, design review) and user approval prompts. Internal review gates still run.
+Final sanity review still runs.
+```
+
+### Intermediate Output
+
+After each phase, show brief progress:
+```
+Spec initialized at .kiro/specs/{feature}/
+Requirements generated → Continuing to design...
+Design generated → Continuing to tasks...
+```
+
+### Final Completion Summary
+
+Provide output in the language specified in `spec.json`:
+
+```
+Quick Spec Generation Complete!
+
+Generated Files:
+- specs/{feature}/spec.json
+- specs/{feature}/requirements.md ({X} requirements)
+- specs/{feature}/design.md ({Y} components, {Z} endpoints)
+- specs/{feature}/tasks.md ({N} tasks)
+
+Skipped: /kiro-validate-gap, /kiro-validate-design
+
+Sanity review: PASSED | FOLLOW-UP REQUIRED
+
+Next Steps:
+1. Review generated specs (especially design.md)
+2. Optional: `/kiro-validate-gap {feature}`, `/kiro-validate-design {feature}`
+3. Start implementation: `/kiro-impl {feature}`
+```
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Template Missing**:
+- Check `.kiro/settings/templates/specs/` exists
+- Report specific missing file
+- Exit with error
+
+**Directory Creation Failed**:
+- Check permissions
+- Report error with path
+- Exit with error
+
+**Phase Execution Failed** (Phase 2-4):
+- Stop workflow
+- Show current state and completed phases
+- Suggest: "Continue manually from `/kiro-spec-{next-phase} {feature}`"
+
+**Sanity Review Failed**:
+- Stop workflow
+- Report the exact contradiction, missing prerequisite, or task-plan issue
+- Suggest targeted follow-up with `/kiro-spec-design {feature}`, `/kiro-spec-tasks {feature}`, or manual edits depending on the finding
+
+**User Cancellation** (Interactive Mode):
+- Stop gracefully
+- Show completed phases
+- Suggest manual continuation

+ 135 - 0
.claude/skills/kiro-spec-requirements/SKILL.md

@@ -0,0 +1,135 @@
+---
+name: kiro-spec-requirements
+description: Generate EARS-format requirements based on project description and steering context. Use when generating requirements from project description.
+allowed-tools: Read, Write, Edit, Glob, Grep, Agent, WebSearch, WebFetch, AskUserQuestion
+metadata:
+  shared-rules: "ears-format.md, requirements-review-gate.md"
+---
+
+# kiro-spec-requirements Skill
+
+## Core Mission
+- **Success Criteria**:
+  - Create complete requirements document aligned with steering context
+  - Follow the project's EARS patterns and constraints for all acceptance criteria
+  - Focus on core functionality without implementation details
+  - Make inclusion/exclusion boundaries explicit when scope could otherwise be misread
+  - Update metadata to track generation status
+
+## Execution Steps
+
+### Step 1: Gather Context
+
+If steering/spec context is already available from conversation, skip redundant file reads.
+Otherwise, load all necessary context:
+- Read `.kiro/specs/{feature}/spec.json` for language and metadata
+- Read `.kiro/specs/{feature}/brief.md` if it exists (discovery context: problem, approach, scope decisions, boundary candidates)
+- Read `.kiro/specs/{feature}/requirements.md` for project description
+- Core steering context: `product.md`, `tech.md`, `structure.md`
+- Additional steering files only when directly relevant to feature scope, user personas, business/domain rules, compliance/security constraints, operational constraints, or existing product boundaries
+- Relevant local agent skills or playbooks only when they clearly match the feature's host environment or use case and contain domain terminology or workflow rules that shape user-observable requirements
+
+### Step 2: Read Guidelines
+- Read `rules/ears-format.md` from this skill's directory for EARS syntax rules
+- Read `rules/requirements-review-gate.md` from this skill's directory for pre-write review criteria
+- Read `.kiro/settings/templates/specs/requirements.md` for document structure
+
+#### Parallel Research (subagent dispatch)
+
+The following research areas are independent. Decide the optimal decomposition based on project complexity -- split, merge, add, or skip subagents as needed.
+
+**Delegate to subagent via Agent tool** (keeps exploration out of main context):
+- **Codebase hints** (brownfield projects): Dispatch a subagent to explore existing implementations that inform requirement scope. Example prompt: "Explore this codebase for existing features related to [feature area]. Summarize: (1) what already exists, (2) relevant interfaces/APIs, (3) patterns that new requirements should align with. Return a summary under 150 lines."
+- **Domain research** (when external knowledge is needed): Dispatch a subagent for WebSearch/WebFetch to research domain-specific requirements, standards, or best practices. Return a concise findings summary.
+- **Additional steering and playbooks**: If many steering files or local agent playbooks exist, dispatch a subagent to scan them and return only the sections relevant to this feature.
+
+For greenfield projects with minimal codebase, skip subagent dispatch and load context directly.
+
+After all research completes, synthesize findings in main context before generating requirements.
+
+### Step 3: Generate Requirements Draft
+- Create initial requirements draft based on project description
+- Group related functionality into logical requirement areas
+- Apply EARS format to all acceptance criteria
+- Use language specified in spec.json
+- Preserve terminology continuity across phases:
+  - discovery = `Boundary Candidates`
+  - requirements = explicit inclusion/exclusion and adjacent expectations when needed
+  - design = `Boundary Commitments`
+  - tasks = `_Boundary:_`
+- If scope could be misread, add lightweight boundary context without introducing implementation or architecture ownership detail
+- Keep this as a draft until the review gate passes; do not write `requirements.md` yet
+
+### Step 4: Review Requirements Draft
+- Run the `Requirements Review Gate` from `rules/requirements-review-gate.md`
+- Review coverage, EARS compliance, ambiguity, adjacent expectations, and scope boundaries before finalizing
+- If issues are local to the draft, repair the requirements and review again
+- Keep the review bounded to at most 2 repair passes
+- If the draft exposes a real scope ambiguity or contradiction, stop and ask the user to clarify instead of writing guessed requirements
+
+### Step 5: Finalize and Update Metadata
+- Write `.kiro/specs/{feature}/requirements.md` only after the requirements review gate passes
+- Set `phase: "requirements-generated"`
+- Set `approvals.requirements.generated: true`
+- Update `updated_at` timestamp
+
+## Important Constraints
+
+### Requirements Scope: WHAT, not HOW
+Requirements describe user-observable behavior, not implementation. Use this to decide what belongs here vs. in design:
+
+**Ask the user about (requirements scope):**
+- Functional scope — what is included and what is excluded
+- User-observable behavior — "when X happens, what should the user see/experience?"
+- Business rules and edge cases — limits, error conditions, special cases
+- Non-functional requirements visible to users — response time expectations, availability, security level
+- Adjacent expectations only when they change user-visible behavior or operator expectations — what this feature relies on, and what it explicitly does not own
+
+**Do not ask about (design scope — defer to design phase):**
+- Technology stack choices (database, framework, language)
+- Architecture patterns (microservices, monolith, event-driven)
+- API design, data models, internal component structure
+- How to achieve non-functional requirements (caching strategy, scaling approach)
+- Internal ownership mapping, component seams, or implementation boundaries that belong in design
+
+**Litmus test**: If an EARS acceptance criterion can be written without mentioning any technology, it belongs in requirements. If it requires a technology choice, it belongs in design.
+
+### Other Constraints
+- Each requirement must be testable and unambiguous. If the project description leaves room for multiple interpretations on scope, behavior, or boundary conditions, ask the user to clarify before generating that requirement. Ask as many questions as needed; do not generate requirements that contain your own assumptions.
+- Choose appropriate subject for EARS statements (system/service name for software)
+- Requirement headings in requirements.md MUST include a leading numeric ID only (for example: "Requirement 1", "1.", "2 Feature ..."); do not use alphabetic IDs like "Requirement A".
+
+## Output Description
+Provide output in the language specified in spec.json with:
+
+1. **Generated Requirements Summary**: Brief overview of major requirement areas (3-5 bullets)
+2. **Document Status**: Confirm requirements.md updated and spec.json metadata updated
+3. **Review Gate**: Confirm the requirements review gate passed
+4. **Next Steps**: Guide user on how to proceed (approve and continue, or modify)
+
+**Format Requirements**:
+- Use Markdown headings for clarity
+- Include file paths in code blocks
+- Keep summary concise (under 300 words)
+
+## Safety & Fallback
+
+### Error Scenarios
+- **Missing Project Description**: If requirements.md lacks project description, ask user for feature details
+- **Template Missing**: If template files don't exist, use inline fallback structure with warning
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+- **Incomplete Requirements**: After generation, explicitly ask user if requirements cover all expected functionality
+- **Steering Directory Empty**: Warn user that project context is missing and may affect requirement quality
+- **Non-numeric Requirement Headings**: If existing headings do not include a leading numeric ID (for example, they use "Requirement A"), normalize them to numeric IDs and keep that mapping consistent (never mix numeric and alphabetic labels).
+
+### Next Phase: Design Generation
+
+**If Requirements Approved**:
+- **Optional Gap Analysis** (for existing codebases):
+  - Run `/kiro-validate-gap {feature}` to analyze implementation gap
+  - Recommended for brownfield projects; skip for greenfield
+- Run `/kiro-spec-design {feature}` to proceed to design phase
+- Or `/kiro-spec-design {feature} -y` to auto-approve requirements and proceed directly
+
+**If Modifications Needed**:
+- Provide feedback and re-run `/kiro-spec-requirements {feature}`

+ 49 - 0
.claude/skills/kiro-spec-requirements/rules/ears-format.md

@@ -0,0 +1,49 @@
+# EARS Format Guidelines
+
+## Overview
+EARS (Easy Approach to Requirements Syntax) is the standard format for acceptance criteria in spec-driven development.
+
+EARS patterns describe the logical structure of a requirement (condition + subject + response) and are not tied to any particular natural language.  
+All acceptance criteria should be written in the target language configured for the specification (for example, `spec.json.language` / `en`).  
+Keep EARS trigger keywords and fixed phrases in English (`When`, `If`, `While`, `Where`, `The [system] shall`) and localize only the variable parts (`[event]`, `[precondition]`, `[trigger]`, `[feature is included]`, `[response/action]`) into the target language. Do not interleave target-language text inside the trigger keywords or fixed English phrases themselves.
+
+## Primary EARS Patterns
+
+### 1. Event-Driven Requirements
+- **Pattern**: When [event], the [system] shall [response/action]
+- **Use Case**: Responses to specific events or triggers
+- **Example**: When user clicks checkout button, the Checkout Service shall validate cart contents
+
+### 2. State-Driven Requirements
+- **Pattern**: While [precondition], the [system] shall [response/action]
+- **Use Case**: Behavior dependent on system state or preconditions
+- **Example**: While payment is processing, the Checkout Service shall display loading indicator
+
+### 3. Unwanted Behavior Requirements
+- **Pattern**: If [trigger], the [system] shall [response/action]
+- **Use Case**: System response to errors, failures, or undesired situations
+- **Example**: If an invalid credit card number is entered, the Checkout Service shall display an error message
+
+### 4. Optional Feature Requirements
+- **Pattern**: Where [feature is included], the [system] shall [response/action]
+- **Use Case**: Requirements for optional or conditional features
+- **Example**: Where the car has a sunroof, the car shall have a sunroof control panel
+
+### 5. Ubiquitous Requirements
+- **Pattern**: The [system] shall [response/action]
+- **Use Case**: Always-active requirements and fundamental system properties
+- **Example**: The mobile phone shall have a mass of less than 100 grams
+
+## Combined Patterns
+- While [precondition], when [event], the [system] shall [response/action]
+- When [event] and [additional condition], the [system] shall [response/action]
+
+## Subject Selection Guidelines
+- **Software Projects**: Use concrete system/service name (e.g., "Checkout Service", "User Auth Module")
+- **Process/Workflow**: Use responsible team/role (e.g., "Support Team", "Review Process")
+- **Non-Software**: Use appropriate subject (e.g., "Marketing Campaign", "Documentation")
+
+## Quality Criteria
+- Requirements must be testable, verifiable, and describe a single behavior.
+- Use objective language: "shall" for mandatory behavior, "should" for recommendations; avoid ambiguous terms.
+- Follow EARS syntax: [condition], the [system] shall [response/action].

+ 51 - 0
.claude/skills/kiro-spec-requirements/rules/requirements-review-gate.md

@@ -0,0 +1,51 @@
+# Requirements Review Gate
+
+Before writing `requirements.md`, review the draft requirements and repair local issues until the draft passes or a true scope ambiguity is discovered.
+
+## Boundary Continuity
+
+Use boundary terminology consistently across phases without turning requirements into design:
+
+- **Discovery** identifies `Boundary Candidates`
+- **Requirements** make inclusion, exclusion, and adjacent expectations explicit when scope could be misread
+- **Design** turns those into `Boundary Commitments`
+- **Tasks** use `_Boundary:_` to constrain executable work
+
+Requirements should clarify the feature boundary in user- or operator-observable terms, not in architecture ownership or implementation detail.
+
+## Scope and Coverage Review
+
+- The draft must cover the feature's core user journeys, major scope boundaries, primary error cases, and meaningful edge conditions that are visible to the user or operator.
+- If the feature touches adjacent systems, specs, or workflows, the draft must make clear what this feature expects from them and what it does not own when that distinction affects user-visible behavior or operator expectations.
+- Business/domain rules, compliance constraints, security/privacy expectations, and operational constraints that materially shape user-visible behavior must be reflected explicitly when they are in scope.
+- If coverage is missing because the draft is incomplete, repair the draft and review again.
+- If coverage cannot be completed cleanly because the project description or steering context is ambiguous, contradictory, or underspecified, stop and ask the user to clarify instead of guessing.
+
+## EARS and Testability Review
+
+- Every acceptance criterion must follow the EARS rules defined in `ears-format.md`.
+- Every requirement must be testable, observable, and specific enough that later design and validation can verify it.
+- Remove implementation details that belong in `design.md` rather than `requirements.md`.
+- Requirement headings must use numeric IDs only; do not mix numeric and alphabetic labels.
+
+## Structure and Quality Review
+
+- Group related behaviors into coherent requirement areas without duplicating the same obligation across multiple sections.
+- Make inclusion/exclusion boundaries explicit when the feature scope could otherwise be misread.
+- Keep boundary statements lightweight and observable: describe feature responsibility and adjacent expectations without prescribing components, layers, or internal ownership.
+- Ensure non-functional expectations remain user-observable or operator-observable; move technology choices and internal architecture detail out of requirements.
+- Normalize vague language such as "fast", "robust", or "secure" into concrete user-visible expectations whenever the source material supports it.
+
+## Mechanical Checks
+
+Before applying judgment, verify these mechanically:
+- **Numeric IDs present**: Every requirement heading has a numeric ID (1, 1.1, 2, etc.). Scan the draft for headings without IDs.
+- **Acceptance criteria exist**: Every requirement has at least one EARS-format acceptance criterion. Scan for requirements with no EARS-pattern statements (a "When/If/While/Where" trigger form or the ubiquitous "The [system] shall" form).
+- **No implementation language**: Scan for technology-specific terms (database names, framework names, API patterns) that belong in design, not requirements. Flag any found.
+
+## Review Loop
+
+- Run mechanical checks first, then judgment-based review.
+- If issues are local to the draft, repair the draft and re-run the review gate.
+- Keep the loop bounded: no more than 2 review-and-repair passes before escalating a real ambiguity back to the user.
+- Write `requirements.md` only after the review gate passes.

+ 70 - 0
.claude/skills/kiro-spec-status/SKILL.md

@@ -0,0 +1,70 @@
+---
+name: kiro-spec-status
+description: Show specification status and progress
+allowed-tools: Read, Glob, Grep
+argument-hint: <feature-name>
+---
+
+# kiro-spec-status Skill
+
+## Core Mission
+- **Success Criteria**:
+  - Show current phase and completion status
+  - Identify next actions and blockers
+  - Provide clear visibility into progress
+  - Surface boundary readiness, upstream/downstream context, and likely revalidation needs when available
+
+## Execution Steps
+
+### Step 1: Load Spec Context
+- Read `.kiro/specs/$ARGUMENTS/spec.json` for metadata and phase status
+- Read `.kiro/specs/$ARGUMENTS/brief.md` if it exists
+- Read existing files: `requirements.md`, `design.md`, `tasks.md` (if they exist)
+- Check `.kiro/specs/$ARGUMENTS/` directory for available files
+- Read `.kiro/steering/roadmap.md` if it exists and this spec appears in it
+
+### Step 2: Analyze Status
+
+**Parse each phase**:
+- **Requirements**: Count requirements and acceptance criteria
+- **Design**: Check for architecture, components, diagrams, and whether boundary sections are present
+- **Tasks**: Count completed vs total tasks (parse `- [x]` vs `- [ ]`)
+- **Approvals**: Check approval status in spec.json
+- **Boundary context**:
+  - From brief.md: note `Boundary Candidates`, `Upstream / Downstream`, and `Existing Spec Touchpoints` if present
+  - From design.md: note `Boundary Commitments`, `Out of Boundary`, `Allowed Dependencies`, and `Revalidation Triggers` if present
+  - From roadmap.md: note upstream dependencies and whether this spec is adjacent to `Existing Spec Updates`
+- **Revalidation watchlist**:
+  - Identify downstream specs, neighboring existing-spec updates, or rollout-sensitive design notes that may need revalidation if this spec changes
+  - Call out when the current spec shape looks too broad and may want roadmap/design splitting instead of more local repair
+
+### Step 3: Generate Report
+
+Create report in the language specified in spec.json covering:
+1. **Current Phase & Progress**: Where the spec is in the workflow
+2. **Completion Status**: Percentage complete for each phase
+3. **Task Breakdown**: If tasks exist, show completed/remaining counts
+4. **Boundary Context**: Upstream/downstream, out-of-boundary, and allowed dependency notes when available
+5. **Revalidation Watchlist**: Downstream or adjacent work likely affected by changes to this spec
+6. **Next Actions**: What needs to be done next
+7. **Blockers**: Any issues preventing progress
+
+**Format**: Clear, scannable format with emojis for status
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Spec Not Found**:
+- **Message**: "No spec found for `$ARGUMENTS`. Check available specs in `.kiro/specs/`"
+- **Action**: List available spec directories
+
+**Incomplete Spec**:
+- **Warning**: Identify which files are missing
+- **Suggested Action**: Point to next phase command
+
+### List All Specs
+
+To see all available specs:
+- Run this skill with no argument (or with a wildcard) to list every spec
+- This shows all specs in `.kiro/specs/` along with their current status

+ 189 - 0
.claude/skills/kiro-spec-tasks/SKILL.md

@@ -0,0 +1,189 @@
+---
+name: kiro-spec-tasks
+description: Generate implementation tasks from requirements and design. Use when creating actionable task lists.
+allowed-tools: Read, Write, Edit, Glob, Grep, Agent
+argument-hint: <feature-name> [-y] [--sequential]
+metadata:
+  shared-rules: "tasks-generation.md, tasks-parallel-analysis.md"
+---
+
+# kiro-spec-tasks Skill
+
+## Core Mission
+- **Success Criteria**:
+  - All requirements mapped to specific tasks
+  - Tasks properly sized (1-3 hours each)
+  - Clear task progression with proper hierarchy
+  - Natural language descriptions focused on capabilities
+  - A lightweight task-plan sanity review confirms the task graph is executable before `tasks.md` is written
+
+## Execution Steps
+
+### Step 1: Gather Context
+
+If steering/spec context is already available from conversation, skip redundant file reads.
+Otherwise, load all necessary context:
+- `.kiro/specs/{feature}/spec.json`, `requirements.md`, `design.md`
+- `.kiro/specs/{feature}/tasks.md` (if exists, for merge mode)
+- Core steering context: `product.md`, `tech.md`, `structure.md`
+- Additional steering files only when directly relevant to requirements coverage, design boundaries, runtime prerequisites, or team conventions that affect task executability
+
+- Determine execution mode:
+  - `sequential = (sequential flag is true)`
+
+**Validate approvals**:
+- If auto-approve flag (`-y`) is true: Auto-approve requirements and design in spec.json. Tasks approval is also handled automatically in Step 4.
+- Otherwise: Verify both approved (stop if not, see Safety & Fallback)
+
+### Step 2: Generate Implementation Tasks
+
+- Read `rules/tasks-generation.md` from this skill's directory for principles
+- Read `rules/tasks-parallel-analysis.md` from this skill's directory for parallel judgment criteria
+- Read `.kiro/settings/templates/specs/tasks.md` for format (supports `(P)` markers)
+
+#### Parallel Research
+
+The following research areas are independent and can be executed in parallel:
+1. **Context loading**: Spec documents (requirements.md, design.md), steering files
+2. **Rules loading**: tasks-generation.md, tasks-parallel-analysis.md, tasks template
+
+After all parallel research completes, synthesize findings before generating tasks.
+
+**Generate task list following all rules**:
+- Use language specified in spec.json
+- Map all requirements to tasks and list numeric requirement IDs only (comma-separated) without descriptive suffixes, parentheses, translations, or free-form labels
+- Ensure all design components included
+- Verify task progression is logical and incremental
+- Ensure each executable sub-task includes at least one detail bullet that states what "done" looks like in observable terms
+- Keep normal implementation tasks within a single responsibility boundary; if work crosses boundaries, make it an explicit integration task
+- Apply `(P)` markers to tasks that satisfy parallel criteria when `!sequential`
+- Explicitly note dependencies preventing `(P)` when tasks appear parallel but are not safe
+- If sequential mode is true, omit `(P)` entirely
+- If existing tasks.md found, merge with new content
+
+### Step 3: Review Task Plan
+
+- Keep the draft task plan in working memory; do NOT write `tasks.md` yet
+- Run the `Task Plan Review Gate` from `rules/tasks-generation.md`
+- Review coverage:
+  - Every requirement ID appears in at least one task
+  - Every design component, contract, integration point, runtime prerequisite, and validation concern is represented
+- Review executability:
+  - Each sub-task is an executable 1-3 hour work unit
+  - Each sub-task has a verifiable deliverable
+  - Each executable sub-task includes an observable completion bullet
+  - No implicit prerequisites remain hidden
+  - `_Depends:_`, `_Boundary:_`, and `(P)` markers still match the dependency graph and architecture boundaries
+- If issues are task-plan-local, repair the draft and re-run the review gate before writing
+- Keep the review bounded to at most 2 repair passes
+- If review exposes a real requirements/design gap or contradiction, stop and send the user back to requirements/design instead of inventing filler tasks
+
+### Step 3.5: Run Task-Graph Sanity Review
+
+Before writing `tasks.md`, run one lightweight independent sanity review of the task graph.
+
+- If fresh subagent dispatch is available, spawn one fresh review subagent for this step. Otherwise perform the same review in the current context.
+- Provide only file paths, the draft task plan, and merge context if an existing `tasks.md` is being updated. The reviewer should read `requirements.md`, `design.md`, and the task-generation rules directly instead of relying on a parent-synthesized coverage summary.
+- Check only:
+  - hidden prerequisites or missing setup tasks
+  - dependency or ordering mistakes
+  - boundary overlap or ambiguous ownership between tasks
+  - tasks that are too large, too vague, cross boundaries without being explicit integration tasks, or are missing a verifiable deliverable
+  - contradictions introduced between requirements, design, and the task graph
+- Return one verdict:
+  - `PASS`
+  - `NEEDS_FIXES`
+  - `RETURN_TO_DESIGN`
+- If `NEEDS_FIXES`, repair the draft once and re-run the sanity review one time.
+- If `RETURN_TO_DESIGN`, stop without writing `tasks.md` and point back to the exact gap in requirements/design.
+- Keep this bounded. Do not turn it into a second full planning cycle.
+
+### Step 4: Finalize
+
+**Write tasks.md**:
+- Create/update `.kiro/specs/{feature}/tasks.md`
+- Update spec.json metadata:
+  - Set `phase: "tasks-generated"`
+  - Set `approvals.tasks.generated: true, approved: false`
+  - Set `approvals.requirements.approved: true`
+  - Set `approvals.design.approved: true`
+  - Update `updated_at` timestamp
+
+**Approval**:
+- If auto-approve flag (`-y`) is true:
+  - Set `approvals.tasks.approved: true` in spec.json
+  - Display task summary (task count, major groups, parallel markers)
+  - Respond: "Tasks generated and auto-approved. Start implementation with `/kiro-impl {feature}`"
+- Otherwise (interactive):
+  - Display a summary of the generated tasks (task count, major groups, parallel markers)
+  - Ask the user: "Tasks generated. Approve and proceed to implementation?"
+  - If the user approves:
+    - Set `approvals.tasks.approved: true` in spec.json
+    - Respond: "Tasks approved. Start implementation with `/kiro-impl {feature}`"
+  - If the user wants changes:
+    - Keep `approvals.tasks.approved: false`
+    - Respond with guidance on what to adjust and re-run
+
+## Critical Constraints
+- **Task Integration**: Every task must connect to the system (no orphaned work)
+- **Boundary annotations**: Required for `(P)` tasks, recommended for all (`_Boundary: ComponentName_`)
+- **Explicit dependencies**: Cross-boundary non-obvious dependencies declared with `_Depends: X.X_`
+- **Executable deliverable granularity**: Each task must produce a verifiable deliverable (file, endpoint, UI component, config). Infrastructure tasks (project scaffolding, manifest, host integration, build config) must be explicit — never assume they exist
+- **Observable done state**: Each executable sub-task must include at least one detail bullet that makes the completed state visible without adding new bookkeeping fields
+- **No implicit prerequisites**: If a task requires a runtime, SDK, framework setup, or config file, that setup must be a separate preceding task
+
+## Output Description
+
+Provide brief summary in the language specified in spec.json:
+
+1. **Status**: Confirm tasks generated at `.kiro/specs/{feature}/tasks.md`
+2. **Task Summary**:
+   - Total: X major tasks, Y sub-tasks
+   - All Z requirements covered
+   - Average task size: 1-3 hours per sub-task
+3. **Quality Validation**:
+   - All requirements mapped to tasks
+   - Design coverage and runtime prerequisites reviewed
+   - Task dependencies verified
+   - Task plan review gate passed
+   - Independent task-graph sanity review passed
+   - Testing tasks included
+4. **Next Action**: Review tasks and proceed when ready
+
+**Format**: Concise (under 200 words)
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Requirements or Design Not Approved**:
+- **Stop Execution**: Cannot proceed without approved requirements and design
+- **User Message**: "Requirements and design must be approved before task generation"
+- **Suggested Action**: "Run `/kiro-spec-tasks {feature} -y` to auto-approve all (requirements, design, and tasks) and proceed"
+
+**Missing Requirements or Design**:
+- **Stop Execution**: Both documents must exist
+- **User Message**: "Missing requirements.md or design.md at `.kiro/specs/{feature}/`"
+- **Suggested Action**: "Complete requirements and design phases first"
+
+**Incomplete Requirements Coverage**:
+- **Warning**: "Not all requirements mapped to tasks. Review coverage."
+- **User Action Required**: Confirm intentional gaps or regenerate tasks
+
+**Spec Gap Found During Task Review**:
+- **Stop Execution**: Do not write a patched-over `tasks.md`
+- **User Message**: "Requirements/design do not provide enough clear coverage to generate an executable task plan"
+- **Suggested Action**: "Refine requirements.md or design.md, then re-run `/kiro-spec-tasks {feature}`"
+
+**Template/Rules Missing**:
+- **User Message**: "Template or rules files missing in `.kiro/settings/`"
+- **Fallback**: Use inline basic structure with warning
+- **Suggested Action**: "Check repository setup or restore template files"
+
+**Missing Numeric Requirement IDs**:
+- **Stop Execution**: All requirements in requirements.md MUST have numeric IDs. If any requirement lacks a numeric ID, stop and request that requirements.md be fixed before generating tasks.
+
+### Next Phase: Implementation
+
+Tasks are approved in Step 4 via user confirmation. Once approved:
+- Autonomous implementation: `/kiro-impl {feature}`
+- Specific tasks only: `/kiro-impl {feature} 1.1,1.2`

+ 222 - 0
.claude/skills/kiro-spec-tasks/rules/tasks-generation.md

@@ -0,0 +1,222 @@
+# Task Generation Rules
+
+## Core Principles
+
+### 1. Natural Language Descriptions
+Focus on capabilities and outcomes, not code structure.
+
+**Describe**:
+- What functionality to achieve
+- Business logic and behavior
+- Features and capabilities
+- Domain language and concepts
+- Data relationships and workflows
+
+**Avoid**:
+- File paths and directory structure
+- Function/method names and signatures
+- Type definitions and interfaces
+- Class names and API contracts
+- Specific data structures
+
+**Rationale**: Implementation details (files, methods, types) are defined in design.md. Tasks describe the functional work to be done.
+
+### 2. Task Ordering Principle
+
+**Order implies dependency**: Task N implicitly depends on all tasks before it. This is the primary dependency mechanism.
+
+**Tasks must follow this phase order**:
+1. **Foundation**: Environment setup, test infrastructure, shared utilities, database schema, configuration
+2. **Core**: Primary feature implementation (parallel-capable tasks grouped here)
+3. **Integration**: Wiring components together, cross-boundary connections
+4. **Validation**: E2E tests, edge cases, regression checks
+
+**Rationale**: Foundation work unblocks everything else. Placing setup tasks early prevents downstream blocking. Core tasks can often run in parallel because foundation is already complete.
+
+### 3. Task Integration & Progression
+
+**Every task must**:
+- Build on previous outputs (no orphaned code)
+- Connect to the overall system (no hanging features)
+- Progress incrementally (no big jumps in complexity)
+- Respect architecture boundaries defined in design.md (Architecture Pattern & Boundary Map)
+- Honor interface contracts documented in design.md
+- Use major task summaries sparingly—omit detail bullets if the work is fully captured by child tasks.
+
+**End with integration tasks** to wire everything together.
+
+### 4. Dependency Declaration
+
+**Default**: Sequential ordering handles most dependencies (task N depends on tasks before it).
+
+**Explicit declaration required when**:
+- A task depends on a specific task in a different major-task group (cross-boundary)
+- The dependency is non-obvious from ordering alone
+- A task can skip ahead of its position (declared via `(P)`) but still needs specific prior work
+
+**Format**: `_Depends: 1.2, 2.3_` — placed alongside `_Requirements:_` in task detail sections.
+
+**Do not over-annotate**: If a task simply depends on the task directly before it, ordering alone is sufficient.
+
+### 5. Boundary Scope
+
+**Each task should declare its component boundary** using design.md component/module names:
+- `_Boundary: AuthService_` or `_Boundary: API Layer, UserRepository_`
+- Helps validate parallel safety: tasks with non-overlapping boundaries are parallel candidates
+- Helps agents understand scope: what to touch and what not to touch
+
+**When to use**: Required for tasks marked `(P)` to validate parallel safety. Omit for sequential tasks where scope is obvious from the description.
+
+**Boundary rule**:
+- Each executable task should stay within a single responsibility boundary
+- If work must cross boundaries, make it an explicit integration task rather than a normal implementation task
+- Do not hide cross-boundary coordination inside a task that appears local
+
+### 6. Flexible Task Sizing
+
+**Guidelines**:
+- **Major tasks**: As many sub-tasks as logically needed (group by cohesion)
+- **Sub-tasks**: 1-3 hours each, 3-10 details per sub-task
+- Balance between too granular and too broad
+
+**Don't force arbitrary numbers** - let logical grouping determine structure.
+
+### 7. Requirements Mapping
+
+**End each task detail section with**:
+- `_Requirements: X.X, Y.Y_` listing **only numeric requirement IDs** (comma-separated). Never append descriptive text, parentheses, translations, or free-form labels.
+- For cross-cutting requirements, list every relevant requirement ID. All requirements MUST have numeric IDs in requirements.md. If an ID is missing, stop and correct requirements.md before generating tasks.
+- Reference components/interfaces from design.md when helpful (e.g., `_Contracts: AuthService API`)
+
+### 7.5 Observable Completion
+
+**Each executable task must include at least one detail bullet that describes the observable completed state**:
+- Phrase it as a deliverable, runtime behavior, persisted state, UI state, endpoint behavior, test result, or integration outcome
+- Avoid vague bullets like "implement support", "wire things up", or "handle logic" unless paired with a concrete observable result
+- Prefer making one detail bullet clearly answer: "What will be true when this task is done?"
+- Keep this within the existing task body; do not add extra bookkeeping fields
+
+### 8. Code-Only Focus
+
+**Include ONLY**:
+- Coding tasks (implementation)
+- Testing tasks (unit, integration, E2E)
+- Technical setup tasks (infrastructure, configuration)
+
+**Exclude**:
+- Deployment tasks
+- Documentation tasks
+- User testing
+- Marketing/business activities
+
+## Task Plan Review Gate
+
+Before writing `tasks.md`, review the draft task plan and repair local issues until the plan passes or a true spec gap is discovered.
+
+### Coverage Review
+
+- Every requirement ID from `requirements.md` must appear in at least one task.
+- Every design component, interface/contract, integration point, runtime prerequisite, and validation concern from `design.md` must be represented by at least one task.
+- If coverage is missing because the task plan is incomplete, repair the draft tasks and review again.
+- If coverage cannot be added cleanly because requirements or design are ambiguous, contradictory, or underspecified, stop and return to the requirements/design phase instead of papering over the gap in `tasks.md`.
+
+### Executability Review
+
+- Every sub-task must be executable as written, usually within 1-3 hours.
+- Every sub-task must produce a verifiable deliverable (behavior, artifact, endpoint, UI state, config, migration, test, or integration result).
+- Every executable sub-task must include at least one detail bullet that states the observable completion condition.
+- Split tasks that combine multiple independently verifiable outcomes.
+- Split tasks that combine multiple responsibility boundaries unless they are explicit integration tasks.
+- If many tasks require broad `_Boundary:_` scopes or repeated cross-boundary coordination, stop and return to design or roadmap decomposition instead of forcing the spec through task generation.
+- Merge or collapse tasks that are too small, bookkeeping-only, or not meaningful execution units.
+- Make implicit prerequisites explicit as preceding tasks.
+- Re-check `_Depends:_`, `_Boundary:_`, and `(P)` markers after edits so concurrency claims still match the design boundaries and dependency graph.
+
+### Review Loop
+
+- Run the review gate on the draft task plan before writing `tasks.md`.
+- If issues are task-plan-local, repair the draft and re-run the review gate.
+- Keep the loop bounded: no more than 2 review-and-repair passes before escalating a real spec gap.
+- Write `tasks.md` only after the review gate passes.
+
+### Optional Test Coverage Tasks
+
+- When the design already guarantees functional coverage and rapid MVP delivery is prioritized, mark purely test-oriented follow-up work (e.g., baseline rendering/unit tests) as **optional** using the `- [ ]*` checkbox form.
+- Only apply the optional marker when the sub-task directly references acceptance criteria from requirements.md in its detail bullets.
+- Never mark implementation work or integration-critical verification as optional—reserve `*` for auxiliary/deferrable test coverage that can be revisited post-MVP.
+
+## Task Hierarchy Rules
+
+### Maximum 2 Levels
+- **Level 1**: Major tasks (1, 2, 3, 4...)
+- **Level 2**: Sub-tasks (1.1, 1.2, 2.1, 2.2...)
+- **No deeper nesting** (no 1.1.1)
+- If a major task would contain only a single actionable item, collapse the structure and promote the sub-task to the major level (e.g., replace `1.1` with `1.`).
+- When a major task exists purely as a container, keep the checkbox description concise and avoid duplicating detailed bullets—reserve specifics for its sub-tasks.
+
+### Sequential Numbering
+- Major tasks MUST increment: 1, 2, 3, 4, 5...
+- Sub-tasks reset per major task: 1.1, 1.2, then 2.1, 2.2...
+- Never repeat major task numbers
+
+### Parallel Analysis (default)
+- Assume parallel analysis is enabled unless explicitly disabled (e.g. `--sequential` flag).
+- `(P)` means: this task has no dependency on its immediately preceding peers and can run concurrently with them.
+- Identify tasks that can run concurrently when **all** conditions hold:
+  - No data dependency on other pending tasks
+  - No shared file or resource contention
+  - No prerequisite review/approval from another task
+  - `_Boundary:_` annotations confirm non-overlapping component scopes
+- Foundation-phase tasks (see Task Ordering Principle) are rarely `(P)` — they establish shared prerequisites.
+- Core-phase tasks are the primary candidates for `(P)` since foundation is already complete.
+- Validate that identified parallel tasks operate within separate boundaries defined in the Architecture Pattern & Boundary Map.
+- Confirm API/event contracts from design.md do not overlap in ways that cause conflicts.
+- `(P)` tasks with cross-boundary dependencies must declare `_Depends: X.X_` explicitly.
+- Append `(P)` immediately after the task number for each parallel-capable task:
+  - Example: `- [ ] 2.1 (P) Build background worker`
+  - Apply to both major tasks and sub-tasks when appropriate.
+- If sequential mode is requested, omit `(P)` markers entirely.
+- Group parallel tasks logically (same parent when possible) and highlight any ordering caveats in detail bullets.
+- Explicitly call out dependencies that prevent `(P)` even when tasks look similar.
+
+### Checkbox Format
+```markdown
+- [ ] 1. Foundation: environment and test infrastructure setup
+- [ ] 1.1 Sub-task description
+  - Detail item 1
+  - Detail item 2
+  - Observable completion condition
+  - _Requirements: X.X_
+
+- [ ] 2. Core feature A
+- [ ] 2.1 (P) Sub-task description
+  - Detail items...
+  - Observable completion condition
+  - _Requirements: Y.Y_
+  - _Boundary: AuthService_
+
+- [ ] 2.2 (P) Sub-task description
+  - Detail items...
+  - Observable completion condition
+  - _Requirements: Z.Z_
+  - _Boundary: UserRepository_
+
+- [ ] 3. Integration and wiring
+- [ ] 3.1 Sub-task description
+  - Detail items...
+  - Observable completion condition
+  - _Depends: 2.1, 2.2_
+  - _Requirements: W.W_
+```
+
+## Requirements Coverage
+
+**Mandatory Check**:
+- ALL requirements from requirements.md MUST be covered
+- Cross-reference every requirement ID with task mappings
+- If gaps found: Return to requirements or design phase
+- No requirement should be left without corresponding tasks
+
+Use `N.M`-style numeric requirement IDs where `N` is the top-level requirement number from requirements.md (for example, Requirement 1 → 1.1, 1.2; Requirement 2 → 2.1, 2.2), and `M` is a local index within that requirement group.
+
+Document any intentionally deferred requirements with rationale.

+ 41 - 0
.claude/skills/kiro-spec-tasks/rules/tasks-parallel-analysis.md

@@ -0,0 +1,41 @@
+# Parallel Task Analysis Rules
+
+## Purpose
+Provide a consistent way to identify implementation tasks that can be safely executed in parallel while generating `tasks.md`.
+
+## Relationship to Task Ordering
+
+`(P)` means: this task has no dependency on its immediately preceding peers and can run concurrently with them. The Task Ordering Principle (see tasks-generation.md) ensures Foundation-phase tasks run first, making Core-phase tasks the primary `(P)` candidates.
+
+## When to Consider Tasks Parallel
+Only mark a task as parallel-capable when **all** of the following are true:
+
+1. **No data dependency** on pending tasks.
+2. **No conflicting files or shared mutable resources** are touched.
+3. **No prerequisite review/approval** from another task is required beforehand.
+4. **Foundation work complete**: Environment/setup work needed by this task is already satisfied by earlier Foundation-phase tasks.
+5. **Non-overlapping boundaries**: `_Boundary:_` annotations confirm tasks operate on separate components.
+
+## Marking Convention
+- Append `(P)` immediately after the numeric identifier for each qualifying task.
+  - Example: `- [ ] 2.1 (P) Build background worker for emails`
+- Apply `(P)` to both major tasks and sub-tasks when appropriate.
+- If sequential execution is requested (e.g. via `--sequential` flag), omit `(P)` markers entirely.
+- Keep `(P)` **outside** of checkbox brackets to avoid confusion with completion state.
+
+## Grouping & Ordering Guidelines
+- Group parallel tasks under the same parent whenever the work belongs to the same theme.
+- List obvious prerequisites or caveats in the detail bullets (e.g., "Requires schema migration from 1.2").
+- When two tasks look similar but are not parallel-safe, call out the blocking dependency explicitly.
+- Skip marking container-only major tasks (those without their own actionable detail bullets) with `(P)`—evaluate parallel execution at the sub-task level instead.
+
+## Quality Checklist
+Before marking a task with `(P)`, ensure you have:
+
+- Verified that running this task concurrently will not create merge or deployment conflicts.
+- Confirmed `_Boundary:_` annotations show non-overlapping component scopes.
+- Captured any shared state expectations in the detail bullets.
+- Confirmed that the implementation can be tested independently.
+- Added `_Depends: X.X_` if this `(P)` task still requires specific prior work from a different major-task group.
+
+If any check fails, **do not** mark the task with `(P)` and explain the dependency in the task details.

+ 37 - 21
.claude/commands/kiro/steering-custom.md → .claude/skills/kiro-steering-custom/SKILL.md

@@ -1,11 +1,17 @@
 ---
-description: Create custom steering documents for specialized project contexts
-allowed-tools: Bash, Read, Write, Edit, MultiEdit, Glob, Grep, LS
+name: kiro-steering-custom
+description: Create custom steering documents for specialized project contexts. Use when creating domain-specific steering files.
+allowed-tools: Read, Write, Edit, Glob, Grep, Bash
+metadata:
+  shared-rules: "steering-principles.md"
 ---
 
-# Kiro Custom Steering Creation
+# kiro-steering-custom Skill
 
-<background_information>
+## Role
+You are a specialized skill for creating custom steering documents beyond core files (product, tech, structure).
+
+## Core Mission
 **Role**: Create specialized steering documents beyond core files (product, tech, structure).
 
 **Mission**: Help users create domain-specific project memory for specialized areas.
@@ -14,9 +20,16 @@ allowed-tools: Bash, Read, Write, Edit, MultiEdit, Glob, Grep, LS
 - Custom steering captures specialized patterns
 - Follows same granularity principles as core steering
 - Provides clear value for specific domain
-</background_information>
 
-<instructions>
+## Execution Steps
+
+### Step 1: Gather Context
+
+If steering context is already available from conversation, skip redundant file reads.
+Otherwise:
+- Check `.kiro/settings/templates/steering-custom/` for available templates
+- Read `rules/steering-principles.md` from this skill's directory for steering principles
+
 ## Workflow
 
 1. **Ask user** for custom steering needs:
@@ -28,13 +41,18 @@ allowed-tools: Bash, Read, Write, Edit, MultiEdit, Glob, Grep, LS
    - Use as starting point, customize based on project
 
 3. **Analyze codebase** (JIT) for relevant patterns:
-   - **Glob** for related files
-   - **Read** for existing implementations
-   - **Grep** for specific patterns
+
+#### Parallel Research
+
+The following research areas are independent and can be executed in parallel:
+1. **Template & principles**: Load matching template and steering-principles.md
+2. **Domain patterns**: Analyze codebase for domain-specific patterns using Glob/Grep/Read
+
+After all parallel research completes, synthesize findings for steering document.
 
 4. **Generate custom steering**:
    - Follow template structure if available
-   - Apply principles from `.kiro/settings/rules/steering-principles.md`
+   - Apply principles from `rules/steering-principles.md` from this skill's directory
    - Focus on patterns, not exhaustive lists
    - Keep to 100-200 lines (2-3 minute read)
 
@@ -56,7 +74,7 @@ Load template when needed, customize for project.
 
 ## Steering Principles
 
-From `.kiro/settings/rules/steering-principles.md`:
+From `rules/steering-principles.md` (in this skill's directory):
 
 - **Patterns over lists**: Document patterns, not every file/component
 - **Single domain**: One topic per file
@@ -64,23 +82,21 @@ From `.kiro/settings/rules/steering-principles.md`:
 - **Maintainable size**: 100-200 lines typical
 - **Security first**: Never include secrets or sensitive data
 
-</instructions>
-
-## Tool guidance
+## Tool Guidance
 
 - **Read**: Load template, analyze existing code
 - **Glob**: Find related files for pattern analysis
 - **Grep**: Search for specific patterns
-- **LS**: Understand relevant structure
+- **Bash** with `ls`: Understand relevant structure
 
 **JIT Strategy**: Load template only when creating that type of steering.
 
-## Output description
+## Output Description
 
 Chat summary with file location (file created directly).
 
 ```
-Custom Steering Created
+Custom Steering Created
 
 ## Created:
 - .kiro/steering/api-standards.md
@@ -102,13 +118,13 @@ Review and customize as needed.
 ## Examples
 
 ### Success: API Standards
-**Input**: "Create API standards steering"  
-**Action**: Load template, analyze src/api/, extract patterns  
+**Input**: "Create API standards steering"
+**Action**: Load template, analyze src/api/, extract patterns
 **Output**: api-standards.md with project-specific REST conventions
 
 ### Success: Testing Strategy
-**Input**: "Document our testing approach"  
-**Action**: Load template, analyze test files, extract patterns  
+**Input**: "Document our testing approach"
+**Action**: Load template, analyze test files, extract patterns
 **Output**: testing.md with test organization and mocking strategies
 
 ## Safety & Fallback

+ 90 - 0
.claude/skills/kiro-steering-custom/rules/steering-principles.md

@@ -0,0 +1,90 @@
+# Steering Principles
+
+Steering files are **project memory**, not exhaustive specifications.
+
+---
+
+## Content Granularity
+
+### Golden Rule
+> "If new code follows existing patterns, steering shouldn't need updating."
+
+### ✅ Document
+- Organizational patterns (feature-first, layered)
+- Naming conventions (PascalCase rules)
+- Import strategies (absolute vs relative)
+- Architectural decisions (state management)
+- Technology standards (key frameworks)
+
+### ❌ Avoid
+- Complete file listings
+- Every component description
+- All dependencies
+- Implementation details
+- Agent-specific tooling directories (e.g. `.cursor/`, `.gemini/`, `.claude/`)
+- Detailed documentation of `.kiro/` metadata directories (settings, automation)
+
+### Example Comparison
+
+**Bad** (Specification-like):
+```markdown
+- /components/Button.tsx - Primary button with variants
+- /components/Input.tsx - Text input with validation
+- /components/Modal.tsx - Modal dialog
+... (50+ files)
+```
+
+**Good** (Project Memory):
+```markdown
+## UI Components (`/components/ui/`)
+Reusable, design-system aligned primitives
+- Named by function (Button, Input, Modal)
+- Export component + TypeScript interface
+- No business logic
+```
+
+---
+
+## Security
+
+Never include:
+- API keys, passwords, credentials
+- Database URLs, internal IPs
+- Secrets or sensitive data
+
+---
+
+## Quality Standards
+
+- **Single domain**: One topic per file
+- **Concrete examples**: Show patterns with code
+- **Explain rationale**: Why decisions were made
+- **Maintainable size**: 100-200 lines typical
+
+---
+
+## Preservation (when updating)
+
+- Preserve user sections and custom examples
+- Additive by default (add, don't replace)
+- Add `updated_at` timestamp
+- Note why changes were made
+
+---
+
+## Notes
+
+- Templates are starting points, customize as needed
+- Follow same granularity principles as core steering
+- All steering files loaded as project memory
+- Light references to `.kiro/specs/` and `.kiro/steering/` are acceptable; avoid other `.kiro/` directories
+- Custom files equally important as core files
+
+---
+
+## File-Specific Focus
+
+- **product.md**: Purpose, value, business context (not exhaustive features)
+- **tech.md**: Key frameworks, standards, conventions (not all dependencies)
+- **structure.md**: Organization patterns, naming rules (not directory trees)
+- **Custom files**: Specialized patterns (API, testing, security, etc.)

+ 44 - 27
.claude/commands/kiro/steering.md → .claude/skills/kiro-steering/SKILL.md

@@ -1,11 +1,17 @@
 ---
-description: Manage .kiro/steering/ as persistent project knowledge
-allowed-tools: Bash, Read, Write, Edit, MultiEdit, Glob, Grep, LS
+name: kiro-steering
+description: Maintain .kiro/steering/ as persistent project memory (bootstrap/sync). Use when initializing or updating steering documents.
+allowed-tools: Read, Write, Edit, Glob, Grep, Bash
+metadata:
+  shared-rules: "steering-principles.md"
 ---
 
-# Kiro Steering Management
+# kiro-steering Skill
 
-<background_information>
+## Role
+You are a specialized skill for maintaining `.kiro/steering/` as persistent project memory.
+
+## Core Mission
 **Role**: Maintain `.kiro/steering/` as persistent project memory.
 
 **Mission**:
@@ -17,14 +23,22 @@ allowed-tools: Bash, Read, Write, Edit, MultiEdit, Glob, Grep, LS
 - Steering captures patterns and principles, not exhaustive lists
 - Code drift detected and reported
 - All `.kiro/steering/*.md` treated equally (core + custom)
-</background_information>
 
-<instructions>
+## Execution Steps
+
+### Step 1: Gather Context
+
+If steering context is already available from conversation, skip redundant file reads.
+
+- For Bootstrap mode: Read templates from `.kiro/settings/templates/steering/`
+- For Sync mode: Read all existing `.kiro/steering/*.md` files
+- Read `rules/steering-principles.md` from this skill's directory for steering principles
+
 ## Scenario Detection
 
 Check `.kiro/steering/` status:
 
-**Bootstrap Mode**: Empty OR missing core files (product.md, tech.md, structure.md)  
+**Bootstrap Mode**: Empty OR missing core files (product.md, tech.md, structure.md)
 **Sync Mode**: All core files exist
 
 ---
@@ -33,15 +47,22 @@ Check `.kiro/steering/` status:
 
 1. Load templates from `.kiro/settings/templates/steering/`
 2. Analyze codebase (JIT):
-   - `glob_file_search` for source files
-   - `read_file` for README, package.json, etc.
-   - `grep` for patterns
+
+#### Parallel Research
+
+The following research areas are independent and can be executed in parallel:
+1. **Product analysis**: README, package.json, documentation files for purpose, value, core capabilities
+2. **Tech analysis**: Config files, dependencies, frameworks for technology patterns and decisions
+3. **Structure analysis**: Directory tree, naming conventions, import patterns for organization
+
+After all parallel research completes, synthesize patterns for steering files.
+
 3. Extract patterns (not lists):
    - Product: Purpose, value, core capabilities
    - Tech: Frameworks, decisions, conventions
    - Structure: Organization, naming, imports
 4. Generate steering files (follow templates)
-5. Load principles from `.kiro/settings/rules/steering-principles.md`
+5. Load principles from `rules/steering-principles.md` from this skill's directory
 6. Present summary for review
 
 **Focus**: Patterns that guide decisions, not catalogs of files/dependencies.
@@ -65,33 +86,31 @@ Check `.kiro/steering/` status:
 
 ## Granularity Principle
 
-From `.kiro/settings/rules/steering-principles.md`:
+From `rules/steering-principles.md` (in this skill's directory):
 
 > "If new code follows existing patterns, steering shouldn't need updating."
 
 Document patterns and principles, not exhaustive lists.
 
-**Bad**: List every file in directory tree  
+**Bad**: List every file in directory tree
 **Good**: Describe organization pattern with examples
 
-</instructions>
-
-## Tool guidance
+## Tool Guidance
 
-- `glob_file_search`: Find source/config files
-- `read_file`: Read steering, docs, configs
-- `grep`: Search patterns
-- `list_dir`: Analyze structure
+- `Glob`: Find source/config files
+- `Read`: Read steering, docs, configs
+- `Grep`: Search patterns
+- `Bash` with `ls`: Analyze structure
 
 **JIT Strategy**: Fetch when needed, not upfront.
 
-## Output description
+## Output Description
 
 Chat summary only (files updated directly).
 
 ### Bootstrap:
 ```
-Steering Created
+Steering Created
 
 ## Generated:
 - product.md: [Brief description]
@@ -103,7 +122,7 @@ Review and approve as Source of Truth.
 
 ### Sync:
 ```
-Steering Updated
+Steering Updated
 
 ## Changes:
 - tech.md: React 18 → 19
@@ -119,11 +138,11 @@ Review and approve as Source of Truth.
 ## Examples
 
 ### Bootstrap
-**Input**: Empty steering, React TypeScript project  
+**Input**: Empty steering, React TypeScript project
 **Output**: 3 files with patterns - "Feature-first", "TypeScript strict", "React 19"
 
 ### Sync
-**Input**: Existing steering, new `/api` directory  
+**Input**: Existing steering, new `/api` directory
 **Output**: Updated structure.md, flagged non-compliant files, suggested api-standards.md
 
 ## Safety & Fallback
@@ -138,6 +157,4 @@ Review and approve as Source of Truth.
 - Templates and principles are external for customization
 - Focus on patterns, not catalogs
 - "Golden Rule": New code following patterns shouldn't require steering updates
-- Avoid documenting agent-specific tooling directories (e.g. `.cursor/`, `.gemini/`, `.claude/`)
 - `.kiro/settings/` content should NOT be documented in steering files (settings are metadata, not project knowledge)
-- Light references to `.kiro/specs/` and `.kiro/steering/` are acceptable; avoid other `.kiro/` directories

+ 90 - 0
.claude/skills/kiro-steering/rules/steering-principles.md

@@ -0,0 +1,90 @@
+# Steering Principles
+
+Steering files are **project memory**, not exhaustive specifications.
+
+---
+
+## Content Granularity
+
+### Golden Rule
+> "If new code follows existing patterns, steering shouldn't need updating."
+
+### ✅ Document
+- Organizational patterns (feature-first, layered)
+- Naming conventions (PascalCase rules)
+- Import strategies (absolute vs relative)
+- Architectural decisions (state management)
+- Technology standards (key frameworks)
+
+### ❌ Avoid
+- Complete file listings
+- Every component description
+- All dependencies
+- Implementation details
+- Agent-specific tooling directories (e.g. `.cursor/`, `.gemini/`, `.claude/`)
+- Detailed documentation of `.kiro/` metadata directories (settings, automation)
+
+### Example Comparison
+
+**Bad** (Specification-like):
+```markdown
+- /components/Button.tsx - Primary button with variants
+- /components/Input.tsx - Text input with validation
+- /components/Modal.tsx - Modal dialog
+... (50+ files)
+```
+
+**Good** (Project Memory):
+```markdown
+## UI Components (`/components/ui/`)
+Reusable, design-system aligned primitives
+- Named by function (Button, Input, Modal)
+- Export component + TypeScript interface
+- No business logic
+```
+
+---
+
+## Security
+
+Never include:
+- API keys, passwords, credentials
+- Database URLs, internal IPs
+- Secrets or sensitive data
+
+---
+
+## Quality Standards
+
+- **Single domain**: One topic per file
+- **Concrete examples**: Show patterns with code
+- **Explain rationale**: Why decisions were made
+- **Maintainable size**: 100-200 lines typical
+
+---
+
+## Preservation (when updating)
+
+- Preserve user sections and custom examples
+- Additive by default (add, don't replace)
+- Add `updated_at` timestamp
+- Note why changes were made
+
+---
+
+## Notes
+
+- Templates are starting points, customize as needed
+- Follow same granularity principles as core steering
+- All steering files loaded as project memory
+- Light references to `.kiro/specs/` and `.kiro/steering/` are acceptable; avoid other `.kiro/` directories
+- Custom files equally important as core files
+
+---
+
+## File-Specific Focus
+
+- **product.md**: Purpose, value, business context (not exhaustive features)
+- **tech.md**: Key frameworks, standards, conventions (not all dependencies)
+- **structure.md**: Organization patterns, naming rules (not directory trees)
+- **Custom files**: Specialized patterns (API, testing, security, etc.)

+ 102 - 0
.claude/skills/kiro-validate-design/SKILL.md

@@ -0,0 +1,102 @@
+---
+name: kiro-validate-design
+description: Interactive technical design quality review and validation. Use when reviewing design before implementation.
+allowed-tools: Read, Grep, Glob, AskUserQuestion
+argument-hint: <feature-name>
+metadata:
+  shared-rules: "design-review.md"
+---
+
+# kiro-validate-design Skill
+
+## Role
+You are a specialized skill for conducting interactive quality review of technical design to ensure readiness for implementation.
+
+## Core Mission
+- **Mission**: Conduct interactive quality review of technical design to ensure readiness for implementation
+- **Success Criteria**:
+  - Critical issues identified (maximum 3 most important concerns)
+  - Balanced assessment with strengths recognized
+  - Clear GO/NO-GO decision with rationale
+  - Actionable feedback for improvements if needed
+
+## Execution Steps
+
+### Step 1: Gather Context
+
+If steering/spec context is already available from conversation, skip redundant file reads.
+Otherwise, load all necessary context:
+- Read `.kiro/specs/{feature}/spec.json` for language and metadata
+- Read `.kiro/specs/{feature}/requirements.md` for requirements
+- Read `.kiro/specs/{feature}/design.md` for design document
+- Core steering context: `product.md`, `tech.md`, `structure.md`
+- Additional steering files only when directly relevant to architecture boundaries, integrations, runtime prerequisites, domain rules, security/performance constraints, or team conventions that affect implementation readiness
+- Relevant local agent skills or playbooks only when they clearly match the feature's host environment or use case and provide review-relevant context
+
+#### Parallel Research
+
+The following research areas are independent and can be executed in parallel:
+1. **Context & rules loading**: Spec documents, core steering, task-relevant extra steering, relevant local agent skills/playbooks, and `rules/design-review.md` from this skill's directory for review criteria
+2. **Codebase pattern survey**: Gather existing architecture patterns, naming conventions, and component structure from the codebase to use as reference during review
+
+After all parallel research completes, synthesize findings for review.
+
+### Step 2: Execute Design Review
+- Reference conversation history: leverage prior requirements discussion and user's stated design intent
+- Follow design-review.md process: Analysis → Critical Issues → Strengths → GO/NO-GO
+- Limit to 3 most important concerns
+- Engage interactively with user — ask clarifying questions, propose alternatives
+- Use language specified in spec.json for output
+
+### Step 3: Decision and Next Steps
+- Clear GO/NO-GO decision with rationale
+- Provide specific actionable next steps (see Next Phase below)
+
+## Important Constraints
+- **Quality assurance, not perfection seeking**: Accept acceptable risk
+- **Critical focus only**: Maximum 3 issues, only those significantly impacting success
+- **Conversation-aware**: Leverage discussion history for requirements context and user intent
+- **Interactive approach**: Engage in dialogue, ask clarifying questions, propose alternatives
+- **Balanced assessment**: Recognize both strengths and weaknesses
+- **Actionable feedback**: All suggestions must be implementable
+- **Context Discipline**: Start with core steering and expand only with review-relevant steering or use-case-aligned local agent skills/playbooks
+
+## Tool Guidance
+- **Read first**: Load spec, core steering, relevant local playbooks/agent skills, and rules before review
+- **Grep if needed**: Search codebase for pattern validation or integration checks
+- **Interactive**: Engage with user throughout the review process
+
+## Output Description
+Provide output in the language specified in spec.json with:
+
+1. **Review Summary**: Brief overview (2-3 sentences) of design quality and readiness
+2. **Critical Issues**: Maximum 3, following design-review.md format
+3. **Design Strengths**: 1-2 positive aspects
+4. **Final Assessment**: GO/NO-GO decision with rationale and next steps
+
+**Format Requirements**:
+- Use Markdown headings for clarity
+- Follow design-review.md output format
+- Keep summary concise
+
+## Safety & Fallback
+
+### Error Scenarios
+- **Missing Design**: If design.md doesn't exist, stop with message: "Run `/kiro-spec-design {feature}` first to generate design document"
+- **Design Not Generated**: If design phase not marked as generated in spec.json, warn but proceed with review
+- **Empty Steering Directory**: Warn user that project context is missing and may affect review quality
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+
+### Next Phase: Task Generation
+
+**If Design Passes Validation (GO Decision)**:
+- Apply any suggested improvements if agreed
+- Run `/kiro-spec-tasks {feature}` to generate implementation tasks
+- Or `/kiro-spec-tasks {feature} -y` to auto-approve and proceed directly
+
+**If Design Needs Revision (NO-GO Decision)**:
+- Address critical issues identified in review
+- Re-run `/kiro-spec-design {feature}` with improvements
+- Re-validate with `/kiro-validate-design {feature}`
+
+**Note**: Design validation is recommended but optional. Quality review helps catch issues early.

+ 110 - 0
.claude/skills/kiro-validate-design/rules/design-review.md

@@ -0,0 +1,110 @@
+# Design Review Process
+
+## Objective
+Conduct interactive quality review of technical design documents to ensure they are solid enough to proceed to implementation with acceptable risk.
+
+## Review Philosophy
+- **Quality assurance, not perfection seeking**
+- **Critical focus**: Limit to 3 most important concerns
+- **Interactive dialogue**: Engage with designer, not one-way evaluation
+- **Balanced assessment**: Recognize strengths and weaknesses
+- **Clear decision**: Definitive GO/NO-GO with rationale
+
+## Scope & Non-Goals
+
+- Scope: Evaluate the quality of the design document against project context and standards to decide GO/NO-GO.
+- Non-Goals: Do not perform implementation-level design, deep technology research, or finalize technology choices. Defer such items to the design phase iteration.
+
+## Core Review Criteria
+
+### 1. Existing Architecture Alignment (Critical)
+- Integration with existing system boundaries and layers
+- Consistency with established architectural patterns
+- Proper dependency direction and coupling management
+- Alignment with current module organization
+
+### 2. Design Consistency & Standards
+- Adherence to project naming conventions and code standards
+- Consistent error handling and logging strategies
+- Uniform configuration and dependency management
+- Alignment with established data modeling patterns
+
+### 3. Extensibility & Maintainability
+- Design flexibility for future requirements
+- Clear separation of concerns and single responsibility
+- Testability and debugging considerations
+- Appropriate complexity for requirements
+
+### 4. Type Safety & Interface Design
+- Proper type definitions and interface contracts
+- Avoidance of unsafe patterns (e.g., `any` in TypeScript)
+- Clear API boundaries and data structures
+- Input validation and error handling coverage
+
+## Review Process
+
+### Step 1: Analyze
+Analyze design against all review criteria, focusing on critical issues impacting integration, maintainability, complexity, and requirements fulfillment.
+
+### Step 2: Identify Critical Issues (≤3)
+For each issue:
+```
+🔴 **Critical Issue [1-3]**: [Brief title]
+**Concern**: [Specific problem]
+**Impact**: [Why it matters]
+**Suggestion**: [Concrete improvement]
+**Traceability**: [Requirement ID/section from requirements.md]
+**Evidence**: [Design doc section/heading]
+```
+
+### Step 3: Recognize Strengths
+Acknowledge 1-2 strong aspects to maintain balanced feedback.
+
+### Step 4: Decide GO/NO-GO
+- **GO**: No critical architectural misalignment, requirements addressed, clear implementation path, acceptable risks
+- **NO-GO**: Fundamental conflicts, critical gaps, high failure risk, disproportionate complexity
+
+## Traceability & Evidence
+
+- Link each critical issue to the relevant requirement(s) from `requirements.md` (ID or section).
+- Cite evidence locations in the design document (section/heading, diagram, or artifact) to support the assessment.
+- When applicable, reference constraints from steering context to justify the issue.
+
+## Output Format
+
+### Design Review Summary
+2-3 sentences on overall quality and readiness.
+
+### Critical Issues (≤3)
+For each: Issue, Impact, Recommendation, Traceability (e.g., 1.1, 1.2), Evidence (design.md section).
+
+### Design Strengths
+1-2 positive aspects.
+
+### Final Assessment
+Decision (GO/NO-GO), Rationale (1-2 sentences), Next Steps.
+
+### Interactive Discussion
+Engage on designer's perspective, alternatives, clarifications, and necessary changes.
+
+## Length & Focus
+
+- Summary: 2–3 sentences
+- Each critical issue: 5–7 lines total (including Issue/Impact/Recommendation/Traceability/Evidence)
+- Overall review: keep concise (~400 words guideline)
+
+## Review Guidelines
+
+1. **Critical Focus**: Only flag issues that significantly impact success
+2. **Constructive Tone**: Provide solutions, not just criticism
+3. **Interactive Approach**: Engage in dialogue rather than one-way evaluation
+4. **Balanced Assessment**: Recognize both strengths and weaknesses
+5. **Clear Decision**: Make definitive GO/NO-GO recommendation
+6. **Actionable Feedback**: Ensure all suggestions are implementable
+
+## Final Checklist
+
+- **Critical Issues ≤ 3** and each includes Impact and Recommendation
+- **Traceability**: Each issue references requirement ID/section
+- **Evidence**: Each issue cites design doc location
+- **Decision**: GO/NO-GO with clear rationale and next steps

+ 107 - 0
.claude/skills/kiro-validate-gap/SKILL.md

@@ -0,0 +1,107 @@
+---
+name: kiro-validate-gap
+description: Analyze implementation gap between requirements and existing codebase. Use when planning integration with existing systems.
+allowed-tools: Read, Write, Grep, Glob, WebSearch, WebFetch
+argument-hint: <feature-name>
+metadata:
+  shared-rules: "gap-analysis.md"
+---
+
+# kiro-validate-gap Skill
+
+## Role
+You are a specialized skill for analyzing the implementation gap between requirements and existing codebase to inform implementation strategy.
+
+## Core Mission
+- **Mission**: Analyze the gap between requirements and existing codebase to inform implementation strategy
+- **Success Criteria**:
+  - Comprehensive understanding of existing codebase patterns and components
+  - Clear identification of missing capabilities and integration challenges
+  - Multiple viable implementation approaches evaluated
+  - Technical research needs identified for design phase
+
+## Execution Steps
+
+### Step 1: Gather Context
+
+If steering/spec context is already available from conversation, skip redundant file reads.
+Otherwise, load all necessary context:
+- Read `.kiro/specs/{feature}/spec.json` for language and metadata
+- Read `.kiro/specs/{feature}/requirements.md` for requirements
+- Core steering context: `product.md`, `tech.md`, `structure.md`
+- Additional steering files only when directly relevant to the feature's domain rules, integrations, runtime prerequisites, compliance/security constraints, or existing product boundaries
+- Relevant local agent skills or playbooks only when they clearly match the feature's host environment or use case and provide analysis-relevant context
+
+### Step 2: Read Analysis Guidelines
+- Read `rules/gap-analysis.md` from this skill's directory for comprehensive analysis framework
+
+### Step 3: Execute Gap Analysis
+
+#### Parallel Research
+
+The following research areas are independent and can be executed in parallel:
+1. **Codebase analysis**: Existing implementations, architecture patterns, integration points, extension possibilities (using Grep/Glob/Read)
+2. **External dependency research**: Dependency compatibility, version constraints, known integration challenges (using WebSearch/WebFetch when needed)
+3. **Context loading**: Requirements, core steering, task-relevant extra steering, relevant local agent skills/playbooks, and gap-analysis rules
+
+After all parallel research completes, synthesize findings for gap analysis.
+
+- Follow gap-analysis.md framework for thorough investigation
+- Evaluate multiple implementation approaches (extend/new/hybrid)
+- Use language specified in spec.json for output
+
+### Step 4: Generate Analysis Document
+- Create comprehensive gap analysis following the output guidelines in gap-analysis.md
+- Present multiple viable options with trade-offs
+- Flag areas requiring further research
+
+### Step 5: Write Gap Analysis to Disk
+
+**Write the gap analysis to disk so it survives session boundaries and can be referenced during design phase.**
+
+- Use the Write tool to save the gap analysis to `.kiro/specs/{feature}/research.md`
+- If the file already exists, append the new analysis (separated by a horizontal rule `---`) rather than overwriting previous research
+- Verify the file was written by reading it back
+
+## Important Constraints
+- **Information over Decisions**: Provide analysis and options, not final implementation choices
+- **Multiple Options**: Present viable alternatives when applicable
+- **Thorough Investigation**: Use tools to deeply understand existing codebase
+- **Explicit Gaps**: Clearly flag areas needing research or investigation
+- **Context Discipline**: Start with core steering and expand only with analysis-relevant steering or use-case-aligned local agent skills/playbooks
+
+## Tool Guidance
+- **Read first**: Load spec, core steering, relevant local playbooks/agent skills, and rules before analysis
+- **Grep extensively**: Search codebase for patterns, conventions, and integration points
+- **WebSearch/WebFetch**: Research external dependencies and best practices when needed
+- **Write last**: Generate analysis only after complete investigation
+
+## Output Description
+Provide output in the language specified in spec.json with:
+
+1. **Analysis Summary**: Brief overview (3-5 bullets) of scope, challenges, and recommendations
+2. **Document Status**: Confirm analysis approach used
+3. **Next Steps**: Guide user on proceeding to design phase
+
+**Format Requirements**:
+- Use Markdown headings for clarity
+- Keep summary concise (under 300 words)
+- Detailed analysis follows gap-analysis.md output guidelines
+
+## Safety & Fallback
+
+### Error Scenarios
+- **Missing Requirements**: If requirements.md doesn't exist, stop with message: "Run `/kiro-spec-requirements {feature}` first to generate requirements"
+- **Requirements Not Approved**: If requirements not approved, warn user but proceed (gap analysis can inform requirement revisions)
+- **Empty Steering Directory**: Warn user that project context is missing and may affect analysis quality
+- **Complex Integration Unclear**: Flag for comprehensive research in design phase rather than blocking
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+
+### Next Phase: Design Generation
+
+**If Gap Analysis Complete**:
+- Review gap analysis insights
+- Run `/kiro-spec-design {feature}` to create technical design document
+- Or `/kiro-spec-design {feature} -y` to auto-approve requirements and proceed directly
+
+**Note**: Gap analysis is optional but recommended for brownfield projects to inform design decisions.

+ 144 - 0
.claude/skills/kiro-validate-gap/rules/gap-analysis.md

@@ -0,0 +1,144 @@
+# Gap Analysis Process
+
+## Objective
+Analyze the gap between requirements and existing codebase to inform implementation strategy decisions.
+
+## Analysis Framework
+
+### 1. Current State Investigation
+
+- Scan for domain-related assets:
+  - Key files/modules and directory layout
+  - Reusable components/services/utilities
+  - Dominant architecture patterns and constraints
+
+- Extract conventions:
+  - Naming, layering, dependency direction
+  - Import/export patterns and dependency hotspots
+  - Testing placement and approach
+
+- Note integration surfaces:
+  - Data models/schemas, API clients, auth mechanisms
+
+### 2. Requirements Feasibility Analysis
+
+- From EARS requirements, list technical needs:
+  - Data models, APIs/services, UI/components
+  - Business rules/validation
+  - Non-functionals: security, performance, scalability, reliability
+
+- Identify gaps and constraints:
+  - Missing capabilities in current codebase
+  - Unknowns to be researched later (mark as "Research Needed")
+  - Constraints from existing architecture and patterns
+
+- Note complexity signals:
+  - Simple CRUD / algorithmic logic / workflows / external integrations
+
+### 3. Implementation Approach Options
+
+#### Option A: Extend Existing Components
+**When to consider**: Feature fits naturally into existing structure
+
+- **Which files/modules to extend**:
+  - Identify specific files requiring changes
+  - Assess impact on existing functionality
+  - Evaluate backward compatibility concerns
+
+- **Compatibility assessment**:
+  - Check if extension respects existing interfaces
+  - Verify no breaking changes to consumers
+  - Assess test coverage impact
+
+- **Complexity and maintainability**:
+  - Evaluate cognitive load of additional functionality
+  - Check if single responsibility principle is maintained
+  - Assess if file size remains manageable
+
+**Trade-offs**:
+- ✅ Minimal new files, faster initial development
+- ✅ Leverages existing patterns and infrastructure
+- ❌ Risk of bloating existing components
+- ❌ May complicate existing logic
+
+#### Option B: Create New Components
+**When to consider**: Feature has distinct responsibility or existing components are already complex
+
+- **Rationale for new creation**:
+  - Clear separation of concerns justifies new file
+  - Existing components are already complex
+  - Feature has distinct lifecycle or dependencies
+
+- **Integration points**:
+  - How new components connect to existing system
+  - APIs or interfaces exposed
+  - Dependencies on existing components
+
+- **Responsibility boundaries**:
+  - Clear definition of what new component owns
+  - Interfaces with existing components
+  - Data flow and control flow
+
+**Trade-offs**:
+- ✅ Clean separation of concerns
+- ✅ Easier to test in isolation
+- ✅ Reduces complexity in existing components
+- ❌ More files to navigate
+- ❌ Requires careful interface design
+
+#### Option C: Hybrid Approach
+**When to consider**: Complex features requiring both extension and new creation
+
+- **Combination strategy**:
+  - Which parts extend existing components
+  - Which parts warrant new components
+  - How they interact
+
+- **Phased implementation**:
+  - Initial phase: minimal viable changes
+  - Subsequent phases: refactoring or new components
+  - Migration strategy if needed
+
+- **Risk mitigation**:
+  - Incremental rollout approach
+  - Feature flags or configuration
+  - Rollback strategy
+
+**Trade-offs**:
+- ✅ Balanced approach for complex features
+- ✅ Allows iterative refinement
+- ❌ More complex planning required
+- ❌ Potential for inconsistency if not well-coordinated
+
+### 4. Out-of-Scope for Gap Analysis
+
+- Defer deep research activities to the design phase.
+- Record unknowns as concise "Research Needed" items only.
+
+### 5. Implementation Complexity & Risk
+
+  - Effort:
+    - S (1–3 days): existing patterns, minimal deps, straightforward integration
+    - M (3–7 days): some new patterns/integrations, moderate complexity
+    - L (1–2 weeks): significant functionality, multiple integrations or workflows
+    - XL (2+ weeks): architectural changes, unfamiliar tech, broad impact
+  - Risk:
+    - High: unknown tech, complex integrations, architectural shifts, unclear perf/security path
+    - Medium: new patterns with guidance, manageable integrations, known perf solutions
+    - Low: extend established patterns, familiar tech, clear scope, minimal integration
+
+### Output Checklist
+
+- Requirement-to-Asset Map with gaps tagged (Missing / Unknown / Constraint)
+- Options A/B/C with short rationale and trade-offs
+- Effort (S/M/L/XL) and Risk (High/Medium/Low) with one-line justification each
+- Recommendations for design phase:
+  - Preferred approach and key decisions
+  - Research items to carry forward
+
+## Principles
+
+- **Information over decisions**: Provide analysis and options, not final choices
+- **Multiple viable options**: Offer credible alternatives when applicable
+- **Explicit gaps and assumptions**: Flag unknowns and constraints clearly
+- **Context-aware**: Align with existing patterns and architecture limits
+- **Transparent effort and risk**: Justify labels succinctly

+ 204 - 0
.claude/skills/kiro-validate-impl/SKILL.md

@@ -0,0 +1,204 @@
+---
+name: kiro-validate-impl
+description: Validate feature-level integration after all tasks are implemented. Checks cross-task consistency, full test suite, and overall spec coverage.
+allowed-tools: Read, Bash, Grep, Glob, Agent
+argument-hint: <feature-name> [task-numbers]
+---
+
+# kiro-validate-impl Skill
+
+## Role
+Individual tasks have already been reviewed by the per-task reviewer during implementation. Your job is to catch problems that only become visible when looking across all tasks together.
+
+Boundary terminology continuity:
+- discovery identifies `Boundary Candidates`
+- design fixes `Boundary Commitments`
+- tasks constrain execution with `_Boundary:_`
+- feature validation checks for cross-task `Boundary Violations`
+
+## Core Mission
+- **Success Criteria**:
+  - All tasks marked `[x]` in tasks.md
+  - Full test suite passes (not just per-task tests)
+  - Cross-task integration works (data flows between components, interfaces match)
+  - Requirements coverage is complete across all tasks (no gaps between tasks)
+  - Design structure is reflected end-to-end (not just per-component)
+  - No orphaned code, conflicting implementations, unresolved integration seams, or boundary spillover
+
+## What This Skill Does NOT Do
+Per-task checks are the reviewer's responsibility during `/kiro-impl`. This skill does **not** re-check:
+- Individual task acceptance criteria
+- Per-file reality checks (mock/stub detection)
+- Single-task spec alignment
+
+This skill's main question is: when the completed tasks are viewed together, do they still respect the designed boundary seams and dependency direction?
+
+## Execution Steps
+
+### Step 1: Detect Validation Target
+
+**If no arguments provided**:
+- Parse conversation history for `/kiro-impl` commands to detect recently implemented features and tasks
+- Scan `.kiro/specs/` for features with completed tasks `[x]`
+- Report detected implementations (e.g., "user-auth: 1.1, 1.2, 1.3")
+
+**If feature provided** (feature specified, tasks empty):
+- Use specified feature
+- Detect all completed tasks `[x]` in `.kiro/specs/{feature}/tasks.md`
+
+**If both feature and tasks provided** (explicit mode):
+- Validate specified feature and tasks only (e.g., `user-auth 1.1,1.2`)
+
+### Step 2: Gather Context
+
+If steering/spec context is already available from conversation, skip redundant file reads.
+Otherwise, for each detected feature:
+- Read `.kiro/specs/<feature>/spec.json` for metadata
+- Read `.kiro/specs/<feature>/requirements.md` for requirements
+- Read `.kiro/specs/<feature>/design.md` for design structure
+- Read `.kiro/specs/<feature>/tasks.md` for task list and Implementation Notes
+- Core steering context: `product.md`, `tech.md`, `structure.md`
+- Additional steering files only when directly relevant to the validated boundaries, runtime prerequisites, integrations, domain rules, security/performance constraints, or team conventions that affect the GO/NO-GO call
+
+**Discover canonical validation commands**:
+- Inspect repository-local sources of truth in this order: project scripts/manifests (`package.json`, `pyproject.toml`, `go.mod`, `Cargo.toml`, app manifests), task runners (`Makefile`, `justfile`), CI/workflow files, existing e2e/integration configs, then `README*`
+- Derive a feature-level validation set for this repo: `TEST_COMMANDS`, `BUILD_COMMANDS`, and `SMOKE_COMMANDS`
+- Prefer commands already used by repo automation over ad hoc shell pipelines
+- For `SMOKE_COMMANDS`, choose the lightest trustworthy runtime-liveness check for the app shape (for example: root URL load, Electron launch, CLI `--help`, service health endpoint, mobile simulator/e2e harness if one already exists)
+- If multiple candidates exist, prefer the command with the smallest setup cost that still exercises the real built artifact
+
+### Step 3: Execute Integration Validation
+
+#### Subagent Dispatch (parallel)
+
+The following validation dimensions are independent and can be dispatched as **subagents** via the Agent tool. The agent should decide the optimal decomposition based on feature scope — split, merge, or skip subagents as appropriate. Each subagent returns a **structured findings summary** to keep the main context clean for GO/NO-GO synthesis.
+
+**Typical validation dimensions** (adjust as appropriate):
+- **Test execution**: Run the complete test suite, report pass/fail with details
+- **Requirements coverage**: Build requirements → implementation matrix, report gaps
+- **Design alignment**: Verify architecture matches design.md, report drift and dependency violations
+- **Cross-task integration**: Verify data flows, API contracts, shared state consistency
+
+For simple features (few tasks, small scope), run checks in main context without subagent dispatch.
+
+#### Mechanical Checks (run commands, use results)
+
+These checks apply at the feature level. Use command output as the primary signal.
+
+**A. Full Test Suite**
+- Run the discovered canonical full-test command. Use the exit code.
+- If tests fail → NO-GO. No judgment needed.
+- If the canonical test command cannot be identified → `MANUAL_VERIFY_REQUIRED`
+
+**B. Residual TBD/TODO/FIXME**
+- Run: `grep -rn "TBD\|TODO\|FIXME\|HACK\|XXX" <files-in-feature-boundary>`
+- If matches found that were introduced by this feature → flag as Warning
+
+**C. Residual Hardcoded Secrets**
+- Run: `grep -rni "password\s*=\|api_key\s*=\|secret\s*=\|token\s*=" <files-in-feature-boundary>` (case-insensitive)
+- If matches found that aren't environment variable references → flag as Critical
+
+**D. Runtime Liveness (Smoke Boot)**
+- Run the discovered canonical smoke command that proves the built artifact actually starts and reaches its first usable state.
+- Examples if relevant: open the root URL in a headless browser and require zero boot-time console errors; launch Electron and wait for the main process ready signal and first renderer load; run a CLI with `--help`; start a service and hit its health endpoint.
+- If boot produces a runtime crash, unhandled exception, module-load failure, native ABI mismatch, or missing required env/config → NO-GO.
+- If no trustworthy smoke command can be identified, or the required runtime environment is unavailable → `MANUAL_VERIFY_REQUIRED`
+
+#### Judgment Checks (read code, compare to spec)
+
+**E. Cross-Task Integration**
+- Identify where tasks share interfaces, data models, or API contracts
+- Verify that Task A's output format matches Task B's expected input
+- Check for conflicting assumptions between tasks (naming conventions, error codes, data shapes)
+- Verify shared state (database schemas, config, environment) is consistent across tasks
+- Verify integration work happens at the intended seams rather than by leaking one boundary's behavior into another
+
+**F. Requirements Coverage Gaps**
+- Map every requirement section to at least one completed task
+- Identify requirements that no single task fully covers (cross-cutting requirements)
+- Identify requirements partially covered by multiple tasks but not fully by any
+- Use the original section numbering from `requirements.md`; do NOT invent `REQ-*` aliases
+
+**G. Design End-to-End Alignment**
+- Verify the overall component graph matches design.md
+- Check that integration patterns (event flow, API boundaries, dependency injection) work as designed
+- Verify dependency direction follows design.md's architecture (no upward imports)
+- Verify File Structure Plan matches the actual file layout
+- Identify any architectural drift from the original design
+- Use the original section numbering from `design.md`
+
+**G.5 Boundary Audit**
+- Compare completed work against the design's `Boundary Commitments`, `Out of Boundary`, `Allowed Dependencies`, and `Revalidation Triggers`
+- Identify cross-task spillover where one area quietly absorbed another boundary's responsibility
+- Identify downstream-specific workarounds embedded upstream "to make integration easier"
+- Identify new hidden dependencies or shared ownership that were not declared in the design
+- If a revalidation trigger fired, verify the affected adjacent specs or integration points were actually re-checked
+
+**H. Blocked Tasks & Implementation Notes**
+- Check for any tasks still marked `_Blocked:_` — report why and assess impact on feature completeness
+- Review `## Implementation Notes` in tasks.md for cross-cutting insights that need attention
+
+### Step 4: Generate Report
+
+Before returning `GO`, apply the `kiro-verify-completion` protocol to the feature-level claim. Tests alone are insufficient: include full-suite, runtime liveness, coverage, integration, design-alignment, and blocked-task status in the evidence.
+
+Classify concrete failures by ownership before writing remediation:
+- `LOCAL` if the defect belongs to the feature being validated
+- `UPSTREAM` if the root cause belongs to a dependency, foundation, shared platform, or earlier spec
+- `UNCLEAR` if ownership cannot be established from the available evidence
+
+If ownership is `UPSTREAM`, do not collapse the issue into local remediation for this feature. Name the owning upstream spec and explain which dependent specs should be revalidated after that upstream fix lands.
+
+Provide summary in the language specified in spec.json:
+
+```
+## Validation Report
+- DECISION: GO | NO-GO | MANUAL_VERIFY_REQUIRED
+- MECHANICAL_RESULTS:
+  - Tests: PASS | FAIL (command and exit code)
+  - TBD/TODO grep: CLEAN | <count> matches
+  - Secrets grep: CLEAN | <count> matches
+  - Smoke boot: PASS | FAIL | MANUAL_REQUIRED
+- INTEGRATION:
+  - Cross-task contracts: <status>
+  - Shared state consistency: <status>
+  - Boundary audit: <status>
+- COVERAGE:
+  - Requirements mapped: <X/Y sections covered>
+  - Coverage gaps: <list of uncovered requirement sections>
+- DESIGN:
+  - Architecture drift: <findings>
+  - Dependency direction: <violations if any>
+  - File Structure Plan vs actual: <match/mismatch>
+- OWNERSHIP: LOCAL | UPSTREAM | UNCLEAR
+- UPSTREAM_SPEC: <feature-name | N/A>
+- BLOCKED_TASKS: <list and impact assessment>
+- REMEDIATION: <if NO-GO: specific, actionable steps to fix each issue>
+```
+
+If NO-GO, REMEDIATION is mandatory — identify the exact issue and what needs to change. Vague feedback is not acceptable.
+
+## Important Constraints
+- **Strict Final Gate**: Return `GO` only when all integration checks passed; return `NO-GO` for concrete failures and `MANUAL_VERIFY_REQUIRED` when mandatory validation could not be completed
+- **Boundary integrity over convenience**: Do not return `GO` if the feature only works by smearing responsibilities across boundaries, even when tests pass
+
+## Safety & Fallback
+
+### Error Scenarios
+- **No Implementation Found**: If no `[x]` tasks found, report "No implementations detected"
+- **Test Command Unknown**: Return `MANUAL_VERIFY_REQUIRED` and explain which validation command is missing; do not return `GO`
+- **Missing Spec Files**: Stop with error if spec.json/requirements.md/design.md missing
+
+### Next Steps Guidance
+
+**If GO Decision**:
+- Feature validated end-to-end and ready for deployment or next feature
+
+**If NO-GO Decision**:
+- Address issues listed in REMEDIATION
+- Re-run `/kiro-impl {feature} [tasks]` for targeted fixes
+- Re-validate with `/kiro-validate-impl {feature}`
+
+**If MANUAL_VERIFY_REQUIRED**:
+- Do not treat the feature as complete
+- Provide the exact missing validation step or environment prerequisite

+ 131 - 0
.claude/skills/kiro-verify-completion/SKILL.md

@@ -0,0 +1,131 @@
+---
+name: kiro-verify-completion
+description: Verify completion and success claims with fresh evidence. Use before claiming a task is complete, a fix works, tests pass, or a feature is ready for GO.
+allowed-tools: Read, Bash, Grep, Glob
+argument-hint: <claim-type> <claim>
+---
+
+# kiro-verify-completion
+
+## Overview
+
+This skill prevents false completion claims. A task, fix, or feature is only complete when supported by fresh evidence that matches the scope of the claim.
+
+## When to Use
+
+- Before saying a task is complete
+- Before saying a bug is fixed
+- Before saying tests pass
+- Before moving to the next task in autonomous execution
+- Before reporting `GO` from feature-level validation
+- Before trusting another subagent's success report
+
+Do not use this skill for early planning or speculative status updates.
+
+## Inputs
+
+Provide:
+- The exact claim to verify
+- Claim type:
+  - `TASK`
+  - `FIX`
+  - `TEST_OR_BUILD`
+  - `FEATURE_GO`
+- Validation commands discovered by the controller
+- Fresh command output and exit codes
+- Relevant task IDs, requirement IDs, and design refs where applicable
+- For feature-level claims:
+  - requirements coverage status
+  - design alignment status
+  - integration status
+  - blocked task status
+
+## Outputs
+
+Return one of:
+- `VERIFIED`
+- `NOT_VERIFIED`
+- `MANUAL_VERIFY_REQUIRED`
+
+Also return:
+- Claim reviewed
+- Evidence used
+- Scope/evidence mismatch, if any
+
+Use the language specified in `spec.json`.
+
+## Gate Function
+
+1. Identify the exact claim.
+2. Identify the exact command or checklist that proves that claim.
+3. Require fresh evidence from the current code state.
+4. Check exit code, failure count, skipped scope, and missing coverage.
+5. Reject claims that are broader than the evidence.
+6. If mandatory validation cannot be completed, return `MANUAL_VERIFY_REQUIRED`.
+7. Only then allow the claim.
+
+## Claim-Specific Rules
+
+### TASK
+Require:
+- task-local verification evidence
+- no unresolved blocking findings from review
+- evidence aligned with the task boundary
+
+### FIX
+Require:
+- evidence that the original symptom is resolved
+- no broader regressions in the relevant verification scope
+
+### TEST_OR_BUILD
+Require:
+- actual command output
+- exit code
+- no inference from unrelated checks
+
+### FEATURE_GO
+Require:
+- full test suite result
+- runtime smoke boot result showing the built artifact reaches its first usable state
+- requirements coverage assessment
+- cross-task integration assessment
+- design end-to-end alignment assessment
+- blocked tasks assessment
+
+A passing test suite alone is not enough for `FEATURE_GO`.
+
+## Stop / Escalate
+
+Return `MANUAL_VERIFY_REQUIRED` when:
+- No canonical validation command is known
+- The required environment is unavailable
+- A mandatory manual verification step cannot be executed
+
+Return `NOT_VERIFIED` when:
+- The command failed
+- Evidence is stale
+- Evidence is partial
+- The claim exceeds the evidence
+- The feature still has unresolved blocked tasks or uncovered requirements
+
+## Common Rationalizations
+
+| Rationalization | Reality |
+|---|---|
+| “The subagent said it succeeded” | Reported success is not verification evidence. |
+| “Tests passed earlier” | Fresh evidence only. |
+| “Build should be fine because lint passed” | Lint does not prove build success. |
+| “Tests passed and build succeeded, so it must run” | Type erasure, module loading, native ABI, and boot-time config issues can still fail at runtime. |
+| “The feature is done because all tasks are checked off” | `FEATURE_GO` also requires coverage, integration, and design alignment. |
+
+## Output Format
+
+```md
+## Verification Result
+- STATUS: VERIFIED | NOT_VERIFIED | MANUAL_VERIFY_REQUIRED
+- CLAIM_TYPE: TASK | FIX | TEST_OR_BUILD | FEATURE_GO
+- CLAIM: <exact claim>
+- EVIDENCE: <command/checklist and result>
+- GAPS: <scope/evidence mismatch or missing validation>
+- NOTES: <next action if not verified>
+```

+ 3 - 3
.devcontainer/app/postCreateCommand.sh

@@ -14,6 +14,9 @@ sudo chmod 700 /tmp/page-bulk-export
 # Install uv
 curl -LsSf https://astral.sh/uv/install.sh | sh
 
+# Install Claude Code
+curl -fsSL https://claude.ai/install.sh | bash
+
 # Setup pnpm
 SHELL=bash pnpm setup
 eval "$(cat /home/vscode/.bashrc)"
@@ -22,9 +25,6 @@ pnpm config set store-dir /workspace/.pnpm-store
 # Install turbo
 pnpm install turbo --global
 
-# Install Claude Code
-pnpm install @anthropic-ai/claude-code --global
-
 # Install dependencies
 turbo run bootstrap
 

+ 3 - 3
.github/workflows/reusable-app-prod.yml

@@ -236,7 +236,7 @@ jobs:
         HOME: /root # ref: https://github.com/microsoft/playwright/issues/6500
         GROWI_WEBSERVER_COMMAND: 'cd /tmp/growi-prod/apps/app && pnpm run server'
         MONGO_URI: mongodb://mongodb:27017/growi-playwright-installer
-        ELASTICSEARCH_URI: http://localhost:${{ job.services.elasticsearch.ports['9200'] }}/growi
+        ELASTICSEARCH_URI: http://elasticsearch:9200/growi
 
     - name: Copy dotenv file for automatic installation
       run: |
@@ -251,7 +251,7 @@ jobs:
         HOME: /root # ref: https://github.com/microsoft/playwright/issues/6500
         GROWI_WEBSERVER_COMMAND: 'cd /tmp/growi-prod/apps/app && pnpm run server'
         MONGO_URI: mongodb://mongodb:27017/growi-playwright
-        ELASTICSEARCH_URI: http://localhost:${{ job.services.elasticsearch.ports['9200'] }}/growi
+        ELASTICSEARCH_URI: http://elasticsearch:9200/growi
 
     - name: Copy dotenv file for automatic installation with allowing guest mode
       run: |
@@ -266,7 +266,7 @@ jobs:
         HOME: /root # ref: https://github.com/microsoft/playwright/issues/6500
         GROWI_WEBSERVER_COMMAND: 'cd /tmp/growi-prod/apps/app && pnpm run server'
         MONGO_URI: mongodb://mongodb:27017/growi-playwright-guest-mode
-        ELASTICSEARCH_URI: http://localhost:${{ job.services.elasticsearch.ports['9200'] }}/growi
+        ELASTICSEARCH_URI: http://elasticsearch:9200/growi
 
     - name: Generate shard ID
       id: shard-id

+ 56 - 1
.kiro/settings/templates/specs/design.md

@@ -9,7 +9,7 @@
 - Match detail level to feature complexity
 - Use diagrams and tables over lengthy prose
 
-**Warning**: Approaching 1000 lines indicates excessive feature complexity that may require design simplification.
+**Warning**: Approaching 1000 lines indicates excessive feature complexity that may require design simplification or splitting into multiple specs.
 ---
 
 > Sections may be reordered (e.g., surfacing Requirements Traceability earlier or moving Data Models nearer Architecture) when it improves clarity. Within each section, keep the flow **Summary → Scope → Decisions → Impacts/Risks** so reviewers can scan consistently.
@@ -31,10 +31,38 @@
 - Future considerations outside current scope
 - Integration points deferred
 
+## Boundary Commitments
+
+State the responsibility boundary of this spec in concrete terms. Treat this as the anchor for architecture, tasks, and later validation.
+
+### This Spec Owns
+- Capabilities and behaviors this spec is responsible for
+- Data it owns or is authoritative for
+- Interfaces or contracts it defines or stabilizes
+
+### Out of Boundary
+- Related concerns this spec explicitly does NOT own
+- Work deferred to another spec, existing subsystem, or later phase
+- Changes this spec must not absorb as "just one more thing"
+
+### Allowed Dependencies
+- Upstream systems/specs/components this design may depend on
+- Shared infrastructure this design may use
+- Dependency constraints that must not be violated
+
+### Revalidation Triggers
+List the kinds of changes that should force dependent specs or consumers to re-check integration.
+
+- Contract shape changes
+- Data ownership changes
+- Dependency direction changes
+- Startup/runtime prerequisite changes
+
 ## Architecture
 
 > Reference detailed discovery notes in `research.md` only for background; keep design.md self-contained for reviewers by capturing all decisions and contracts here.
 > Capture key decisions in text and let diagrams carry structural detail—avoid repeating the same information in prose.
+> Supporting sections below should remain as light as possible unless they materially clarify the responsibility boundary, dependency rules, or integration seams.
 
 ### Existing Architecture Analysis (if applicable)
 When modifying existing systems:
@@ -65,6 +93,33 @@ When modifying existing systems:
 
 > Keep rationale concise here and, when more depth is required (trade-offs, benchmarks), add a short summary plus pointer to the Supporting References section and `research.md` for raw investigation notes.
 
+## File Structure Plan
+
+Map the directory structure and file responsibilities for this feature. This section directly drives task `_Boundary:_` annotations and implementation Task Briefs. Use the appropriate level of detail:
+
+- **Small features**: List individual files with responsibilities
+- **Large features**: Describe directory-level structure + per-domain/module pattern, list only non-obvious files individually
+
+### Directory Structure
+```
+src/
+├── domain-a/              # Domain A responsibility
+│   ├── controller.ts      # Endpoint handlers
+│   ├── service.ts         # Business logic
+│   └── types.ts           # Domain types
+├── domain-b/              # Domain B (same pattern as domain-a)
+└── shared/
+    └── cross-cutting.ts   # Non-obvious: why this exists
+```
+
+> For repeated structures, describe the pattern once (e.g., "domain-b follows same pattern as domain-a"). List individual files only when their responsibility isn't obvious from the path.
+
+### Modified Files
+- `path/to/existing.ts` — What changes and why
+
+> Each file should have one clear responsibility. Group files that change together. For repeated structures, describe the pattern once rather than listing every file.
+> Avoid duplicating what Components and Interfaces already describes — focus on the physical file layout that Components maps to.
+
 ## System Flows
 
 Provide only the diagrams needed to explain non-trivial flows. Use pure Mermaid syntax. Common patterns:

+ 1 - 1
.kiro/settings/templates/specs/requirements-init.md

@@ -4,6 +4,6 @@
 {{PROJECT_DESCRIPTION}}
 
 ## Requirements
-<!-- Will be generated in /kiro:spec-requirements phase -->
+<!-- Will be generated in /kiro-spec-requirements phase -->
 
 

+ 6 - 0
.kiro/settings/templates/specs/requirements.md

@@ -3,6 +3,12 @@
 ## Introduction
 {{INTRODUCTION}}
 
+<!-- Optional when scope could be misread or the feature touches adjacent systems/specs -->
+## Boundary Context (Optional)
+- **In scope**: {{IN_SCOPE_BEHAVIORS}}
+- **Out of scope**: {{OUT_OF_SCOPE_BEHAVIORS}}
+- **Adjacent expectations**: {{ADJACENT_SYSTEM_OR_SPEC_EXPECTATIONS}}
+
 ## Requirements
 
 ### Requirement 1: {{REQUIREMENT_AREA_1}}

+ 3 - 0
.kiro/settings/templates/specs/tasks.md

@@ -14,7 +14,10 @@ Use whichever pattern fits the work breakdown:
 - [ ] {{MAJOR_NUMBER}}.{{SUB_NUMBER}} {{SUB_TASK_DESCRIPTION}}{{SUB_PARALLEL_MARK}}
   - {{DETAIL_ITEM_1}}
   - {{DETAIL_ITEM_2}}
+  - {{OBSERVABLE_COMPLETION_ITEM}} *(At least one detail item should state the observable completion condition for this task.)*
   - _Requirements: {{REQUIREMENT_IDS}}_ *(IDs only; do not add descriptions or parentheses.)*
+  - _Boundary: {{COMPONENT_NAMES}}_ *(Only for (P) tasks. Omit when scope is obvious.)*
+  - _Depends: {{TASK_IDS}}_ *(Only for non-obvious cross-boundary dependencies. Most tasks omit this.)*
 
 > **Parallel marker**: Append ` (P)` only to tasks that can be executed in parallel. Omit the marker when running in `--sequential` mode.
 >

+ 815 - 0
.kiro/specs/collaborative-editor-awareness/design.md

@@ -0,0 +1,815 @@
+# Design Document: collaborative-editor-awareness
+
+## Overview
+
+**Purpose**: This feature fixes intermittent disappearance of the `EditingUserList` component, upgrades in-editor cursors to display a user's name and avatar, adds off-screen cursor indicators with click-to-scroll navigation, and surfaces username tooltips in `EditingUserList`.
+
+**Users**: All GROWI users who use real-time collaborative page editing. They will see stable editing-user indicators, rich avatar-bearing cursor flags, off-screen indicators they can click to jump to a co-editor's position, and username tooltips on hover in the editing user list.
+
+**Impact**: Modifies `use-collaborative-editor-mode` in `@growi/editor`, replaces the default `yRemoteSelections` cursor plugin with `yRichCursors`, adds off-screen click-to-scroll via a mutable ref pattern, and enhances `EditingUserList` with color-matched borders, click-to-scroll, and username tooltips.
+
+### Goals
+
+- Eliminate `EditingUserList` disappearance caused by `undefined` entries from uninitialized awareness states
+- Remove incorrect direct mutation of Yjs-managed `awareness.getStates()` map
+- Render remote cursors with display name and profile image avatar
+- Read user data exclusively from `state.editors` (GROWI's canonical awareness field), eliminating the current `state.user` mismatch
+- Enable click-to-scroll on both `EditingUserList` avatars and off-screen cursor indicators
+- Display username tooltips on `EditingUserList` avatar hover without reintroducing the HOC Fragment layout issue
+
+### Non-Goals
+
+- Server-side awareness bridging (covered in `collaborative-editor` spec)
+- Upgrading `y-codemirror.next` or `yjs`
+- Cursor rendering for the local user's own cursor
+
+## Architecture
+
+### Existing Architecture Analysis
+
+The current flow has two defects:
+
+1. **`emitEditorList` in `use-collaborative-editor-mode`**: maps `awareness.getStates().values()` to `value.editors`, producing `undefined` for any client whose awareness state has not yet included an `editors` field. The `Array.isArray` guard is always true and does not filter. `EditingUserList` then receives a list containing `undefined`, leading to a React render error that wipes the component.
+
+2. **Cursor field mismatch**: `yCollab(activeText, provider.awareness, { undoManager })` adds `yRemoteSelections`, which reads `state.user.name` and `state.user.color`. GROWI sets `state.editors` (not `state.user`). The result is that all cursors render as "Anonymous" with a default blue color. This is also fixed by the new design.
+
+### Architecture Pattern & Boundary Map
+
+```mermaid
+graph TB
+    subgraph packages_editor
+        COLLAB[use-collaborative-editor-mode]
+        RICH[yRichCursors ViewPlugin]
+        YCOLLAB[yCollab - null awareness]
+        SCROLLREF[scrollCallbackRef - MutableRef]
+    end
+
+    subgraph y_codemirror_next
+        YSYNC[ySync - text sync]
+        YUNDO[yUndoManager - undo]
+    end
+
+    subgraph Yjs_Awareness
+        AWR[provider.awareness]
+    end
+
+    subgraph apps_app
+        CM[CodeMirrorEditorMain]
+        EUL[EditingUserList]
+        ATOM[editingClientsAtom - Jotai]
+        ATOM2[scrollToRemoteCursorAtom - Jotai]
+    end
+
+    CM --> COLLAB
+    COLLAB -->|null awareness| YCOLLAB
+    YCOLLAB --> YSYNC
+    YCOLLAB --> YUNDO
+    COLLAB -->|awareness + scrollCallbackRef| RICH
+    RICH -->|reads state.editors| AWR
+    RICH -->|sets state.cursor| AWR
+    RICH -->|viewport comparison| RICH
+    RICH -->|indicator click| SCROLLREF
+    COLLAB -->|filtered clientList| ATOM
+    ATOM --> EUL
+    COLLAB -->|scrollFn written to ref| SCROLLREF
+    COLLAB -->|onScrollToRemoteCursorReady| ATOM2
+    ATOM2 -->|onUserClick| EUL
+```
+
+**Key architectural properties**:
+- `yCollab` is called with `null` awareness to suppress the built-in `yRemoteSelections` plugin; text-sync (`ySync`) and undo (`yUndoManager`) are not affected
+- `yRichCursors` is added as a separate extension alongside `yCollab`'s output; it owns all awareness-cursor interaction, including in-viewport widget rendering and off-screen indicators
+- `state.editors` remains the single source of truth for user identity data
+- `state.cursor` (anchor/head relative positions) continues to be used for cursor position broadcasting, consistent with `y-codemirror.next` convention
+- Off-screen indicators are managed within the same `yRichCursors` ViewPlugin — it compares each remote cursor's absolute position against `view.visibleRanges` (the actually visible content range, excluding CodeMirror's pre-render buffer) to decide between widget decoration (in-view) and DOM overlay (off-screen)
+- **`scrollCallbackRef`** is a `{ current: ((clientId: number) => void) | null }` mutable object created once alongside the `yRichCursors` extension. Because the scroll function is created in a separate `useEffect` from the extension instantiation, passing it as a plain value would require recreating the extension on every update. The mutable ref allows `yRichCursors` to hold a stable reference to the container while the hook silently updates `.current` when the scroll function is registered or cleared.
+
+**Dual-path scroll delivery — why both `scrollCallbackRef` and `onScrollToRemoteCursorReady` coexist**:
+
+The scroll-to-remote-cursor function has two independent consumers that live in fundamentally different runtime contexts:
+
+| Consumer | Context | Why this delivery mechanism |
+|----------|---------|----------------------------|
+| Off-screen indicator (DOM click) | CodeMirror `ViewPlugin` — vanilla JS, not a React component | Cannot call React hooks (`useAtomValue`) to read a Jotai atom. Needs a plain mutable ref whose `.current` is read at click time. |
+| `EditingUserList` avatar click | React component in `apps/app` | Needs a React-compatible state update (Jotai atom) so that `EditorNavbar` re-renders when the scroll function becomes available. A mutable ref change does not trigger re-render. |
+
+Consolidating into a single mechanism is not feasible:
+- **Ref-only**: React components that read `useRef` do not re-render when `.current` changes; `EditorNavbar` would receive `null` on initial render and never update.
+- **Atom-only**: `yRichCursors` is a CodeMirror `ViewPlugin` class (not a React component) and cannot call `useAtomValue`. Importing the atom directly from `apps/app` into `packages/editor` would violate the monorepo dependency direction (lower package must not depend on higher).
+- **Event-emitter**: Considered as an alternative to the callback prop chain. A typed event emitter (e.g., `mitt`) would replace the two callback props (`onEditorsUpdated`, `onScrollToRemoteCursorReady`) with a single event bus prop. However, with only two events, the abstraction cost outweighs the benefit: event emitters introduce implicit coupling (string-keyed subscriptions are harder to trace and not caught by the compiler if one side is renamed), require manual subscribe/unsubscribe lifecycle management (risk of stale handler leaks), and add an external dependency — all for marginal reduction in prop drilling (2 → 1).
+
+The `onScrollToRemoteCursorReady` callback follows the same pattern as the existing `onEditorsUpdated` callback, which also bridges `packages/editor` → `apps/app` across the package boundary via props.
+
+### Technology Stack
+
+| Layer | Choice / Version | Role in Feature | Notes |
+|-------|------------------|-----------------|-------|
+| Editor extensions | `y-codemirror.next@0.3.5` | `yCollab` for text-sync and undo; `yRemoteSelectionsTheme` for base caret CSS | No version change; `yRemoteSelections` no longer used |
+| Cursor rendering | CodeMirror `ViewPlugin` + `WidgetType` (`@codemirror/view`) | DOM-based cursor widget with avatar `<img>` | No new dependency |
+| Awareness | `y-websocket` `awareness` object | State read (`getStates`) and write (`setLocalStateField`) | `Awareness` type derived via `WebsocketProvider['awareness']` — `y-protocols` is not a direct dependency |
+
+## System Flows
+
+### Click-to-Scroll Flow — EditingUserList Avatar (Requirements 6.1–6.5)
+
+```mermaid
+sequenceDiagram
+    participant EUL as EditingUserList
+    participant ATOM2 as scrollToRemoteCursorAtom
+    participant HOOK as use-collaborative-editor-mode
+    participant AW as provider.awareness
+    participant CM as CodeMirror EditorView
+
+    EUL->>ATOM2: onUserClick(clientId)
+    ATOM2->>HOOK: scrollFn(clientId)
+    HOOK->>AW: getStates().get(clientId)
+    AW-->>HOOK: AwarenessState { cursor.head }
+    Note over HOOK: cursor.head == null → return (no-op, req 6.3)
+    HOOK->>HOOK: createAbsolutePositionFromRelativePosition(head, activeDoc)
+    HOOK->>CM: view.dispatch(EditorView.scrollIntoView(pos.index, { y: 'center' }))
+```
+
+**Key design decisions**:
+- `scrollFn` closes over `codeMirrorEditor` (accessed lazily via `codeMirrorEditor?.view` at call time) so late-mounted editors are handled correctly.
+- `activeDoc` (Y.Doc) is captured in the same effect that creates `scrollFn`; the function is invalidated and recreated whenever `activeDoc` or `provider` changes.
+- If `cursor.head` is absent (user connected but not focused), the click is silently ignored per requirement 6.3.
+
+### Click-to-Scroll Flow — Off-Screen Indicator (Requirements 6.6–6.7)
+
+```mermaid
+sequenceDiagram
+    participant IND as Off-Screen Indicator DOM
+    participant REF as scrollCallbackRef
+    participant HOOK as use-collaborative-editor-mode
+    participant AW as provider.awareness
+    participant CM as CodeMirror EditorView
+
+    Note over REF: Ref created alongside yRichCursors extension
+    HOOK->>REF: scrollCallbackRef.current = scrollFn (on setup)
+    IND->>REF: click handler calls scrollCallbackRef.current(clientId)
+    REF->>HOOK: scrollFn(clientId)
+    HOOK->>AW: getStates().get(clientId)
+    AW-->>HOOK: AwarenessState { cursor.head }
+    Note over HOOK: cursor.head == null → return (no-op, req 6.3)
+    HOOK->>HOOK: createAbsolutePositionFromRelativePosition(head, activeDoc)
+    HOOK->>CM: view.dispatch(EditorView.scrollIntoView(pos.index, { y: 'center' }))
+```
+
+**Key design decisions for off-screen click**:
+- `scrollCallbackRef` is a plain object `{ current: Fn | null }` created with `useRef` in `use-collaborative-editor-mode` and passed to `yRichCursors(awareness, { onClickIndicator: scrollCallbackRef })`. This is the standard React mutable-ref pattern but without the React import constraint (the `packages/editor` package uses it as a plain typed object).
+- The extension is created once; the ref's `.current` value is updated silently by the hook's scroll-function `useEffect`. This avoids recreating CodeMirror extensions on every provider change.
+- `createOffScreenIndicator` receives a `clientId` and an `onClick` callback, attaching a `click` event listener that calls `onClick(clientId)`. The indicator element has `cursor: pointer` via the theme CSS or inline style.
+
+### Awareness Update → EditingUserList
+
+```mermaid
+sequenceDiagram
+    participant AW as provider.awareness
+    participant HOOK as use-collaborative-editor-mode
+    participant ATOM as editingClientsAtom
+    participant EUL as EditingUserList
+
+    AW->>HOOK: awareness.on('update', handler)
+    HOOK->>HOOK: filter: state.editors != null
+    HOOK->>ATOM: onEditorsUpdated(filteredList)
+    ATOM->>EUL: re-render with valid EditingClient[]
+```
+
+The filter (`value.editors != null`) ensures `EditingUserList` never receives `undefined` entries. The `.delete()` call on `getStates()` is removed; Yjs clears stale entries before emitting `update`.
+
+### Cursor Render Cycle
+
+```mermaid
+sequenceDiagram
+    participant CM as CodeMirror EditorView
+    participant RC as yRichCursors Plugin
+    participant AW as provider.awareness
+
+    CM->>RC: update(ViewUpdate)
+    RC->>AW: setLocalStateField('cursor', {anchor, head})
+    Note over AW,RC: awareness fires 'change' — but changeListener<br/>ignores events where only the local client changed
+    AW-->>RC: awareness.on('change') for REMOTE client
+    RC->>CM: dispatch with yRichCursorsAnnotation
+    CM->>RC: update(ViewUpdate) — triggered by annotation
+    RC->>RC: rebuild decorations from state.editors + state.cursor
+```
+
+**Annotation-driven update strategy**: The awareness `change` listener does not call `view.dispatch()` unconditionally — doing so would crash with "Calls to EditorView.update are not allowed while an update is in progress" because `setLocalStateField` in the `update()` method itself triggers an awareness `change` event synchronously. Instead, the listener filters by `clientID`: it dispatches (with a `yRichCursorsAnnotation`) only when at least one **remote** client's state has changed. Local-only awareness changes (from the cursor broadcast in the same `update()` cycle) are silently ignored, and the decoration set is rebuilt in the next `update()` call naturally.
+
+## Requirements Traceability
+
+| Requirement | Summary | Components | Key Interfaces |
+|-------------|---------|------------|----------------|
+| 1.1 | Filter undefined awareness entries | `use-collaborative-editor-mode` | `emitEditorList` filter |
+| 1.2 | Remove `getStates().delete()` mutation | `use-collaborative-editor-mode` | `updateAwarenessHandler` |
+| 1.3 | EditingUserList remains stable | `use-collaborative-editor-mode` → `editingClientsAtom` | `onEditorsUpdated` callback |
+| 1.4 | Skip entries without `editors` field | `use-collaborative-editor-mode` | `emitEditorList` filter |
+| 2.1 | Broadcast user presence via awareness | `use-collaborative-editor-mode` | `awareness.setLocalStateField('editors', ...)` |
+| 2.2–2.3 | Socket.IO awareness events (server) | Out of scope — `collaborative-editor` spec | — |
+| 2.4 | Display active editors | `EditingUserList` (unchanged) | — |
+| 3.1 | Avatar overlay below caret (no block space) | `yRichCursors` | `RichCaretWidget.toDOM()` — `position: absolute` overlay |
+| 3.2 | Avatar size (`AVATAR_SIZE` in `theme.ts`) | `yRichCursors` | `RichCaretWidget.toDOM()` — CSS sizing via shared token |
+| 3.3 | Name label visible on hover only | `yRichCursors` | CSS `:hover` on `.cm-yRichCursorFlag` |
+| 3.4 | Avatar image with initials fallback | `yRichCursors` | `RichCaretWidget.toDOM()` — `<img>` onerror → initials |
+| 3.5 | Cursor caret color, fallback background, and avatar border from `state.editors.color` | `yRichCursors` | `RichCaretWidget` constructor + `borderColor` inline style |
+| 3.6 | Custom cursor via replacement plugin | `yRichCursors` replaces `yRemoteSelections` | `yCollab(activeText, null, { undoManager })` |
+| 3.7 | Cursor updates on awareness change | `yRichCursors` awareness change listener | `awareness.on('change', ...)` |
+| 3.8 | Default semi-transparent avatar | `yRichCursors` | CSS `opacity` on `.cm-yRichCursorFlag` |
+| 3.9 | Full opacity on hover | `yRichCursors` | CSS `:hover` rule |
+| 3.10 | Full opacity during active editing (3s) | `yRichCursors` | `lastActivityMap` + `.cm-yRichCursorActive` class + `setTimeout` |
+| 4.1 | Off-screen indicator at top edge with `arrow_drop_up` above avatar | `yRichCursors` | `topContainer` + Material Symbol icon |
+| 4.2 | Off-screen indicator at bottom edge with `arrow_drop_down` below avatar | `yRichCursors` | `bottomContainer` + Material Symbol icon |
+| 4.3 | No indicator when cursor is in viewport | `yRichCursors` | multi-mode classification in `update()` (rangedMode / coords mode) |
+| 4.4 | Same avatar/color as in-editor widget | `yRichCursors` | shared `state.editors` data |
+| 4.5 | Indicators positioned at cursor's column | `yRichCursors` | `requestMeasure` → `coordsAtPos` → `left: Xpx; transform: translateX(-50%)` |
+| 4.6 | Transition on scroll (indicator ↔ widget) | `yRichCursors` | classification re-run on every `update()` |
+| 4.7 | Overlay positioning (no layout impact) | `yRichCursors` | `position: absolute` on `view.dom` |
+| 4.8 | Indicator X position derived from cursor column | `yRichCursors` | `view.coordsAtPos` (measure phase) or char-width fallback |
+| 4.9 | Arrow always fully opaque in cursor color; avatar fades when idle | `yRichCursors` | `opacity: 1` on `.cm-offScreenArrow`; `opacity: IDLE_OPACITY` on avatar/initials |
+| 5.1 | Avatar border color = `editingClient.color` (replaces fixed `border-info`) | `EditingUserList` | Wrapper `<span>` with `style={{ border: '2px solid {color}', borderRadius: '50%' }}` |
+| 5.2 | Border weight equivalent to existing border | `EditingUserList` | 2 px solid, same as Bootstrap `border` baseline |
+| 5.3 | Color-matched border in overflow popover | `EditingUserList` | Replace `UserPictureList` with inline rendering sharing the same wrapper pattern |
+| 6.1 | Click avatar → editor scrolls to that user's cursor | `EditingUserList` + `use-collaborative-editor-mode` | `onUserClick(clientId)` → `scrollFn` → `view.dispatch(scrollIntoView)` |
+| 6.2 | Scroll centers cursor vertically | `use-collaborative-editor-mode` | `EditorView.scrollIntoView(pos, { y: 'center' })` |
+| 6.3 | No-op when cursor absent from awareness | `use-collaborative-editor-mode` | Guard: `cursor?.head == null → return` |
+| 6.4 | `cursor: pointer` on each avatar | `EditingUserList` | CSS `cursor: pointer` on the clickable wrapper element |
+| 6.5 | Overflow popover avatars also support click-to-scroll | `EditingUserList` | Inline rendering in popover body shares same `onUserClick` prop |
+| 6.6 | Click off-screen indicator → scroll to remote cursor | `yRichCursors` + `use-collaborative-editor-mode` | `scrollCallbackRef.current(clientId)` → same `scrollFn` path as 6.1–6.3 |
+| 6.7 | `cursor: pointer` on each off-screen indicator | `yRichCursors` | `cursor: pointer` via theme or inline style in `createOffScreenIndicator` |
+| 7.1 | Tooltip shows display name on avatar hover in EditingUserList | `UserPicture` (refactored) | Built-in tooltip renders `@username` + display name via portal child |
+| 7.2 | Tooltip on both direct and overflow popover avatars | `EditingUserList` | `noTooltip` removed from `UserPicture`; tooltip renders automatically for all avatars |
+| 7.3 | Tooltip coexists with color-matched border and click-to-scroll | `UserPicture` (refactored) | Tooltip is a portal child of the root `<span>`; no Fragment siblings to disturb flex layout |
+| 7.4 | Tooltip mechanism does not use `UserPicture` HOC | `UserPicture` (refactored) | `withTooltip` HOC eliminated; tooltip inlined in `UserPicture` render function |
+| 7.5 | Tooltip appears with hover-intent delay; disappears on pointer leave | `UserPicture` (refactored) | `UncontrolledTooltip` with `delay={0}` and `fade={false}` (existing behavior preserved) |
+
+## Components and Interfaces
+
+| Component | Domain/Layer | Intent | Req Coverage | Key Dependencies (P0) | Contracts |
+|-----------|--------------|--------|--------------|----------------------|-----------|
+| `use-collaborative-editor-mode` | packages/editor — Hook | Fix awareness filter bug; compose extensions with rich cursor; expose scroll-to-remote-cursor callback; own `scrollCallbackRef` lifecycle | 1.1–1.4, 2.1, 2.4, 6.1–6.3, 6.6 | `yCollab` (P0), `yRichCursors` (P0) | State |
+| `yRichCursors` | packages/editor — Extension | Custom ViewPlugin: broadcasts local cursor position, renders in-viewport cursors with overlay avatar+hover name+activity opacity, renders clickable off-screen indicators at editor edges | 3.1–3.10, 4.1–4.9, 6.6, 6.7 | `@codemirror/view` (P0), `y-websocket awareness` (P0) | Service |
+| `CodeMirrorEditorMain` | packages/editor — Component | Bridge: passes `onScrollToRemoteCursorReady` prop from apps/app into `useCollaborativeEditorMode` | 6.1 | `useCollaborativeEditorMode` (P0) | State |
+| `scrollToRemoteCursorAtom` | apps/app — Jotai atom | Stores the scroll callback registered by `useCollaborativeEditorMode`; read by EditorNavbar | 6.1 | `jotai` (P0) | State |
+| `UserPicture` | packages/ui — Component | Refactored: eliminates `withTooltip` HOC; renders tooltip as portal child of root `<span>` instead of Fragment sibling | 7.1, 7.3–7.5 | `UncontrolledTooltip` (P1) | View |
+| `EditingUserList` | apps/app — Component | Renders active editor avatars with color-matched borders, click-to-scroll; tooltips via native `UserPicture` (no `noTooltip`) | 5.1–5.3, 6.1, 6.4–6.5, 7.2 | `EditingClient[]` (P0) | View |
+
+### packages/editor — Hook
+
+#### `use-collaborative-editor-mode` (modified)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Orchestrates WebSocket provider, awareness, and CodeMirror extension lifecycle for collaborative editing |
+| Requirements | 1.1, 1.2, 1.3, 1.4, 2.1, 2.4, 6.1–6.3, 6.6 |
+
+**Responsibilities & Constraints**
+- Filters `undefined` awareness entries before calling `onEditorsUpdated`
+- Does not mutate `awareness.getStates()` directly
+- Composes `yCollab(null)` + `yRichCursors(awareness, { onClickIndicator: scrollCallbackRef })` to achieve text-sync, undo, rich cursor rendering, and off-screen indicator click handling
+- Creates and registers a `scrollFn` callback (requirement 6) that resolves a remote user's cursor position and dispatches a CodeMirror scroll effect
+- Owns the `scrollCallbackRef` lifecycle: writes `scrollFn` to `scrollCallbackRef.current` when the scroll function is ready; writes `null` on cleanup
+
+**Dependencies**
+- Outbound: `yCollab` from `y-codemirror.next` — text-sync and undo (P0)
+- Outbound: `yRichCursors` — rich cursor rendering (P0)
+- Outbound: `provider.awareness` — read states, set local state (P0)
+- Outbound: `EditorView.scrollIntoView` — scroll dispatch (P0)
+
+**Contracts**: State [x]
+
+##### State Management
+
+- **Bug fix — `emitEditorList`**:
+  ```
+  Before: Array.from(getStates().values(), v => v.editors)   // contains undefined
+  After:  Array.from(getStates().values())
+            .map(v => v.editors)
+            .filter((v): v is EditingClient => v != null)
+  ```
+- **Bug fix — `updateAwarenessHandler`**: Remove `awareness.getStates().delete(clientId)` for all `update.removed` entries; Yjs removes them before emitting the event.
+- **Extension composition change**:
+  ```
+  Before: yCollab(activeText, provider.awareness, { undoManager })
+  After:  [
+            yCollab(activeText, null, { undoManager }),
+            yRichCursors(provider.awareness),
+          ]
+  ```
+  Note: `yCollab` already includes `yUndoManagerKeymap` in its return array, so it must NOT be added separately to avoid keymap duplication. Verify during implementation by inspecting the return value of `yCollab`.
+
+**Implementation Notes**
+- Integration: `yCollab` with `null` awareness suppresses `yRemoteSelections` and `yRemoteSelectionsTheme`. Text-sync (`ySync`) and undo (`yUndoManager`) are not affected by the null awareness value.
+- Risks: If `y-codemirror.next` is upgraded, re-verify that passing `null` awareness still suppresses only the cursor plugins.
+
+##### Configuration Type Extension (Requirements 6, 6.6)
+
+```typescript
+type Configuration = {
+  user?: IUserHasId;
+  pageId?: string;
+  reviewMode?: boolean;
+  onEditorsUpdated?: (clientList: EditingClient[]) => void;
+  // called with the scroll function when provider+ydoc are ready; null on cleanup
+  onScrollToRemoteCursorReady?: (fn: ((clientId: number) => void) | null) => void;
+};
+```
+
+**`scrollCallbackRef` pattern** (new for req 6.6):
+
+```typescript
+// Defined inside useCollaborativeEditorMode, created once per hook mount
+const scrollCallbackRef: { current: ((clientId: number) => void) | null } = useRef(null);
+
+// Extension creation effect (depends on provider, activeDoc, codeMirrorEditor)
+// scrollCallbackRef is captured by reference — stable across provider changes
+yRichCursors(provider.awareness, { onClickIndicator: scrollCallbackRef })
+
+// Scroll function registration effect (same dependencies)
+scrollCallbackRef.current = scrollFn;   // updated silently — no extension recreation
+onScrollToRemoteCursorReady?.(scrollFn);
+
+// Cleanup
+scrollCallbackRef.current = null;
+onScrollToRemoteCursorReady?.(null);
+```
+
+The `scrollFn` is shared by both paths (avatar click via atom, indicator click via ref). Its logic:
+
+```
+scrollFn(clientId: number):
+  1. view = codeMirrorEditor?.view          → undefined → return (editor not mounted)
+  2. rawState = awareness.getStates().get(clientId) as AwarenessState | undefined
+  3. cursor?.head == null                   → return (req 6.3: no-op)
+  4. absPos = Y.createAbsolutePositionFromRelativePosition(cursor.head, activeDoc)
+  5. absPos == null                         → return
+  6. view.dispatch({ effects: EditorView.scrollIntoView(absPos.index, { y: 'center' }) })
+```
+
+---
+
+### packages/editor — Extension
+
+#### `yRichCursors` (new)
+
+| Field | Detail |
+|-------|--------|
+| Intent | CodeMirror ViewPlugin — broadcasts local cursor position, renders in-viewport cursors with overlay avatar and hover-revealed name, renders clickable off-screen indicators pinned to editor edges for cursors outside the viewport |
+| Requirements | 3.1–3.10, 4.1–4.9, 6.6, 6.7 |
+
+**Responsibilities & Constraints**
+- On each `ViewUpdate`: derives local cursor anchor/head → converts to Yjs relative positions → calls `awareness.setLocalStateField('cursor', { anchor, head })` (matches `state.cursor` convention from `y-codemirror.next`)
+- On awareness `change` event: rebuilds decoration set reading `state.editors` (color, name, imageUrlCached) and `state.cursor` (anchor, head) for each remote client
+- Does NOT render a cursor for the local client (`clientID === awareness.doc.clientID`)
+- Selection highlight (background color from `state.editors.colorLight`) is rendered alongside the caret widget
+
+**Dependencies**
+- External: `@codemirror/view` `ViewPlugin`, `WidgetType`, `Decoration`, `EditorView` (P0)
+- External: `@codemirror/state` `RangeSet`, `Annotation` (P0) — `Annotation.define<number[]>()` used for `yRichCursorsAnnotation`
+- External: `yjs` `createRelativePositionFromTypeIndex`, `createAbsolutePositionFromRelativePosition` (P0)
+- External: `y-codemirror.next` `ySyncFacet` (to access `ytext` for position conversion) (P0)
+- External: `y-websocket` — `Awareness` type derived via `WebsocketProvider['awareness']` (not `y-protocols/awareness`, which is not a direct dependency) (P0)
+- Inbound: `provider.awareness` passed as parameter (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+/** Mutable ref container for the scroll-to-remote-cursor function. */
+type ScrollCallbackRef = { current: ((clientId: number) => void) | null };
+
+/** Options for the yRichCursors extension. */
+type YRichCursorsOptions = {
+  /**
+   * Mutable ref holding the scroll-to-remote-cursor callback.
+   * When set, off-screen indicator clicks invoke ref.current(clientId).
+   * Null or unset means clicks are no-ops.
+   */
+  onClickIndicator?: ScrollCallbackRef;
+};
+
+/**
+ * Creates a CodeMirror Extension that renders remote user cursors with
+ * name labels and avatar images, reading user data from state.editors.
+ * Also broadcasts the local user's cursor position via state.cursor.
+ * Renders clickable off-screen indicators for cursors outside the viewport.
+ */
+export function yRichCursors(awareness: Awareness, options?: YRichCursorsOptions): Extension;
+```
+
+Preconditions:
+- `awareness` is an active `y-websocket` Awareness instance
+- `ySyncFacet` is installed by a preceding `yCollab` call so that `ytext` can be resolved for position conversion
+- If `options.onClickIndicator` is provided, `onClickIndicator.current` must be set before any indicator click occurs (typically set synchronously by `use-collaborative-editor-mode` in the scroll-function registration effect)
+
+Postconditions:
+- Remote cursors within the visible viewport are rendered as `cm-yRichCaret` widget decorations at each remote client's head position
+- Remote cursors outside the visible viewport are rendered as off-screen indicator overlays pinned to the top or bottom edge of `view.dom`; each indicator responds to click events by invoking `options.onClickIndicator?.current(clientId)` 
+- Local cursor position is broadcast to awareness as `state.cursor.{ anchor, head }` on each focus-selection change
+
+Invariants:
+- Local client's own cursor is never rendered
+- Cursor decorations are rebuilt when awareness `change` fires for **remote** clients (dispatched via `yRichCursorsAnnotation`); local-only changes are ignored to prevent recursive `dispatch` during an in-progress update
+- `state.cursor` field is written exclusively by `yRichCursors`; no other plugin or code path may call `awareness.setLocalStateField('cursor', ...)` to avoid data races
+- Off-screen indicator click is a no-op when `options.onClickIndicator` is undefined or `.current` is null
+
+##### Widget DOM Structure
+
+```
+<span class="cm-yRichCaret" style="border-color: {color}">
+  ⁠ <!-- Word Joiner (\u2060): inherits line font-size so caret height follows headers -->
+  <span class="cm-yRichCursorFlag [cm-yRichCursorActive]">
+    <img class="cm-yRichCursorAvatar" style="border-color: {color}" />
+      OR  <span class="cm-yRichCursorInitials" style="background-color: {color}; border-color: {color}" />
+    <span class="cm-yRichCursorInfo" style="background-color: {color}">{name}</span>
+  </span>
+</span>
+```
+
+**CSS strategy**: Applied via `EditorView.baseTheme` in `theme.ts`, exported alongside the ViewPlugin.
+
+Key design decisions:
+- **Caret**: Both-side 1px borders with negative margins (zero layout width). Modeled after `yRemoteSelectionsTheme` in `y-codemirror.next`.
+- **Overlay flag**: `position: absolute; top: 100%` below the caret. Always hoverable (no `pointer-events: none`), so the avatar is a direct hover target.
+- **Name label**: Positioned at `left: 0; z-index: -1` (behind the avatar). Left border-radius matches the avatar circle, creating a tab shape that flows from the avatar. Left padding clears the avatar width. Shown on `.cm-yRichCursorFlag:hover`.
+- **Opacity**: `cm-yRichCursorFlag` carries `opacity: IDLE_OPACITY` and transitions to `opacity: 1` on hover or `.cm-yRichCursorActive` (3-second activity window).
+- **Avatar border**: `1.5px solid` border in the cursor's `color` with `box-sizing: border-box` so the 20×20 outer size is preserved. Applied via inline `style.borderColor` in `toDOM()` / `createInitialsElement()`.
+- **Design tokens**: `AVATAR_SIZE = '20px'` and `IDLE_OPACITY = '0.6'` are defined at the top of `theme.ts` and shared across all cursor/off-screen styles.
+
+**Design decision — CSS-only, no React**: The overlay, sizing, and hover behavior are achievable with `position: absolute` and `:hover`. `document.createElement` in `toDOM()` avoids React's async rendering overhead and context isolation.
+
+**Activity tracking** (JavaScript, within `YRichCursorsPluginValue`):
+- `lastActivityMap: Map<number, number>` — `clientId` → timestamp of last awareness change
+- `activeTimers: Map<number, ReturnType<typeof setTimeout>>` — per-client 3-second inactivity timers
+- On awareness `change` for remote clients: update timestamp, reset timer. Timer expiry dispatches with `yRichCursorsAnnotation` to trigger decoration rebuild.
+- `isActive` is passed to both `RichCaretWidget` and off-screen indicators. `eq()` includes `isActive` so state transitions trigger widget re-creation (at most twice per user per 3-second cycle).
+
+`RichCaretWidget` (extends `WidgetType`):
+- Constructor: `RichCaretWidgetOptions` object (`color`, `name`, `imageUrlCached`, `isActive`)
+- `toDOM()`: creates the DOM tree above; `onerror` on `<img>` replaces with initials fallback
+- `eq(other)`: true when all option fields match
+- `estimatedHeight`: `-1` (inline widget), `ignoreEvent()`: `true`
+
+Selection highlight: `Decoration.mark` on selected range with `background-color: {colorLight}`.
+
+##### Off-Screen Cursor Indicators
+
+When a remote cursor's absolute position falls outside the actually visible viewport, the ViewPlugin renders an off-screen indicator instead of a widget decoration.
+
+**Viewport classification — multi-mode strategy**: Because `view.visibleRanges` and `view.viewport` are equal in GROWI's page-scroll editor setup (the editor expands to full content height; the browser page handles scrolling), a single character-position comparison is insufficient. The plugin uses three modes, chosen once per `update()` call:
+
+| Mode | Condition | Method |
+|------|-----------|--------|
+| **rangedMode** | `visibleRanges` is a non-empty, non-trivial sub-range of `viewport` (internal-scroll editor, or jsdom tests with styled heights) | Compare `headIndex` against `visibleRanges[0].from` / `visibleRanges[last].to` |
+| **coords mode** | `visibleRanges == viewport` AND `scrollDOM.getBoundingClientRect().height > 0` (GROWI's page-scroll production setup) | `lineBlockAt(headIndex)` + `scrollDOMRect.top` vs `window.innerHeight` |
+| **degenerate** | `scrollRect.height == 0` (jsdom with 0-height container) | No off-screen classification; every cursor gets a widget decoration |
+
+`view.lineBlockAt()` reads stored height-map data (safe to call in `update()`). `scrollDOM.getBoundingClientRect()` is a raw DOM call, not restricted by CodeMirror's "Reading the editor layout isn't allowed during an update" guard.
+
+**DOM management**: The ViewPlugin creates two persistent container elements (`topContainer`, `bottomContainer`) and appends them to `view.dom` in the `constructor`. They are removed in `destroy()`. The containers are always present in the DOM but empty when no off-screen cursors exist in that direction.
+
+```
+view.dom (position: relative — already set by CodeMirror)
+├── .cm-scroller (managed by CM)
+│   └── .cm-content ...
+├── .cm-offScreenTop    ← topContainer (absolute, top: 0, height: AVATAR_SIZE + 14px)
+│   ├── .cm-offScreenIndicator  style="left: {colX}px; transform: translateX(-50%)"
+│   │   ├── .cm-offScreenArrow (material-symbols-outlined) — "arrow_drop_up"
+│   │   └── .cm-offScreenAvatar / .cm-offScreenInitials
+│   └── .cm-offScreenIndicator  (another user, different column)
+└── .cm-offScreenBottom ← bottomContainer (absolute, bottom: 0, height: AVATAR_SIZE + 14px)
+    └── .cm-offScreenIndicator  style="left: {colX}px; transform: translateX(-50%)"
+        ├── .cm-offScreenAvatar / .cm-offScreenInitials
+        └── .cm-offScreenArrow (material-symbols-outlined) — "arrow_drop_down"
+```
+
+**`OffScreenIndicatorOptions` type extension** (req 6.6, 6.7):
+
+```typescript
+export type OffScreenIndicatorOptions = {
+  direction: 'above' | 'below';
+  clientId: number;                          // NEW: identifies user for click handler
+  color: string;
+  name: string;
+  imageUrlCached: string | undefined;
+  isActive: boolean;
+  onClick?: (clientId: number) => void;      // NEW: invoked on indicator click
+};
+```
+
+`createOffScreenIndicator` attaches a `click` event listener on the root `<span>` element that calls `onClick(clientId)`. The indicator root element also receives `style.cursor = 'pointer'` when `onClick` is provided, satisfying req 6.7.
+
+**Indicator DOM structure** (built by `createOffScreenIndicator()` in `off-screen-indicator.ts`):
+- **above**: `[arrow_drop_up icon][avatar or initials]` stacked vertically (flex-column)
+- **below**: `[avatar or initials][arrow_drop_down icon]` stacked vertically (flex-column)
+- Arrow element: `<span class="material-symbols-outlined cm-offScreenArrow" style="color: {color}">arrow_drop_up</span>` — font loaded via `var(--grw-font-family-material-symbols-outlined)` (Next.js-registered Material Symbols Outlined)
+- Avatar: same `borderColor`, `AVATAR_SIZE`, and onerror→initials fallback as in-editor widget
+- Opacity: arrow always `opacity: 1`; avatar/initials use `IDLE_OPACITY` → `1` via `.cm-yRichCursorActive` on the indicator
+
+**Horizontal positioning** (deferred to measure phase):
+After `replaceChildren()`, the plugin calls `view.requestMeasure()`:
+- **read phase**: for each indicator, call `view.coordsAtPos(headIndex, 1)` to get screen X. If null (virtualized position), fall back to `contentDOM.getBoundingClientRect().left + col * view.defaultCharacterWidth`.
+- **write phase**: set `indicator.style.left = Xpx` and `indicator.style.transform = 'translateX(-50%)'` to center the indicator on the cursor column.
+
+**Update cycle**:
+1. Classify all remote cursors (mode-dependent: rangedMode/coords/degenerate)
+2. Build `aboveIndicators: {el, headIndex}[]` and `belowIndicators: {el, headIndex}[]`
+3. `topContainer.replaceChildren(...aboveIndicators.map(i => i.el))`; same for bottom
+4. If any indicators exist, call `view.requestMeasure()` to set horizontal positions
+5. Cursors that lack `state.cursor` or `state.editors` are excluded from both in-view and off-screen rendering
+
+**Implementation Notes**
+- Integration: file location `packages/editor/src/client/services-internal/extensions/y-rich-cursors/` (directory — split into `index.ts`, `plugin.ts`, `widget.ts`, `off-screen-indicator.ts`, `theme.ts`); exported from `packages/editor/src/client/services-internal/extensions/index.ts` and consumed directly in `use-collaborative-editor-mode.ts`
+- Validation: `imageUrlCached` is optional; if undefined or empty, the `<img>` element is skipped and only initials are shown
+- Risks: `ySyncFacet` must be present in the editor state when the plugin initializes; guaranteed since `yCollab` (which installs `ySyncFacet`) is added before `yRichCursors` in the extension array
+
+---
+
+### apps/app — Jotai Atom
+
+#### `scrollToRemoteCursorAtom` (new)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Stores the scroll-to-remote-cursor callback registered by `useCollaborativeEditorMode`; consumed by `EditorNavbar` → `EditingUserList` |
+| Requirements | 6.1 |
+
+**File**: `apps/app/src/states/ui/editor/scroll-to-remote-cursor.ts`
+
+```typescript
+const scrollToRemoteCursorAtom = atom<((clientId: number) => void) | null>(null);
+
+/** Read the scroll callback (null when collaboration is not active) */
+export const useScrollToRemoteCursor = (): ((clientId: number) => void) | null =>
+  useAtomValue(scrollToRemoteCursorAtom);
+
+/** Register or clear the scroll callback.
+ *  Wraps the raw setAtom call to prevent Jotai from treating a function
+ *  value as an updater.  Jotai's `setAtom(fn)` signature interprets `fn`
+ *  as `(prev) => next`; passing `setAtom(() => fn)` forces it to store
+ *  the function value itself instead of invoking it. */
+export const useSetScrollToRemoteCursor = (): ((
+  fn: ((clientId: number) => void) | null,
+) => void) => {
+  const setAtom = useSetAtom(scrollToRemoteCursorAtom);
+  return useCallback(
+    (fn: ((clientId: number) => void) | null) => {
+      setAtom(() => fn);
+    },
+    [setAtom],
+  );
+};
+```
+
+**Lifecycle**: set when `useCollaborativeEditorMode`'s extension effect runs, cleared on effect cleanup.
+
+---
+
+### packages/ui — Component
+
+#### `UserPicture` (refactored)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Eliminate the `withTooltip` HOC that returns a React Fragment, replacing it with inline tooltip rendering as a portal child of the root `<span>` |
+| Requirements | 7.1, 7.3–7.5 |
+
+**Problem**: The current `withTooltip` HOC wraps `UserPictureRootWithoutLink`/`UserPictureRootWithLink` and returns a Fragment (`<> <span ref={ref}><img/></span> <UncontrolledTooltip target={ref}/> </>`). When the HOC-wrapped `UserPicture` is placed inside a flex container (e.g., the `<button>` in `EditingUserList`), the Fragment's two React children can cause unpredictable flex layout behavior.
+
+**Refactoring approach**: Eliminate the `withTooltip` HOC entirely. Render the tooltip inline within `UserPicture`'s render function as a child of the root `<span>`:
+
+```typescript
+export const UserPicture = memo((userProps: Props): JSX.Element => {
+  const { user, size, noLink, noTooltip, className: additionalClassName } = userProps;
+  // ... existing field extraction (username, displayName, src, className) ...
+
+  const showTooltip = !noTooltip && hasName(user);
+  const rootRef = useRef<HTMLSpanElement>(null);
+
+  const tooltipClassName = `${moduleTooltipClass} user-picture-tooltip-${size ?? 'md'}`;
+
+  const children = (
+    <>
+      {imgElement}
+      {showTooltip && (
+        <UncontrolledTooltip
+          placement="bottom"
+          target={rootRef}
+          popperClassName={tooltipClassName}
+          delay={0}
+          fade={false}
+        >
+          {username ? <>{`@${username}`}<br /></> : null}
+          {displayName}
+        </UncontrolledTooltip>
+      )}
+    </>
+  );
+
+  if (username == null || noLink) {
+    return (
+      <UserPictureRootWithoutLink ref={rootRef} displayName={displayName} size={size}>
+        {children}
+      </UserPictureRootWithoutLink>
+    );
+  }
+
+  return (
+    <UserPictureRootWithLink ref={rootRef} displayName={displayName} size={size} username={username}>
+      {children}
+    </UserPictureRootWithLink>
+  );
+});
+```
+
+**Why this works**: `UncontrolledTooltip` (reactstrap) uses `ReactDOM.createPortal` to render tooltip markup into `document.body`. When placed as a child of the root `<span>`, it occupies no space in the parent's DOM — only the `<img>` is a visible child. The root element is always a single `<span>`, regardless of whether the tooltip is shown. This eliminates the Fragment-induced flex layout issue.
+
+**Key changes**:
+- `withTooltip` HOC function: **deleted**
+- `useRef` for tooltip targeting: moved from HOC into `UserPicture` render body (always called unconditionally — satisfies React hooks rules)
+- `rootRef` is passed to `UserPictureRootWithoutLink`/`UserPictureRootWithLink` via `forwardRef` (they already support it)
+- Tooltip content, `popperClassName`, `delay`, `fade` values: preserved from the existing HOC
+- **`next/dynamic()` import: preserved** — the module-level `const UncontrolledTooltip = dynamic(() => import('reactstrap')..., { ssr: false })` is unchanged. This maintains the code-split boundary: reactstrap is loaded as a separate chunk only when the tooltip is actually rendered. Consumers passing `noTooltip` never trigger the chunk load.
+- `UserPicture.module.scss`: **unchanged** (tooltip margin classes still referenced via `popperClassName`)
+- `noTooltip` prop: **preserved** for consumers that intentionally suppress tooltips (e.g., sidebar dropdowns, inline notifications)
+
+**Impact on existing consumers**: The public API (`Props`) is unchanged. The only observable difference is that the returned React element is always a single-root `<span>` (no Fragment), which is layout-safe in all container types (flex, grid, inline). Existing `noTooltip` usages continue to work.
+
+---
+
+### apps/app — Component
+
+#### `EditingUserList` (modified)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Displays active editor avatars with color-matched borders, click-to-scroll, and username tooltips on hover |
+| Requirements | 5.1–5.3, 6.1, 6.4–6.5, 7.1–7.5 |
+
+**Props change**:
+
+```typescript
+type Props = {
+  clientList: EditingClient[];
+  onUserClick?: (clientId: number) => void;  // NEW: scroll-to-cursor callback
+};
+```
+
+**Color-matched border (req 5.1–5.3)**:
+
+`UserPicture` does not accept a `style` prop (the prop is applied to the `<img>` tag, not the root element). A wrapper `<span>` with an inline border style is used instead:
+
+```
+<span
+  style={{ border: `2px solid ${editingClient.color}`, borderRadius: '50%', display: 'inline-block' }}
+>
+  <UserPicture user={editingClient} noLink />
+</span>
+```
+
+The `border border-info` className is removed from `UserPicture`.
+
+**Click-to-scroll (req 6.1, 6.4)**:
+
+Each avatar wrapper is made interactive:
+
+```
+<button
+  type="button"
+  style={{ cursor: 'pointer', background: 'none', border: 'none', padding: 0 }}
+  onClick={() => onUserClick?.(editingClient.clientId)}
+>
+  <span style={{ border: `2px solid ${editingClient.color}`, borderRadius: '50%', display: 'inline-block' }}>
+    <UserPicture user={editingClient} noLink />
+  </span>
+</button>
+```
+
+**Username tooltip (req 7.1–7.5)**:
+
+Tooltips are provided by `UserPicture` natively, after the HOC refactoring (see the `UserPicture` component section above). The `noTooltip` prop is **removed** from all `UserPicture` usages in `EditingUserList`. Because the refactored `UserPicture` renders the tooltip as a portal child of its root `<span>` (not a Fragment sibling), the tooltip coexists cleanly with the flex `<button>` wrapper.
+
+```
+// AvatarWrapper renders (simplified — no external tooltip needed):
+<button
+  type="button"
+  data-testid={`avatar-wrapper-${client.clientId}`}
+  className={`${avatarWrapperClass} d-inline-flex ...`}
+  style={{ border: `2px solid ${client.color}` }}
+  onClick={() => onUserClick?.(client.clientId)}
+>
+  <UserPicture user={client} noLink />
+</button>
+```
+
+**Key decisions**:
+- The same `AvatarWrapper` sub-component is reused for both the first-4 direct avatars and the overflow popover avatars, so the tooltip applies uniformly (req 7.2).
+- No `id` attribute generation or external `UncontrolledTooltip` needed — the tooltip is fully encapsulated within `UserPicture`.
+
+**Overflow popover (req 5.3, 6.5, 7.2)**:
+
+`UserPictureList` (a generic legacy class component that does not accept `onUserClick` or color props) is replaced by inline rendering within `EditingUserList`, using the same `AvatarWrapper` sub-component for `remainingUsers`. This gives overflow avatars the same color border, click-to-scroll, and tooltip behavior.
+
+**`EditorNavbar` wiring**:
+
+```typescript
+// EditorNavbar.tsx
+const EditingUsers = (): JSX.Element => {
+  const editingClients = useEditingClients();
+  const scrollToRemoteCursor = useScrollToRemoteCursor();
+  return (
+    <EditingUserList
+      clientList={editingClients}
+      onUserClick={scrollToRemoteCursor ?? undefined}
+    />
+  );
+};
+```
+
+**`PageEditor` wiring**:
+
+```typescript
+// PageEditor.tsx — existing hook setup
+const setScrollToRemoteCursor = useSetScrollToRemoteCursor();
+// ...
+<CodeMirrorEditorMain
+  onScrollToRemoteCursorReady={setScrollToRemoteCursor}
+  // ...existing props
+/>
+```
+
+## Data Models
+
+### Domain Model
+
+No new persistent data. The awareness state already carries all required fields via the `EditingClient` interface in `state.editors`.
+
+```typescript
+// Existing — no changes
+type EditingClient = Pick<IUser, 'name'> &
+  Partial<Pick<IUser, 'username' | 'imageUrlCached'>> & {
+    clientId: number;
+    userId?: string;
+    color: string;       // cursor caret and flag background color
+    colorLight: string;  // selection range highlight color
+  };
+```
+
+The `state.cursor` awareness field follows the existing `y-codemirror.next` convention:
+```typescript
+type CursorState = {
+  anchor: RelativePosition; // Y.RelativePosition JSON
+  head: RelativePosition;
+};
+```
+
+## Error Handling
+
+| Error Type | Scenario | Response |
+|------------|----------|----------|
+| Missing `editors` field | Client connects but has not set awareness yet | Filtered out in `emitEditorList`; not rendered in `EditingUserList` |
+| Avatar image load failure | `imageUrlCached` URL returns 4xx/5xx | `<img>` `onerror` replaces element with initials `<span>` (colored circle with user initials) |
+| `state.cursor` absent | Remote client connected but editor not focused | Cursor widget not rendered for that client (no `cursor.anchor` → skip) |
+| `ySyncFacet` not installed | `yRichCursors` initialized before `yCollab` | Position conversion returns `null`; cursor is skipped for that update cycle. Extension array order in `use-collaborative-editor-mode` guarantees correct sequencing. |
+| Off-screen container detached | `view.dom` removed from DOM before `destroy()` | `destroy()` calls `remove()` on both containers; if already detached, `remove()` is a no-op |
+| Viewport not yet initialized | First `update()` before CM calculates viewport | `view.viewport` always has valid `from`/`to` from initialization; safe to compare |
+| Click-to-scroll: view not mounted | `scrollFn` called before CodeMirror mounts | `codeMirrorEditor?.view == null` guard returns early; no crash |
+| Click-to-scroll: cursor absent | Clicked user has no `cursor.head` in awareness | Guard `cursor?.head == null → return`; no-op per req 6.3 |
+| Click-to-scroll: position unresolvable | `createAbsolutePositionFromRelativePosition` returns `null` (stale document state) | Guard `absPos == null → return`; no crash |
+
+## Testing Strategy
+
+Test files are co-located with source in `y-rich-cursors/`:
+- **Unit**: `widget.spec.ts` (DOM structure, eq, fallback), `off-screen-indicator.spec.ts` (indicator DOM, direction, fallback)
+- **Integration**: `plugin.integ.ts` (awareness filter, cursor broadcast, viewport classification, activity timers)
+- **E2E** (Playwright, deferred): hover behavior, off-screen scroll transitions, pointer-events pass-through
+
+### Additional Tests for Requirements 5, 6, 6.6–6.7, and 7
+
+- **Unit — `UserPicture.spec.tsx`** (new or extended in `packages/ui`):
+  - Without `noTooltip`: renders a single root `<span>` (no Fragment) containing `<img>` and portal tooltip (req 7.4)
+  - With `noTooltip`: renders a single root `<span>` containing only `<img>` (existing behavior preserved)
+  - Tooltip content includes `@username` and display name when both available (req 7.1)
+  - Root element is flex-layout-safe (single child, not Fragment) (req 7.3)
+
+- **Unit — `EditingUserList.spec.tsx`** (new or extended):
+  - Renders a colored border wrapper matching `editingClient.color` (req 5.1)
+  - Does not render `border-info` class (req 5.1)
+  - Calls `onUserClick(clientId)` when avatar is clicked (req 6.1, 6.4)
+  - Overflow popover avatars also call `onUserClick` (req 6.5)
+  - `UserPicture` rendered without `noTooltip` (tooltip delegated to `UserPicture`) (req 7.2)
+
+- **Integration — `use-collaborative-editor-mode` scroll test** (extended):
+  - `onScrollToRemoteCursorReady` is called with a function when provider is set up
+  - `scrollFn(clientId)` dispatches `scrollIntoView` to the view when cursor is available (req 6.1–6.2)
+  - `scrollFn(clientId)` is a no-op when cursor is absent (req 6.3)
+  - `scrollCallbackRef.current` is set when the scroll function is registered; cleared on cleanup (req 6.6)
+
+- **Unit — `off-screen-indicator.spec.ts`** (extended):
+  - Indicator root element has `cursor: pointer` when `onClick` is provided (req 6.7)
+  - Clicking the indicator element invokes `onClick(clientId)` (req 6.6)
+  - No click handler or `cursor: pointer` when `onClick` is not provided (boundary test)
+
+- **Integration — `plugin.integ.ts`** (extended):
+  - When `onClickIndicator` ref is set and an off-screen indicator is clicked, `ref.current` is invoked with the correct `clientId` (req 6.6)
+  - When `onClickIndicator.current` is null, clicking an off-screen indicator does not throw (req 6.6, no-op guard)

+ 107 - 0
.kiro/specs/collaborative-editor-awareness/requirements.md

@@ -0,0 +1,107 @@
+# Requirements Document
+
+## Project Description (Input)
+collaborative-editor-awareness
+
+## Introduction
+
+GROWI's collaborative editor uses Yjs awareness protocol to track which users are currently editing a page and where their cursors are positioned. This awareness information is surfaced in two places: the `EditingUserList` component in the editor navbar (showing active user avatars), and the in-editor cursor decorations rendered by `y-codemirror.next`.
+
+**Scope**: Client-side awareness state management, `EditingUserList` display stability (bug fix), and rich cursor rendering (username + avatar) in the CodeMirror editor.
+
+**Out of Scope**: Server-side awareness bridging to Socket.IO (covered in `collaborative-editor` spec), WebSocket transport, MongoDB persistence, or authentication.
+
+**Inherited from**: `collaborative-editor` — Requirement 5 (Awareness and Presence Tracking). That spec now delegates awareness display behavior to this specification.
+
+## Requirements
+
+### Requirement 1: Awareness State Stability
+
+**Objective:** As a wiki user viewing the collaborative editor, I want the editing user list to remain visible and accurate at all times while other users are connected, so that I can reliably see who is co-editing with me.
+
+#### Acceptance Criteria
+
+1. The Collaborative Editor Client shall filter out any awareness state entries that do not contain a valid `editors` field before passing the client list to `EditingUserList`, so that `undefined` values never appear in the rendered list.
+2. The Collaborative Editor Client shall not manually mutate `awareness.getStates()` (e.g., call `.delete()` on removed client IDs), as the Yjs awareness system already removes stale entries before firing the `update` event.
+3. While a user is connected and at least one other user is in the same editing session, the EditingUserList shall remain visible and not disappear due to transient undefined values or internal map mutations.
+4. If an awareness state entry is received without an `editors` field (e.g., from a client that has not yet broadcast its presence), the Collaborative Editor Client shall silently skip that entry rather than propagating an undefined value.
+
+### Requirement 2: Awareness Presence Tracking (Inherited)
+
+**Objective:** As a wiki user, I want to see which other users are currently editing the same page, so that I can coordinate edits and avoid conflicts.
+
+#### Acceptance Criteria
+
+1. While a user is editing a page, the Collaborative Editor Client shall broadcast the user's presence information (name, username, avatar URL, cursor color) via the Yjs awareness protocol using the `editors` field on the local awareness state.
+2. When a user connects or disconnects from a collaborative editing session, the Yjs Service shall emit awareness state size updates to the page's Socket.IO room (`page:{pageId}`) via `YjsAwarenessStateSizeUpdated`.
+3. When the last user disconnects from a document, the Yjs Service shall emit a draft status notification (`YjsHasYdocsNewerThanLatestRevisionUpdated`) to the page's Socket.IO room.
+4. The Collaborative Editor Client shall display the list of active editors based on awareness state updates received from the Yjs WebSocket provider.
+
+### Requirement 3: Rich Cursor Display (Overlay Avatar)
+
+**Objective:** As a wiki user editing collaboratively, I want to see other users' cursors with their profile image as an overlay, so that I can easily identify who is editing where in the document without the cursor widget disrupting the text layout.
+
+#### Acceptance Criteria
+
+1. While multiple users are editing the same page, the Collaborative Editor Client shall render each remote user's cursor with a profile image (avatar) positioned directly below the caret line, as an overlay that does not consume block space in the editor content flow.
+2. The avatar overlay size shall be 20×20 CSS pixels (circular), smaller than `EditingUserList` to minimize interference with editor content.
+3. While hovering over the avatar overlay, the Collaborative Editor Client shall display the user's display name in a tooltip-like label adjacent to the avatar. When not hovered, the name label shall be hidden.
+4. When `imageUrlCached` is available in the remote user's awareness state, the avatar shall display that image. If `imageUrlCached` is unavailable or fails to load, the avatar shall fall back to the user's initials rendered in a colored circle.
+5. The cursor caret color, avatar fallback background color, and avatar border color shall all match the `color` value from the user's awareness state, consistent with the color shown in `EditingUserList`. A colored circular border (matching `color`) shall be applied to both avatar images and initials circles to visually associate the avatar with the cursor.
+6. The Collaborative Editor Client shall suppress the default cursor plugin by passing `null` as the awareness argument to `yCollab` (from `y-codemirror.next`), and use the separate `yRichCursors` extension for cursor rendering.
+7. When a user's awareness state changes (e.g., cursor moves), the Collaborative Editor Client shall re-render that user's cursor with up-to-date information without re-mounting the entire cursor set.
+8. The avatar overlay shall be rendered at reduced opacity (semi-transparent) by default to minimize visual distraction.
+9. While the user hovers over the avatar overlay or cursor caret, the avatar shall be displayed at full opacity (1.0).
+10. When a remote user is actively editing (awareness cursor state has changed within the last 3 seconds), their avatar shall be displayed at full opacity (1.0). After 3 seconds of inactivity (no cursor/awareness change), the avatar shall return to the reduced opacity state.
+
+### Requirement 4: Off-Screen Cursor Indicators
+
+**Objective:** As a wiki user editing collaboratively, I want to know when other users are editing parts of the document that are not currently visible in my viewport, so that I am aware of all editing activity even outside my scroll position.
+
+#### Acceptance Criteria
+
+1. When a remote user's cursor is positioned above the current visible viewport, the Collaborative Editor Client shall display that user's avatar icon pinned to the top edge of the editor. An `arrow_drop_up` Material Symbol icon (in the cursor's color) shall be stacked above the avatar to indicate the user is editing above the visible area.
+2. When a remote user's cursor is positioned below the current visible viewport, the Collaborative Editor Client shall display that user's avatar icon pinned to the bottom edge of the editor. An `arrow_drop_down` Material Symbol icon (in the cursor's color) shall be stacked below the avatar to indicate the user is editing below the visible area.
+3. When a remote user's cursor is within the visible viewport, no off-screen indicator shall be shown for that user (the in-editor cursor widget from Requirement 3 is shown instead).
+4. The off-screen indicator shall use the same avatar image (or initials fallback) and color as the in-editor cursor widget, maintaining visual consistency.
+5. When multiple remote users are off-screen in the same direction (above or below), each indicator shall be independently positioned horizontally to reflect its remote cursor's column position in the document. Indicators at different columns appear at different horizontal positions within the container.
+6. When the user scrolls and a previously off-screen cursor enters the viewport, the off-screen indicator for that user shall be removed and the in-editor cursor widget shall appear instead. Conversely, when a previously visible cursor leaves the viewport due to scrolling, an off-screen indicator shall appear.
+7. The off-screen indicators shall be rendered as overlays (absolute positioning within the editor container) and shall not affect the editor's scroll height or content layout.
+8. The horizontal position of each off-screen indicator shall reflect the remote cursor's column position in the document. The `left` CSS value of the indicator shall be derived from the cursor's screen X coordinate (via `view.coordsAtPos` in the measure phase) or, for virtualized positions, approximated using character-width estimation. The indicator shall be centered on the cursor column (`transform: translateX(-50%)`).
+9. The direction arrow icon of the off-screen indicator shall always be rendered at full opacity (1.0) in the cursor's color, regardless of the idle/active state. Only the avatar image or initials element shall fade to reduced opacity (`IDLE_OPACITY`) when the user is idle, and return to full opacity when active.
+
+### Requirement 5: Color-Matched User Avatars in EditingUserList
+
+**Objective:** As a wiki user editing collaboratively, I want the avatar border in `EditingUserList` to use each user's cursor color, so that I can visually associate an avatar in the list with that user's cursor in the editor.
+
+#### Acceptance Criteria
+
+1. Each `UserPicture` in `EditingUserList` shall display a border whose color equals `editingClient.color` from the user's `EditingClient` data, replacing the current fixed `border-info` color.
+2. The border width and visual weight shall be equivalent to the existing border appearance (1–2 px solid ring).
+3. The same color-matched border shall be applied to avatars in the overflow popover (the remaining users shown via the `+N` button).
+
+### Requirement 6: Scroll to Remote Cursor on Avatar Click
+
+**Objective:** As a wiki user editing collaboratively, I want to click a user's avatar in `EditingUserList` to jump to that user's cursor position in the editor, so that I can quickly navigate to where they are editing.
+
+#### Acceptance Criteria
+
+1. Clicking an avatar in `EditingUserList` shall scroll the editor viewport so that the clicked user's remote cursor becomes visible.
+2. The editor shall scroll to center the cursor vertically (`y: 'center'`).
+3. If the clicked user has no active cursor position in the awareness state, the click shall have no effect (no error or crash).
+4. Each avatar shall display a `cursor: pointer` affordance to indicate it is clickable.
+5. Both the first-4 avatars displayed directly in the navbar and the avatars in the overflow popover shall support click-to-scroll.
+6. When an off-screen indicator (pinned to the top or bottom edge of the editor) is clicked, the Collaborative Editor Client shall scroll the editor viewport so that the corresponding remote user's cursor is centered vertically in the visible area, applying the same scroll behavior as Criteria 1–3 above.
+7. Each off-screen indicator shall display a `cursor: pointer` affordance to indicate it is clickable.
+
+### Requirement 7: Username Tooltip in EditingUserList
+
+**Objective:** As a wiki user editing collaboratively, I want to see a tooltip with the co-editor's username when hovering over their avatar in `EditingUserList`, so that I can identify each co-editor by name even when the avatar image is not recognizable.
+
+#### Acceptance Criteria
+
+1. While hovering over any avatar in `EditingUserList`, the Collaborative Editor Client shall display a tooltip containing the user's display name.
+2. The tooltip shall be applied to both the first-4 directly visible avatars and the avatars inside the overflow popover.
+3. The tooltip shall coexist with the color-matched border (Requirement 5) and the click-to-scroll behavior (Requirement 6) without visual or functional conflict.
+4. If the `UserPicture` component's `noTooltip` flag or a higher-order component (HOC) prevents native tooltip rendering, the tooltip mechanism shall be refactored — for example by embedding tooltip functionality directly in the component or by adopting a tooltip primitive (e.g., `UncontrolledTooltip`, `Tooltip` from `reactstrap`) — so that username display is not suppressed.
+5. The tooltip shall appear with standard UI delay (hover intent) and disappear when the pointer leaves the avatar area.

+ 218 - 0
.kiro/specs/collaborative-editor-awareness/research.md

@@ -0,0 +1,218 @@
+# Research & Design Decisions
+
+---
+**Purpose**: Capture discovery findings, architectural investigations, and rationale that inform the technical design.
+
+---
+
+## Summary
+
+- **Feature**: `collaborative-editor-awareness`
+- **Discovery Scope**: Extension (existing collaborative editor system); Phase 2 adds Requirements 5 & 6 (color-matched avatars + click-to-scroll)
+- **Key Findings** (original):
+  - `y-codemirror.next@0.3.5` reads `state.user` for cursor info, but GROWI sets `state.editors` — causing all cursors to render as "Anonymous" with default blue color today
+  - `yCollab` in v0.3.5 does NOT support a `cursorBuilder` option; the cursor DOM is hardcoded in `YRemoteCaretWidget`
+  - `awareness.getStates().delete(clientId)` in the current `updateAwarenessHandler` is an incorrect direct mutation of Yjs-managed internal state; Yjs removes stale entries before emitting `update`
+- **Key Findings** (Phase 2):
+  - `UserPicture` (`@growi/ui`) does not accept a `style` prop; dynamic border colors require a wrapper element approach
+  - `packages/editor` cannot import from `apps/app`; callback props (`onScrollToRemoteCursorReady`) are used to cross the package boundary
+  - `EditorView.scrollIntoView(pos, { y: 'center' })` (CodeMirror built-in) is sufficient for the scroll-to-cursor feature; no new dependencies required
+
+## Research Log
+
+### y-codemirror.next@0.3.5 Cursor API Analysis
+
+- **Context**: Requirement 3.5 proposed a `cursorBuilder` option for `yCollab`. Does the installed version support it?
+- **Sources Consulted**: Package source at `node_modules/.pnpm/y-codemirror.next@0.3.5_.../src/index.js` and `y-remote-selections.js`
+- **Findings**:
+  - `yCollab` signature: `(ytext, awareness, { undoManager }) => Extension[]`; no `cursorBuilder` parameter
+  - Cursor rendering is entirely inside `YRemoteCaretWidget.toDOM()` — hardcoded name-only label
+  - Public exports include `ySync`, `ySyncFacet`, `YSyncConfig`, `yRemoteSelections`, `yRemoteSelectionsTheme`, `yUndoManagerKeymap`. NOT exported: `yUndoManager`, `yUndoManagerFacet`, `YUndoManagerConfig`
+  - `y-remote-selections.js` reads `state.user.color` and `state.user.name`, but GROWI awareness sets `state.editors`
+- **Implications**: Requirement 3.5 cannot be fulfilled via `yCollab` option. Must replace `yRemoteSelections` with a custom ViewPlugin. Since `yUndoManager`/`yUndoManagerFacet`/`YUndoManagerConfig` are not in the public API, `yCollab` must still be used for undo; awareness must be suppressed at call site.
+
+### Awareness Field Mismatch (state.user vs state.editors)
+
+- **Context**: Why do cursors show "Anonymous" despite the provider being set up with user data?
+- **Findings**:
+  - GROWI sets: `awareness.setLocalStateField('editors', { name, color, imageUrlCached, ... })`
+  - `y-remote-selections.js` reads: `const { color, name } = state.user || {}`
+  - Result: `state.user` is always undefined → name = "Anonymous", color = default `#30bced`
+- **Implications**: Cursor name/color are currently broken. Fix requires either (a) also setting `state.user`, or (b) replacing the cursor plugin. Since we are building a rich cursor plugin anyway, the clean fix is (b).
+
+### EditingUserList Disappearance Bug Root Cause
+
+- **Context**: `EditingUserList` intermittently disappears when users are actively editing.
+- **Findings** (from `use-collaborative-editor-mode.ts` source):
+  1. `Array.from(awareness.getStates().values(), v => v.editors)` produces `undefined` for clients whose awareness state has not yet included an `editors` field
+  2. `Array.isArray(clientList)` is always `true` — the guard never filters undefined values
+  3. `EditingUserList` maps over the list and accesses `editingClient.clientId`; for an `undefined` entry this throws or renders an `undefined` element → React key error or render bail-out, causing the list to disappear
+  4. `awareness.getStates().delete(clientId)` for removed clients is redundant and incorrect: the Yjs awareness protocol removes stale entries from the `Map` before emitting the `update` event. This mutation may cause stale data re-entry or missed subsequent updates
+- **Implications**: Filter undefined entries and remove the `.delete()` call; no other changes to awareness-update logic required.
+
+### yCollab with null awareness
+
+- **Context**: Can we suppress `yRemoteSelections` without losing text-sync or undo functionality?
+- **Findings**:
+  - `ySync` (`YSyncPluginValue`) reads only `conf.ytext` — does not touch `conf.awareness`
+  - `yUndoManager` reads only `conf.undoManager` (via `yUndoManagerFacet`) and `conf.ytext` (via `ySyncFacet`) — does not touch awareness
+  - `yCollab` skips `yRemoteSelections` and `yRemoteSelectionsTheme` when `awareness` is falsy: `if (awareness) { plugins.push(yRemoteSelectionsTheme, yRemoteSelections) }`
+  - Calling `yCollab(activeText, null, { undoManager })` therefore produces only: `[ySyncFacet.of(ySyncConfig), ySync, yUndoManagerFacet.of(...), yUndoManager, EditorView.domEventHandlers]`
+- **Implications**: Safe to pass `null` as awareness to `yCollab` to suppress the default cursor plugin, then add `yRichCursors(provider.awareness)` separately.
+
+### Local Cursor Broadcasting Responsibility
+
+- **Context**: `yRemoteSelections` (`YRemoteSelectionsPluginValue.update()`) broadcasts the local cursor position via `awareness.setLocalStateField('cursor', { anchor, head })`. If we remove `yRemoteSelections`, who does this?
+- **Findings**:
+  - The broadcast is implemented entirely in `y-remote-selections.js` — not in `ySync`
+  - Our custom `yRichCursors` ViewPlugin must include equivalent broadcast logic: on each `view.update`, derive anchor/head from `update.state.selection.main`, convert to Yjs relative positions, and call `awareness.setLocalStateField('cursor', ...)`
+  - Cursor position uses the existing `state.cursor` field convention (unchanged)
+- **Implications**: `yRichCursors` is a full replacement for `yRemoteSelections`, not just an additive decoration layer.
+
+## Architecture Pattern Evaluation
+
+| Option | Description | Strengths | Risks / Limitations |
+|--------|-------------|-----------|---------------------|
+| A: Set `state.user` alongside `state.editors` | Keep existing `yRemoteSelections`; set both awareness fields | Minimal code change | No avatar support; maintains the two-field redundancy; cursor info is the name only |
+| B: Custom ViewPlugin (replace `yRemoteSelections`) | `yCollab(null)` + `yRichCursors(awareness)` | Full avatar+name rendering; single source of truth in `state.editors`; clean separation | Must re-implement cursor broadcast logic (~30 lines of `y-remote-selections.js`) |
+| C: Fork `y-codemirror.next` | Patch `YRemoteCaretWidget` to accept avatar | Full control | Maintenance burden; diverges from upstream; breaks on package upgrades |
+
+**Selected: Option B** — replaces `yRemoteSelections` entirely with a purpose-built `yRichCursors` ViewPlugin.
+
+## Design Decisions
+
+### Decision: yCollab with null awareness + custom yRichCursors
+
+- **Context**: `yCollab` has no `cursorBuilder` hook; `yUndoManager` is not publicly exported; default cursor reads wrong awareness field
+- **Alternatives Considered**:
+  1. Set `state.user` — minimal change but no avatar, still redundant field
+  2. Fork library — too brittle
+- **Selected Approach**: `yCollab(activeText, null, { undoManager })` to get text-sync and undo without default cursor, plus a custom `yRichCursors(awareness)` ViewPlugin for rich cursor rendering
+- **Rationale**: Reads directly from `state.editors` (GROWI's canonical field), supports avatar, eliminates `state.user` redundancy, requires ~60 lines of new code
+- **Trade-offs**: Must maintain the cursor-broadcast logic in `yRichCursors`; if `y-codemirror.next` updates its broadcast logic we won't get those changes automatically
+- **Follow-up**: When upgrading to `y-codemirror.next >= 1.x` or `y-websocket v3`, re-evaluate if a native `cursorBuilder` API becomes available
+
+### Decision: Avatar rendered as plain DOM `<img>` in WidgetType.toDOM()
+
+- **Context**: CodeMirror cursor widgets are DOM-based (not React); `UserPicture` is a React component and cannot be used directly
+- **Selected**: Construct DOM directly using `document.createElement` in `toDOM()`: `<img>` tag for avatar with `onerror` fallback to initials
+- **Rationale**: CodeMirror `WidgetType.toDOM()` returns an `HTMLElement`; React components cannot be server-rendered in this context
+- **Trade-offs**: Slightly duplicates `UserPicture` avatar rendering; acceptable as cursor widget is presentation-only
+
+## Risks & Mitigations
+
+- `yRichCursors` broadcasts cursor positions via `awareness.setLocalStateField('cursor', ...)` on every `update` call — same as the original `yRemoteSelections`. Throttle is not needed because Yjs awareness batches broadcasts internally.
+- Avatar `<img>` may fail to load (404, CORS) — mitigate with `onerror` handler that replaces the `<img>` with initials fallback span.
+- `awareness.getStates().delete()` removal: confirm Yjs v13 awareness `update` event fires after removing the client from the internal map (verified in Yjs source: removal happens before the event).
+- **Recursive dispatch crash** (discovered during implementation): `setLocalStateField('cursor', ...)` inside the `update()` method fires an awareness `change` event **synchronously**. If the `change` listener calls `view.dispatch()` unconditionally, CodeMirror throws "Calls to EditorView.update are not allowed while an update is in progress". Mitigated by filtering the `change` listener to dispatch only when at least one **remote** client is in the changed set (`clients.findIndex(id => id !== awareness.doc.clientID) >= 0`). This matches the same pattern used by `y-remote-selections.js` in `y-codemirror.next`.
+- **`y-protocols` not a direct dependency**: `y-protocols/awareness` exports the `Awareness` class, but neither `@growi/editor` nor `apps/app` list `y-protocols` as a direct dependency. `import type { Awareness } from 'y-protocols/awareness'` fails under strict pnpm resolution. Mitigated by deriving the type from the existing `y-websocket` dependency: `type Awareness = WebsocketProvider['awareness']`.
+- **`view.viewport` vs `view.visibleRanges`** (discovered during validation): CodeMirror's `view.viewport` returns the **rendered** content range, which includes a pre-render buffer beyond the visible area for smooth scrolling. Using it for off-screen classification causes cursors in the buffer zone to be treated as in-viewport, resulting in invisible widget decorations instead of off-screen indicators. Must use `view.visibleRanges` (the ranges actually visible to the user) for accurate classification. Precedent: `setDataLine.ts` in the same package already uses `view.visibleRanges`.
+
+## Implementation Discoveries
+
+### Multi-Mode Viewport Classification
+
+- **Context**: Off-screen cursor classification using `view.visibleRanges` worked in tests (jsdom with fixed-height containers) but failed in GROWI production.
+- **Finding**: In GROWI's page-scroll editor setup, CodeMirror's `view.visibleRanges` and `view.viewport` return the **same** range (the full document), because the editor expands to content height and scrolling is handled by the browser page — not CodeMirror's own scroller. Character-position comparison is therefore useless for off-screen detection.
+- **Solution**: Three-mode classification strategy in `plugin.ts`:
+  1. **rangedMode** (`visibleRanges < viewport`): internal-scroll editor (jsdom tests, fixed-height editors) — use character-position boundaries from `visibleRanges`
+  2. **coords mode** (`visibleRanges == viewport`, `scrollDOM.height > 0`): page-scroll editor (GROWI production) — use `view.lineBlockAt(pos)` + `scrollDOM.getBoundingClientRect()` to compute screen Y coordinates
+  3. **degenerate** (`scrollDOM.height == 0`): jsdom with 0-height container — skip classification, all cursors get widget decorations
+- **Constraint**: `view.coordsAtPos()` calls `readMeasured()` internally, which throws "Reading the editor layout isn't allowed during an update". Must use `view.lineBlockAt()` (reads stored height map, safe during update) + raw `getBoundingClientRect()` (not CodeMirror-restricted) instead.
+
+### Material Symbols Font Loading
+
+- **Context**: Off-screen indicator arrow (`arrow_drop_up`/`arrow_drop_down`) rendered as literal text instead of icon.
+- **Finding**: GROWI loads Material Symbols Outlined via Next.js `next/font` in `use-material-symbols-outlined.tsx`. Next.js registers the font with a **hashed family name** (e.g., `__MaterialSymbolsOutlined_xxxxx`), stored in the CSS variable `--grw-font-family-material-symbols-outlined`. Hardcoding `font-family: 'Material Symbols Outlined'` in CodeMirror's `baseTheme` causes a mismatch — the browser cannot find the font.
+- **Solution**: Use `fontFamily: 'var(--grw-font-family-material-symbols-outlined)'` in `theme.ts` so the hashed name is resolved at runtime.
+
+### Parent Container `overflow-y: hidden` Limitation
+
+- **Context**: Off-screen indicator arrow tip was clipped when positioned a few pixels beyond the editor border.
+- **Finding**: `.page-editor-editor-container` inherits `overflow-y: hidden` from `.flex-expand-vert` within the `.flex-expand-vh-100` context (`packages/core-styles/scss/helpers/_flex-expand.scss` + `apps/app/src/styles/scss/layout/_editor.scss`). This clips any content extending beyond `.cm-editor`'s border box. `.cm-editor` itself has no overflow restriction.
+- **Implication**: Off-screen indicators must stay within `.cm-editor`'s border box. Arrow icons use `clip-path` and negative margins to visually align with the border without extending past it.
+
+### Horizontal Positioning via `requestMeasure`
+
+- **Context**: Off-screen indicators should reflect the remote cursor's column position horizontally.
+- **Finding**: `view.coordsAtPos()` cannot be called during `update()` (throws "Reading the editor layout" error). Horizontal positioning must be deferred.
+- **Solution**: After `replaceChildren()`, call `view.requestMeasure()` to schedule a read phase (`coordsAtPos` → screen X) and write phase (`style.left` + `transform: translateX(-50%)`). For virtualized positions (outside viewport), fall back to `contentDOM.getBoundingClientRect().left + col * view.defaultCharacterWidth`.
+
+### Phase 2 — Color-Matched Avatars & Click-to-Scroll
+
+#### UserPicture Style API Analysis
+
+- **Context**: Requirement 5.1 requires setting the border color of `UserPicture` avatars dynamically per user.
+- **Findings**: `UserPicture.tsx` in `packages/ui/src/components/UserPicture.tsx` accepts only `{ user, size, noLink, noTooltip, className }`. The `className` is applied to the `<img>` element (not the root `<span>`). There is no `style` prop forwarded to either element.
+- **Implications**: Cannot set `borderColor` via `UserPicture`'s own props. Must wrap in a parent element with an inline `border` style. The `border border-info` className on `UserPicture` is removed; the wrapper element provides the colored border.
+
+#### Cross-Package Callback Pattern
+
+- **Context**: `use-collaborative-editor-mode` (in `packages/editor`) needs to provide a scroll function to `EditingUserList` (in `apps/app`). Direct import from `apps/app` → `packages/editor` is the existing direction; reverse import is prohibited.
+- **Findings**: The existing `onEditorsUpdated` callback in `Configuration` follows exactly this pattern: `packages/editor` calls a callback provided by `apps/app`. The same pattern is appropriate for `onScrollToRemoteCursorReady`.
+- **Implications**: No new dependency or architectural mechanism needed; extend `Configuration` type with the new callback.
+
+#### CodeMirror Scroll API
+
+- **Context**: How to programmatically scroll the editor to a specific character position.
+- **Findings**: `EditorView.scrollIntoView(pos: number, options?: { y?: 'nearest' | 'start' | 'end' | 'center' })` is the standard CodeMirror API. Dispatching `{ effects: EditorView.scrollIntoView(pos, { y: 'center' }) }` scrolls the editor so the position is vertically centered. No additional plugins or dependencies required.
+- **Implications**: Scroll is a one-liner dispatch; no new package dependencies. The position is resolved from `Y.createAbsolutePositionFromRelativePosition(cursor.head, ydoc)` which is already used in `plugin.ts`.
+
+#### Jotai Function Setter Pitfall
+
+- **Context**: `scrollToRemoteCursorAtom` stores a `(clientId: number) => void` function. `useSetAtom` returns a setter that is passed as the `onScrollToRemoteCursorReady` callback.
+- **Finding**: Jotai's atom setter interprets any **function argument** as an **updater function**: `setAtom(fn)` is treated as `setAtom(prev => fn(prev))`, not `setAtom(fn_as_value)`. When `onScrollToRemoteCursorReady(scrollFn)` was called, Jotai invoked `scrollFn(null)` (current atom value) as if it were an updater, then stored `scrollFn`'s return value (`undefined`) in the atom — the scroll function was never stored.
+- **Symptom**: `[scrollToRemoteCursor] called with clientId: null` appeared in logs immediately after "scroll function registered", and the atom value flipped to `undefined`.
+- **Solution**: Wrap the function value in `useSetScrollToRemoteCursor`:
+  ```typescript
+  setAtom(() => fn);  // updater that returns the function value
+  ```
+  This pattern must be applied to any Jotai atom that stores a function value.
+- **Implication**: When designing Jotai atoms that store callbacks or any function-typed value, the setter must always use the `() => value` wrapper form. Document this in code review checklists for Jotai usage.
+
+#### AvatarWrapper Styling — UserPicture Tooltip Fragment Issue
+
+- **Context**: Wrapping `UserPicture` in a `<button>` for click handling caused visual misalignment and layout instability.
+- **Finding**: When `noTooltip` is not set, `UserPicture` uses a `withTooltip` HOC that returns a React **Fragment** (`<span><img/></span> + <UncontrolledTooltip/>`). As flex children of the `<button>`, the Fragment's two children introduced unpredictable layout. Additionally, the `<span>` as an inline element contributed ghost space from `line-height`, making the circular border appear offset.
+- **Solution**:
+  - Pass `noTooltip` to `UserPicture` to get a predictable single-child render (`<span><img/></span>`)
+  - Use Bootstrap utilities for layout: `d-inline-flex align-items-center justify-content-center p-0 bg-transparent rounded-circle`
+  - Add `line-height: 0` to `.avatar-wrapper` in the CSS module to eliminate inline ghost space
+  - Keep only the dynamic border color as inline style: `border: 2px solid ${color}`
+
+#### Smooth Scroll via scrollDOM Style
+
+- **Context**: Click-to-scroll should animate smoothly rather than jump instantly.
+- **Finding**: `EditorView.scrollIntoView` dispatches a CodeMirror state effect that CodeMirror resolves by scrolling `view.scrollDOM`. Setting `view.scrollDOM.style.scrollBehavior = 'smooth'` before the dispatch causes the browser to animate the scroll. Restoring the value after ~500 ms (typical animation window) avoids affecting other programmatic scrolls.
+- **Constraint**: This approach works when `view.scrollDOM` is the actual scrolling element. In GROWI's page-scroll setup, the effective scrolling element may be a parent container; if smooth scrolling does not animate as expected, the `scrollBehavior` may need to be set on the parent scroll container instead.
+
+### Phase 3 — Off-Screen Indicator Click & Username Tooltip
+
+#### scrollCallbackRef Pattern — Why Not Pass scrollFn Directly to yRichCursors
+
+- **Context**: Req 6.6 requires off-screen indicators to invoke the same `scrollFn` used by `EditingUserList`. The natural approach would be `yRichCursors(awareness, { onClickIndicator: scrollFn })`, but this fails because `yRichCursors` and `scrollFn` are created in two separate `useEffect` calls with slightly different dependency sets.
+- **Finding**: If `scrollFn` is passed as a plain value, every time the scroll function is recreated (on provider/activeDoc/codeMirrorEditor change), the extension array must also be recreated — causing a full CodeMirror extension reload. This is expensive and unnecessary.
+- **Solution**: Pass a mutable ref `scrollCallbackRef = useRef(null)` to `yRichCursors`. The plugin captures the ref object (stable reference across re-renders). The scroll-function registration effect updates `.current` silently without touching the extension.
+- **Implication**: This is the standard React pattern for exposing a stable callback to an imperative API. The `ScrollCallbackRef` type (`{ current: Fn | null }`) is defined in `packages/editor` without importing React, making it usable in the non-React CodeMirror extension context.
+
+#### UserPicture Tooltip — withTooltip HOC Elimination (Design Review Outcome)
+
+- **Context**: Req 7 requires username tooltips in `EditingUserList`. The `UserPicture` component's `withTooltip` HOC returns a React Fragment (`<span><img/></span> + <UncontrolledTooltip/>`), which caused layout instability when used inside a flex `<button>` (Phase 2 finding). The initial approach (Phase 2) was to use `noTooltip` + external `UncontrolledTooltip` at the wrapper level, but design review identified this as a workaround that would need to be repeated by every consumer facing the same Fragment/flex issue.
+- **Root cause analysis**: The `withTooltip` HOC returns a Fragment because `UncontrolledTooltip` is placed as a **sibling** of the wrapped component. While `UncontrolledTooltip` uses `ReactDOM.createPortal` (tooltip content renders to `document.body`), the Fragment still produces two React children at the parent level, which can destabilize flex layout.
+- **Key insight**: Since `UncontrolledTooltip` is a portal, it can be placed as a **child** of the root `<span>` instead of a sibling. As a portal child, it occupies no DOM space in the parent — only the `<img>` is a visible child. The root element becomes a single `<span>` with predictable layout behavior in any container type.
+- **Solution**: Eliminate the `withTooltip` HOC. Move tooltip rendering inline into `UserPicture`'s render function:
+  1. Create `rootRef = useRef<HTMLSpanElement>(null)` unconditionally (hooks rules compliant)
+  2. Pass `rootRef` to `UserPictureRootWithoutLink`/`UserPictureRootWithLink` via `forwardRef` (they already support it)
+  3. Conditionally render `UncontrolledTooltip` as a child of the root element alongside `imgElement`
+  4. Delete the `withTooltip` HOC function
+- **Impact verification**: `withTooltip` is not exported — it's only used internally in `UserPicture.tsx`. The public API (`Props`: `user, size, noLink, noTooltip, className`) is unchanged. All existing consumers (30+ usages across `apps/app`) are unaffected.
+- **`noTooltip` usages** (16 call sites): Consumers that pass `noTooltip` (sidebar dropdowns, inline notifications, comment editors, conflict modals) continue to suppress tooltips. `EditingUserList` is the only consumer that **removes** `noTooltip` to gain the tooltip.
+- **Implication**: `EditingUserList` no longer needs external tooltip code (`UncontrolledTooltip`, `id` generation, `clientId`-based targeting). The `AvatarWrapper` sub-component is simplified to just a `<button>` wrapping `<UserPicture>` with color border.
+
+## References
+
+- y-codemirror.next v0.3.5 source: `node_modules/.pnpm/y-codemirror.next@0.3.5_.../src/`
+- Yjs awareness protocol: https://docs.yjs.dev/api/about-awareness
+- CodeMirror WidgetType: https://codemirror.net/docs/ref/#view.WidgetType
+- CodeMirror EditorView.lineBlockAt: https://codemirror.net/docs/ref/#view.EditorView.lineBlockAt
+- CodeMirror EditorView.scrollIntoView: https://codemirror.net/docs/ref/#view.EditorView^scrollIntoView

+ 23 - 0
.kiro/specs/collaborative-editor-awareness/spec.json

@@ -0,0 +1,23 @@
+{
+  "feature_name": "collaborative-editor-awareness",
+  "created_at": "2026-04-07T00:00:00.000Z",
+  "updated_at": "2026-04-16T09:00:00.000Z",
+  "language": "en",
+  "phase": "implementation-complete",
+  "approvals": {
+    "requirements": {
+      "generated": true,
+      "approved": true
+    },
+    "design": {
+      "generated": true,
+      "approved": true
+    },
+    "tasks": {
+      "generated": true,
+      "approved": true
+    }
+  },
+  "ready_for_implementation": true,
+  "cleanup_completed": false
+}

+ 144 - 0
.kiro/specs/collaborative-editor-awareness/tasks.md

@@ -0,0 +1,144 @@
+# Implementation Plan
+
+- [x] 1. Stabilize the Editing User List
+- [x] 1.1 Fix awareness state filter (undefined → skip) — _Req 1.1, 1.4_
+- [x] 1.2 Remove direct mutation of Yjs-managed awareness map — _Req 1.2_
+
+- [x] 2. Build the Rich Cursor Extension (Initial)
+- [x] 2.1 (P) Cursor widget DOM: name label, avatar image, initials fallback — _Req 3.4, 3.5_
+- [x] 2.2 (P) Broadcast local cursor position to awareness — _Req 3.6, 3.7_
+- [x] 2.3 (P) Render remote cursor decorations from awareness — _Req 3.6, 3.7_
+
+- [x] 3. Integrate Rich Cursor Extension into Editor Configuration — _Req 1.3, 2.4, 3.6_
+
+- [x] 4. Unit Tests for Core Behaviors (Initial)
+- [x] 4.1 (P) Awareness state filtering and mutation-free disconnect — _Req 1.1, 1.2, 1.4_
+- [x] 4.2 (P) Cursor widget construction, equality, avatar fallback — _Req 3.4, 3.5_
+
+- [x] 5. Integration Tests for Multi-Client Collaborative Scenarios
+- [x] 5.1 Awareness update flow to EditingUserList — _Req 1.3, 2.1, 2.4_
+- [x] 5.2 Cursor position broadcasting and remote rendering — _Req 3.6, 3.7_
+
+- [x] 6. Add baseTheme with Overlay Positioning, Hover, and Opacity Rules
+- [x] 6.1 (P) Cursor overlay CSS rules — _Req 3.1, 3.2, 3.3, 3.8, 3.9_
+- [x] 6.2 (P) Off-screen container and indicator styles — _Req 4.5, 4.7_
+
+- [x] 7. Rework RichCaretWidget for Overlay Avatar with Activity State
+- [x] 7.1 Widget DOM: overlay flag, avatar/initials, hover name label, isActive — _Req 3.1–3.5, 3.10_
+- [x] 7.2 Activity tracking with per-client timers (3s inactivity) — _Req 3.10_
+
+- [x] 8. Build Off-Screen Cursor Indicators
+- [x] 8.1 Persistent off-screen containers on editor DOM — _Req 4.7_
+- [x] 8.2 Classify cursors by visible range, render indicators — _Req 4.1–4.6_
+
+- [x] 9. Unit Tests for Updated Widget and Off-Screen Indicators
+- [x] 9.1 (P) Widget DOM structure, sizing, isActive, borderColor — _Req 3.1–3.5, 3.10_
+- [x] 9.2 (P) Off-screen indicator DOM, Material Symbols arrow, avatar fallback — _Req 4.1, 4.2, 4.4, 4.9_
+
+- [x] 10. Integration Tests for Viewport Classification and Activity Tracking
+- [x] 10.1 Off-screen exclusion from widget decorations — _Req 4.3, 4.6_
+- [x] 10.2 Activity tracking timer lifecycle (fake timers) — _Req 3.10_
+
+- [x] 12. Fix Off-Screen Visibility Classification
+- [x] 12.1 Multi-mode classification: rangedMode / coordsMode / degenerate — _Req 4.1–4.3, 4.6_
+- [x] 12.2 Integration test for render-buffer cursor → off-screen indicator — _Req 4.3, 4.6_
+
+- [ ]\* 11. E2E Tests for Hover, Opacity, and Off-Screen Transitions (deferred)
+- [ ]\* 11.1 (P) Hover behavior on cursor overlay flag — _Req 3.3, 3.9_
+- [ ]\* 11.2 (P) Off-screen indicator visibility on scroll — _Req 4.1–4.3, 4.6_
+
+---
+
+## Phase 2: Color-Matched Avatars & Click-to-Scroll (Requirements 5–6)
+
+- [x] 13. Scroll callback infrastructure
+- [x] 13.1 (P) Create a Jotai atom for storing the scroll-to-remote-cursor callback
+  - Define an atom that holds either a scroll function accepting a client ID or null
+  - Export a reader hook and a setter hook, following the same pattern as the existing editing-clients atom
+  - _Requirements: 6.1_
+
+- [x] 13.2 (P) Extend the collaborative editor mode hook to create and register a scroll-to-remote-cursor function
+  - Add a new callback option to the hook's configuration that receives the scroll function when the provider and document are ready, and null on cleanup
+  - The scroll function reads the target user's cursor position from awareness, resolves the Yjs relative position to an absolute document index, and dispatches a vertically centered scroll command to the CodeMirror editor view
+  - Guard against missing cursor data and unmounted editor view by returning silently
+  - _Requirements: 6.1, 6.2, 6.3_
+
+- [x] 14. (P) Update EditingUserList with color-matched borders and click-to-scroll
+  - Replace the fixed blue border on each avatar with a wrapper element whose border color matches the user's cursor color from the awareness state
+  - Accept a new click callback prop and wrap each avatar in a clickable element with pointer cursor styling
+  - Replace the generic UserPictureList component in the overflow popover with inline rendering so that color-matched borders and click handling apply to all avatars consistently
+  - _Requirements: 5.1, 5.2, 5.3, 6.4, 6.5_
+
+- [x] 15. Connect all components end-to-end
+  - Bridge the scroll-ready callback through the main editor component's props into the collaborative editor mode hook
+  - Wire the page editor to store the received scroll callback in the Jotai atom
+  - Wire the editor navbar to read the atom and pass the scroll function to the editing user list as the click callback
+  - Verify that clicking an avatar scrolls the editor to that user's remote cursor position; verify no-op for users without a cursor
+  - _Requirements: 6.1, 6.5_
+
+- [x]\* 16. Test coverage for color-matched borders and click-to-scroll
+- [x]\* 16.1 (P) Unit tests for EditingUserList rendering and click behavior
+  - Verify that each avatar renders a colored border matching the user's cursor color instead of the fixed blue
+  - Verify that clicking an avatar invokes the callback with the correct client ID
+  - Verify that overflow popover avatars also invoke the callback on click
+  - _Requirements: 5.1, 6.4, 6.5_
+
+- [x]\* 16.2 (P) Integration test for the scroll function in the collaborative editor mode hook
+  - Verify that the configuration callback receives a function when the provider is set up
+  - Verify that calling the scroll function with a valid remote client ID dispatches a centered scroll effect to the editor view
+  - Verify that calling the scroll function for a client without a cursor position is a silent no-op
+  - _Requirements: 6.1, 6.2, 6.3_
+
+---
+
+## Phase 3: Off-Screen Indicator Click & Username Tooltip (Requirements 6.6–6.7, 7)
+
+- [x] 17. Add click-to-scroll to off-screen cursor indicators
+- [x] 17.1 Extend the off-screen indicator to accept and fire a click callback
+  - Add a user identifier and an optional click callback to the indicator creation options
+  - Attach a click event listener on the indicator's root element that invokes the callback with the user identifier
+  - Apply pointer cursor styling when a click handler is provided; omit it when not
+  - _Requirements: 6.6, 6.7_
+
+- [x] 17.2 Wire the scroll function to off-screen indicators via a mutable ref in the editor mode hook
+  - Accept a mutable ref option in the rich cursor extension factory for the indicator click callback
+  - When building off-screen indicators, pass the ref's current value as the click handler for each indicator
+  - In the collaborative editor mode hook, create the mutable ref alongside the extension, write the existing scroll function to it when the provider is ready, and clear it on cleanup
+  - _Requirements: 6.6_
+
+- [x] 18. (P) Refactor the UserPicture component to eliminate the tooltip higher-order component
+  - Remove the higher-order component that wraps the root element and tooltip in a React Fragment
+  - Render the tooltip directly within the component's render body as a child of the root element, conditionally based on the noTooltip flag
+  - Preserve the dynamic import for the tooltip component so that consumers who suppress tooltips never trigger the tooltip chunk load
+  - Move the ref for tooltip targeting into the component body (unconditional call) and pass it to the root element via its existing forwardRef support
+  - The root element is always a single span, making it safe inside flex containers
+  - _Requirements: 7.1, 7.3, 7.4, 7.5_
+
+- [x] 19. Enable the native UserPicture tooltip in EditingUserList
+  - Remove the tooltip-suppression flag from UserPicture in the avatar wrapper so the built-in tooltip renders automatically for all avatars
+  - Both the first-four direct avatars and the overflow popover avatars use the same wrapper, so tooltips appear uniformly
+  - _Requirements: 7.2_
+  - _Depends on: Task 18_
+
+- [x]\* 20. Test coverage for off-screen click and tooltip refactoring
+- [x]\* 20.1 (P) Unit tests for off-screen indicator click behavior
+  - Verify the indicator root has pointer cursor when a click handler is provided
+  - Verify clicking the indicator calls the callback with the correct user identifier
+  - Verify no click handler or pointer cursor when the callback is omitted
+  - _Requirements: 6.6, 6.7_
+
+- [x]\* 20.2 (P) Integration test for off-screen indicator scroll wiring
+  - Verify that when the mutable ref holds a function and an off-screen indicator is clicked, the function is called with the correct user identifier
+  - Verify that clicking when the ref is null does not throw
+  - _Requirements: 6.6_
+
+- [x]\* 20.3 (P) Unit tests for UserPicture tooltip refactoring
+  - Verify that without the tooltip-suppression flag, the component renders a single root element containing the image and a portal tooltip
+  - Verify that with the tooltip-suppression flag, only the image is rendered inside the root element
+  - Verify tooltip content includes the username prefix and display name
+  - _Requirements: 7.1, 7.3, 7.4_
+
+- [x]\* 20.4 Unit tests for EditingUserList tooltip integration
+  - Verify the avatar wrapper renders UserPicture without the tooltip-suppression flag
+  - Verify tooltips are present for both direct avatars and overflow popover avatars
+  - _Requirements: 7.2_

+ 3 - 1
.kiro/specs/collaborative-editor/design.md

@@ -77,7 +77,7 @@ graph TB
 |-------|------------------|------|
 | Client Provider | `y-websocket@^2.x` (WebsocketProvider) | Yjs document sync over WebSocket |
 | Server WebSocket | `ws@^8.x` (WebSocket.Server) | Native WebSocket server, `noServer: true` mode |
-| Server Yjs Utils | `y-websocket@^2.x` (`bin/utils`) | `setupWSConnection`, `getYDoc`, `WSSharedDoc` |
+| Server Yjs Utils | `y-websocket@^2.x` (`bin/utils`) | `setupWSConnection`, `getYDoc`, `WSSharedDoc`. Server-side type declarations (`y-websocket-server.d.ts`) derive the `Awareness` type via `WebsocketProvider['awareness']` instead of importing from `y-protocols/awareness`, because `y-protocols` is not a direct dependency. |
 | Persistence | `y-mongodb-provider` (extended) | Yjs document persistence to `yjs-writings` collection |
 | Event Bridge | Socket.IO `io` instance | Awareness state broadcasting to page rooms |
 | Auth | express-session + passport | WebSocket upgrade authentication via cookie |
@@ -239,6 +239,8 @@ interface YWebsocketPersistence {
 - Awareness API: `provider.awareness.setLocalStateField`, `.on('update', ...)`
 - All side effects (provider creation, awareness setup) must be outside React state updaters to avoid render-phase violations
 
+> **Note**: Client-side awareness display (EditingUserList stability, rich cursor rendering) is designed in the [`collaborative-editor-awareness`](../collaborative-editor-awareness/) spec.
+
 ## Data Models
 
 No custom data models. Uses the existing `yjs-writings` MongoDB collection via `MongodbPersistence` (extended `y-mongodb-provider`). Collection schema, indexes, and persistence interface (`bindState` / `writeState`) are unchanged.

+ 4 - 2
.kiro/specs/collaborative-editor/requirements.md

@@ -58,14 +58,16 @@ GROWI provides real-time collaborative editing powered by Yjs, allowing multiple
 
 ### Requirement 5: Awareness and Presence Tracking
 
-**Objective:** As a wiki user, I want to see which other users are currently editing the same page, so that I can coordinate edits and avoid conflicts.
+> **Note**: Client-side awareness display behavior (EditingUserList stability, rich cursor rendering) is specified in the [`collaborative-editor-awareness`](../collaborative-editor-awareness/requirements.md) spec. This requirement covers only the server-side awareness event bridging responsibility of the Yjs service.
+
+**Objective:** As a system component, I want awareness state changes to be broadcast to page-level Socket.IO rooms, so that non-editor UI components can reflect collaborative editing activity.
 
 #### Acceptance Criteria
 
 1. While a user is editing a page, the Editor Client shall broadcast the user's presence information (name, username, avatar, cursor color) via the Yjs awareness protocol.
 2. When a user connects or disconnects from a collaborative editing session, the Yjs Service shall emit awareness state size updates to the page's Socket.IO room (`page:{pageId}`).
 3. When the last user disconnects from a document, the Yjs Service shall emit a draft status notification (`YjsHasYdocsNewerThanLatestRevisionUpdated`) to the page's Socket.IO room.
-4. The Editor Client shall display the list of active editors based on awareness state updates from the Yjs provider.
+4. The Editor Client shall display the list of active editors based on awareness state updates from the Yjs provider. See `collaborative-editor-awareness` spec for display-level requirements.
 
 ### Requirement 6: YDoc Status and Sync Integration
 

+ 638 - 0
.kiro/specs/editor-keymaps/design.md

@@ -0,0 +1,638 @@
+# Design Document: editor-keymaps
+
+## Overview
+
+**Purpose**: This feature refactors the GROWI editor's keymap system into a clean, uniform module architecture and extends Emacs keybindings to cover the full range of markdown-mode operations.
+
+**Users**: Developers maintaining the editor codebase benefit from consistent module boundaries. End users using Emacs keymap mode gain a complete markdown-mode editing experience.
+
+**Impact**: Changes the internal structure of `packages/editor/src/client/services-internal/` and `stores/use-editor-shortcuts.ts`. No external API changes; EditorSettings interface and UI selector remain unchanged.
+
+### Goals
+- Uniform factory interface for all 4 keymap modules with encapsulated precedence and override declarations
+- Eliminate markdown toggle logic duplication between emacs.ts and editor-shortcuts
+- Data-driven shortcut exclusion replacing hard-coded mode checks
+- Relocate `editor-shortcuts/` from public services layer to services-internal where it belongs
+- Complete Emacs markdown-mode keybindings (formatting, structural, navigation, save)
+
+### Non-Goals
+- Changing the keymap selection UI or persistence mechanism (Requirement 8 is verification-only)
+- Adding new keymap modes beyond the existing 4
+- Modifying Vim keybindings beyond structural consistency
+- Full Emacs M-x command palette
+
+## Architecture
+
+### Existing Architecture Analysis
+
+Current module layout and problems:
+
+```
+services/ (PUBLIC API)
+  use-codemirror-editor/
+    utils/
+      insert-markdown-elements.ts   ← hook, exposed via public API ✓
+      insert-prefix.ts              ← hook, exposed via public API ✓
+      editor-shortcuts/             ← NOT exported, only used by stores/ ✗ MISPLACED
+        make-text-bold.ts
+        make-text-italic.ts
+        ...
+
+services-internal/ (INTERNAL)
+  keymaps/
+    index.ts        ← Dispatcher with inline default/vscode logic
+    vim.ts          ← Top-level side effects (Vim.map at module scope)
+    emacs.ts        ← Local toggleMarkdownSymbol duplicating hook logic
+
+stores/
+  use-editor-settings.ts   ← Contains getKeymapPrecedence() mode branching
+  use-editor-shortcuts.ts  ← Hard-coded `if (mode === 'emacs')` exclusion
+```
+
+**Problems**:
+1. `editor-shortcuts/` is in public `services/` tree but never exported — layer violation
+2. No dedicated module for default/vscode modes
+3. Precedence logic leaked to consumer (`getKeymapPrecedence`)
+4. Override knowledge leaked to shortcut registration (`if emacs` check)
+5. Markdown toggle duplicated in emacs.ts vs `useInsertMarkdownElements`
+6. emacs.ts will accumulate 19+ commands in a single file — low cohesion
+
+### Architecture Pattern & Boundary Map
+
+```mermaid
+graph TB
+    subgraph consts
+        KeyMapMode[KeyMapMode type]
+        KeymapResult[KeymapResult interface]
+        ShortcutCategory[ShortcutCategory type]
+    end
+
+    subgraph services-internal
+        subgraph markdown-utils
+            ToggleSymbol[toggleMarkdownSymbol]
+            LinePrefix[insertLinePrefix]
+        end
+        subgraph keymaps
+            Dispatcher[index.ts dispatcher]
+            DefaultMod[default.ts]
+            VscodeMod[vscode.ts]
+            VimMod[vim.ts]
+            subgraph emacs
+                EmacsIndex[emacs/index.ts]
+                EmacsFormatting[emacs/formatting.ts]
+                EmacsStructural[emacs/structural.ts]
+                EmacsNavigation[emacs/navigation.ts]
+            end
+        end
+        subgraph editor-shortcuts
+            ShortcutDefs[CategorizedKeyBindings definitions]
+        end
+    end
+
+    subgraph services-public[services - public]
+        InsertMdHook[useInsertMarkdownElements]
+        InsertPrefixHook[useInsertPrefix]
+    end
+
+    subgraph stores
+        EditorSettings[useEditorSettings]
+        EditorShortcuts[useEditorShortcuts]
+    end
+
+    Dispatcher --> DefaultMod
+    Dispatcher --> VscodeMod
+    Dispatcher --> VimMod
+    Dispatcher --> EmacsIndex
+    EmacsIndex --> EmacsFormatting
+    EmacsIndex --> EmacsStructural
+    EmacsIndex --> EmacsNavigation
+    EmacsFormatting --> ToggleSymbol
+    EmacsStructural --> LinePrefix
+    InsertMdHook --> ToggleSymbol
+    InsertPrefixHook --> LinePrefix
+    EditorSettings --> Dispatcher
+    EditorSettings --> EditorShortcuts
+    EditorShortcuts --> ShortcutDefs
+    EditorShortcuts -.->|reads overrides| KeymapResult
+```
+
+**Architecture Integration**:
+- Selected pattern: Factory with structured return object (see `research.md` — Pattern A)
+- Domain boundaries: Each keymap module owns its bindings, precedence, and override declarations
+- Emacs module split into submodules by responsibility (formatting / structural / navigation)
+- Pure functions in `markdown-utils/` shared by both public hooks and internal keymaps
+- `editor-shortcuts/` relocated to `services-internal/` to match its actual visibility
+- Existing patterns preserved: Async lazy loading, `appendExtensions` lifecycle
+- Steering compliance: Feature-based organization, named exports, immutability, high cohesion
+
+### Technology Stack
+
+| Layer | Choice / Version | Role in Feature | Notes |
+|-------|------------------|-----------------|-------|
+| Frontend | CodeMirror 6 (@codemirror/view, @codemirror/state) | Extension system, keymap API | Existing |
+| Frontend | @replit/codemirror-emacs 6.1.0 | EmacsHandler.bindKey/addCommands | Existing |
+| Frontend | @replit/codemirror-vim 6.2.1 | Vim.map/defineEx | Existing |
+| Frontend | @replit/codemirror-vscode-keymap 6.0.2 | VSCode keybindings | Existing |
+
+No new dependencies introduced.
+
+## System Flows
+
+### Keymap Loading Flow
+
+```mermaid
+sequenceDiagram
+    participant Settings as useEditorSettings
+    participant Dispatcher as getKeymap
+    participant Module as KeymapModule
+    participant CM as CodeMirror
+
+    Settings->>Dispatcher: getKeymap(mode, onSave)
+    Dispatcher->>Module: module.create(onSave?)
+    Module-->>Dispatcher: KeymapResult
+    Dispatcher-->>Settings: KeymapResult
+    Settings->>CM: appendExtensions(result.precedence(result.extension))
+    Settings->>CM: pass result.overrides to useEditorShortcuts
+```
+
+Key decisions:
+- Dispatcher is a thin router; all logic lives in modules
+- `KeymapResult.precedence` is a function (`Prec.high` or `Prec.low`) applied by the consumer
+- `overrides` array flows to shortcut registration for data-driven exclusion
+
+## Requirements Traceability
+
+| Requirement | Summary | Components | Interfaces | Flows |
+|-------------|---------|------------|------------|-------|
+| 1.1 | Dedicated module per mode | default.ts, vscode.ts, vim.ts, emacs/ | KeymapFactory | Keymap Loading |
+| 1.2 | Uniform async factory interface | All keymap modules | KeymapFactory | Keymap Loading |
+| 1.3 | No inline logic in dispatcher | keymaps/index.ts | — | Keymap Loading |
+| 1.4 | Encapsulated precedence | KeymapResult interface | KeymapResult | Keymap Loading |
+| 2.1 | Shared toggle utility | markdown-utils/toggleMarkdownSymbol | — | — |
+| 2.2 | Emacs uses shared logic | emacs/formatting.ts | — | — |
+| 2.3 | No duplicate toggle impl | Remove local emacs.ts toggle | — | — |
+| 3.1 | Keymap declares overrides | KeymapResult.overrides | ShortcutCategory | — |
+| 3.2 | Shortcut registration consults overrides | useEditorShortcuts | CategorizedKeyBindings | — |
+| 3.3 | New modes need no shortcut changes | Data-driven exclusion | ShortcutCategory | — |
+| 4.1-4.5 | Existing Emacs formatting bindings | emacs/formatting.ts | EmacsHandler | — |
+| 5.1-5.7 | Emacs structural bindings | emacs/structural.ts | EmacsHandler | — |
+| 6.1-6.2 | Emacs C-x C-s save | emacs/index.ts | KeymapFactory (onSave) | — |
+| 7.1-7.2 | Vim module consistency | vim.ts | KeymapFactory | — |
+| 8.1-8.3 | UI consistency | OptionsSelector | — | — |
+| 9.1-9.9 | Extended markdown-mode bindings | emacs/navigation.ts | EmacsHandler | — |
+
+## Components and Interfaces
+
+| Component | Domain/Layer | Intent | Req Coverage | Key Dependencies | Contracts |
+|-----------|-------------|--------|--------------|------------------|-----------|
+| KeymapResult | consts | Structured return type for keymap factories | 1.2, 1.4, 3.1 | — | Type |
+| ShortcutCategory | consts | Override category type | 3.1, 3.2, 3.3 | — | Type |
+| CategorizedKeyBindings | consts | KeyBindings grouped by category | 3.2 | — | Type |
+| toggleMarkdownSymbol | markdown-utils | Pure function for markdown wrap/unwrap | 2.1, 2.2, 2.3 | @codemirror/state | Service |
+| insertLinePrefix | markdown-utils | Pure function for line prefix operations | 5.1, 5.3, 5.4, 5.5 | @codemirror/state | Service |
+| keymaps/default.ts | keymaps | Default keymap module | 1.1 | @codemirror/commands | Service |
+| keymaps/vscode.ts | keymaps | VSCode keymap module | 1.1 | @replit/codemirror-vscode-keymap | Service |
+| keymaps/vim.ts | keymaps | Vim keymap module (refactored) | 1.1, 7.1, 7.2 | @replit/codemirror-vim | Service |
+| keymaps/emacs/ | keymaps | Emacs keymap module (split by responsibility) | 1.1, 4-6, 9 | @replit/codemirror-emacs | Service |
+| keymaps/index.ts | keymaps | Thin dispatcher | 1.2, 1.3 | All keymap modules | Service |
+| editor-shortcuts/ | services-internal | Categorized shortcut definitions | 3.2 | markdown-utils | Service |
+| useEditorShortcuts | stores | Data-driven shortcut registration | 3.1, 3.2, 3.3 | editor-shortcuts, KeymapResult | State |
+| useEditorSettings | stores | Keymap lifecycle (simplified) | 1.4 | getKeymap | State |
+| OptionsSelector | UI | Keymap selector (no changes) | 8.1-8.3 | — | — |
+
+### Consts Layer
+
+#### KeymapResult Interface
+
+| Field | Detail |
+|-------|--------|
+| Intent | Structured return type encapsulating keymap extension, precedence, and override metadata |
+| Requirements | 1.2, 1.4, 3.1 |
+
+**Contracts**: Type [x]
+
+```typescript
+type ShortcutCategory = 'formatting' | 'structural' | 'navigation';
+
+interface KeymapResult {
+  readonly extension: Extension;
+  readonly precedence: (ext: Extension) => Extension; // Prec.high or Prec.low
+  readonly overrides: readonly ShortcutCategory[];
+}
+
+type KeymapFactory = (onSave?: () => void) => Promise<KeymapResult>;
+```
+
+#### CategorizedKeyBindings Type
+
+| Field | Detail |
+|-------|--------|
+| Intent | Group KeyBindings by ShortcutCategory for data-driven exclusion |
+| Requirements | 3.2 |
+
+**Contracts**: Type [x]
+
+```typescript
+interface CategorizedKeyBindings {
+  readonly category: ShortcutCategory | null; // null = always included (e.g., multiCursor)
+  readonly bindings: readonly KeyBinding[];
+}
+```
+
+Each shortcut definition module returns a `CategorizedKeyBindings` object instead of raw `KeyBinding[]`. `null` category means always active regardless of overrides.
+
+### Shared Utils Layer (`services-internal/markdown-utils/`)
+
+Pure functions usable by both public hooks and internal keymaps. No React dependencies.
+
+#### toggleMarkdownSymbol
+
+| Field | Detail |
+|-------|--------|
+| Intent | Pure function to wrap/unwrap selected text with markdown symbols |
+| Requirements | 2.1, 2.2, 2.3 |
+
+**Contracts**: Service [x]
+
+```typescript
+/**
+ * Toggle markdown symbols around the current selection.
+ * If the selection is already wrapped with prefix/suffix, remove them.
+ * If no text is selected, insert prefix+suffix and position cursor between them.
+ */
+const toggleMarkdownSymbol: (
+  view: EditorView,
+  prefix: string,
+  suffix: string,
+) => void;
+```
+
+**Implementation Notes**
+- Extracted from current `emacs.ts` local function
+- `useInsertMarkdownElements` hook in `services/` becomes a thin wrapper: `useCallback((p, s) => toggleMarkdownSymbol(view, p, s), [view])`
+- Location: `packages/editor/src/client/services-internal/markdown-utils/toggle-markdown-symbol.ts`
+
+#### insertLinePrefix
+
+| Field | Detail |
+|-------|--------|
+| Intent | Pure function to insert/toggle prefix at line beginnings |
+| Requirements | 5.1, 5.3, 5.4, 5.5 |
+
+**Contracts**: Service [x]
+
+```typescript
+/**
+ * Insert or toggle a prefix at the beginning of the current line(s).
+ * Handles multi-line selections. Removes prefix if all lines already have it.
+ */
+const insertLinePrefix: (
+  view: EditorView,
+  prefix: string,
+  noSpaceIfPrefixExists?: boolean,
+) => void;
+```
+
+**Implementation Notes**
+- Extracted from current `useInsertPrefix` hook
+- Hook becomes a thin wrapper
+- Location: `packages/editor/src/client/services-internal/markdown-utils/insert-line-prefix.ts`
+
+#### Dependency Direction
+
+```
+services/ (public hooks)
+  useInsertMarkdownElements ──imports──> services-internal/markdown-utils/toggleMarkdownSymbol
+  useInsertPrefix           ──imports──> services-internal/markdown-utils/insertLinePrefix
+
+services-internal/ (internal)
+  keymaps/emacs/formatting  ──imports──> services-internal/markdown-utils/toggleMarkdownSymbol
+  keymaps/emacs/structural  ──imports──> services-internal/markdown-utils/insertLinePrefix
+  editor-shortcuts/         ──imports──> services-internal/markdown-utils/ (via pure functions)
+```
+
+Both public hooks and internal modules depend on the same internal pure functions.
+
+**Layer Rule Exception**: `markdown-utils/` is explicitly designated as a **shared pure-function sublayer** within `services-internal/`. Public hooks in `services/` are permitted to import from `services-internal/markdown-utils/` as thin wrappers. This exception is scoped to pure functions with no React dependencies — other `services-internal/` modules remain off-limits to `services/`. This pattern avoids duplication (Req 2.3) while keeping the public API surface minimal.
+
+### Keymaps Layer
+
+#### keymaps/default.ts
+
+| Field | Detail |
+|-------|--------|
+| Intent | Default CodeMirror keymap module |
+| Requirements | 1.1 |
+
+**Contracts**: Service [x]
+
+```typescript
+const defaultKeymap: KeymapFactory;
+// Returns:
+// - extension: keymap.of(the defaultKeymap binding set from @codemirror/commands — alias the import (e.g. `defaultKeymap as cmDefaultKeymap`) so it does not shadow this module's exported factory of the same name)
+// - precedence: Prec.low
+// - overrides: [] (no overrides)
+```
+
+#### keymaps/vscode.ts
+
+| Field | Detail |
+|-------|--------|
+| Intent | VSCode keymap module |
+| Requirements | 1.1 |
+
+**Contracts**: Service [x]
+
+```typescript
+const vscodeKeymap: KeymapFactory;
+// Returns:
+// - extension: keymap.of(vscodeKeymap from @replit/codemirror-vscode-keymap)
+// - precedence: Prec.low
+// - overrides: [] (no overrides)
+```
+
+#### keymaps/vim.ts (Refactored)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Vim keymap module with side effects encapsulated in factory |
+| Requirements | 1.1, 7.1, 7.2 |
+
+**Responsibilities & Constraints**
+- Moves `Vim.map('jj', '<Esc>', 'insert')` and `Vim.map('jk', '<Esc>', 'insert')` inside factory
+- Registers `:w` ex-command inside factory when onSave provided
+- Uses idempotency guard to prevent duplicate registration on re-import
+
+**Contracts**: Service [x]
+
+```typescript
+const vimKeymap: KeymapFactory;
+// Returns:
+// - extension: vim()
+// - precedence: Prec.high
+// - overrides: [] (Vim uses its own modal system, no standard shortcut conflicts)
+```
+
+#### keymaps/emacs/ (Split Module)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Emacs keymap module split by responsibility for high cohesion |
+| Requirements | 1.1, 4.1-4.5, 5.1-5.7, 6.1-6.2, 9.1-9.9 |
+
+**Module Structure**:
+```
+keymaps/emacs/
+├── index.ts          ← Factory: composes submodules, registers with EmacsHandler, returns KeymapResult
+├── formatting.ts     ← C-c C-s formatting commands (bold, italic, code, strikethrough, code block)
+├── structural.ts     ← C-c C-s / C-c C-<key> structural commands (headings, lists, blockquote, link, HR)
+└── navigation.ts     ← C-c C-<key> navigation/editing commands (heading nav, promote/demote, kill, image, table)
+```
+
+**Submodule Responsibilities**:
+
+Each submodule exports a registration function:
+```typescript
+type EmacsBindingRegistrar = (
+  EmacsHandler: typeof import('@replit/codemirror-emacs').EmacsHandler,
+  options?: { onSave?: () => void },
+) => void;
+```
+
+**emacs/index.ts** — Factory & Composition:
+```typescript
+const emacsKeymap: KeymapFactory;
+// 1. Dynamically imports @replit/codemirror-emacs
+// 2. Calls registerFormattingBindings(EmacsHandler)
+// 3. Calls registerStructuralBindings(EmacsHandler)
+// 4. Calls registerNavigationBindings(EmacsHandler)
+// 5. Registers save: C-x C-s → onSave callback
+// 6. Returns { extension: emacs(), precedence: Prec.high, overrides: ['formatting', 'structural'] }
+```
+
+**emacs/formatting.ts** — Req 4.1-4.5:
+
+| Command Name | Binding | Action |
+|-------------|---------|--------|
+| markdownBold | `C-c C-s b\|C-c C-s S-b` | toggleMarkdownSymbol(view, '**', '**') |
+| markdownItalic | `C-c C-s i\|C-c C-s S-i` | toggleMarkdownSymbol(view, '*', '*') |
+| markdownCode | `C-c C-s c` | toggleMarkdownSymbol(view, '`', '`') |
+| markdownStrikethrough | `C-c C-s s` | toggleMarkdownSymbol(view, '~~', '~~') |
+| markdownCodeBlock | `C-c C-s p` | toggleMarkdownSymbol(view, '```\n', '\n```') |
+
+**emacs/structural.ts** — Req 5.1-5.7:
+
+| Command Name | Binding | Action |
+|-------------|---------|--------|
+| markdownBlockquote | `C-c C-s q` | insertLinePrefix(view, '>') |
+| markdownLink | `C-c C-l` | toggleMarkdownSymbol(view, '[', ']()') |
+| markdownHorizontalRule | `C-c C-s -` | Insert '---' at current line |
+| markdownHeadingDwim | `C-c C-s h` | Auto-determine heading level |
+| markdownHeading1-6 | `C-c C-s 1`~`6` | insertLinePrefix(view, '# '...'###### ') |
+| markdownNewListItem | `C-c C-j` | Insert new list item matching context |
+| markdownFencedCodeBlock | `C-c C-s S-c` | Insert GFM fenced code block |
+
+**emacs/navigation.ts** — Req 9.1-9.9:
+
+**Multi-Key Prefix Compatibility Note**: All `C-c C-{key}` bindings use the same 2-stroke prefix mechanism validated in PR #10980 (`C-c C-s` prefix). `EmacsHandler.bindKey` supports multi-key sequences where `C-c` acts as a prefix map — subsequent keystrokes (`C-n`, `C-f`, `C-b`, `C-p`, etc.) are dispatched from the prefix map, not as standalone Emacs commands. This has been confirmed working with the `C-c C-s` prefix in production. If any binding conflicts with a base Emacs command (e.g., `C-c C-f` shadowing `forward-char` after `C-c`), the prefix map takes priority by design — the base command remains accessible without the `C-c` prefix.
+
+| Command Name | Binding | Action |
+|-------------|---------|--------|
+| markdownPromote | `C-c C--` | Decrease heading level or outdent list |
+| markdownDemote | `C-c C-=` | Increase heading level or indent list |
+| markdownNextHeading | `C-c C-n` | Navigate to next heading |
+| markdownPrevHeading | `C-c C-p` | Navigate to previous heading |
+| markdownNextSiblingHeading | `C-c C-f` | Navigate to next heading at same level |
+| markdownPrevSiblingHeading | `C-c C-b` | Navigate to previous heading at same level |
+| markdownUpHeading | `C-c C-u` | Navigate to parent heading |
+| markdownKill | `C-c C-k` | Kill element at point |
+| markdownImage | `C-c C-i` | Insert image template |
+| markdownTable | `C-c C-s t` | Insert table template |
+| markdownFootnote | `C-c C-s f` | Insert footnote pair |
+
+#### keymaps/index.ts (Simplified Dispatcher)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Thin routing dispatcher delegating to keymap modules |
+| Requirements | 1.2, 1.3 |
+
+**Contracts**: Service [x]
+
+```typescript
+const getKeymap: (
+  keyMapName?: KeyMapMode,
+  onSave?: () => void,
+) => Promise<KeymapResult>;
+```
+
+Implementation is a simple switch delegating to each module's factory. No inline keymap construction.
+
+### Editor Shortcuts Layer (`services-internal/editor-shortcuts/`)
+
+Relocated from `services/use-codemirror-editor/utils/editor-shortcuts/`.
+
+| Field | Detail |
+|-------|--------|
+| Intent | Categorized shortcut definitions for data-driven registration |
+| Requirements | 3.2 |
+
+**Key Change**: Each shortcut module returns `CategorizedKeyBindings` instead of raw `KeyBinding`:
+
+```typescript
+// Example: formatting shortcuts
+const formattingKeyBindings: (view?: EditorView, keymapMode?: KeyMapMode) => CategorizedKeyBindings;
+// Returns: { category: 'formatting', bindings: [bold, italic, strikethrough, code] }
+
+// Example: structural shortcuts
+const structuralKeyBindings: (view?: EditorView) => CategorizedKeyBindings;
+// Returns: { category: 'structural', bindings: [numbered, bullet, blockquote, link] }
+
+// Example: always-on shortcuts
+const alwaysOnKeyBindings: () => CategorizedKeyBindings;
+// Returns: { category: null, bindings: [...multiCursor] }
+```
+
+**Implementation Notes**:
+- Individual shortcut files (make-text-bold.ts, etc.) remain as-is internally but are grouped by the categorized wrapper
+- `generateAddMarkdownSymbolCommand` refactored to use pure `toggleMarkdownSymbol` directly instead of via hook
+- Move path: `services/use-codemirror-editor/utils/editor-shortcuts/` → `services-internal/editor-shortcuts/`
+
+### Stores Layer
+
+#### useEditorShortcuts (Refactored)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Data-driven shortcut registration using keymap override metadata |
+| Requirements | 3.1, 3.2, 3.3 |
+
+**Contracts**: State [x]
+
+```typescript
+const useEditorShortcuts: (
+  codeMirrorEditor?: UseCodeMirrorEditor,
+  overrides?: readonly ShortcutCategory[],
+) => void;
+```
+
+**Key Change**: Parameter changes from `keymapModeName?: KeyMapMode` to `overrides?: readonly ShortcutCategory[]`.
+
+Exclusion logic:
+```typescript
+const allGroups: CategorizedKeyBindings[] = [
+  formattingKeyBindings(view, keymapMode),
+  structuralKeyBindings(view),
+  alwaysOnKeyBindings(),
+];
+
+const activeBindings = allGroups
+  .filter(group => group.category === null || !overrides?.includes(group.category))
+  .flatMap(group => group.bindings);
+```
+
+#### useEditorSettings (Simplified)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Keymap lifecycle with simplified precedence handling |
+| Requirements | 1.4 |
+
+**Key Change**: Remove `getKeymapPrecedence()` function. Use `keymapResult.precedence` directly:
+
+```typescript
+// Before:
+const wrapWithPrecedence = getKeymapPrecedence(keymapMode);
+codeMirrorEditor?.appendExtensions(wrapWithPrecedence(keymapExtension));
+
+// After:
+codeMirrorEditor?.appendExtensions(keymapResult.precedence(keymapResult.extension));
+```
+
+Pass `keymapResult.overrides` to `useEditorShortcuts` instead of `keymapMode`.
+
+## Target Directory Structure
+
+```
+packages/editor/src/client/
+├── services/                              (PUBLIC API — unchanged contract)
+│   └── use-codemirror-editor/
+│       ├── use-codemirror-editor.ts       (hook: wraps pure functions for public API)
+│       └── utils/
+│           ├── insert-markdown-elements.ts (thin wrapper → markdown-utils/toggleMarkdownSymbol)
+│           ├── insert-prefix.ts           (thin wrapper → markdown-utils/insertLinePrefix)
+│           └── ...                        (other utils unchanged)
+│           (editor-shortcuts/ REMOVED — moved to services-internal/)
+│
+├── services-internal/
+│   ├── markdown-utils/                    (NEW: pure functions, no React deps)
+│   │   ├── index.ts
+│   │   ├── toggle-markdown-symbol.ts
+│   │   └── insert-line-prefix.ts
+│   ├── keymaps/
+│   │   ├── index.ts                       (thin dispatcher)
+│   │   ├── types.ts                       (KeymapResult, KeymapFactory, ShortcutCategory)
+│   │   ├── default.ts                     (NEW)
+│   │   ├── vscode.ts                      (NEW)
+│   │   ├── vim.ts                         (refactored: side effects inside factory)
+│   │   └── emacs/                         (SPLIT from single file)
+│   │       ├── index.ts                   (factory + composition)
+│   │       ├── formatting.ts              (C-c C-s formatting bindings)
+│   │       ├── structural.ts              (C-c C-s / C-c C-<key> structural bindings)
+│   │       └── navigation.ts              (C-c C-<key> navigation/editing bindings)
+│   ├── editor-shortcuts/                  (MOVED from services/)
+│   │   ├── index.ts                       (re-exports CategorizedKeyBindings groups)
+│   │   ├── types.ts                       (CategorizedKeyBindings)
+│   │   ├── formatting.ts                  (bold, italic, strikethrough, code)
+│   │   ├── structural.ts                  (numbered, bullet, blockquote, link)
+│   │   ├── always-on.ts                   (multiCursor)
+│   │   ├── make-code-block-extension.ts   (4-key combo as Extension)
+│   │   └── generate-add-markdown-symbol-command.ts
+│   └── ...                                (other services-internal unchanged)
+│
+└── stores/
+    ├── use-editor-settings.ts             (simplified: no getKeymapPrecedence)
+    └── use-editor-shortcuts.ts            (refactored: category-based exclusion)
+```
+
+### Affected Files: `editor-shortcuts/` Relocation
+
+Moving `editor-shortcuts/` from `services/use-codemirror-editor/utils/` to `services-internal/` affects the following import paths:
+
+| File | Import Count | Change |
+|------|-------------|--------|
+| `stores/use-editor-shortcuts.ts` | 10 imports | Rewrite all `../services/use-codemirror-editor/utils/editor-shortcuts/` → `../services-internal/editor-shortcuts/` |
+| `stores/use-editor-settings.ts` | 1 import (indirect via `use-editor-shortcuts`) | No change needed (imports `useEditorShortcuts` hook, not shortcuts directly) |
+
+No other files in the codebase import from `editor-shortcuts/`. The relocation is self-contained within `stores/use-editor-shortcuts.ts` import rewrites plus the physical directory move.
+
+## Data Models
+
+No data model changes. EditorSettings interface and localStorage persistence remain unchanged.
+
+## Error Handling
+
+### Error Strategy
+- EmacsHandler command registration failures: Log warning, continue with base emacs bindings
+- Missing onSave callback: Silently ignore C-x C-s / :w (6.2)
+- Duplicate command registration: Idempotency guard prevents double-registration
+
+## Testing Strategy
+
+### Unit Tests
+- `toggleMarkdownSymbol`: wrap, unwrap, empty selection, nested symbols — 5+ cases
+- `insertLinePrefix`: single line, multi-line, toggle off, with indent — 4+ cases
+- Each keymap factory returns correct `KeymapResult` shape (precedence, overrides)
+- `CategorizedKeyBindings` exclusion logic with various override combinations
+- Emacs submodule registration: formatting, structural, navigation each register expected commands
+
+### Integration Tests
+- Emacs mode: C-c C-s b toggles bold in editor
+- Emacs mode: C-x C-s triggers save callback
+- Vim mode: :w triggers save callback
+- Mode switching preserves document content
+- Shortcut exclusion: formatting shortcuts absent in Emacs mode, present in default mode
+
+### E2E Tests
+- Extend existing `playwright/23-editor/vim-keymap.spec.ts` pattern for Emacs keybindings
+- Keymap selector switches modes without reload

+ 189 - 0
.kiro/specs/editor-keymaps/requirements.md

@@ -0,0 +1,189 @@
+# Requirements Document
+
+## Introduction
+
+GROWI のエディタは CodeMirror 6 をベースに、4 つのキーマップモード(default, vscode, vim, emacs)をサポートしている。本仕様では以下の 2 つの目的を達成する:
+
+1. **モジュール構成のリファクタリング**: 各キーマップモードの責務・モジュール境界を整理し、一貫性のあるクリーンなアーキテクチャにリファクタする
+2. **Emacs キーバインディングの拡充**: PR #10980 で導入された Emacs markdown-mode バインディング(`C-c C-s` プレフィックス)を拡張し、本家 [jrblevin/markdown-mode](https://github.com/jrblevin/markdown-mode) を参考にした網羅的な Markdown 操作バインディングを提供する
+
+### Priority Order
+
+- **高優先**: Requirement 1-3 (モジュールリファクタリング) → Requirement 6-7 (save/vim 一貫性) → Requirement 8 (UI)
+- **中優先**: Requirement 4 (既存 formatting bindings の維持)
+- **低優先**: Requirement 5, 9 (追加 Emacs バインディング) — 本家 markdown-mode 準拠の拡充は最後に対応
+
+### Current State (PR #10980)
+
+- `packages/editor/src/client/services-internal/keymaps/` に vim.ts, emacs.ts が存在し、index.ts がディスパッチャ
+- default と vscode は index.ts 内でインラインに処理されており、独立モジュールがない
+- `toggleMarkdownSymbol` が emacs.ts 内にローカル実装されており、既存の `generateAddMarkdownSymbolCommand` / `useInsertMarkdownElements` と責務が重複
+- `use-editor-shortcuts.ts` が emacs モード判定のための条件分岐を持ち、各キーマップの差異を外部から管理している
+- Emacs モードでは formatting 系ショートカット(bold, italic, strikethrough, code)のみ C-c C-s で提供、リスト・引用・リンク等は未対応
+
+### Reference: jrblevin/markdown-mode Keybindings
+
+本家 Emacs markdown-mode の主要キーバインド(実装対象の参照用):
+
+**Text Styling (C-c C-s)**
+| Key | Command |
+|-----|---------|
+| `C-c C-s i` | Italic |
+| `C-c C-s b` | Bold |
+| `C-c C-s c` | Inline code |
+| `C-c C-s k` | `<kbd>` tag |
+| `C-c C-s q` / `C-c C-s Q` | Blockquote (word / region) |
+| `C-c C-s p` / `C-c C-s P` | Preformatted code block (word / region) |
+| `C-c C-s C` | GFM fenced code block |
+| `C-c C-s s` | Strikethrough (GROWI extension, not in original) |
+
+**Headings (C-c C-s)**
+| Key | Command |
+|-----|---------|
+| `C-c C-s h` / `C-c C-s H` | Auto heading (atx / setext) |
+| `C-c C-s 1` ~ `C-c C-s 6` | ATX heading level 1-6 |
+| `C-c C-s !` | Setext heading level 1 |
+| `C-c C-s @` | Setext heading level 2 |
+
+**Links & Images (C-c C-l / C-c C-i)**
+| Key | Command |
+|-----|---------|
+| `C-c C-l` | Insert/edit link |
+| `C-c C-i` | Insert/edit image |
+
+**Horizontal Rule, Footnotes, Wiki Links & Tables (C-c C-s)**
+| Key | Command |
+|-----|---------|
+| `C-c C-s -` | Horizontal rule |
+| `C-c C-s f` | Footnote |
+| `C-c C-s w` | Wiki link |
+| `C-c C-s t` | Table |
+
+**Promotion & Demotion**
+| Key | Command |
+|-----|---------|
+| `C-c C--` / `C-c LEFT` | Promote (outdent) |
+| `C-c C-=` / `C-c RIGHT` | Demote (indent) |
+
+**List Editing**
+| Key | Command |
+|-----|---------|
+| `M-RET` / `C-c C-j` | New list item |
+| `C-c UP/DOWN` | Move list item up/down |
+
+**Outline Navigation**
+| Key | Command |
+|-----|---------|
+| `C-c C-n` / `C-c C-p` | Next/previous heading (any level) |
+| `C-c C-f` / `C-c C-b` | Next/previous heading (same level) |
+| `C-c C-u` | Up to parent heading |
+
+**Other**
+| Key | Command |
+|-----|---------|
+| `C-c C-k` | Kill element at point |
+| `C-c C-o` | Open link at point |
+| `C-c C-x C-s` / `C-x C-s` | Save |
+
+## Requirements
+
+### Requirement 1: Uniform Keymap Module Structure
+
+**Objective:** As a developer, I want each keymap mode to have a consistent module structure, so that adding or modifying keymaps follows a predictable pattern and reduces coupling.
+
+#### Acceptance Criteria
+
+1. The Editor shall provide a dedicated module file for each keymap mode (default, vscode, vim, emacs) under `keymaps/` directory.
+2. When a keymap mode is loaded, the Keymap Dispatcher shall delegate to the corresponding module via the same async factory interface (`() => Promise<Extension>`).
+3. The Editor shall not contain inline keymap construction logic in the dispatcher; all mode-specific logic shall reside in each mode's dedicated module.
+4. Each keymap module shall encapsulate its own precedence requirement (high/low) so that the consumer does not need mode-specific branching for precedence.
+
+### Requirement 2: Shared Markdown Formatting Utility
+
+**Objective:** As a developer, I want markdown symbol toggling logic to be shared across keymap modules and editor shortcuts, so that formatting behavior is consistent and not duplicated.
+
+#### Acceptance Criteria
+
+1. The Editor shall provide a single shared utility for toggling markdown symbols (wrap/unwrap with prefix/suffix) that can be used by both keymap modules and editor shortcut hooks.
+2. When the Emacs keymap module applies markdown formatting, the Editor shall use the same toggling logic as the standard editor shortcuts.
+3. The Editor shall not have duplicate implementations of markdown symbol toggling in separate modules.
+
+### Requirement 3: Keymap-Aware Shortcut Registration
+
+**Objective:** As a developer, I want each keymap module to declare which standard shortcuts it overrides, so that the shortcut registration layer can exclude conflicts without hard-coded mode checks.
+
+#### Acceptance Criteria
+
+1. Each keymap module shall declare which categories of editor shortcuts it handles internally (e.g., formatting, navigation).
+2. When editor shortcuts are registered, the Shortcut Registration Hook shall consult the active keymap's declared overrides to exclude conflicting bindings.
+3. If a new keymap mode is added, the Shortcut Registration Hook shall not require code changes to handle the new mode's overrides.
+
+### Requirement 4: Emacs Markdown-Mode Formatting Bindings (Existing)
+
+**Objective:** As an Emacs user, I want C-c C-s prefix keybindings for markdown formatting, so that I can use familiar Emacs markdown-mode conventions in the GROWI editor.
+
+#### Acceptance Criteria
+
+1. While Emacs keymap mode is active, when the user types `C-c C-s b` or `C-c C-s B`, the Editor shall toggle bold formatting (`**`) around the selection or at the cursor.
+2. While Emacs keymap mode is active, when the user types `C-c C-s i` or `C-c C-s I`, the Editor shall toggle italic formatting (`*`) around the selection or at the cursor.
+3. While Emacs keymap mode is active, when the user types `C-c C-s c`, the Editor shall toggle inline code formatting (`` ` ``) around the selection or at the cursor.
+4. While Emacs keymap mode is active, when the user types `C-c C-s s`, the Editor shall toggle strikethrough formatting (`~~`) around the selection or at the cursor.
+5. While Emacs keymap mode is active, when the user types `C-c C-s p`, the Editor shall toggle code block formatting (` ``` `) around the selection or at the cursor.
+
+### Requirement 5: Emacs Structural Editing Bindings
+
+**Objective:** As an Emacs user, I want C-c prefix keybindings for structural markdown operations (lists, blockquotes, links, headings), so that I can perform all common markdown editing without leaving Emacs-style key sequences.
+
+#### Acceptance Criteria
+
+1. While Emacs keymap mode is active, when the user types `C-c C-s q`, the Editor shall insert or toggle a blockquote prefix (`>`) on the current line, consistent with markdown-mode `markdown-insert-blockquote`.
+2. While Emacs keymap mode is active, when the user types `C-c C-l`, the Editor shall insert a markdown link template (`[]()`) around the selection or at the cursor, consistent with markdown-mode `markdown-insert-link`.
+3. While Emacs keymap mode is active, when the user types `C-c C-s -`, the Editor shall insert a horizontal rule (`---`) at the current line, consistent with markdown-mode `markdown-insert-hr`.
+4. While Emacs keymap mode is active, when the user types `C-c C-s h`, the Editor shall insert an ATX heading with auto-determined level based on context, consistent with markdown-mode `markdown-insert-header-dwim`.
+5. While Emacs keymap mode is active, when the user types `C-c C-s 1` through `C-c C-s 6`, the Editor shall insert or replace the corresponding heading level (`#` through `######`) at the beginning of the current line.
+6. While Emacs keymap mode is active, when the user types `C-c C-j`, the Editor shall insert a new list item appropriate to the current list context (bullet or numbered).
+7. While Emacs keymap mode is active, when the user types `C-c C-s C`, the Editor shall insert a GFM-style fenced code block with language specifier prompt.
+
+### Requirement 6: Emacs Save Binding
+
+**Objective:** As an Emacs user, I want `C-x C-s` to save the page, so that the standard Emacs save keybinding works in the GROWI editor.
+
+#### Acceptance Criteria
+
+1. While Emacs keymap mode is active, when the user types `C-x C-s`, the Editor shall invoke the save action (same as the existing onSave callback used by Vim's `:w`).
+2. If no save callback is provided, the Editor shall silently ignore `C-x C-s` without error.
+
+### Requirement 7: Vim Keymap Module Consistency
+
+**Objective:** As a developer, I want the Vim keymap module to follow the same structural pattern as other keymap modules, so that the codebase is consistent.
+
+#### Acceptance Criteria
+
+1. The Vim keymap module shall follow the same factory interface pattern as all other keymap modules.
+2. The Vim keymap module shall encapsulate its top-level side effects (e.g., `Vim.map` calls) within the factory function rather than at module scope.
+
+### Requirement 8: Keymap Selection UI Consistency
+
+**Objective:** As a user, I want the keymap selector UI to accurately represent all available keymap modes, so that I can choose my preferred editing style.
+
+#### Acceptance Criteria
+
+1. The Keymap Selector shall display all registered keymap modes with appropriate labels and icons.
+2. When the user selects a keymap mode, the Editor shall switch to that mode without requiring a page reload.
+3. The Editor shall persist the selected keymap mode across sessions.
+
+### Requirement 9: Emacs Extended Markdown-Mode Bindings
+
+**Objective:** As an Emacs power user, I want additional markdown-mode keybindings for navigation, promotion/demotion, and advanced editing, so that the GROWI editor feels as close to native Emacs markdown-mode as possible.
+
+#### Acceptance Criteria
+
+1. While Emacs keymap mode is active, when the user types `C-c C--`, the Editor shall promote (outdent) the current element (heading level decrease or list outdent).
+2. While Emacs keymap mode is active, when the user types `C-c C-=`, the Editor shall demote (indent) the current element (heading level increase or list indent).
+3. While Emacs keymap mode is active, when the user types `C-c C-n` / `C-c C-p`, the Editor shall navigate to the next/previous heading.
+4. While Emacs keymap mode is active, when the user types `C-c C-f` / `C-c C-b`, the Editor shall navigate to the next/previous heading at the same level.
+5. While Emacs keymap mode is active, when the user types `C-c C-u`, the Editor shall navigate up to the parent heading.
+6. While Emacs keymap mode is active, when the user types `C-c C-k`, the Editor shall kill (delete) the element at point and add text content to the clipboard.
+7. While Emacs keymap mode is active, when the user types `C-c C-i`, the Editor shall insert a markdown image template (`![]()`).
+8. While Emacs keymap mode is active, when the user types `C-c C-s t`, the Editor shall insert a markdown table template.
+9. While Emacs keymap mode is active, when the user types `C-c C-s f`, the Editor shall insert a footnote marker and definition pair.

+ 118 - 0
.kiro/specs/editor-keymaps/research.md

@@ -0,0 +1,118 @@
+# Research & Design Decisions
+
+## Summary
+- **Feature**: editor-keymaps
+- **Discovery Scope**: Extension (existing keymap system)
+- **Key Findings**:
+  - `@replit/codemirror-emacs` EmacsHandler supports multi-stroke key chains natively via `bindKey` and `addCommands`; no C-x C-s save built-in
+  - Existing `toggleMarkdownSymbol` in emacs.ts duplicates logic from `useInsertMarkdownElements` hook; both perform wrap/unwrap but with different APIs (EditorView direct vs hook-based)
+  - Current dispatcher (`getKeymap`) mixes mode-specific concerns (inline vscode/default construction, precedence branching in consumer)
+
+## Research Log
+
+### @replit/codemirror-emacs API Surface
+- **Context**: Need to understand what multi-stroke bindings are possible for the `C-c C-s` prefix, the `C-c C-<key>` chains, and `C-x C-s`
+- **Sources Consulted**: `node_modules/@replit/codemirror-emacs/dist/index.d.ts`, compiled source
+- **Findings**:
+  - `EmacsHandler.bindKey(keyGroup: string, command: any)` supports pipe-separated alternatives and multi-stroke chains
+  - `EmacsHandler.addCommands(commands: object)` registers named commands; command receives `{ view: EditorView }`
+  - Key chain state tracked via `$data.keyChain`; intermediate keys store `null` in binding map
+  - Built-in bindings include C-k (kill line), C-w (kill region), C-y (yank), C-Space (set mark), but NOT C-x C-s
+  - Package version: 6.1.0
+- **Implications**: C-x C-s must be explicitly registered. All proposed Emacs bindings are achievable via the existing API.
+
+### Markdown Symbol Toggle Duplication
+- **Context**: emacs.ts has `toggleMarkdownSymbol(view, prefix, suffix)` while editor-shortcuts use `useInsertMarkdownElements` hook
+- **Sources Consulted**: `insert-markdown-elements.ts`, `emacs.ts`, `generate-add-markdown-symbol-command.ts`
+- **Findings**:
+  - `useInsertMarkdownElements` is a React hook returning `(prefix: string, suffix: string) => void`
+  - `toggleMarkdownSymbol` is a pure function taking `(view: EditorView, prefix: string, suffix: string) => void`
+  - Both implement wrap/unwrap toggle logic but with slightly different selection handling
+  - Emacs commands receive handler object with `view` property, not a React context
+  - Hook-based approach cannot be used inside `EmacsHandler.addCommands` since it's not a React component
+- **Implications**: Need a shared pure function (non-hook) that both the hook and Emacs commands can use. The hook wraps the pure function; Emacs calls it directly.
+
+### Prefix Insertion for Structural Bindings
+- **Context**: Need to support blockquote, list, heading insertion from Emacs commands
+- **Sources Consulted**: `insert-prefix.ts`, `insert-blockquote.ts`, `insert-numbered-list.ts`
+- **Findings**:
+  - `useInsertPrefix` is also a React hook: `(prefix: string, noSpaceIfPrefixExists?: boolean) => void`
+  - Handles multi-line selections, indentation-aware
+  - Same constraint: cannot be used inside EmacsHandler commands directly
+- **Implications**: Need pure function extraction for prefix operations too, callable with EditorView directly.
+
+### Precedence Architecture
+- **Context**: Emacs/Vim use Prec.high, default/vscode use Prec.low; currently branched in consumer
+- **Sources Consulted**: `use-editor-settings.ts` lines 87-99
+- **Findings**:
+  - Emacs/Vim use ViewPlugin DOM event handlers intercepting at keydown level
+  - Must run before CodeMirror's keymap handler to avoid Mac Ctrl-* and completionKeymap conflicts
+  - VSCode/default use `keymap.of()` which integrates with CodeMirror's handler directly
+- **Implications**: Precedence is inherent to the keymap type. Encapsulating it in the module return value eliminates consumer branching.
+
+## Architecture Pattern Evaluation
+
+| Option | Description | Strengths | Risks / Limitations | Notes |
+|--------|-------------|-----------|---------------------|-------|
+| A: Return-object factory | Each module returns `{ extension, precedence, overrides }` | Clean interface, no consumer branching | Slightly more complex return type | Preferred |
+| B: Pre-wrapped extension | Each module returns `Prec.high(extension)` directly | Simplest consumer code | Consumer loses control over precedence | Less flexible |
+| C: Config registry | Central registry maps mode → config | Extensible | Over-engineering for 4 modes | Rejected |
+
+## Design Decisions
+
+### Decision: Pure Function Extraction for Markdown Operations
+- **Context**: Emacs commands need markdown toggle/prefix but can't use React hooks
+- **Alternatives Considered**:
+  1. Extract pure functions from hooks, hooks become thin wrappers
+  2. Create entirely new utility functions for Emacs
+  3. Use CodeMirror commands directly in Emacs module
+- **Selected Approach**: Option 1 — Extract pure functions, hooks wrap them
+- **Rationale**: Eliminates duplication, both hooks and Emacs commands share the same logic
+- **Trade-offs**: Slight refactoring of existing hooks, but no behavioral change
+- **Follow-up**: Verify existing tests still pass after extraction
+
+### Decision: Factory Return Object Pattern
+- **Context**: Need to encapsulate precedence and override declarations per keymap
+- **Alternatives Considered**:
+  1. Return `{ extension, precedence, overrides }` object
+  2. Return pre-wrapped extension with separate metadata query
+- **Selected Approach**: Option 1 — Structured return object
+- **Rationale**: Single source of truth per keymap; consumer code becomes a simple loop
+- **Trade-offs**: Breaking change to getKeymap interface, but internal-only API
+
+### Decision: Override Categories for Shortcut Exclusion
+- **Context**: Need to replace `if (keymapModeName === 'emacs')` hard-coding
+- **Selected Approach**: Each keymap declares `overrides: ShortcutCategory[]` where categories are `'formatting' | 'navigation' | 'structural'`
+- **Rationale**: New keymaps can declare their overrides without modifying shortcut registration code
+- **Binding Mechanism**: `CategorizedKeyBindings` wrapper type groups `KeyBinding[]` with a `category` field, allowing `useEditorShortcuts` to filter by category match against overrides
+
+### Decision: Emacs Submodule Split
+- **Context**: emacs.ts accumulates 19+ commands spanning formatting, structural, navigation, and save — low cohesion
+- **Alternatives Considered**:
+  1. Single file with sections (current approach)
+  2. Split into `emacs/` directory with submodules per responsibility
+  3. Split by binding prefix (C-c C-s vs C-c C-)
+- **Selected Approach**: Option 2 — submodules by responsibility (formatting, structural, navigation)
+- **Rationale**: Each submodule has a single reason to change. Adding a new heading command only touches structural.ts. Adding navigation only touches navigation.ts.
+- **Trade-offs**: More files, but each is small (<80 lines) and focused
+
+### Decision: Relocate editor-shortcuts to services-internal
+- **Context**: `editor-shortcuts/` is currently under `services/use-codemirror-editor/utils/` (public layer) but is never exported — only consumed by `stores/use-editor-shortcuts.ts`
+- **Alternatives Considered**:
+  1. Keep in services/, add explicit non-export marker
+  2. Move to services-internal/editor-shortcuts/
+  3. Inline into stores/use-editor-shortcuts.ts
+- **Selected Approach**: Option 2 — move to services-internal/
+- **Rationale**: Aligns actual visibility with directory convention. services/ = public API, services-internal/ = internal only. The shortcut definitions are internal implementation details that should not be importable by external consumers.
+- **Trade-offs**: Requires updating import paths in use-editor-shortcuts.ts and any internal consumers
+- **Follow-up**: Verify no external package imports from this path
+
+## Risks & Mitigations
+- EmacsHandler.addCommands is called at module load time (static method); ensure idempotency if module is re-imported → Mitigation: guard with registration flag
+- Multi-stroke key chains may conflict with browser shortcuts on some platforms → Mitigation: Test on Mac/Windows/Linux; C-c C-s prefix is safe since C-c alone is intercepted by Emacs plugin
+- Pure function extraction may subtly change selection behavior → Mitigation: Write unit tests for toggle behavior before refactoring
+
+## References
+- [@replit/codemirror-emacs](https://github.com/replit/codemirror-emacs) — v6.1.0, EmacsHandler API
+- [jrblevin/markdown-mode](https://github.com/jrblevin/markdown-mode) — Reference for Emacs markdown-mode keybindings
+- [CodeMirror 6 Keymap API](https://codemirror.net/docs/ref/#view.keymap) — Precedence and extension system

+ 22 - 0
.kiro/specs/editor-keymaps/spec.json

@@ -0,0 +1,22 @@
+{
+  "feature_name": "editor-keymaps",
+  "created_at": "2026-04-08T00:00:00.000Z",
+  "updated_at": "2026-04-08T00:00:00.000Z",
+  "language": "en",
+  "phase": "tasks-generated",
+  "approvals": {
+    "requirements": {
+      "generated": true,
+      "approved": true
+    },
+    "design": {
+      "generated": true,
+      "approved": true
+    },
+    "tasks": {
+      "generated": true,
+      "approved": true
+    }
+  },
+  "ready_for_implementation": true
+}

+ 147 - 0
.kiro/specs/editor-keymaps/tasks.md

@@ -0,0 +1,147 @@
+# Implementation Plan
+
+- [x] 1. Extract shared markdown utility functions
+- [x] 1.1 Create the toggle markdown symbol utility
+  - Extract the inline markdown wrap/unwrap logic from the current Emacs keymap module into a standalone pure function
+  - Handle three cases: wrap selection, unwrap existing symbols, and insert empty symbols with cursor positioning
+  - Ensure no React or hook dependencies — pure CodeMirror state/view operations only
+  - _Requirements: 2.1, 2.3_
+
+- [x] 1.2 (P) Create the line prefix utility
+  - Extract line-prefix insertion logic into a standalone pure function alongside the toggle utility
+  - Support single-line and multi-line selections, toggle-off when all lines already have the prefix
+  - _Requirements: 2.1_
+
+- [x] 1.3 Rewire existing public hooks to delegate to the new shared utilities
+  - Update the insert-markdown-elements hook to become a thin wrapper calling the shared toggle function
+  - Update the insert-prefix hook to delegate to the shared line-prefix function
+  - Verify that existing editor behavior (bold, italic, etc. via toolbar/shortcuts) remains unchanged
+  - _Requirements: 2.2, 2.3_
+
+- [x] 2. Define keymap type system and refactor the dispatcher
+- [x] 2.1 Define the keymap result interface, factory type, and shortcut category types
+  - Introduce a structured return type that bundles extension, precedence wrapper, and override category declarations
+  - Define the shortcut category union type and the categorized key-bindings grouping type
+  - Place all types in a dedicated types module within the keymaps directory
+  - _Requirements: 1.2, 1.4, 3.1_
+
+- [x] 2.2 Simplify the keymap dispatcher to a thin router
+  - Remove all inline keymap construction logic (default and vscode mode handling) from the dispatcher
+  - Replace with a simple switch that delegates to each mode's factory function
+  - Ensure the dispatcher returns the structured keymap result to callers
+  - _Requirements: 1.2, 1.3_
+
+- [x] 3. Create dedicated keymap modules for each mode
+- [x] 3.1 (P) Create the default keymap module
+  - Implement as an async factory returning the standard CodeMirror default keymap with low precedence and no overrides
+  - _Requirements: 1.1_
+
+- [x] 3.2 (P) Create the VSCode keymap module
+  - Implement as an async factory returning the VSCode keymap extension with low precedence and no overrides
+  - _Requirements: 1.1_
+
+- [x] 3.3 Refactor the Vim keymap module for structural consistency
+  - Move top-level side effects (key mappings like jj/jk escape, :w ex-command) inside the factory function
+  - Add an idempotency guard to prevent duplicate registration on re-import
+  - Return high precedence and empty overrides (Vim uses its own modal system)
+  - Accept the optional onSave callback and register `:w` ex-command when provided
+  - _Requirements: 1.1, 7.1, 7.2_
+
+- [x] 4. Build the Emacs keymap module with formatting submodule
+- [x] 4.1 Create the Emacs module structure and factory entry point
+  - Set up the Emacs subdirectory with an index module that dynamically imports the Emacs extension
+  - The factory composes all submodule registrations, registers save binding, and returns high precedence with formatting and structural overrides declared
+  - _Requirements: 1.1, 1.4_
+
+- [x] 4.2 Implement the formatting bindings submodule
+  - Register C-c C-s prefix bindings for bold, italic, inline code, strikethrough, and code block
+  - Delegate all formatting operations to the shared toggle-markdown-symbol utility
+  - Support both lowercase and uppercase variants where specified (bold: b/B, italic: i/I)
+  - _Requirements: 4.1, 4.2, 4.3, 4.4, 4.5_
+
+- [x] 5. Relocate editor shortcuts and introduce category-based grouping
+- [x] 5.1 Move the editor-shortcuts directory from the public services layer to services-internal
+  - Physically relocate the directory and update all import paths in the consuming store module (10 imports)
+  - Verify build passes after relocation
+  - _Requirements: 3.2_
+
+- [x] 5.2 Wrap each shortcut group with categorized key-bindings metadata
+  - Group formatting shortcuts (bold, italic, strikethrough, code) under the formatting category
+  - Group structural shortcuts (numbered list, bullet list, blockquote, link) under the structural category
+  - Group always-on shortcuts (multi-cursor) with null category so they are never excluded
+  - _Requirements: 3.2, 3.3_
+
+- [x] 6. Refactor store layer for data-driven shortcut registration
+- [x] 6.1 Update the editor shortcuts store to use category-based exclusion
+  - Replace the hard-coded emacs mode check with data-driven filtering using the override categories from the keymap result
+  - Change the parameter from keymap mode name to an array of shortcut categories to exclude
+  - Filter categorized binding groups: include groups with null category always, exclude groups whose category appears in the overrides
+  - _Requirements: 3.1, 3.2, 3.3_
+
+- [x] 6.2 Simplify the editor settings store to use keymap result metadata
+  - Remove the standalone precedence-determination function
+  - Apply precedence directly from the keymap result's encapsulated precedence wrapper
+  - Pass the keymap result's override declarations to the editor shortcuts store
+  - _Requirements: 1.4_
+
+- [x] 7. Implement Emacs structural editing bindings
+- [x] 7.1 (P) Implement blockquote, link, and horizontal rule bindings
+  - Register C-c C-s q for blockquote toggle using the shared line-prefix utility
+  - Register C-c C-l for markdown link insertion using the shared toggle utility
+  - Register C-c C-s - for horizontal rule insertion
+  - _Requirements: 5.1, 5.2, 5.3_
+
+- [x] 7.2 (P) Implement heading bindings
+  - Register C-c C-s h for auto-determined heading level insertion
+  - Register C-c C-s 1 through C-c C-s 6 for explicit heading level insertion using the line-prefix utility
+  - _Requirements: 5.4, 5.5_
+
+- [x] 7.3 (P) Implement list item and fenced code block bindings
+  - Register C-c C-j for context-aware new list item insertion (detect bullet vs numbered from current context)
+  - Register C-c C-s C (shift-c) for GFM fenced code block insertion
+  - _Requirements: 5.6, 5.7_
+
+- [x] 8. Implement Emacs save binding
+  - Register C-x C-s as a two-stroke key sequence that invokes the onSave callback passed to the Emacs factory
+  - Silently ignore the binding when no save callback is provided
+  - Verify the same save mechanism used by Vim's :w command
+  - _Requirements: 6.1, 6.2_
+
+- [x] 9. Implement Emacs extended navigation and editing bindings
+- [x] 9.1 (P) Implement heading navigation bindings
+  - Register C-c C-n / C-c C-p to navigate to the next/previous heading at any level
+  - Register C-c C-f / C-c C-b to navigate to the next/previous heading at the same level
+  - Register C-c C-u to navigate up to the parent heading
+  - Use regex-based heading detection to scan document structure
+  - _Requirements: 9.3, 9.4, 9.5_
+
+- [x] 9.2 (P) Implement promotion and demotion bindings
+  - Register C-c C-- to promote (outdent) the current element: decrease heading level or outdent list item
+  - Register C-c C-= to demote (indent) the current element: increase heading level or indent list item
+  - Detect element type at cursor to apply the appropriate operation
+  - _Requirements: 9.1, 9.2_
+
+- [x] 9.3 (P) Implement kill, image, table, and footnote bindings
+  - Register C-c C-k to kill (delete) the element at point and copy its text content to the clipboard
+  - Register C-c C-i to insert a markdown image template
+  - Register C-c C-s t to insert a markdown table template
+  - Register C-c C-s f to insert a footnote marker and definition pair
+  - _Requirements: 9.6, 9.7, 9.8, 9.9_
+
+- [x] 10. Integration verification and UI consistency check
+- [x] 10.1 Verify keymap selection UI displays all modes correctly
+  - Confirm the keymap selector shows all four modes with appropriate labels
+  - Verify switching between modes applies immediately without page reload
+  - Confirm the selected mode persists across sessions via existing storage mechanism
+  - _Requirements: 8.1, 8.2, 8.3_
+
+- [x] 10.2 Add integration tests for keymap mode switching and shortcut exclusion
+  - Test that formatting shortcuts are excluded in Emacs mode but present in default mode
+  - Test that mode switching preserves document content
+  - Test that C-x C-s triggers save in Emacs mode and :w triggers save in Vim mode
+  - _Requirements: 1.4, 3.2, 6.1_
+
+- [x] 10.3 (P) Add E2E tests for Emacs keybindings
+  - Extend the existing Playwright editor test pattern to cover Emacs formatting bindings (C-c C-s b for bold, etc.)
+  - Cover at least one structural binding (C-c C-l for link) and one navigation binding (C-c C-n for next heading)
+  - _Requirements: 4.1, 5.2, 9.3_

+ 544 - 0
.kiro/specs/growi-logger/design.md

@@ -0,0 +1,544 @@
+# Design Document: growi-logger
+
+## Overview
+
+**Purpose**: `@growi/logger` is the shared logging infrastructure for the GROWI monorepo, providing namespace-based level control, platform detection (Node.js/browser), and Express HTTP middleware — built on pino.
+
+**Users**: All GROWI developers (logger consumers), operators (log level configuration), and the CI/CD pipeline.
+
+**Scope**: All GROWI applications (`apps/app`, `apps/slackbot-proxy`) and packages (`packages/slack`, `packages/remark-attachment-refs`, `packages/remark-lsx`) import from `@growi/logger` as the single logging entry point. Consumer applications do not import pino or pino-http directly.
+
+### Goals
+- Provide namespace-based log level control via config objects and environment variable overrides
+- Consolidate HTTP request logging under `createHttpLoggerMiddleware()` (pino-http encapsulated)
+- Maintain OpenTelemetry diagnostic logger integration
+- Serve as the single `@growi/logger` entry point for all monorepo consumers
+- Preserve pino's worker-thread performance model (single Worker thread, child loggers)
+
+### Non-Goals
+- Adding new logging capabilities (structured context propagation, remote log shipping)
+- Changing the namespace naming convention (e.g., `growi:service:page`)
+- Publishing `@growi/logger` to npm (private package, monorepo-internal only)
+- Migrating to pino v10 (blocked on `@opentelemetry/instrumentation-pino` v10 support)
+
+## Architecture
+
+### Architecture Overview
+
+`@growi/logger` is organized into these layers:
+
+1. **LoggerFactory** — creates and caches namespace-bound pino child loggers; `initializeLoggerFactory` spawns one Worker thread; `loggerFactory(name)` returns `rootLogger.child({ name })` with resolved level
+2. **LevelResolver + EnvVarParser** — resolve log level from config patterns and env var overrides via minimatch glob matching
+3. **TransportFactory** — produces pino transport config for Node.js (dev: bunyan-format, prod+FORMAT_NODE_LOG: pino-pretty singleLine, prod: raw JSON) and browser (console)
+4. **HttpLoggerFactory** — encapsulates pino-http as `createHttpLoggerMiddleware()`; dev-mode morgan-like formatting dynamically imported from `src/dev/`
+
+Key invariants:
+- `loggerFactory(name: string): Logger<string>` as the sole logger creation API
+- Hierarchical colon-delimited namespaces with glob pattern matching
+- `pino.transport()` called **once** in `initializeLoggerFactory`; all namespace loggers share the Worker thread
+- Dev-only modules (`src/dev/`) are never statically imported in production paths
+- Browser-unsafe modules (pino-http) are imported lazily inside function bodies
+
+### Architecture Pattern & Boundary Map
+
+```mermaid
+graph TB
+    subgraph ConsumerApps[Consumer Applications]
+        App[apps/app]
+        Slackbot[apps/slackbot-proxy]
+    end
+
+    subgraph ConsumerPkgs[Consumer Packages]
+        Slack[packages/slack]
+        Remark[packages/remark-attachment-refs]
+    end
+
+    subgraph GrowiLogger[@growi/logger]
+        Factory[LoggerFactory]
+        LevelResolver[LevelResolver]
+        EnvParser[EnvVarParser]
+        TransportSetup[TransportFactory]
+        HttpLogger[HttpLoggerFactory]
+    end
+
+    subgraph External[External Packages]
+        Pino[pino v9.x]
+        PinoPretty[pino-pretty]
+        PinoHttp[pino-http]
+        Minimatch[minimatch]
+    end
+
+    App --> Factory
+    App --> HttpLogger
+    Slackbot --> Factory
+    Slackbot --> HttpLogger
+    Slack --> Factory
+    Remark --> Factory
+
+    Factory --> LevelResolver
+    Factory --> TransportSetup
+    LevelResolver --> EnvParser
+    LevelResolver --> Minimatch
+
+    Factory --> Pino
+    TransportSetup --> PinoPretty
+
+    HttpLogger --> Factory
+    HttpLogger --> PinoHttp
+```
+
+**Architecture Integration**:
+- `@growi/logger` wraps pino with namespace-level control, transport setup, and HTTP middleware — the single logging entry point for all monorepo consumers
+- Domain boundary: `@growi/logger` owns all logger creation, level resolution, and transport setup; consumer apps only call `loggerFactory(name)`
+- Existing patterns preserved: factory function signature, namespace conventions, config file structure
+- New components: `LevelResolver` (namespace-to-level matching), `TransportFactory` (dev/prod stream setup), `EnvVarParser` (env variable parsing)
+- Steering compliance: shared package in `packages/` follows monorepo conventions
+- **Dev-only isolation**: modules that are only used in development (`bunyan-format`, `morgan-like-format-options`) reside under `src/dev/` to make the boundary explicit; all are loaded via dynamic import, never statically bundled in production
+
+### Technology Stack
+
+| Layer | Choice / Version | Role in Feature | Notes |
+|-------|------------------|-----------------|-------|
+| Logging Core | pino v9.x | Structured JSON logger for Node.js and browser | Pinned to v9.x for OTel compatibility; see research.md |
+| Dev Formatting | pino-pretty v13.x | Human-readable log output in development | Used as transport (worker thread) |
+| HTTP Logging | pino-http v11.x | Express middleware for request/response logging | Dependency of @growi/logger; not directly imported by consumer apps |
+| Glob Matching | minimatch (existing) | Namespace pattern matching for level config | Already a transitive dependency via universal-bunyan |
+| Shared Package | @growi/logger | Logger factory with namespace/config/env support and HTTP middleware | New package in packages/logger/ |
+
+## System Flows
+
+### Logger Creation Flow
+
+```mermaid
+sequenceDiagram
+    participant App as Application Startup
+    participant Factory as LoggerFactory
+    participant Transport as pino.transport (Worker)
+    participant Root as Root pino Logger
+
+    App->>Factory: initializeLoggerFactory(options)
+    Factory->>Transport: pino.transport(config) — spawns ONE Worker thread
+    Transport-->>Factory: transport stream
+    Factory->>Root: pino({ level: 'trace' }, transport)
+    Root-->>Factory: rootLogger stored in module scope
+```
+
+```mermaid
+sequenceDiagram
+    participant Consumer as Consumer Module
+    participant Factory as LoggerFactory
+    participant Cache as Logger Cache
+    participant Resolver as LevelResolver
+    participant Root as Root pino Logger
+
+    Consumer->>Factory: loggerFactory(namespace)
+    Factory->>Cache: lookup(namespace)
+    alt Cache hit
+        Cache-->>Factory: cached child logger
+    else Cache miss
+        Factory->>Resolver: resolveLevel(namespace, config, envOverrides)
+        Resolver-->>Factory: resolved level
+        Factory->>Root: rootLogger.child({ name: namespace })
+        Root-->>Factory: child logger (shares Worker thread)
+        Factory->>Factory: childLogger.level = resolved level
+        Factory->>Cache: store(namespace, childLogger)
+    end
+    Factory-->>Consumer: Logger
+```
+
+### Level Resolution Flow
+
+```mermaid
+flowchart TD
+    Start[resolveLevel namespace] --> EnvCheck{Env var match?}
+    EnvCheck -->|Yes| EnvLevel[Use env var level]
+    EnvCheck -->|No| ConfigCheck{Config pattern match?}
+    ConfigCheck -->|Yes| ConfigLevel[Use config level]
+    ConfigCheck -->|No| DefaultLevel[Use config default level]
+
+    EnvLevel --> Done[Return level]
+    ConfigLevel --> Done
+    DefaultLevel --> Done
+```
+
+## Requirements Traceability
+
+| Requirement | Summary | Components | Interfaces | Flows |
+|-------------|---------|------------|------------|-------|
+| 1.1–1.4 | Logger factory with namespace support | LoggerFactory, LoggerCache | `loggerFactory()` | Logger Creation |
+| 2.1–2.4 | Config-file level control | LevelResolver, ConfigLoader | `LoggerConfig` type | Level Resolution |
+| 3.1–3.5 | Env var level override | EnvVarParser, LevelResolver | `parseEnvLevels()` | Level Resolution |
+| 4.1–4.4 | Platform-aware logger | LoggerFactory, TransportFactory | `createTransport()` | Logger Creation |
+| 5.1–5.4 | Dev/prod output formatting | TransportFactory | `TransportOptions` | Logger Creation |
+| 6.1–6.4 | HTTP request logging | HttpLoggerFactory | `createHttpLoggerMiddleware()` | — |
+| 7.1–7.3 | OpenTelemetry integration | DiagLoggerPinoAdapter | `DiagLogger` interface | — |
+| 8.1–8.5 | Multi-app consistency | @growi/logger package | Package exports | — |
+| 10.1–10.3 | Pino logger type export | LoggerFactory | `Logger<string>` export | — |
+| 11.1–11.4 | Pino performance preservation | LoggerFactory | `initializeLoggerFactory`, shared root logger | Logger Creation |
+| 12.1–12.8 | Bunyan-like output format | BunyanFormatTransport, TransportFactory | Custom transport target | Logger Creation |
+| 13.1–13.6 | HTTP logger encapsulation | HttpLoggerFactory | `createHttpLoggerMiddleware()` | — |
+
+## Components and Interfaces
+
+| Component | Domain/Layer | Intent | Req Coverage | Key Dependencies | Contracts |
+|-----------|-------------|--------|--------------|-----------------|-----------|
+| LoggerFactory | @growi/logger / Core | Create and cache namespace-bound pino loggers | 1, 4, 8, 10, 11 | pino (P0), LevelResolver (P0), TransportFactory (P0) | Service |
+| LevelResolver | @growi/logger / Core | Resolve log level for a namespace from config + env | 2, 3 | minimatch (P0), EnvVarParser (P0) | Service |
+| EnvVarParser | @growi/logger / Core | Parse env vars into namespace-level map | 3 | — | Service |
+| TransportFactory | @growi/logger / Core | Create pino transport/options for Node.js and browser | 4, 5, 12 | pino-pretty (P1) | Service |
+| BunyanFormatTransport | @growi/logger / Transport | Custom pino transport producing bunyan-format "short" output | 12 | pino-pretty (P1) | Transport |
+| HttpLoggerFactory | @growi/logger / Core | Factory for pino-http Express middleware | 6, 13 | pino-http (P0), LoggerFactory (P0) | Service |
+| DiagLoggerPinoAdapter | apps/app / OpenTelemetry | Wrap pino logger as OTel DiagLogger | 7 | pino (P0) | Service |
+| ConfigLoader | Per-app | Load dev/prod config files | 2 | — | — |
+
+### @growi/logger Package
+
+#### LoggerFactory
+
+| Field | Detail |
+|-------|--------|
+| Intent | Central entry point for creating namespace-bound pino loggers with level resolution and caching |
+| Requirements | 1.1, 1.2, 1.3, 1.4, 4.1, 8.5, 10.1, 10.3 |
+
+**Responsibilities & Constraints**
+- Create pino logger instances with resolved level and transport configuration
+- Cache logger instances per namespace to ensure singleton behavior
+- Detect platform (Node.js vs browser) and apply appropriate configuration
+- Expose `loggerFactory(name: string): pino.Logger` as the public API
+
+**Dependencies**
+- Outbound: LevelResolver — resolve level for namespace (P0)
+- Outbound: TransportFactory — create transport options (P0)
+- External: pino v9.x — logger creation (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+import type { Logger } from 'pino';
+
+interface LoggerConfig {
+  [namespacePattern: string]: string; // pattern → level ('info', 'debug', etc.)
+}
+
+interface LoggerFactoryOptions {
+  config: LoggerConfig;
+}
+
+/**
+ * Initialize the logger factory module with configuration.
+ * Must be called once at application startup before any loggerFactory() calls.
+ */
+function initializeLoggerFactory(options: LoggerFactoryOptions): void;
+
+/**
+ * Create or retrieve a cached pino logger for the given namespace.
+ */
+function loggerFactory(name: string): Logger;
+```
+
+- Preconditions: `initializeLoggerFactory()` called before first `loggerFactory()` call
+- Postconditions: Returns a pino.Logger bound to the namespace with resolved level
+- Invariants: Same namespace always returns the same logger instance
+
+**Implementation Notes**
+- The `initializeLoggerFactory` is called once per app at startup, receiving the merged dev/prod config
+- Browser detection: `typeof window !== 'undefined' && typeof window.document !== 'undefined'`
+- In browser mode, skip transport setup and use pino's built-in `browser` option
+- The factory is a module-level singleton (module scope cache + config)
+- **Performance critical**: `pino.transport()` spawns a Worker thread. It MUST be called **once** inside `initializeLoggerFactory`, not inside `loggerFactory`. Each `loggerFactory(name)` call creates a child logger via `rootLogger.child({ name })` which shares the single Worker thread. Calling `pino.transport()` per namespace would spawn N Worker threads for N namespaces, negating pino's core performance advantage.
+
+#### LevelResolver
+
+| Field | Detail |
+|-------|--------|
+| Intent | Determine the effective log level for a given namespace by matching against config patterns and env var overrides |
+| Requirements | 2.1, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4, 3.5 |
+
+**Responsibilities & Constraints**
+- Match namespace against glob patterns in config (using minimatch)
+- Match namespace against env var-derived patterns (env vars take precedence)
+- Return the most specific matching level, or the `default` level as fallback
+- Parse is done once at module initialization; resolution is per-namespace at logger creation time
+
+**Dependencies**
+- Outbound: EnvVarParser — get env-derived level map (P0)
+- External: minimatch — glob pattern matching (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+interface LevelResolver {
+  /**
+   * Resolve the log level for a namespace.
+   * Priority: env var match > config pattern match > config default.
+   */
+  resolveLevel(
+    namespace: string,
+    config: LoggerConfig,
+    envOverrides: LoggerConfig,
+  ): string;
+}
+```
+
+- Preconditions: `config` contains a `default` key
+- Postconditions: Returns a valid pino log level string
+- Invariants: Env overrides always take precedence over config
+
+#### EnvVarParser
+
+| Field | Detail |
+|-------|--------|
+| Intent | Parse environment variables (DEBUG, TRACE, INFO, WARN, ERROR, FATAL) into a namespace-to-level map |
+| Requirements | 3.1, 3.4, 3.5 |
+
+**Responsibilities & Constraints**
+- Read `process.env.DEBUG`, `process.env.TRACE`, etc.
+- Split comma-separated values into individual namespace patterns
+- Return a flat `LoggerConfig` map: `{ 'growi:*': 'debug', 'growi:service:page': 'trace' }`
+- Parsed once at module load time (not per-logger)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+/**
+ * Parse log-level environment variables into a namespace-to-level map.
+ * Reads: DEBUG, TRACE, INFO, WARN, ERROR, FATAL from process.env.
+ */
+function parseEnvLevels(): LoggerConfig;
+```
+
+- Preconditions: Environment is available (`process.env`)
+- Postconditions: Returns a map where each key is a namespace pattern and value is a level string
+- Invariants: Only the six known env vars are read; unknown vars are ignored
+
+#### TransportFactory
+
+| Field | Detail |
+|-------|--------|
+| Intent | Create pino transport configuration appropriate for the current environment |
+| Requirements | 4.1, 4.2, 4.3, 4.4, 5.1, 5.2, 5.3, 5.4, 12.1, 12.6, 12.7, 12.8 |
+
+**Responsibilities & Constraints**
+- Node.js development: return BunyanFormatTransport config (`singleLine: false`) — **dev only, not imported in production**
+- Node.js production + `FORMAT_NODE_LOG`: return standard `pino-pretty` transport with `singleLine: true` (not bunyan-format)
+- Node.js production default: return raw JSON (stdout) — no transport
+- Browser: return pino `browser` option config (console output, production error-level default)
+- Include `name` field in all output via pino's `name` option
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+import type { LoggerOptions } from 'pino';
+
+interface TransportConfig {
+  /** Pino options for Node.js environment */
+  nodeOptions: Partial<LoggerOptions>;
+  /** Pino options for browser environment */
+  browserOptions: Partial<LoggerOptions>;
+}
+
+/**
+ * Create transport configuration based on environment.
+ * @param isProduction - Whether NODE_ENV is 'production'
+ */
+function createTransportConfig(isProduction: boolean): TransportConfig;
+```
+
+- Preconditions: Called during logger factory initialization
+- Postconditions: Returns valid pino options for the detected environment
+- Invariants: Browser options never include Node.js transports
+
+**Implementation Notes**
+- Dev transport: `{ target: '<resolved-path>/dev/bunyan-format.js' }` — target path resolved via `path.join(path.dirname(fileURLToPath(import.meta.url)), 'dev', 'bunyan-format.js')`; no `options` passed (singleLine defaults to false inside the module)
+- Prod with FORMAT_NODE_LOG: `{ target: 'pino-pretty', options: { translateTime: 'SYS:standard', ignore: 'pid,hostname', singleLine: true } }` — standard pino-pretty, no custom prettifiers
+- Prod without FORMAT_NODE_LOG (or false): raw JSON to stdout (no transport)
+- Browser production: `{ browser: { asObject: false }, level: 'error' }`
+- Browser development: `{ browser: { asObject: false } }` (inherits resolved level)
+- **Important**: The bunyan-format transport path is only resolved/referenced in the dev branch, ensuring the module is never imported in production
+
+#### BunyanFormatTransport
+
+| Field | Detail |
+|-------|--------|
+| Intent | Custom pino transport that produces bunyan-format "short" mode output (development only) |
+| Requirements | 12.1, 12.2, 12.3, 12.4, 12.5, 12.6, 12.7 |
+
+**Responsibilities & Constraints**
+- Loaded by `pino.transport()` in a Worker thread — must be a module file, not inline functions
+- Uses pino-pretty internally with `customPrettifiers` to match bunyan-format "short" layout
+- **Development only**: This module is only referenced by TransportFactory in the dev branch; never imported in production
+
+**Dependencies**
+- External: pino-pretty v13.x (P1) — used internally for colorization and base formatting
+
+**Contracts**: Transport [x]
+
+##### Transport Module
+
+```typescript
+// packages/logger/src/dev/bunyan-format.ts
+// Default export: function(opts) → Writable stream (pino transport protocol)
+
+interface BunyanFormatOptions {
+  singleLine?: boolean;
+  colorize?: boolean;
+  destination?: NodeJS.WritableStream;
+}
+```
+
+**Implementation Notes**
+- Uses `messageFormat` in pino-pretty to produce the full line: timestamp + level + name + message
+- `ignore: 'pid,hostname,name,req,res,responseTime'` — suppresses pino-http's verbose req/res objects in dev; the morgan-like `customSuccessMessage` already provides method/URL/status/time on the same line
+- `customPrettifiers: { time: () => '', level: () => '' }` — suppresses pino-pretty's default time/level rendering (handled inside `messageFormat`)
+- Level right-alignment and colorization are implemented inside `messageFormat` using ANSI codes
+- `singleLine` defaults to `false` inside the module; no `options` need to be passed from TransportFactory
+- Since the transport is a separate module loaded by the Worker thread, function options work (no serialization issue)
+- Vite's `preserveModules` ensures `src/dev/bunyan-format.ts` → `dist/dev/bunyan-format.js`
+- `NO_COLOR` environment variable is respected to disable colorization
+
+##### Output Examples
+
+**Dev** (bunyan-format, singleLine: false):
+```
+10:06:30.419Z DEBUG growi:service:PassportService: LdapStrategy: serverUrl is invalid
+10:06:30.420Z  WARN growi:service:PassportService: SamlStrategy: cert is not set.
+    extra: {"field":"value"}
+```
+
+**Dev HTTP log** (bunyan-format + morgan-like format, req/res suppressed):
+```
+10:06:30.730Z  INFO express: GET /applicable-grant?pageId=abc 304 - 16ms
+```
+
+**Prod + FORMAT_NODE_LOG** (standard pino-pretty, singleLine: true):
+```
+[2026-03-30 12:00:00.000] INFO (growi:service:search): Elasticsearch is enabled
+```
+
+**Prod default**: raw JSON (no transport, unchanged)
+
+### HTTP Logging Layer
+
+#### HttpLoggerFactory
+
+| Field | Detail |
+|-------|--------|
+| Intent | Encapsulate pino-http middleware creation within @growi/logger so consumers don't depend on pino-http |
+| Requirements | 6.1, 6.2, 6.3, 6.4, 13.1, 13.2, 13.3, 13.4, 13.5, 13.6 |
+
+**Responsibilities & Constraints**
+- Create pino-http middleware using a logger from LoggerFactory
+- In development mode: dynamically import and apply `morganLikeFormatOptions` (customSuccessMessage, customErrorMessage, customLogLevel)
+- In production mode: use pino-http's default message format (no morgan-like module imported)
+- Accept optional `autoLogging` configuration for route filtering
+- Return Express-compatible middleware
+- Encapsulate `pino-http` as an internal dependency of `@growi/logger`
+
+**Dependencies**
+- External: pino-http v11.x (P0)
+- Outbound: LoggerFactory — provides base logger (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+import type { RequestHandler } from 'express';
+
+interface HttpLoggerOptions {
+  /** Logger namespace, defaults to 'express' */
+  namespace?: string;
+  /** Auto-logging configuration (e.g., route ignore patterns) */
+  autoLogging?: {
+    ignore: (req: { url?: string }) => boolean;
+  };
+}
+
+/**
+ * Create Express middleware for HTTP request logging.
+ * In dev: uses pino-http with morgan-like formatting (dynamically imported).
+ * In prod: uses pino-http with default formatting.
+ */
+async function createHttpLoggerMiddleware(options?: HttpLoggerOptions): Promise<RequestHandler>;
+```
+
+- Preconditions: LoggerFactory initialized
+- Postconditions: Returns Express middleware that logs HTTP requests
+- Invariants: morganLikeFormatOptions applied only in dev; static file paths skipped when autoLogging.ignore provided
+
+**Implementation Notes**
+- The type assertion for Logger<string> → pino-http's Logger is handled internally, hidden from consumers
+- `pino-http` moves from apps' dependencies to `@growi/logger`'s dependencies
+- **Browser compatibility**: `pino-http` is imported lazily inside the function body (`const { default: pinoHttp } = await import('pino-http')`) rather than at the module top-level. This prevents bundlers (Turbopack/webpack) from pulling the Node.js-only `pino-http` into browser bundles when `@growi/logger` is imported by shared code
+- `morganLikeFormatOptions` is dynamically imported (`await import('./dev/morgan-like-format-options')`) only when `NODE_ENV !== 'production'`, ensuring the module is not loaded in production
+- The function is `async` to support the dynamic imports; consumers call: `express.use(await createHttpLoggerMiddleware({ autoLogging: { ignore: ... } }))`
+
+### OpenTelemetry Layer
+
+#### DiagLoggerPinoAdapter
+
+| Field | Detail |
+|-------|--------|
+| Intent | Adapt a pino logger to the OpenTelemetry DiagLogger interface |
+| Requirements | 7.1, 7.2, 7.3 |
+
+**Responsibilities & Constraints**
+- Implement the OTel `DiagLogger` interface (`error`, `warn`, `info`, `debug`, `verbose`)
+- Map `verbose()` to pino's `trace()` level
+- Parse JSON strings in message arguments (preserving current behavior)
+- Disable `@opentelemetry/instrumentation-pino` if enabled by default
+
+**Dependencies**
+- External: pino v9.x (P0)
+- External: @opentelemetry/api (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+import type { DiagLogger } from '@opentelemetry/api';
+
+/**
+ * Create a DiagLogger that delegates to a pino logger.
+ * Maps OTel verbose level to pino trace level.
+ */
+function createDiagLoggerAdapter(): DiagLogger;
+```
+
+- Preconditions: LoggerFactory initialized, pino logger available for OTel namespace
+- Postconditions: Returns a valid DiagLogger implementation
+- Invariants: All DiagLogger methods delegate to the corresponding pino level
+
+**Implementation Notes**
+- Minimal change from current `DiagLoggerBunyanAdapter` — rename class, update import from bunyan to pino
+- `parseMessage` helper can remain largely unchanged
+- In OTel SDK configuration, replace `'@opentelemetry/instrumentation-bunyan': { enabled: false }` with `'@opentelemetry/instrumentation-pino': { enabled: false }` if the instrumentation package is present
+
+## Data Models
+
+Not applicable. This feature modifies runtime logging behavior and does not introduce or change persisted data models.
+
+## Error Handling
+
+### Error Strategy
+Logging infrastructure must be resilient — a logger failure must never crash the application.
+
+### Error Categories and Responses
+- **Missing config file**: Fall back to `{ default: 'info' }` and emit a console warning
+- **Invalid log level in config/env**: Ignore the entry and log a warning to stderr
+- **Transport initialization failure** (pino-pretty not available): Fall back to raw JSON output
+- **Logger creation failure**: Return a no-op logger that silently discards messages
+
+### Monitoring
+- Logger initialization errors are written to `process.stderr` directly (cannot use the logger itself)
+- No additional monitoring infrastructure required — this is the monitoring infrastructure
+

+ 79 - 0
.kiro/specs/growi-logger/requirements.md

@@ -0,0 +1,79 @@
+# Requirements Document
+
+## Introduction
+
+`@growi/logger` is the shared logging package for the GROWI monorepo, wrapping pino with namespace-based level control, platform detection (Node.js/browser), and Express HTTP middleware. All GROWI applications and packages import from `@growi/logger` as the single logging entry point.
+
+## Requirements
+
+### Requirement 1: Logger Factory with Namespace Support
+
+**Objective:** Provide `loggerFactory(name: string)` returning a pino logger bound to the given namespace, so developers can identify the source of log messages and control granularity per module.
+
+**Summary**: `loggerFactory(name)` returns a cached pino child logger for the namespace — same namespace always returns the same instance. Namespaces follow colon-delimited hierarchical convention (e.g., `growi:service:page`). The logger exposes `.info()`, `.debug()`, `.warn()`, `.error()`, `.trace()`, and `.fatal()` methods compatible with all existing call sites.
+
+### Requirement 2: Namespace-Based Log Level Configuration via Config Files
+
+**Objective:** Load per-namespace log levels from configuration objects (separate for dev and prod), allowing fine-tuned verbosity per module without restart.
+
+**Summary**: Accepts a `LoggerConfig` object mapping namespace patterns to log levels (e.g., `{ 'growi:service:*': 'debug', 'default': 'info' }`). Uses minimatch-compatible glob patterns. When no pattern matches, falls back to the `default` level. Per-app loggerFactory wrappers load dev/prod config files and pass the result to `initializeLoggerFactory`.
+
+### Requirement 3: Environment Variable-Based Log Level Override
+
+**Objective:** Override log levels at runtime via environment variables, enabling debug/trace logging for specific namespaces without modifying config files.
+
+**Summary**: Reads `DEBUG`, `TRACE`, `INFO`, `WARN`, `ERROR`, and `FATAL` environment variables. Each supports comma-separated namespace patterns with glob wildcards (e.g., `DEBUG=growi:routes:*,growi:service:page`). Environment variable matches take precedence over config file entries.
+
+### Requirement 4: Platform-Aware Logger (Node.js and Browser)
+
+**Objective:** Work seamlessly in both Node.js and browser environments using the same `loggerFactory` import.
+
+**Summary**: Detects runtime environment via `typeof window` check and applies appropriate transport. In browsers, outputs to the developer console; defaults to `error` level in production to minimize console noise. In Node.js, uses transport-based formatting as defined in Requirement 5.
+
+### Requirement 5: Output Formatting (Development vs Production)
+
+**Objective:** Provide distinct log output formats for development (human-readable) and production (structured JSON).
+
+**Summary**: Development uses the bunyan-format custom transport (`HH:mm:ss.SSSZ LEVEL name: message` format, colorized). Production defaults to raw JSON. When `FORMAT_NODE_LOG` is set, production uses standard pino-pretty with `singleLine: true`. The logger namespace is included in all output.
+
+### Requirement 6: HTTP Request Logging
+
+**Objective:** Provide Express HTTP request logging via `createHttpLoggerMiddleware()`, encapsulating pino-http so consumer apps do not depend on it directly.
+
+**Summary**: `createHttpLoggerMiddleware(options?)` returns Express-compatible middleware. In development, applies morgan-like message formatting (method, URL, status, response time) via dynamic import of `src/dev/morgan-like-format-options.ts`. In production, uses pino-http's default format. Static file paths can be excluded via `autoLogging.ignore`.
+
+### Requirement 7: OpenTelemetry Integration
+
+**Objective:** Integrate with OpenTelemetry diagnostics so observability tooling continues to function.
+
+**Summary**: `DiagLoggerPinoAdapter` in apps/app wraps pino as an OTel `DiagLogger`, mapping `verbose` to pino `trace`. The OTel SDK configuration disables `@opentelemetry/instrumentation-pino`.
+
+### Requirement 8: Multi-App Consistency
+
+**Objective:** All GROWI monorepo applications use the same pino-based logging solution from `@growi/logger`.
+
+**Summary**: `apps/app`, `apps/slackbot-proxy`, `packages/slack`, `packages/remark-attachment-refs`, and `packages/remark-lsx` all import from `@growi/logger` via `workspace:*`. The package is `"private": true` — monorepo-internal only, not published to npm.
+
+### Requirement 10: Pino Logger Type Export
+
+**Objective:** Export a TypeScript type for logger instances compatible with pino-http and other pino-ecosystem packages.
+
+**Summary**: `@growi/logger` exports `Logger<string>` (not the default `Logger<never>`) so the type is assignable to pino-http's `logger` option and other external APIs. Consumers type-annotate logger variables using this export without importing pino directly.
+
+### Requirement 11: Single Worker Thread Performance Model
+
+**Objective:** Honor pino's design philosophy of minimal main-thread overhead.
+
+**Summary**: `pino.transport()` is called exactly once in `initializeLoggerFactory()`. All namespace loggers are created via `rootLogger.child({ name })`, sharing the single Worker thread. The root logger level is `'trace'` so children can independently apply their resolved level. The Worker thread count never exceeds 1, regardless of namespace count.
+
+### Requirement 12: Bunyan-Like Output Format (Development Only)
+
+**Objective:** Provide human-readable log output in development mode matching the legacy bunyan-format "short" style.
+
+**Summary**: In development, each log line uses `HH:mm:ss.SSSZ LEVEL name: message` with 5-char right-aligned level labels and level-based colorization (cyan/green/yellow/red). Implemented as a custom pino transport at `src/dev/bunyan-format.ts` — only loaded in development. Standard pino-pretty is used for `FORMAT_NODE_LOG` in production. The `NO_COLOR` environment variable is respected.
+
+### Requirement 13: HTTP Logger Middleware Encapsulation
+
+**Objective:** Encapsulate pino-http within `@growi/logger` so consumer apps do not import pino-http directly.
+
+**Summary**: `createHttpLoggerMiddleware(options?)` is the sole HTTP logging API. `pino-http` is a dependency of `@growi/logger`, imported lazily inside the async function body (preventing browser bundle inclusion via Turbopack/webpack). Morgan-like formatting (`src/dev/morgan-like-format-options.ts`) is dynamically imported only in development. Status codes are colorized (2xx=green, 3xx=cyan, 4xx=yellow, 5xx=red) with `NO_COLOR` env var support.

+ 224 - 0
.kiro/specs/growi-logger/research.md

@@ -0,0 +1,224 @@
+# Research & Design Decisions
+
+---
+**Purpose**: Capture discovery findings, architectural investigations, and rationale that inform the technical design.
+---
+
+## Summary
+- **Feature**: `growi-logger`
+- **Discovery Scope**: Complex Integration
+- **Key Findings**:
+  - Pino and bunyan share identical argument patterns (`logger.info(obj, msg)`) — no call-site changes needed
+  - No `logger.child()` or custom serializers used in GROWI — simplifies migration significantly
+  - `@opentelemetry/instrumentation-pino` supports pino `<10`; need to verify v9.x or v10 compatibility
+  - No off-the-shelf pino package replicates universal-bunyan's namespace-based level control; custom wrapper required
+
+## Research Log
+
+### Pino Core API Compatibility with Bunyan
+- **Context**: Need to confirm argument pattern compatibility to minimize call-site changes
+- **Sources Consulted**: pino GitHub docs (api.md), npm pino@10.3.1
+- **Findings**:
+  - Log level numeric values are identical: trace=10, debug=20, info=30, warn=40, error=50, fatal=60
+  - Method signature: `logger[level]([mergingObject], [message], [...interpolationValues])` — same as bunyan
+  - `name` option adds a `"name"` field to JSON output, same as bunyan
+  - `msg` is the default message key (same as bunyan), configurable via `messageKey`
+  - `pino.child(bindings, options)` works similarly to bunyan's `child()`
+- **Implications**: Call sites using `logger.info('msg')`, `logger.info({obj}, 'msg')`, `logger.error(err)` require no changes
+
+### Pino Browser Support
+- **Context**: universal-bunyan uses browser-bunyan + ConsoleFormattedStream for client-side logging
+- **Sources Consulted**: pino GitHub docs (browser.md)
+- **Findings**:
+  - Pino has built-in browser mode activated via package.json `browser` field
+  - Maps to console methods: `console.error` (fatal/error), `console.warn`, `console.info`, `console.debug`, `console.trace`
+  - `browser.asObject: true` outputs structured objects
+  - `browser.write` allows custom per-level handlers
+  - Level control works the same as Node.js (`level` option)
+  - No separate package needed (unlike browser-bunyan)
+- **Implications**: Eliminates browser-bunyan and @browser-bunyan/console-formatted-stream dependencies entirely
+
+### Pino-Pretty as Bunyan-Format Replacement
+- **Context**: universal-bunyan uses bunyan-format with `short` (dev) and `long` (prod) output modes
+- **Sources Consulted**: pino-pretty npm (v13.1.3)
+- **Findings**:
+  - Can be used as transport (worker thread) or stream (main thread)
+  - Short mode equivalent: `singleLine: true` + `ignore: 'pid,hostname'`
+  - Long mode equivalent: default multi-line output
+  - `translateTime: 'SYS:standard'` for human-readable timestamps
+  - TTY-only pattern: conditionally enable based on `process.stdout.isTTY`
+- **Implications**: Direct replacement for bunyan-format with equivalent modes
+
+### Pino-HTTP as Morgan/Express-Bunyan-Logger Replacement
+- **Context**: GROWI uses morgan (dev) and express-bunyan-logger (prod) for HTTP request logging
+- **Sources Consulted**: pino-http npm (v11.0.0)
+- **Findings**:
+  - Express middleware with `autoLogging.ignore` for route skipping (replaces morgan's `skip`)
+  - Accepts custom pino logger instance via `logger` option
+  - `customLogLevel` for status-code-based level selection
+  - `req.log` provides child logger with request context
+  - Replaces both morgan and express-bunyan-logger in a single package
+- **Implications**: Unified HTTP logging for both dev and prod, with route filtering support
+
+### Namespace-Based Level Control
+- **Context**: universal-bunyan provides namespace-to-level mapping with minimatch glob patterns and env var overrides
+- **Sources Consulted**: pino-debug (v4.0.2), pino ecosystem search
+- **Findings**:
+  - pino-debug bridges the `debug` module but doesn't provide general namespace-level control
+  - No official pino package replicates universal-bunyan's behavior
+  - Custom implementation needed: wrapper that caches pino instances per namespace, reads config + env vars, applies minimatch matching
+  - Can use pino's `level` option per-instance (set at creation time)
+- **Implications**: Must build `@growi/logger` package as a custom wrapper around pino, replacing universal-bunyan
+
+### OpenTelemetry Instrumentation
+- **Context**: GROWI has a custom DiagLogger adapter wrapping bunyan, and disables @opentelemetry/instrumentation-bunyan
+- **Sources Consulted**: @opentelemetry/instrumentation-pino npm (v0.59.0)
+- **Findings**:
+  - Supports pino `>=5.14.0 <10` — pino v10 may not be supported yet
+  - Provides trace correlation (trace_id, span_id injection) and log sending to OTel SDK
+  - GROWI's DiagLoggerBunyanAdapter pattern maps cleanly to pino (same method names)
+  - Current code disables bunyan instrumentation; equivalent disable for pino instrumentation may be needed
+- **Implications**: Pin pino to v9.x for OTel compatibility, or verify v10 support. DiagLogger adapter changes are minimal.
+
+### Existing Call-Site Analysis
+- **Context**: Need to understand what API surface is actually used to minimize migration risk
+- **Sources Consulted**: Codebase grep across all apps and packages
+- **Findings**:
+  - **No `logger.child()` usage** anywhere in the codebase
+  - **No custom serializers** registered or used
+  - **No `logger.fields` access** or other bunyan-specific APIs
+  - Call patterns: ~30% simple string, ~50% string+object, ~10% error-only, ~10% string+error
+  - All loggers created via `loggerFactory(name)` — single entry point
+- **Implications**: Migration is primarily a factory-level change; call sites need no modification
+
+## Architecture Pattern Evaluation
+
+| Option | Description | Strengths | Risks / Limitations | Notes |
+|--------|-------------|-----------|---------------------|-------|
+| Drop-in wrapper (`@growi/logger`) | Shared package providing `loggerFactory()` over pino with namespace/config/env support | Minimal call-site changes, single source of truth, testable in isolation | Must implement namespace matching (minimatch) | Mirrors universal-bunyan's role |
+| Direct pino usage per app | Each app creates pino instances directly | No wrapper overhead | Duplicated config logic, inconsistent behavior across apps | Rejected: violates Req 8 |
+| pino-debug bridge | Use pino-debug for namespace control | Leverages existing package | Only works with `debug()` calls, not general logging | Rejected: wrong abstraction |
+
+## Design Decisions
+
+### Decision: Create `@growi/logger` as Shared Package
+- **Context**: universal-bunyan is a custom wrapper; need equivalent for pino
+- **Alternatives Considered**:
+  1. Direct pino usage in each app — too much duplication
+  2. Fork/patch universal-bunyan for pino — complex, hard to maintain
+  3. New shared package `@growi/logger` — clean, purpose-built
+- **Selected Approach**: New `@growi/logger` package in `packages/logger/`
+- **Rationale**: Single source of truth, testable, follows monorepo patterns (like @growi/core)
+- **Trade-offs**: One more package to maintain, but replaces external dependency
+- **Follow-up**: Define package exports, ensure tree-shaking for browser builds
+
+### Decision: Pin Pino to v9.x for OpenTelemetry Compatibility
+- **Context**: @opentelemetry/instrumentation-pino supports `<10`
+- **Alternatives Considered**:
+  1. Use pino v10 and skip OTel auto-instrumentation — loses correlation
+  2. Use pino v9 for compatibility — safe choice
+  3. Use pino v10 and verify latest instrumentation support — risky
+- **Selected Approach**: Start with pino v9.x; upgrade to v10 when OTel adds support
+- **Rationale**: OTel trace correlation is valuable for production observability
+- **Trade-offs**: Miss latest pino features temporarily
+- **Follow-up**: Monitor @opentelemetry/instrumentation-pino releases for v10 support
+
+### Decision: Use pino-pretty as Transport in Development
+- **Context**: Need human-readable output for dev, JSON for prod
+- **Alternatives Considered**:
+  1. pino-pretty as transport (worker thread) — standard approach
+  2. pino-pretty as sync stream — simpler but blocks main thread
+- **Selected Approach**: Transport for async dev logging; raw JSON in production
+- **Rationale**: Transport keeps main thread clear; dev perf is less critical but the pattern is correct
+- **Trade-offs**: Slightly more complex setup
+- **Follow-up**: Verify transport works correctly with Next.js dev server
+
+### Decision: Unified HTTP Logging with pino-http
+- **Context**: Currently uses morgan (dev) and express-bunyan-logger (prod) — two different middlewares
+- **Alternatives Considered**:
+  1. Separate dev/prod middleware (maintain split) — unnecessary complexity
+  2. Single pino-http middleware for both — clean, consistent
+- **Selected Approach**: pino-http with route filtering replaces both
+- **Rationale**: Single middleware, consistent output format, built-in request context
+- **Trade-offs**: Dev output slightly different from morgan's compact format (mitigated by pino-pretty)
+- **Follow-up**: Configure `autoLogging.ignore` for `/_next/static/` paths
+
+## Risks & Mitigations
+- **OTel instrumentation compatibility with pino version** — Mitigated by pinning to v9.x
+- **Browser bundle size increase** — Pino browser mode is lightweight; monitor with build metrics
+- **Subtle log format differences** — Acceptance test comparing output before/after
+- **Missing env var behavior** — Port minimatch logic carefully with unit tests
+- **Express middleware ordering** — Ensure pino-http is added at the same point in the middleware chain
+
+### Phase 2: Formatting Improvement Research
+
+#### pino-http Custom Message API (v11.0.0)
+- **Context**: Need morgan-like concise HTTP log messages instead of pino-http's verbose default
+- **Sources Consulted**: pino-http v11.0.0 type definitions (index.d.ts), source code (logger.js)
+- **Findings**:
+  - `customSuccessMessage: (req: IM, res: SR, responseTime: number) => string` — called on successful response (statusCode < 500)
+  - `customErrorMessage: (req: IM, res: SR, error: Error) => string` — called on error response
+  - `customReceivedMessage: (req: IM, res: SR) => string` — called when request received (optional, only if autoLogging enabled)
+  - `customLogLevel: (req: IM, res: SR, error?: Error) => LevelWithSilent` — dynamic log level based on status code
+  - `customSuccessObject: (req, res, val) => any` — custom fields for successful response log
+  - `customErrorObject: (req, res, error, val) => any` — custom fields for error response log
+  - `customAttributeKeys: { req?, res?, err?, reqId?, responseTime? }` — rename default keys
+  - Response time is calculated as `Date.now() - res[startTime]` in milliseconds
+  - Error conditions: error passed to handler, `res.err` set, or `res.statusCode >= 500`
+- **Implications**: `customSuccessMessage` + `customErrorMessage` + `customLogLevel` are sufficient to achieve morgan-like output format
+
+#### pino-pretty singleLine Option
+- **Context**: User wants one-liner readable logs when FORMAT_NODE_LOG=true
+- **Sources Consulted**: pino-pretty v13.x documentation
+- **Findings**:
+  - `singleLine: true` forces all log properties onto a single line
+  - `singleLine: false` (default) outputs properties on separate indented lines
+  - Combined with `ignore: 'pid,hostname'`, singleLine produces concise output
+  - The `messageFormat` option can further customize the format string
+- **Implications**: Changing `singleLine` from `false` to `true` in the production FORMAT_NODE_LOG path directly addresses the user's readability concern
+
+#### FORMAT_NODE_LOG Default Semantics Analysis
+- **Context**: `isFormattedOutputEnabled()` returns `true` when env var is unset; production JSON depends on `.env.production`
+- **Analysis**:
+  - `.env.production` sets `FORMAT_NODE_LOG=false` — this is the mechanism that ensures JSON in production
+  - CI sets `FORMAT_NODE_LOG=true` explicitly — not affected by default change
+  - If `.env.production` fails to load in a Docker override scenario, production would silently get pino-pretty
+  - However, inverting the default is a behavioral change with broader implications
+- **Decision**: Defer to separate PR. Current behavior is correct in practice (`.env.production` always loaded by Next.js dotenv-flow).
+
+## Phase 3: Implementation Discoveries
+
+### Browser Bundle Compatibility — pino-http Top-Level Import
+- **Context**: `pino-http` was initially imported at the module top-level in `http-logger.ts`. This caused Turbopack to include the Node.js-only module in browser bundles, producing `TypeError: __turbopack_context__.r(...).symbols is undefined`.
+- **Root cause**: `@growi/logger` is imported by shared page code that runs in both browser and server contexts. Any top-level import of a Node.js-only module (like pino-http) gets pulled into the browser bundle.
+- **Fix**: Move the `pino-http` import inside the async function body using dynamic import: `const { default: pinoHttp } = await import('pino-http')`. This defers the import to runtime when the function is actually called (server-side only).
+- **Pattern**: This is the standard pattern for Node.js-only modules in packages shared with browser code. Apply the same treatment to any future Node.js-only additions to `@growi/logger`.
+
+### Dev-Only Module Physical Isolation (`src/dev/`)
+- **Context**: `bunyan-format.ts` (custom pino transport) and `morgan-like-format-options.ts` were initially placed at `src/transports/` and `src/` root respectively, mixed with production modules.
+- **Problem**: No clear boundary between dev-only and production-safe modules; risk of accidentally importing dev modules in production paths.
+- **Fix**: Created `src/dev/` directory as the explicit boundary for development-only modules. `TransportFactory` references `./dev/bunyan-format.js` only in the dev branch — the path is never constructed in production code paths.
+- **Vite config**: `preserveModules: true` ensures `src/dev/bunyan-format.ts` builds to `dist/dev/bunyan-format.js` with the exact path that `pino.transport({ target: ... })` references at runtime.
+
+### Single Worker Thread Model — Critical Implementation Detail
+- **Context**: Initial implementation called `pino.transport()` inside `loggerFactory(name)`, spawning a new Worker thread for each namespace.
+- **Fix**: Refactored so `pino.transport()` is called **once** in `initializeLoggerFactory`, and `loggerFactory(name)` calls `rootLogger.child({ name })` to create namespace-bound loggers sharing the single Worker thread.
+- **Root logger level**: Must be set to `'trace'` (not `'info'`) so child loggers can independently set their resolved level without being silenced by the root. If the root is `'info'`, a child with `level: 'debug'` will still be filtered at the root level.
+- **Constraint for future changes**: Never call `pino.transport()` or `pino()` inside `loggerFactory()`. All transport setup belongs in `initializeLoggerFactory()`.
+
+### pino Logger Type Compatibility with pino-http
+- **Context**: `loggerFactory()` returned `pino.Logger<never>` (the default), which is not assignable to pino-http's expected `Logger` type.
+- **Fix**: Export `Logger<string>` from `@growi/logger` and type `loggerFactory` to return `Logger<string>`. This is compatible with pino-http's `logger` option.
+- **Why `<string>` not `<never>`**: pino's default generic `CustomLevels` is `never`, which makes the type incompatible with APIs expecting custom levels to potentially be strings. `Logger<string>` is the correct type for external APIs.
+
+### `@growi/logger` Package Visibility
+- **Decision**: `"private": true` is correct and intentional.
+- **Rationale**: All consumers (`apps/app`, `apps/slackbot-proxy`, `packages/slack`, etc.) are monorepo-internal packages that reference `@growi/logger` via `workspace:*` protocol. The `private` flag only prevents npm publish, not workspace usage. `@growi/logger` is logging infrastructure — there is no reason to expose it externally (unlike `@growi/core` or `@growi/pluginkit` which are published for external plugin developers).
+
+## References
+- [pino API docs](https://github.com/pinojs/pino/blob/main/docs/api.md)
+- [pino browser docs](https://github.com/pinojs/pino/blob/main/docs/browser.md)
+- [pino-pretty npm](https://www.npmjs.com/package/pino-pretty)
+- [pino-http npm](https://www.npmjs.com/package/pino-http)
+- [@opentelemetry/instrumentation-pino](https://www.npmjs.com/package/@opentelemetry/instrumentation-pino)
+- [universal-bunyan source](https://github.com/weseek/universal-bunyan) — current implementation reference

+ 23 - 0
.kiro/specs/growi-logger/spec.json

@@ -0,0 +1,23 @@
+{
+  "feature_name": "growi-logger",
+  "created_at": "2026-03-23T00:00:00.000Z",
+  "updated_at": "2026-04-10T00:00:00.000Z",
+  "language": "en",
+  "phase": "implementation-complete",
+  "cleanup_completed": true,
+  "approvals": {
+    "requirements": {
+      "generated": true,
+      "approved": true
+    },
+    "design": {
+      "generated": true,
+      "approved": true
+    },
+    "tasks": {
+      "generated": true,
+      "approved": false
+    }
+  },
+  "ready_for_implementation": true
+}

+ 18 - 0
.kiro/specs/growi-logger/tasks.md

@@ -0,0 +1,18 @@
+# Implementation History
+
+All tasks completed (2026-03-23 → 2026-04-06). This document records the implementation scope for future reference.
+
+- [x] 1. Scaffold `@growi/logger` shared package — package.json (pino v9.x, minimatch, pino-pretty peer), TypeScript ESM config, vitest setup, package entry points (main/types/browser)
+- [x] 2. Environment variable parsing and level resolution — `EnvVarParser` (reads DEBUG/TRACE/INFO/WARN/ERROR/FATAL), `LevelResolver` (minimatch glob matching, env-override precedence)
+- [x] 3. Transport factory — `TransportFactory` for Node.js dev (bunyan-format), prod+FORMAT_NODE_LOG (pino-pretty singleLine), prod default (raw JSON), and browser (console)
+- [x] 4. Logger factory — `initializeLoggerFactory` (spawns one Worker thread), `loggerFactory(name)` (child logger cache, level resolution)
+- [x] 5. Migrate shared packages — packages/slack, packages/remark-attachment-refs, packages/remark-lsx; fix pino-style call sites (object-first argument order)
+- [x] 6. Migrate apps/slackbot-proxy — logger factory, pino-http HTTP middleware, type imports, pino-style call sites
+- [x] 7. Migrate apps/app — logger factory, pino-http HTTP middleware, DiagLoggerPinoAdapter (OTel), bunyan type references
+- [x] 8. Remove all bunyan/morgan dependencies; verify no residual imports across monorepo
+- [x] 9. Full monorepo validation — lint, type-check, test, build for @growi/app, @growi/slackbot-proxy, @growi/logger
+- [x] 10. Differentiate pino-pretty `singleLine`: dev=false (multi-line context), prod+FORMAT_NODE_LOG=true (concise one-liners)
+- [x] 11. Morgan-like HTTP formatting — `customSuccessMessage`, `customErrorMessage`, `customLogLevel` in pino-http config
+- [x] 12. Bunyan-format custom transport (`src/dev/bunyan-format.ts`) — `HH:mm:ss.SSSZ LEVEL name: message` format, colorization, NO_COLOR support, pino.transport() worker thread
+- [x] 13. `createHttpLoggerMiddleware` — encapsulate pino-http in `@growi/logger`; move morgan-like options inside; add to @growi/logger deps
+- [x] 14. Dev-only module isolation (`src/dev/`) and browser bundle fix — lazy pino-http import, extended `ignore` field in bunyan-format

+ 598 - 0
.kiro/specs/news-inappnotification/design.md

@@ -0,0 +1,598 @@
+# Design Document: news-inappnotification
+
+## Overview
+
+本機能は GROWI インスタンスが外部の静的 JSON フィード(GitHub Pages)を定期取得し、ニュースとして InAppNotification パネルに表示する。既存の通知(InAppNotification)とニュース(NewsItem)は別モデルで管理し、UI のみクライアント側で時系列マージして統合表示する。
+
+**Purpose**: GROWI 運営者が配信するニュース(リリース情報、セキュリティ通知、お知らせ等)を、ユーザーが既存の通知導線から確認できるようにする。
+
+**Users**: すべての GROWI ログインユーザー。ロール(admin/general)により表示対象を制御できる。
+
+**Impact**: InAppNotification サイドバーパネルに「すべて/通知/お知らせ」フィルタタブと無限スクロールを追加する。既存の「未読のみ」トグルは維持し、フィルタタブとの2重フィルタリングを提供する。
+
+### Goals
+
+- 外部フィード(`NEWS_FEED_URL`)を cron で定期取得し、MongoDB にキャッシュする
+- InAppNotification パネルで通知とニュースを統合表示する
+- ニュースの既読/未読状態をユーザー単位で管理する
+- ロール別表示制御(admin/general)をサーバーサイドで強制する
+- 多言語ニュース(`ja_JP`, `en_US` 等)をブラウザ言語に応じて表示する
+
+### Non-Goals
+
+- GROWI 管理者によるニュース作成・編集 UI(フィードリポジトリで管理)
+- リアルタイムプッシュ通知(cron ポーリングのみ)
+- `growiVersionRegExps` 以外の条件によるフィルタ(将来フェーズ)
+- RSS/Atom フォーマットへの対応(将来フェーズ)
+
+---
+
+## Architecture
+
+### Existing Architecture Analysis
+
+InAppNotification は per-user ドキュメント設計であり、`user` フィールドが必須。通知発生時に全対象ユーザー分のドキュメントを生成する(push 型)。ニュースは全ユーザーで1件のドキュメントを共有し、ユーザーがパネルを開いたときに取得する(pull 型)。この設計上の差異により、ニュースは別モデルとして実装する(詳細は `research.md` の Design Decisions を参照)。
+
+サイドバーパネルは `Sidebar/InAppNotification/InAppNotification.tsx` が `useState` でトグル state を管理し、`InAppNotificationSubstance.tsx` へ prop として渡すパターンを採用している。本機能のフィルタ state も同じパターンで実装する。
+
+### Architecture Pattern & Boundary Map
+
+```mermaid
+graph TB
+  GitHubPages[GitHub Pages\nfeed.json]
+  NewsCron[NewsCronService]
+  NewsItemModel[NewsItem Model]
+  NewsReadModel[NewsReadStatus Model]
+  NewsService[NewsService]
+  NewsAPI[News API\napiv3/news]
+  SidebarPanel[InAppNotification Panel\nSidebar/InAppNotification/]
+  NewsHooks[useSWRINFxNews\nstores/news.ts]
+  IANHooks[useSWRINFxInAppNotifications\nstores/in-app-notification.ts]
+  InfScroll[InfiniteScroll Component]
+  BadgeItem[PrimaryItemForNotification]
+
+  GitHubPages -->|HTTP GET cron| NewsCron
+  NewsCron -->|upsert / delete| NewsItemModel
+  NewsAPI -->|delegates| NewsService
+  NewsService -->|query| NewsItemModel
+  NewsService -->|query / write| NewsReadModel
+  SidebarPanel -->|fetch| NewsHooks
+  SidebarPanel -->|fetch| IANHooks
+  NewsHooks -->|apiv3Get| NewsAPI
+  SidebarPanel -->|renders| InfScroll
+  BadgeItem -->|count sum| NewsHooks
+```
+
+**Architecture Integration**:
+- 選択パターン: Pull 型 + クライアントサイドマージ
+- 新規コンポーネント: `NewsCronService`, `NewsItem Model`, `NewsReadStatus Model`, `NewsService`, `News API`, `NewsItem Component`, `useSWRINFxNews`
+- 既存コンポーネント拡張: `InAppNotification.tsx`(フィルタ state 追加), `InAppNotificationSubstance.tsx`(フィルタタブ + InfiniteScroll), `useSWRINFxInAppNotifications`(新設), `PrimaryItemForNotification`(未読カウント合算)
+- 既存 `InfiniteScroll.tsx` をそのまま再利用
+
+### Technology Stack
+
+| Layer | 選択 / バージョン | 役割 |
+|---|---|---|
+| Backend Cron | node-cron(既存) | フィード定期取得スケジューリング |
+| Backend HTTP | node `fetch` / axios(既存) | `NEWS_FEED_URL` から feed.json 取得 |
+| Data Store | MongoDB + Mongoose(既存) | NewsItem, NewsReadStatus の永続化 |
+| Frontend Data | SWR `useSWRInfinite`(既存) | ニュース・通知の無限スクロール取得 |
+| Frontend State | React `useState`(既存パターン) | フィルタタブ・未読トグルのローカル state |
+| i18n | next-i18next / `commons.json`(既存) | UI ラベルの多言語化 |
+
+---
+
+## System Flows
+
+### フィード取得フロー
+
+```mermaid
+sequenceDiagram
+  participant Cron as NewsCronService
+  participant Feed as GitHub Pages
+  participant DB as MongoDB
+
+  Cron->>Cron: getCronSchedule() = '0 1 * * *'
+  Cron->>Cron: NEWS_FEED_URL 未設定? → スキップ
+  Cron->>Feed: HTTP GET feed.json
+  alt 取得失敗
+    Cron->>Cron: ログ記録、既存 DB データ維持
+  else 取得成功
+    Cron->>Cron: growiVersionRegExps でフィルタ
+    Cron->>DB: externalId で upsert(新規/更新)
+    Cron->>DB: フィードにないアイテムを削除
+  end
+  Note over DB: TTL インデックス(90日)で自動削除
+```
+
+### パネル表示フロー
+
+```mermaid
+sequenceDiagram
+  participant User
+  participant Panel as InAppNotification Panel
+  participant NewsAPI as News API
+  participant IANAPI as InAppNotification API
+
+  User->>Panel: パネルを開く
+  Panel->>NewsAPI: useSWRINFxNews(limit, { onlyUnread })
+  Panel->>IANAPI: useSWRINFxInAppNotifications(limit, { status })
+  alt フィルタ = 'all'
+    Panel->>Panel: 両データを publishedAt/createdAt で降順マージ
+  else フィルタ = 'news'
+    Panel->>Panel: NewsItem のみ表示
+  else フィルタ = 'notifications'
+    Panel->>Panel: InAppNotification のみ表示
+  end
+  Panel->>User: レンダリング
+  User->>Panel: スクロール末端に達する
+  Panel->>NewsAPI: setSize(size + 1)(次ページ fetch)
+```
+
+### 既読フロー
+
+```mermaid
+sequenceDiagram
+  participant User
+  participant Component as NewsItem Component
+  participant API as News API
+  participant DB as MongoDB
+
+  User->>Component: クリック
+  Component->>API: POST /apiv3/news/mark-read { newsItemId }
+  API->>DB: NewsReadStatus upsert(userId + newsItemId)
+  Component->>Component: SWR mutate(ローカルキャッシュ更新)
+  Component->>User: url が存在すれば新タブで開く
+```
+
+---
+
+## Requirements Traceability
+
+| 要件 | Summary | コンポーネント | インターフェース | フロー |
+|---|---|---|---|---|
+| 1.1–1.7 | フィード定期取得 | NewsCronService | `executeJob()` | フィード取得フロー |
+| 2.1–2.4 | NewsItem モデル | NewsItem Model | MongoDB schema | フィード取得フロー |
+| 3.1–3.5 | 既読/未読管理 | NewsReadStatus Model, NewsService, News API | `POST /mark-read`, `GET /unread-count` | 既読フロー |
+| 4.1–4.2 | ロール別表示制御 | NewsService | `listForUser(userRole)` | パネル表示フロー |
+| 5.1–5.7 | UI 統合表示 | InAppNotification Panel, InAppNotificationSubstance | filter state props | パネル表示フロー |
+| 6.1–6.4 | 視覚表示 | NewsItem Component | CSS classes(`fw-bold`, `bg-primary`) | — |
+| 7.1–7.2 | 未読バッジ | PrimaryItemForNotification | `useSWRxNewsUnreadCount` | — |
+| 8.1–8.4 | 多言語対応 | NewsItem Component, locales | locale fallback logic | — |
+
+---
+
+## Components and Interfaces
+
+### サーバーサイド
+
+| コンポーネント | 層 | Intent | 要件 | 主要依存 |
+|---|---|---|---|---|
+| NewsCronService | Server / Cron | フィード定期取得・DB 同期 | 1.1–1.7 | CronService (P0), NewsService (P0) |
+| NewsItem Model | Server / Data | ニュースアイテムの永続化 | 2.1–2.4 | MongoDB (P0) |
+| NewsReadStatus Model | Server / Data | ユーザー既読状態の永続化 | 3.1–3.3 | MongoDB (P0) |
+| NewsService | Server / Domain | ニュース一覧・既読管理のビジネスロジック | 3.4–3.5, 4.1–4.2 | NewsItem Model (P0), NewsReadStatus Model (P0) |
+| News API | Server / API | HTTP エンドポイント提供 | 3.1–3.5, 4.1–4.2 | NewsService (P0) |
+
+---
+
+#### NewsCronService
+
+| Field | Detail |
+|---|---|
+| Intent | フィード URL から JSON を定期取得し NewsItem を upsert/delete する |
+| Requirements | 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7 |
+
+**Responsibilities & Constraints**
+- 毎日 AM 1:00 に実行(`'0 1 * * *'`)
+- `NEWS_FEED_URL` 未設定時はスキップ(エラーなし)
+- 取得失敗時は既存 DB データを維持
+- `growiVersionRegExps` の照合はここで実施(DB には合致アイテムのみ保存)
+- ランダムスリープ(0–5分)で複数インスタンスのリクエストを分散
+
+**Dependencies**
+- Inbound: node-cron — スケジュール実行(P0)
+- Outbound: NewsService — upsert/delete(P0)
+- External: `NEWS_FEED_URL` の HTTP エンドポイント — feed.json 取得(P0)
+
+**Contracts**: Batch [x]
+
+##### Batch / Job Contract
+- Trigger: `node-cron` スケジュール `'0 1 * * *'`
+- Input: `NEWS_FEED_URL` 環境変数、GROWI バージョン文字列
+- Output: MongoDB の NewsItem コレクションを最新フィードと同期
+- Idempotency: `externalId` ユニークインデックスにより冪等。再実行しても重複なし
+
+##### Service Interface
+```typescript
+class NewsCronService extends CronService {
+  getCronSchedule(): string;  // '0 1 * * *'
+  executeJob(): Promise<void>;
+}
+```
+
+**Implementation Notes**
+- Integration: `server/service/cron.ts` の `CronService` を継承。`startCron()` をアプリ起動時に呼ぶ
+- Validation: `NEWS_FEED_URL` が `https://` で始まることを確認。`growiVersionRegExps` は try-catch で個別評価し、不正 regex はスキップ
+- Risks: フィード取得タイムアウト(10秒推奨)。外部依存のため失敗を前提に設計する
+
+---
+
+#### NewsItem Model
+
+| Field | Detail |
+|---|---|
+| Intent | フィードから取得したニュースアイテムを全ユーザー共通で1件保持する |
+| Requirements | 2.1, 2.2, 2.3, 2.4 |
+
+**Contracts**: State [x]
+
+##### State Management
+```typescript
+interface INewsItem {
+  _id: Types.ObjectId;
+  externalId: string;                    // unique index
+  title: Record<string, string>;         // { ja_JP: string, en_US?: string, ... }
+  body?: Record<string, string>;
+  emoji?: string;
+  url?: string;
+  publishedAt: Date;                     // index
+  fetchedAt: Date;                       // TTL index (90 days = 7776000s)
+  conditions?: {
+    targetRoles?: string[];              // ['admin'] | ['admin', 'general'] | undefined
+  };
+}
+```
+
+**Indexes**:
+- `externalId`: unique index(重複排除)
+- `publishedAt`: index(降順ソート)
+- `fetchedAt`: TTL index(90日で自動削除)
+
+---
+
+#### NewsReadStatus Model
+
+| Field | Detail |
+|---|---|
+| Intent | ユーザーが既読にした時のみドキュメントを作成。ドキュメント不在 = 未読 |
+| Requirements | 3.1, 3.2, 3.3 |
+
+**Contracts**: State [x]
+
+##### State Management
+```typescript
+interface INewsReadStatus {
+  _id: Types.ObjectId;
+  userId: Types.ObjectId;              // compound unique index with newsItemId
+  newsItemId: Types.ObjectId;         // compound unique index with userId
+  readAt: Date;
+}
+```
+
+**Indexes**:
+- `{ userId, newsItemId }`: compound unique index(重複防止・冪等性保証)
+
+---
+
+#### NewsService
+
+| Field | Detail |
+|---|---|
+| Intent | ニュース一覧取得・既読管理のビジネスロジックを担う |
+| Requirements | 3.4, 3.5, 4.1, 4.2 |
+
+**Contracts**: Service [x]
+
+##### Service Interface
+```typescript
+interface INewsService {
+  listForUser(
+    userId: Types.ObjectId,
+    userRoles: string[],
+    options: { limit: number; offset: number; onlyUnread?: boolean }
+  ): Promise<PaginateResult<INewsItemWithReadStatus>>;
+
+  getUnreadCount(userId: Types.ObjectId, userRoles: string[]): Promise<number>;
+
+  markRead(userId: Types.ObjectId, newsItemId: Types.ObjectId): Promise<void>;
+
+  markAllRead(userId: Types.ObjectId, userRoles: string[]): Promise<void>;
+
+  upsertNewsItems(items: INewsItemInput[]): Promise<void>;
+
+  deleteNewsItemsByExternalIds(externalIds: string[]): Promise<void>;
+}
+
+interface INewsItemWithReadStatus extends INewsItem {
+  isRead: boolean;
+}
+```
+
+- Preconditions: `userId` は有効な ObjectId
+- Postconditions: `listForUser` の結果は `publishedAt` 降順。各アイテムに `isRead` が付与される
+- ロールフィルタ: `conditions.targetRoles` が未設定または `userRoles` に一致するアイテムのみ返す
+
+---
+
+#### News API
+
+| Field | Detail |
+|---|---|
+| Intent | ニュース一覧取得・既読管理の HTTP エンドポイントを提供する |
+| Requirements | 3.1, 3.4, 3.5, 4.1, 4.2 |
+
+**Contracts**: API [x]
+
+##### API Contract
+
+| Method | Endpoint | Request | Response | Errors |
+|---|---|---|---|---|
+| GET | `/apiv3/news/list` | `?limit&offset&onlyUnread` | `PaginateResult<INewsItemWithReadStatus>` | 401 |
+| GET | `/apiv3/news/unread-count` | — | `{ count: number }` | 401 |
+| POST | `/apiv3/news/mark-read` | `{ newsItemId: string }` | `{ ok: true }` | 400, 401 |
+| POST | `/apiv3/news/mark-all-read` | — | `{ ok: true }` | 401 |
+
+全エンドポイントに `loginRequiredStrictly` と `accessTokenParser` を適用する。
+
+**Implementation Notes**
+- Integration: `apps/app/src/server/routes/apiv3/news.ts` に新規作成
+- Validation: `newsItemId` は `mongoose.isValidObjectId()` で検証
+- Risks: ロールフィルタはサーバーサイドで強制。クライアントから `targetRoles` を受け取らない
+
+---
+
+### クライアントサイド
+
+| コンポーネント | 層 | Intent | 要件 | 主要依存 |
+|---|---|---|---|---|
+| useSWRINFxNews | Client / Hooks | ニュースアイテムの無限スクロール取得 | 5.4 | News API (P0) |
+| useSWRxNewsUnreadCount | Client / Hooks | ニュース未読カウント取得 | 7.1 | News API (P0) |
+| useSWRINFxInAppNotifications | Client / Hooks | 通知の無限スクロール取得(既存 hook を拡張) | 5.4 | InAppNotification API (P0) |
+| InAppNotification.tsx(変更) | Client / UI | フィルタ state を追加管理 | 5.2, 5.3 | useState (P0) |
+| InAppNotificationSubstance.tsx(変更) | Client / UI | フィルタタブ + InfiniteScroll | 5.1–5.5 | useSWRINFxNews (P0), InfiniteScroll (P0) |
+| NewsItem Component | Client / UI | ニュースアイテム1件の表示 | 5.5, 5.6, 5.7, 6.1–6.4, 8.1–8.2 | — |
+| PrimaryItemForNotification(変更) | Client / UI | 未読バッジに NewsItem の未読数を合算 | 7.1, 7.2 | useSWRxNewsUnreadCount (P0) |
+
+---
+
+#### useSWRINFxNews
+
+| Field | Detail |
+|---|---|
+| Intent | ニュースアイテムの無限スクロールデータ取得 |
+| Requirements | 5.4 |
+
+**Contracts**: State [x]
+
+##### State Management
+```typescript
+// stores/news.ts
+export const useSWRINFxNews = (
+  limit: number,
+  options?: { onlyUnread?: boolean },
+  config?: SWRConfiguration,
+): SWRInfiniteResponse<PaginateResult<INewsItemWithReadStatus>, Error>;
+
+export const useSWRxNewsUnreadCount = (): SWRResponse<number, Error>;
+```
+
+キー: `['/news/list', limit, pageIndex, options.onlyUnread]`
+
+---
+
+#### InAppNotification.tsx(変更)
+
+| Field | Detail |
+|---|---|
+| Intent | フィルタタブ state を追加し、子コンポーネントへ伝播する |
+| Requirements | 5.2, 5.3 |
+
+**Implementation Notes**
+- 既存 `isUnopendNotificationsVisible` state はそのまま維持
+- `activeFilter: 'all' | 'news' | 'notifications'` を `useState('all')` で追加
+- `InAppNotificationForms` と `InAppNotificationContent` へ prop を追加
+
+```typescript
+type FilterType = 'all' | 'news' | 'notifications';
+```
+
+---
+
+#### InAppNotificationSubstance.tsx(変更)
+
+| Field | Detail |
+|---|---|
+| Intent | フィルタタブ UI の追加と、InfiniteScroll を用いた統合リスト表示 |
+| Requirements | 5.1, 5.2, 5.3, 5.4, 5.5 |
+
+**Contracts**: State [x]
+
+**InAppNotificationForms への追加**:
+- フィルタボタン(「すべて」「通知」「お知らせ」)を Bootstrap `btn-group` で実装
+- 既存「未読のみ」トグルは維持
+
+**InAppNotificationContent の変更**:
+- `activeFilter` に応じて3パターンに分岐
+  - `'all'`: `useSWRINFxNews` + `useSWRINFxInAppNotifications` の結果を `publishedAt/createdAt` 降順でマージ
+  - `'news'`: `useSWRINFxNews` のみ。`NewsList` に渡す
+  - `'notifications'`: `useSWRINFxInAppNotifications` のみ。既存 `InAppNotificationList` に渡す
+- 既存 `InfiniteScroll` コンポーネントを使用(`client/components/InfiniteScroll.tsx`)
+- 既存 `// TODO: Infinite scroll implemented` コメントを解消
+
+---
+
+#### NewsItem Component
+
+| Field | Detail |
+|---|---|
+| Intent | ニュースアイテム1件を表示する(emoji、タイトル、未読インジケータ) |
+| Requirements | 5.5, 5.6, 5.7, 6.1, 6.2, 6.3, 6.4, 8.1, 8.2 |
+
+**Implementation Notes**
+- 配置: `features/news/client/components/NewsItem.tsx`
+- ロケールフォールバック: `browserLocale → ja_JP → en_US → 最初に利用可能なキー`
+- 未読: `fw-bold` + 左端に `bg-primary` 8px 丸ドット
+- 既読: `fw-normal` + 同幅の透明スペーサー
+- `emoji` 未設定時は `📢` をフォールバック
+- クリック時: `POST /mark-read` + SWR mutate + `url` があれば新タブで開く
+
+---
+
+## Data Models
+
+### Domain Model
+
+```mermaid
+erDiagram
+  NewsItem {
+    ObjectId _id
+    string externalId
+    object title
+    object body
+    string emoji
+    string url
+    Date publishedAt
+    Date fetchedAt
+    object conditions
+  }
+  NewsReadStatus {
+    ObjectId _id
+    ObjectId userId
+    ObjectId newsItemId
+    Date readAt
+  }
+  User {
+    ObjectId _id
+    string username
+    string role
+  }
+
+  NewsReadStatus }o--|| User : "userId"
+  NewsReadStatus }o--|| NewsItem : "newsItemId"
+```
+
+- NewsItem は全ユーザーで共有する集約ルート(per-instance、not per-user)
+- NewsReadStatus は「ユーザーが既読にした」という事実のみを記録。削除によって「未読に戻す」ことも可能
+
+### Physical Data Model
+
+**NewsItem Collection** (`newsitems`):
+
+```typescript
+const NewsItemSchema = new Schema<INewsItem>({
+  externalId: { type: String, required: true, unique: true },
+  title: { type: Map, of: String, required: true },
+  body: { type: Map, of: String },
+  emoji: { type: String },
+  url: { type: String },
+  publishedAt: { type: Date, required: true, index: true },
+  fetchedAt: { type: Date, required: true, index: { expires: '90d' } },
+  conditions: {
+    targetRoles: [{ type: String }],
+  },
+});
+```
+
+**NewsReadStatus Collection** (`newsreadstatuses`):
+
+```typescript
+const NewsReadStatusSchema = new Schema<INewsReadStatus>({
+  userId: { type: Schema.Types.ObjectId, required: true, ref: 'User' },
+  newsItemId: { type: Schema.Types.ObjectId, required: true, ref: 'NewsItem' },
+  readAt: { type: Date, required: true, default: Date.now },
+});
+NewsReadStatusSchema.index({ userId: 1, newsItemId: 1 }, { unique: true });
+```
+
+### Data Contracts & Integration
+
+**API レスポンス型**:
+
+```typescript
+interface INewsItemWithReadStatus {
+  _id: string;
+  externalId: string;
+  title: Record<string, string>;
+  body?: Record<string, string>;
+  emoji?: string;
+  url?: string;
+  publishedAt: string;  // ISO 8601
+  conditions?: { targetRoles?: string[] };
+  isRead: boolean;
+}
+
+interface PaginateResult<T> {
+  docs: T[];
+  totalDocs: number;
+  limit: number;
+  offset: number;
+  hasNextPage: boolean;
+}
+```
+
+---
+
+## Error Handling
+
+### Error Strategy
+
+フィード取得はフォールバック優先(失敗しても既存データを維持)。API エンドポイントは fail-fast(認証エラーは即時 401)。
+
+### Error Categories and Responses
+
+| カテゴリ | エラー | 対応 |
+|---|---|---|
+| Cron / External | フィード取得失敗(ネットワーク、タイムアウト) | `logger.error` + 既存 DB データ維持。次回 cron で再試行 |
+| Cron / Config | `NEWS_FEED_URL` 未設定 | スキップ(ログなし)。設定されるまで無害に動作 |
+| Cron / Validation | `growiVersionRegExps` に不正 regex | try-catch で該当アイテムをスキップ、`logger.warn` |
+| API / Auth | 未認証リクエスト | 401(`loginRequiredStrictly` が処理) |
+| API / Validation | 不正な `newsItemId` フォーマット | 400(`mongoose.isValidObjectId()` チェック) |
+| API / Conflict | `mark-read` の重複呼び出し | upsert で冪等処理。エラーなし |
+
+### Monitoring
+
+- `NewsCronService.executeJob()` の成功/失敗を `logger.info` / `logger.error` で記録
+- `mark-read` 件数を `logger.debug` で記録(デバッグ用)
+
+---
+
+## Testing Strategy
+
+### Unit Tests
+
+- `NewsCronService.executeJob()`: 正常取得 → upsert、取得失敗 → DB 変更なし、`NEWS_FEED_URL` 未設定 → スキップ
+- `NewsCronService.executeJob()`: `growiVersionRegExps` 一致 → 保存、不一致 → 除外
+- `NewsService.listForUser()`: `targetRoles` フィルタ(admin のみ、general 除外)
+- `NewsService.listForUser()`: `onlyUnread=true` で未読のみ返す
+- `NewsService.getUnreadCount()`: 未読件数の正確な計算
+
+### Integration Tests
+
+- `GET /apiv3/news/list`: ロール別フィルタが正しく動作する
+- `POST /apiv3/news/mark-read`: 2回呼んでもエラーなし(冪等性)
+- `POST /apiv3/news/mark-all-read` 後に `GET /apiv3/news/unread-count` が 0 を返す
+- 未認証リクエストが 401 を返す
+
+### Component Tests
+
+- `NewsItem`: `emoji` 未設定時に 📢 が表示される
+- `NewsItem`: `title` ロケールフォールバック(`browserLocale → ja_JP → en_US → 最初に利用可能なキー`)
+- `NewsItem`: 未読時に `fw-bold` + 青ドット、既読時に `fw-normal` + スペーサー
+- `InAppNotificationForms`: フィルタタブのクリックで `activeFilter` が変わる
+
+---
+
+## Security Considerations
+
+- すべての `/apiv3/news/*` エンドポイントに `loginRequiredStrictly` を適用する
+- `conditions.targetRoles` のフィルタリングはサーバーサイドの `NewsService.listForUser()` で強制する。クライアントから `targetRoles` パラメータを受け付けない
+- `NEWS_FEED_URL` は `https://` のみ許可(HTTP 不可)
+- フィードから取得したデータはそのまま DB に保存し、クライアントへのレスポンス時に Mongoose スキーマで型安全に扱う
+
+## Performance & Scalability
+
+- NewsItem は全ユーザーで1件共有のため、ユーザー数に比例してドキュメントが増えない
+- `publishedAt` インデックスにより降順ソートが効率的
+- `fetchedAt` TTL インデックス(90日)で古いデータを自動削除し、コレクションサイズを制限
+- `NewsReadStatus` の compound unique index により `listForUser` の LEFT JOIN 相当クエリが効率的

+ 1 - 1
.kiro/specs/news-inappnotification/requirements.md

@@ -72,7 +72,7 @@ GROWI の InAppNotification にニュース配信・表示機能を追加する
 2. The InAppNotificationパネル shall 上部にフィルタボタン(「すべて」「通知」「お知らせ」)を配置し、デフォルトは「すべて」とする。「お知らせ」選択時はニュースのみ、「通知」選択時はニュース以外のすべての通知を表示する
 3. The InAppNotificationパネル shall 既存の「未読のみ」トグルスイッチを維持し、種別フィルタと組み合わせた2重フィルタリングを提供する。種別フィルタ(すべて/通知/お知らせ)で表示対象を絞り込んだ上で、トグルON時は未読アイテムのみをさらに絞り込む
 4. The InAppNotificationパネル shall リスト領域に最大高さを設定し、超過分はスクロールで表示する。スクロールが末端に達した場合は次のページを自動で読み込む無限スクロールとする
-5. The InAppNotificationパネル shall ニュースアイテムの `type` に応じた絵文字アイコンをタイトル前に表示する(`release`→🎉, `security`→⚠️, `tips`→💡, `maintenance`→🔧, `announcement`→📢, 未設定→📢)
+5. The InAppNotificationパネル shall ニュースアイテムの `emoji` フィールドをタイトル前に表示する。`emoji` 未設定の場合は 📢 をフォールバックとして使用する
 6. When ユーザーがニュースアイテムをクリックした場合, the InAppNotification UI shall ニュースの詳細 URL を新しいタブで開く
 7. When ユーザーがニュースアイテムをクリックした場合, the InAppNotification UI shall 該当ニュースを既読としてマークし、未読インジケータを更新する
 

+ 142 - 0
.kiro/specs/news-inappnotification/research.md

@@ -0,0 +1,142 @@
+# Research & Design Decisions
+
+---
+**Purpose**: Discovery findings and architectural rationale for the news-inappnotification feature.
+
+---
+
+## Summary
+
+- **Feature**: `news-inappnotification`
+- **Discovery Scope**: Complex Integration(新機能 + 既存 InAppNotification UI 拡張)
+- **Key Findings**:
+  - `CronService` 抽象クラスが `server/service/cron.ts` に存在。`NewsCronService extends CronService` のみで cron 基盤が利用可能
+  - `InfiniteScroll` コンポーネントが `client/components/InfiniteScroll.tsx` に存在。`SWRInfiniteResponse` を受け取る汎用実装で再利用可能
+  - サイドバーパネルは `Sidebar/InAppNotification/InAppNotification.tsx` が state を管理。フィルタ追加はここへの `useState` 追加で対応できる
+  - マージドビュー(すべて)はサーバーサイド JOIN 不要。クライアントサイドで日時ソートするだけで実現できる
+  - 既存 `useSWRxInAppNotifications` は `useSWR`(ページネーション)ベース。無限スクロールのために `useSWRInfinite` 版(`useSWRINFx` prefix)を新設する必要がある
+
+---
+
+## Research Log
+
+### InAppNotification 既存実装の分析
+
+- **Context**: NewsItem を既存 InAppNotification に乗せるか、別モデルにするかの判断
+- **Sources**: `server/models/in-app-notification.ts`, `server/routes/apiv3/in-app-notification.ts`, `server/service/in-app-notification.ts`
+- **Findings**:
+  - InAppNotification は per-user ドキュメント設計。`user` フィールドが必須で、配信時点で全ユーザー分のドキュメントを生成する
+  - `status` フィールド(UNOPENED/OPENED)は per-user ドキュメントが存在することを前提としており、配信時点でのドキュメント生成が不可避
+  - `targetModel` と `action` が enum 制約を持ち、ニュースの externalId 管理に使えない
+  - `snapshot` フィールドにニュース本文を格納した場合、ユーザー数分の本文コピーが発生する
+- **Implications**: NewsItem は別モデルとして実装する。requirements.md の Note に記載された設計根拠が技術的に正確であることを確認
+
+### CronService パターンの確認
+
+- **Context**: フィード定期取得の実装方針
+- **Sources**: `server/service/cron.ts`, `server/service/access-token/access-token-deletion-cron.ts`
+- **Findings**:
+  - `abstract getCronSchedule(): string` と `abstract executeJob(): Promise<void>` を実装するだけでよい
+  - `node-cron` を使用。スケジュール変更は `getCronSchedule()` のオーバーライドで対応
+  - `startCron()` を呼ぶだけで cron が開始される
+- **Implications**: `NewsCronService` の実装は最小限で済む
+
+### InfiniteScroll 実装パターン
+
+- **Context**: 要件 5.4「無限スクロール」の実装方針
+- **Sources**: `client/components/InfiniteScroll.tsx`, `stores/page-listing.tsx`
+- **Findings**:
+  - `InfiniteScroll` コンポーネントは `SWRInfiniteResponse` を props で受け取る汎用コンポーネント
+  - `IntersectionObserver` でセンチネル要素を監視し、`setSize(size + 1)` でページ追加
+  - `useSWRInfinite` のキー命名規則: `useSWRINFx*` prefix
+  - `InAppNotificationSubstance.tsx` に `// TODO: Infinite scroll implemented` コメントあり。今回の実装でこの TODO を解消する
+- **Implications**: `useSWRINFxNews` と `useSWRINFxInAppNotifications` を新設し、既存の `InfiniteScroll` コンポーネントをそのまま利用する
+
+### フロントエンド状態管理パターン
+
+- **Context**: フィルタタブ(すべて/通知/お知らせ)と未読トグルの状態管理方針
+- **Sources**: `Sidebar/InAppNotification/InAppNotification.tsx`, Jotai atom パターン
+- **Findings**:
+  - 既存の「未読のみ」トグルは `useState` で管理され、prop として子コンポーネントに渡している
+  - Jotai は cross-component の持続的 state に使用。パネル内のローカル UI state には `useState` で十分
+  - フィルタタブは同様に `useState` で `'all' | 'news' | 'notifications'` を管理する
+- **Implications**: Jotai は不要。`useState` で統一する
+
+### クライアントサイドマージの実現可能性
+
+- **Context**: 「すべて」フィルタで通知とニュースを時系列マージする実装
+- **Findings**:
+  - InAppNotification は `createdAt` 順、NewsItem は `publishedAt` 順
+  - 両者を `useSWRInfinite` で別々に取得し、各ページのデータをマージしてソート
+  - ページング境界をまたぐマージは複雑になるため、「すべて」フィルタ時は両 API を large limit(例: 20件)で fetch し、クライアントマージする方針
+- **Implications**: 無限スクロールのマージは実装複雑度が高い。「すべて」フィルタ時は両データソースを独立した `useSWRInfinite` で管理し、表示時にマージする
+
+### i18n キー管理
+
+- **Context**: 新規 UI ラベルの多言語化
+- **Sources**: `public/static/locales/ja_JP/commons.json`
+- **Findings**:
+  - `in_app_notification` 名前空間に既存キーが存在(`only_unread`, `no_notification` 等)
+  - 対応ロケール: `ja_JP`, `en_US`, `zh_CN`, `ko_KR`, `fr_FR`
+- **Implications**: 同名前空間に追加キー(`news`, `all`, `notifications`, `no_news`)を追加する
+
+---
+
+## Architecture Pattern Evaluation
+
+| Option | Description | Strengths | Risks | Notes |
+|---|---|---|---|---|
+| サーバーサイドマージ | DB の aggregate で通知+ニュースを JOIN してソート | クライアントが単純 | 異なるモデルの JOIN は複雑、ページング境界の処理が難しい | 採用しない |
+| **クライアントサイドマージ** | 別 API で取得しクライアントで日時ソート | 各 API が独立してシンプル | 「すべて」時は2回 API コール | **採用** |
+| ニュース専用ページ | `/me/news` 等の別ページにニュースを表示 | 実装シンプル | 導線が分散、要件 5.1 に不合致 | 採用しない |
+
+---
+
+## Design Decisions
+
+### Decision: NewsItem と NewsReadStatus を別モデルとする
+
+- **Context**: InAppNotification モデルで代替できないか検討
+- **Alternatives Considered**:
+  1. InAppNotification モデルを拡張して newsItem を追加
+  2. 新規 NewsItem + NewsReadStatus モデルを作成
+- **Selected Approach**: 新規モデルを作成(Option 2)
+- **Rationale**: InAppNotification は per-user ドキュメント設計。配信時に全ユーザー分のドキュメントを生成する必要があり、SaaS 規模でストレージ効率が悪い。NewsItem は全ユーザーで1件を共有し、NewsReadStatus は既読時のみ作成する
+- **Trade-offs**: 新モデル追加のコストはあるが、スケール時のストレージ効率は大幅に向上する
+- **Follow-up**: TTL インデックス(90日)の動作確認
+
+### Decision: growiVersionRegExps のフィルタは cron 側で適用
+
+- **Context**: バージョン条件のフィルタタイミング
+- **Alternatives Considered**:
+  1. DB に全件保存し、API クエリ時にフィルタ
+  2. cron 取得時にフィルタし、該当アイテムのみ保存
+- **Selected Approach**: cron 取得時にフィルタ(Option 2)
+- **Rationale**: GROWI のバージョンはインスタンス起動時に確定し、動的に変わらない。DB に不要なデータを保存しない方がクリーン
+- **Trade-offs**: バージョンアップ後に古いアイテムが再表示されない(次回 cron まで)。許容範囲内
+
+### Decision: useSWRInfinite で InAppNotification も再実装
+
+- **Context**: 既存 `useSWRxInAppNotifications` は `useSWR` ベース(ページネーション)
+- **Alternatives Considered**:
+  1. 既存 hook をそのまま使い、InAppNotification の無限スクロールは別途実装
+  2. `useSWRInfinite` ベースの新 hook に切り替え
+- **Selected Approach**: `useSWRINFxInAppNotifications` を新設(Option 2)
+- **Rationale**: `InfiniteScroll` コンポーネントは `SWRInfiniteResponse` を要求する。既存 TODO コメントも無限スクロール実装を示唆している
+- **Trade-offs**: 既存 `useSWRxInAppNotifications` は `InAppNotificationPage.tsx` でも使われているため、両方を維持する
+
+---
+
+## Risks & Mitigations
+
+- クライアントサイドマージで「すべて」フィルタ時に2倍の API コール — 初回は許容。将来的にサーバーサイド集約 API を検討
+- フィード URL が HTTPS でない場合のセキュリティリスク — `NEWS_FEED_URL` のバリデーションで `https://` を強制
+- `growiVersionRegExps` の regex が不正な場合 — try-catch でキャッチし、そのアイテムをスキップしてログ記録
+
+---
+
+## References
+
+- [node-cron documentation](https://github.com/node-cron/node-cron) — cron スケジュール構文
+- [SWR Infinite Loading](https://swr.vercel.app/docs/pagination#infinite-loading) — `useSWRInfinite` パターン
+- [Mongoose TTL indexes](https://mongoosejs.com/docs/guide.html#indexes) — TTL インデックス設定

+ 5 - 5
.kiro/specs/news-inappnotification/spec.json

@@ -3,18 +3,18 @@
   "created_at": "2026-03-24T00:00:00Z",
   "updated_at": "2026-03-24T01:00:00Z",
   "language": "ja",
-  "phase": "requirements-generated",
+  "phase": "tasks-generated",
   "approvals": {
     "requirements": {
       "generated": true,
-      "approved": false
+      "approved": true
     },
     "design": {
-      "generated": false,
-      "approved": false
+      "generated": true,
+      "approved": true
     },
     "tasks": {
-      "generated": false,
+      "generated": true,
       "approved": false
     }
   },

+ 150 - 0
.kiro/specs/news-inappnotification/tasks.md

@@ -0,0 +1,150 @@
+# Implementation Plan
+
+- [ ] 0. 動作確認用ローカルフィードサーバーをセットアップする
+  - `/tmp/feed.json` にサンプルフィードファイルを作成する。`emoji` あり・なし(未設定時は 📢 フォールバック確認)、`title`/`body` の多言語フィールド(`ja_JP`, `en_US`)、`url` あり・なし、`conditions.targetRoles`(admin のみ、全ユーザー)の両パターンを含む複数アイテムで構成する
+  - devcontainer 内で `cd /tmp && python3 -m http.server 8099` を起動し、`http://localhost:8099/feed.json` でアクセスできることを確認する
+  - `.env` に `NEWS_FEED_URL=http://localhost:8099/feed.json` を追加する
+  - 以降のタスクで cron 動作確認が必要な場合はこのサーバーを使用する
+  - _Requirements: 1.1, 1.6_
+
+- [ ] 1. データモデルを実装する
+- [ ] 1.1 (P) NewsItem モデルを実装する
+  - `externalId`(ユニークインデックス)、多言語 `title`/`body`(Map of String)、`emoji`、`url`、`publishedAt`(インデックス)、`fetchedAt`(TTL 90日インデックス)、`conditions.targetRoles` を持つ Mongoose スキーマを定義する
+  - 型インターフェース `INewsItem` と `INewsItemHasId` を定義する
+  - _Requirements: 2.1, 2.2, 2.3, 2.4_
+
+- [ ] 1.2 (P) NewsReadStatus モデルを実装する
+  - `userId`・`newsItemId` の複合ユニークインデックス、`readAt` を持つ Mongoose スキーマを定義する
+  - 型インターフェース `INewsReadStatus` を定義する
+  - _Requirements: 3.3_
+
+- [ ] 2. ニュースサービス層を実装する
+- [ ] 2.1 ニュース一覧取得ロジックを実装する
+  - `listForUser(userId, userRoles, { limit, offset, onlyUnread })` を実装する
+  - `conditions.targetRoles` が未設定または `userRoles` に一致するアイテムのみ返すロール別フィルタを適用する
+  - NewsReadStatus との突き合わせにより各アイテムに `isRead: boolean` を付与する
+  - 結果は `publishedAt` 降順で返す
+  - _Requirements: 3.4, 4.1, 4.2_
+
+- [ ] 2.2 既読管理ロジックを実装する
+  - `markRead(userId, newsItemId)` を実装する。NewsReadStatus を upsert することで冪等性を保証する
+  - `markAllRead(userId, userRoles)` を実装する。ロール別フィルタに合致する全未読アイテムを一括既読にする
+  - `getUnreadCount(userId, userRoles)` を実装する
+  - _Requirements: 3.1, 3.2, 3.5_
+
+- [ ] 2.3 フィード同期ロジックを実装する
+  - `upsertNewsItems(items)` を実装する。`externalId` をキーに upsert し、`fetchedAt` を更新する
+  - `deleteNewsItemsByExternalIds(externalIds)` を実装する
+  - _Requirements: 1.2, 1.3_
+
+- [ ] 3. News API エンドポイントを実装する
+- [ ] 3.1 (P) ニュース取得エンドポイントを実装する
+  - `GET /apiv3/news/list`(`limit`, `offset`, `onlyUnread` クエリパラメータ)を実装する
+  - `GET /apiv3/news/unread-count` を実装する
+  - 全エンドポイントに `loginRequiredStrictly` と `accessTokenParser` を適用する
+  - _Requirements: 3.4, 3.5, 4.1, 4.2_
+
+- [ ] 3.2 (P) ニュース既読操作エンドポイントを実装する
+  - `POST /apiv3/news/mark-read`(`newsItemId` を受け取る)を実装する。`newsItemId` を `mongoose.isValidObjectId()` で検証する
+  - `POST /apiv3/news/mark-all-read` を実装する
+  - 全エンドポイントに `loginRequiredStrictly` と `accessTokenParser` を適用する
+  - _Requirements: 3.1, 3.2_
+
+- [ ] 3.3 News API ルートをアプリに登録する
+  - Express アプリの apiv3 ルーター定義に `news.ts` を追加する
+  - _Requirements: 3.1, 3.4_
+
+- [ ] 4. NewsCronService を実装する
+- [ ] 4.1 (P) フィード取得・DB 同期処理を実装する
+  - `CronService` を継承し `getCronSchedule()` で `'0 1 * * *'` を返す
+  - `executeJob()` を実装する:`NEWS_FEED_URL` 未設定時はスキップ、HTTP GET、取得失敗時はログ記録のみ(既存データ維持)
+  - 取得した各アイテムの `growiVersionRegExps` と現バージョンを照合し、不一致アイテムを除外する。不正 regex は try-catch でスキップしてログ警告する
+  - フィード外のアイテムを DB から削除し、ランダムスリープ(0–5分)でリクエストを分散する
+  - _Requirements: 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7_
+
+- [ ] 4.2 cron をアプリ起動時に登録する
+  - アプリの初期化処理で `NewsCronService.startCron()` を呼ぶ
+  - _Requirements: 1.1_
+
+- [ ] 5. フロントエンド SWR フックを実装する
+- [ ] 5.1 (P) ニュース用 SWR フックを新設する
+  - `useSWRINFxNews(limit, options)` を `useSWRInfinite` ベースで実装する。キーに `limit`, `pageIndex`, `onlyUnread` を含める
+  - `useSWRxNewsUnreadCount()` を実装する
+  - _Requirements: 5.4, 7.1_
+
+- [ ] 5.2 (P) InAppNotification 用の無限スクロール対応フックを追加する
+  - 既存 `useSWRxInAppNotifications`(`useSWR` ベース)に加えて `useSWRINFxInAppNotifications(limit, options)` を `useSWRInfinite` ベースで新設する
+  - 既存フックは `InAppNotificationPage.tsx` での利用のため維持する
+  - _Requirements: 5.4_
+
+- [ ] 6. InAppNotification パネルを改修する
+- [ ] 6.1 フィルタタブを追加する
+  - `InAppNotification.tsx` に `activeFilter: 'all' | 'news' | 'notifications'` の state(デフォルト `'all'`)を追加し、`InAppNotificationForms` と `InAppNotificationContent` へ prop として渡す
+  - `InAppNotificationForms` に Bootstrap `btn-group` でフィルタボタン(「すべて」「通知」「お知らせ」)を追加する。既存「未読のみ」トグルは維持する
+  - _Requirements: 5.2, 5.3_
+
+- [ ] 6.2 無限スクロールを導入する
+  - `InAppNotificationContent` で `useSWRINFxNews` と `useSWRINFxInAppNotifications` を使用するよう変更する
+  - 既存の `InfiniteScroll` コンポーネントをラップしてリストを表示する
+  - 既存の `// TODO: Infinite scroll implemented` コメントを解消する
+  - _Requirements: 5.4_
+
+- [ ] 6.3 「すべて」フィルタ時のクライアントサイドマージを実装する
+  - `activeFilter === 'all'` の場合、通知(`createdAt`)とニュース(`publishedAt`)を日時降順でマージして表示する
+  - `activeFilter === 'news'` の場合は NewsItem のみ、`activeFilter === 'notifications'` の場合は InAppNotification のみ表示する
+  - _Requirements: 5.1, 5.2_
+
+- [ ] 7. NewsItem コンポーネントを実装する
+- [ ] 7.1 (P) ニュースアイテムの表示コンポーネントを実装する
+  - `emoji` フィールドをタイトル前に表示する。未設定時は 📢 をフォールバックとする
+  - 多言語タイトルをブラウザ言語で解決する。フォールバック順は `browserLocale → ja_JP → en_US → 最初に利用可能なキー`
+  - 未読時はタイトルを `fw-bold` + 左端に `bg-primary` 8px 丸ドット、既読時は `fw-normal` + 同幅の透明スペーサーで表示する
+  - _Requirements: 5.5, 6.1, 6.2, 6.3, 6.4, 8.1, 8.2_
+
+- [ ] 7.2 (P) ニュースアイテムのクリック処理を実装する
+  - クリック時に `POST /apiv3/news/mark-read` を呼び、SWR キャッシュを mutate して未読インジケータを更新する
+  - `url` が設定されている場合は新しいタブで開く
+  - _Requirements: 5.6, 5.7_
+
+- [ ] 8. (P) 未読バッジにニュース未読数を合算する
+  - `PrimaryItemForNotification` で `useSWRxNewsUnreadCount` を呼び、既存の InAppNotification 未読カウントと合算してバッジに表示する
+  - 全ニュースが既読の場合はニュース分のカウントを含めない
+  - _Requirements: 7.1, 7.2_
+
+- [ ] 9. (P) i18n ロケールファイルを更新する
+  - `commons.json` の `in_app_notification` 名前空間に以下のキーを全ロケール(`ja_JP`, `en_US`, `zh_CN`, `ko_KR`, `fr_FR`)に追加する:`news`(お知らせ)、`notifications`(通知)、`all`(すべて)、`no_news`(ニュースはありません)
+  - _Requirements: 8.3, 8.4_
+
+- [ ] 10. サーバーサイドテストを実装する
+- [ ] 10.1 NewsCronService のテストを実装する
+  - `executeJob()` が正常取得時に upsert・削除を行うことを確認する
+  - `NEWS_FEED_URL` 未設定時にスキップすることを確認する
+  - フィード取得失敗時に DB データが変更されないことを確認する
+  - `growiVersionRegExps` の一致・不一致・不正 regex の各ケースをテストする
+  - _Requirements: 1.1, 1.2, 1.3, 1.5, 1.6, 1.7_
+
+- [ ] 10.2 NewsService のテストを実装する
+  - `listForUser()` がロール別フィルタを正しく適用し `isRead` を付与することを確認する
+  - `onlyUnread=true` で未読のみ返ることを確認する
+  - `markRead()` の冪等性(2回呼んでもエラーなし)を確認する
+  - `getUnreadCount()` が `markAllRead()` 後に 0 を返すことを確認する
+  - _Requirements: 3.1, 3.2, 3.3, 3.4, 3.5, 4.1, 4.2_
+
+- [ ] 10.3 News API 統合テストを実装する
+  - `GET /apiv3/news/list` がロール別フィルタを強制することを確認する
+  - `POST /apiv3/news/mark-read` が冪等であることを確認する
+  - 未認証リクエストが 401 を返すことを確認する
+  - _Requirements: 3.1, 3.4, 4.1_
+
+- [ ] 11. フロントエンドテストを実装する
+- [ ] 11.1 NewsItem コンポーネントのテストを実装する
+  - `emoji` 未設定時に 📢 が表示されることをテストする
+  - タイトルのロケールフォールバック(`browserLocale → ja_JP → en_US → 最初に利用可能なキー`)をテストする
+  - 未読・既読の視覚表示(`fw-bold`、青ドット、スペーサー)をテストする
+  - クリック時に `mark-read` が呼ばれ、`url` がある場合に新タブで開くことをテストする
+  - _Requirements: 5.5, 5.6, 5.7, 6.1, 6.2, 6.3, 6.4, 8.1, 8.2_
+
+- [ ]* 11.2 InAppNotification パネルのフィルタ動作をテストする
+  - フィルタタブ切り替えで表示対象が変わることを確認する(5.2 の AC カバレッジ)
+  - 「未読のみ」トグルとの組み合わせで2重フィルタリングが機能することを確認する(5.3 の AC カバレッジ)
+  - _Requirements: 5.2, 5.3_

+ 0 - 14
.kiro/specs/suggest-path/design.md

@@ -343,17 +343,3 @@ interface SuggestPathResponse {
 
 Each component fails independently. Memo is always generated first as guaranteed fallback.
 
-## Security Considerations
-
-- **Authentication**: All requests require valid API token or login session (standard middleware)
-- **Authorization**: Search results are permission-scoped via `searchKeyword()` user/group parameters
-- **Input safety**: Content body is passed to GROWI AI, not directly to Elasticsearch — no NoSQL injection risk
-- **AI prompt injection**: System prompt and user content are separated to minimize prompt injection risk
-- **Information leakage**: Error responses use generic messages (Req 9.2)
-
-## Performance Considerations
-
-- Content analysis and candidate evaluation are sequential (ES sits between) — 2 AI roundtrips minimum
-- Search-evaluate pipeline and category generation run in parallel to minimize total latency
-- ES snippets (not full page bodies) are passed to AI to manage context budget
-- Score threshold filtering reduces the number of candidates passed to the 2nd AI call

+ 1 - 1
.kiro/specs/suggest-path/requirements.md

@@ -36,7 +36,7 @@ Phase 2 was revised based on reviewer feedback: (1) flow/stock information class
 
 **Summary**: Extracts top-level path segment from keyword-matched pages as a `category` type suggestion. Includes parent grant. Omitted if no match found.
 
-> **Note**: May overlap with the AI-based evaluation approach (Reqs 11, 12). Whether to retain, merge, or remove will be determined after reviewer discussion.
+> **Note**: After reviewer discussion, the prior implementation was retained as-is. Potential overlap with the AI-based evaluation approach (Reqs 11, 12) was acknowledged; merging or removal deferred to a future iteration.
 
 ### Requirement 5: Content Analysis via GROWI AI (Phase 2)
 

+ 1 - 1
.kiro/specs/suggest-path/spec.json

@@ -1,7 +1,7 @@
 {
   "feature_name": "suggest-path",
   "created_at": "2026-02-10T12:00:00Z",
-  "updated_at": "2026-03-23T00:00:00Z",
+  "updated_at": "2026-04-15T00:00:00Z",
   "language": "en",
   "phase": "implementation-complete",
   "approvals": {

+ 0 - 2
.kiro/specs/suggest-path/tasks.md

@@ -20,8 +20,6 @@
 
 ## Post-Implementation Refactoring (from code review)
 
-See `gap-analysis.md` for detailed rationale.
-
 - [x] 8. Simplify service layer abstractions
 - [x] 8.1 Remove `GenerateSuggestionsDeps` DI pattern from `generate-suggestions.ts`
 - [x] 8.2 Remove `RetrieveSearchCandidatesOptions` from `retrieve-search-candidates.ts`

+ 8 - 4
.kiro/steering/product.md

@@ -5,10 +5,13 @@ GROWI is a team collaboration wiki platform using Markdown, designed to help tea
 ## Core Capabilities
 
 1. **Hierarchical Wiki Pages**: Tree-structured page organization with path-based navigation (`/path/to/page`)
-2. **Markdown-First Editing**: Rich Markdown support with extensions (drawio, lsx, math) and real-time collaborative editing
-3. **Authentication Integrations**: Multiple auth methods (LDAP, SAML, OAuth, Passkey) for enterprise environments
-4. **Plugin System**: Extensible architecture via `@growi/pluginkit` for custom remark plugins and functionality
-5. **Multi-Service Architecture**: Modular services (PDF export, Slack integration) deployed independently
+2. **Markdown-First Editing**: Rich Markdown support with extensions (drawio, lsx, math) and Yjs-based real-time collaborative editing
+3. **AI-Assisted Editing**: OpenAI/Azure OpenAI integration for editor assistance, page path suggestion, and customizable AI assistants with knowledge bases (vector stores)
+4. **Authentication Integrations**: Multiple auth methods (LDAP, SAML, OAuth, Passkey) for enterprise environments
+5. **Plugin System**: Extensible architecture via `@growi/pluginkit` for custom remark plugins and functionality
+6. **Audit & Compliance**: Activity logging, audit log search (Elasticsearch-backed), and bulk export for compliance needs
+7. **Multi-Service Architecture**: Modular services (PDF export, Slack integration) deployed independently
+8. **Observability**: OpenTelemetry integration for monitoring and tracing
 
 ## Target Use Cases
 
@@ -31,4 +34,5 @@ GROWI is a team collaboration wiki platform using Markdown, designed to help tea
 - **Microservices**: Optional services (pdf-converter, slackbot-proxy) for enhanced functionality
 
 ---
+_Updated: 2026-04-16. Added AI assistant, audit/compliance, and observability capabilities._
 _Focus on patterns and purpose, not exhaustive feature lists_

+ 5 - 1
.kiro/steering/tech.md

@@ -55,5 +55,9 @@ The release image includes `node_modules/` at workspace root alongside `apps/app
 
 For apps/app-specific build optimization details (webpack config, null-loader rules, SuperJSON architecture, module count KPI), see `apps/app/.claude/skills/build-optimization/SKILL.md`.
 
+### Logging
+
+The monorepo uses **pino** (via `@growi/logger`) as the standard logging library. Legacy bunyan usage has been migrated.
+
 ---
-_Updated: 2026-03-17. Turbopack now used for production builds; expanded justified-deps list; added Production Assembly Pattern._
+_Updated: 2026-04-16. Added pino logging note._

+ 32 - 18
CLAUDE.md

@@ -1,8 +1,8 @@
 @AGENTS.md
 
-# AI-DLC and Spec-Driven Development
+# Agentic SDLC and Spec-Driven Development
 
-Kiro-style Spec Driven Development implementation on AI-DLC (AI Development Life Cycle)
+Kiro-style Spec-Driven Development on an agentic SDLC
 
 ## Project Context
 
@@ -17,34 +17,48 @@ Kiro-style Spec Driven Development implementation on AI-DLC (AI Development Life
 
 ### Active Specifications
 - Check `.kiro/specs/` for active specifications
-- Use `/kiro:spec-status [feature-name]` to check progress
+- Use `/kiro-spec-status [feature-name]` to check progress
 
 ## Development Guidelines
 - Think in English, generate responses in English. All Markdown content written to project files (e.g., requirements.md, design.md, tasks.md, research.md, validation reports) MUST be written in the target language configured for this specification (see spec.json.language).
-- **Note**: `spec.json.language` controls the language of spec document content only. It does NOT control the conversation response language. The conversation language is governed by the Language Policy in AGENTS.md.
 
 ## Minimal Workflow
-- Phase 0 (optional): `/kiro:steering`, `/kiro:steering-custom`
+- Phase 0 (optional): `/kiro-steering`, `/kiro-steering-custom`
+- Discovery: `/kiro-discovery "idea"` — determines action path, writes brief.md + roadmap.md for multi-spec projects
 - Phase 1 (Specification):
-  - `/kiro:spec-init "description"`
-  - `/kiro:spec-requirements {feature}`
-  - `/kiro:validate-gap {feature}` (optional: for existing codebase)
-  - `/kiro:spec-design {feature} [-y]`
-  - `/kiro:validate-design {feature}` (optional: design review)
-  - `/kiro:spec-tasks {feature} [-y]`
-- Phase 2 (Implementation): `/kiro:spec-impl {feature} [tasks]`
-  - `/kiro:validate-impl {feature}` (optional: after implementation)
-  - `/kiro:spec-cleanup {feature}` (optional: organize specs post-implementation)
-- Progress check: `/kiro:spec-status {feature}` (use anytime)
+  - Single spec: `/kiro-spec-quick {feature} [--auto]` or step by step:
+    - `/kiro-spec-init "description"`
+    - `/kiro-spec-requirements {feature}`
+    - `/kiro-validate-gap {feature}` (optional: for existing codebase)
+    - `/kiro-spec-design {feature} [-y]`
+    - `/kiro-validate-design {feature}` (optional: design review)
+    - `/kiro-spec-tasks {feature} [-y]`
+  - Multi-spec: `/kiro-spec-batch` — creates all specs from roadmap.md in parallel by dependency wave
+- Phase 2 (Implementation): `/kiro-impl {feature} [tasks]`
+  - Without task numbers: autonomous mode (subagent per task + independent review + final validation)
+  - With task numbers: manual mode (selected tasks in main context, still reviewer-gated before completion)
+  - `/kiro-validate-impl {feature}` (standalone re-validation)
+- Phase 3 (Post-implementation): `/kiro-spec-cleanup {feature}` — trim HOW, preserve WHY for future refactoring
+- Progress check: `/kiro-spec-status {feature}` (use anytime)
+
+## Skills Structure
+Skills are located in `.claude/skills/kiro-*/SKILL.md`
+- Each skill is a directory with a `SKILL.md` file
+- Skills run inline with access to conversation context
+- Skills may delegate parallel research to subagents for efficiency
+- Additional files (templates, examples) can be added to skill directories
+- `kiro-review` — task-local adversarial review protocol used by reviewer subagents
+- `kiro-debug` — root-cause-first debug protocol used by debugger subagents
+- `kiro-verify-completion` — fresh-evidence gate before success or completion claims
+- **If there is even a 1% chance a skill applies to the current task, invoke it.** Do not skip skills because the task seems simple.
 
 ## Development Rules
 - 3-phase approval workflow: Requirements → Design → Tasks → Implementation
 - Human review required each phase; use `-y` only for intentional fast-track
-- Keep steering current and verify alignment with `/kiro:spec-status`
+- Keep steering current and verify alignment with `/kiro-spec-status`
 - Follow the user's instructions precisely, and within that scope act autonomously: gather the necessary context and complete the requested work end-to-end in this run, asking questions only when essential information is missing or the instructions are critically ambiguous.
 
 ## Steering Configuration
 - Load entire `.kiro/steering/` as project memory
 - Default files: `product.md`, `tech.md`, `structure.md`
-- Custom files are supported (managed via `/kiro:steering-custom`)
-
+- Custom files are supported (managed via `/kiro-steering-custom`)

+ 2 - 3
README.md

@@ -82,15 +82,14 @@ See [GROWI Docs: Environment Variables](https://docs.growi.org/en/admin-guide/ad
 ## Dependencies
 
 - Node.js v24.x
-- npm 6.x
-- pnpm 9.x
+- pnpm 10.x
 - [Turborepo](https://turbo.build/repo)
 - MongoDB v6.x or v8.x
 
 ### Optional Dependencies
 
 - Redis 3.x
-- ElasticSearch 7.x or 8.x (needed when using Full-text search)
+- ElasticSearch 7.x or 8.x or 9.x (needed when using Full-text search)
   - **CAUTION: Following plugins are required**
     - [Japanese (kuromoji) Analysis plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/analysis-kuromoji.html)
     - [ICU Analysis Plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/analysis-icu.html)

+ 2 - 3
README_JP.md

@@ -82,15 +82,14 @@ Crowi からの移行は **[こちら](https://docs.growi.org/en/admin-guide/mig
 ## 依存関係
 
 - Node.js v24.x
-- npm 6.x
-- pnpm 9.x
+- pnpm 10.x
 - [Turborepo](https://turbo.build/repo)
 - MongoDB v6.x or v8.x
 
 ### オプションの依存関係
 
 - Redis 3.x
-- ElasticSearch 7.x or 8.x (needed when using Full-text search)
+- ElasticSearch 7.x or 8.x or 9.x (needed when using Full-text search)
   - **注意: 次のプラグインが必要です**
     - [Japanese (kuromoji) Analysis plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/analysis-kuromoji.html)
     - [ICU Analysis Plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/analysis-icu.html)

+ 1 - 4
apps/app/.claude/skills/build-optimization/SKILL.md

@@ -27,16 +27,13 @@ user-invocable: false
 
 ### Resolve Aliases (`turbopack.resolveAlias`)
 
-7 server-only packages + `fs` are aliased to `./src/lib/empty-module.ts` in browser context:
+4 server-only packages + `fs` are aliased to `./src/lib/empty-module.ts` in browser context:
 
 | Package | Reason |
 |---------|--------|
 | `fs` | Node.js built-in, not available in browser |
-| `dtrace-provider` | Native module, server-only |
 | `mongoose` | MongoDB driver, server-only |
 | `i18next-fs-backend` | File-system i18n loader, server-only |
-| `bunyan` | Server-side logger |
-| `bunyan-format` | Server-side logger formatter |
 | `core-js` | Server-side polyfills |
 
 - Uses conditional `{ browser: './src/lib/empty-module.ts' }` syntax so server-side resolution is unaffected

+ 2 - 0
apps/app/.gitignore

@@ -11,6 +11,8 @@ next.config.js
 /build/
 /dist/
 /transpiled/
+/config/**/*.js
+/config/**/*.d.ts
 /public/static/fonts
 /public/static/js
 /public/static/styles

+ 14 - 3
apps/app/bin/postbuild-server.ts

@@ -1,19 +1,25 @@
 /**
  * Post-build script for server compilation.
  *
- * tspc compiles both `src/` and `config/` (which will be migrated to TypeScript),
+ * tspc compiles both `src/` and `config/` (TypeScript files under config/),
  * so the output directory (`transpiled/`) mirrors the source tree structure
  * (e.g. `transpiled/src/`, `transpiled/config/`).
  *
  * Setting `rootDir: "src"` and `outDir: "dist"` in tsconfig would eliminate this script,
  * but that would break once `config/` is included in the compilation.
- * Instead, this script extracts only `transpiled/src/` into `dist/` and discards the rest.
+ *
+ * This script:
+ * 1. Extracts `transpiled/src/` into `dist/`
+ * 2. Copies compiled `transpiled/config/` files into `config/` so that
+ *    relative imports from `dist/` (e.g. `../../../config/logger/config.dev`)
+ *    resolve correctly at runtime.
  */
-import { readdirSync, renameSync, rmSync } from 'node:fs';
+import { cpSync, existsSync, readdirSync, renameSync, rmSync } from 'node:fs';
 
 const TRANSPILED_DIR = 'transpiled';
 const DIST_DIR = 'dist';
 const SRC_SUBDIR = `${TRANSPILED_DIR}/src`;
+const CONFIG_SUBDIR = `${TRANSPILED_DIR}/config`;
 
 // List transpiled contents for debugging
 // biome-ignore lint/suspicious/noConsole: This is a build script, console output is expected.
@@ -27,5 +33,10 @@ rmSync(DIST_DIR, { recursive: true, force: true });
 // Move transpiled/src -> dist
 renameSync(SRC_SUBDIR, DIST_DIR);
 
+// Copy compiled config files to app root config/ so runtime imports resolve
+if (existsSync(CONFIG_SUBDIR)) {
+  cpSync(CONFIG_SUBDIR, 'config', { recursive: true, force: true });
+}
+
 // Remove leftover transpiled directory
 rmSync(TRANSPILED_DIR, { recursive: true, force: true });

+ 5 - 1
apps/app/config/logger/config.dev.js → apps/app/config/logger/config.dev.ts

@@ -1,4 +1,6 @@
-module.exports = {
+import type { LoggerConfig } from '@growi/logger';
+
+const config: LoggerConfig = {
   default: 'info',
 
   // 'express-session': 'debug',
@@ -47,3 +49,5 @@ module.exports = {
   'growi:service:openai': 'debug',
   'growi:middleware:access-token-parser:access-token': 'debug',
 };
+
+export default config;

+ 5 - 1
apps/app/config/logger/config.prod.js → apps/app/config/logger/config.prod.ts

@@ -1,6 +1,10 @@
-module.exports = {
+import type { LoggerConfig } from '@growi/logger';
+
+const config: LoggerConfig = {
   default: 'info',
 
   'growi:routes:login-passport': 'debug',
   'growi:service:PassportService': 'debug',
 };
+
+export default config;

+ 2 - 0
apps/app/docker/Dockerfile.dockerignore

@@ -37,6 +37,8 @@ apps/slackbot-proxy
 # Documentation (no .md files are needed for build)
 # ============================================================
 **/*.md
+# Keep locale template .md files required at runtime by the installer
+!apps/app/resource/locales/**/*.md
 
 # ============================================================
 # Local environment overrides

+ 0 - 3
apps/app/next.config.ts

@@ -133,11 +133,8 @@ const nextConfig: NextConfig = {
       // Exclude fs from client bundle
       fs: { browser: './src/lib/empty-module.ts' },
       // Exclude server-only packages from client bundle
-      'dtrace-provider': { browser: './src/lib/empty-module.ts' },
       mongoose: { browser: './src/lib/empty-module.ts' },
       'i18next-fs-backend': { browser: './src/lib/empty-module.ts' },
-      bunyan: { browser: './src/lib/empty-module.ts' },
-      'bunyan-format': { browser: './src/lib/empty-module.ts' },
       'core-js': { browser: './src/lib/empty-module.ts' },
     },
   },

+ 3 - 9
apps/app/package.json

@@ -1,6 +1,6 @@
 {
   "name": "@growi/app",
-  "version": "7.5.0",
+  "version": "7.5.1-RC.0",
   "license": "MIT",
   "private": true,
   "scripts": {
@@ -66,7 +66,6 @@
     "@azure/identity": "^4.4.1",
     "@azure/openai": "^2.0.0",
     "@azure/storage-blob": "^12.16.0",
-    "@browser-bunyan/console-formatted-stream": "^1.8.0",
     "@codemirror/autocomplete": "^6.18.4",
     "@codemirror/commands": "^6.8.0",
     "@codemirror/lang-markdown": "^6.3.2",
@@ -83,6 +82,7 @@
     "@google-cloud/storage": "^5.8.5",
     "@growi/core": "workspace:^",
     "@growi/emoji-mart-data": "workspace:^",
+    "@growi/logger": "workspace:*",
     "@growi/pdf-converter-client": "workspace:^",
     "@growi/pluginkit": "workspace:^",
     "@growi/presentation": "workspace:^",
@@ -121,14 +121,12 @@
     "archiver": "^5.3.0",
     "array.prototype.flatmap": "^1.2.2",
     "async-canvas-to-blob": "^1.0.3",
-    "axios": "^1.11.0",
+    "axios": "^1.15.0",
     "axios-retry": "^3.2.4",
     "babel-plugin-superjson-next": "^0.4.2",
     "body-parser": "^1.20.3",
     "bootstrap": "^5.3.8",
-    "browser-bunyan": "^1.8.0",
     "bson-objectid": "^2.0.4",
-    "bunyan": "^1.8.15",
     "cm6-theme-basic-light": "^0.2.0",
     "codemirror": "^6.0.1",
     "compression": "^1.7.4",
@@ -150,7 +148,6 @@
     "ejs": "^3.1.10",
     "expose-gc": "^1.0.0",
     "express": "^4.20.0",
-    "express-bunyan-logger": "^1.3.3",
     "express-mongo-sanitize": "^2.1.0",
     "express-session": "^1.16.1",
     "express-validator": "^6.14.0",
@@ -275,7 +272,6 @@
     "uid-safe": "^2.1.5",
     "unified": "^11.0.0",
     "unist-util-visit": "^5.0.0",
-    "universal-bunyan": "^0.9.2",
     "unstated": "^2.1.1",
     "unzip-stream": "^0.3.2",
     "url-join": "^4.0.0",
@@ -304,7 +300,6 @@
     "@testing-library/jest-dom": "^6.5.0",
     "@testing-library/user-event": "^14.5.2",
     "@types/archiver": "^6.0.2",
-    "@types/bunyan": "^1.8.11",
     "@types/express": "^4.17.21",
     "@types/hast": "^3.0.4",
     "@types/js-cookie": "^3.0.6",
@@ -338,7 +333,6 @@
     "mdast-util-find-and-replace": "^3.0.1",
     "mongodb-connection-string-url": "^7.0.0",
     "mongodb-memory-server-core": "^9.1.1",
-    "morgan": "^1.10.0",
     "openapi-typescript": "^7.8.0",
     "rehype-rewrite": "^4.0.2",
     "remark-github-admonitions-to-directives": "^2.0.0",

+ 128 - 0
apps/app/playwright/23-editor/emacs-keymap.spec.ts

@@ -0,0 +1,128 @@
+import { expect, test } from '@playwright/test';
+
+/**
+ * Tests for Emacs keymap functionality in the editor.
+ * Verifies that the registered EmacsHandler bindings produce the expected
+ * markdown output in the editor source — i.e. the observable contract
+ * (content changes) rather than internal implementation details.
+ *
+ * Keymap isolation strategy: page.route intercepts GET /_api/v3/personal-setting/editor-settings
+ * and returns keymapMode:'emacs' without touching the database.  PUT requests are swallowed for
+ * the same reason.  Because the route is scoped to the test's page instance, no other test file
+ * is affected and no afterEach cleanup is required.
+ *
+ * @see packages/editor/src/client/services-internal/keymaps/emacs/
+ * Requirements: 4.1, 5.2, 9.3
+ */
+
+const EDITOR_SETTINGS_ROUTE = '**/_api/v3/personal-setting/editor-settings';
+
+test.describe
+  .serial('Emacs keymap mode', () => {
+    test.beforeEach(async ({ page }) => {
+      // Return keymapMode:'emacs' for every settings fetch without writing to DB.
+      // PUT requests (e.g. from UI interactions) are also swallowed so the DB stays clean.
+      await page.route(EDITOR_SETTINGS_ROUTE, async (route) => {
+        if (route.request().method() === 'GET') {
+          await route.fulfill({
+            contentType: 'application/json',
+            body: JSON.stringify({ keymapMode: 'emacs' }),
+          });
+        } else {
+          await route.fulfill({
+            status: 200,
+            contentType: 'application/json',
+            body: '{}',
+          });
+        }
+      });
+
+      await page.goto('/Sandbox/emacs-keymap-test-page');
+
+      // Open Editor
+      await expect(page.getByTestId('editor-button')).toBeVisible();
+      await page.getByTestId('editor-button').click();
+      await expect(page.locator('.cm-content')).toBeVisible();
+      await expect(page.getByTestId('grw-editor-navbar-bottom')).toBeVisible();
+    });
+
+    test('C-c C-s b should wrap text in bold markdown markers (Req 4.1)', async ({
+      page,
+    }) => {
+      // Focus the editor
+      await page.locator('.cm-content').click();
+
+      // With no selection, C-c C-s b inserts ** markers and positions cursor between them
+      await page.keyboard.press('Control+c');
+      await page.keyboard.press('Control+s');
+      await page.keyboard.press('b');
+
+      // Type text inside the inserted markers
+      await page.keyboard.type('bold text');
+
+      // Verify: bold markdown markers surround the typed text in the editor source
+      await expect(page.locator('.cm-content')).toContainText('**bold text**');
+    });
+
+    test('C-c C-l should insert a markdown link template (Req 5.2)', async ({
+      page,
+    }) => {
+      // Focus the editor
+      await page.locator('.cm-content').click();
+
+      // With no selection, C-c C-l inserts []() and positions cursor after [
+      await page.keyboard.press('Control+c');
+      await page.keyboard.press('Control+l');
+
+      // Type the link display text inside the brackets
+      await page.keyboard.type('link text');
+
+      // Verify: link template with typed display text appears in the editor source
+      await expect(page.locator('.cm-content')).toContainText('[link text]()');
+    });
+
+    test('C-c C-n should navigate cursor to the next heading (Req 9.3)', async ({
+      page,
+    }) => {
+      // Set up document with two headings.
+      // Fill directly and wait for the rendered heading text (without # markers) to appear in the
+      // preview, because appendTextToEditorUntilContains checks raw text which markdown headings
+      // strip on render.
+      await page
+        .locator('.cm-content')
+        .fill('# First Heading\n\n## Second Heading');
+      await expect(page.getByTestId('page-editor-preview-body')).toContainText(
+        'Second Heading',
+      );
+
+      // Click on the first line to position cursor before "## Second Heading"
+      await page.locator('.cm-line').first().click();
+
+      // Navigate to the next heading with C-c C-n
+      await page.keyboard.press('Control+c');
+      await page.keyboard.press('Control+n');
+
+      // Cursor is now at the beginning of "## Second Heading".
+      // Move to end of that line and append a unique marker to verify cursor position.
+      await page.keyboard.press('End');
+      await page.keyboard.type(' NAVIGATED');
+
+      // Verify: the marker was appended at the second heading, not the first
+      await expect(page.locator('.cm-content')).toContainText(
+        '## Second Heading NAVIGATED',
+      );
+    });
+
+    test('C-x C-s should save the page (Req 6.1)', async ({ page }) => {
+      // Type content to ensure there is something to save
+      await page.locator('.cm-content').click();
+      await page.keyboard.type('Emacs save test');
+
+      // Save with the Emacs two-stroke save binding
+      await page.keyboard.press('Control+x');
+      await page.keyboard.press('Control+s');
+
+      // Expect a success toast notification confirming the page was saved
+      await expect(page.locator('.Toastify__toast--success')).toBeVisible();
+    });
+  });

+ 2 - 1
apps/app/public/static/locales/en_US/translation.json

@@ -528,7 +528,8 @@
       "Code Block": "Code Block",
       "Comment Out": "Comment Out",
       "Comment Out Desc": "(Hide)"
-    }
+    },
+    "Other Shortcuts": "Other Shortcuts"
   },
   "modal_resolve_conflict": {
     "conflicts_with_new_body_on_server_side": "Conflict with new body on server side. Please select or edit the page body to resolve the conflict.",

+ 2 - 1
apps/app/public/static/locales/fr_FR/translation.json

@@ -524,7 +524,8 @@
       "Code Block": "Bloc de code",
       "Comment Out": "Masquer",
       "Comment Out Desc": "(Commenter)"
-    }
+    },
+    "Other Shortcuts": "Autres raccourcis"
   },
   "modal_resolve_conflict": {
     "file_conflicting_with_newer_remote": "Ce fichier est en conflit avec une autre version",

+ 2 - 1
apps/app/public/static/locales/ja_JP/translation.json

@@ -562,7 +562,8 @@
       "Code Block": "コードブロック",
       "Comment Out": "非表示にする",
       "Comment Out Desc": "(コメントアウト)"
-    }
+    },
+    "Other Shortcuts": "その他のショートカット"
   },
   "modal_resolve_conflict": {
     "conflicts_with_new_body_on_server_side": "サーバー側の新しい本文と衝突します。ページ本文を選択または編集して衝突を解消してください。",

+ 2 - 1
apps/app/public/static/locales/ko_KR/translation.json

@@ -499,7 +499,8 @@
       "Code Block": "코드 블록",
       "Comment Out": "주석 처리",
       "Comment Out Desc": "(숨기기)"
-    }
+    },
+    "Other Shortcuts": "기타 단축키"
   },
   "modal_resolve_conflict": {
     "conflicts_with_new_body_on_server_side": "서버 측의 새 본문과 충돌합니다. 충돌을 해결하려면 페이지 본문을 선택하거나 편집하십시오.",

+ 2 - 1
apps/app/public/static/locales/zh_CN/translation.json

@@ -520,7 +520,8 @@
       "Code Block": "代码块",
       "Comment Out": "隐藏",
       "Comment Out Desc": "(注释)"
-    }
+    },
+    "Other Shortcuts": "其他快捷键"
   },
   "modal_resolve_conflict": {
     "conflicts_with_new_body_on_server_side": "与服务器端的新正文文本冲突。 请选择或编辑页面正文以解决冲突",

+ 9 - 2
apps/app/resource/Contributor.js

@@ -18,6 +18,7 @@ const contributors = [
           { position: 'Haberion', name: 'hakumizuki' },
           { position: 'Undefined', name: 'miya' },
           { position: 'Hoimi Slime', name: 'satof3' },
+          { position: 'Archer', name: 'Ryosei-Fukushima' },
         ],
       },
       {
@@ -172,9 +173,15 @@ const contributors = [
         additionalClass: 'col-md-4 my-4',
         members: [
           { name: 'Crowi Team' },
-          { position: 'Ambassador', name: 'Tsuyoshi Suzuki' },
+          { name: 'RIKEN' },
           { name: 'JPCERT/CC' },
-          { name: 'goofmint' },
+        ],
+      },
+      {
+        additionalClass: 'col-md-6 my-4',
+        members: [
+          { position: 'Ambassador', name: 'Tsuyoshi Suzuki' },
+          { position: 'Ambassador', name: 'goofmint' },
         ],
       },
       {

+ 4 - 3
apps/app/src/client/components/Admin/App/AppSettingsPageContents.tsx

@@ -1,9 +1,10 @@
-import React, { useEffect } from 'react';
+import { useEffect } from 'react';
 import { useTranslation } from 'next-i18next';
 
 import AdminAppContainer from '~/client/services/AdminAppContainer';
 import { toastError } from '~/client/util/toastr';
 import { useIsMaintenanceMode } from '~/states/global';
+import { useSWRxAppSettings } from '~/stores/admin/app-settings';
 import { toArrayIfNot } from '~/utils/array-utils';
 import loggerFactory from '~/utils/logger';
 
@@ -28,7 +29,7 @@ const AppSettingsPageContents = (props: Props) => {
 
   const isMaintenanceMode = useIsMaintenanceMode();
 
-  const { isV5Compatible } = adminAppContainer.state;
+  const { data: appSettings } = useSWRxAppSettings();
 
   useEffect(() => {
     const fetchAppSettingsData = async () => {
@@ -73,7 +74,7 @@ const AppSettingsPageContents = (props: Props) => {
           </div>
         )
       }
-      {!isV5Compatible && (
+      {appSettings?.isV5Compatible === false && (
         <div className="row">
           <div className="col-lg-12">
             <h2

+ 6 - 7
apps/app/src/client/components/PageCreateModal.tsx

@@ -21,12 +21,14 @@ import { debounce } from 'throttle-debounce';
 import { useCreateTemplatePage } from '~/client/services/create-page';
 import { useCreatePage } from '~/client/services/create-page/use-create-page';
 import { useToastrOnError } from '~/client/services/use-toastr-on-error';
-import { useCurrentUser, useGrowiCloudUri } from '~/states/global';
+import { useGrowiDocumentationUrl } from '~/states/context';
+import { useCurrentUser } from '~/states/global';
 import { isSearchServiceReachableAtom } from '~/states/server-configurations';
 import {
   usePageCreateModalActions,
   usePageCreateModalStatus,
 } from '~/states/ui/modal/page-create';
+import { getLocale } from '~/utils/locale-utils';
 
 import PagePathAutoComplete from './PagePathAutoComplete';
 
@@ -38,7 +40,7 @@ const PageCreateModal: React.FC = () => {
   const { t, i18n } = useTranslation();
 
   const currentUser = useCurrentUser();
-  const growiCloudUri = useGrowiCloudUri();
+  const documentationUrl = useGrowiDocumentationUrl();
 
   const { isOpened, path: pathname = '' } = usePageCreateModalStatus();
   const { close: closeCreateModal } = usePageCreateModalActions();
@@ -72,11 +74,8 @@ const PageCreateModal: React.FC = () => {
     [userHomepagePath, t, now],
   );
 
-  const templateHelpLang = i18n.language === 'ja' ? 'ja' : 'en';
-  const templateHelpUrl =
-    growiCloudUri != null
-      ? `https://growi.cloud/help/${templateHelpLang}/guide/features/template.html`
-      : `https://docs.growi.org/${templateHelpLang}/guide/features/template.html`;
+  const docsLang = getLocale(i18n.language).code === 'ja' ? 'ja' : 'en';
+  const templateHelpUrl = `${documentationUrl}/${docsLang}/guide/features/template.html`;
 
   const [todayInput, setTodayInput] = useState('');
   const [pageNameInput, setPageNameInput] = useState(pageNameInputInitialValue);

+ 5 - 0
apps/app/src/client/components/PageEditor/EditorNavbar/EditingUserList.module.scss

@@ -3,3 +3,8 @@
 .user-list-popover {
   @extend %user-list-popover;
 }
+
+.avatar-wrapper {
+  // Collapse inline-element ghost space inside the flex container
+  line-height: 0;
+}

برخی فایل ها در این مقایسه diff نمایش داده نمی شوند زیرا تعداد فایل ها بسیار زیاد است