Просмотр исходного кода

Merge branch 'master' of https://github.com/growilabs/growi into feat/158232-prevent-inline-mime-type-sniffing-vulnerabilities

arvid-e 1 месяц назад
Родитель
Commit
836369b311
100 измененных файлов с 10831 добавлено и 1104 удалено
  1. 391 0
      .claude/agents/build-error-resolver.md
  2. 277 0
      .claude/commands/kiro/spec-cleanup.md
  3. 179 0
      .claude/commands/kiro/spec-design.md
  4. 110 0
      .claude/commands/kiro/spec-impl.md
  5. 65 0
      .claude/commands/kiro/spec-init.md
  6. 98 0
      .claude/commands/kiro/spec-requirements.md
  7. 87 0
      .claude/commands/kiro/spec-status.md
  8. 138 0
      .claude/commands/kiro/spec-tasks.md
  9. 127 0
      .claude/commands/kiro/steering-custom.md
  10. 143 0
      .claude/commands/kiro/steering.md
  11. 92 0
      .claude/commands/kiro/validate-design.md
  12. 88 0
      .claude/commands/kiro/validate-gap.md
  13. 138 0
      .claude/commands/kiro/validate-impl.md
  14. 116 0
      .claude/commands/learn.md
  15. 287 0
      .claude/commands/tdd.md
  16. 217 0
      .claude/rules/coding-style.md
  17. 37 0
      .claude/rules/performance.md
  18. 33 0
      .claude/rules/security.md
  19. 38 0
      .claude/rules/testing.md
  20. 23 0
      .claude/settings.json
  21. 122 0
      .claude/skills/learned/essential-test-design/SKILL.md
  22. 494 0
      .claude/skills/learned/essential-test-patterns/SKILL.md
  23. 207 0
      .claude/skills/monorepo-overview/SKILL.md
  24. 269 0
      .claude/skills/tech-stack/SKILL.md
  25. 6 2
      .devcontainer/app/devcontainer.json
  26. 3 0
      .devcontainer/app/postCreateCommand.sh
  27. 4 1
      .devcontainer/pdf-converter/devcontainer.json
  28. 3 0
      .devcontainer/pdf-converter/postCreateCommand.sh
  29. 0 88
      .eslintrc.js
  30. 0 1
      .github/mergify.yml
  31. 2 2
      .github/workflows/ci-app.yml
  32. 1 1
      .github/workflows/ci-pdf-converter.yml
  33. 1 1
      .github/workflows/ci-slackbot-proxy.yml
  34. 5 30
      .github/workflows/release-rc.yml
  35. 11 46
      .github/workflows/release.yml
  36. 1 1
      .github/workflows/reusable-app-build-image.yml
  37. 3 0
      .gitignore
  38. 93 0
      .kiro/settings/rules/design-discovery-full.md
  39. 49 0
      .kiro/settings/rules/design-discovery-light.md
  40. 182 0
      .kiro/settings/rules/design-principles.md
  41. 110 0
      .kiro/settings/rules/design-review.md
  42. 49 0
      .kiro/settings/rules/ears-format.md
  43. 144 0
      .kiro/settings/rules/gap-analysis.md
  44. 90 0
      .kiro/settings/rules/steering-principles.md
  45. 131 0
      .kiro/settings/rules/tasks-generation.md
  46. 34 0
      .kiro/settings/rules/tasks-parallel-analysis.md
  47. 276 0
      .kiro/settings/templates/specs/design.md
  48. 22 0
      .kiro/settings/templates/specs/init.json
  49. 9 0
      .kiro/settings/templates/specs/requirements-init.md
  50. 26 0
      .kiro/settings/templates/specs/requirements.md
  51. 61 0
      .kiro/settings/templates/specs/research.md
  52. 21 0
      .kiro/settings/templates/specs/tasks.md
  53. 69 0
      .kiro/settings/templates/steering-custom/api-standards.md
  54. 67 0
      .kiro/settings/templates/steering-custom/authentication.md
  55. 46 0
      .kiro/settings/templates/steering-custom/database.md
  56. 54 0
      .kiro/settings/templates/steering-custom/deployment.md
  57. 59 0
      .kiro/settings/templates/steering-custom/error-handling.md
  58. 55 0
      .kiro/settings/templates/steering-custom/security.md
  59. 47 0
      .kiro/settings/templates/steering-custom/testing.md
  60. 18 0
      .kiro/settings/templates/steering/product.md
  61. 41 0
      .kiro/settings/templates/steering/structure.md
  62. 45 0
      .kiro/settings/templates/steering/tech.md
  63. 764 0
      .kiro/specs/oauth2-email-support/design.md
  64. 57 0
      .kiro/specs/oauth2-email-support/requirements.md
  65. 449 0
      .kiro/specs/oauth2-email-support/research.md
  66. 23 0
      .kiro/specs/oauth2-email-support/spec.json
  67. 41 0
      .kiro/specs/oauth2-email-support/tasks.md
  68. 34 0
      .kiro/steering/product.md
  69. 8 0
      .kiro/steering/structure.md
  70. 8 0
      .kiro/steering/tdd.md
  71. 8 0
      .kiro/steering/tech.md
  72. 1 20
      .mcp.json
  73. 192 0
      .serena/memories/apps-app-jotai-directory-structure.md
  74. 84 0
      .serena/memories/apps-app-modal-performance-optimization-v2-completion-summary.md
  75. 640 0
      .serena/memories/apps-app-modal-performance-optimization-v3-completion-summary.md
  76. 105 0
      .serena/memories/apps-app-page-path-nav-and-sub-navigation-layering.md
  77. 683 0
      .serena/memories/apps-app-page-tree-specification.md
  78. 0 186
      .serena/memories/apps-app-pagetree-performance-refactor-plan.md
  79. 0 71
      .serena/memories/coding_conventions.md
  80. 0 45
      .serena/memories/development_environment.md
  81. 390 0
      .serena/memories/nextjs-pages-router-getLayout-pattern.md
  82. 441 0
      .serena/memories/page-state-hooks-useLatestRevision-degradation.md
  83. 65 0
      .serena/memories/page-transition-and-rendering-flow.md
  84. 0 26
      .serena/memories/project_overview.md
  85. 0 90
      .serena/memories/project_structure.md
  86. 0 100
      .serena/memories/suggested_commands.md
  87. 0 95
      .serena/memories/task_completion_checklist.md
  88. 0 42
      .serena/memories/tech_stack.md
  89. 65 9
      .serena/project.yml
  90. 4 1
      .vscode/mcp.json
  91. 8 61
      .vscode/settings.json
  92. 139 0
      AGENTS.md
  93. 185 1
      CHANGELOG.md
  94. 38 83
      CLAUDE.md
  95. 105 0
      apps/app/.claude/skills/app-architecture/SKILL.md
  96. 202 0
      apps/app/.claude/skills/app-commands/SKILL.md
  97. 173 0
      apps/app/.claude/skills/app-specific-patterns/SKILL.md
  98. 302 0
      apps/app/.claude/skills/learned/page-save-origin-semantics/SKILL.md
  99. 0 101
      apps/app/.eslintrc.js
  100. 3 0
      apps/app/.gitignore

+ 391 - 0
.claude/agents/build-error-resolver.md

@@ -0,0 +1,391 @@
+---
+name: build-error-resolver
+description: Build and TypeScript error resolution specialist. Use PROACTIVELY when build fails or type errors occur. Fixes build/type errors only with minimal diffs, no architectural edits. Focuses on getting the build green quickly.
+tools: Read, Write, Edit, Bash, Grep, Glob
+model: opus
+---
+
+# Build Error Resolver
+
+You are an expert build error resolution specialist focused on fixing TypeScript, compilation, and build errors quickly and efficiently. Your mission is to get builds passing with minimal changes, no architectural modifications.
+
+## Core Responsibilities
+
+1. **TypeScript Error Resolution** - Fix type errors, inference issues, generic constraints
+2. **Build Error Fixing** - Resolve compilation failures, module resolution
+3. **Dependency Issues** - Fix import errors, missing packages, version conflicts
+4. **Configuration Errors** - Resolve tsconfig.json, Next.js config issues
+5. **Minimal Diffs** - Make smallest possible changes to fix errors
+6. **No Architecture Changes** - Only fix errors, don't refactor or redesign
+
+## Tools at Your Disposal
+
+### Build & Type Checking Tools
+- **tsgo** - TypeScript Go compiler for type checking
+- **pnpm** - Package management
+- **biome** - Linting and formatting (NOT ESLint)
+- **turbo** - Monorepo build orchestration
+
+### Diagnostic Commands
+```bash
+# Full lint (typecheck + biome + styles + openapi)
+turbo run lint --filter {package}
+
+# Or directly in apps/app
+cd apps/app && pnpm run lint:typecheck
+cd apps/app && pnpm run lint:biome
+
+# Check specific file
+pnpm biome check path/to/file.ts
+pnpm tsgo --noEmit path/to/file.ts
+
+# Production build
+turbo run build --filter {package}
+```
+
+## Error Resolution Workflow
+
+### 1. Collect All Errors
+```
+a) Run full type check
+   - turbo run lint --filter {package}
+   - Capture ALL errors, not just first
+
+b) Categorize errors by type
+   - Type inference failures
+   - Missing type definitions
+   - Import/export errors
+   - Configuration errors
+   - Dependency issues
+
+c) Prioritize by impact
+   - Blocking build: Fix first
+   - Type errors: Fix in order
+   - Warnings: Fix if time permits
+```
+
+### 2. Fix Strategy (Minimal Changes)
+```
+For each error:
+
+1. Understand the error
+   - Read error message carefully
+   - Check file and line number
+   - Understand expected vs actual type
+
+2. Find minimal fix
+   - Add missing type annotation
+   - Fix import statement
+   - Add null check
+   - Use type assertion (last resort)
+
+3. Verify fix doesn't break other code
+   - Run lint again after each fix
+   - Check related files
+   - Ensure no new errors introduced
+
+4. Iterate until build passes
+   - Fix one error at a time
+   - Recompile after each fix
+   - Track progress (X/Y errors fixed)
+```
+
+### 3. Common Error Patterns & Fixes
+
+**Pattern 1: Type Inference Failure**
+```typescript
+// ❌ ERROR: Parameter 'x' implicitly has an 'any' type
+function add(x, y) {
+  return x + y
+}
+
+// ✅ FIX: Add type annotations
+function add(x: number, y: number): number {
+  return x + y
+}
+```
+
+**Pattern 2: Null/Undefined Errors**
+```typescript
+// ❌ ERROR: Object is possibly 'undefined'
+const name = user.name.toUpperCase()
+
+// ✅ FIX: Optional chaining
+const name = user?.name?.toUpperCase()
+
+// ✅ OR: Null check
+const name = user && user.name ? user.name.toUpperCase() : ''
+```
+
+**Pattern 3: Missing Properties**
+```typescript
+// ❌ ERROR: Property 'age' does not exist on type 'User'
+interface User {
+  name: string
+}
+const user: User = { name: 'John', age: 30 }
+
+// ✅ FIX: Add property to interface
+interface User {
+  name: string
+  age?: number // Optional if not always present
+}
+```
+
+**Pattern 4: Import Errors**
+```typescript
+// ❌ ERROR: Cannot find module '~/lib/utils'
+import { formatDate } from '~/lib/utils'
+
+// ✅ FIX 1: Check tsconfig paths (GROWI uses ~/ for apps/app/src)
+{
+  "compilerOptions": {
+    "paths": {
+      "~/*": ["./src/*"]
+    }
+  }
+}
+
+// ✅ FIX 2: Use relative import
+import { formatDate } from '../lib/utils'
+
+// ✅ FIX 3: Install missing package
+pnpm add <package-name>
+```
+
+**Pattern 5: Type Mismatch**
+```typescript
+// ❌ ERROR: Type 'string' is not assignable to type 'number'
+const age: number = "30"
+
+// ✅ FIX: Parse string to number
+const age: number = parseInt("30", 10)
+
+// ✅ OR: Change type
+const age: string = "30"
+```
+
+**Pattern 6: Generic Constraints**
+```typescript
+// ❌ ERROR: Type 'T' is not assignable to type 'string'
+function getLength<T>(item: T): number {
+  return item.length
+}
+
+// ✅ FIX: Add constraint
+function getLength<T extends { length: number }>(item: T): number {
+  return item.length
+}
+
+// ✅ OR: More specific constraint
+function getLength<T extends string | any[]>(item: T): number {
+  return item.length
+}
+```
+
+**Pattern 7: React Hook Errors**
+```typescript
+// ❌ ERROR: React Hook "useState" cannot be called in a function
+function MyComponent() {
+  if (condition) {
+    const [state, setState] = useState(0) // ERROR!
+  }
+}
+
+// ✅ FIX: Move hooks to top level
+function MyComponent() {
+  const [state, setState] = useState(0)
+
+  if (!condition) {
+    return null
+  }
+
+  // Use state here
+}
+```
+
+**Pattern 8: Async/Await Errors**
+```typescript
+// ❌ ERROR: 'await' expressions are only allowed within async functions
+function fetchData() {
+  const data = await fetch('/api/data')
+}
+
+// ✅ FIX: Add async keyword
+async function fetchData() {
+  const data = await fetch('/api/data')
+}
+```
+
+**Pattern 9: Module Not Found**
+```typescript
+// ❌ ERROR: Cannot find module 'react' or its corresponding type declarations
+import React from 'react'
+
+// ✅ FIX: Install dependencies
+pnpm add react
+pnpm add -D @types/react
+
+// ✅ CHECK: Verify package.json has dependency
+{
+  "dependencies": {
+    "react": "^18.0.0"
+  },
+  "devDependencies": {
+    "@types/react": "^18.0.0"
+  }
+}
+```
+
+## Minimal Diff Strategy
+
+**CRITICAL: Make smallest possible changes**
+
+### DO:
+✅ Add type annotations where missing
+✅ Add null checks where needed
+✅ Fix imports/exports
+✅ Add missing dependencies
+✅ Update type definitions
+✅ Fix configuration files
+
+### DON'T:
+❌ Refactor unrelated code
+❌ Change architecture
+❌ Rename variables/functions (unless causing error)
+❌ Add new features
+❌ Change logic flow (unless fixing error)
+❌ Optimize performance
+❌ Improve code style
+
+**Example of Minimal Diff:**
+
+```typescript
+// File has 200 lines, error on line 45
+
+// ❌ WRONG: Refactor entire file
+// - Rename variables
+// - Extract functions
+// - Change patterns
+// Result: 50 lines changed
+
+// ✅ CORRECT: Fix only the error
+// - Add type annotation on line 45
+// Result: 1 line changed
+
+function processData(data) { // Line 45 - ERROR: 'data' implicitly has 'any' type
+  return data.map(item => item.value)
+}
+
+// ✅ MINIMAL FIX:
+function processData(data: any[]) { // Only change this line
+  return data.map(item => item.value)
+}
+
+// ✅ BETTER MINIMAL FIX (if type known):
+function processData(data: Array<{ value: number }>) {
+  return data.map(item => item.value)
+}
+```
+
+## Build Error Report Format
+
+```markdown
+# Build Error Resolution Report
+
+**Date:** YYYY-MM-DD
+**Build Target:** Next.js Production / TypeScript Check / Biome
+**Initial Errors:** X
+**Errors Fixed:** Y
+**Build Status:** ✅ PASSING / ❌ FAILING
+
+## Errors Fixed
+
+### 1. [Error Category - e.g., Type Inference]
+**Location:** `apps/app/src/components/PageCard.tsx:45`
+**Error Message:**
+```
+Parameter 'page' implicitly has an 'any' type.
+```
+
+**Root Cause:** Missing type annotation for function parameter
+
+**Fix Applied:**
+```diff
+- function getPagePath(page) {
++ function getPagePath(page: IPage) {
+    return page.path;
+  }
+```
+
+**Lines Changed:** 1
+**Impact:** NONE - Type safety improvement only
+
+---
+
+## Verification Steps
+
+1. ✅ TypeScript check passes: `turbo run lint --filter {package}`
+2. ✅ Next.js build succeeds: `turbo run build --filter {package}`
+3. ✅ No new errors introduced
+4. ✅ Development server runs: `turbo run dev`
+
+## Summary
+
+- Total errors resolved: X
+- Total lines changed: Y
+- Build status: ✅ PASSING
+- Blocking issues: 0 remaining
+```
+
+## When to Use This Agent
+
+**USE when:**
+- `turbo run build --filter {package}` fails
+- `turbo run lint --filter {package}` shows errors
+- Type errors blocking development
+- Import/module resolution errors
+- Configuration errors
+- Dependency version conflicts
+
+**DON'T USE when:**
+- Code needs refactoring
+- Architectural changes needed
+- New features required
+- Tests failing (run tests separately)
+- Security issues found (use security-reviewer)
+
+## Build Error Priority Levels
+
+### 🔴 CRITICAL (Fix Immediately)
+- Build completely broken
+- No development server
+- Production deployment blocked
+- Multiple files failing
+
+### 🟡 HIGH (Fix Soon)
+- Single file failing
+- Type errors in new code
+- Import errors
+- Non-critical build warnings
+
+### 🟢 MEDIUM (Fix When Possible)
+- Biome warnings
+- Deprecated API usage
+- Non-strict type issues
+- Minor configuration warnings
+
+## Success Metrics
+
+After build error resolution:
+- ✅ `turbo run lint --filter {package}` exits with code 0
+- ✅ `turbo run build --filter {package}` completes successfully
+- ✅ No new errors introduced
+- ✅ Minimal lines changed (< 5% of affected file)
+- ✅ Build time not significantly increased
+- ✅ Development server runs without errors
+- ✅ Tests still passing
+
+---
+
+**Remember**: The goal is to fix errors quickly with minimal changes. Don't refactor, don't optimize, don't redesign. Fix the error, verify the build passes, move on. Speed and precision over perfection.

+ 277 - 0
.claude/commands/kiro/spec-cleanup.md

@@ -0,0 +1,277 @@
+---
+description: Organize and clean up specification documents after implementation completion
+allowed-tools: Bash, Glob, Grep, Read, Write, Edit, MultiEdit, Update
+argument-hint: <feature-name>
+---
+
+# Specification Cleanup
+
+<background_information>
+- **Mission**: Organize specification documents after implementation completion, removing implementation details while preserving essential context for future refactoring
+- **Success Criteria**:
+  - Implementation details (testing procedures, deployment checklists) removed
+  - Design decisions and constraints preserved in research.md and design.md
+  - Requirements simplified (Acceptance Criteria condensed to summaries)
+  - Unimplemented features removed or documented
+  - Documents remain valuable for future refactoring work
+</background_information>
+
+<instructions>
+## Core Task
+Clean up and organize specification documents for feature **$1** after implementation is complete.
+
+## Organizing Principle
+
+**"Can we read essential context from these spec documents when refactoring this feature months later?"**
+
+- **Keep**: "Why" (design decisions, architectural constraints, limitations, trade-offs)
+- **Remove**: "How" (testing procedures, deployment steps, detailed implementation examples)
+
+## Execution Steps
+
+### Step 1: Load Context
+
+**Discover all spec files**:
+- Use Glob to find all files in `.kiro/specs/$1/` directory
+- Categorize files:
+  - **Core files** (must preserve): `spec.json`, `requirements.md`, `design.md`, `tasks.md`, `research.md`
+  - **Other files** (evaluate case-by-case): validation reports, notes, prototypes, migration guides, etc.
+
+**Read all discovered files**:
+- Read all core files first
+- Read other files to understand their content and value
+
+**Verify implementation status**:
+- Check that tasks are marked complete `[x]` in tasks.md
+- If implementation incomplete, warn user and ask to confirm cleanup
+
+### Step 2: Analyze Current State
+
+**Identify cleanup opportunities**:
+
+1. **Other files** (non-core files like validation-report.md, notes.md, etc.):
+   - Read each file to understand its content and purpose
+   - Identify valuable information that should be preserved:
+     * Implementation discoveries and lessons learned
+     * Critical constraints or design decisions
+     * Historical context for future refactoring
+   - Determine salvage strategy:
+     * Migrate valuable content to research.md or design.md
+     * Keep file if it contains essential reference information
+     * Delete if content is redundant or no longer relevant
+   - **Case-by-case evaluation required** - never assume files should be deleted
+
+2. **research.md**:
+   - Should contain production discoveries and implementation lessons learned
+   - Check if implementation revealed new constraints or patterns to document
+   - Identify content from other files that should be migrated here
+
+3. **requirements.md**:
+   - Identify verbose Acceptance Criteria that can be condensed to summaries
+   - Find unimplemented requirements (compare with tasks.md)
+   - Detect duplicate or redundant content
+
+4. **design.md**:
+   - Identify implementation-specific sections that can be removed:
+     * Detailed Testing Strategy (test procedures)
+     * Security Considerations (if covered in implementation)
+     * Error Handling code examples (if implemented)
+     * Migration Strategy (after migration complete)
+     * Deployment Checklist (after deployment)
+   - Identify sections to preserve:
+     * Architecture diagrams (essential for understanding)
+     * Component interfaces (API contracts)
+     * Design decisions and rationale
+     * Critical implementation constraints
+     * Known limitations
+   - Check if content from other files should be migrated here
+
+### Step 3: Interactive Confirmation
+
+**Present cleanup plan to user**:
+
+For each file and section identified in Step 2, ask:
+- "Should I delete/simplify/keep/salvage this section?"
+- Provide recommendations based on organizing principle
+- Show brief preview of content to aid decision
+
+**Example questions for other files**:
+- "validation-report.md found. Contains {brief summary}. Options:"
+  - "A: Migrate valuable content to research.md, then delete"
+  - "B: Keep as historical reference"
+  - "C: Delete (content no longer needed)"
+- "notes.md found. Contains {brief summary}. Salvage to research.md before deleting? [Y/n]"
+
+**Example questions for core files**:
+- "research.md: Add 'Session N: Production Discoveries' section to document implementation lessons? [Y/n]"
+- "requirements.md: Simplify Acceptance Criteria from detailed bullet points to summary paragraphs? [Y/n]"
+- "requirements.md: Remove unimplemented requirements (e.g., Req 4.4 field masking not implemented)? [Y/n]"
+- "design.md: Delete 'Testing Strategy' section (lines X-Y)? [Y/n]"
+- "design.md: Delete 'Security Considerations' section (lines X-Y)? [Y/n]"
+- "design.md: Keep Architecture diagrams (essential for refactoring)? [Y/n]"
+
+**Batch similar decisions**:
+- Group related sections (e.g., all "delete implementation details" decisions)
+- Allow user to approve categories rather than individual items
+- Present file-by-file salvage decisions for other files
+
+### Step 4: Execute Cleanup
+
+**For each approved action**:
+
+1. **Salvage and cleanup other files** (if approved):
+   - For each non-core file (validation-report.md, notes.md, etc.):
+     * Extract valuable information (implementation lessons, constraints, decisions)
+     * Migrate content to appropriate core file:
+       - Technical discoveries → research.md
+       - Design constraints → design.md
+       - Requirement clarifications → requirements.md
+     * Delete file after salvage (if approved)
+   - Document salvaged content with source reference (e.g., "From validation-report.md:")
+
+2. **Update research.md** (if new discoveries or salvaged content):
+   - Add new section "Session N: Production Implementation Discoveries" (if needed)
+   - Document critical technical constraints discovered during implementation
+   - Include code examples for critical patterns (e.g., falsy checks, credential preservation)
+   - Integrate salvaged content from other files
+   - Cross-reference requirements.md and design.md where relevant
+
+3. **Simplify requirements.md** (if approved):
+   - Transform detailed Acceptance Criteria into summary paragraphs
+   - Remove unimplemented requirements entirely
+   - Preserve requirement objectives and summaries
+   - Example transformation:
+     ```
+     Before: "1. System shall X... 2. System shall Y... [7 criteria]"
+     After: "**Summary**: System provides X and Y. Configuration includes..."
+     ```
+
+4. **Clean up design.md** (if approved):
+   - Delete approved sections (Testing Strategy, Security Considerations, etc.)
+   - Add "Critical Implementation Constraints" section if implementation revealed new constraints
+   - Integrate salvaged content from other files (if relevant)
+   - Preserve architecture diagrams and component interfaces
+   - Keep design decisions and rationale sections
+
+5. **Update spec.json metadata**:
+   - Set `phase: "implementation-complete"` (if not already set)
+   - Add `cleanup_completed: true` flag
+   - Update `updated_at` timestamp
+
+### Step 5: Generate Cleanup Summary
+
+**Provide summary report**:
+- List of files modified/deleted
+- Sections removed and lines saved
+- Critical information preserved
+- Recommendations for future refactoring
+
+**Format**:
+```markdown
+## Cleanup Summary for {feature-name}
+
+### Files Modified
+- ✅ validation-report.md: Salvaged to research.md, then deleted (730 lines removed)
+- ✅ notes.md: Salvaged to design.md, then deleted (120 lines removed)
+- ✅ research.md: Added Session 2 discoveries + salvaged content (180 lines added)
+- ✅ requirements.md: Simplified 6 requirements (350 lines → 180 lines)
+- ✅ design.md: Removed 4 sections, added constraints + salvaged content (250 lines removed, 100 added)
+
+### Information Salvaged
+- Implementation discoveries from validation-report.md → research.md
+- Design notes from notes.md → design.md
+- Historical context preserved with source attribution
+
+### Information Preserved
+- Architecture diagrams and component interfaces
+- Design decisions and rationale
+- Critical implementation constraints
+- Known limitations and trade-offs
+
+### Next Steps
+- Spec documents ready for future refactoring reference
+- Consider creating knowledge base entry if pattern is reusable
+```
+
+## Critical Constraints
+
+- **User approval required**: Never delete content without explicit confirmation
+- **Language consistency**: Use language specified in spec.json for all updates
+- **Preserve history**: Don't delete discovery rationale or design decisions
+- **Balance brevity with completeness**: Remove redundancy but keep essential context
+- **Interactive workflow**: Pause for user input rather than making assumptions
+
+## Tool Guidance
+
+- **Glob**: Discover all files in `.kiro/specs/{feature}/` directory
+- **Read**: Load all discovered files for analysis
+- **Grep**: Search for patterns (e.g., unimplemented requirements, completed tasks)
+- **Edit/Write**: Update files based on approved changes, salvage content
+- **Bash**: Delete files after salvage (if approved)
+- **MultiEdit**: For batch edits across multiple sections
+
+## Output Description
+
+Provide cleanup plan and execution report in the language specified in spec.json.
+
+**Report Structure**:
+1. **Current State Analysis**: What needs cleanup and why
+2. **Cleanup Plan**: Proposed changes with recommendations
+3. **Confirmation Prompts**: Interactive questions for user approval
+4. **Execution Summary**: What was changed and why
+5. **Preserved Context**: What critical information remains for future refactoring
+
+**Format**: Clear, scannable format with sections and bullet points
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Implementation Incomplete**:
+- **Condition**: Less than 90% of tasks marked `[x]` in tasks.md
+- **Action**: Warn user: "Implementation appears incomplete (X/Y tasks done). Continue cleanup? [y/N]"
+- **Recommendation**: Wait until implementation complete before cleanup
+
+**Spec Not Found**:
+- **Message**: "No spec found for `$1`. Check available specs in `.kiro/specs/`"
+- **Action**: List available spec directories
+
+**Missing Critical Files**:
+- **Condition**: requirements.md or design.md missing
+- **Action**: Skip cleanup for missing files, proceed with available files
+- **Warning**: "requirements.md missing - cannot simplify requirements"
+
+### Dry Run Mode (Future Enhancement)
+
+**If `-n` or `--dry-run` flag provided**:
+- Show cleanup plan without executing changes
+- Allow user to review before committing to cleanup
+
+### Backup Recommendation
+
+**Before cleanup**:
+- Recommend user create git commit or backup
+- Warning: "This will modify spec files. Commit current state first? [Y/n]"
+
+### Undo Support
+
+**If cleanup goes wrong**:
+- Use git to restore previous state: `git checkout HEAD -- .kiro/specs/{feature}/`
+- Remind user to commit before cleanup for easy rollback
+
+## Example Usage
+
+```bash
+# Basic cleanup after implementation
+/kiro:spec-cleanup oauth2-email-support
+
+# With conversation context about implementation discoveries
+# Command will prompt for Session N discoveries to document
+/kiro:spec-cleanup user-authentication
+```
+
+## Related Commands
+
+- `/kiro:spec-impl {feature}` - Implement tasks (run before cleanup)
+- `/kiro:validate-impl {feature}` - Validate implementation (run before cleanup)
+- `/kiro:spec-status {feature}` - Check implementation status

+ 179 - 0
.claude/commands/kiro/spec-design.md

@@ -0,0 +1,179 @@
+---
+description: Create comprehensive technical design for a specification
+allowed-tools: Bash, Glob, Grep, LS, Read, Write, Edit, MultiEdit, Update, WebSearch, WebFetch
+argument-hint: <feature-name> [-y]
+---
+
+# Technical Design Generator
+
+<background_information>
+- **Mission**: Generate comprehensive technical design document that translates requirements (WHAT) into architectural design (HOW)
+- **Success Criteria**:
+  - All requirements mapped to technical components with clear interfaces
+  - Appropriate architecture discovery and research completed
+  - Design aligns with steering context and existing patterns
+  - Visual diagrams included for complex architectures
+</background_information>
+
+<instructions>
+## Core Task
+Generate technical design document for feature **$1** based on approved requirements.
+
+## Execution Steps
+
+### Step 1: Load Context
+
+**Read all necessary context**:
+- `.kiro/specs/$1/spec.json`, `requirements.md`, `design.md` (if exists)
+- **Entire `.kiro/steering/` directory** for complete project memory
+- `.kiro/settings/templates/specs/design.md` for document structure
+- `.kiro/settings/rules/design-principles.md` for design principles
+- `.kiro/settings/templates/specs/research.md` for discovery log structure
+
+**Validate requirements approval**:
+- If `-y` flag provided ($2 == "-y"): Auto-approve requirements in spec.json
+- Otherwise: Verify approval status (stop if unapproved, see Safety & Fallback)
+
+### Step 2: Discovery & Analysis
+
+**Critical: This phase ensures design is based on complete, accurate information.**
+
+1. **Classify Feature Type**:
+   - **New Feature** (greenfield) → Full discovery required
+   - **Extension** (existing system) → Integration-focused discovery
+   - **Simple Addition** (CRUD/UI) → Minimal or no discovery
+   - **Complex Integration** → Comprehensive analysis required
+
+2. **Execute Appropriate Discovery Process**:
+   
+   **For Complex/New Features**:
+   - Read and execute `.kiro/settings/rules/design-discovery-full.md`
+   - Conduct thorough research using WebSearch/WebFetch:
+     - Latest architectural patterns and best practices
+     - External dependency verification (APIs, libraries, versions, compatibility)
+     - Official documentation, migration guides, known issues
+     - Performance benchmarks and security considerations
+   
+   **For Extensions**:
+   - Read and execute `.kiro/settings/rules/design-discovery-light.md`
+   - Focus on integration points, existing patterns, compatibility
+   - Use Grep to analyze existing codebase patterns
+   
+   **For Simple Additions**:
+   - Skip formal discovery, quick pattern check only
+
+3. **Retain Discovery Findings for Step 3**:
+- External API contracts and constraints
+- Technology decisions with rationale
+- Existing patterns to follow or extend
+- Integration points and dependencies
+- Identified risks and mitigation strategies
+- Potential architecture patterns and boundary options (note details in `research.md`)
+- Parallelization considerations for future tasks (capture dependencies in `research.md`)
+
+4. **Persist Findings to Research Log**:
+- Create or update `.kiro/specs/$1/research.md` using the shared template
+- Summarize discovery scope and key findings (Summary section)
+- Record investigations in Research Log topics with sources and implications
+- Document architecture pattern evaluation, design decisions, and risks using the template sections
+- Use the language specified in spec.json when writing or updating `research.md`
+
+### Step 3: Generate Design Document
+
+1. **Load Design Template and Rules**:
+- Read `.kiro/settings/templates/specs/design.md` for structure
+- Read `.kiro/settings/rules/design-principles.md` for principles
+
+2. **Generate Design Document**:
+- **Follow specs/design.md template structure and generation instructions strictly**
+- **Integrate all discovery findings**: Use researched information (APIs, patterns, technologies) throughout component definitions, architecture decisions, and integration points
+- If existing design.md found in Step 1, use it as reference context (merge mode)
+- Apply design rules: Type Safety, Visual Communication, Formal Tone
+- Use language specified in spec.json
+- Ensure sections reflect updated headings ("Architecture Pattern & Boundary Map", "Technology Stack & Alignment", "Components & Interface Contracts") and reference supporting details from `research.md`
+
+3. **Update Metadata** in spec.json:
+- Set `phase: "design-generated"`
+- Set `approvals.design.generated: true, approved: false`
+- Set `approvals.requirements.approved: true`
+- Update `updated_at` timestamp
+
+## Critical Constraints
+- **Type Safety**:
+   - Enforce strong typing aligned with the project's technology stack.
+   - For statically typed languages, define explicit types/interfaces and avoid unsafe casts.
+   - For TypeScript, never use `any`; prefer precise types and generics.
+   - For dynamically typed languages, provide type hints/annotations where available (e.g., Python type hints) and validate inputs at boundaries.
+   - Document public interfaces and contracts clearly to ensure cross-component type safety.
+- **Latest Information**: Use WebSearch/WebFetch for external dependencies and best practices
+- **Steering Alignment**: Respect existing architecture patterns from steering context
+- **Template Adherence**: Follow specs/design.md template structure and generation instructions strictly
+- **Design Focus**: Architecture and interfaces ONLY, no implementation code
+- **Requirements Traceability IDs**: Use numeric requirement IDs only (e.g. "1.1", "1.2", "3.1", "3.3") exactly as defined in requirements.md. Do not invent new IDs or use alphabetic labels.
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context before taking action (specs, steering, templates, rules)
+- **Research when uncertain**: Use WebSearch/WebFetch for external dependencies, APIs, and latest best practices
+- **Analyze existing code**: Use Grep to find patterns and integration points in codebase
+- **Write last**: Generate design.md only after all research and analysis complete
+
+## Output Description
+
+**Command execution output** (separate from design.md content):
+
+Provide brief summary in the language specified in spec.json:
+
+1. **Status**: Confirm design document generated at `.kiro/specs/$1/design.md`
+2. **Discovery Type**: Which discovery process was executed (full/light/minimal)
+3. **Key Findings**: 2-3 critical insights from `research.md` that shaped the design
+4. **Next Action**: Approval workflow guidance (see Safety & Fallback)
+5. **Research Log**: Confirm `research.md` updated with latest decisions
+
+**Format**: Concise Markdown (under 200 words) - this is the command output, NOT the design document itself
+
+**Note**: The actual design document follows `.kiro/settings/templates/specs/design.md` structure.
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Requirements Not Approved**:
+- **Stop Execution**: Cannot proceed without approved requirements
+- **User Message**: "Requirements not yet approved. Approval required before design generation."
+- **Suggested Action**: "Run `/kiro:spec-design $1 -y` to auto-approve requirements and proceed"
+
+**Missing Requirements**:
+- **Stop Execution**: Requirements document must exist
+- **User Message**: "No requirements.md found at `.kiro/specs/$1/requirements.md`"
+- **Suggested Action**: "Run `/kiro:spec-requirements $1` to generate requirements first"
+
+**Template Missing**:
+- **User Message**: "Template file missing at `.kiro/settings/templates/specs/design.md`"
+- **Suggested Action**: "Check repository setup or restore template file"
+- **Fallback**: Use inline basic structure with warning
+
+**Steering Context Missing**:
+- **Warning**: "Steering directory empty or missing - design may not align with project standards"
+- **Proceed**: Continue with generation but note limitation in output
+
+**Discovery Complexity Unclear**:
+- **Default**: Use full discovery process (`.kiro/settings/rules/design-discovery-full.md`)
+- **Rationale**: Better to over-research than miss critical context
+- **Invalid Requirement IDs**:
+  - **Stop Execution**: If requirements.md is missing numeric IDs or uses non-numeric headings (for example, "Requirement A"), stop and instruct the user to fix requirements.md before continuing.
+
+### Next Phase: Task Generation
+
+**If Design Approved**:
+- Review generated design at `.kiro/specs/$1/design.md`
+- **Optional**: Run `/kiro:validate-design $1` for interactive quality review
+- Then `/kiro:spec-tasks $1 -y` to generate implementation tasks
+
+**If Modifications Needed**:
+- Provide feedback and re-run `/kiro:spec-design $1`
+- Existing design used as reference (merge mode)
+
+**Note**: Design approval is mandatory before proceeding to task generation.
+
+think hard

+ 110 - 0
.claude/commands/kiro/spec-impl.md

@@ -0,0 +1,110 @@
+---
+description: Execute spec tasks using TDD methodology
+allowed-tools: Bash, Read, Write, Edit, MultiEdit, Grep, Glob, LS, WebFetch, WebSearch
+argument-hint: <feature-name> [task-numbers]
+---
+
+# Implementation Task Executor
+
+<background_information>
+- **Mission**: Execute implementation tasks using Test-Driven Development methodology based on approved specifications
+- **Success Criteria**:
+  - All tests written before implementation code
+  - Code passes all tests with no regressions
+  - Tasks marked as completed in tasks.md
+  - Implementation aligns with design and requirements
+</background_information>
+
+<instructions>
+## Core Task
+Execute implementation tasks for feature **$1** using Test-Driven Development.
+
+## Execution Steps
+
+### Step 1: Load Context
+
+**Read all necessary context**:
+- `.kiro/specs/$1/spec.json`, `requirements.md`, `design.md`, `tasks.md`
+- **Entire `.kiro/steering/` directory** for complete project memory
+
+**Validate approvals**:
+- Verify tasks are approved in spec.json (stop if not, see Safety & Fallback)
+
+### Step 2: Select Tasks
+
+**Determine which tasks to execute**:
+- If `$2` provided: Execute specified task numbers (e.g., "1.1" or "1,2,3")
+- Otherwise: Execute all pending tasks (unchecked `- [ ]` in tasks.md)
+
+### Step 3: Execute with TDD
+
+For each selected task, follow Kent Beck's TDD cycle:
+
+1. **RED - Write Failing Test**:
+   - Write test for the next small piece of functionality
+   - Test should fail (code doesn't exist yet)
+   - Use descriptive test names
+
+2. **GREEN - Write Minimal Code**:
+   - Implement simplest solution to make test pass
+   - Focus only on making THIS test pass
+   - Avoid over-engineering
+
+3. **REFACTOR - Clean Up**:
+   - Improve code structure and readability
+   - Remove duplication
+   - Apply design patterns where appropriate
+   - Ensure all tests still pass after refactoring
+
+4. **VERIFY - Validate Quality**:
+   - All tests pass (new and existing)
+   - No regressions in existing functionality
+   - Code coverage maintained or improved
+
+5. **MARK COMPLETE**:
+   - Update checkbox from `- [ ]` to `- [x]` in tasks.md
+
+## Critical Constraints
+- **TDD Mandatory**: Tests MUST be written before implementation code
+- **Task Scope**: Implement only what the specific task requires
+- **Test Coverage**: All new code must have tests
+- **No Regressions**: Existing tests must continue to pass
+- **Design Alignment**: Implementation must follow design.md specifications
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context before implementation
+- **Test first**: Write tests before code
+- Use **WebSearch/WebFetch** for library documentation when needed
+
+## Output Description
+
+Provide brief summary in the language specified in spec.json:
+
+1. **Tasks Executed**: Task numbers and test results
+2. **Status**: Completed tasks marked in tasks.md, remaining tasks count
+
+**Format**: Concise (under 150 words)
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Tasks Not Approved or Missing Spec Files**:
+- **Stop Execution**: All spec files must exist and tasks must be approved
+- **Suggested Action**: "Complete previous phases: `/kiro:spec-requirements`, `/kiro:spec-design`, `/kiro:spec-tasks`"
+
+**Test Failures**:
+- **Stop Implementation**: Fix failing tests before continuing
+- **Action**: Debug and fix, then re-run
+
+### Task Execution
+
+**Execute specific task(s)**:
+- `/kiro:spec-impl $1 1.1` - Single task
+- `/kiro:spec-impl $1 1,2,3` - Multiple tasks
+
+**Execute all pending**:
+- `/kiro:spec-impl $1` - All unchecked tasks
+
+think

+ 65 - 0
.claude/commands/kiro/spec-init.md

@@ -0,0 +1,65 @@
+---
+description: Initialize a new specification with detailed project description
+allowed-tools: Bash, Read, Write, Glob
+argument-hint: <project-description>
+---
+
+# Spec Initialization
+
+<background_information>
+- **Mission**: Initialize the first phase of spec-driven development by creating directory structure and metadata for a new specification
+- **Success Criteria**:
+  - Generate appropriate feature name from project description
+  - Create unique spec structure without conflicts
+  - Provide clear path to next phase (requirements generation)
+</background_information>
+
+<instructions>
+## Core Task
+Generate a unique feature name from the project description ($ARGUMENTS) and initialize the specification structure.
+
+## Execution Steps
+1. **Check Uniqueness**: Verify `.kiro/specs/` for naming conflicts (append number suffix if needed)
+2. **Create Directory**: `.kiro/specs/[feature-name]/`
+3. **Initialize Files Using Templates**:
+   - Read `.kiro/settings/templates/specs/init.json`
+   - Read `.kiro/settings/templates/specs/requirements-init.md`
+   - Replace placeholders:
+     - `{{FEATURE_NAME}}` → generated feature name
+     - `{{TIMESTAMP}}` → current ISO 8601 timestamp
+     - `{{PROJECT_DESCRIPTION}}` → $ARGUMENTS
+   - Write `spec.json` and `requirements.md` to spec directory
+
+## Important Constraints
+- DO NOT generate requirements/design/tasks at this stage
+- Follow stage-by-stage development principles
+- Maintain strict phase separation
+- Only initialization is performed in this phase
+</instructions>
+
+## Tool Guidance
+- Use **Glob** to check existing spec directories for name uniqueness
+- Use **Read** to fetch templates: `init.json` and `requirements-init.md`
+- Use **Write** to create spec.json and requirements.md after placeholder replacement
+- Perform validation before any file write operation
+
+## Output Description
+Provide output in the language specified in `spec.json` with the following structure:
+
+1. **Generated Feature Name**: `feature-name` format with 1-2 sentence rationale
+2. **Project Summary**: Brief summary (1 sentence)
+3. **Created Files**: Bullet list with full paths
+4. **Next Step**: Command block showing `/kiro:spec-requirements <feature-name>`
+5. **Notes**: Explain why only initialization was performed (2-3 sentences on phase separation)
+
+**Format Requirements**:
+- Use Markdown headings (##, ###)
+- Wrap commands in code blocks
+- Keep total output concise (under 250 words)
+- Use clear, professional language per `spec.json.language`
+
+## Safety & Fallback
+- **Ambiguous Feature Name**: If feature name generation is unclear, propose 2-3 options and ask user to select
+- **Template Missing**: If template files don't exist in `.kiro/settings/templates/specs/`, report error with specific missing file path and suggest checking repository setup
+- **Directory Conflict**: If feature name already exists, append numeric suffix (e.g., `feature-name-2`) and notify user of automatic conflict resolution
+- **Write Failure**: Report error with specific path and suggest checking permissions or disk space

+ 98 - 0
.claude/commands/kiro/spec-requirements.md

@@ -0,0 +1,98 @@
+---
+description: Generate comprehensive requirements for a specification
+allowed-tools: Bash, Glob, Grep, LS, Read, Write, Edit, MultiEdit, Update, WebSearch, WebFetch
+argument-hint: <feature-name>
+---
+
+# Requirements Generation
+
+<background_information>
+- **Mission**: Generate comprehensive, testable requirements in EARS format based on the project description from spec initialization
+- **Success Criteria**:
+  - Create complete requirements document aligned with steering context
+  - Follow the project's EARS patterns and constraints for all acceptance criteria
+  - Focus on core functionality without implementation details
+  - Update metadata to track generation status
+</background_information>
+
+<instructions>
+## Core Task
+Generate complete requirements for feature **$1** based on the project description in requirements.md.
+
+## Execution Steps
+
+1. **Load Context**:
+   - Read `.kiro/specs/$1/spec.json` for language and metadata
+   - Read `.kiro/specs/$1/requirements.md` for project description
+   - **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
+     - Default files: `structure.md`, `tech.md`, `product.md`
+     - All custom steering files (regardless of mode settings)
+     - This provides complete project memory and context
+
+2. **Read Guidelines**:
+   - Read `.kiro/settings/rules/ears-format.md` for EARS syntax rules
+   - Read `.kiro/settings/templates/specs/requirements.md` for document structure
+
+3. **Generate Requirements**:
+   - Create initial requirements based on project description
+   - Group related functionality into logical requirement areas
+   - Apply EARS format to all acceptance criteria
+   - Use language specified in spec.json
+
+4. **Update Metadata**:
+   - Set `phase: "requirements-generated"`
+   - Set `approvals.requirements.generated: true`
+   - Update `updated_at` timestamp
+
+## Important Constraints
+- Focus on WHAT, not HOW (no implementation details)
+- Requirements must be testable and verifiable
+- Choose appropriate subject for EARS statements (system/service name for software)
+- Generate initial version first, then iterate with user feedback (no sequential questions upfront)
+- Requirement headings in requirements.md MUST begin with a numeric ID (for example: "Requirement 1", "1.", "2 Feature ..."); do not use alphabetic IDs like "Requirement A".
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context (spec, steering, rules, templates) before generation
+- **Write last**: Update requirements.md only after complete generation
+- Use **WebSearch/WebFetch** only if external domain knowledge needed
+
+## Output Description
+Provide output in the language specified in spec.json with:
+
+1. **Generated Requirements Summary**: Brief overview of major requirement areas (3-5 bullets)
+2. **Document Status**: Confirm requirements.md updated and spec.json metadata updated
+3. **Next Steps**: Guide user on how to proceed (approve and continue, or modify)
+
+**Format Requirements**:
+- Use Markdown headings for clarity
+- Include file paths in code blocks
+- Keep summary concise (under 300 words)
+
+## Safety & Fallback
+
+### Error Scenarios
+- **Missing Project Description**: If requirements.md lacks project description, ask user for feature details
+- **Ambiguous Requirements**: Propose initial version and iterate with user rather than asking many upfront questions
+- **Template Missing**: If template files don't exist, use inline fallback structure with warning
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+- **Incomplete Requirements**: After generation, explicitly ask user if requirements cover all expected functionality
+- **Steering Directory Empty**: Warn user that project context is missing and may affect requirement quality
+- **Non-numeric Requirement Headings**: If existing headings do not include a leading numeric ID (for example, they use "Requirement A"), normalize them to numeric IDs and keep that mapping consistent (never mix numeric and alphabetic labels).
+
+### Next Phase: Design Generation
+
+**If Requirements Approved**:
+- Review generated requirements at `.kiro/specs/$1/requirements.md`
+- **Optional Gap Analysis** (for existing codebases):
+  - Run `/kiro:validate-gap $1` to analyze implementation gap with current code
+  - Identifies existing components, integration points, and implementation strategy
+  - Recommended for brownfield projects; skip for greenfield
+- Then `/kiro:spec-design $1 -y` to proceed to design phase
+
+**If Modifications Needed**:
+- Provide feedback and re-run `/kiro:spec-requirements $1`
+
+**Note**: Approval is mandatory before proceeding to design phase.
+
+think

+ 87 - 0
.claude/commands/kiro/spec-status.md

@@ -0,0 +1,87 @@
+---
+description: Show specification status and progress
+allowed-tools: Bash, Read, Glob, Write, Edit, MultiEdit, Update
+argument-hint: <feature-name>
+---
+
+# Specification Status
+
+<background_information>
+- **Mission**: Display comprehensive status and progress for a specification
+- **Success Criteria**:
+  - Show current phase and completion status
+  - Identify next actions and blockers
+  - Provide clear visibility into progress
+</background_information>
+
+<instructions>
+## Core Task
+Generate status report for feature **$1** showing progress across all phases.
+
+## Execution Steps
+
+### Step 1: Load Spec Context
+- Read `.kiro/specs/$1/spec.json` for metadata and phase status
+- Read existing files: `requirements.md`, `design.md`, `tasks.md` (if they exist)
+- Check `.kiro/specs/$1/` directory for available files
+
+### Step 2: Analyze Status
+
+**Parse each phase**:
+- **Requirements**: Count requirements and acceptance criteria
+- **Design**: Check for architecture, components, diagrams
+- **Tasks**: Count completed vs total tasks (parse `- [x]` vs `- [ ]`)
+- **Approvals**: Check approval status in spec.json
+
+### Step 3: Generate Report
+
+Create report in the language specified in spec.json covering:
+1. **Current Phase & Progress**: Where the spec is in the workflow
+2. **Completion Status**: Percentage complete for each phase
+3. **Task Breakdown**: If tasks exist, show completed/remaining counts
+4. **Next Actions**: What needs to be done next
+5. **Blockers**: Any issues preventing progress
+
+## Critical Constraints
+- Use language from spec.json
+- Calculate accurate completion percentages
+- Identify specific next action commands
+</instructions>
+
+## Tool Guidance
+- **Read**: Load spec.json first, then other spec files as needed
+- **Parse carefully**: Extract completion data from tasks.md checkboxes
+- Use **Glob** to check which spec files exist
+
+## Output Description
+
+Provide status report in the language specified in spec.json:
+
+**Report Structure**:
+1. **Feature Overview**: Name, phase, last updated
+2. **Phase Status**: Requirements, Design, Tasks with completion %
+3. **Task Progress**: If tasks exist, show X/Y completed
+4. **Next Action**: Specific command to run next
+5. **Issues**: Any blockers or missing elements
+
+**Format**: Clear, scannable layout with emojis (✅/⏳/❌) for status
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Spec Not Found**:
+- **Message**: "No spec found for `$1`. Check available specs in `.kiro/specs/`"
+- **Action**: List available spec directories
+
+**Incomplete Spec**:
+- **Warning**: Identify which files are missing
+- **Suggested Action**: Point to next phase command
+
+### List All Specs
+
+To see all available specs:
+- Run with no argument or use wildcard
+- Shows all specs in `.kiro/specs/` with their status
+
+think

+ 138 - 0
.claude/commands/kiro/spec-tasks.md

@@ -0,0 +1,138 @@
+---
+description: Generate implementation tasks for a specification
+allowed-tools: Read, Write, Edit, MultiEdit, Glob, Grep
+argument-hint: <feature-name> [-y] [--sequential]
+---
+
+# Implementation Tasks Generator
+
+<background_information>
+- **Mission**: Generate detailed, actionable implementation tasks that translate technical design into executable work items
+- **Success Criteria**:
+  - All requirements mapped to specific tasks
+  - Tasks properly sized (1-3 hours each)
+  - Clear task progression with proper hierarchy
+  - Natural language descriptions focused on capabilities
+</background_information>
+
+<instructions>
+## Core Task
+Generate implementation tasks for feature **$1** based on approved requirements and design.
+
+## Execution Steps
+
+### Step 1: Load Context
+
+**Read all necessary context**:
+- `.kiro/specs/$1/spec.json`, `requirements.md`, `design.md`
+- `.kiro/specs/$1/tasks.md` (if exists, for merge mode)
+- **Entire `.kiro/steering/` directory** for complete project memory
+
+**Validate approvals**:
+- If `-y` flag provided ($2 == "-y"): Auto-approve requirements and design in spec.json
+- Otherwise: Verify both approved (stop if not, see Safety & Fallback)
+- Determine sequential mode based on presence of `--sequential`
+
+### Step 2: Generate Implementation Tasks
+
+**Load generation rules and template**:
+- Read `.kiro/settings/rules/tasks-generation.md` for principles
+- If `sequential` is **false**: Read `.kiro/settings/rules/tasks-parallel-analysis.md` for parallel judgement criteria
+- Read `.kiro/settings/templates/specs/tasks.md` for format (supports `(P)` markers)
+
+**Generate task list following all rules**:
+- Use language specified in spec.json
+- Map all requirements to tasks
+- When documenting requirement coverage, list numeric requirement IDs only (comma-separated) without descriptive suffixes, parentheses, translations, or free-form labels
+- Ensure all design components included
+- Verify task progression is logical and incremental
+- Collapse single-subtask structures by promoting them to major tasks and avoid duplicating details on container-only major tasks (use template patterns accordingly)
+- Apply `(P)` markers to tasks that satisfy parallel criteria (omit markers in sequential mode)
+- Mark optional test coverage subtasks with `- [ ]*` only when they strictly cover acceptance criteria already satisfied by core implementation and can be deferred post-MVP
+- If existing tasks.md found, merge with new content
+
+### Step 3: Finalize
+
+**Write and update**:
+- Create/update `.kiro/specs/$1/tasks.md`
+- Update spec.json metadata:
+  - Set `phase: "tasks-generated"`
+  - Set `approvals.tasks.generated: true, approved: false`
+  - Set `approvals.requirements.approved: true`
+  - Set `approvals.design.approved: true`
+  - Update `updated_at` timestamp
+
+## Critical Constraints
+- **Follow rules strictly**: All principles in tasks-generation.md are mandatory
+- **Natural Language**: Describe what to do, not code structure details
+- **Complete Coverage**: ALL requirements must map to tasks
+- **Maximum 2 Levels**: Major tasks and sub-tasks only (no deeper nesting)
+- **Sequential Numbering**: Major tasks increment (1, 2, 3...), never repeat
+- **Task Integration**: Every task must connect to the system (no orphaned work)
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context, rules, and templates before generation
+- **Write last**: Generate tasks.md only after complete analysis and verification
+
+## Output Description
+
+Provide brief summary in the language specified in spec.json:
+
+1. **Status**: Confirm tasks generated at `.kiro/specs/$1/tasks.md`
+2. **Task Summary**: 
+   - Total: X major tasks, Y sub-tasks
+   - All Z requirements covered
+   - Average task size: 1-3 hours per sub-task
+3. **Quality Validation**:
+   - ✅ All requirements mapped to tasks
+   - ✅ Task dependencies verified
+   - ✅ Testing tasks included
+4. **Next Action**: Review tasks and proceed when ready
+
+**Format**: Concise (under 200 words)
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Requirements or Design Not Approved**:
+- **Stop Execution**: Cannot proceed without approved requirements and design
+- **User Message**: "Requirements and design must be approved before task generation"
+- **Suggested Action**: "Run `/kiro:spec-tasks $1 -y` to auto-approve both and proceed"
+
+**Missing Requirements or Design**:
+- **Stop Execution**: Both documents must exist
+- **User Message**: "Missing requirements.md or design.md at `.kiro/specs/$1/`"
+- **Suggested Action**: "Complete requirements and design phases first"
+
+**Incomplete Requirements Coverage**:
+- **Warning**: "Not all requirements mapped to tasks. Review coverage."
+- **User Action Required**: Confirm intentional gaps or regenerate tasks
+
+**Template/Rules Missing**:
+- **User Message**: "Template or rules files missing in `.kiro/settings/`"
+- **Fallback**: Use inline basic structure with warning
+- **Suggested Action**: "Check repository setup or restore template files"
+- **Missing Numeric Requirement IDs**:
+  - **Stop Execution**: All requirements in requirements.md MUST have numeric IDs. If any requirement lacks a numeric ID, stop and request that requirements.md be fixed before generating tasks.
+
+### Next Phase: Implementation
+
+**Before Starting Implementation**:
+- **IMPORTANT**: Clear conversation history and free up context before running `/kiro:spec-impl`
+- This applies when starting first task OR switching between tasks
+- Fresh context ensures clean state and proper task focus
+
+**If Tasks Approved**:
+- Execute specific task: `/kiro:spec-impl $1 1.1` (recommended: clear context between each task)
+- Execute multiple tasks: `/kiro:spec-impl $1 1.1,1.2` (use cautiously, clear context between tasks)
+- Without arguments: `/kiro:spec-impl $1` (executes all pending tasks - NOT recommended due to context bloat)
+
+**If Modifications Needed**:
+- Provide feedback and re-run `/kiro:spec-tasks $1`
+- Existing tasks used as reference (merge mode)
+
+**Note**: The implementation phase will guide you through executing tasks with appropriate context and validation.
+
+think

+ 127 - 0
.claude/commands/kiro/steering-custom.md

@@ -0,0 +1,127 @@
+---
+description: Create custom steering documents for specialized project contexts
+allowed-tools: Bash, Read, Write, Edit, MultiEdit, Glob, Grep, LS
+---
+
+# Kiro Custom Steering Creation
+
+<background_information>
+**Role**: Create specialized steering documents beyond core files (product, tech, structure).
+
+**Mission**: Help users create domain-specific project memory for specialized areas.
+
+**Success Criteria**:
+- Custom steering captures specialized patterns
+- Follows same granularity principles as core steering
+- Provides clear value for specific domain
+</background_information>
+
+<instructions>
+## Workflow
+
+1. **Ask user** for custom steering needs:
+   - Domain/topic (e.g., "API standards", "testing approach")
+   - Specific requirements or patterns to document
+
+2. **Check if template exists**:
+   - Load from `.kiro/settings/templates/steering-custom/{name}.md` if available
+   - Use as starting point, customize based on project
+
+3. **Analyze codebase** (JIT) for relevant patterns:
+   - **Glob** for related files
+   - **Read** for existing implementations
+   - **Grep** for specific patterns
+
+4. **Generate custom steering**:
+   - Follow template structure if available
+   - Apply principles from `.kiro/settings/rules/steering-principles.md`
+   - Focus on patterns, not exhaustive lists
+   - Keep to 100-200 lines (2-3 minute read)
+
+5. **Create file** in `.kiro/steering/{name}.md`
+
+## Available Templates
+
+Templates available in `.kiro/settings/templates/steering-custom/`:
+
+1. **api-standards.md** - REST/GraphQL conventions, error handling
+2. **testing.md** - Test organization, mocking, coverage
+3. **security.md** - Auth patterns, input validation, secrets
+4. **database.md** - Schema design, migrations, query patterns
+5. **error-handling.md** - Error types, logging, retry strategies
+6. **authentication.md** - Auth flows, permissions, session management
+7. **deployment.md** - CI/CD, environments, rollback procedures
+
+Load template when needed, customize for project.
+
+## Steering Principles
+
+From `.kiro/settings/rules/steering-principles.md`:
+
+- **Patterns over lists**: Document patterns, not every file/component
+- **Single domain**: One topic per file
+- **Concrete examples**: Show patterns with code
+- **Maintainable size**: 100-200 lines typical
+- **Security first**: Never include secrets or sensitive data
+
+</instructions>
+
+## Tool guidance
+
+- **Read**: Load template, analyze existing code
+- **Glob**: Find related files for pattern analysis
+- **Grep**: Search for specific patterns
+- **LS**: Understand relevant structure
+
+**JIT Strategy**: Load template only when creating that type of steering.
+
+## Output description
+
+Chat summary with file location (file created directly).
+
+```
+✅ Custom Steering Created
+
+## Created:
+- .kiro/steering/api-standards.md
+
+## Based On:
+- Template: api-standards.md
+- Analyzed: src/api/ directory patterns
+- Extracted: REST conventions, error format
+
+## Content:
+- Endpoint naming patterns
+- Request/response format
+- Error handling conventions
+- Authentication approach
+
+Review and customize as needed.
+```
+
+## Examples
+
+### Success: API Standards
+**Input**: "Create API standards steering"  
+**Action**: Load template, analyze src/api/, extract patterns  
+**Output**: api-standards.md with project-specific REST conventions
+
+### Success: Testing Strategy
+**Input**: "Document our testing approach"  
+**Action**: Load template, analyze test files, extract patterns  
+**Output**: testing.md with test organization and mocking strategies
+
+## Safety & Fallback
+
+- **No template**: Generate from scratch based on domain knowledge
+- **Security**: Never include secrets (load principles)
+- **Validation**: Ensure doesn't duplicate core steering content
+
+## Notes
+
+- Templates are starting points, customize for project
+- Follow same granularity principles as core steering
+- All steering files loaded as project memory
+- Custom files equally important as core files
+- Avoid documenting agent-specific tooling directories (e.g. `.cursor/`, `.gemini/`, `.claude/`)
+- Light references to `.kiro/specs/` and `.kiro/steering/` are acceptable; avoid other `.kiro/` directories

+ 143 - 0
.claude/commands/kiro/steering.md

@@ -0,0 +1,143 @@
+---
+description: Manage .kiro/steering/ as persistent project knowledge
+allowed-tools: Bash, Read, Write, Edit, MultiEdit, Glob, Grep, LS
+---
+
+# Kiro Steering Management
+
+<background_information>
+**Role**: Maintain `.kiro/steering/` as persistent project memory.
+
+**Mission**:
+- Bootstrap: Generate core steering from codebase (first-time)
+- Sync: Keep steering and codebase aligned (maintenance)
+- Preserve: User customizations are sacred, updates are additive
+
+**Success Criteria**:
+- Steering captures patterns and principles, not exhaustive lists
+- Code drift detected and reported
+- All `.kiro/steering/*.md` treated equally (core + custom)
+</background_information>
+
+<instructions>
+## Scenario Detection
+
+Check `.kiro/steering/` status:
+
+**Bootstrap Mode**: Empty OR missing core files (product.md, tech.md, structure.md)  
+**Sync Mode**: All core files exist
+
+---
+
+## Bootstrap Flow
+
+1. Load templates from `.kiro/settings/templates/steering/`
+2. Analyze codebase (JIT):
+   - `glob_file_search` for source files
+   - `read_file` for README, package.json, etc.
+   - `grep` for patterns
+3. Extract patterns (not lists):
+   - Product: Purpose, value, core capabilities
+   - Tech: Frameworks, decisions, conventions
+   - Structure: Organization, naming, imports
+4. Generate steering files (follow templates)
+5. Load principles from `.kiro/settings/rules/steering-principles.md`
+6. Present summary for review
+
+**Focus**: Patterns that guide decisions, not catalogs of files/dependencies.
+
+---
+
+## Sync Flow
+
+1. Load all existing steering (`.kiro/steering/*.md`)
+2. Analyze codebase for changes (JIT)
+3. Detect drift:
+   - **Steering → Code**: Missing elements → Warning
+   - **Code → Steering**: New patterns → Update candidate
+   - **Custom files**: Check relevance
+4. Propose updates (additive, preserve user content)
+5. Report: Updates, warnings, recommendations
+
+**Update Philosophy**: Add, don't replace. Preserve user sections.
+
+---
+
+## Granularity Principle
+
+From `.kiro/settings/rules/steering-principles.md`:
+
+> "If new code follows existing patterns, steering shouldn't need updating."
+
+Document patterns and principles, not exhaustive lists.
+
+**Bad**: List every file in directory tree  
+**Good**: Describe organization pattern with examples
+
+</instructions>
+
+## Tool guidance
+
+- `Glob`: Find source/config files
+- `Read`: Read steering, docs, configs
+- `Grep`: Search patterns
+- `LS`: Analyze structure
+
+**JIT Strategy**: Fetch when needed, not upfront.
+
+## Output description
+
+Chat summary only (files updated directly).
+
+### Bootstrap:
+```
+✅ Steering Created
+
+## Generated:
+- product.md: [Brief description]
+- tech.md: [Key stack]
+- structure.md: [Organization]
+
+Review and approve as Source of Truth.
+```
+
+### Sync:
+```
+✅ Steering Updated
+
+## Changes:
+- tech.md: React 18 → 19
+- structure.md: Added API pattern
+
+## Code Drift:
+- Components not following import conventions
+
+## Recommendations:
+- Consider api-standards.md
+```
+
+## Examples
+
+### Bootstrap
+**Input**: Empty steering, React TypeScript project  
+**Output**: 3 files with patterns - "Feature-first", "TypeScript strict", "React 19"
+
+### Sync
+**Input**: Existing steering, new `/api` directory  
+**Output**: Updated structure.md, flagged non-compliant files, suggested api-standards.md
+
+## Safety & Fallback
+
+- **Security**: Never include keys, passwords, secrets (see principles)
+- **Uncertainty**: Report both states, ask user
+- **Preservation**: Add rather than replace when in doubt
+
+## Notes
+
+- All `.kiro/steering/*.md` loaded as project memory
+- Templates and principles are external for customization
+- Focus on patterns, not catalogs
+- "Golden Rule": New code following patterns shouldn't require steering updates
+- Avoid documenting agent-specific tooling directories (e.g. `.cursor/`, `.gemini/`, `.claude/`)
+- `.kiro/settings/` content should NOT be documented in steering files (settings are metadata, not project knowledge)
+- Light references to `.kiro/specs/` and `.kiro/steering/` are acceptable; avoid other `.kiro/` directories

+ 92 - 0
.claude/commands/kiro/validate-design.md

@@ -0,0 +1,92 @@
+---
+description: Interactive technical design quality review and validation
+allowed-tools: Read, Glob, Grep
+argument-hint: <feature-name>
+---
+
+# Technical Design Validation
+
+<background_information>
+- **Mission**: Conduct interactive quality review of technical design to ensure readiness for implementation
+- **Success Criteria**:
+  - Critical issues identified (maximum 3 most important concerns)
+  - Balanced assessment with strengths recognized
+  - Clear GO/NO-GO decision with rationale
+  - Actionable feedback for improvements if needed
+</background_information>
+
+<instructions>
+## Core Task
+Interactive design quality review for feature **$1** based on approved requirements and design document.
+
+## Execution Steps
+
+1. **Load Context**:
+   - Read `.kiro/specs/$1/spec.json` for language and metadata
+   - Read `.kiro/specs/$1/requirements.md` for requirements
+   - Read `.kiro/specs/$1/design.md` for design document
+   - **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
+     - Default files: `structure.md`, `tech.md`, `product.md`
+     - All custom steering files (regardless of mode settings)
+     - This provides complete project memory and context
+
+2. **Read Review Guidelines**:
+   - Read `.kiro/settings/rules/design-review.md` for review criteria and process
+
+3. **Execute Design Review**:
+   - Follow design-review.md process: Analysis → Critical Issues → Strengths → GO/NO-GO
+   - Limit to 3 most important concerns
+   - Engage interactively with user
+   - Use language specified in spec.json for output
+
+4. **Provide Decision and Next Steps**:
+   - Clear GO/NO-GO decision with rationale
+   - Guide user on proceeding based on decision
+
+## Important Constraints
+- **Quality assurance, not perfection seeking**: Accept acceptable risk
+- **Critical focus only**: Maximum 3 issues, only those significantly impacting success
+- **Interactive approach**: Engage in dialogue, not one-way evaluation
+- **Balanced assessment**: Recognize both strengths and weaknesses
+- **Actionable feedback**: All suggestions must be implementable
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context (spec, steering, rules) before review
+- **Grep if needed**: Search codebase for pattern validation or integration checks
+- **Interactive**: Engage with user throughout the review process
+
+## Output Description
+Provide output in the language specified in spec.json with:
+
+1. **Review Summary**: Brief overview (2-3 sentences) of design quality and readiness
+2. **Critical Issues**: Maximum 3, following design-review.md format
+3. **Design Strengths**: 1-2 positive aspects
+4. **Final Assessment**: GO/NO-GO decision with rationale and next steps
+
+**Format Requirements**:
+- Use Markdown headings for clarity
+- Follow design-review.md output format
+- Keep summary concise
+
+## Safety & Fallback
+
+### Error Scenarios
+- **Missing Design**: If design.md doesn't exist, stop with message: "Run `/kiro:spec-design $1` first to generate design document"
+- **Design Not Generated**: If design phase not marked as generated in spec.json, warn but proceed with review
+- **Empty Steering Directory**: Warn user that project context is missing and may affect review quality
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+
+### Next Phase: Task Generation
+
+**If Design Passes Validation (GO Decision)**:
+- Review feedback and apply changes if needed
+- Run `/kiro:spec-tasks $1` to generate implementation tasks
+- Or `/kiro:spec-tasks $1 -y` to auto-approve and proceed directly
+
+**If Design Needs Revision (NO-GO Decision)**:
+- Address critical issues identified
+- Re-run `/kiro:spec-design $1` with improvements
+- Re-validate with `/kiro:validate-design $1`
+
+**Note**: Design validation is recommended but optional. Quality review helps catch issues early.

+ 88 - 0
.claude/commands/kiro/validate-gap.md

@@ -0,0 +1,88 @@
+---
+description: Analyze implementation gap between requirements and existing codebase
+allowed-tools: Bash, Glob, Grep, Read, Write, Edit, MultiEdit, WebSearch, WebFetch
+argument-hint: <feature-name>
+---
+
+# Implementation Gap Validation
+
+<background_information>
+- **Mission**: Analyze the gap between requirements and existing codebase to inform implementation strategy
+- **Success Criteria**:
+  - Comprehensive understanding of existing codebase patterns and components
+  - Clear identification of missing capabilities and integration challenges
+  - Multiple viable implementation approaches evaluated
+  - Technical research needs identified for design phase
+</background_information>
+
+<instructions>
+## Core Task
+Analyze implementation gap for feature **$1** based on approved requirements and existing codebase.
+
+## Execution Steps
+
+1. **Load Context**:
+   - Read `.kiro/specs/$1/spec.json` for language and metadata
+   - Read `.kiro/specs/$1/requirements.md` for requirements
+   - **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
+     - Default files: `structure.md`, `tech.md`, `product.md`
+     - All custom steering files (regardless of mode settings)
+     - This provides complete project memory and context
+
+2. **Read Analysis Guidelines**:
+   - Read `.kiro/settings/rules/gap-analysis.md` for comprehensive analysis framework
+
+3. **Execute Gap Analysis**:
+   - Follow gap-analysis.md framework for thorough investigation
+   - Analyze existing codebase using Grep and Read tools
+   - Use WebSearch/WebFetch for external dependency research if needed
+   - Evaluate multiple implementation approaches (extend/new/hybrid)
+   - Use language specified in spec.json for output
+
+4. **Generate Analysis Document**:
+   - Create comprehensive gap analysis following the output guidelines in gap-analysis.md
+   - Present multiple viable options with trade-offs
+   - Flag areas requiring further research
+
+## Important Constraints
+- **Information over Decisions**: Provide analysis and options, not final implementation choices
+- **Multiple Options**: Present viable alternatives when applicable
+- **Thorough Investigation**: Use tools to deeply understand existing codebase
+- **Explicit Gaps**: Clearly flag areas needing research or investigation
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context (spec, steering, rules) before analysis
+- **Grep extensively**: Search codebase for patterns, conventions, and integration points
+- **WebSearch/WebFetch**: Research external dependencies and best practices when needed
+- **Write last**: Generate analysis only after complete investigation
+
+## Output Description
+Provide output in the language specified in spec.json with:
+
+1. **Analysis Summary**: Brief overview (3-5 bullets) of scope, challenges, and recommendations
+2. **Document Status**: Confirm analysis approach used
+3. **Next Steps**: Guide user on proceeding to design phase
+
+**Format Requirements**:
+- Use Markdown headings for clarity
+- Keep summary concise (under 300 words)
+- Detailed analysis follows gap-analysis.md output guidelines
+
+## Safety & Fallback
+
+### Error Scenarios
+- **Missing Requirements**: If requirements.md doesn't exist, stop with message: "Run `/kiro:spec-requirements $1` first to generate requirements"
+- **Requirements Not Approved**: If requirements not approved, warn user but proceed (gap analysis can inform requirement revisions)
+- **Empty Steering Directory**: Warn user that project context is missing and may affect analysis quality
+- **Complex Integration Unclear**: Flag for comprehensive research in design phase rather than blocking
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+
+### Next Phase: Design Generation
+
+**If Gap Analysis Complete**:
+- Review gap analysis insights
+- Run `/kiro:spec-design $1` to create technical design document
+- Or `/kiro:spec-design $1 -y` to auto-approve requirements and proceed directly
+
+**Note**: Gap analysis is optional but recommended for brownfield projects to inform design decisions.

+ 138 - 0
.claude/commands/kiro/validate-impl.md

@@ -0,0 +1,138 @@
+---
+description: Validate implementation against requirements, design, and tasks
+allowed-tools: Bash, Glob, Grep, Read, LS
+argument-hint: [feature-name] [task-numbers]
+---
+
+# Implementation Validation
+
+<background_information>
+- **Mission**: Verify that implementation aligns with approved requirements, design, and tasks
+- **Success Criteria**:
+  - All specified tasks marked as completed
+  - Tests exist and pass for implemented functionality
+  - Requirements traceability confirmed (EARS requirements covered)
+  - Design structure reflected in implementation
+  - No regressions in existing functionality
+</background_information>
+
+<instructions>
+## Core Task
+Validate implementation for feature(s) and task(s) based on approved specifications.
+
+## Execution Steps
+
+### 1. Detect Validation Target
+
+**If no arguments provided** (`$1` empty):
+- Parse conversation history for `/kiro:spec-impl <feature> [tasks]` commands
+- Extract feature names and task numbers from each execution
+- Aggregate all implemented tasks by feature
+- Report detected implementations (e.g., "user-auth: 1.1, 1.2, 1.3")
+- If no history found, scan `.kiro/specs/` for features with completed tasks `[x]`
+
+**If feature provided** (`$1` present, `$2` empty):
+- Use specified feature
+- Detect all completed tasks `[x]` in `.kiro/specs/$1/tasks.md`
+
+**If both feature and tasks provided** (`$1` and `$2` present):
+- Validate specified feature and tasks only (e.g., `user-auth 1.1,1.2`)
+
+### 2. Load Context
+
+For each detected feature:
+- Read `.kiro/specs/<feature>/spec.json` for metadata
+- Read `.kiro/specs/<feature>/requirements.md` for requirements
+- Read `.kiro/specs/<feature>/design.md` for design structure
+- Read `.kiro/specs/<feature>/tasks.md` for task list
+- **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
+  - Default files: `structure.md`, `tech.md`, `product.md`
+  - All custom steering files (regardless of mode settings)
+
+### 3. Execute Validation
+
+For each task, verify:
+
+#### Task Completion Check
+- Checkbox is `[x]` in tasks.md
+- If not completed, flag as "Task not marked complete"
+
+#### Test Coverage Check
+- Tests exist for task-related functionality
+- Tests pass (no failures or errors)
+- Use Bash to run test commands (e.g., `npm test`, `pytest`)
+- If tests fail or don't exist, flag as "Test coverage issue"
+
+#### Requirements Traceability
+- Identify EARS requirements related to the task
+- Use Grep to search implementation for evidence of requirement coverage
+- If requirement not traceable to code, flag as "Requirement not implemented"
+
+#### Design Alignment
+- Check if design.md structure is reflected in implementation
+- Verify key interfaces, components, and modules exist
+- Use Grep/LS to confirm file structure matches design
+- If misalignment found, flag as "Design deviation"
+
+#### Regression Check
+- Run full test suite (if available)
+- Verify no existing tests are broken
+- If regressions detected, flag as "Regression detected"
+
+### 4. Generate Report
+
+Provide summary in the language specified in spec.json:
+- Validation summary by feature
+- Coverage report (tasks, requirements, design)
+- Issues and deviations with severity (Critical/Warning)
+- GO/NO-GO decision
+
+## Important Constraints
+- **Conversation-aware**: Prioritize conversation history for auto-detection
+- **Non-blocking warnings**: Design deviations are warnings unless critical
+- **Test-first focus**: Test coverage is mandatory for GO decision
+- **Traceability required**: All requirements must be traceable to implementation
+</instructions>
+
+## Tool Guidance
+- **Conversation parsing**: Extract `/kiro:spec-impl` patterns from history
+- **Read context**: Load all specs and steering before validation
+- **Bash for tests**: Execute test commands to verify pass status
+- **Grep for traceability**: Search codebase for requirement evidence
+- **LS/Glob for structure**: Verify file structure matches design
+
+## Output Description
+
+Provide output in the language specified in spec.json with:
+
+1. **Detected Target**: Features and tasks being validated (if auto-detected)
+2. **Validation Summary**: Brief overview per feature (pass/fail counts)
+3. **Issues**: List of validation failures with severity and location
+4. **Coverage Report**: Requirements/design/task coverage percentages
+5. **Decision**: GO (ready for next phase) / NO-GO (needs fixes)
+
+**Format Requirements**:
+- Use Markdown headings and tables for clarity
+- Flag critical issues with ⚠️ or 🔴
+- Keep summary concise (under 400 words)
+
+## Safety & Fallback
+
+### Error Scenarios
+- **No Implementation Found**: If no `/kiro:spec-impl` in history and no `[x]` tasks, report "No implementations detected"
+- **Test Command Unknown**: If test framework unclear, warn and skip test validation (manual verification required)
+- **Missing Spec Files**: If spec.json/requirements.md/design.md missing, stop with error
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+
+### Next Steps Guidance
+
+**If GO Decision**:
+- Implementation validated and ready
+- Proceed to deployment or next feature
+
+**If NO-GO Decision**:
+- Address critical issues listed
+- Re-run `/kiro:spec-impl <feature> [tasks]` for fixes
+- Re-validate with `/kiro:validate-impl [feature] [tasks]`
+
+**Note**: Validation is recommended after implementation to ensure spec alignment and quality.

+ 116 - 0
.claude/commands/learn.md

@@ -0,0 +1,116 @@
+---
+name: learn
+description: /learn - Pattern Extraction for GROWI
+---
+
+# /learn - Pattern Extraction for GROWI
+
+Extract reusable problem-solving patterns from development sessions and save them as auto-invoked Skills.
+
+## Core Purpose
+
+Capture "non-trivial problems" solved during GROWI development, converting them into reusable skills that will be automatically applied in future sessions.
+
+## Pattern Categories to Extract
+
+Focus on four key areas:
+
+1. **Error Resolution** — Document what went wrong, root causes, and fixes applicable to similar issues (e.g., Mongoose query pitfalls, Next.js hydration errors, TypeScript strict mode issues)
+
+2. **Debugging Techniques** — Capture non-obvious diagnostic steps and tool combinations (e.g., MongoDB query profiling, React DevTools with Jotai, Vitest debugging patterns)
+
+3. **Workarounds** — Record library quirks, API limitations, and version-specific solutions (e.g., @headless-tree edge cases, Socket.io reconnection handling, SWR cache invalidation)
+
+4. **GROWI Patterns** — Note codebase conventions, architecture decisions, and integration approaches (e.g., feature-based structure, Jotai + Socket.io sync, API v3 design patterns)
+
+## Skill File Structure
+
+Extracted patterns are saved in **appropriate skill directories** based on the scope of the pattern:
+
+**Workspace-specific patterns**:
+- `apps/{workspace}/.claude/skills/learned/{topic-name}/SKILL.md`
+- `packages/{package}/.claude/skills/learned/{topic-name}/SKILL.md`
+- Examples: patterns specific to a single app or package
+
+**Global patterns** (monorepo-wide):
+- `.claude/skills/learned/{topic-name}/SKILL.md`
+- Examples: patterns applicable across all workspaces
+
+### File Template
+
+```yaml
+---
+name: descriptive-name
+description: Brief description (auto-invoked when working on related code)
+---
+
+## Problem
+[What was the issue]
+
+## Solution
+[How it was solved]
+
+## Example
+[Code snippet or scenario]
+
+## When to Apply
+[Specific conditions where this pattern is useful]
+```
+
+## GROWI-Specific Examples
+
+Topics commonly learned in GROWI development:
+
+**Apps/app-specific** (`apps/app/.claude/skills/learned/`):
+- `page-save-origin-semantics` — Origin-based conflict detection for collaborative editing
+- `socket-jotai-integration` — Real-time state synchronization patterns
+- `api-v3-error-handling` — RESTful API error response patterns
+- `mongodb-query-optimization` — Mongoose indexing and aggregation patterns
+
+**Global monorepo patterns** (`.claude/skills/learned/`):
+- `virtualized-tree-patterns` — @headless-tree + @tanstack/react-virtual optimizations (if used across apps)
+- `jotai-atom-composition` — Derived atoms and state composition (if shared pattern)
+- `turborepo-cache-invalidation` — Build cache debugging techniques
+- `pnpm-workspace-dependencies` — Workspace dependency resolution issues
+
+## Quality Guidelines
+
+**Extract:**
+- Patterns that will save time in future sessions
+- Non-obvious solutions worth remembering
+- Integration techniques between GROWI's tech stack
+- Performance optimizations with measurable impact
+
+**Avoid:**
+- Trivial fixes (typos, syntax errors)
+- One-time issues (service outages, environment-specific problems)
+- Information already documented in existing Skills
+- Feature-specific details (these stay in code comments)
+
+## Workflow
+
+1. User triggers `/learn` after solving a complex problem
+2. Review the session to identify valuable patterns
+3. Draft skill file(s) with clear structure
+4. **Autonomously determine the appropriate directory**:
+   - Analyze the pattern's scope (which files/modules were involved)
+   - If pattern is specific to a workspace in `apps/*` or `packages/*`:
+     - Save to `{workspace}/.claude/skills/learned/{topic-name}/SKILL.md`
+   - If pattern is applicable across multiple workspaces:
+     - Save to `.claude/skills/learned/{topic-name}/SKILL.md`
+5. Skills automatically apply in future sessions when working on related code
+
+### Directory Selection Logic
+
+**Workspace-specific** (save to `{workspace}/.claude/skills/learned/`):
+- Pattern involves workspace-specific concepts, models, or APIs
+- References files primarily in one `apps/*` or `packages/*` directory
+- Example: Page save logic in `apps/app` → `apps/app/.claude/skills/learned/`
+
+**Global** (save to `.claude/skills/learned/`):
+- Pattern applies across multiple workspaces
+- Involves monorepo-wide tools (Turborepo, pnpm, Biome, Vitest)
+- Shared coding patterns or architectural principles
+- Example: Turborepo caching pitfall → `.claude/skills/learned/`
+
+Learned skills are automatically invoked based on their description when working on related code.

+ 287 - 0
.claude/commands/tdd.md

@@ -0,0 +1,287 @@
+---
+name: tdd
+description: Enforce test-driven development workflow. Scaffold interfaces, generate tests FIRST, then implement minimal code to pass. Ensure 80%+ coverage.
+---
+
+# TDD Command
+
+This command enforces test-driven development methodology for GROWI.
+
+---
+
+## ⚠️ CRITICAL: MANDATORY TDD PROTOCOL
+
+**THIS IS NOT OPTIONAL. THIS IS A TEAM REQUIREMENT.**
+
+### Non-Negotiable Rules
+
+1. **ALL phases of TDD MUST be followed: RED → GREEN → REFACTOR**
+2. **Tests MUST be written BEFORE any implementation code (RED phase)**
+3. **Implementation MUST be minimal and only to pass tests (GREEN phase)**
+4. **Tests MUST remain passing during refactoring (REFACTOR phase)**
+5. **NEVER skip any phase of the TDD cycle**
+6. **If you believe you cannot follow TDD, STOP and ask the user IMMEDIATELY**
+
+### Violation Protocol
+
+If you find yourself about to deviate from the TDD cycle:
+
+1. **STOP IMMEDIATELY** - Do not proceed with any phase out of order
+2. **ASK THE USER** - Explain why you think following TDD is difficult
+3. **WAIT FOR APPROVAL** - Do not proceed without explicit user consent
+4. **DOCUMENT THE EXCEPTION** - If approved, document why TDD was skipped
+
+**Example of required user confirmation:**
+
+```
+"This task requires writing integration tests with complex setup
+(database mocks, authentication, etc.).
+
+I have two options:
+
+Option A: Follow strict TDD (recommended)
+- RED: Write failing integration test first
+- GREEN: Implement minimal code to pass
+- REFACTOR: Improve code while keeping tests green
+- May take 15-30 minutes for test setup
+- Ensures proper test coverage
+
+Option B: Skip TDD for this specific change
+- Implement directly
+- Verify with existing test suite only
+- Risk: No specific test for this change
+
+Which approach would you prefer?"
+```
+
+**Never assume. Always ask.**
+
+---
+
+## Pre-Implementation Checklist
+
+Before writing ANY code, confirm:
+
+- [ ] I have read and understood the task requirements
+- [ ] I am starting with Step 1: Write a FAILING test (RED phase)
+- [ ] I have NOT written any implementation code yet
+- [ ] I will follow all phases: RED → GREEN → REFACTOR
+- [ ] If I cannot follow the complete TDD cycle, I have ASKED THE USER for guidance
+
+**If any checkbox is unchecked, STOP and ask the user.**
+
+---
+
+## What This Command Does
+
+1. **Scaffold Interfaces** - Define types/interfaces first
+2. **Generate Tests First** - Write failing tests (RED)
+3. **Implement Minimal Code** - Write just enough to pass (GREEN)
+4. **Refactor** - Improve code while keeping tests green (REFACTOR)
+5. **Verify Coverage** - Ensure 80%+ test coverage
+
+## When to Use
+
+Use `/tdd` when:
+- Implementing new features
+- Adding new functions/components
+- Fixing bugs (write test that reproduces bug first)
+- Refactoring existing code
+- Building critical business logic
+
+## TDD Cycle
+
+```
+RED → GREEN → REFACTOR → REPEAT
+
+RED:      Write a failing test
+GREEN:    Write minimal code to pass
+REFACTOR: Improve code, keep tests passing
+REPEAT:   Next feature/scenario
+```
+
+## Example Usage
+
+```
+User: /tdd I need a function to validate page paths
+
+Agent:
+```
+
+# TDD Session: Page Path Validator
+
+## Step 1: Define Interface (SCAFFOLD)
+
+```typescript
+// src/utils/page-path-validator.ts
+export interface ValidationResult {
+  isValid: boolean;
+  errorMessage?: string;
+}
+
+export function validatePagePath(path: string): ValidationResult {
+  throw new Error('Not implemented');
+}
+```
+
+## Step 2: Write Failing Test (RED)
+
+**⚠️ CHECKPOINT: Confirm you have NOT written any implementation code yet.**
+
+```typescript
+// src/utils/page-path-validator.spec.ts
+describe('validatePagePath', () => {
+  it('should return valid for correct path', () => {
+    const result = validatePagePath('/user/docs/page');
+    expect(result.isValid).toBe(true);
+  });
+
+  it('should reject path without leading slash', () => {
+    const result = validatePagePath('user/docs');
+    expect(result.isValid).toBe(false);
+    expect(result.errorMessage).toBeDefined();
+  });
+
+  it('should reject empty path', () => {
+    const result = validatePagePath('');
+    expect(result.isValid).toBe(false);
+  });
+});
+```
+
+## Step 3: Run Tests - Verify FAIL
+
+**⚠️ MANDATORY: Tests MUST fail before proceeding to implementation.**
+
+```bash
+turbo run test --filter @growi/app -- src/utils/page-path-validator.spec.ts
+
+FAIL src/utils/page-path-validator.spec.ts
+  ✕ should return valid for correct path
+    Error: Not implemented
+```
+
+**✅ CHECKPOINT PASSED: Tests fail as expected. Ready to implement.**
+
+**❌ If tests pass or don't run: STOP. Fix the test first.**
+
+## Step 4: Implement Minimal Code (GREEN)
+
+**⚠️ CHECKPOINT: Only write the MINIMUM code needed to pass the tests.**
+
+```typescript
+export function validatePagePath(path: string): ValidationResult {
+  if (!path) {
+    return { isValid: false, errorMessage: 'Path cannot be empty' };
+  }
+  if (!path.startsWith('/')) {
+    return { isValid: false, errorMessage: 'Path must start with /' };
+  }
+  return { isValid: true };
+}
+```
+
+## Step 5: Run Tests - Verify PASS
+
+**⚠️ MANDATORY: ALL tests MUST pass before proceeding to refactoring.**
+
+```bash
+turbo run test --filter @growi/app -- src/utils/page-path-validator.spec.ts
+
+PASS  ✓ All tests passing!
+```
+
+**✅ CHECKPOINT PASSED: Ready to refactor if needed.**
+
+**❌ If tests fail: Fix implementation, do NOT move to refactoring.**
+
+## Step 6: Check Coverage
+
+**⚠️ MANDATORY: Verify test coverage meets requirements (80% minimum).**
+
+```bash
+cd {package_dir} && pnpm vitest run --coverage src/utils/page-path-validator.spec.ts
+
+Coverage: 100% ✅ (Target: 80%)
+```
+
+**✅ TDD CYCLE COMPLETE: All phases completed successfully.**
+
+- ✅ RED: Failing tests written
+- ✅ GREEN: Implementation passes tests
+- ✅ REFACTOR: Code improved (if needed)
+- ✅ COVERAGE: 80%+ achieved
+
+## TDD Best Practices
+
+**DO:**
+- ✅ Write the test FIRST, before any implementation
+- ✅ Run tests and verify they FAIL before implementing
+- ✅ Write minimal code to make tests pass
+- ✅ Refactor only after tests are green
+- ✅ Add edge cases and error scenarios
+- ✅ Aim for 80%+ coverage (100% for critical code)
+- ✅ Use `vitest-mock-extended` for type-safe mocks
+
+**DON'T:**
+- ❌ Write implementation before tests
+- ❌ Skip running tests after each change
+- ❌ Write too much code at once
+- ❌ Ignore failing tests
+- ❌ Test implementation details (test behavior)
+- ❌ Mock everything (prefer integration tests)
+
+## Test Types to Include
+
+**Unit Tests** (`*.spec.ts`):
+- Happy path scenarios
+- Edge cases (empty, null, max values)
+- Error conditions
+- Boundary values
+
+**Integration Tests** (`*.integ.ts`):
+- API endpoints
+- Database operations
+- External service calls
+
+**Component Tests** (`*.spec.tsx`):
+- React components with hooks
+- User interactions
+- Jotai state integration
+
+## Coverage Requirements
+
+- **80% minimum** for all code
+- **100% required** for:
+  - Authentication/authorization logic
+  - Security-critical code
+  - Core business logic (page operations, permissions)
+  - Data validation utilities
+
+## Important Notes
+
+**MANDATORY - NO EXCEPTIONS**: The complete TDD cycle MUST be followed:
+
+1. **RED** - Write failing test FIRST
+2. **GREEN** - Implement minimal code to pass the test
+3. **REFACTOR** - Improve code while keeping tests green
+
+**Absolute Requirements:**
+- ❌ NEVER skip the RED phase
+- ❌ NEVER skip the GREEN phase
+- ❌ NEVER skip the REFACTOR phase
+- ❌ NEVER write implementation code before tests
+- ❌ NEVER proceed without explicit user approval if you cannot follow TDD
+
+**If you violate these rules:**
+1. STOP immediately
+2. Discard any implementation code written before tests
+3. Inform the user of the violation
+4. Start over with RED phase
+
+**This is a team development standard. Violations are not acceptable.**
+
+## Related Skills
+
+This command uses patterns from:
+- **growi-testing-patterns** - Vitest, React Testing Library, vitest-mock-extended

+ 217 - 0
.claude/rules/coding-style.md

@@ -0,0 +1,217 @@
+# Coding Style
+
+General coding standards and best practices. These rules apply to all code in the GROWI monorepo.
+
+## Immutability (CRITICAL)
+
+ALWAYS create new objects, NEVER mutate:
+
+```typescript
+// ❌ WRONG: Mutation
+function updateUser(user, name) {
+  user.name = name  // MUTATION!
+  return user
+}
+
+// ✅ CORRECT: Immutability
+function updateUser(user, name) {
+  return {
+    ...user,
+    name
+  }
+}
+
+// ✅ CORRECT: Array immutable update
+const updatedPages = pages.map(p => p.id === id ? { ...p, title: newTitle } : p);
+
+// ❌ WRONG: Array mutation
+pages[index].title = newTitle;
+```
+
+## File Organization
+
+MANY SMALL FILES > FEW LARGE FILES:
+
+- High cohesion, low coupling
+- 200-400 lines typical, 800 max
+- Functions < 50 lines
+- Extract utilities from large components
+- Organize by feature/domain, not by type
+
+## Naming Conventions
+
+### Variables and Functions
+
+- **camelCase** for variables and functions
+- **PascalCase** for classes, interfaces, types, React components
+- **UPPER_SNAKE_CASE** for constants
+
+```typescript
+const pageId = '123';
+const MAX_PAGE_SIZE = 1000;
+
+function getPageById(id: string) { }
+class PageService { }
+interface PageData { }
+type PageStatus = 'draft' | 'published';
+```
+
+### Files and Directories
+
+- **PascalCase** for React components: `Button.tsx`, `PageTree.tsx`
+- **kebab-case** for utilities: `page-utils.ts`
+- **lowercase** for directories: `features/page-tree/`, `utils/`
+
+## Export Style
+
+**Prefer named exports** over default exports:
+
+```typescript
+// ✅ Good: Named exports
+export const MyComponent = () => { };
+export function myFunction() { }
+export class MyClass { }
+
+// ❌ Avoid: Default exports
+export default MyComponent;
+```
+
+**Why?**
+- Better refactoring (IDEs can reliably rename across files)
+- Better tree shaking
+- Explicit imports improve readability
+- No ambiguity (import name matches export name)
+
+**Exception**: Next.js pages require default exports.
+
+## Type Safety
+
+**Always provide explicit types** for function parameters and return values:
+
+```typescript
+// ✅ Good: Explicit types
+function createPage(path: string, body: string): Promise<Page> {
+  // ...
+}
+
+// ❌ Avoid: Implicit any
+function createPage(path, body) {
+  // ...
+}
+```
+
+Use `import type` for type-only imports:
+
+```typescript
+import type { PageData } from '~/interfaces/page';
+```
+
+## Error Handling
+
+ALWAYS handle errors comprehensively:
+
+```typescript
+try {
+  const result = await riskyOperation();
+  return result;
+} catch (error) {
+  logger.error('Operation failed:', { error, context });
+  throw new Error('Detailed user-friendly message');
+}
+```
+
+## Async/Await
+
+Prefer async/await over Promise chains:
+
+```typescript
+// ✅ Good: async/await
+async function loadPages() {
+  const pages = await fetchPages();
+  const enriched = await enrichPageData(pages);
+  return enriched;
+}
+
+// ❌ Avoid: Promise chains
+function loadPages() {
+  return fetchPages()
+    .then(pages => enrichPageData(pages))
+    .then(enriched => enriched);
+}
+```
+
+## Comments
+
+**Write comments in English** (even for Japanese developers):
+
+```typescript
+// ✅ Good: English comment
+// Calculate the total number of pages in the workspace
+
+// ❌ Avoid: Japanese comment
+// ワークスペース内のページ総数を計算
+```
+
+**When to comment**:
+- Complex algorithms or business logic
+- Non-obvious workarounds
+- Public APIs and interfaces
+
+**When NOT to comment**:
+- Self-explanatory code (good naming is better)
+- Restating what the code does
+
+## Test File Placement
+
+**Co-locate tests with source files** in the same directory:
+
+```
+src/utils/
+├── helper.ts
+└── helper.spec.ts        # Test next to source
+
+src/components/Button/
+├── Button.tsx
+└── Button.spec.tsx       # Test next to component
+```
+
+### Test File Naming
+
+- Unit tests: `*.spec.{ts,js}`
+- Integration tests: `*.integ.ts`
+- Component tests: `*.spec.{tsx,jsx}`
+
+## Git Commit Messages
+
+Follow conventional commit format:
+
+```
+<type>(<scope>): <subject>
+
+<body>
+```
+
+**Types**: `feat`, `fix`, `refactor`, `test`, `docs`, `chore`
+
+**Example**:
+```
+feat(page-tree): add virtualization for large trees
+
+Implemented react-window for virtualizing page tree
+to improve performance with 10k+ pages.
+```
+
+## Code Quality Checklist
+
+Before marking work complete:
+
+- [ ] Code is readable and well-named
+- [ ] Functions are small (<50 lines)
+- [ ] Files are focused (<800 lines)
+- [ ] No deep nesting (>4 levels)
+- [ ] Proper error handling
+- [ ] No console.log statements (use logger)
+- [ ] No mutation (immutable patterns used)
+- [ ] Named exports (except Next.js pages)
+- [ ] English comments
+- [ ] Co-located tests

+ 37 - 0
.claude/rules/performance.md

@@ -0,0 +1,37 @@
+# Performance Optimization
+
+## Model Selection Strategy
+
+**Haiku** - Lightweight tasks:
+- Frequent, simple agent invocations
+- Straightforward code generation
+- Worker agents in multi-agent systems
+
+**Sonnet** - Standard development:
+- Main development work
+- Orchestrating multi-agent workflows
+- Most coding tasks
+
+**Opus** - Complex reasoning:
+- Architectural decisions
+- Difficult debugging
+- Research and analysis
+
+## Context Window Management
+
+Avoid using the last 20% of the context window for:
+- Large-scale refactoring
+- Multi-file feature implementation
+- Complex debugging sessions
+
+Tasks that are less sensitive to remaining context:
+- Single-file edits
+- Simple bug fixes
+- Documentation updates
+
+## Build Troubleshooting
+
+If build fails:
+1. Use **build-error-resolver** agent
+2. Run `turbo run lint --filter {package}`
+3. Fix incrementally, verify after each fix

+ 33 - 0
.claude/rules/security.md

@@ -0,0 +1,33 @@
+# Security Guidelines
+
+## Mandatory Security Checks
+
+Before ANY commit:
+- [ ] No hardcoded secrets (API keys, passwords, tokens)
+- [ ] All user inputs validated and sanitized
+- [ ] NoSQL injection prevention (use Mongoose properly)
+- [ ] XSS prevention (sanitize HTML output)
+- [ ] CSRF protection enabled
+- [ ] Authentication/authorization verified
+- [ ] Error messages don't leak sensitive data
+
+## Secret Management
+
+```typescript
+// NEVER: Hardcoded secrets
+const apiKey = "sk-xxxxx"
+
+// ALWAYS: Environment variables
+const apiKey = process.env.API_KEY
+if (!apiKey) {
+  throw new Error('API_KEY not configured')
+}
+```
+
+## Security Response Protocol
+
+If security issue found:
+1. STOP immediately
+2. Use **security-reviewer** agent
+3. Fix CRITICAL issues before continuing
+4. Rotate any exposed secrets

+ 38 - 0
.claude/rules/testing.md

@@ -0,0 +1,38 @@
+# Testing Rules
+
+## Package Manager (CRITICAL)
+
+**NEVER use `npx` to run tests. ALWAYS use `pnpm`.**
+
+```bash
+# ❌ WRONG
+npx vitest run yjs.integ
+
+# ✅ CORRECT
+pnpm vitest run yjs.integ
+```
+
+## Test Execution Commands
+
+### Individual Test File (from package directory)
+
+```bash
+# Use partial file name - Vitest auto-matches
+pnpm vitest run yjs.integ
+pnpm vitest run helper.spec
+pnpm vitest run Button.spec
+
+# Flaky test detection
+pnpm vitest run yjs.integ --repeat=10
+```
+
+- Use **partial file name** (no `src/` prefix or full path needed)
+- No `--project` flag needed (Vitest auto-detects from file extension)
+
+### All Tests for a Package (from monorepo root)
+
+```bash
+turbo run test --filter @growi/app
+```
+
+For testing patterns (mocking, assertions, structure), see the `.claude/skills/learned/essential-test-patterns` skill.

+ 23 - 0
.claude/settings.json

@@ -0,0 +1,23 @@
+{
+  "hooks": {
+    "PostToolUse": [
+      {
+        "matcher": "Write|Edit",
+        "hooks": [
+          {
+            "type": "command",
+            "command": "if [[ \"$FILE\" == */apps/* ]] || [[ \"$FILE\" == */packages/* ]]; then REPO_ROOT=$(echo \"$FILE\" | sed 's|/\\(apps\\|packages\\)/.*|/|'); cd \"$REPO_ROOT\" && pnpm biome check --write \"$FILE\" 2>/dev/null || true; fi",
+            "timeout": 30,
+            "description": "Auto-format edited files in apps/* and packages/* with Biome"
+          }
+        ]
+      }
+    ]
+  },
+  "enabledPlugins": {
+    "context7@claude-plugins-official": true,
+    "github@claude-plugins-official": true,
+    "typescript-lsp@claude-plugins-official": true,
+    "playwright@claude-plugins-official": true
+  }
+}

+ 122 - 0
.claude/skills/learned/essential-test-design/SKILL.md

@@ -0,0 +1,122 @@
+---
+name: essential-test-design
+description: Write tests that verify observable behavior (contract), not implementation details. Auto-invoked when writing or reviewing tests.
+---
+
+## Problem
+
+Tests that are tightly coupled to implementation details cause two failures:
+
+1. **False positives** — Tests pass even when behavior is broken (e.g., delay shortened but test still passes because it only checks `setTimeout` was called)
+2. **False negatives** — Tests fail even when behavior is correct (e.g., the implementation switches from `setTimeout` to a `delay()` utility, and the spy on `setTimeout` breaks)
+
+Both undermine the purpose of testing: detecting regressions in behavior.
+
+## Principle: Test the Contract, Not the Mechanism
+
+A test is "essential" when it:
+- **Fails if the behavior degrades** (catches real bugs)
+- **Passes if the behavior is preserved** (survives refactoring)
+- **Does not depend on how the behavior is implemented** (implementation-agnostic)
+
+Ask: "What does the caller of this function experience?" — test that.
+
+## Anti-Patterns and Corrections
+
+### Anti-Pattern 1: Implementation Spy
+
+```typescript
+// BAD: Tests implementation, not behavior
+// Breaks if implementation changes from setTimeout to any other delay mechanism
+const spy = vi.spyOn(global, 'setTimeout');
+await exponentialBackoff(1);
+expect(spy).toHaveBeenCalledWith(expect.any(Function), 1000);
+```
+
+### Anti-Pattern 2: Arrange That Serves the Assert
+
+```typescript
+// BAD: The "arrange" is set up only to make the "assert" trivially pass
+// This is a self-fulfilling prophecy, not a meaningful test
+vi.advanceTimersByTime(1000);
+await promise;
+// No assertion — "it didn't throw" is not a valuable test
+```
+
+### Correct: Behavior Boundary Test
+
+```typescript
+// GOOD: Tests the observable contract
+// "Does not resolve before the expected delay, resolves at the expected delay"
+let resolved = false;
+mailService.exponentialBackoff(1).then(() => { resolved = true });
+
+await vi.advanceTimersByTimeAsync(999);
+expect(resolved).toBe(false);  // Catches: delay too short
+
+await vi.advanceTimersByTimeAsync(1);
+expect(resolved).toBe(true);   // Catches: delay too long or hangs
+```
+
+## Decision Framework
+
+When writing a test, ask these questions in order:
+
+1. **What is the contract?** — What does the caller expect to experience?
+   - e.g., "Wait for N ms before resolving"
+2. **What breakage should this test catch?** — Define the regression scenario
+   - e.g., "Someone changes the delay from 1000ms to 500ms"
+3. **Would this test still pass if I refactored the internals?** — If no, you're testing implementation
+   - e.g., Switching from `setTimeout` to `Bun.sleep()` shouldn't break the test
+4. **Would this test fail if the behavior degraded?** — If no, the test has no value
+   - e.g., If delay is halved, `expect(resolved).toBe(false)` at 999ms would catch it
+
+## Common Scenarios
+
+### Async Delay / Throttle / Debounce
+
+Use fake timers + boundary assertions (as shown above).
+
+### Data Transformation
+
+Assert on output shape/values, not on which internal helper was called.
+
+```typescript
+// BAD
+const spy = vi.spyOn(utils, 'formatDate');
+transform(input);
+expect(spy).toHaveBeenCalled();
+
+// GOOD
+const result = transform(input);
+expect(result.date).toBe('2026-01-01');
+```
+
+### Side Effects (API calls, DB writes)
+
+Mocking the boundary (API/DB) is acceptable — that IS the observable behavior.
+
+```typescript
+// OK: The contract IS "sends an email via mailer"
+expect(mockMailer.sendMail).toHaveBeenCalledWith(
+  expect.objectContaining({ to: 'user@example.com' })
+);
+```
+
+### Retry Logic
+
+Test the number of attempts and the final outcome, not the internal flow.
+
+```typescript
+// GOOD: Contract = "retries N times, then fails with specific error"
+mockMailer.sendMail.mockRejectedValue(new Error('fail'));
+await expect(sendWithRetry(config, 3)).rejects.toThrow('failed after 3 attempts');
+expect(mockMailer.sendMail).toHaveBeenCalledTimes(3);
+```
+
+## When to Apply
+
+- Writing new test cases for any function or method
+- Reviewing existing tests for flakiness or brittleness
+- Refactoring tests after fixing flaky CI failures
+- Code review of test pull requests

+ 494 - 0
.claude/skills/learned/essential-test-patterns/SKILL.md

@@ -0,0 +1,494 @@
+---
+name: essential-test-patterns
+description: GROWI testing patterns with Vitest, React Testing Library, and vitest-mock-extended.
+---
+
+# GROWI Testing Patterns
+
+GROWI uses **Vitest** for all testing (unit, integration, component). This skill covers universal testing patterns applicable across the monorepo.
+
+## Test File Placement (Global Standard)
+
+Place test files **in the same directory** as the source file:
+
+```
+src/components/Button/
+├── Button.tsx
+└── Button.spec.tsx       # Component test
+
+src/utils/
+├── helper.ts
+└── helper.spec.ts        # Unit test
+
+src/services/api/
+├── pageService.ts
+└── pageService.integ.ts  # Integration test
+```
+
+## Test Types & Environments
+
+| File Pattern | Type | Environment | Use Case |
+|--------------|------|-------------|----------|
+| `*.spec.{ts,js}` | Unit Test | Node.js | Pure functions, utilities, services |
+| `*.integ.ts` | Integration Test | Node.js + DB | API routes, database operations |
+| `*.spec.{tsx,jsx}` | Component Test | happy-dom | React components |
+
+Vitest automatically selects the environment based on file extension and configuration.
+
+## Vitest Configuration
+
+### Global APIs (No Imports Needed)
+
+All GROWI packages configure Vitest globals in `tsconfig.json`:
+
+```json
+{
+  "compilerOptions": {
+    "types": ["vitest/globals"]
+  }
+}
+```
+
+This enables auto-import of testing APIs:
+
+```typescript
+// No imports needed!
+describe('MyComponent', () => {
+  it('should render', () => {
+    expect(true).toBe(true);
+  });
+
+  beforeEach(() => {
+    // Setup
+  });
+
+  afterEach(() => {
+    // Cleanup
+  });
+});
+```
+
+**Available globals**: `describe`, `it`, `test`, `expect`, `beforeEach`, `afterEach`, `beforeAll`, `afterAll`, `vi`
+
+## Type-Safe Mocking with vitest-mock-extended
+
+### Basic Usage
+
+`vitest-mock-extended` provides **fully type-safe mocks** with TypeScript autocomplete:
+
+```typescript
+import { mockDeep, type DeepMockProxy } from 'vitest-mock-extended';
+
+// Create type-safe mock
+const mockRouter: DeepMockProxy<NextRouter> = mockDeep<NextRouter>();
+
+// TypeScript autocomplete works!
+mockRouter.asPath = '/test-path';
+mockRouter.query = { id: '123' };
+mockRouter.push.mockResolvedValue(true);
+
+// Use in tests
+expect(mockRouter.push).toHaveBeenCalledWith('/new-path');
+```
+
+### Complex Types with Optional Properties
+
+```typescript
+interface ComplexProps {
+  currentPageId?: string | null;
+  currentPathname?: string | null;
+  data?: Record<string, unknown>;
+  onSubmit?: (value: string) => void;
+}
+
+const mockProps: DeepMockProxy<ComplexProps> = mockDeep<ComplexProps>();
+mockProps.currentPageId = 'page-123';
+mockProps.data = { key: 'value' };
+mockProps.onSubmit?.mockImplementation((value) => {
+  console.log(value);
+});
+```
+
+### Why vitest-mock-extended?
+
+- ✅ **Type safety**: Catches typos at compile time
+- ✅ **Autocomplete**: IDE suggestions for all properties/methods
+- ✅ **Deep mocking**: Automatically mocks nested objects
+- ✅ **Vitest integration**: Works seamlessly with `vi.fn()`
+
+## React Testing Library Patterns
+
+### Basic Component Test
+
+```typescript
+import { render } from '@testing-library/react';
+import { Button } from './Button';
+
+describe('Button', () => {
+  it('should render with text', () => {
+    const { getByText } = render(<Button>Click me</Button>);
+    expect(getByText('Click me')).toBeInTheDocument();
+  });
+
+  it('should call onClick when clicked', async () => {
+    const onClick = vi.fn();
+    const { getByRole } = render(<Button onClick={onClick}>Click</Button>);
+
+    const button = getByRole('button');
+    await userEvent.click(button);
+
+    expect(onClick).toHaveBeenCalledTimes(1);
+  });
+});
+```
+
+### Testing with Jotai (Global Pattern)
+
+When testing components that use Jotai atoms, wrap with `<Provider>`:
+
+```typescript
+import { render } from '@testing-library/react';
+import { Provider } from 'jotai';
+
+const renderWithJotai = (ui: React.ReactElement) => {
+  const Wrapper = ({ children }: { children: React.ReactNode }) => (
+    <Provider>{children}</Provider>
+  );
+  return render(ui, { wrapper: Wrapper });
+};
+
+describe('ComponentWithJotai', () => {
+  it('should render with atom state', () => {
+    const { getByText } = renderWithJotai(<MyComponent />);
+    expect(getByText('Hello')).toBeInTheDocument();
+  });
+});
+```
+
+### Isolated Jotai Scope (For Testing)
+
+To isolate atom state between tests:
+
+```typescript
+import { createScope } from 'jotai-scope';
+
+describe('ComponentWithIsolatedState', () => {
+  it('test 1', () => {
+    const scope = createScope();
+    const { getByText } = renderWithJotai(<MyComponent />, scope);
+    // ...
+  });
+
+  it('test 2', () => {
+    const scope = createScope(); // Fresh scope
+    const { getByText } = renderWithJotai(<MyComponent />, scope);
+    // ...
+  });
+});
+```
+
+## Async Testing Patterns (Global Standard)
+
+### Using `act()` and `waitFor()`
+
+When testing async state updates:
+
+```typescript
+import { waitFor, act } from '@testing-library/react';
+import { renderHook } from '@testing-library/react';
+
+test('async hook', async () => {
+  const { result } = renderHook(() => useMyAsyncHook());
+
+  // Trigger async action
+  await act(async () => {
+    result.current.triggerAsyncAction();
+  });
+
+  // Wait for state update
+  await waitFor(() => {
+    expect(result.current.isLoading).toBe(false);
+  });
+
+  expect(result.current.data).toBeDefined();
+});
+```
+
+### Testing Async Functions
+
+```typescript
+it('should fetch data successfully', async () => {
+  const data = await fetchData();
+  expect(data).toEqual({ id: '123', name: 'Test' });
+});
+
+it('should handle errors', async () => {
+  await expect(fetchDataWithError()).rejects.toThrow('Error');
+});
+```
+
+## Advanced Assertions
+
+### Object Matching
+
+```typescript
+expect(mockFunction).toHaveBeenCalledWith(
+  expect.objectContaining({
+    pathname: '/expected-path',
+    data: expect.any(Object),
+    timestamp: expect.any(Number),
+  })
+);
+```
+
+### Array Matching
+
+```typescript
+expect(result).toEqual(
+  expect.arrayContaining([
+    expect.objectContaining({ id: '123' }),
+    expect.objectContaining({ id: '456' }),
+  ])
+);
+```
+
+### Partial Matching
+
+```typescript
+expect(user).toMatchObject({
+  name: 'John',
+  email: 'john@example.com',
+  // Other properties are ignored
+});
+```
+
+## Test Structure Best Practices
+
+### AAA Pattern (Arrange-Act-Assert)
+
+```typescript
+describe('MyComponent', () => {
+  beforeEach(() => {
+    vi.clearAllMocks(); // Clear mocks before each test
+  });
+
+  describe('rendering', () => {
+    it('should render with default props', () => {
+      // Arrange: Setup test data
+      const props = { title: 'Test' };
+
+      // Act: Render component
+      const { getByText } = render(<MyComponent {...props} />);
+
+      // Assert: Verify output
+      expect(getByText('Test')).toBeInTheDocument();
+    });
+  });
+
+  describe('user interactions', () => {
+    it('should submit form on button click', async () => {
+      // Arrange
+      const onSubmit = vi.fn();
+      const { getByRole, getByLabelText } = render(
+        <MyForm onSubmit={onSubmit} />
+      );
+
+      // Act
+      await userEvent.type(getByLabelText('Name'), 'John');
+      await userEvent.click(getByRole('button', { name: 'Submit' }));
+
+      // Assert
+      expect(onSubmit).toHaveBeenCalledWith({ name: 'John' });
+    });
+  });
+});
+```
+
+### Nested `describe` for Organization
+
+```typescript
+describe('PageService', () => {
+  describe('createPage', () => {
+    it('should create a page successfully', async () => {
+      // ...
+    });
+
+    it('should throw error if path is invalid', async () => {
+      // ...
+    });
+  });
+
+  describe('updatePage', () => {
+    it('should update page content', async () => {
+      // ...
+    });
+  });
+});
+```
+
+## Common Mocking Patterns
+
+### Mocking SWR
+
+```typescript
+vi.mock('swr', () => ({
+  default: vi.fn(() => ({
+    data: mockData,
+    error: null,
+    isLoading: false,
+    mutate: vi.fn(),
+  })),
+}));
+```
+
+### Mocking Modules
+
+```typescript
+// Mock entire module
+vi.mock('~/services/PageService', () => ({
+  PageService: {
+    findById: vi.fn().mockResolvedValue({ id: '123', title: 'Test' }),
+    create: vi.fn().mockResolvedValue({ id: '456', title: 'New' }),
+  },
+}));
+
+// Use in test
+import { PageService } from '~/services/PageService';
+
+it('should call PageService.findById', async () => {
+  await myFunction();
+  expect(PageService.findById).toHaveBeenCalledWith('123');
+});
+```
+
+### Mocking Specific Functions
+
+```typescript
+import { myFunction } from '~/utils/myUtils';
+
+vi.mock('~/utils/myUtils', () => ({
+  myFunction: vi.fn().mockReturnValue('mocked'),
+  otherFunction: vi.fn(), // Mock other exports
+}));
+```
+
+### Mocking CommonJS Modules with mock-require
+
+**IMPORTANT**: When `vi.mock()` fails with ESModule/CommonJS compatibility issues, use `mock-require` instead:
+
+```typescript
+import mockRequire from 'mock-require';
+
+describe('Service with CommonJS dependencies', () => {
+  beforeEach(() => {
+    // Mock CommonJS module before importing the code under test
+    mockRequire('legacy-module', {
+      someFunction: vi.fn().mockReturnValue('mocked'),
+      someProperty: 'mocked-value',
+    });
+  });
+
+  afterEach(() => {
+    // Clean up mocks to avoid leakage between tests
+    mockRequire.stopAll();
+  });
+
+  it('should use mocked module', async () => {
+    // Import AFTER mocking (dynamic import if needed)
+    const { MyService } = await import('~/services/MyService');
+
+    const result = MyService.doSomething();
+    expect(result).toBe('mocked');
+  });
+});
+```
+
+**When to use `mock-require`**:
+- Legacy CommonJS modules that don't work with `vi.mock()`
+- Mixed ESM/CJS environments causing module resolution issues
+- Third-party libraries with complex module systems
+- When `vi.mock()` fails with "Cannot redefine property" or "Module is not defined"
+
+**Key points**:
+- ✅ Mock **before** importing the code under test
+- ✅ Use `mockRequire.stopAll()` in `afterEach()` to prevent test leakage
+- ✅ Use dynamic imports (`await import()`) when needed
+- ✅ Works with both CommonJS and ESModule targets
+
+### Choosing the Right Mocking Strategy
+
+```typescript
+// ✅ Prefer vi.mock() for ESModules (simplest)
+vi.mock('~/modern-module', () => ({
+  myFunction: vi.fn(),
+}));
+
+// ✅ Use mock-require for CommonJS or mixed environments
+import mockRequire from 'mock-require';
+mockRequire('legacy-module', { myFunction: vi.fn() });
+
+// ✅ Use vitest-mock-extended for type-safe object mocks
+import { mockDeep } from 'vitest-mock-extended';
+const mockService = mockDeep<MyService>();
+```
+
+**Decision tree**:
+1. Can use `vi.mock()`? → Use it (simplest)
+2. CommonJS or module error? → Use `mock-require`
+3. Need type-safe object mock? → Use `vitest-mock-extended`
+
+## Integration Tests (with Database)
+
+Integration tests (`*.integ.ts`) can access in-memory databases:
+
+```typescript
+describe('PageService Integration', () => {
+  beforeEach(async () => {
+    // Setup: Seed test data
+    await Page.create({ path: '/test', body: 'content' });
+  });
+
+  afterEach(async () => {
+    // Cleanup: Clear database
+    await Page.deleteMany({});
+  });
+
+  it('should create a page', async () => {
+    const page = await PageService.create({
+      path: '/new-page',
+      body: 'content',
+    });
+
+    expect(page._id).toBeDefined();
+    expect(page.path).toBe('/new-page');
+  });
+});
+```
+
+## Testing Checklist
+
+Before committing tests, ensure:
+
+- ✅ **Co-location**: Test files are next to source files
+- ✅ **Descriptive names**: Test descriptions clearly state what is being tested
+- ✅ **AAA pattern**: Tests follow Arrange-Act-Assert structure
+- ✅ **Mocks cleared**: Use `beforeEach(() => vi.clearAllMocks())`
+- ✅ **Async handled**: Use `async/await` and `waitFor()` for async operations
+- ✅ **Type safety**: Use `vitest-mock-extended` for type-safe mocks
+- ✅ **Isolated state**: Jotai tests use separate scopes if needed
+
+## Running Tests
+
+See the `testing` rule (`.claude/rules/testing.md`) for test execution commands.
+
+## Summary: GROWI Testing Philosophy
+
+1. **Co-locate tests**: Keep tests close to source code
+2. **Type-safe mocks**: Use `vitest-mock-extended` for TypeScript support
+3. **React Testing Library**: Test user behavior, not implementation details
+4. **Async patterns**: Use `act()` and `waitFor()` for async state updates
+5. **Jotai integration**: Wrap components with `<Provider>` for atom state
+6. **Clear structure**: Use nested `describe` and AAA pattern
+7. **Clean mocks**: Always clear mocks between tests
+
+These patterns apply to **all GROWI packages** with React/TypeScript code.

+ 207 - 0
.claude/skills/monorepo-overview/SKILL.md

@@ -0,0 +1,207 @@
+---
+name: monorepo-overview
+description: GROWI monorepo structure, workspace organization, and architectural principles. Auto-invoked for all GROWI development work.
+user-invocable: false
+---
+
+# GROWI Monorepo Overview
+
+GROWI is a team collaboration wiki platform built as a monorepo using **pnpm workspace + Turborepo**.
+
+## Monorepo Structure
+
+```
+growi/
+├── apps/                    # Applications
+│   ├── app/                # Main GROWI application (Next.js + Express + MongoDB)
+│   ├── pdf-converter/      # PDF conversion microservice (Ts.ED + Puppeteer)
+│   └── slackbot-proxy/     # Slack integration proxy (Ts.ED + TypeORM + MySQL)
+├── packages/               # Shared libraries
+│   ├── core/              # Core utilities and shared logic
+│   ├── core-styles/       # Common styles (SCSS)
+│   ├── editor/            # Markdown editor components
+│   ├── ui/                # UI component library
+│   ├── pluginkit/         # Plugin framework
+│   ├── slack/             # Slack integration utilities
+│   ├── presentation/      # Presentation mode
+│   ├── pdf-converter-client/ # PDF converter client library
+│   └── remark-*/          # Markdown plugins (remark-lsx, remark-drawio, etc.)
+└── Configuration files
+    ├── pnpm-workspace.yaml
+    ├── turbo.json
+    ├── package.json
+    └── .changeset/
+```
+
+## Workspace Management
+
+### pnpm Workspace
+
+All packages are managed via **pnpm workspace**. Package references use the `workspace:` protocol:
+
+```json
+{
+  "dependencies": {
+    "@growi/core": "workspace:^",
+    "@growi/ui": "workspace:^"
+  }
+}
+```
+
+### Turborepo Orchestration
+
+Turborepo handles task orchestration with caching and parallelization:
+
+```bash
+# Run tasks across all workspaces
+turbo run dev
+turbo run test
+turbo run lint
+turbo run build
+
+# Filter to specific package
+turbo run test --filter @growi/app
+turbo run lint --filter @growi/core
+```
+
+## Architectural Principles
+
+### 1. Feature-Based Architecture (Recommended)
+
+**All packages should prefer feature-based organization**:
+
+```
+{package}/src/
+├── features/              # Feature modules
+│   ├── {feature-name}/
+│   │   ├── index.ts      # Main export
+│   │   ├── interfaces/   # TypeScript types
+│   │   ├── server/       # Server-side logic (if applicable)
+│   │   ├── client/       # Client-side logic (if applicable)
+│   │   └── utils/        # Shared utilities
+```
+
+**Benefits**:
+- Clear boundaries between features
+- Easy to locate related code
+- Facilitates gradual migration from legacy structure
+
+### 2. Server-Client Separation
+
+For full-stack packages (like apps/app), separate server and client logic:
+
+- **Server code**: Node.js runtime, database access, API routes
+- **Client code**: Browser runtime, React components, UI state
+
+This enables better code splitting and prevents server-only code from being bundled into client.
+
+### 3. Shared Libraries in packages/
+
+Common code should be extracted to `packages/`:
+
+- **core**: Utilities, constants, type definitions
+- **ui**: Reusable React components
+- **editor**: Markdown editor
+- **pluginkit**: Plugin system framework
+
+## Version Management with Changeset
+
+GROWI uses **Changesets** for version management and release notes:
+
+```bash
+# Add a changeset (after making changes)
+npx changeset
+
+# Version bump (generates CHANGELOGs and updates versions)
+pnpm run version-subpackages
+
+# Publish packages to npm (for @growi/core, @growi/pluginkit)
+pnpm run release-subpackages
+```
+
+### Changeset Workflow
+
+1. Make code changes
+2. Run `npx changeset` and describe the change
+3. Commit both code and `.changeset/*.md` file
+4. On release, run `pnpm run version-subpackages`
+5. Changesets automatically updates `CHANGELOG.md` and `package.json` versions
+
+### Version Schemes
+
+- **Main app** (`apps/app`): Manual versioning with RC prereleases
+  - `pnpm run version:patch`, `pnpm run version:prerelease`
+- **Shared libraries** (`packages/core`, `packages/pluginkit`): Changeset-managed
+- **Microservices** (`apps/pdf-converter`, `apps/slackbot-proxy`): Independent versioning
+
+## Package Categories
+
+### Applications (apps/)
+
+| Package | Description | Tech Stack |
+|---------|-------------|------------|
+| **@growi/app** | Main wiki application | Next.js (Pages Router), Express, MongoDB, Jotai, SWR |
+| **@growi/pdf-converter** | PDF export service | Ts.ED, Puppeteer |
+| **@growi/slackbot-proxy** | Slack bot proxy | Ts.ED, TypeORM, MySQL |
+
+### Core Libraries (packages/)
+
+| Package | Description | Published to npm |
+|---------|-------------|------------------|
+| **@growi/core** | Core utilities | ✅ |
+| **@growi/pluginkit** | Plugin framework | ✅ |
+| **@growi/ui** | UI components | ❌ (internal) |
+| **@growi/editor** | Markdown editor | ❌ (internal) |
+| **@growi/core-styles** | Common styles | ❌ (internal) |
+
+## Development Workflow
+
+### Initial Setup
+
+```bash
+# Install dependencies for all packages
+pnpm install
+
+# Bootstrap (install + build dependencies)
+turbo run bootstrap
+```
+
+### Daily Development
+
+```bash
+# Start all dev servers (apps/app + dependencies)
+turbo run dev
+
+# Run a specific test file (from package directory)
+pnpm vitest run yjs.integ
+
+# Run ALL tests / lint for a package
+turbo run test --filter @growi/app
+turbo run lint --filter @growi/core
+```
+
+### Cross-Package Development
+
+When modifying shared libraries (packages/*), ensure dependent apps reflect changes:
+
+1. Make changes to `packages/core`
+2. Turborepo automatically detects changes and rebuilds dependents
+3. Test in `apps/app` to verify
+
+## Key Configuration Files
+
+- **pnpm-workspace.yaml**: Defines workspace packages
+- **turbo.json**: Turborepo pipeline configuration
+- **.changeset/config.json**: Changeset configuration
+- **tsconfig.base.json**: Base TypeScript config for all packages
+- **vitest.workspace.mts**: Vitest workspace config
+- **biome.json**: Biome linter/formatter config
+
+## Design Principles Summary
+
+1. **Feature Isolation**: Use feature-based architecture for new code
+2. **Server-Client Separation**: Keep server and client code separate
+3. **Shared Libraries**: Extract common code to packages/
+4. **Type-Driven Development**: Define interfaces before implementation
+5. **Progressive Enhancement**: Migrate legacy code gradually
+6. **Version Control**: Use Changesets for release management

+ 269 - 0
.claude/skills/tech-stack/SKILL.md

@@ -0,0 +1,269 @@
+---
+name: tech-stack
+description: GROWI technology stack, build tools, and global commands. Auto-invoked for all GROWI development work.
+user-invocable: false
+---
+
+# GROWI Tech Stack
+
+## Core Technologies
+
+- **TypeScript** ~5.0.0
+- **Node.js** ^18 || ^20
+- **MongoDB** with **Mongoose** ^6.13.6 (apps/app)
+- **MySQL** with **TypeORM** 0.2.x (apps/slackbot-proxy)
+
+## Frontend Framework
+
+- **React** 18.x
+- **Next.js** (Pages Router) - Full-stack framework for apps/app
+
+## State Management & Data Fetching (Global Standard)
+
+- **Jotai** - Atomic state management (recommended for all packages with UI state)
+  - Use for UI state, form state, modal state, etc.
+  - Lightweight, TypeScript-first, minimal boilerplate
+
+- **SWR** ^2.3.2 - Data fetching with caching
+  - Use for API data fetching with automatic revalidation
+  - Works seamlessly with RESTful APIs
+
+### Why Jotai + SWR?
+
+- **Separation of concerns**: Jotai for UI state, SWR for server state
+- **Performance**: Fine-grained reactivity (Jotai) + intelligent caching (SWR)
+- **Type safety**: Both libraries have excellent TypeScript support
+- **Simplicity**: Minimal API surface, easy to learn
+
+## Build & Development Tools
+
+### Package Management
+- **pnpm** 10.4.1 - Package manager (faster, more efficient than npm/yarn)
+
+### Monorepo Orchestration
+- **Turborepo** ^2.1.3 - Build system with caching and parallelization
+
+### Linter & Formatter
+- **Biome** ^2.2.6 - Unified linter and formatter (recommended)
+  - Replaces ESLint + Prettier
+  - Significantly faster (10-100x)
+  - Configuration: `biome.json`
+
+```bash
+# Lint and format check
+biome check <files>
+
+# Auto-fix issues
+biome check --write <files>
+```
+
+- **Stylelint** ^16.5.0 - SCSS/CSS linter
+  - Configuration: `.stylelintrc.js`
+
+```bash
+# Lint styles
+stylelint "src/**/*.scss"
+```
+
+### Testing
+- **Vitest** ^2.1.1 - Unit and integration testing (recommended)
+  - Fast, Vite-powered
+  - Jest-compatible API
+  - Configuration: `vitest.workspace.mts`
+
+- **React Testing Library** ^16.0.1 - Component testing
+  - User-centric testing approach
+
+- **vitest-mock-extended** ^2.0.2 - Type-safe mocking
+  - TypeScript autocomplete for mocks
+
+- **Playwright** ^1.49.1 - E2E testing
+  - Cross-browser testing
+
+## Essential Commands (Global)
+
+### Development
+
+```bash
+# Start all dev servers (apps/app + dependencies)
+turbo run dev
+
+# Start dev server for specific package
+turbo run dev --filter @growi/app
+
+# Install dependencies for all packages
+pnpm install
+
+# Bootstrap (install + build dependencies)
+turbo run bootstrap
+```
+
+### Testing & Quality
+
+```bash
+# Run a specific test file (from package directory, e.g. apps/app)
+pnpm vitest run yjs.integ          # Partial file name match
+pnpm vitest run helper.spec        # Works for any test file
+pnpm vitest run yjs.integ --repeat=10  # Repeat for flaky test detection
+
+# Run ALL tests for a package (uses Turborepo caching)
+turbo run test --filter @growi/app
+
+# Run linters for specific package
+turbo run lint --filter @growi/app
+```
+
+### Building
+
+```bash
+# Build all packages
+turbo run build
+
+# Build specific package
+turbo run build --filter @growi/core
+```
+
+## Turborepo Task Filtering
+
+Turborepo uses `--filter` to target specific packages:
+
+```bash
+# Run task for single package
+turbo run test --filter @growi/app
+
+# Run task for multiple packages
+turbo run build --filter @growi/core --filter @growi/ui
+
+# Run task for package and its dependencies
+turbo run build --filter @growi/app...
+```
+
+## Important Configuration Files
+
+### Workspace Configuration
+- **pnpm-workspace.yaml** - Defines workspace packages
+  ```yaml
+  packages:
+    - 'apps/*'
+    - 'packages/*'
+  ```
+
+### Build Configuration
+- **turbo.json** - Turborepo pipeline configuration
+  - Defines task dependencies, caching, and outputs
+
+### TypeScript Configuration
+- **tsconfig.base.json** - Base TypeScript config extended by all packages
+  - **Target**: ESNext
+  - **Module**: ESNext
+  - **Strict Mode**: Enabled (`strict: true`)
+  - **Module Resolution**: Bundler
+  - **Allow JS**: true (for gradual migration)
+  - **Isolated Modules**: true (required for Vite, SWC)
+
+Package-specific tsconfig.json example:
+```json
+{
+  "extends": "../../tsconfig.base.json",
+  "compilerOptions": {
+    "outDir": "./dist",
+    "rootDir": "./src"
+  },
+  "include": ["src/**/*"],
+  "exclude": ["node_modules", "dist", "**/*.spec.ts"]
+}
+```
+
+### Testing Configuration
+- **vitest.workspace.mts** - Vitest workspace config
+  - Defines test environments (Node.js, happy-dom)
+  - Configures coverage
+
+### Linter Configuration
+- **biome.json** - Biome linter/formatter config
+  - Rules, ignore patterns, formatting options
+
+## Development Best Practices
+
+### Command Usage
+
+1. **Use Turborepo for full-package tasks** (all tests, lint, build):
+   - ✅ `turbo run test --filter @growi/app`
+   - ❌ `cd apps/app && pnpm test` (bypasses Turborepo caching)
+2. **Use vitest directly for individual test files** (from package directory):
+   - ✅ `pnpm vitest run yjs.integ` (simple, fast)
+   - ❌ `turbo run test --filter @growi/app -- yjs.integ` (unnecessary overhead)
+
+3. **Use pnpm for package management**:
+   - ✅ `pnpm install`
+   - ❌ `npm install` or `yarn install`
+
+4. **Run tasks from workspace root**:
+   - Turborepo handles cross-package dependencies
+   - Caching works best from root
+
+### State Management Guidelines
+
+1. **Use Jotai for UI state**:
+   ```typescript
+   // Example: Modal state
+   import { atom } from 'jotai';
+
+   export const isModalOpenAtom = atom(false);
+   ```
+
+2. **Use SWR for server state**:
+   ```typescript
+   // Example: Fetching pages
+   import useSWR from 'swr';
+
+   const { data, error, isLoading } = useSWR('/api/pages', fetcher);
+   ```
+
+3. **Avoid mixing concerns**:
+   - Don't store server data in Jotai atoms
+   - Don't manage UI state with SWR
+
+## Migration Notes
+
+- **New packages**: Use Biome + Vitest from the start
+- **Legacy packages**: Can continue using existing tools during migration
+- **Gradual migration**: Prefer updating to Biome + Vitest when modifying existing files
+
+## Technology Decisions
+
+### Why Next.js Pages Router (not App Router)?
+
+- GROWI started before App Router was stable
+- Pages Router is well-established and stable
+- Migration to App Router is being considered for future versions
+
+### Why Jotai (not Redux/Zustand)?
+
+- **Atomic approach**: More flexible than Redux, simpler than Recoil
+- **TypeScript-first**: Excellent type inference
+- **Performance**: Fine-grained reactivity, no unnecessary re-renders
+- **Minimal boilerplate**: Less code than Redux
+
+### Why SWR (not React Query)?
+
+- **Simplicity**: Smaller API surface
+- **Vercel integration**: Built by Vercel (same as Next.js)
+- **Performance**: Optimized for Next.js SSR/SSG
+
+### Why Biome (not ESLint + Prettier)?
+
+- **Speed**: 10-100x faster than ESLint
+- **Single tool**: Replaces both ESLint and Prettier
+- **Consistency**: No conflicts between linter and formatter
+- **Growing ecosystem**: Active development, Rust-based
+
+## Package-Specific Tech Stacks
+
+Different apps in the monorepo may use different tech stacks:
+
+- **apps/app**: Next.js + Express + MongoDB + Jotai + SWR
+- **apps/pdf-converter**: Ts.ED + Puppeteer
+- **apps/slackbot-proxy**: Ts.ED + TypeORM + MySQL
+
+See package-specific CLAUDE.md or skills for details.

+ 6 - 2
.devcontainer/app/devcontainer.json

@@ -9,7 +9,8 @@
   "features": {
   "features": {
     "ghcr.io/devcontainers/features/node:1": {
     "ghcr.io/devcontainers/features/node:1": {
       "version": "20.18.3"
       "version": "20.18.3"
-    }
+    },
+    "ghcr.io/devcontainers/features/github-cli:1": {}
   },
   },
 
 
   // Use 'forwardPorts' to make a list of ports inside the container available locally.
   // Use 'forwardPorts' to make a list of ports inside the container available locally.
@@ -26,11 +27,14 @@
         // AI
         // AI
         "anthropic.claude-code",
         "anthropic.claude-code",
         // linter
         // linter
-        "dbaeumer.vscode-eslint",
         "biomejs.biome",
         "biomejs.biome",
         "editorconfig.editorconfig",
         "editorconfig.editorconfig",
         "shinnn.stylelint",
         "shinnn.stylelint",
         "stylelint.vscode-stylelint",
         "stylelint.vscode-stylelint",
+        // markdown
+        "bierner.markdown-mermaid",
+        // TypeScript (Native Preview)
+        "typescriptteam.native-preview",
         // Test
         // Test
         "vitest.explorer",
         "vitest.explorer",
         "ms-playwright.playwright",
         "ms-playwright.playwright",

+ 3 - 0
.devcontainer/app/postCreateCommand.sh

@@ -27,3 +27,6 @@ pnpm install @anthropic-ai/claude-code --global
 
 
 # Install dependencies
 # Install dependencies
 turbo run bootstrap
 turbo run bootstrap
+
+# Install Lefthook git hooks
+pnpm lefthook install

+ 4 - 1
.devcontainer/pdf-converter/devcontainer.json

@@ -4,6 +4,10 @@
   "service": "pdf-converter",
   "service": "pdf-converter",
   "workspaceFolder": "/workspace/growi",
   "workspaceFolder": "/workspace/growi",
 
 
+  "features": {
+    "ghcr.io/devcontainers/features/github-cli:1": {}
+  },
+
   // Use 'forwardPorts' to make a list of ports inside the container available locally.
   // Use 'forwardPorts' to make a list of ports inside the container available locally.
   // "forwardPorts": [],
   // "forwardPorts": [],
 
 
@@ -15,7 +19,6 @@
   "customizations": {
   "customizations": {
     "vscode": {
     "vscode": {
       "extensions": [
       "extensions": [
-        "dbaeumer.vscode-eslint",
         "biomejs.biome",
         "biomejs.biome",
         "mhutchie.git-graph",
         "mhutchie.git-graph",
         "eamodio.gitlens"
         "eamodio.gitlens"

+ 3 - 0
.devcontainer/pdf-converter/postCreateCommand.sh

@@ -22,3 +22,6 @@ pnpm install turbo --global
 
 
 # Install dependencies
 # Install dependencies
 turbo run bootstrap
 turbo run bootstrap
+
+# Install Lefthook git hooks
+pnpm lefthook install

+ 0 - 88
.eslintrc.js

@@ -1,88 +0,0 @@
-/**
- * @type {import('eslint').Linter.Config}
- */
-module.exports = {
-  root: true, // https://eslint.org/docs/user-guide/configuring/configuration-files#cascading-and-hierarchy
-  extends: [
-    'weseek',
-    'weseek/typescript',
-  ],
-  plugins: [
-    'regex',
-  ],
-  ignorePatterns: [
-    'node_modules/**',
-  ],
-  rules: {
-    'import/prefer-default-export': 'off',
-    'import/order': [
-      'warn',
-      {
-        pathGroups: [
-          {
-            pattern: 'react',
-            group: 'builtin',
-            position: 'before',
-          },
-          {
-            pattern: '^/**',
-            group: 'parent',
-            position: 'before',
-          },
-          {
-            pattern: '~/**',
-            group: 'parent',
-            position: 'before',
-          },
-          {
-            pattern: '*.css',
-            group: 'type',
-            patternOptions: { matchBase: true },
-            position: 'after',
-          },
-          {
-            pattern: '*.scss',
-            group: 'type',
-            patternOptions: { matchBase: true },
-            position: 'after',
-          },
-        ],
-        alphabetize: {
-          order: 'asc',
-        },
-        pathGroupsExcludedImportTypes: ['react'],
-        'newlines-between': 'always',
-      },
-    ],
-    '@typescript-eslint/consistent-type-imports': 'warn',
-    '@typescript-eslint/explicit-module-boundary-types': 'off',
-    indent: [
-      'error',
-      2,
-      {
-        SwitchCase: 1,
-        ArrayExpression: 'first',
-        FunctionDeclaration: { body: 1, parameters: 2 },
-        FunctionExpression: { body: 1, parameters: 2 },
-      },
-    ],
-    'regex/invalid': ['error', [
-      {
-        regex: '\\?\\<\\!',
-        message: 'Do not use any negative lookbehind',
-      }, {
-        regex: '\\?\\<\\=',
-        message: 'Do not use any Positive lookbehind',
-      },
-    ]],
-  },
-  overrides: [
-    {
-      // enable the rule specifically for TypeScript files
-      files: ['*.ts', '*.mts', '*.tsx'],
-      rules: {
-        '@typescript-eslint/explicit-module-boundary-types': ['error'],
-      },
-    },
-  ],
-};

+ 0 - 1
.github/mergify.yml

@@ -1,6 +1,5 @@
 queue_rules:
 queue_rules:
   - name: default
   - name: default
-    allow_inplace_checks: false
     queue_conditions:
     queue_conditions:
       - check-success ~= ci-app-lint
       - check-success ~= ci-app-lint
       - check-success ~= ci-app-test
       - check-success ~= ci-app-test

+ 2 - 2
.github/workflows/ci-app.yml

@@ -11,7 +11,7 @@ on:
     paths:
     paths:
       - .github/mergify.yml
       - .github/mergify.yml
       - .github/workflows/ci-app.yml
       - .github/workflows/ci-app.yml
-      - .eslint*
+      - biome.json
       - tsconfig.base.json
       - tsconfig.base.json
       - turbo.json
       - turbo.json
       - pnpm-lock.yaml
       - pnpm-lock.yaml
@@ -24,7 +24,7 @@ on:
     paths:
     paths:
       - .github/mergify.yml
       - .github/mergify.yml
       - .github/workflows/ci-app.yml
       - .github/workflows/ci-app.yml
-      - .eslint*
+      - biome.json
       - tsconfig.base.json
       - tsconfig.base.json
       - turbo.json
       - turbo.json
       - pnpm-lock.yaml
       - pnpm-lock.yaml

+ 1 - 1
.github/workflows/ci-pdf-converter.yml

@@ -9,7 +9,7 @@ on:
     paths:
     paths:
       - .github/mergify.yml
       - .github/mergify.yml
       - .github/workflows/ci-pdf-converter.yml
       - .github/workflows/ci-pdf-converter.yml
-      - .eslint*
+      - biome.json
       - tsconfig.base.json
       - tsconfig.base.json
       - turbo.json
       - turbo.json
       - pnpm-lock.yaml
       - pnpm-lock.yaml

+ 1 - 1
.github/workflows/ci-slackbot-proxy.yml

@@ -9,7 +9,7 @@ on:
     paths:
     paths:
       - .github/mergify.yml
       - .github/mergify.yml
       - .github/workflows/ci-slackbot-proxy.yml
       - .github/workflows/ci-slackbot-proxy.yml
-      - .eslint*
+      - biome.json
       - tsconfig.base.json
       - tsconfig.base.json
       - turbo.json
       - turbo.json
       - pnpm-lock.yaml
       - pnpm-lock.yaml

+ 5 - 30
.github/workflows/release-rc.yml

@@ -17,8 +17,7 @@ jobs:
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
 
 
     outputs:
     outputs:
-      TAGS_WESEEK: ${{ steps.meta-weseek.outputs.tags }}
-      TAGS_GROWILABS: ${{ steps.meta-growilabs.outputs.tags }}
+      TAGS: ${{ steps.meta.outputs.tags }}
 
 
     steps:
     steps:
     - uses: actions/checkout@v4
     - uses: actions/checkout@v4
@@ -27,19 +26,9 @@ jobs:
       uses: myrotvorets/info-from-package-json-action@v2.0.2
       uses: myrotvorets/info-from-package-json-action@v2.0.2
       id: package-json
       id: package-json
 
 
-    - name: Docker meta for weseek/growi
+    - name: Docker meta for docker.io
       uses: docker/metadata-action@v5
       uses: docker/metadata-action@v5
-      id: meta-weseek
-      with:
-        images: docker.io/weseek/growi
-        sep-tags: ','
-        tags: |
-          type=raw,value=${{ steps.package-json.outputs.packageVersion }}
-          type=raw,value=${{ steps.package-json.outputs.packageVersion }}.{{sha}}
-
-    - name: Docker meta for growilabs/growi
-      uses: docker/metadata-action@v5
-      id: meta-growilabs
+      id: meta
       with:
       with:
         images: docker.io/growilabs/growi
         images: docker.io/growilabs/growi
         sep-tags: ','
         sep-tags: ','
@@ -55,29 +44,15 @@ jobs:
     secrets:
     secrets:
       AWS_ROLE_TO_ASSUME_FOR_OIDC: ${{ secrets.AWS_ROLE_TO_ASSUME_FOR_OIDC }}
       AWS_ROLE_TO_ASSUME_FOR_OIDC: ${{ secrets.AWS_ROLE_TO_ASSUME_FOR_OIDC }}
 
 
-
-  publish-rc-image-for-growilabs:
+  publish-image-rc:
     needs: [determine-tags, build-image-rc]
     needs: [determine-tags, build-image-rc]
 
 
     uses: growilabs/growi/.github/workflows/reusable-app-create-manifests.yml@master
     uses: growilabs/growi/.github/workflows/reusable-app-create-manifests.yml@master
     with:
     with:
-      tags: ${{ needs.determine-tags.outputs.TAGS_GROWILABS }}
+      tags: ${{ needs.determine-tags.outputs.TAGS }}
       registry: docker.io
       registry: docker.io
       image-name: 'growilabs/growi'
       image-name: 'growilabs/growi'
       docker-registry-username: 'growimoogle'
       docker-registry-username: 'growimoogle'
       tag-temporary: latest-rc
       tag-temporary: latest-rc
     secrets:
     secrets:
       DOCKER_REGISTRY_PASSWORD: ${{ secrets.DOCKER_REGISTRY_PASSWORD_GROWIMOOGLE }}
       DOCKER_REGISTRY_PASSWORD: ${{ secrets.DOCKER_REGISTRY_PASSWORD_GROWIMOOGLE }}
-
-  publish-rc-image-for-weseek:
-    needs: [determine-tags, build-image-rc]
-
-    uses: growilabs/growi/.github/workflows/reusable-app-create-manifests.yml@master
-    with:
-      tags: ${{ needs.determine-tags.outputs.TAGS_WESEEK }}
-      registry: docker.io
-      image-name: 'growilabs/growi'
-      docker-registry-username: 'wsmoogle'
-      tag-temporary: latest-rc
-    secrets:
-      DOCKER_REGISTRY_PASSWORD: ${{ secrets.DOCKER_REGISTRY_PASSWORD }}

+ 11 - 46
.github/workflows/release.yml

@@ -1,4 +1,3 @@
-# TODO: https://redmine.weseek.co.jp/issues/171293
 name: Release
 name: Release
 
 
 on:
 on:
@@ -81,8 +80,7 @@ jobs:
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
 
 
     outputs:
     outputs:
-      TAGS_WESEEK: ${{ steps.meta-weseek.outputs.tags }}
-      TAGS_GROWILABS: ${{ steps.meta-growilabs.outputs.tags }}
+      TAGS: ${{ steps.meta.outputs.tags }}
 
 
     steps:
     steps:
     - uses: actions/checkout@v4
     - uses: actions/checkout@v4
@@ -91,21 +89,9 @@ jobs:
       uses: myrotvorets/info-from-package-json-action@v2.0.2
       uses: myrotvorets/info-from-package-json-action@v2.0.2
       id: package-json
       id: package-json
 
 
-    - name: Docker meta for weseek/growi
+    - name: Docker meta for docker.io
       uses: docker/metadata-action@v5
       uses: docker/metadata-action@v5
-      id: meta-weseek
-      with:
-        images: docker.io/weseek/growi
-        sep-tags: ','
-        tags: |
-          type=raw,value=latest
-          type=semver,value=${{ needs.create-github-release.outputs.RELEASED_VERSION }},pattern={{major}}
-          type=semver,value=${{ needs.create-github-release.outputs.RELEASED_VERSION }},pattern={{major}}.{{minor}}
-          type=semver,value=${{ needs.create-github-release.outputs.RELEASED_VERSION }},pattern={{major}}.{{minor}}.{{patch}}
-
-    - name: Docker meta for growilabs/growi
-      uses: docker/metadata-action@v5
-      id: meta-growilabs
+      id: meta
       with:
       with:
         images: docker.io/growilabs/growi
         images: docker.io/growilabs/growi
         sep-tags: ','
         sep-tags: ','
@@ -126,12 +112,12 @@ jobs:
     secrets:
     secrets:
       AWS_ROLE_TO_ASSUME_FOR_OIDC: ${{ secrets.AWS_ROLE_TO_ASSUME_FOR_OIDC }}
       AWS_ROLE_TO_ASSUME_FOR_OIDC: ${{ secrets.AWS_ROLE_TO_ASSUME_FOR_OIDC }}
 
 
-  publish-app-image-for-growilabs:
+  publish-app-image:
     needs: [determine-tags, build-app-image]
     needs: [determine-tags, build-app-image]
 
 
     uses: growilabs/growi/.github/workflows/reusable-app-create-manifests.yml@master
     uses: growilabs/growi/.github/workflows/reusable-app-create-manifests.yml@master
     with:
     with:
-      tags: ${{ needs.determine-tags.outputs.TAGS_GROWILABS }}
+      tags: ${{ needs.determine-tags.outputs.TAGS }}
       registry: docker.io
       registry: docker.io
       image-name: 'growilabs/growi'
       image-name: 'growilabs/growi'
       docker-registry-username: 'growimoogle'
       docker-registry-username: 'growimoogle'
@@ -139,42 +125,21 @@ jobs:
     secrets:
     secrets:
       DOCKER_REGISTRY_PASSWORD: ${{ secrets.DOCKER_REGISTRY_PASSWORD_GROWIMOOGLE }}
       DOCKER_REGISTRY_PASSWORD: ${{ secrets.DOCKER_REGISTRY_PASSWORD_GROWIMOOGLE }}
 
 
-  publish-app-image-for-weseek:
-    needs: [determine-tags, build-app-image]
-
-    uses: growilabs/growi/.github/workflows/reusable-app-create-manifests.yml@master
-    with:
-      tags: ${{ needs.determine-tags.outputs.TAGS_WESEEK }}
-      registry: docker.io
-      image-name: 'growilabs/growi'
-      docker-registry-username: 'wsmoogle'
-      tag-temporary: latest
-    secrets:
-      DOCKER_REGISTRY_PASSWORD: ${{ secrets.DOCKER_REGISTRY_PASSWORD }}
-
   post-publish:
   post-publish:
-    needs: [create-github-release, publish-app-image-for-growilabs, publish-app-image-for-weseek]
+    needs: [create-github-release, publish-app-image]
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
 
 
-    strategy:
-      matrix:
-        include:
-          - repository: weseek/growi
-            username: wsmoogle
-          - repository: growilabs/growi
-            username: growimoogle
-
     steps:
     steps:
     - uses: actions/checkout@v4
     - uses: actions/checkout@v4
       with:
       with:
         ref: v${{ needs.create-github-release.outputs.RELEASED_VERSION }}
         ref: v${{ needs.create-github-release.outputs.RELEASED_VERSION }}
 
 
     - name: Update Docker Hub Description
     - name: Update Docker Hub Description
-      uses: peter-evans/dockerhub-description@v4
+      uses: peter-evans/dockerhub-description@v3
       with:
       with:
-        username: ${{ matrix.username }}
-        password: ${{ (matrix.repository == 'weseek/growi' && secrets.DOCKER_REGISTRY_PASSWORD) || (matrix.repository == 'growilabs/growi' && secrets.DOCKER_REGISTRY_PASSWORD_GROWIMOOGLE) || 'INVALID_SECRET' }}
-        repository: ${{ matrix.repository }}
+        username: growimoogle
+        password: ${{ secrets.DOCKER_REGISTRY_PASSWORD_GROWIMOOGLE }}
+        repository: growilabs/growi
         readme-filepath: ./apps/app/docker/README.md
         readme-filepath: ./apps/app/docker/README.md
 
 
     - name: Slack Notification
     - name: Slack Notification
@@ -186,7 +151,7 @@ jobs:
 
 
 
 
   create-pr-for-next-rc:
   create-pr-for-next-rc:
-    needs: [create-github-release, publish-app-image-for-growilabs, publish-app-image-for-weseek]
+    needs: [create-github-release, publish-app-image]
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
 
 
     steps:
     steps:

+ 1 - 1
.github/workflows/reusable-app-build-image.yml

@@ -40,7 +40,7 @@ jobs:
       with:
       with:
         aws-region: ap-northeast-1
         aws-region: ap-northeast-1
         role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME_FOR_OIDC }}
         role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME_FOR_OIDC }}
-        role-session-name: SessionForReleaseGROWI-RC
+        role-session-name: GitHubActions-SessionForReleaseGROWI-${{ github.run_id }}
 
 
     - name: Run CodeBuild
     - name: Run CodeBuild
       uses: dark-mechanicum/aws-codebuild@v1
       uses: dark-mechanicum/aws-codebuild@v1

+ 3 - 0
.gitignore

@@ -33,11 +33,14 @@ yarn-error.log*
 # Terraform
 # Terraform
 **/.terraform/*
 **/.terraform/*
 *.tfstate.*
 *.tfstate.*
+/aws/
 
 
 # IDE, dev #
 # IDE, dev #
 .idea
 .idea
+**/.claude/settings.local.json
 *.orig
 *.orig
 *.code-workspace
 *.code-workspace
+*.timestamp-*.mjs
 
 
 # turborepo
 # turborepo
 .turbo
 .turbo

+ 93 - 0
.kiro/settings/rules/design-discovery-full.md

@@ -0,0 +1,93 @@
+# Full Discovery Process for Technical Design
+
+## Objective
+Conduct comprehensive research and analysis to ensure the technical design is based on complete, accurate, and up-to-date information.
+
+## Discovery Steps
+
+### 1. Requirements Analysis
+**Map Requirements to Technical Needs**
+- Extract all functional requirements from EARS format
+- Identify non-functional requirements (performance, security, scalability)
+- Determine technical constraints and dependencies
+- List core technical challenges
+
+### 2. Existing Implementation Analysis
+**Understand Current System** (if modifying/extending):
+- Analyze codebase structure and architecture patterns
+- Map reusable components, services, utilities
+- Identify domain boundaries and data flows
+- Document integration points and dependencies
+- Determine approach: extend vs refactor vs wrap
+
+### 3. Technology Research
+**Investigate Best Practices and Solutions**:
+- **Use WebSearch** to find:
+  - Latest architectural patterns for similar problems
+  - Industry best practices for the technology stack
+  - Recent updates or changes in relevant technologies
+  - Common pitfalls and solutions
+
+- **Use WebFetch** to analyze:
+  - Official documentation for frameworks/libraries
+  - API references and usage examples
+  - Migration guides and breaking changes
+  - Performance benchmarks and comparisons
+
+### 4. External Dependencies Investigation
+**For Each External Service/Library**:
+- Search for official documentation and GitHub repositories
+- Verify API signatures and authentication methods
+- Check version compatibility with existing stack
+- Investigate rate limits and usage constraints
+- Find community resources and known issues
+- Document security considerations
+- Note any gaps requiring implementation investigation
+
+### 5. Architecture Pattern & Boundary Analysis
+**Evaluate Architectural Options**:
+- Compare relevant patterns (MVC, Clean, Hexagonal, Event-driven)
+- Assess fit with existing architecture and steering principles
+- Identify domain boundaries and ownership seams required to avoid team conflicts
+- Consider scalability implications and operational concerns
+- Evaluate maintainability and team expertise
+- Document preferred pattern and rejected alternatives in `research.md`
+
+### 6. Risk Assessment
+**Identify Technical Risks**:
+- Performance bottlenecks and scaling limits
+- Security vulnerabilities and attack vectors
+- Integration complexity and coupling
+- Technical debt creation vs resolution
+- Knowledge gaps and training needs
+
+## Research Guidelines
+
+### When to Search
+**Always search for**:
+- External API documentation and updates
+- Security best practices for authentication/authorization
+- Performance optimization techniques for identified bottlenecks
+- Latest versions and migration paths for dependencies
+
+**Search if uncertain about**:
+- Architectural patterns for specific use cases
+- Industry standards for data formats/protocols
+- Compliance requirements (GDPR, HIPAA, etc.)
+- Scalability approaches for expected load
+
+### Search Strategy
+1. Start with official sources (documentation, GitHub)
+2. Check recent blog posts and articles (last 6 months)
+3. Review Stack Overflow for common issues
+4. Investigate similar open-source implementations
+
+## Output Requirements
+Capture all findings that impact design decisions in `research.md` using the shared template:
+- Key insights affecting architecture, technology alignment, and contracts
+- Constraints discovered during research
+- Recommended approaches and selected architecture pattern with rationale
+- Rejected alternatives and trade-offs (documented in the Design Decisions section)
+- Updated domain boundaries that inform Components & Interface Contracts
+- Risks and mitigation strategies
+- Gaps requiring further investigation during implementation

+ 49 - 0
.kiro/settings/rules/design-discovery-light.md

@@ -0,0 +1,49 @@
+# Light Discovery Process for Extensions
+
+## Objective
+Quickly analyze existing system and integration requirements for feature extensions.
+
+## Focused Discovery Steps
+
+### 1. Extension Point Analysis
+**Identify Integration Approach**:
+- Locate existing extension points or interfaces
+- Determine modification scope (files, components)
+- Check for existing patterns to follow
+- Identify backward compatibility requirements
+
+### 2. Dependency Check
+**Verify Compatibility**:
+- Check version compatibility of new dependencies
+- Validate API contracts haven't changed
+- Ensure no breaking changes in pipeline
+
+### 3. Quick Technology Verification
+**For New Libraries Only**:
+- Use WebSearch for official documentation
+- Verify basic usage patterns
+- Check for known compatibility issues
+- Confirm licensing compatibility
+- Record key findings in `research.md` (technology alignment section)
+
+### 4. Integration Risk Assessment
+**Quick Risk Check**:
+- Impact on existing functionality
+- Performance implications
+- Security considerations
+- Testing requirements
+
+## When to Escalate to Full Discovery
+Switch to full discovery if you find:
+- Significant architectural changes needed
+- Complex external service integrations
+- Security-sensitive implementations
+- Performance-critical components
+- Unknown or poorly documented dependencies
+
+## Output Requirements
+- Clear integration approach (note boundary impacts in `research.md`)
+- List of files/components to modify
+- New dependencies with versions
+- Integration risks and mitigations
+- Testing focus areas

+ 182 - 0
.kiro/settings/rules/design-principles.md

@@ -0,0 +1,182 @@
+# Technical Design Rules and Principles
+
+## Core Design Principles
+
+### 1. Type Safety is Mandatory
+- **NEVER** use `any` type in TypeScript interfaces
+- Define explicit types for all parameters and returns
+- Use discriminated unions for error handling
+- Specify generic constraints clearly
+
+### 2. Design vs Implementation
+- **Focus on WHAT, not HOW**
+- Define interfaces and contracts, not code
+- Specify behavior through pre/post conditions
+- Document architectural decisions, not algorithms
+
+### 3. Visual Communication
+- **Simple features**: Basic component diagram or none
+- **Medium complexity**: Architecture + data flow
+- **High complexity**: Multiple diagrams (architecture, sequence, state)
+- **Always pure Mermaid**: No styling, just structure
+
+### 4. Component Design Rules
+- **Single Responsibility**: One clear purpose per component
+- **Clear Boundaries**: Explicit domain ownership
+- **Dependency Direction**: Follow architectural layers
+- **Interface Segregation**: Minimal, focused interfaces
+- **Team-safe Interfaces**: Design boundaries that allow parallel implementation without merge conflicts
+- **Research Traceability**: Record boundary decisions and rationale in `research.md`
+
+### 5. Data Modeling Standards
+- **Domain First**: Start with business concepts
+- **Consistency Boundaries**: Clear aggregate roots
+- **Normalization**: Balance between performance and integrity
+- **Evolution**: Plan for schema changes
+
+### 6. Error Handling Philosophy
+- **Fail Fast**: Validate early and clearly
+- **Graceful Degradation**: Partial functionality over complete failure
+- **User Context**: Actionable error messages
+- **Observability**: Comprehensive logging and monitoring
+
+### 7. Integration Patterns
+- **Loose Coupling**: Minimize dependencies
+- **Contract First**: Define interfaces before implementation
+- **Versioning**: Plan for API evolution
+- **Idempotency**: Design for retry safety
+- **Contract Visibility**: Surface API and event contracts in design.md while linking extended details from `research.md`
+
+## Documentation Standards
+
+### Language and Tone
+- **Declarative**: "The system authenticates users" not "The system should authenticate"
+- **Precise**: Specific technical terms over vague descriptions
+- **Concise**: Essential information only
+- **Formal**: Professional technical writing
+
+### Structure Requirements
+- **Hierarchical**: Clear section organization
+- **Traceable**: Requirements to components mapping
+- **Complete**: All aspects covered for implementation
+- **Consistent**: Uniform terminology throughout
+- **Focused**: Keep design.md centered on architecture and contracts; move investigation logs and lengthy comparisons to `research.md`
+
+## Section Authoring Guidance
+
+### Global Ordering
+- Default flow: Overview → Goals/Non-Goals → Requirements Traceability → Architecture → Technology Stack → System Flows → Components & Interfaces → Data Models → Optional sections.
+- Teams may swap Traceability earlier or place Data Models nearer Architecture when it improves clarity, but keep section headings intact.
+- Within each section, follow **Summary → Scope → Decisions → Impacts/Risks** so reviewers can scan consistently.
+
+### Requirement IDs
+- Reference requirements as `2.1, 2.3` without prefixes (no “Requirement 2.1”).
+- All requirements MUST have numeric IDs. If a requirement lacks a numeric ID, stop and fix `requirements.md` before continuing.
+- Use `N.M`-style numeric IDs where `N` is the top-level requirement number from requirements.md (for example, Requirement 1 → 1.1, 1.2; Requirement 2 → 2.1, 2.2).
+- Every component, task, and traceability row must reference the same canonical numeric ID.
+
+### Technology Stack
+- Include ONLY layers impacted by this feature (frontend, backend, data, messaging, infra).
+- For each layer specify tool/library + version + the role it plays; push extended rationale, comparisons, or benchmarks to `research.md`.
+- When extending an existing system, highlight deviations from the current stack and list new dependencies.
+
+### System Flows
+- Add diagrams only when they clarify behavior:  
+  - **Sequence** for multi-step interactions  
+  - **Process/State** for branching rules or lifecycle  
+  - **Data/Event** for pipelines or async patterns
+- Always use pure Mermaid. If no complex flow exists, omit the entire section.
+
+### Requirements Traceability
+- Use the standard table (`Requirement | Summary | Components | Interfaces | Flows`) to prove coverage.
+- Collapse to bullet form only when a single requirement maps 1:1 to a component.
+- Prefer the component summary table for simple mappings; reserve the full traceability table for complex or compliance-sensitive requirements.
+- Re-run this mapping whenever requirements or components change to avoid drift.
+
+### Components & Interfaces Authoring
+- Group components by domain/layer and provide one block per component.
+- Begin with a summary table listing Component, Domain, Intent, Requirement coverage, key dependencies, and selected contracts.
+- Table fields: Intent (one line), Requirements (`2.1, 2.3`), Owner/Reviewers (optional).
+- Dependencies table must mark each entry as Inbound/Outbound/External and assign Criticality (`P0` blocking, `P1` high-risk, `P2` informational).
+- Summaries of external dependency research stay here; detailed investigation (API signatures, rate limits, migration notes) belongs in `research.md`.
+- design.md must remain a self-contained reviewer artifact. Reference `research.md` only for background, and restate any conclusions or decisions here.
+- Contracts: tick only the relevant types (Service/API/Event/Batch/State). Unchecked types should not appear later in the component section.
+- Service interfaces must declare method signatures, inputs/outputs, and error envelopes. API/Event/Batch contracts require schema tables or bullet lists covering trigger, payload, delivery, idempotency.
+- Use **Integration & Migration Notes**, **Validation Hooks**, and **Open Questions / Risks** to document rollout strategy, observability, and unresolved decisions.
+- Detail density rules:
+  - **Full block**: components introducing new boundaries (logic hooks, shared services, external integrations, data layers).
+  - **Summary-only**: presentational/UI components with no new boundaries (plus a short Implementation Note if needed).
+- Implementation Notes must combine Integration / Validation / Risks into a single bulleted subsection to reduce repetition.
+- Prefer lists or inline descriptors for short data (dependencies, contract selections). Use tables only when comparing multiple items.
+
+### Shared Interfaces & Props
+- Define a base interface (e.g., `BaseUIPanelProps`) for recurring UI components and extend it per component to capture only the deltas.
+- Hooks, utilities, and integration adapters that introduce new contracts should still include full TypeScript signatures.
+- When reusing a base contract, reference it explicitly (e.g., “Extends `BaseUIPanelProps` with `onSubmitAnswer` callback”) instead of duplicating the code block.
+
+### Data Models
+- Domain Model covers aggregates, entities, value objects, domain events, and invariants. Add Mermaid diagrams only when relationships are non-trivial.
+- Logical Data Model should articulate structure, indexing, sharding, and storage-specific considerations (event store, KV/wide-column) relevant to the change.
+- Data Contracts & Integration section documents API payloads, event schemas, and cross-service synchronization patterns when the feature crosses boundaries.
+- Lengthy type definitions or vendor-specific option objects should be placed in the Supporting References section within design.md, linked from the relevant section. Investigation notes stay in `research.md`.
+- Supporting References usage is optional; only create it when keeping the content in the main body would reduce readability. All decisions must still appear in the main sections so design.md stands alone.
+
+### Error/Testing/Security/Performance Sections
+- Record only feature-specific decisions or deviations. Link or reference organization-wide standards (steering) for baseline practices instead of restating them.
+
+### Diagram & Text Deduplication
+- Do not restate diagram content verbatim in prose. Use the text to highlight key decisions, trade-offs, or impacts that are not obvious from the visual.
+- When a decision is fully captured in the diagram annotations, a short “Key Decisions” bullet is sufficient.
+
+### General Deduplication
+- Avoid repeating the same information across Overview, Architecture, and Components. Reference earlier sections when context is identical.
+- If a requirement/component relationship is captured in the summary table, do not rewrite it elsewhere unless extra nuance is added.
+
+## Diagram Guidelines
+
+### When to include a diagram
+- **Architecture**: Use a structural diagram when 3+ components or external systems interact.
+- **Sequence**: Draw a sequence diagram when calls/handshakes span multiple steps.
+- **State / Flow**: Capture complex state machines or business flows in a dedicated diagram.
+- **ER**: Provide an entity-relationship diagram for non-trivial data models.
+- **Skip**: Minor one-component changes generally do not need diagrams.
+
+### Mermaid requirements
+```mermaid
+graph TB
+    Client --> ApiGateway
+    ApiGateway --> ServiceA
+    ApiGateway --> ServiceB
+    ServiceA --> Database
+```
+
+- **Plain Mermaid only** – avoid custom styling or unsupported syntax.
+- **Node IDs** – alphanumeric plus underscores only (e.g., `Client`, `ServiceA`). Do not use `@`, `/`, or leading `-`.
+- **Labels** – simple words. Do not embed parentheses `()`, square brackets `[]`, quotes `"`, or slashes `/`.
+  - ❌ `DnD[@dnd-kit/core]` → invalid ID (`@`).
+  - ❌ `UI[KanbanBoard(React)]` → invalid label (`()`).
+  - ✅ `DndKit[dnd-kit core]` → use plain text in labels, keep technology details in the accompanying description.
+  - ℹ️ Mermaid strict-mode will otherwise fail with errors like `Expecting 'SQE' ... got 'PS'`; remove punctuation from labels before rendering.
+- **Edges** – show data or control flow direction.
+- **Groups** – using Mermaid subgraphs to cluster related components is allowed; use it sparingly for clarity.
+
+## Quality Metrics
+### Design Completeness Checklist
+- All requirements addressed
+- No implementation details leaked
+- Clear component boundaries
+- Explicit error handling
+- Comprehensive test strategy
+- Security considered
+- Performance targets defined
+- Migration path clear (if applicable)
+
+### Common Anti-patterns to Avoid
+❌ Mixing design with implementation
+❌ Vague interface definitions
+❌ Missing error scenarios
+❌ Ignored non-functional requirements
+❌ Overcomplicated architectures
+❌ Tight coupling between components
+❌ Missing data consistency strategy
+❌ Incomplete dependency analysis

+ 110 - 0
.kiro/settings/rules/design-review.md

@@ -0,0 +1,110 @@
+# Design Review Process
+
+## Objective
+Conduct interactive quality review of technical design documents to ensure they are solid enough to proceed to implementation with acceptable risk.
+
+## Review Philosophy
+- **Quality assurance, not perfection seeking**
+- **Critical focus**: Limit to 3 most important concerns
+- **Interactive dialogue**: Engage with designer, not one-way evaluation
+- **Balanced assessment**: Recognize strengths and weaknesses
+- **Clear decision**: Definitive GO/NO-GO with rationale
+
+## Scope & Non-Goals
+
+- Scope: Evaluate the quality of the design document against project context and standards to decide GO/NO-GO.
+- Non-Goals: Do not perform implementation-level design, deep technology research, or finalize technology choices. Defer such items to the design phase iteration.
+
+## Core Review Criteria
+
+### 1. Existing Architecture Alignment (Critical)
+- Integration with existing system boundaries and layers
+- Consistency with established architectural patterns
+- Proper dependency direction and coupling management
+- Alignment with current module organization
+
+### 2. Design Consistency & Standards
+- Adherence to project naming conventions and code standards
+- Consistent error handling and logging strategies
+- Uniform configuration and dependency management
+- Alignment with established data modeling patterns
+
+### 3. Extensibility & Maintainability
+- Design flexibility for future requirements
+- Clear separation of concerns and single responsibility
+- Testability and debugging considerations
+- Appropriate complexity for requirements
+
+### 4. Type Safety & Interface Design
+- Proper type definitions and interface contracts
+- Avoidance of unsafe patterns (e.g., `any` in TypeScript)
+- Clear API boundaries and data structures
+- Input validation and error handling coverage
+
+## Review Process
+
+### Step 1: Analyze
+Analyze design against all review criteria, focusing on critical issues impacting integration, maintainability, complexity, and requirements fulfillment.
+
+### Step 2: Identify Critical Issues (≤3)
+For each issue:
+```
+🔴 **Critical Issue [1-3]**: [Brief title]
+**Concern**: [Specific problem]
+**Impact**: [Why it matters]
+**Suggestion**: [Concrete improvement]
+**Traceability**: [Requirement ID/section from requirements.md]
+**Evidence**: [Design doc section/heading]
+```
+
+### Step 3: Recognize Strengths
+Acknowledge 1-2 strong aspects to maintain balanced feedback.
+
+### Step 4: Decide GO/NO-GO
+- **GO**: No critical architectural misalignment, requirements addressed, clear implementation path, acceptable risks
+- **NO-GO**: Fundamental conflicts, critical gaps, high failure risk, disproportionate complexity
+
+## Traceability & Evidence
+
+- Link each critical issue to the relevant requirement(s) from `requirements.md` (ID or section).
+- Cite evidence locations in the design document (section/heading, diagram, or artifact) to support the assessment.
+- When applicable, reference constraints from steering context to justify the issue.
+
+## Output Format
+
+### Design Review Summary
+2-3 sentences on overall quality and readiness.
+
+### Critical Issues (≤3)
+For each: Issue, Impact, Recommendation, Traceability (e.g., 1.1, 1.2), Evidence (design.md section).
+
+### Design Strengths
+1-2 positive aspects.
+
+### Final Assessment
+Decision (GO/NO-GO), Rationale (1-2 sentences), Next Steps.
+
+### Interactive Discussion
+Engage on designer's perspective, alternatives, clarifications, and necessary changes.
+
+## Length & Focus
+
+- Summary: 2–3 sentences
+- Each critical issue: 5–7 lines total (including Issue/Impact/Recommendation/Traceability/Evidence)
+- Overall review: keep concise (~400 words guideline)
+
+## Review Guidelines
+
+1. **Critical Focus**: Only flag issues that significantly impact success
+2. **Constructive Tone**: Provide solutions, not just criticism
+3. **Interactive Approach**: Engage in dialogue rather than one-way evaluation
+4. **Balanced Assessment**: Recognize both strengths and weaknesses
+5. **Clear Decision**: Make definitive GO/NO-GO recommendation
+6. **Actionable Feedback**: Ensure all suggestions are implementable
+
+## Final Checklist
+
+- **Critical Issues ≤ 3** and each includes Impact and Recommendation
+- **Traceability**: Each issue references requirement ID/section
+- **Evidence**: Each issue cites design doc location
+- **Decision**: GO/NO-GO with clear rationale and next steps

+ 49 - 0
.kiro/settings/rules/ears-format.md

@@ -0,0 +1,49 @@
+# EARS Format Guidelines
+
+## Overview
+EARS (Easy Approach to Requirements Syntax) is the standard format for acceptance criteria in spec-driven development.
+
+EARS patterns describe the logical structure of a requirement (condition + subject + response) and are not tied to any particular natural language.  
+All acceptance criteria should be written in the target language configured for the specification (for example, `spec.json.language` / `en`).  
+Keep EARS trigger keywords and fixed phrases in English (`When`, `If`, `While`, `Where`, `The system shall`, `The [system] shall`) and localize only the variable parts (`[event]`, `[precondition]`, `[trigger]`, `[feature is included]`, `[response/action]`) into the target language. Do not interleave target-language text inside the trigger or fixed English phrases themselves.
+
+## Primary EARS Patterns
+
+### 1. Event-Driven Requirements
+- **Pattern**: When [event], the [system] shall [response/action]
+- **Use Case**: Responses to specific events or triggers
+- **Example**: When user clicks checkout button, the Checkout Service shall validate cart contents
+
+### 2. State-Driven Requirements
+- **Pattern**: While [precondition], the [system] shall [response/action]
+- **Use Case**: Behavior dependent on system state or preconditions
+- **Example**: While payment is processing, the Checkout Service shall display loading indicator
+
+### 3. Unwanted Behavior Requirements
+- **Pattern**: If [trigger], the [system] shall [response/action]
+- **Use Case**: System response to errors, failures, or undesired situations
+- **Example**: If invalid credit card number is entered, the Checkout Service shall display an error message
+
+### 4. Optional Feature Requirements
+- **Pattern**: Where [feature is included], the [system] shall [response/action]
+- **Use Case**: Requirements for optional or conditional features
+- **Example**: Where the car has a sunroof, the car shall have a sunroof control panel
+
+### 5. Ubiquitous Requirements
+- **Pattern**: The [system] shall [response/action]
+- **Use Case**: Always-active requirements and fundamental system properties
+- **Example**: The mobile phone shall have a mass of less than 100 grams
+
+## Combined Patterns
+- While [precondition], when [event], the [system] shall [response/action]
+- When [event] and [additional condition], the [system] shall [response/action]
+
+## Subject Selection Guidelines
+- **Software Projects**: Use concrete system/service name (e.g., "Checkout Service", "User Auth Module")
+- **Process/Workflow**: Use responsible team/role (e.g., "Support Team", "Review Process")
+- **Non-Software**: Use appropriate subject (e.g., "Marketing Campaign", "Documentation")
+
+## Quality Criteria
+- Requirements must be testable, verifiable, and describe a single behavior.
+- Use objective language: "shall" for mandatory behavior, "should" for recommendations; avoid ambiguous terms.
+- Follow EARS syntax: [condition], the [system] shall [response/action].

+ 144 - 0
.kiro/settings/rules/gap-analysis.md

@@ -0,0 +1,144 @@
+# Gap Analysis Process
+
+## Objective
+Analyze the gap between requirements and existing codebase to inform implementation strategy decisions.
+
+## Analysis Framework
+
+### 1. Current State Investigation
+
+- Scan for domain-related assets:
+  - Key files/modules and directory layout
+  - Reusable components/services/utilities
+  - Dominant architecture patterns and constraints
+
+- Extract conventions:
+  - Naming, layering, dependency direction
+  - Import/export patterns and dependency hotspots
+  - Testing placement and approach
+
+- Note integration surfaces:
+  - Data models/schemas, API clients, auth mechanisms
+
+### 2. Requirements Feasibility Analysis
+
+- From EARS requirements, list technical needs:
+  - Data models, APIs/services, UI/components
+  - Business rules/validation
+  - Non-functionals: security, performance, scalability, reliability
+
+- Identify gaps and constraints:
+  - Missing capabilities in current codebase
+  - Unknowns to be researched later (mark as "Research Needed")
+  - Constraints from existing architecture and patterns
+
+- Note complexity signals:
+  - Simple CRUD / algorithmic logic / workflows / external integrations
+
+### 3. Implementation Approach Options
+
+#### Option A: Extend Existing Components
+**When to consider**: Feature fits naturally into existing structure
+
+- **Which files/modules to extend**:
+  - Identify specific files requiring changes
+  - Assess impact on existing functionality
+  - Evaluate backward compatibility concerns
+
+- **Compatibility assessment**:
+  - Check if extension respects existing interfaces
+  - Verify no breaking changes to consumers
+  - Assess test coverage impact
+
+- **Complexity and maintainability**:
+  - Evaluate cognitive load of additional functionality
+  - Check if single responsibility principle is maintained
+  - Assess if file size remains manageable
+
+**Trade-offs**:
+- ✅ Minimal new files, faster initial development
+- ✅ Leverages existing patterns and infrastructure
+- ❌ Risk of bloating existing components
+- ❌ May complicate existing logic
+
+#### Option B: Create New Components
+**When to consider**: Feature has distinct responsibility or existing components are already complex
+
+- **Rationale for new creation**:
+  - Clear separation of concerns justifies new file
+  - Existing components are already complex
+  - Feature has distinct lifecycle or dependencies
+
+- **Integration points**:
+  - How new components connect to existing system
+  - APIs or interfaces exposed
+  - Dependencies on existing components
+
+- **Responsibility boundaries**:
+  - Clear definition of what new component owns
+  - Interfaces with existing components
+  - Data flow and control flow
+
+**Trade-offs**:
+- ✅ Clean separation of concerns
+- ✅ Easier to test in isolation
+- ✅ Reduces complexity in existing components
+- ❌ More files to navigate
+- ❌ Requires careful interface design
+
+#### Option C: Hybrid Approach
+**When to consider**: Complex features requiring both extension and new creation
+
+- **Combination strategy**:
+  - Which parts extend existing components
+  - Which parts warrant new components
+  - How they interact
+
+- **Phased implementation**:
+  - Initial phase: minimal viable changes
+  - Subsequent phases: refactoring or new components
+  - Migration strategy if needed
+
+- **Risk mitigation**:
+  - Incremental rollout approach
+  - Feature flags or configuration
+  - Rollback strategy
+
+**Trade-offs**:
+- ✅ Balanced approach for complex features
+- ✅ Allows iterative refinement
+- ❌ More complex planning required
+- ❌ Potential for inconsistency if not well-coordinated
+
+### 4. Out-of-Scope for Gap Analysis
+
+- Defer deep research activities to the design phase.
+- Record unknowns as concise "Research Needed" items only.
+
+### 5. Implementation Complexity & Risk
+
+- Effort:
+  - S (1–3 days): existing patterns, minimal deps, straightforward integration
+  - M (3–7 days): some new patterns/integrations, moderate complexity
+  - L (1–2 weeks): significant functionality, multiple integrations or workflows
+  - XL (2+ weeks): architectural changes, unfamiliar tech, broad impact
+- Risk:
+  - High: unknown tech, complex integrations, architectural shifts, unclear perf/security path
+  - Medium: new patterns with guidance, manageable integrations, known perf solutions
+  - Low: extend established patterns, familiar tech, clear scope, minimal integration
+
+### Output Checklist
+
+- Requirement-to-Asset Map with gaps tagged (Missing / Unknown / Constraint)
+- Options A/B/C with short rationale and trade-offs
+- Effort (S/M/L/XL) and Risk (High/Medium/Low) with one-line justification each
+- Recommendations for design phase:
+  - Preferred approach and key decisions
+  - Research items to carry forward
+
+## Principles
+
+- **Information over decisions**: Provide analysis and options, not final choices
+- **Multiple viable options**: Offer credible alternatives when applicable
+- **Explicit gaps and assumptions**: Flag unknowns and constraints clearly
+- **Context-aware**: Align with existing patterns and architecture limits
+- **Transparent effort and risk**: Justify labels succinctly

+ 90 - 0
.kiro/settings/rules/steering-principles.md

@@ -0,0 +1,90 @@
+# Steering Principles
+
+Steering files are **project memory**, not exhaustive specifications.
+
+---
+
+## Content Granularity
+
+### Golden Rule
+> "If new code follows existing patterns, steering shouldn't need updating."
+
+### ✅ Document
+- Organizational patterns (feature-first, layered)
+- Naming conventions (PascalCase rules)
+- Import strategies (absolute vs relative)
+- Architectural decisions (state management)
+- Technology standards (key frameworks)
+
+### ❌ Avoid
+- Complete file listings
+- Every component description
+- All dependencies
+- Implementation details
+- Agent-specific tooling directories (e.g. `.cursor/`, `.gemini/`, `.claude/`)
+- Detailed documentation of `.kiro/` metadata directories (settings, automation)
+
+### Example Comparison
+
+**Bad** (Specification-like):
+```markdown
+- /components/Button.tsx - Primary button with variants
+- /components/Input.tsx - Text input with validation
+- /components/Modal.tsx - Modal dialog
+... (50+ files)
+```
+
+**Good** (Project Memory):
+```markdown
+## UI Components (`/components/ui/`)
+Reusable, design-system aligned primitives
+- Named by function (Button, Input, Modal)
+- Export component + TypeScript interface
+- No business logic
+```
+
+---
+
+## Security
+
+Never include:
+- API keys, passwords, credentials
+- Database URLs, internal IPs
+- Secrets or sensitive data
+
+---
+
+## Quality Standards
+
+- **Single domain**: One topic per file
+- **Concrete examples**: Show patterns with code
+- **Explain rationale**: Why decisions were made
+- **Maintainable size**: 100-200 lines typical
+
+---
+
+## Preservation (when updating)
+
+- Preserve user sections and custom examples
+- Additive by default (add, don't replace)
+- Add `updated_at` timestamp
+- Note why changes were made
+
+---
+
+## Notes
+
+- Templates are starting points, customize as needed
+- Follow same granularity principles as core steering
+- All steering files loaded as project memory
+- Light references to `.kiro/specs/` and `.kiro/steering/` are acceptable; avoid other `.kiro/` directories
+- Custom files equally important as core files
+
+---
+
+## File-Specific Focus
+
+- **product.md**: Purpose, value, business context (not exhaustive features)
+- **tech.md**: Key frameworks, standards, conventions (not all dependencies)
+- **structure.md**: Organization patterns, naming rules (not directory trees)
+- **Custom files**: Specialized patterns (API, testing, security, etc.)

+ 131 - 0
.kiro/settings/rules/tasks-generation.md

@@ -0,0 +1,131 @@
+# Task Generation Rules
+
+## Core Principles
+
+### 1. Natural Language Descriptions
+Focus on capabilities and outcomes, not code structure.
+
+**Describe**:
+- What functionality to achieve
+- Business logic and behavior
+- Features and capabilities
+- Domain language and concepts
+- Data relationships and workflows
+
+**Avoid**:
+- File paths and directory structure
+- Function/method names and signatures
+- Type definitions and interfaces
+- Class names and API contracts
+- Specific data structures
+
+**Rationale**: Implementation details (files, methods, types) are defined in design.md. Tasks describe the functional work to be done.
+
+### 2. Task Integration & Progression
+
+**Every task must**:
+- Build on previous outputs (no orphaned code)
+- Connect to the overall system (no hanging features)
+- Progress incrementally (no big jumps in complexity)
+- Validate core functionality early in sequence
+- Respect architecture boundaries defined in design.md (Architecture Pattern & Boundary Map)
+- Honor interface contracts documented in design.md
+- Use major task summaries sparingly—omit detail bullets if the work is fully captured by child tasks.
+
+**End with integration tasks** to wire everything together.
+
+### 3. Flexible Task Sizing
+
+**Guidelines**:
+- **Major tasks**: As many sub-tasks as logically needed (group by cohesion)
+- **Sub-tasks**: 1-3 hours each, 3-10 details per sub-task
+- Balance between too granular and too broad
+
+**Don't force arbitrary numbers** - let logical grouping determine structure.
+
+### 4. Requirements Mapping
+
+**End each task detail section with**:
+- `_Requirements: X.X, Y.Y_` listing **only numeric requirement IDs** (comma-separated). Never append descriptive text, parentheses, translations, or free-form labels.
+- For cross-cutting requirements, list every relevant requirement ID. All requirements MUST have numeric IDs in requirements.md. If an ID is missing, stop and correct requirements.md before generating tasks.
+- Reference components/interfaces from design.md when helpful (e.g., `_Contracts: AuthService API`)
+
+### 5. Code-Only Focus
+
+**Include ONLY**:
+- Coding tasks (implementation)
+- Testing tasks (unit, integration, E2E)
+- Technical setup tasks (infrastructure, configuration)
+
+**Exclude**:
+- Deployment tasks
+- Documentation tasks
+- User testing
+- Marketing/business activities
+
+### Optional Test Coverage Tasks
+
+- When the design already guarantees functional coverage and rapid MVP delivery is prioritized, mark purely test-oriented follow-up work (e.g., baseline rendering/unit tests) as **optional** using the `- [ ]*` checkbox form.
+- Only apply the optional marker when the sub-task directly references acceptance criteria from requirements.md in its detail bullets.
+- Never mark implementation work or integration-critical verification as optional—reserve `*` for auxiliary/deferrable test coverage that can be revisited post-MVP.
+
+## Task Hierarchy Rules
+
+### Maximum 2 Levels
+- **Level 1**: Major tasks (1, 2, 3, 4...)
+- **Level 2**: Sub-tasks (1.1, 1.2, 2.1, 2.2...)
+- **No deeper nesting** (no 1.1.1)
+- If a major task would contain only a single actionable item, collapse the structure and promote the sub-task to the major level (e.g., replace `1.1` with `1.`).
+- When a major task exists purely as a container, keep the checkbox description concise and avoid duplicating detailed bullets—reserve specifics for its sub-tasks.
+
+### Sequential Numbering
+- Major tasks MUST increment: 1, 2, 3, 4, 5...
+- Sub-tasks reset per major task: 1.1, 1.2, then 2.1, 2.2...
+- Never repeat major task numbers
+
+### Parallel Analysis (default)
+- Assume parallel analysis is enabled unless explicitly disabled (e.g. `--sequential` flag).
+- Identify tasks that can run concurrently when **all** conditions hold:
+  - No data dependency on other pending tasks
+  - No shared file or resource contention
+  - No prerequisite review/approval from another task
+- Validate that identified parallel tasks operate within separate boundaries defined in the Architecture Pattern & Boundary Map.
+- Confirm API/event contracts from design.md do not overlap in ways that cause conflicts.
+- Append `(P)` immediately after the task number for each parallel-capable task:
+  - Example: `- [ ] 2.1 (P) Build background worker`
+  - Apply to both major tasks and sub-tasks when appropriate.
+- If sequential mode is requested, omit `(P)` markers entirely.
+- Group parallel tasks logically (same parent when possible) and highlight any ordering caveats in detail bullets.
+- Explicitly call out dependencies that prevent `(P)` even when tasks look similar.
+
+### Checkbox Format
+```markdown
+- [ ] 1. Major task description
+- [ ] 1.1 Sub-task description
+  - Detail item 1
+  - Detail item 2
+  - _Requirements: X.X_
+
+- [ ] 1.2 Sub-task description
+  - Detail items...
+  - _Requirements: Y.Y_
+
+- [ ] 1.3 Sub-task description
+  - Detail items...
+  - _Requirements: Z.Z, W.W_
+
+- [ ] 2. Next major task (NOT 1 again!)
+- [ ] 2.1 Sub-task...
+```
+
+## Requirements Coverage
+
+**Mandatory Check**:
+- ALL requirements from requirements.md MUST be covered
+- Cross-reference every requirement ID with task mappings
+- If gaps found: Return to requirements or design phase
+- No requirement should be left without corresponding tasks
+
+Use `N.M`-style numeric requirement IDs where `N` is the top-level requirement number from requirements.md (for example, Requirement 1 → 1.1, 1.2; Requirement 2 → 2.1, 2.2), and `M` is a local index within that requirement group.
+
+Document any intentionally deferred requirements with rationale.

+ 34 - 0
.kiro/settings/rules/tasks-parallel-analysis.md

@@ -0,0 +1,34 @@
+# Parallel Task Analysis Rules
+
+## Purpose
+Provide a consistent way to identify implementation tasks that can be safely executed in parallel while generating `tasks.md`.
+
+## When to Consider Tasks Parallel
+Only mark a task as parallel-capable when **all** of the following are true:
+
+1. **No data dependency** on pending tasks.
+2. **No conflicting files or shared mutable resources** are touched.
+3. **No prerequisite review/approval** from another task is required beforehand.
+4. **Environment/setup work** needed by this task is already satisfied or covered within the task itself.
+
+## Marking Convention
+- Append `(P)` immediately after the numeric identifier for each qualifying task.
+  - Example: `- [ ] 2.1 (P) Build background worker for emails`
+- Apply `(P)` to both major tasks and sub-tasks when appropriate.
+- If sequential execution is requested (e.g. via `--sequential` flag), omit `(P)` markers entirely.
+- Keep `(P)` **outside** of checkbox brackets to avoid confusion with completion state.
+
+## Grouping & Ordering Guidelines
+- Group parallel tasks under the same parent whenever the work belongs to the same theme.
+- List obvious prerequisites or caveats in the detail bullets (e.g., "Requires schema migration from 1.2").
+- When two tasks look similar but are not parallel-safe, call out the blocking dependency explicitly.
+- Skip marking container-only major tasks (those without their own actionable detail bullets) with `(P)`—evaluate parallel execution at the sub-task level instead.
+
+## Quality Checklist
+Before marking a task with `(P)`, ensure you have:
+
+- Verified that running this task concurrently will not create merge or deployment conflicts.
+- Captured any shared state expectations in the detail bullets.
+- Confirmed that the implementation can be tested independently.
+
+If any check fails, **do not** mark the task with `(P)` and explain the dependency in the task details.

+ 276 - 0
.kiro/settings/templates/specs/design.md

@@ -0,0 +1,276 @@
+# Design Document Template
+
+---
+**Purpose**: Provide sufficient detail to ensure implementation consistency across different implementers, preventing interpretation drift.
+
+**Approach**:
+- Include essential sections that directly inform implementation decisions
+- Omit optional sections unless critical to preventing implementation errors
+- Match detail level to feature complexity
+- Use diagrams and tables over lengthy prose
+
+**Warning**: Approaching 1000 lines indicates excessive feature complexity that may require design simplification.
+---
+
+> Sections may be reordered (e.g., surfacing Requirements Traceability earlier or moving Data Models nearer Architecture) when it improves clarity. Within each section, keep the flow **Summary → Scope → Decisions → Impacts/Risks** so reviewers can scan consistently.
+
+## Overview
+_2-3 paragraphs max_
+**Purpose**: This feature delivers [specific value] to [target users].
+**Users**: [Target user groups] will utilize this for [specific workflows].
+**Impact** (if applicable): Changes the current [system state] by [specific modifications].
+
+
+### Goals
+- Primary objective 1
+- Primary objective 2  
+- Success criteria
+
+### Non-Goals
+- Explicitly excluded functionality
+- Future considerations outside current scope
+- Integration points deferred
+
+## Architecture
+
+> Reference detailed discovery notes in `research.md` only for background; keep design.md self-contained for reviewers by capturing all decisions and contracts here.
+> Capture key decisions in text and let diagrams carry structural detail—avoid repeating the same information in prose.
+
+### Existing Architecture Analysis (if applicable)
+When modifying existing systems:
+- Current architecture patterns and constraints
+- Existing domain boundaries to be respected
+- Integration points that must be maintained
+- Technical debt addressed or worked around
+
+### Architecture Pattern & Boundary Map
+**RECOMMENDED**: Include Mermaid diagram showing the chosen architecture pattern and system boundaries (required for complex features, optional for simple additions)
+
+**Architecture Integration**:
+- Selected pattern: [name and brief rationale]
+- Domain/feature boundaries: [how responsibilities are separated to avoid conflicts]
+- Existing patterns preserved: [list key patterns]
+- New components rationale: [why each is needed]
+- Steering compliance: [principles maintained]
+
+### Technology Stack
+
+| Layer | Choice / Version | Role in Feature | Notes |
+|-------|------------------|-----------------|-------|
+| Frontend / CLI | | | |
+| Backend / Services | | | |
+| Data / Storage | | | |
+| Messaging / Events | | | |
+| Infrastructure / Runtime | | | |
+
+> Keep rationale concise here and, when more depth is required (trade-offs, benchmarks), add a short summary plus pointer to the Supporting References section and `research.md` for raw investigation notes.
+
+## System Flows
+
+Provide only the diagrams needed to explain non-trivial flows. Use pure Mermaid syntax. Common patterns:
+- Sequence (multi-party interactions)
+- Process / state (branching logic or lifecycle)
+- Data / event flow (pipelines, async messaging)
+
+Skip this section entirely for simple CRUD changes.
+> Describe flow-level decisions (e.g., gating conditions, retries) briefly after the diagram instead of restating each step.
+
+## Requirements Traceability
+
+Use this section for complex or compliance-sensitive features where requirements span multiple domains. Straightforward 1:1 mappings can rely on the Components summary table.
+
+Map each requirement ID (e.g., `2.1`) to the design elements that realize it.
+
+| Requirement | Summary | Components | Interfaces | Flows |
+|-------------|---------|------------|------------|-------|
+| 1.1 | | | | |
+| 1.2 | | | | |
+
+> Omit this section only when a single component satisfies a single requirement without cross-cutting concerns.
+
+## Components and Interfaces
+
+Provide a quick reference before diving into per-component details.
+
+- Summaries can be a table or compact list. Example table:
+  | Component | Domain/Layer | Intent | Req Coverage | Key Dependencies (P0/P1) | Contracts |
+  |-----------|--------------|--------|--------------|--------------------------|-----------|
+  | ExampleComponent | UI | Displays XYZ | 1, 2 | GameProvider (P0), MapPanel (P1) | Service, State |
+- Only components introducing new boundaries (e.g., logic hooks, external integrations, persistence) require full detail blocks. Simple presentation components can rely on the summary row plus a short Implementation Note.
+
+Group detailed blocks by domain or architectural layer. For each detailed component, list requirement IDs as `2.1, 2.3` (omit “Requirement”). When multiple UI components share the same contract, reference a base interface/props definition instead of duplicating code blocks.
+
+### [Domain / Layer]
+
+#### [Component Name]
+
+| Field | Detail |
+|-------|--------|
+| Intent | 1-line description of the responsibility |
+| Requirements | 2.1, 2.3 |
+| Owner / Reviewers | (optional) |
+
+**Responsibilities & Constraints**
+- Primary responsibility
+- Domain boundary and transaction scope
+- Data ownership / invariants
+
+**Dependencies**
+- Inbound: Component/service name — purpose (Criticality)
+- Outbound: Component/service name — purpose (Criticality)
+- External: Service/library — purpose (Criticality)
+
+Summarize external dependency findings here; deeper investigation (API signatures, rate limits, migration notes) lives in `research.md`.
+
+**Contracts**: Service [ ] / API [ ] / Event [ ] / Batch [ ] / State [ ]  ← check only the ones that apply.
+
+##### Service Interface
+```typescript
+interface [ComponentName]Service {
+  methodName(input: InputType): Result<OutputType, ErrorType>;
+}
+```
+- Preconditions:
+- Postconditions:
+- Invariants:
+
+##### API Contract
+| Method | Endpoint | Request | Response | Errors |
+|--------|----------|---------|----------|--------|
+| POST | /api/resource | CreateRequest | Resource | 400, 409, 500 |
+
+##### Event Contract
+- Published events:  
+- Subscribed events:  
+- Ordering / delivery guarantees:
+
+##### Batch / Job Contract
+- Trigger:  
+- Input / validation:  
+- Output / destination:  
+- Idempotency & recovery:
+
+##### State Management
+- State model:  
+- Persistence & consistency:  
+- Concurrency strategy:
+
+**Implementation Notes**
+- Integration: 
+- Validation: 
+- Risks:
+
+## Data Models
+
+Focus on the portions of the data landscape that change with this feature.
+
+### Domain Model
+- Aggregates and transactional boundaries
+- Entities, value objects, domain events
+- Business rules & invariants
+- Optional Mermaid diagram for complex relationships
+
+### Logical Data Model
+
+**Structure Definition**:
+- Entity relationships and cardinality
+- Attributes and their types
+- Natural keys and identifiers
+- Referential integrity rules
+
+**Consistency & Integrity**:
+- Transaction boundaries
+- Cascading rules
+- Temporal aspects (versioning, audit)
+
+### Physical Data Model
+**When to include**: When implementation requires specific storage design decisions
+
+**For Relational Databases**:
+- Table definitions with data types
+- Primary/foreign keys and constraints
+- Indexes and performance optimizations
+- Partitioning strategy for scale
+
+**For Document Stores**:
+- Collection structures
+- Embedding vs referencing decisions
+- Sharding key design
+- Index definitions
+
+**For Event Stores**:
+- Event schema definitions
+- Stream aggregation strategies
+- Snapshot policies
+- Projection definitions
+
+**For Key-Value/Wide-Column Stores**:
+- Key design patterns
+- Column families or value structures
+- TTL and compaction strategies
+
+### Data Contracts & Integration
+
+**API Data Transfer**
+- Request/response schemas
+- Validation rules
+- Serialization format (JSON, Protobuf, etc.)
+
+**Event Schemas**
+- Published event structures
+- Schema versioning strategy
+- Backward/forward compatibility rules
+
+**Cross-Service Data Management**
+- Distributed transaction patterns (Saga, 2PC)
+- Data synchronization strategies
+- Eventual consistency handling
+
+Skip subsections that are not relevant to this feature.
+
+## Error Handling
+
+### Error Strategy
+Concrete error handling patterns and recovery mechanisms for each error type.
+
+### Error Categories and Responses
+**User Errors** (4xx): Invalid input → field-level validation; Unauthorized → auth guidance; Not found → navigation help
+**System Errors** (5xx): Infrastructure failures → graceful degradation; Timeouts → circuit breakers; Resource exhaustion → rate limiting  
+**Business Logic Errors** (422): Rule violations → condition explanations; State conflicts → transition guidance
+
+**Process Flow Visualization** (when complex business logic exists):
+Include Mermaid flowchart only for complex error scenarios with business workflows.
+
+### Monitoring
+Error tracking, logging, and health monitoring implementation.
+
+## Testing Strategy
+
+### Default sections (adapt names/sections to fit the domain)
+- Unit Tests: 3–5 items from core functions/modules (e.g., auth methods, subscription logic)
+- Integration Tests: 3–5 cross-component flows (e.g., webhook handling, notifications)
+- E2E/UI Tests (if applicable): 3–5 critical user paths (e.g., forms, dashboards)
+- Performance/Load (if applicable): 3–4 items (e.g., concurrency, high-volume ops)
+
+## Optional Sections (include when relevant)
+
+### Security Considerations
+_Use this section for features handling auth, sensitive data, external integrations, or user permissions. Capture only decisions unique to this feature; defer baseline controls to steering docs._
+- Threat modeling, security controls, compliance requirements
+- Authentication and authorization patterns
+- Data protection and privacy considerations
+
+### Performance & Scalability
+_Use this section when performance targets, high load, or scaling concerns exist. Record only feature-specific targets or trade-offs and rely on steering documents for general practices._
+- Target metrics and measurement strategies
+- Scaling approaches (horizontal/vertical)
+- Caching strategies and optimization techniques
+
+### Migration Strategy
+Include a Mermaid flowchart showing migration phases when schema/data movement is required.
+- Phase breakdown, rollback triggers, validation checkpoints
+
+## Supporting References (Optional)
+- Create this section only when keeping the information in the main body would hurt readability (e.g., very long TypeScript definitions, vendor option matrices, exhaustive schema tables). Keep decision-making context in the main sections so the design stays self-contained.
+- Link to the supporting references from the main text instead of inlining large snippets.
+- Background research notes and comparisons continue to live in `research.md`, but their conclusions must be summarized in the main design.

+ 22 - 0
.kiro/settings/templates/specs/init.json

@@ -0,0 +1,22 @@
+{
+  "feature_name": "{{FEATURE_NAME}}",
+  "created_at": "{{TIMESTAMP}}",
+  "updated_at": "{{TIMESTAMP}}",
+  "language": "en",
+  "phase": "initialized",
+  "approvals": {
+    "requirements": {
+      "generated": false,
+      "approved": false
+    },
+    "design": {
+      "generated": false,
+      "approved": false
+    },
+    "tasks": {
+      "generated": false,
+      "approved": false
+    }
+  },
+  "ready_for_implementation": false
+}

+ 9 - 0
.kiro/settings/templates/specs/requirements-init.md

@@ -0,0 +1,9 @@
+# Requirements Document
+
+## Project Description (Input)
+{{PROJECT_DESCRIPTION}}
+
+## Requirements
+<!-- Will be generated in /kiro:spec-requirements phase -->
+
+

+ 26 - 0
.kiro/settings/templates/specs/requirements.md

@@ -0,0 +1,26 @@
+# Requirements Document
+
+## Introduction
+{{INTRODUCTION}}
+
+## Requirements
+
+### Requirement 1: {{REQUIREMENT_AREA_1}}
+<!-- Requirement headings MUST include a leading numeric ID only (for example: "Requirement 1: ...", "1. Overview", "2 Feature: ..."). Alphabetic IDs like "Requirement A" are not allowed. -->
+**Objective:** As a {{ROLE}}, I want {{CAPABILITY}}, so that {{BENEFIT}}
+
+#### Acceptance Criteria
+1. When [event], the [system] shall [response/action]
+2. If [trigger], then the [system] shall [response/action]
+3. While [precondition], the [system] shall [response/action]
+4. Where [feature is included], the [system] shall [response/action]
+5. The [system] shall [response/action]
+
+### Requirement 2: {{REQUIREMENT_AREA_2}}
+**Objective:** As a {{ROLE}}, I want {{CAPABILITY}}, so that {{BENEFIT}}
+
+#### Acceptance Criteria
+1. When [event], the [system] shall [response/action]
+2. When [event] and [condition], the [system] shall [response/action]
+
+<!-- Additional requirements follow the same pattern -->

+ 61 - 0
.kiro/settings/templates/specs/research.md

@@ -0,0 +1,61 @@
+# Research & Design Decisions Template
+
+---
+**Purpose**: Capture discovery findings, architectural investigations, and rationale that inform the technical design.
+
+**Usage**:
+- Log research activities and outcomes during the discovery phase.
+- Document design decision trade-offs that are too detailed for `design.md`.
+- Provide references and evidence for future audits or reuse.
+---
+
+## Summary
+- **Feature**: `<feature-name>`
+- **Discovery Scope**: New Feature / Extension / Simple Addition / Complex Integration
+- **Key Findings**:
+  - Finding 1
+  - Finding 2
+  - Finding 3
+
+## Research Log
+Document notable investigation steps and their outcomes. Group entries by topic for readability.
+
+### [Topic or Question]
+- **Context**: What triggered this investigation?
+- **Sources Consulted**: Links, documentation, API references, benchmarks
+- **Findings**: Concise bullet points summarizing the insights
+- **Implications**: How this affects architecture, contracts, or implementation
+
+_Repeat the subsection for each major topic._
+
+## Architecture Pattern Evaluation
+List candidate patterns or approaches that were considered. Use the table format where helpful.
+
+| Option | Description | Strengths | Risks / Limitations | Notes |
+|--------|-------------|-----------|---------------------|-------|
+| Hexagonal | Ports & adapters abstraction around core domain | Clear boundaries, testable core | Requires adapter layer build-out | Aligns with existing steering principle X |
+
+## Design Decisions
+Record major decisions that influence `design.md`. Focus on choices with significant trade-offs.
+
+### Decision: `<Title>`
+- **Context**: Problem or requirement driving the decision
+- **Alternatives Considered**:
+  1. Option A — short description
+  2. Option B — short description
+- **Selected Approach**: What was chosen and how it works
+- **Rationale**: Why this approach fits the current project context
+- **Trade-offs**: Benefits vs. compromises
+- **Follow-up**: Items to verify during implementation or testing
+
+_Repeat the subsection for each decision._
+
+## Risks & Mitigations
+- Risk 1 — Proposed mitigation
+- Risk 2 — Proposed mitigation
+- Risk 3 — Proposed mitigation
+
+## References
+Provide canonical links and citations (official docs, standards, ADRs, internal guidelines).
+- [Title](https://example.com) — brief note on relevance
+- ...

+ 21 - 0
.kiro/settings/templates/specs/tasks.md

@@ -0,0 +1,21 @@
+# Implementation Plan
+
+## Task Format Template
+
+Use whichever pattern fits the work breakdown:
+
+### Major task only
+- [ ] {{NUMBER}}. {{TASK_DESCRIPTION}}{{PARALLEL_MARK}}
+  - {{DETAIL_ITEM_1}} *(Include details only when needed. If the task stands alone, omit bullet items.)*
+  - _Requirements: {{REQUIREMENT_IDS}}_
+
+### Major + Sub-task structure
+- [ ] {{MAJOR_NUMBER}}. {{MAJOR_TASK_SUMMARY}}
+- [ ] {{MAJOR_NUMBER}}.{{SUB_NUMBER}} {{SUB_TASK_DESCRIPTION}}{{SUB_PARALLEL_MARK}}
+  - {{DETAIL_ITEM_1}}
+  - {{DETAIL_ITEM_2}}
+  - _Requirements: {{REQUIREMENT_IDS}}_ *(IDs only; do not add descriptions or parentheses.)*
+
+> **Parallel marker**: Append ` (P)` only to tasks that can be executed in parallel. Omit the marker when running in `--sequential` mode.
+>
+> **Optional test coverage**: When a sub-task is deferrable test work tied to acceptance criteria, mark the checkbox as `- [ ]*` and explain the referenced requirements in the detail bullets.

+ 69 - 0
.kiro/settings/templates/steering-custom/api-standards.md

@@ -0,0 +1,69 @@
+# API Standards
+
+[Purpose: consistent API patterns for naming, structure, auth, versioning, and errors]
+
+## Philosophy
+- Prefer predictable, resource-oriented design
+- Be explicit in contracts; minimize breaking changes
+- Secure by default (auth first, least privilege)
+
+## Endpoint Pattern
+```
+/{version}/{resource}[/{id}][/{sub-resource}]
+```
+Examples:
+- `/api/v1/users`
+- `/api/v1/users/:id`
+- `/api/v1/users/:id/posts`
+
+HTTP verbs:
+- GET (read, safe, idempotent)
+- POST (create)
+- PUT/PATCH (update)
+- DELETE (remove, idempotent)
+
+## Request/Response
+
+Request (typical):
+```json
+{ "data": { ... }, "metadata": { "requestId": "..." } }
+```
+
+Success:
+```json
+{ "data": { ... }, "meta": { "timestamp": "...", "version": "..." } }
+```
+
+Error:
+```json
+{ "error": { "code": "ERROR_CODE", "message": "...", "field": "optional" } }
+```
+(See error-handling for rules.)
+
+## Status Codes (pattern)
+- 2xx: Success (200 read, 201 create, 204 delete)
+- 4xx: Client issues (400 validation, 401/403 auth, 404 missing)
+- 5xx: Server issues (500 generic, 503 unavailable)
+Choose the status that best reflects the outcome.
+
+## Authentication
+- Credentials in standard location
+```
+Authorization: Bearer {token}
+```
+- Reject unauthenticated before business logic
+
+## Versioning
+- Version via URL/header/media-type
+- Breaking change → new version
+- Non-breaking → same version
+- Provide deprecation window and comms
+
+## Pagination/Filtering (if applicable)
+- Pagination: `page`, `pageSize` or cursor-based
+- Filtering: explicit query params
+- Sorting: `sort=field:asc|desc`
+Return pagination metadata in `meta`.
+
+---
+_Focus on patterns and decisions, not endpoint catalogs._

+ 67 - 0
.kiro/settings/templates/steering-custom/authentication.md

@@ -0,0 +1,67 @@
+# Authentication & Authorization Standards
+
+[Purpose: unify auth model, token/session lifecycle, permission checks, and security]
+
+## Philosophy
+- Clear separation: authentication (who) vs authorization (what)
+- Secure by default: least privilege, fail closed, short-lived tokens
+- UX-aware: friction where risk is high, smooth otherwise
+
+## Authentication
+
+### Method (choose + rationale)
+- Options: JWT, Session, OAuth2, hybrid
+- Choice: [our method] because [reason]
+
+### Flow (high-level)
+```
+1) User proves identity (credentials or provider)
+2) Server verifies and issues token/session
+3) Client sends token per request
+4) Server verifies token and proceeds
+```
+
+### Token/Session Lifecycle
+- Storage: httpOnly cookie or Authorization header
+- Expiration: short-lived access, longer refresh (if used)
+- Refresh: rotate tokens; respect revocation
+- Revocation: blacklist/rotate on logout/compromise
+
+### Security Pattern
+- Enforce TLS; never expose tokens to JS when avoidable
+- Bind token to audience/issuer; include minimal claims
+- Consider device binding and IP/risk checks for sensitive actions
+
+## Authorization
+
+### Permission Model
+- Choose one: RBAC / ABAC / ownership-based / hybrid
+- Define roles/attributes centrally; avoid hardcoding across codebase
+
+### Checks (where to enforce)
+- Route/middleware: coarse-grained gate
+- Domain/service: fine-grained decisions
+- UI: conditional rendering (no security reliance)
+
+Example pattern:
+```typescript
+requirePermission('resource:action'); // route
+if (!user.can('resource:action')) throw new ForbiddenError(); // domain
+```
+
+### Ownership
+- Pattern: owner OR privileged role can act
+- Verify on entity boundary before mutation
+
+## Passwords & MFA
+- Passwords: strong policy, hashed (bcrypt/argon2), never plaintext
+- Reset: time-limited token, single-use, notify user
+- MFA: step-up for risky operations (policy-driven)
+
+## API-to-API Auth
+- Use API keys or OAuth client credentials
+- Scope keys minimally; rotate and audit usage
+- Rate limit by identity (user/key)
+
+---
+_Focus on patterns and decisions. No library-specific code._

+ 46 - 0
.kiro/settings/templates/steering-custom/database.md

@@ -0,0 +1,46 @@
+# Database Standards
+
+[Purpose: guide schema design, queries, migrations, and integrity]
+
+## Philosophy
+- Model the domain first; optimize after correctness
+- Prefer explicit constraints; let database enforce invariants
+- Query only what you need; measure before optimizing
+
+## Naming & Types
+- Tables: `snake_case`, plural (`users`, `order_items`)
+- Columns: `snake_case` (`created_at`, `user_id`)
+- FKs: `{table}_id` referencing `{table}.id`
+- Types: timezone-aware timestamps; strong IDs; precise money types
+
+## Relationships
+- 1:N: FK in child
+- N:N: join table with compound key
+- 1:1: FK + UNIQUE
+
+## Migrations
+- Immutable migrations; always add rollback
+- Small, focused steps; test on non-prod first
+- Naming: `{seq}_{action}_{object}` (e.g., `002_add_email_index`)
+
+## Query Patterns
+- ORM for simple CRUD and safety; raw SQL for complex/perf-critical
+- Avoid N+1 (eager load/batching); paginate large sets
+- Index FKs and frequently filtered/sorted columns
+
+## Connection & Transactions
+- Use pooling (size/timeouts based on workload)
+- One connection per unit of work; close/return promptly
+- Wrap multi-step changes in transactions
+
+## Data Integrity
+- Use NOT NULL/UNIQUE/CHECK/FK constraints
+- Validate at DB when appropriate (defense in depth)
+- Prefer generated columns for consistent derivations
+
+## Backup & Recovery
+- Regular backups with retention; test restores
+- Document RPO/RTO targets; monitor backup jobs
+
+---
+_Focus on patterns and decisions. No environment-specific settings._

+ 54 - 0
.kiro/settings/templates/steering-custom/deployment.md

@@ -0,0 +1,54 @@
+# Deployment Standards
+
+[Purpose: safe, repeatable releases with clear environment and pipeline patterns]
+
+## Philosophy
+- Automate; test before deploy; verify after deploy
+- Prefer incremental rollout with fast rollback
+- Production changes must be observable and reversible
+
+## Environments
+- Dev: fast iteration; debugging enabled
+- Staging: mirrors prod; release validation
+- Prod: hardened; monitored; least privilege
+
+## CI/CD Flow
+```
+Code → Test → Build → Scan → Deploy (staged) → Verify
+```
+Principles:
+- Fail fast on tests/scans; block deploy
+- Artifact builds are reproducible (lockfiles, pinned versions)
+- Manual approval for prod; auditable trail
+
+## Deployment Strategies
+- Rolling: gradual instance replacement
+- Blue-Green: switch traffic between two pools
+- Canary: small % of users first, expand on health
+Choose per risk profile; document default.
+
+## Zero-Downtime & Migrations
+- Health checks gate traffic; graceful shutdown
+- Backwards-compatible DB changes during rollout
+- Separate migration step; test rollback paths
+
+## Rollback
+- Keep previous version ready; automate revert
+- Rollback faster than fix-forward; document triggers
+
+## Configuration & Secrets
+- 12-factor config via env; never commit secrets
+- Secret manager; rotate; least privilege; audit access
+- Validate required env vars at startup
+
+## Health & Monitoring
+- Endpoints: `/health`, `/health/live`, `/health/ready`
+- Monitor latency, error rate, throughput, saturation
+- Alerts on SLO breaches/spikes; tune to avoid fatigue
+
+## Incident Response & DR
+- Standard playbook: detect → assess → mitigate → communicate → resolve → post-mortem
+- Backups with retention; test restore; defined RPO/RTO
+
+---
+_Focus on rollout patterns and safeguards. No provider-specific steps._

+ 59 - 0
.kiro/settings/templates/steering-custom/error-handling.md

@@ -0,0 +1,59 @@
+# Error Handling Standards
+
+[Purpose: unify how errors are classified, shaped, propagated, logged, and monitored]
+
+## Philosophy
+- Fail fast where possible; degrade gracefully at system boundaries
+- Consistent error shape across the stack (human + machine readable)
+- Handle known errors close to source; surface unknowns to a global handler
+
+## Classification (decide handling by source)
+- Client: Input/validation/user action issues → 4xx
+- Server: System failures/unexpected exceptions → 5xx
+- Business: Rule/state violations → 4xx (e.g., 409)
+- External: 3rd-party/network failures → map to 5xx or 4xx with context
+
+## Error Shape (single canonical format)
+```json
+{
+  "error": {
+    "code": "ERROR_CODE",
+    "message": "Human-readable message",
+    "requestId": "trace-id",
+    "timestamp": "ISO-8601"
+  }
+}
+```
+Principles: stable code enums, no secrets, include trace info.
+
+## Propagation (where to convert)
+- API layer: Convert domain errors → HTTP status + canonical body
+- Service layer: Throw typed business errors, avoid stringly-typed errors
+- Data/external layer: Wrap provider errors with safe, actionable codes
+- Unknown errors: Bubble to global handler → 500 + generic message
+
+Example pattern:
+```typescript
+try { return await useCase(); }
+catch (e) {
+  if (e instanceof BusinessError) return respondMapped(e);
+  logError(e); return respondInternal();
+}
+```
+
+## Logging (context over noise)
+Log: operation, userId (if available), code, message, stack, requestId, minimal context.
+Do not log: passwords, tokens, secrets, full PII, full bodies with sensitive data.
+Levels: ERROR (failures), WARN (recoverable/edge), INFO (key events), DEBUG (diagnostics).
+
+## Retry (only when safe)
+Retry when: network/timeouts/transient 5xx AND operation is idempotent.
+Do not retry: 4xx, business errors, non-idempotent flows.
+Strategy: exponential backoff + jitter, capped attempts; require idempotency keys.
+
+## Monitoring & Health
+Track: error rates by code/category, latency, saturation; alert on spikes/SLI breaches.
+Expose health: `/health` (live), `/health/ready` (ready). Link errors to traces.
+
+---
+_Focus on patterns and decisions. No implementation details or exhaustive lists._

+ 55 - 0
.kiro/settings/templates/steering-custom/security.md

@@ -0,0 +1,55 @@
+# Security Standards
+
+[Purpose: define security posture with patterns for validation, authz, secrets, and data]
+
+## Philosophy
+- Defense in depth; least privilege; secure by default; fail closed
+- Validate at boundaries; sanitize for context; never trust input
+- Separate authentication (who) and authorization (what)
+
+## Input & Output
+- Validate at API boundaries and UI forms; enforce types and constraints
+- Sanitize/escape based on destination (HTML, SQL, shell, logs)
+- Prefer allow-lists over block-lists; reject early with minimal detail
+
+## Authentication & Authorization
+- Authentication: verify identity; issue short-lived tokens/sessions
+- Authorization: check permissions before actions; deny by default
+- Centralize policies; avoid duplicating checks across code
+
+Pattern:
+```typescript
+if (!user.hasPermission('resource:action')) throw new ForbiddenError();
+```
+
+## Secrets & Configuration
+- Never commit secrets; store in secret manager or env
+- Rotate regularly; audit access; scope minimal
+- Validate required env vars at startup; fail fast on missing
+
+## Sensitive Data
+- Minimize collection; mask/redact in logs; encrypt at rest and in transit
+- Restrict access by role/need-to-know; track access to sensitive records
+
+## Session/Token Security
+- httpOnly + secure cookies where possible; TLS everywhere
+- Short expiration; rotate on refresh; revoke on logout/compromise
+- Bind tokens to audience/issuer; include minimal claims
+
+## Logging (security-aware)
+- Log auth attempts, permission denials, and sensitive operations
+- Never log passwords, tokens, secrets, full PII; avoid full bodies
+- Include requestId and context to correlate events
+
+## Headers & Transport
+- Enforce TLS; enable HSTS
+- Set security headers (CSP, X-Frame-Options, X-Content-Type-Options)
+- Prefer modern crypto; disable weak protocols/ciphers
+
+## Vulnerability Posture
+- Prefer secure libraries; keep dependencies updated
+- Static/dynamic scans in CI; track and remediate
+- Educate team on common classes; encode as patterns above
+
+---
+_Focus on patterns and principles. Link concrete configs to ops docs._

+ 47 - 0
.kiro/settings/templates/steering-custom/testing.md

@@ -0,0 +1,47 @@
+# Testing Standards
+
+[Purpose: guide what to test, where tests live, and how to structure them]
+
+## Philosophy
+- Test behavior, not implementation
+- Prefer fast, reliable tests; minimize brittle mocks
+- Cover critical paths deeply; breadth over 100% pursuit
+
+## Organization
+Options:
+- Co-located: `component.tsx` + `component.test.tsx`
+- Separate: `/src/...` and `/tests/...`
+Pick one as default; allow exceptions with rationale.
+
+Naming:
+- Files: `*.test.*` or `*.spec.*`
+- Suites: what is under test; Cases: expected behavior
+
+## Test Types
+- Unit: single unit, mocked dependencies, very fast
+- Integration: multiple units together, mock externals only
+- E2E: full flows, minimal mocks, only for critical journeys
+
+## Structure (AAA)
+```typescript
+it('does X when Y', () => {
+  // Arrange
+  const input = setup();
+  // Act
+  const result = act(input);
+  // Assert
+  expect(result).toEqual(expected);
+});
+```
+
+## Mocking & Data
+- Mock externals (API/DB); never mock the system under test
+- Use factories/fixtures; reset state between tests
+- Keep test data minimal and intention-revealing
+
+## Coverage
+- Target: [% overall]; higher for critical domains
+- Enforce thresholds in CI; exceptions require review rationale
+
+---
+_Focus on patterns and decisions. Tool-specific config lives elsewhere._

+ 18 - 0
.kiro/settings/templates/steering/product.md

@@ -0,0 +1,18 @@
+# Product Overview
+
+[Brief description of what this product does and who it serves]
+
+## Core Capabilities
+
+[3-5 key capabilities, not exhaustive features]
+
+## Target Use Cases
+
+[Primary scenarios this product addresses]
+
+## Value Proposition
+
+[What makes this product unique or valuable]
+
+---
+_Focus on patterns and purpose, not exhaustive feature lists_

+ 41 - 0
.kiro/settings/templates/steering/structure.md

@@ -0,0 +1,41 @@
+# Project Structure
+
+## Organization Philosophy
+
+[Describe approach: feature-first, layered, domain-driven, etc.]
+
+## Directory Patterns
+
+### [Pattern Name]
+**Location**: `/path/`  
+**Purpose**: [What belongs here]  
+**Example**: [Brief example]
+
+### [Pattern Name]
+**Location**: `/path/`  
+**Purpose**: [What belongs here]  
+**Example**: [Brief example]
+
+## Naming Conventions
+
+- **Files**: [Pattern, e.g., PascalCase, kebab-case]
+- **Components**: [Pattern]
+- **Functions**: [Pattern]
+
+## Import Organization
+
+```typescript
+// Example import patterns
+import { Something } from '@/path'  // Absolute
+import { Local } from './local'     // Relative
+```
+
+**Path Aliases**:
+- `@/`: [Maps to]
+
+## Code Organization Principles
+
+[Key architectural patterns and dependency rules]
+
+---
+_Document patterns, not file trees. New files following patterns shouldn't require updates_

+ 45 - 0
.kiro/settings/templates/steering/tech.md

@@ -0,0 +1,45 @@
+# Technology Stack
+
+## Architecture
+
+[High-level system design approach]
+
+## Core Technologies
+
+- **Language**: [e.g., TypeScript, Python]
+- **Framework**: [e.g., React, Next.js, Django]
+- **Runtime**: [e.g., Node.js 20+]
+
+## Key Libraries
+
+[Only major libraries that influence development patterns]
+
+## Development Standards
+
+### Type Safety
+[e.g., TypeScript strict mode, no `any`]
+
+### Code Quality
+[e.g., ESLint, Prettier rules]
+
+### Testing
+[e.g., Jest, coverage requirements]
+
+## Development Environment
+
+### Required Tools
+[Key tools and version requirements]
+
+### Common Commands
+```bash
+# Dev: [command]
+# Build: [command]
+# Test: [command]
+```
+
+## Key Technical Decisions
+
+[Important architectural choices and rationale]
+
+---
+_Document standards and patterns, not every dependency_

+ 764 - 0
.kiro/specs/oauth2-email-support/design.md

@@ -0,0 +1,764 @@
+# OAuth 2.0 Email Support - Technical Design
+
+## Overview
+
+This feature adds OAuth 2.0 authentication support for sending emails through Google Workspace accounts in GROWI. Administrators can configure email transmission using OAuth 2.0 credentials (Client ID, Client Secret, Refresh Token) instead of traditional SMTP passwords. This integration extends the existing mail service architecture while maintaining full backward compatibility with SMTP and SES configurations.
+
+**Purpose**: Enable secure, token-based email authentication for Google Workspace accounts, improving security by eliminating password-based SMTP authentication and following Google's recommended practices for application email integration.
+
+**Users**: GROWI administrators configuring email transmission settings will use the new OAuth 2.0 option alongside existing SMTP and SES methods.
+
+**Impact**: Extends the mail service to support a third transmission method (oauth2) without modifying existing SMTP or SES functionality. No breaking changes to existing deployments.
+
+### Goals
+
+- Add OAuth 2.0 as a transmission method option in mail settings
+- Support Google Workspace email sending via Gmail API with OAuth 2.0 credentials
+- Maintain backward compatibility with existing SMTP and SES configurations
+- Provide consistent admin UI experience following SMTP/SES patterns
+- Implement automatic OAuth 2.0 token refresh using nodemailer's built-in support
+- Ensure secure storage and handling of OAuth 2.0 credentials
+
+### Non-Goals
+
+- OAuth 2.0 providers beyond Google Workspace (Microsoft 365, generic OAuth 2.0 servers)
+- Migration tool from SMTP to OAuth 2.0 (administrators manually reconfigure)
+- Authorization flow UI for obtaining refresh tokens (documented external process via Google Cloud Console)
+- Multi-account or account rotation support (single OAuth 2.0 account per instance)
+- Email queuing or rate limiting specific to OAuth 2.0 (relies on existing mail service behavior)
+
+## Architecture
+
+### Existing Architecture Analysis
+
+**Current Mail Service Implementation**:
+- **Service Location**: `apps/app/src/server/service/mail.ts` (MailService class)
+- **Initialization**: MailService instantiated from Crowi container, loaded on app startup
+- **Transmission Methods**: Currently supports 'smtp' and 'ses' via `mail:transmissionMethod` config
+- **Factory Pattern**: `createSMTPClient()` and `createSESClient()` create nodemailer transports
+- **Configuration**: ConfigManager loads settings from MongoDB via `mail:*` namespace keys
+- **S2S Messaging**: Supports distributed config updates via `mailServiceUpdated` events
+- **Test Email**: SMTP-only test email functionality in admin UI
+
+**Current Admin UI Structure**:
+- **Main Component**: `MailSetting.tsx` - form container with transmission method radio buttons
+- **Sub-Components**: `SmtpSetting.tsx`, `SesSetting.tsx` - conditional rendering based on selected method
+- **State Management**: AdminAppContainer (unstated) manages form state and API calls
+- **Form Library**: react-hook-form for validation and submission
+- **API Integration**: `updateMailSettingHandler()` saves all mail settings via REST API
+
+**Integration Points**:
+- Config definition in `config-definition.ts` (add OAuth 2.0 keys)
+- MailService initialize() method (add OAuth 2.0 branch)
+- MailSetting.tsx transmission method array (add 'oauth2' option)
+- AdminAppContainer state methods (add OAuth 2.0 credential methods)
+
+### Architecture Pattern & Boundary Map
+
+```mermaid
+graph TB
+    subgraph "Client Layer"
+        MailSettingUI[MailSetting Component]
+        OAuth2SettingUI[OAuth2Setting Component]
+        SmtpSettingUI[SmtpSetting Component]
+        SesSettingUI[SesSetting Component]
+        AdminContainer[AdminAppContainer]
+    end
+
+    subgraph "API Layer"
+        AppSettingsAPI[App Settings API]
+        MailTestAPI[Mail Test API]
+    end
+
+    subgraph "Service Layer"
+        MailService[MailService]
+        ConfigManager[ConfigManager]
+        S2SMessaging[S2S Messaging]
+    end
+
+    subgraph "External Services"
+        GoogleOAuth[Google OAuth 2.0 API]
+        GmailAPI[Gmail API]
+        SMTPServer[SMTP Server]
+        SESAPI[AWS SES API]
+    end
+
+    subgraph "Data Layer"
+        MongoDB[(MongoDB Config)]
+    end
+
+    MailSettingUI --> AdminContainer
+    OAuth2SettingUI --> AdminContainer
+    SmtpSettingUI --> AdminContainer
+    SesSettingUI --> AdminContainer
+
+    AdminContainer --> AppSettingsAPI
+    AdminContainer --> MailTestAPI
+
+    AppSettingsAPI --> ConfigManager
+    MailTestAPI --> MailService
+
+    MailService --> ConfigManager
+    MailService --> S2SMessaging
+
+    ConfigManager --> MongoDB
+
+    MailService --> GoogleOAuth
+    MailService --> GmailAPI
+    MailService --> SMTPServer
+    MailService --> SESAPI
+
+    S2SMessaging -.->|mailServiceUpdated| MailService
+```
+
+**Architecture Integration**:
+- **Selected Pattern**: Factory Method Extension - adds `createOAuth2Client()` to existing MailService factory methods
+- **Domain Boundaries**:
+  - **Client**: Admin UI components for OAuth 2.0 configuration (follows existing SmtpSetting/SesSetting pattern)
+  - **Service**: MailService handles all transmission methods; OAuth 2.0 isolated in new factory method
+  - **Config**: ConfigManager persists OAuth 2.0 credentials using `mail:oauth2*` namespace
+  - **External**: Google OAuth 2.0 API for token management; Gmail API for email transmission
+- **Existing Patterns Preserved**:
+  - Transmission method selection pattern (radio buttons, conditional rendering)
+  - Factory method pattern for transport creation
+  - Config namespace pattern (`mail:*` keys)
+  - Unstated container state management
+  - S2S messaging for distributed config updates
+- **New Components Rationale**:
+  - **OAuth2Setting Component**: Maintains UI consistency with SMTP/SES; enables modular development
+  - **createOAuth2Client() Method**: Isolates OAuth 2.0 transport logic; follows existing factory pattern
+  - **Four Config Keys**: Minimal set for OAuth 2.0 (user, clientId, clientSecret, refreshToken)
+- **Steering Compliance**:
+  - Feature-based organization (mail service domain)
+  - Named exports throughout
+  - Type safety with explicit TypeScript interfaces
+  - Immutable config updates
+  - Security-first credential handling
+
+### Technology Stack
+
+| Layer | Choice / Version | Role in Feature | Notes |
+|-------|------------------|-----------------|-------|
+| Frontend | React 18.x + TypeScript | OAuth2Setting UI component | Existing stack, no new dependencies |
+| Frontend | react-hook-form | Form validation and state | Existing dependency, consistent with SmtpSetting/SesSetting |
+| Backend | Node.js + TypeScript | MailService OAuth 2.0 integration | Existing runtime, no version changes |
+| Backend | nodemailer 6.x | OAuth 2.0 transport creation | Existing dependency with built-in OAuth 2.0 support |
+| Data | MongoDB | Config storage for OAuth 2.0 credentials | Existing database, new config keys only |
+| External | Google OAuth 2.0 API | Token refresh endpoint | Standard Google API, https://oauth2.googleapis.com/token |
+| External | Gmail API | Email transmission via OAuth 2.0 | Accessed via nodemailer Gmail transport |
+
+**Key Technology Decisions**:
+- **Nodemailer OAuth 2.0**: Built-in support eliminates need for additional OAuth 2.0 libraries; automatic token refresh reduces complexity
+- **No New Dependencies**: Feature fully implemented with existing packages; zero dependency risk
+- **MongoDB Encryption**: Credentials stored using existing ConfigManager encryption (same as SMTP passwords)
+- **Gmail Service Shortcut**: Nodemailer's `service: "gmail"` simplifies configuration and handles Gmail API specifics
+
+## System Flows
+
+### OAuth 2.0 Configuration Flow
+
+```mermaid
+sequenceDiagram
+    participant Admin as Administrator
+    participant UI as MailSetting UI
+    participant Container as AdminAppContainer
+    participant API as App Settings API
+    participant Config as ConfigManager
+    participant DB as MongoDB
+
+    Admin->>UI: Select "oauth2" transmission method
+    UI->>UI: Render OAuth2Setting component
+    Admin->>UI: Enter OAuth 2.0 credentials
+    Admin->>UI: Click Update button
+    UI->>Container: handleSubmit formData
+    Container->>API: POST app-settings
+    API->>API: Validate OAuth 2.0 fields
+    alt Validation fails
+        API-->>Container: 400 Bad Request
+        Container-->>UI: Display error toast
+    else Validation passes
+        API->>Config: setConfig mail:oauth2*
+        Config->>DB: Save encrypted credentials
+        DB-->>Config: Success
+        Config-->>API: Success
+        API-->>Container: 200 OK
+        Container-->>UI: Display success toast
+    end
+```
+
+### Email Sending with OAuth 2.0 Flow
+
+```mermaid
+sequenceDiagram
+    participant App as GROWI Application
+    participant Mail as MailService
+    participant Nodemailer as Nodemailer Transport
+    participant Google as Google OAuth 2.0 API
+    participant Gmail as Gmail API
+
+    App->>Mail: send emailConfig
+    Mail->>Mail: Check mailer setup
+    alt Mailer not setup
+        Mail-->>App: Error Mailer not set up
+    else Mailer setup oauth2
+        Mail->>Nodemailer: sendMail mailConfig
+        Nodemailer->>Nodemailer: Check access token validity
+        alt Access token expired
+            Nodemailer->>Google: POST token refresh
+            Google-->>Nodemailer: New access token
+            Nodemailer->>Nodemailer: Cache access token
+        end
+        Nodemailer->>Gmail: POST send message
+        alt Authentication failure
+            Gmail-->>Nodemailer: 401 Unauthorized
+            Nodemailer-->>Mail: Error Invalid credentials
+            Mail-->>App: Error with OAuth 2.0 details
+        else Success
+            Gmail-->>Nodemailer: 200 OK message ID
+            Nodemailer-->>Mail: Success
+            Mail->>Mail: Log transmission success
+            Mail-->>App: Email sent successfully
+        end
+    end
+```
+
+**Flow-Level Decisions**:
+- **Token Refresh**: Handled entirely by nodemailer; MailService does not implement custom refresh logic
+- **Error Handling**: OAuth 2.0 errors logged with specific Google API error codes for admin troubleshooting
+- **Credential Validation**: Performed at API layer before persisting to database; prevents invalid config states
+- **S2S Sync**: OAuth 2.0 config changes trigger `mailServiceUpdated` event for distributed deployments (existing pattern)
+
+## Requirements Traceability
+
+| Requirement | Summary | Components | Interfaces | Flows |
+|-------------|---------|------------|------------|-------|
+| 1.1 | Add OAuth 2.0 transmission method option | MailSetting.tsx, config-definition.ts | ConfigDefinition | Configuration |
+| 1.2 | Display OAuth 2.0 config fields when selected | OAuth2Setting.tsx, MailSetting.tsx | React Props | Configuration |
+| 1.3 | Validate email address format | AdminAppContainer, App Settings API | API Contract | Configuration |
+| 1.4 | Validate non-empty OAuth 2.0 credentials | AdminAppContainer, App Settings API | API Contract | Configuration |
+| 1.5 | Securely store OAuth 2.0 credentials with encryption | ConfigManager, MongoDB | Data Model | Configuration |
+| 1.6 | Confirm successful configuration save | AdminAppContainer, MailSetting.tsx | API Contract | Configuration |
+| 1.7 | Display descriptive error messages on save failure | AdminAppContainer, MailSetting.tsx | API Contract | Configuration |
+| 2.1 | Use nodemailer Gmail OAuth 2.0 transport | MailService.createOAuth2Client() | Service Interface | Email Sending |
+| 2.2 | Authenticate to Gmail API with OAuth 2.0 | MailService.createOAuth2Client() | External API | Email Sending |
+| 2.3 | Set FROM address to configured email | MailService.setupMailConfig() | Service Interface | Email Sending |
+| 2.4 | Log successful email transmission | MailService.send() | Service Interface | Email Sending |
+| 2.5 | Support all email content types | MailService.send() (existing) | Service Interface | Email Sending |
+| 2.6 | Process email queue sequentially | MailService.send() (existing) | Service Interface | Email Sending |
+| 3.1 | Use nodemailer automatic token refresh | Nodemailer OAuth 2.0 transport | External Library | Email Sending |
+| 3.2 | Request new access token with refresh token | Nodemailer OAuth 2.0 transport | External API | Email Sending |
+| 3.3 | Continue email sending after token refresh | Nodemailer OAuth 2.0 transport | External Library | Email Sending |
+| 3.4 | Log error and notify admin on refresh failure | MailService.send(), Error Handler | Service Interface | Email Sending |
+| 3.5 | Cache access tokens in memory | Nodemailer OAuth 2.0 transport | External Library | Email Sending |
+| 3.6 | Invalidate cached tokens on config update | MailService.initialize() | Service Interface | Configuration |
+| 4.1 | Display OAuth 2.0 form with consistent styling | OAuth2Setting.tsx | React Component | Configuration |
+| 4.2 | Preserve OAuth 2.0 credentials when switching methods | AdminAppContainer state | State Management | Configuration |
+| 4.3 | Provide field-level help text | OAuth2Setting.tsx | React Component | Configuration |
+| 4.4 | Mask sensitive fields (last 4 characters) | OAuth2Setting.tsx | React Component | Configuration |
+| 4.5 | Provide test email button | MailSetting.tsx | API Contract | Email Sending |
+| 4.6 | Display test email result with detailed errors | AdminAppContainer, MailSetting.tsx | API Contract | Email Sending |
+| 5.1 | Log specific OAuth 2.0 error codes | MailService error handler | Service Interface | Email Sending |
+| 5.2 | Retry email sending with exponential backoff | MailService.send() | Service Interface | Email Sending |
+| 5.3 | Store failed emails after all retries | MailService.send() | Service Interface | Email Sending |
+| 5.4 | Never log credentials in plain text | MailService, ConfigManager | Security Pattern | All flows |
+| 5.5 | Require admin authentication for config page | App Settings API | API Contract | Configuration |
+| 5.6 | Stop OAuth 2.0 sending when credentials deleted | MailService.initialize() | Service Interface | Email Sending |
+| 5.7 | Validate SSL/TLS for OAuth 2.0 endpoints | Nodemailer OAuth 2.0 transport | External Library | Email Sending |
+| 6.1 | Maintain backward compatibility with SMTP/SES | MailService, config-definition.ts | All Interfaces | All flows |
+| 6.2 | Use only active transmission method | MailService.initialize() | Service Interface | Email Sending |
+| 6.3 | Allow switching transmission methods without data loss | AdminAppContainer, ConfigManager | State Management | Configuration |
+| 6.4 | Display configuration error if no method set | MailService, MailSetting.tsx | Service Interface | Configuration |
+| 6.5 | Expose OAuth 2.0 status via admin API | App Settings API | API Contract | Configuration |
+
+## Components and Interfaces
+
+### Component Summary
+
+| Component | Domain/Layer | Intent | Req Coverage | Key Dependencies (P0/P1) | Contracts |
+|-----------|--------------|--------|--------------|--------------------------|-----------|
+| MailService | Server/Service | Email transmission with OAuth 2.0 support | 2.1-2.6, 3.1-3.6, 5.1-5.7, 6.2, 6.4 | ConfigManager (P0), Nodemailer (P0), S2SMessaging (P1) | Service |
+| OAuth2Setting | Client/UI | OAuth 2.0 credential input form | 1.2, 4.1, 4.3, 4.4 | AdminAppContainer (P0), react-hook-form (P0) | State |
+| AdminAppContainer | Client/State | State management for mail settings | 1.3, 1.4, 1.6, 1.7, 4.2, 6.3 | App Settings API (P0) | API |
+| ConfigManager | Server/Service | Persist OAuth 2.0 credentials | 1.5, 6.1, 6.3 | MongoDB (P0) | Service, State |
+| App Settings API | Server/API | Mail settings CRUD operations | 1.3-1.7, 4.5-4.6, 5.5, 6.5 | ConfigManager (P0), MailService (P1) | API |
+| Config Definition | Server/Config | OAuth 2.0 config schema | 1.1, 6.1 | None | State |
+
+### Server / Service Layer
+
+#### MailService
+
+| Field | Detail |
+|-------|--------|
+| Intent | Extend email transmission service with OAuth 2.0 support using Gmail API |
+| Requirements | 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 5.1, 5.2, 5.3, 5.4, 5.6, 5.7, 6.2, 6.4 |
+| Owner / Reviewers | Backend team |
+
+**Responsibilities & Constraints**
+- Create OAuth 2.0 nodemailer transport using Gmail service with credentials from ConfigManager
+- Handle OAuth 2.0 authentication failures and token refresh errors with specific error logging
+- Implement retry logic with exponential backoff (1s, 2s, 4s) for transient failures
+- Store failed emails after all retry attempts for manual review
+- Maintain single active transmission method (smtp, ses, or oauth2) per instance
+- Invalidate cached OAuth 2.0 tokens when configuration changes via S2S messaging
+
+**Dependencies**
+- Inbound: Crowi container — service initialization (P0)
+- Inbound: Application modules — email sending requests (P0)
+- Inbound: S2S Messaging — config update notifications (P1)
+- Outbound: ConfigManager — load OAuth 2.0 credentials (P0)
+- Outbound: Nodemailer — create transport and send emails (P0)
+- External: Google OAuth 2.0 API — token refresh (P0)
+- External: Gmail API — email transmission (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+interface MailServiceOAuth2Extension {
+  /**
+   * Create OAuth 2.0 nodemailer transport for Gmail
+   */
+  createOAuth2Client(option?: OAuth2TransportOptions): Transporter | null;
+
+  /**
+   * Send email with retry logic and error handling
+   */
+  sendWithRetry(config: EmailConfig, maxRetries?: number): Promise<SendResult>;
+
+  /**
+   * Store failed email for manual review
+   */
+  storeFailedEmail(config: EmailConfig, error: Error): Promise<void>;
+
+  /**
+   * Wait with exponential backoff
+   */
+  exponentialBackoff(attempt: number): Promise<void>;
+}
+
+interface OAuth2TransportOptions {
+  user: string;
+  clientId: string;
+  clientSecret: string;
+  refreshToken: string;
+}
+
+interface MailService {
+  send(config: EmailConfig): Promise<void>;
+  initialize(): void;
+  isMailerSetup: boolean;
+}
+
+interface EmailConfig {
+  to: string;
+  from?: string;
+  subject?: string;
+  template: string;
+  vars?: Record<string, unknown>;
+}
+
+interface SendResult {
+  messageId: string;
+  response: string;
+  envelope: {
+    from: string;
+    to: string[];
+  };
+}
+```
+
+- **Preconditions**:
+  - ConfigManager loaded with valid `mail:oauth2*` configuration values
+  - Nodemailer package version supports OAuth 2.0 (v6.x+)
+  - Google OAuth 2.0 refresh token has `https://mail.google.com/` scope
+
+- **Postconditions**:
+  - OAuth 2.0 transport created with automatic token refresh enabled
+  - `isMailerSetup` flag set to true when transport successfully created
+  - Failed transport creation returns null and logs error
+  - Successful email sends logged with messageId and recipient
+  - Failed emails stored after retry exhaustion
+
+- **Invariants**:
+  - Only one transmission method active at a time
+  - Credentials never logged in plain text
+  - Token refresh handled transparently by nodemailer
+  - Retry backoff: 1s, 2s, 4s
+
+
+#### ConfigManager
+
+| Field | Detail |
+|-------|--------|
+| Intent | Persist and retrieve OAuth 2.0 credentials with encryption |
+| Requirements | 1.5, 6.1, 6.3 |
+
+**Responsibilities & Constraints**
+- Store four new OAuth 2.0 config keys with encryption
+- Support transmission method value 'oauth2'
+- Maintain all SMTP and SES config values when OAuth 2.0 is configured
+
+**Dependencies**
+- Inbound: MailService, App Settings API (P0)
+- Outbound: MongoDB, Encryption Service (P0)
+
+**Contracts**: Service [x] / State [x]
+
+##### Service Interface
+
+```typescript
+interface ConfigManagerOAuth2Extension {
+  getConfig(key: 'mail:oauth2User'): string | undefined;
+  getConfig(key: 'mail:oauth2ClientId'): string | undefined;
+  getConfig(key: 'mail:oauth2ClientSecret'): string | undefined;
+  getConfig(key: 'mail:oauth2RefreshToken'): string | undefined;
+  getConfig(key: 'mail:transmissionMethod'): 'smtp' | 'ses' | 'oauth2' | undefined;
+
+  setConfig(key: 'mail:oauth2User', value: string): Promise<void>;
+  setConfig(key: 'mail:oauth2ClientId', value: string): Promise<void>;
+  setConfig(key: 'mail:oauth2ClientSecret', value: string): Promise<void>;
+  setConfig(key: 'mail:oauth2RefreshToken', value: string): Promise<void>;
+  setConfig(key: 'mail:transmissionMethod', value: 'smtp' | 'ses' | 'oauth2'): Promise<void>;
+}
+```
+
+##### State Management
+
+- **State Model**: OAuth 2.0 credentials stored as separate config documents in MongoDB
+- **Persistence**: Encrypted at write time; decrypted at read time
+- **Consistency**: Atomic writes per config key
+- **Concurrency**: Last-write-wins; S2S messaging for eventual consistency
+
+
+### Client / UI Layer
+
+#### OAuth2Setting Component
+
+| Field | Detail |
+|-------|--------|
+| Intent | Render OAuth 2.0 credential input form with help text and field masking |
+| Requirements | 1.2, 4.1, 4.3, 4.4 |
+
+**Responsibilities & Constraints**
+- Display four input fields with help text
+- Mask saved Client Secret and Refresh Token (show last 4 characters)
+- Follow SMTP/SES visual patterns
+- Use react-hook-form register
+
+**Dependencies**
+- Inbound: MailSetting component (P0)
+- Outbound: AdminAppContainer (P1)
+- External: react-hook-form (P0)
+
+**Contracts**: State [x]
+
+##### State Management
+
+```typescript
+interface OAuth2SettingProps {
+  register: UseFormRegister<MailSettingsFormData>;
+  adminAppContainer?: AdminAppContainer;
+}
+
+interface MailSettingsFormData {
+  fromAddress: string;
+  transmissionMethod: 'smtp' | 'ses' | 'oauth2';
+  smtpHost: string;
+  smtpPort: string;
+  smtpUser: string;
+  smtpPassword: string;
+  sesAccessKeyId: string;
+  sesSecretAccessKey: string;
+  oauth2User: string;
+  oauth2ClientId: string;
+  oauth2ClientSecret: string;
+  oauth2RefreshToken: string;
+}
+```
+
+
+#### AdminAppContainer (Extension)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Manage OAuth 2.0 credential state and API interactions |
+| Requirements | 1.3, 1.4, 1.6, 1.7, 4.2, 6.3 |
+
+**Responsibilities & Constraints**
+- Add four state properties and setter methods
+- Include OAuth 2.0 credentials in API payload
+- Validate email format before API call
+- Display success/error toasts
+
+**Dependencies**
+- Inbound: MailSetting, OAuth2Setting (P0)
+- Outbound: App Settings API (P0)
+
+**Contracts**: State [x] / API [x]
+
+##### State Management
+
+```typescript
+interface AdminAppContainerOAuth2State {
+  fromAddress?: string;
+  transmissionMethod?: 'smtp' | 'ses' | 'oauth2';
+  smtpHost?: string;
+  smtpPort?: string;
+  smtpUser?: string;
+  smtpPassword?: string;
+  sesAccessKeyId?: string;
+  sesSecretAccessKey?: string;
+  isMailerSetup: boolean;
+  oauth2User?: string;
+  oauth2ClientId?: string;
+  oauth2ClientSecret?: string;
+  oauth2RefreshToken?: string;
+}
+
+interface AdminAppContainerOAuth2Methods {
+  changeOAuth2User(oauth2User: string): Promise<void>;
+  changeOAuth2ClientId(oauth2ClientId: string): Promise<void>;
+  changeOAuth2ClientSecret(oauth2ClientSecret: string): Promise<void>;
+  changeOAuth2RefreshToken(oauth2RefreshToken: string): Promise<void>;
+  updateMailSettingHandler(): Promise<void>;
+}
+```
+
+
+### Server / API Layer
+
+#### App Settings API (Extension)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Handle OAuth 2.0 credential CRUD operations with validation |
+| Requirements | 1.3, 1.4, 1.5, 1.6, 1.7, 4.5, 4.6, 5.5, 6.5 |
+
+**Responsibilities & Constraints**
+- Accept OAuth 2.0 credentials in PUT request
+- Validate email format and non-empty credentials
+- Persist via ConfigManager
+- Trigger S2S messaging
+- Require admin authentication
+
+**Dependencies**
+- Inbound: AdminAppContainer (P0)
+- Outbound: ConfigManager, MailService, S2S Messaging (P0/P1)
+
+**Contracts**: API [x]
+
+##### API Contract
+
+| Method | Endpoint | Request | Response | Errors |
+|--------|----------|---------|----------|--------|
+| PUT | /api/v3/app-settings | UpdateMailSettingsRequest | AppSettingsResponse | 400, 401, 500 |
+| GET | /api/v3/app-settings | - | AppSettingsResponse | 401, 500 |
+| POST | /api/v3/mail/send-test | - | TestEmailResponse | 400, 401, 500 |
+
+**Request/Response Schemas**:
+
+```typescript
+interface UpdateMailSettingsRequest {
+  'mail:from'?: string;
+  'mail:transmissionMethod'?: 'smtp' | 'ses' | 'oauth2';
+  'mail:smtpHost'?: string;
+  'mail:smtpPort'?: string;
+  'mail:smtpUser'?: string;
+  'mail:smtpPassword'?: string;
+  'mail:sesAccessKeyId'?: string;
+  'mail:sesSecretAccessKey'?: string;
+  'mail:oauth2User'?: string;
+  'mail:oauth2ClientId'?: string;
+  'mail:oauth2ClientSecret'?: string;
+  'mail:oauth2RefreshToken'?: string;
+}
+
+interface AppSettingsResponse {
+  appSettings: {
+    'mail:from'?: string;
+    'mail:transmissionMethod'?: 'smtp' | 'ses' | 'oauth2';
+    'mail:smtpHost'?: string;
+    'mail:smtpPort'?: string;
+    'mail:smtpUser'?: string;
+    'mail:sesAccessKeyId'?: string;
+    'mail:oauth2User'?: string;
+    'mail:oauth2ClientId'?: string;
+  };
+  isMailerSetup: boolean;
+}
+
+interface TestEmailResponse {
+  success: boolean;
+  message?: string;
+  error?: {
+    code: string;
+    message: string;
+  };
+}
+```
+
+
+### Server / Config Layer
+
+#### Config Definition (Extension)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Define OAuth 2.0 configuration schema with type safety |
+| Requirements | 1.1, 6.1 |
+
+**Config Schema**:
+
+```typescript
+const CONFIG_KEYS = [
+  'mail:oauth2User',
+  'mail:oauth2ClientId',
+  'mail:oauth2ClientSecret',
+  'mail:oauth2RefreshToken',
+];
+
+'mail:transmissionMethod': defineConfig<'smtp' | 'ses' | 'oauth2' | undefined>({
+  defaultValue: undefined,
+}),
+
+'mail:oauth2User': defineConfig<string | undefined>({
+  defaultValue: undefined,
+}),
+'mail:oauth2ClientId': defineConfig<string | undefined>({
+  defaultValue: undefined,
+}),
+'mail:oauth2ClientSecret': defineConfig<string | undefined>({
+  defaultValue: undefined,
+  isSecret: true,
+}),
+'mail:oauth2RefreshToken': defineConfig<string | undefined>({
+  defaultValue: undefined,
+  isSecret: true,
+}),
+```
+
+## Data Models
+
+### Domain Model
+
+**Mail Configuration Aggregate**:
+- **Root Entity**: MailConfiguration
+- **Value Objects**: TransmissionMethod, OAuth2Credentials, SmtpCredentials, SesCredentials
+- **Business Rules**: Only one transmission method active; OAuth2Credentials complete when all fields present
+- **Invariants**: Credentials encrypted; FROM address required
+
+### Logical Data Model
+
+**Structure Definition**:
+- **Entity**: Config (MongoDB document)
+- **Attributes**: ns, key, value, createdAt, updatedAt
+- **Natural Keys**: ns field (unique)
+
+**Consistency & Integrity**:
+- **Transaction Boundaries**: Each config key saved independently
+- **Temporal Aspects**: updatedAt tracked per entry
+
+### Physical Data Model
+
+- Config documents stored in MongoDB with ns/key/value pattern
+- FailedEmail documents track failed email attempts with error context
+- **Encryption**: AES-256 for clientSecret and refreshToken via environment-provided key
+
+### Data Contracts & Integration
+
+**API Data Transfer**:
+- OAuth 2.0 credentials via JSON in PUT /api/v3/app-settings
+- Client Secret and Refresh Token never returned in GET responses
+
+**Cross-Service Data Management**:
+- S2S messaging broadcasts mailServiceUpdated event
+- Eventual consistency across instances
+
+
+## Critical Implementation Constraints
+
+### Nodemailer XOAuth2 Compatibility (CRITICAL)
+
+**Constraint**: OAuth 2.0 credential validation **must use falsy checks** (`!value`) not null checks (`value != null`) to match nodemailer's internal XOAuth2 handler behavior.
+
+**Rationale**: Nodemailer's XOAuth2.generateToken() method uses `!this.options.refreshToken` at line 184, which rejects empty strings as invalid. Using `!= null` checks in GROWI would allow empty strings through validation, causing runtime failures when nodemailer rejects them.
+
+**Implementation Pattern**:
+```typescript
+// ✅ CORRECT: Falsy check matches nodemailer behavior
+if (!clientId || !clientSecret || !refreshToken || !user) {
+  return null;
+}
+```
+
+**Impact**: Affects MailService.createOAuth2Client(), ConfigManager validation, and API validators. All OAuth 2.0 credential checks must follow this pattern.
+
+**Reference**: [mail.ts:219-226](../../../apps/app/src/server/service/mail.ts#L219-L226), [research.md](research.md#1-nodemailer-xoauth2-falsy-check-requirement)
+
+---
+
+### Credential Preservation Pattern (CRITICAL)
+
+**Constraint**: PUT requests updating OAuth 2.0 configuration **must only include secret fields (clientSecret, refreshToken) when non-empty values are provided**, preventing accidental credential overwrites.
+
+**Rationale**: Standard PUT pattern sending all form fields would overwrite secrets with empty strings when administrators update non-secret fields (from address, user email). GET endpoint returns `undefined` for secrets (not masked placeholders) to prevent re-submission of placeholder text.
+
+**Implementation Pattern**:
+```typescript
+// Build params with non-secret fields
+const params = {
+  'mail:oauth2ClientId': req.body.oauth2ClientId,
+  'mail:oauth2User': req.body.oauth2User,
+};
+
+// Only include secrets if non-empty
+if (req.body.oauth2ClientSecret) {
+  params['mail:oauth2ClientSecret'] = req.body.oauth2ClientSecret;
+}
+```
+
+**Impact**: Affects App Settings API PUT handler and any future API that updates OAuth 2.0 credentials.
+
+**Reference**: [apiv3/app-settings/index.ts:293-306](../../../apps/app/src/server/routes/apiv3/app-settings/index.ts#L293-L306), [research.md](research.md#3-credential-preservation-pattern)
+
+---
+
+### Gmail API FROM Address Behavior (LIMITATION)
+
+**Limitation**: Gmail API **rewrites FROM addresses to the authenticated account email** unless send-as aliases are configured in Google Workspace.
+
+**Example**:
+```
+Configured: mail:from = "notifications@example.com"
+Authenticated: oauth2User = "admin@company.com"
+Actual sent FROM: "admin@company.com"
+```
+
+**Workaround**: Google Workspace administrators must configure send-as aliases in Gmail Settings → Accounts and Import → Send mail as, then verify domain ownership.
+
+**Why This Happens**: Gmail API security policy prevents email spoofing by restricting FROM addresses to authenticated accounts or verified aliases.
+
+**Impact**: GROWI's `mail:from` configuration has limited effect with OAuth 2.0. Custom FROM addresses require Google Workspace configuration. This is expected Gmail behavior, not a GROWI limitation.
+
+**Reference**: [research.md](research.md#2-gmail-api-from-address-rewriting)
+
+---
+
+### OAuth 2.0 Retry Integration (DESIGN DECISION)
+
+**Decision**: OAuth 2.0 transmission uses `sendWithRetry()` with exponential backoff (1s, 2s, 4s), while SMTP/SES use direct `sendMail()` without retries.
+
+**Rationale**: OAuth 2.0 token refresh can fail transiently due to network issues or Google API rate limiting. Exponential backoff provides resilience without overwhelming the API.
+
+**Implementation**:
+```typescript
+if (transmissionMethod === 'oauth2') {
+  return this.sendWithRetry(mailConfig);
+}
+return this.mailer.sendMail(mailConfig);
+```
+
+**Impact**: OAuth 2.0 email failures are automatically retried, improving reliability for production deployments.
+
+**Reference**: [mail.ts:392-400](../../../apps/app/src/server/service/mail.ts#L392-L400)

+ 57 - 0
.kiro/specs/oauth2-email-support/requirements.md

@@ -0,0 +1,57 @@
+# Requirements Document
+
+## Project Description (Input)
+OAuth 2.0 authentication で Google Workspace を利用し email を送信する機能を追加したい
+
+### Context from User
+This implementation adds OAuth 2.0 authentication support for sending emails using Google Workspace accounts. The feature is fully integrated into the admin settings UI and follows the existing patterns for SMTP and SES configuration.
+
+Key configuration parameters:
+- Email Address: The authorized Google account email
+- Client ID: OAuth 2.0 Client ID from Google Cloud Console
+- Client Secret: OAuth 2.0 Client Secret
+- Refresh Token: OAuth 2.0 Refresh Token obtained from authorization flow
+
+The implementation uses nodemailer's built-in Gmail OAuth 2.0 support, which handles token refresh automatically.
+
+## Introduction
+
+This specification defines the requirements for adding OAuth 2.0 authentication support for email transmission using Google Workspace accounts in GROWI. The feature enables administrators to configure email sending through Google's Gmail API using OAuth 2.0 credentials instead of traditional SMTP authentication. This provides enhanced security through token-based authentication and follows Google's recommended practices for application email integration.
+
+## Requirements
+
+### Requirement 1: OAuth 2.0 Configuration Management
+
+**Objective:** As a GROWI administrator, I want to configure OAuth 2.0 credentials for Google Workspace email sending, so that the system can securely send emails without using SMTP passwords.
+
+**Summary**: The Admin Settings UI provides OAuth 2.0 as a transmission method option alongside SMTP and SES. The configuration form includes fields for Email Address, Client ID, Client Secret, and Refresh Token. All fields are validated (email format, non-empty strings using falsy checks), and secrets are encrypted before database storage. Configuration updates preserve existing secrets when empty values are submitted, preventing accidental credential overwrites. Success and error feedback is displayed to administrators.
+
+### Requirement 2: Email Sending Functionality
+
+**Objective:** As a GROWI system, I want to send emails using OAuth 2.0 authenticated Google Workspace accounts, so that notifications and system emails can be delivered securely without SMTP credentials.
+
+**Summary**: The Email Service uses nodemailer with Gmail OAuth 2.0 transport for email sending when OAuth 2.0 is configured. Authentication to Gmail API is automatic using configured credentials. The service supports all email content types (plain text, HTML, attachments, standard headers). Successful transmissions are logged with timestamp and recipient information. OAuth 2.0 sends use retry logic with exponential backoff (1s, 2s, 4s) to handle transient failures. Note: Gmail API rewrites the FROM address to the authenticated account unless send-as aliases are configured in Google Workspace.
+
+### Requirement 3: Token Management
+
+**Objective:** As a GROWI system, I want to automatically manage OAuth 2.0 access token lifecycle, so that email sending continues without manual intervention when tokens expire.
+
+**Summary**: Token refresh is handled automatically by nodemailer's built-in OAuth 2.0 support. Access tokens are cached in memory and reused until expiration. When refresh tokens are used, nodemailer requests new access tokens from Google's OAuth 2.0 endpoint transparently. Token refresh failures are logged with specific error codes for troubleshooting. When OAuth 2.0 configuration is updated, cached tokens are invalidated via service reinitialization triggered by S2S messaging.
+
+### Requirement 4: Admin UI Integration
+
+**Objective:** As a GROWI administrator, I want OAuth 2.0 email configuration to follow the same UI patterns as SMTP and SES, so that I can configure it consistently with existing mail settings.
+
+**Summary**: The Mail Settings page displays the OAuth 2.0 configuration form with consistent visual styling, preserves credentials when switching transmission methods, and shows configuration status. Browser autofill is prevented for secret fields, and placeholder text indicates that blank fields will preserve existing values.
+
+### Requirement 5: Error Handling and Security
+
+**Objective:** As a GROWI administrator, I want clear error messages and secure credential handling, so that I can troubleshoot configuration issues and ensure credentials are protected.
+
+**Summary**: Authentication failures are logged with specific OAuth 2.0 error codes from Google's API for troubleshooting. Email sending failures trigger automatic retry with exponential backoff (3 attempts: 1s, 2s, 4s). Failed emails after retry exhaustion are stored in the database for manual review. Credentials are never logged in plain text (Client ID masked to last 4 characters). Admin authentication is required to access configuration. SSL/TLS validation is enforced by nodemailer. When OAuth 2.0 credentials are incomplete or deleted, the Email Service stops sending and displays configuration errors via isMailerSetup flag.
+
+### Requirement 6: Migration and Compatibility
+
+**Objective:** As a GROWI system, I want OAuth 2.0 email support to coexist with existing SMTP and SES configurations, so that administrators can choose the most appropriate transmission method for their deployment.
+
+**Summary**: OAuth 2.0 is added as a third transmission method option without breaking changes to existing SMTP and SES functionality. Only the active transmission method is used for sending emails. Administrators can switch between methods without data loss (credentials for all methods are preserved). Configuration errors are displayed when no transmission method is properly configured (via isMailerSetup flag). OAuth 2.0 configuration status is exposed through existing admin API endpoints following the same pattern as SMTP/SES.

+ 449 - 0
.kiro/specs/oauth2-email-support/research.md

@@ -0,0 +1,449 @@
+# Research & Design Decisions
+
+---
+**Purpose**: Capture discovery findings, architectural investigations, and rationale that inform the technical design for OAuth 2.0 email support.
+
+**Usage**:
+- Log research activities and outcomes during the discovery phase.
+- Document design decision trade-offs that are too detailed for `design.md`.
+- Provide references and evidence for future audits or reuse.
+---
+
+## Summary
+- **Feature**: `oauth2-email-support`
+- **Discovery Scope**: Extension (integrating OAuth2 into existing mail service architecture)
+- **Key Findings**:
+  - Existing mail service supports SMTP and SES via transmission method pattern
+  - Nodemailer has built-in OAuth2 support for Gmail with automatic token refresh
+  - Admin UI follows modular pattern with separate setting components per transmission method
+  - Config management uses `mail:*` namespace with type-safe definitions
+
+## Research Log
+
+### Existing Mail Service Architecture
+
+- **Context**: Need to understand integration points for OAuth2 support
+- **Sources Consulted**:
+  - `apps/app/src/server/service/mail.ts` (MailService implementation)
+  - `apps/app/src/client/components/Admin/App/MailSetting.tsx` (Admin UI)
+  - `apps/app/src/server/service/config-manager/config-definition.ts` (Config schema)
+- **Findings**:
+  - MailService uses factory pattern: `createSMTPClient()`, `createSESClient()`
+  - Transmission method determined by `mail:transmissionMethod` config value ('smtp' | 'ses')
+  - `initialize()` method called on service startup and S2S message updates
+  - Nodemailer transporter created based on transmission method
+  - Admin UI uses conditional rendering for SMTP vs SES settings
+  - State management via AdminAppContainer (unstated pattern)
+  - Test email functionality exists for SMTP only
+- **Implications**:
+  - OAuth2 follows same pattern: add `createOAuth2Client()` method
+  - Extend `mail:transmissionMethod` type to `'smtp' | 'ses' | 'oauth2'`
+  - Create new `OAuth2Setting.tsx` component following SMTP/SES pattern
+  - Add OAuth2-specific config keys following `mail:*` namespace
+
+### Nodemailer OAuth2 Integration
+
+- **Context**: Verify OAuth2 support in nodemailer and configuration requirements
+- **Sources Consulted**:
+  - [OAuth2 | Nodemailer](https://nodemailer.com/smtp/oauth2)
+  - [Using Gmail | Nodemailer](https://nodemailer.com/usage/using-gmail)
+  - [Sending Emails Securely Using Node.js, Nodemailer, SMTP, Gmail, and OAuth2](https://dev.to/chandrapantachhetri/sending-emails-securely-using-node-js-nodemailer-smtp-gmail-and-oauth2-g3a)
+  - Web search: "nodemailer gmail oauth2 configuration 2026"
+- **Findings**:
+  - Nodemailer has first-class OAuth2 support with type `'OAuth2'`
+  - Configuration structure:
+    ```javascript
+    {
+      service: "gmail",
+      auth: {
+        type: "OAuth2",
+        user: "user@gmail.com",
+        clientId: process.env.GOOGLE_CLIENT_ID,
+        clientSecret: process.env.GOOGLE_CLIENT_SECRET,
+        refreshToken: process.env.GOOGLE_REFRESH_TOKEN
+      }
+    }
+    ```
+  - Automatic access token refresh handled by nodemailer
+  - Requires `https://mail.google.com/` OAuth scope
+  - Gmail service shortcut available (simplifies configuration)
+  - Production consideration: Gmail is designed for individual users, not automated services
+- **Implications**:
+  - No additional dependencies needed (nodemailer already installed)
+  - Four config values required: user email, clientId, clientSecret, refreshToken
+  - Token refresh is automatic - no manual refresh logic needed
+  - Should validate credentials before saving to config
+  - Security: clientSecret and refreshToken must be encrypted in database
+
+### Config Manager Pattern Analysis
+
+- **Context**: Understand how to add new config keys for OAuth2 credentials
+- **Sources Consulted**:
+  - `apps/app/src/server/service/config-manager/config-definition.ts`
+  - Existing mail config keys: `mail:from`, `mail:transmissionMethod`, `mail:smtpHost`, etc.
+- **Findings**:
+  - Config keys use namespace pattern: `mail:*`
+  - Type-safe definitions using `defineConfig<T>()`
+  - Existing transmission method: `defineConfig<'smtp' | 'ses' | undefined>()`
+  - Config values stored in database via ConfigManager
+  - No explicit encryption layer visible in config definition (handled elsewhere)
+- **Implications**:
+  - Add four new keys: `mail:oauth2User`, `mail:oauth2ClientId`, `mail:oauth2ClientSecret`, `mail:oauth2RefreshToken`
+  - Update `mail:transmissionMethod` type to `'smtp' | 'ses' | 'oauth2' | undefined`
+  - Encryption should be handled at persistence layer (ConfigManager or database model)
+  - Follow same pattern as SMTP/SES for consistency
+
+### Admin UI State Management Pattern
+
+- **Context**: Understand how to integrate OAuth2 settings into admin UI
+- **Sources Consulted**:
+  - `apps/app/src/client/components/Admin/App/SmtpSetting.tsx`
+  - `apps/app/src/client/components/Admin/App/SesSetting.tsx`
+  - `apps/app/src/client/services/AdminAppContainer.js`
+- **Findings**:
+  - Separate component per transmission method (SmtpSetting, SesSetting)
+  - Components receive `register` from react-hook-form
+  - Unstated container pattern for state management
+  - Container methods: `changeSmtpHost()`, `changeFromAddress()`, etc.
+  - `updateMailSettingHandler()` saves all settings via API
+  - Test email button only shown for SMTP
+- **Implications**:
+  - Create `OAuth2Setting.tsx` component following same structure
+  - Add four state methods to AdminAppContainer: `changeOAuth2User()`, `changeOAuth2ClientId()`, etc.
+  - Include OAuth2 credentials in `updateMailSettingHandler()` API call
+  - Test email functionality should work for OAuth2 (same as SMTP)
+  - Field masking needed for clientSecret and refreshToken
+
+### Security Considerations
+
+- **Context**: Ensure secure handling of OAuth2 credentials
+- **Sources Consulted**:
+  - GROWI security guidelines (`.claude/rules/security.md`)
+  - Existing SMTP/SES credential handling
+- **Findings**:
+  - Credentials stored in MongoDB via ConfigManager
+  - Input fields use `type="password"` for sensitive values
+  - No explicit encryption visible in UI layer
+  - Logging should not expose credentials
+- **Implications**:
+  - Use `type="password"` for clientSecret and refreshToken fields
+  - Mask values when displaying saved configuration (show last 4 characters)
+  - Never log credentials in plain text
+  - Validate SSL/TLS when connecting to Google OAuth endpoints
+  - Ensure admin authentication required before accessing config page
+
+## Architecture Pattern Evaluation
+
+| Option | Description | Strengths | Risks / Limitations | Notes |
+|--------|-------------|-----------|---------------------|-------|
+| Factory Method Extension | Add `createOAuth2Client()` to existing MailService | Follows existing pattern, minimal changes, consistent with SMTP/SES | None significant | Recommended - aligns with current architecture |
+| Separate OAuth2Service | Create dedicated service for OAuth2 mail | Better separation of concerns | Over-engineering for simple extension, breaks existing pattern | Not recommended - unnecessary complexity |
+| Adapter Pattern | Wrap OAuth2 in adapter implementing mail interface | More flexible for future auth methods | Premature abstraction, more code to maintain | Not needed for single OAuth2 implementation |
+
+## Design Decisions
+
+### Decision: Extend Existing MailService with OAuth2 Support
+
+- **Context**: Need to add OAuth2 email sending without breaking existing SMTP/SES functionality
+- **Alternatives Considered**:
+  1. Create separate OAuth2MailService - more modular but introduces service management complexity
+  2. Refactor to plugin architecture - future-proof but over-engineered for current needs
+  3. Extend existing MailService with factory method - follows current pattern
+- **Selected Approach**: Extend existing MailService with `createOAuth2Client()` method
+- **Rationale**:
+  - Maintains consistency with existing architecture
+  - Minimal code changes reduce risk
+  - Clear migration path (no breaking changes)
+  - GROWI already uses this pattern successfully for SMTP/SES
+- **Trade-offs**:
+  - Benefits: Low risk, fast implementation, familiar pattern
+  - Compromises: All transmission methods in single service (acceptable given simplicity)
+- **Follow-up**: Ensure test coverage for OAuth2 path alongside existing SMTP/SES tests
+
+### Decision: Use Nodemailer's Built-in OAuth2 Support
+
+- **Context**: Need reliable OAuth2 implementation with automatic token refresh
+- **Alternatives Considered**:
+  1. Manual OAuth2 implementation with googleapis library - more control but complex
+  2. Third-party OAuth2 wrapper - additional dependency
+  3. Nodemailer built-in OAuth2 - zero additional dependencies
+- **Selected Approach**: Use nodemailer's native OAuth2 support with Gmail service
+- **Rationale**:
+  - No additional dependencies (nodemailer already installed)
+  - Automatic token refresh reduces complexity
+  - Well-documented and actively maintained
+  - Matches user's original plan (stated in requirements)
+- **Trade-offs**:
+  - Benefits: Simple, reliable, no new dependencies
+  - Compromises: Limited to Gmail/Google Workspace (acceptable per requirements)
+- **Follow-up**: Document Google Cloud Console setup steps for administrators
+
+### Decision: Preserve Existing Transmission Method Pattern
+
+- **Context**: Maintain backward compatibility while adding OAuth2 option
+- **Alternatives Considered**:
+  1. Deprecate transmission method concept - breaking change
+  2. Add OAuth2 as transmission method option - extends existing pattern
+  3. Support multiple simultaneous methods - unnecessary complexity
+- **Selected Approach**: Add 'oauth2' as third transmission method option
+- **Rationale**:
+  - Zero breaking changes for existing users
+  - Consistent admin UI experience
+  - Clear mutual exclusivity (one method active at a time)
+  - Easy to test and validate
+- **Trade-offs**:
+  - Benefits: Backward compatible, simple mental model
+  - Compromises: Only one transmission method active (acceptable per requirements)
+- **Follow-up**: Ensure switching between methods preserves all config values
+
+### Decision: Component-Based UI Following SMTP/SES Pattern
+
+- **Context**: Need consistent admin UI for OAuth2 configuration
+- **Alternatives Considered**:
+  1. Inline OAuth2 fields in main form - cluttered UI
+  2. Modal dialog for OAuth2 setup - breaks existing pattern
+  3. Separate OAuth2Setting component - matches SMTP/SES pattern
+- **Selected Approach**: Create `OAuth2Setting.tsx` component rendered conditionally
+- **Rationale**:
+  - Maintains visual consistency across transmission methods
+  - Reuses existing form patterns (react-hook-form, unstated)
+  - Easy for admins familiar with SMTP/SES setup
+  - Supports incremental development (component isolation)
+- **Trade-offs**:
+  - Benefits: Consistent UX, modular code, easy testing
+  - Compromises: Minor code duplication in form field rendering (acceptable)
+- **Follow-up**: Add help text for each OAuth2 field explaining Google Cloud Console setup
+
+## Risks & Mitigations
+
+- **Risk**: OAuth2 credentials stored in plain text in database
+  - **Mitigation**: Implement encryption at ConfigManager persistence layer; use same encryption as SMTP passwords
+
+- **Risk**: Refresh token expiration or revocation not handled
+  - **Mitigation**: Nodemailer handles refresh automatically; log specific error codes for troubleshooting; document token refresh in admin help text
+
+- **Risk**: Google rate limiting or account suspension
+  - **Mitigation**: Document production usage considerations; implement exponential backoff retry logic; log detailed error responses from Gmail API
+
+- **Risk**: Incomplete credential configuration causing service failure
+  - **Mitigation**: Validate all four required fields before saving; display clear error messages; maintain isMailerSetup flag for health checks
+
+- **Risk**: Breaking changes to existing SMTP/SES functionality
+  - **Mitigation**: Preserve all existing code paths; add OAuth2 as isolated branch; comprehensive integration tests for all three methods
+
+## Session 2: Production Implementation Discoveries (2026-02-10)
+
+### Critical Technical Constraints Identified
+
+#### 1. Nodemailer XOAuth2 Falsy Check Requirement
+
+**Discovery**: Production testing revealed "Can't create new access token for user" errors from nodemailer's XOAuth2 handler.
+
+**Root Cause**: Nodemailer's XOAuth2 implementation uses **falsy checks** (`!this.options.refreshToken`) at line 184, not null checks, rejecting empty strings as invalid credentials.
+
+**Implementation Requirement**:
+```typescript
+// ❌ WRONG: Allows empty strings to pass validation
+if (clientId != null && clientSecret != null && refreshToken != null) {
+  // This passes validation but nodemailer will reject it
+}
+
+// ✅ CORRECT: Matches nodemailer's falsy check behavior
+if (!clientId || !clientSecret || !refreshToken || !user) {
+  logger.warn('OAuth 2.0 credentials incomplete, skipping transport creation');
+  return null;
+}
+```
+
+**Why This Matters**: Empty strings (`""`) are falsy in JavaScript. Using `!= null` in GROWI would allow empty strings through validation, but nodemailer's falsy check would then reject them, causing runtime failures.
+
+**Impact**: All credential validation logic in MailService and ConfigManager **must use falsy checks** for OAuth 2.0 credentials to maintain compatibility with nodemailer.
+
+**Reference**: [mail.ts:219-226](../../../apps/app/src/server/service/mail.ts#L219-L226)
+
+---
+
+#### 2. Gmail API FROM Address Rewriting
+
+**Discovery**: Gmail API rewrites the FROM address to the authenticated account email, ignoring GROWI's configured `mail:from` address.
+
+**Gmail API Behavior**: Gmail API enforces that emails are sent FROM the authenticated account unless send-as aliases are explicitly configured in Google Workspace.
+
+**Example**:
+```
+Configured: mail:from = "notifications@example.com"
+Authenticated: oauth2User = "admin@company.com"
+Actual sent FROM: "admin@company.com"
+```
+
+**Workaround**: Google Workspace administrators must configure **send-as aliases**:
+1. Gmail Settings → Accounts and Import → Send mail as
+2. Add desired FROM address as an alias
+3. Verify domain ownership
+
+**Why This Happens**: Gmail API security policy prevents email spoofing by restricting FROM addresses to authenticated account or verified aliases.
+
+**Impact**:
+- GROWI's `mail:from` configuration has **limited effect** with OAuth 2.0
+- Custom FROM addresses require Google Workspace send-as alias configuration
+- This is **expected Gmail behavior**, not a GROWI limitation
+
+**Documentation Note**: This behavior must be documented in admin UI help text and user guides.
+
+---
+
+#### 3. Credential Preservation Pattern
+
+**Discovery**: Initial implementation allowed secret credentials to be accidentally overwritten with empty strings or masked placeholder values when updating non-secret fields.
+
+**Problem**: Standard PUT request pattern sending all form fields would overwrite secrets with empty values when administrators only wanted to update non-secret fields like `from` address or `oauth2User`.
+
+**Solution**: Conditional secret inclusion pattern:
+
+```typescript
+// Build request params with non-secret fields
+const requestOAuth2SettingParams: Record<string, any> = {
+  'mail:from': req.body.fromAddress,
+  'mail:transmissionMethod': req.body.transmissionMethod,
+  'mail:oauth2ClientId': req.body.oauth2ClientId,
+  'mail:oauth2User': req.body.oauth2User,
+};
+
+// Only include secrets if non-empty values provided
+if (req.body.oauth2ClientSecret) {
+  requestOAuth2SettingParams['mail:oauth2ClientSecret'] = req.body.oauth2ClientSecret;
+}
+if (req.body.oauth2RefreshToken) {
+  requestOAuth2SettingParams['mail:oauth2RefreshToken'] = req.body.oauth2RefreshToken;
+}
+```
+
+**Frontend Consideration**: GET endpoint returns `undefined` for secrets (not masked values) to prevent accidental re-submission:
+
+```typescript
+// ❌ WRONG: Returns masked value that could be saved back
+oauth2ClientSecret: '(set)',
+
+// ✅ CORRECT: Returns undefined, frontend shows placeholder
+oauth2ClientSecret: undefined,
+```
+
+**Why This Pattern**: Allows administrators to update non-secret OAuth 2.0 settings without re-entering sensitive credentials every time.
+
+**Impact**: This pattern must be followed for **any API that updates OAuth 2.0 credentials** to prevent accidental secret overwrites.
+
+**Reference**:
+- PUT handler: [apiv3/app-settings/index.ts:293-306](../../../apps/app/src/server/routes/apiv3/app-settings/index.ts#L293-L306)
+- GET response: [apiv3/app-settings/index.ts:273-276](../../../apps/app/src/server/routes/apiv3/app-settings/index.ts#L273-L276)
+
+---
+
+### Type Safety Enhancements
+
+**NonBlankString Type**: OAuth 2.0 config definitions use `NonBlankString | undefined` for compile-time protection against empty string assignments:
+
+```typescript
+'mail:oauth2ClientSecret': defineConfig<NonBlankString | undefined>({
+  defaultValue: undefined,
+  isSecret: true,
+}),
+```
+
+This provides **compile-time protection** complementing runtime falsy checks.
+
+---
+
+### Integration Pattern Discovered
+
+**OAuth 2.0 Retry Logic**: OAuth 2.0 requires retry logic with exponential backoff due to potential token refresh failures:
+
+```typescript
+// OAuth 2.0 uses sendWithRetry() for automatic retry
+if (transmissionMethod === 'oauth2') {
+  return this.sendWithRetry(mailConfig as EmailConfig);
+}
+
+// SMTP/SES use direct sendMail()
+return this.mailer.sendMail(mailConfig);
+```
+
+**Rationale**: OAuth 2.0 token refresh can fail transiently due to network issues or Google API rate limiting. Exponential backoff (1s, 2s, 4s) provides resilience.
+
+---
+
+## Session 3: Post-Refactoring Architecture (2026-02-10)
+
+### MailService Modular Structure
+
+The MailService was refactored from a single monolithic file (`mail.ts`, ~408 lines) into a feature-based directory structure with separate transport modules. This is the current production architecture.
+
+#### Directory Structure
+
+```
+src/server/service/mail/
+├── index.ts              # Barrel export (default: MailService, backward-compatible)
+├── mail.ts               # MailService class (orchestration, S2S, retry logic)
+├── mail.spec.ts          # MailService tests
+├── smtp.ts               # SMTP transport factory: createSMTPClient()
+├── smtp.spec.ts          # SMTP transport tests
+├── ses.ts                # SES transport factory: createSESClient()
+├── ses.spec.ts           # SES transport tests
+├── oauth2.ts             # OAuth2 transport factory: createOAuth2Client()
+├── oauth2.spec.ts        # OAuth2 transport tests
+└── types.ts              # Shared types (StrictOAuth2Options, MailConfig, etc.)
+```
+
+#### Transport Factory Pattern
+
+Each transport module exports a factory function with a consistent signature:
+
+```typescript
+export function create[Transport]Client(
+  configManager: IConfigManagerForApp,
+  option?: TransportOptions
+): Transporter | null;
+```
+
+- Returns `null` if required credentials are missing (logs warning)
+- MailService delegates transport creation based on `mail:transmissionMethod` config
+
+#### StrictOAuth2Options Type
+
+Defined in `types.ts`, this branded type prevents empty string credentials at compile time:
+
+```typescript
+import type { NonBlankString } from '@growi/core/dist/interfaces';
+
+export type StrictOAuth2Options = {
+  service: 'gmail';
+  auth: {
+    type: 'OAuth2';
+    user: NonBlankString;
+    clientId: NonBlankString;
+    clientSecret: NonBlankString;
+    refreshToken: NonBlankString;
+  };
+};
+```
+
+This is stricter than nodemailer's default `XOAuth2.Options` which allows `string | undefined`. The branded type ensures compile-time validation complementing runtime falsy checks.
+
+#### Backward Compatibility
+
+The barrel export at `mail/index.ts` maintains the existing import pattern:
+```typescript
+import MailService from '~/server/service/mail';  // Still works
+```
+
+**Source**: Migrated from `.kiro/specs/refactor-mailer-service/` (spec deleted after implementation completion).
+
+---
+
+## References
+
+- [OAuth2 | Nodemailer](https://nodemailer.com/smtp/oauth2) - Official OAuth2 configuration documentation
+- [Using Gmail | Nodemailer](https://nodemailer.com/usage/using-gmail) - Gmail-specific integration guide
+- [Sending Emails Securely Using Node.js, Nodemailer, SMTP, Gmail, and OAuth2](https://dev.to/chandrapantachhetri/sending-emails-securely-using-node-js-nodemailer-smtp-gmail-and-oauth2-g3a) - Implementation tutorial
+- [Using OAuth2 with Nodemailer for Secure Email Sending](https://shazaali.substack.com/p/using-oauth2-with-nodemailer-for) - Security best practices
+- Internal: `apps/app/src/server/service/mail.ts` - Existing mail service implementation
+- Internal: `apps/app/src/client/components/Admin/App/MailSetting.tsx` - Admin UI patterns

+ 23 - 0
.kiro/specs/oauth2-email-support/spec.json

@@ -0,0 +1,23 @@
+{
+  "feature_name": "oauth2-email-support",
+  "created_at": "2026-02-06T11:43:56Z",
+  "updated_at": "2026-02-13T00:00:00Z",
+  "language": "en",
+  "phase": "implementation-complete",
+  "cleanup_completed": true,
+  "approvals": {
+    "requirements": {
+      "generated": true,
+      "approved": true
+    },
+    "design": {
+      "generated": true,
+      "approved": true
+    },
+    "tasks": {
+      "generated": true,
+      "approved": true
+    }
+  },
+  "ready_for_implementation": true
+}

+ 41 - 0
.kiro/specs/oauth2-email-support/tasks.md

@@ -0,0 +1,41 @@
+# Implementation Tasks - OAuth 2.0 Email Support
+
+## Status Overview
+
+**Final Status**: Production-Ready (2026-02-10)
+**Requirements Coverage**: 35/37 (95%)
+
+## Completed Tasks
+
+### Phase A: Critical Production Requirements (3 tasks)
+
+- [x] 1. Retry logic with exponential backoff (1s, 2s, 4s) - Req: 5.1, 5.2
+- [x] 2. Failed email storage after retry exhaustion - Req: 5.3
+- [x] 3. Enhanced OAuth 2.0 error logging - Req: 5.4, 5.7
+
+Session 2 additional fixes:
+- Credential validation changed to falsy check (nodemailer XOAuth2 compatibility)
+- PUT handler preserves secrets when empty values submitted
+- Config types changed to `NonBlankString | undefined`
+- GET response returns `undefined` for secrets
+- Browser autofill prevention (`autoComplete="new-password"`)
+- Static IDs replaced with `useId()` hook (Biome lint compliance)
+
+### Baseline Implementation (12 tasks)
+
+- [x] Configuration schema (4 config keys, encryption, NonBlankString types) - Req: 1.1, 1.5, 6.1
+- [x] OAuth 2.0 transport creation (nodemailer Gmail service) - Req: 2.1, 2.2, 3.1-3.3, 3.5, 6.2
+- [x] Service initialization and token management (S2S integration) - Req: 2.3, 2.5, 2.6, 3.6, 5.6, 6.2, 6.4
+- [x] API validation and persistence (PUT/GET endpoints) - Req: 1.3, 1.4, 1.5, 1.6, 5.5, 6.5
+- [x] Field-specific validation error messages - Req: 1.7
+- [x] OAuth2Setting UI component (react-hook-form integration) - Req: 1.2, 4.1
+- [x] AdminAppContainer state management (4 state properties) - Req: 4.2, 6.3
+- [x] Mail settings form submission - Req: 1.3, 1.6, 1.7
+- [x] Transmission method selection ('oauth2' option) - Req: 1.1, 1.2
+- [x] Multi-language translations (en, ja, fr, ko, zh) - Req: 1.2, 4.1, 4.3
+
+## Not Implemented (Optional Enhancements)
+
+- Help text incomplete for 2 of 4 fields (Req 4.3)
+- Credential field masking in UI (Req 4.4)
+- Test email button for OAuth 2.0 (Req 4.5)

+ 34 - 0
.kiro/steering/product.md

@@ -0,0 +1,34 @@
+# Product Overview
+
+GROWI is a team collaboration wiki platform using Markdown, designed to help teams document, share, and organize knowledge effectively.
+
+## Core Capabilities
+
+1. **Hierarchical Wiki Pages**: Tree-structured page organization with path-based navigation (`/path/to/page`)
+2. **Markdown-First Editing**: Rich Markdown support with extensions (drawio, lsx, math) and real-time collaborative editing
+3. **Authentication Integrations**: Multiple auth methods (LDAP, SAML, OAuth, Passkey) for enterprise environments
+4. **Plugin System**: Extensible architecture via `@growi/pluginkit` for custom remark plugins and functionality
+5. **Multi-Service Architecture**: Modular services (PDF export, Slack integration) deployed independently
+
+## Target Use Cases
+
+- **Team Documentation**: Technical documentation, meeting notes, project wikis
+- **Knowledge Management**: Searchable, organized information repository
+- **Enterprise Deployment**: Self-hosted wiki with SSO/LDAP integration
+- **Developer Teams**: Markdown-native, Git-friendly documentation workflow
+
+## Value Proposition
+
+- **Open Source**: MIT licensed, self-hostable, community-driven
+- **Markdown Native**: First-class Markdown support with powerful extensions
+- **Hierarchical Organization**: Intuitive path-based page structure (unlike flat wikis)
+- **Enterprise Ready**: Authentication integrations, access control, scalability
+- **Extensible**: Plugin system for customization without forking
+
+## Deployment Models
+
+- **Self-Hosted**: Docker, Kubernetes, or bare metal deployment
+- **Microservices**: Optional services (pdf-converter, slackbot-proxy) for enhanced functionality
+
+---
+_Focus on patterns and purpose, not exhaustive feature lists_

+ 8 - 0
.kiro/steering/structure.md

@@ -0,0 +1,8 @@
+# Project Structure
+
+See: `.claude/skills/monorepo-overview/SKILL.md` (auto-loaded by Claude Code)
+
+## cc-sdd Specific Notes
+
+Currently, there are no additional instructions specific to Kiro.
+If instructions specific to the cc-sdd workflow are needed in the future, add them to this section.

+ 8 - 0
.kiro/steering/tdd.md

@@ -0,0 +1,8 @@
+# Test-Driven Development
+
+See: `.claude/commands/tdd.md`, `.claude/skills/learned/essential-test-patterns/SKILL.md` and `.claude/skills/learned/essential-test-design/SKILL.md`
+
+## cc-sdd Specific Notes
+
+Currently, there are no additional instructions specific to Kiro.
+If instructions specific to the cc-sdd workflow are needed in the future, add them to this section.

+ 8 - 0
.kiro/steering/tech.md

@@ -0,0 +1,8 @@
+# Technology Stack
+
+See: `.claude/skills/tech-stack/SKILL.md` (auto-loaded by Claude Code)
+
+## cc-sdd Specific Notes
+
+Currently, there are no additional instructions specific to Kiro.
+If instructions specific to the cc-sdd workflow are needed in the future, add them to this section.

+ 1 - 20
.mcp.json

@@ -1,22 +1,3 @@
 {
-  "mcpServers": {
-    "context7": {
-      "type": "http",
-      "url": "https://mcp.context7.com/mcp"
-    },
-    "serena": {
-      "type": "stdio",
-      "command": "uvx",
-      "args": [
-        "--from",
-        "git+https://github.com/oraios/serena",
-        "serena-mcp-server",
-        "--context",
-        "ide-assistant",
-        "--project",
-        "."
-      ],
-      "env": {}
-    }
-  }
+  "mcpServers": {}
 }

+ 192 - 0
.serena/memories/apps-app-jotai-directory-structure.md

@@ -0,0 +1,192 @@
+# Jotai ディレクトリ構造・ファイル配置
+
+## 📁 確立されたディレクトリ構造
+
+```
+states/
+├── ui/
+│   ├── sidebar/                    # サイドバー状態 ✅
+│   ├── editor/                     # エディター状態 ✅
+│   ├── device.ts                   # デバイス状態 ✅
+│   ├── page.ts                     # ページUI状態 ✅
+│   ├── toc.ts                      # TOC状態 ✅
+│   ├── untitled-page.ts            # 無題ページ状態 ✅
+│   ├── page-abilities.ts           # ページ権限判定状態 ✅ DERIVED ATOM!
+│   ├── unsaved-warning.ts          # 未保存警告状態 ✅ JOTAI PATTERN!
+│   └── modal/                      # 個別モーダルファイル ✅
+│       ├── page-create.ts          # ページ作成モーダル ✅
+│       ├── page-delete.ts          # ページ削除モーダル ✅
+│       ├── empty-trash.ts          # ゴミ箱空モーダル ✅
+│       ├── delete-attachment.ts    # 添付ファイル削除 ✅
+│       ├── delete-bookmark-folder.ts # ブックマークフォルダ削除 ✅
+│       ├── update-user-group-confirm.ts # ユーザーグループ更新確認 ✅
+│       ├── page-select.ts          # ページ選択モーダル ✅
+│       ├── page-presentation.ts    # プレゼンテーションモーダル ✅
+│       ├── put-back-page.ts        # ページ復元モーダル ✅
+│       ├── granted-groups-inheritance-select.ts # 権限グループ継承選択 ✅
+│       ├── drawio.ts               # Draw.ioモーダル ✅
+│       ├── handsontable.ts         # Handsontableモーダル ✅
+│       ├── private-legacy-pages-migration.ts # プライベートレガシーページ移行 ✅
+│       ├── descendants-page-list.ts # 子孫ページリスト ✅
+│       ├── conflict-diff.ts        # 競合差分モーダル ✅
+│       ├── page-bulk-export-select.ts # ページ一括エクスポート選択 ✅
+│       ├── drawio-for-editor.ts    # エディタ用Draw.io ✅
+│       ├── link-edit.ts            # リンク編集モーダル ✅
+│       └── template.ts             # テンプレートモーダル ✅
+├── page/                           # ページ関連状態 ✅
+├── server-configurations/          # サーバー設定状態 ✅
+├── global/                         # グローバル状態 ✅
+├── socket-io/                      # Socket.IO状態 ✅
+├── context.ts                      # 共通コンテキスト ✅
+└── features/
+    └── openai/
+        └── client/
+            └── states/             # OpenAI専用状態 ✅
+                ├── index.ts        # exports ✅
+                └── unified-merge-view.ts # UnifiedMergeView状態 ✅
+
+features/                           # Feature Directory Pattern ✅
+└── page-tree/                      # ページツリー機能 ✅ (NEW!)
+    ├── index.ts                    # メインエクスポート
+    ├── client/
+    │   ├── components/             # 汎用UIコンポーネント
+    │   │   ├── SimplifiedItemsTree.tsx
+    │   │   ├── TreeItemLayout.tsx
+    │   │   └── SimpleItemContent.tsx
+    │   ├── hooks/                  # 汎用フック
+    │   │   ├── use-data-loader.ts
+    │   │   └── use-scroll-to-selected-item.ts
+    │   ├── interfaces/             # インターフェース定義
+    │   │   └── index.ts            # TreeItemProps, TreeItemToolProps
+    │   └── states/                 # Jotai状態 ✅
+    │       ├── page-tree-update.ts # ツリー更新状態
+    │       └── page-tree-desc-count-map.ts # 子孫カウント状態
+    └── constants/
+        └── index.ts                # ROOT_PAGE_VIRTUAL_ID
+```
+
+## 📋 ファイル配置ルール
+
+### UI状態系 (`states/ui/`)
+- **個別機能ファイル**: デバイス、TOC、無題ページ等の単一機能
+- **複合機能ディレクトリ**: サイドバー、エディター等の複数機能
+- **モーダル専用ディレクトリ**: `modal/` 配下に個別モーダルファイル
+
+### データ関連状態 (`states/`)
+- **ページ関連**: `page/` ディレクトリ
+- **サーバー設定**: `server-configurations/` ディレクトリ
+- **グローバル状態**: `global/` ディレクトリ
+- **通信系**: `socket-io/` ディレクトリ
+
+### 機能別専用states (`states/features/` および `features/`)
+
+**OpenAI機能**: `states/features/openai/client/states/`
+**ページツリー機能**: `features/page-tree/client/states/` ✅ (Feature Directory Pattern)
+
+### Feature Directory Pattern (新パターン) ✅
+
+`features/{feature-name}/` パターンは、特定機能に関連するコンポーネント、フック、状態、定数をすべて一箇所に集約する構造。
+
+**適用例**: `features/page-tree/`
+```
+features/page-tree/
+├── index.ts           # 全エクスポートの集約
+├── client/
+│   ├── components/    # UIコンポーネント
+│   ├── hooks/         # カスタムフック
+│   ├── interfaces/    # 型定義
+│   └── states/        # Jotai状態
+└── constants/         # 定数
+```
+
+**インポート方法**:
+```typescript
+import { 
+  SimplifiedItemsTree,
+  TreeItemLayout,
+  usePageTreeInformationUpdate,
+  ROOT_PAGE_VIRTUAL_ID 
+} from '~/features/page-tree';
+```
+
+## 🏷️ ファイル命名規則
+
+### 状態ファイル
+- **単一機能**: `{機能名}.ts` (例: `device.ts`, `toc.ts`)
+- **複合機能**: `{機能名}/` ディレクトリ(例: `sidebar/`, `editor/`)
+- **モーダル**: `modal/{モーダル名}.ts`(例: `modal/page-create.ts`)
+
+### export/import規則
+- **公開API**: `index.ts` でのre-export
+- **内部atom**: `_atomsForDerivedAbilities` 特殊名export
+- **機能専用**: 機能ディレクトリ配下の独立したstates
+
+## 📊 ファイルサイズ・複雑度の目安
+
+### 適切なファイル分割
+- **単一ファイル**: ~100行以内、単一責務
+- **ディレクトリ分割**: 複数のhook・機能がある場合
+- **個別モーダルファイル**: 1モーダル = 1ファイル原則
+
+### 複雑度による分類
+- **シンプル**: Boolean状態、基本的な値管理
+- **中程度**: 複数プロパティ、actions分離
+- **複雑**: Derived Atom、Map操作、副作用統合
+
+## 🔗 依存関係・インポート構造
+
+### インポート階層
+```
+components/
+├── import from states/ui/          # UI状態
+├── import from states/page/        # ページ状態  
+├── import from states/global/      # グローバル状態
+└── import from states/features/    # 機能別状態
+
+states/ui/
+├── 内部相互参照可能
+└── states/page/, states/global/ からのimport
+
+states/features/{feature}/
+├── states/ui/ からのimport
+├── 他のfeatures からのimport禁止
+└── 独立性を保つ
+```
+
+### 特殊名Export使用箇所
+```
+states/page/internal-atoms.ts → _atomsForDerivedAbilities
+states/ui/editor/atoms.ts → _atomsForDerivedAbilities  
+states/global/global.ts → _atomsForDerivedAbilities
+states/context.ts → _atomsForDerivedAbilities
+```
+
+## 🎯 今後の拡張指針
+
+### 新規機能追加時
+1. **機能専用度評価**: 汎用 → `states/ui/`、専用 → `features/{feature-name}/client/states/`
+2. **複雑度評価**: シンプル → 単一ファイル、複雑 → ディレクトリ
+3. **依存関係確認**: 既存atomの活用可能性
+4. **命名規則遵守**: 確立された命名パターンに従う
+5. **Feature Directory Pattern検討**: 複数のコンポーネント・フック・状態が関連する場合は `features/` 配下に集約
+
+### ディレクトリ構造維持
+- **責務単一原則**: 1ファイル = 1機能・責務
+- **依存関係最小化**: 循環参照の回避
+- **拡張性**: 将来の機能追加を考慮した構造
+- **検索性**: ファイル名から機能が推測できる命名
+
+### Feature Directory Pattern 採用基準
+以下の条件を満たす場合は `features/` 配下に配置:
+- 複数のUIコンポーネントが関連している
+- 専用のカスタムフックがある
+- 専用のJotai状態がある
+- 機能として独立性が高い
+
+**例**: `features/page-tree/` は SimplifiedItemsTree, TreeItemLayout, useDataLoader, page-tree-update.ts などが密接に関連
+
+---
+
+## 📝 最終更新日
+
+2025-11-28 (Feature Directory Pattern 追加)

+ 84 - 0
.serena/memories/apps-app-modal-performance-optimization-v2-completion-summary.md

@@ -0,0 +1,84 @@
+# モーダル最適化 V2 完了サマリー
+
+## 📊 最終結果
+
+**完了日**: 2025-10-15  
+**達成率**: **46/51モーダル (90%)**
+
+## ✅ 完了内容
+
+### Phase 1-7: 全46モーダル最適化完了
+
+#### 主要最適化パターン
+1. **Container-Presentation分離** (14モーダル)
+   - 重いロジックをSubstanceに分離
+   - Containerで条件付きレンダリング
+   
+2. **Container超軽量化** (11モーダル - Category B)
+   - Container: 6-15行に削減
+   - 全hooks/state/callbacksをSubstanceに移動
+   - Props最小化 (1-4個のみ)
+   - **実績**: AssociateModal 40行→6行 (85%削減)
+
+3. **Fadeout Transition修正** (25モーダル)
+   - 早期return削除: `if (!isOpen) return <></>;` → `{isOpen && <Substance />}`
+   - Modal常時レンダリングでtransition保証
+
+4. **計算処理メモ化** (全モーダル)
+   - useMemo/useCallbackで不要な再計算防止
+
+## 🎯 確立されたパターン
+
+### Ultra Slim Container Pattern
+```tsx
+// Container (6-10行)
+const Modal = () => {
+  const status = useModalStatus();
+  const { close } = useModalActions();
+  return (
+    <Modal isOpen={status?.isOpened} toggle={close}>
+      {status?.isOpened && <Substance data={status.data} closeModal={close} />}
+    </Modal>
+  );
+};
+
+// Substance (全ロジック)
+const Substance = ({ data, closeModal }) => {
+  const { t } = useTranslation();
+  const { mutate } = useSWR(...);
+  const handler = useCallback(...);
+  // 全てのロジック
+};
+```
+
+## 🔶 未完了 (優先度低)
+
+### Admin系モーダル (11個)
+ユーザー要望により優先度低下、V3では対象外:
+- UserGroupDeleteModal.tsx
+- UserGroupUserModal.tsx
+- UpdateParentConfirmModal.tsx
+- SelectCollectionsModal.tsx
+- ConfirmModal.tsx
+- その他6個
+
+### クラスコンポーネント (2個) - 対象外
+- UserInviteModal.jsx
+- GridEditModal.jsx
+
+## 📈 期待される効果
+
+1. **初期読み込み高速化** - 不要なコンポーネントレンダリング削減
+2. **メモリ効率化** - Container-Presentation分離
+3. **レンダリング最適化** - 計算処理のメモ化
+4. **UX向上** - Fadeout transition保証
+5. **保守性向上** - Container超軽量化 (最大85%削減)
+
+## ➡️ Next: V3へ
+
+V3では動的ロード最適化に移行:
+- モーダルの遅延読み込み実装
+- 初期バンドルサイズ削減
+- useDynamicModalLoader実装
+
+**V2の成果物を基盤として、V3でさらなる最適化を実現**

+ 640 - 0
.serena/memories/apps-app-modal-performance-optimization-v3-completion-summary.md

@@ -0,0 +1,640 @@
+# モーダル・コンポーネント パフォーマンス最適化 V3 - 完了記録
+
+**完了日**: 2025-10-20  
+**プロジェクト期間**: 2025-10-15 〜 2025-10-20  
+**最終成果**: 34コンポーネント最適化完了 🎉
+
+---
+
+## 📊 最終成果サマリー
+
+### 実装完了コンポーネント
+
+| カテゴリ | 完了数 | 詳細 |
+|---------|--------|------|
+| **モーダル** | 25個 | useLazyLoader動的ロード |
+| **PageAlerts** | 4個 | Container-Presentation分離 + 条件付きレンダリング |
+| **Sidebar** | 1個 | AiAssistantSidebar (useLazyLoader + SWR最適化) |
+| **その他** | 4個 | 既存のLazyLoaded実装 |
+| **合計** | **34個** | **全体最適化達成** ✨ |
+
+### V3の主要改善
+
+1. **useLazyLoader実装**: 汎用的な動的ローディングフック
+   - グローバルキャッシュによる重複実行防止
+   - 表示条件に基づく真の遅延ロード
+   - テストカバレッジ完備 (12 tests passing)
+
+2. **3つのケース別最適化パターン確立**:
+   - **ケースA**: 単一ファイル → ディレクトリ構造化
+   - **ケースB**: Container-Presentation分離 (Modal外枠なし) → リファクタリング
+   - **ケースC**: Container-Presentation分離 (Modal外枠あり) → 最短経路 ⭐
+
+3. **PageAlerts最適化**: Next.js dynamic()からuseLazyLoaderへの移行
+   - 全ページの初期ロード削減
+   - Container-Presentation分離による不要なレンダリング削減
+   - 条件付きレンダリングによるパフォーマンス向上
+
+4. **Sidebar最適化**: AiAssistantSidebar
+   - useLazyLoader適用(isOpened時のみロード)
+   - useSWRxThreads を Substance へ移動(条件付き実行)
+
+---
+
+## 🎯 パフォーマンス効果
+
+### 初期バンドルサイズ削減
+- **34コンポーネント分の遅延ロード**
+- モーダル平均150行 × 25個 = 約3,750行
+- PageAlerts 4個(最大412行)
+- Sidebar 1個(約600行)
+- **合計: 約5,000行以上のコード削減**
+
+### 初期レンダリングコスト削減
+- Container-Presentation分離による無駄なレンダリング回避
+- 条件が満たされない場合、Substance が全くレンダリングされない
+- SWR hooks の不要な実行を防止
+
+### メモリ効率向上
+- グローバルキャッシュによる重複ロード防止
+- 一度ロードされたコンポーネントは再利用
+
+---
+
+## 📚 技術ガイド
+
+### 1. useLazyLoader フック
+
+**ファイル**: `apps/app/src/client/util/use-lazy-loader.ts`
+
+**特徴**:
+- グローバルキャッシュによる重複実行防止
+- 型安全性(ジェネリクス対応)
+- エラーハンドリング内蔵
+
+**基本的な使い方**:
+```tsx
+const Component = useLazyLoader(
+  'unique-key',           // グローバルキャッシュ用の一意なキー
+  () => import('./Component'), // dynamic import
+  isActive,               // ロードトリガー条件
+);
+
+return Component ? <Component /> : null;
+```
+
+**テスト**: 12 tests passing
+
+---
+
+### 2. ディレクトリ構造と命名規則
+
+```
+apps/app/.../[ComponentName]/
+├── index.ts                    # エクスポート用 (named export)
+├── [ComponentName].tsx         # 実際のコンポーネント (named export)
+└── dynamic.tsx                 # 動的ローダー (named export)
+```
+
+**命名規則**:
+- Hook: `useLazyLoader`
+- 動的ローダーコンポーネント: `[ComponentName]LazyLoaded`
+- ファイル名: `dynamic.tsx`
+- Named Export: 全てのコンポーネントで使用
+
+---
+
+### 3. 実装パターン: モーダル
+
+#### モーダル最適化の3ケース
+
+**ケースA: 単一ファイル**
+- 現状: 単一ファイルで完結
+- 対応: ディレクトリ化 + dynamic.tsx作成
+- 所要時間: 約10分
+
+**ケースB: Container無Modal**
+- 現状: Substance と Container あり、但し Container に `<Modal>` なし
+- 対応: Container に `<Modal>` 外枠追加 + リファクタリング
+- 所要時間: 約15分
+
+**ケースC: Container有Modal** ⭐
+- 現状: 理想的な構造(V2完了済み)
+- 対応: named export化 + dynamic.tsx作成のみ
+- 所要時間: 約5分(最短経路)
+
+#### 実装例: ShortcutsModal (ケースC)
+
+**dynamic.tsx**:
+```tsx
+import type { JSX } from 'react';
+import { useLazyLoader } from '~/components/utils/use-lazy-loader';
+import { useShortcutsModalStatus } from '~/states/ui/modal/shortcuts';
+
+export const ShortcutsModalLazyLoaded = (): JSX.Element => {
+  const status = useShortcutsModalStatus();
+
+  const ShortcutsModal = useLazyLoader(
+    'shortcuts-modal',
+    () => import('./ShortcutsModal').then(mod => ({ default: mod.ShortcutsModal })),
+    status?.isOpened ?? false,
+  );
+
+  return ShortcutsModal ? <ShortcutsModal /> : <></>;
+};
+```
+
+**index.ts**:
+```tsx
+export { ShortcutsModalLazyLoaded } from './dynamic';
+```
+
+**BasicLayout.tsx**:
+```tsx
+// Before: Next.js dynamic()
+const ShortcutsModal = dynamic(() => import('~/client/components/ShortcutsModal'), { ssr: false });
+
+// After: 直接import (named)
+import { ShortcutsModalLazyLoaded } from '~/client/components/ShortcutsModal';
+```
+
+---
+
+### 4. 実装パターン: PageAlerts
+
+#### Container-Presentation分離による最適化
+
+**特徴**:
+- Container: 軽量な条件チェックのみ(SWR hooks を含まない)
+- Substance: UI + 状態管理 + SWR データフェッチ
+- 条件が満たされない場合、Substance は全くレンダリングされない
+
+#### 実装例: FixPageGrantAlert
+
+**構造**:
+```
+FixPageGrantAlert/
+├── FixPageGrantModal.tsx (新規) - 342行のモーダルコンポーネント
+├── FixPageGrantAlert.tsx (リファクタリング済み)
+│   ├── FixPageGrantAlert (Container) - ~35行、簡素化
+│   └── FixPageGrantAlertSubstance (Presentation) - ~30行
+└── dynamic.tsx (useLazyLoader パターン)
+```
+
+**Container** (~35行):
+```tsx
+export const FixPageGrantAlert = (): JSX.Element => {
+  const currentUser = useCurrentUser();
+  const pageData = useCurrentPageData();
+  const hasParent = pageData != null ? pageData.parent != null : false;
+  const pageId = pageData?._id;
+
+  const { data: dataIsGrantNormalized } = useSWRxCurrentGrantData(
+    currentUser != null ? pageId : null,
+  );
+  const { data: dataApplicableGrant } = useSWRxApplicableGrant(
+    currentUser != null ? pageId : null,
+  );
+
+  // Early returns for invalid states
+  if (pageData == null) return <></>;
+  if (!hasParent) return <></>;
+  if (dataIsGrantNormalized?.isGrantNormalized == null || dataIsGrantNormalized.isGrantNormalized) {
+    return <></>;
+  }
+
+  // Render Substance only when all conditions are met
+  if (pageId != null && dataApplicableGrant != null) {
+    return (
+      <FixPageGrantAlertSubstance
+        pageId={pageId}
+        dataApplicableGrant={dataApplicableGrant}
+        currentAndParentPageGrantData={dataIsGrantNormalized.grantData}
+      />
+    );
+  }
+
+  return <></>;
+};
+```
+
+**効果**:
+- 条件が満たされない場合、Substance が全くレンダリングされない
+- Modal コンポーネント(342行)が別ファイルで管理しやすい
+- コードサイズ: 412行 → Container 35行 + Substance 30行 + Modal 342行(別ファイル)
+
+#### 実装例: TrashPageAlert
+
+**特徴**:
+- Container で条件チェックのみ
+- Substance 内で useSWRxPageInfo を実行(条件付き)
+
+**Container** (~20行):
+```tsx
+export const TrashPageAlert = (): JSX.Element => {
+  const pageData = useCurrentPageData();
+  const isTrashPage = useIsTrashPage();
+  const pageId = pageData?._id;
+  const pagePath = pageData?.path;
+  const revisionId = pageData?.revision?._id;
+
+  // Lightweight condition checks in Container
+  const isEmptyPage = pageId == null || revisionId == null || pagePath == null;
+
+  // Show this alert only for non-empty pages in trash.
+  if (!isTrashPage || isEmptyPage) {
+    return <></>;
+  }
+
+  // Render Substance only when conditions are met
+  // useSWRxPageInfo will be executed only here
+  return (
+    <TrashPageAlertSubstance
+      pageId={pageId}
+      pagePath={pagePath}
+      revisionId={revisionId}
+    />
+  );
+};
+```
+
+**Substance** (~130行):
+```tsx
+const TrashPageAlertSubstance = (props: SubstanceProps): JSX.Element => {
+  const { pageId, pagePath, revisionId } = props;
+  
+  const pageData = useCurrentPageData();
+  
+  // useSWRxPageInfo is executed only when Substance is rendered
+  const { data: pageInfo } = useSWRxPageInfo(pageId);
+  
+  // ... UI レンダリング + モーダル操作
+};
+```
+
+**効果**:
+- ❌ **Before**: `useSWRxPageInfo` が常に実行される
+- ✅ **After**: Substance がレンダリングされる時のみ `useSWRxPageInfo` が実行される
+- ゴミ箱ページでない場合、不要な API 呼び出しを回避
+
+---
+
+### 5. 実装パターン: Sidebar
+
+#### AiAssistantSidebar の最適化
+
+**構造**:
+```
+AiAssistantSidebar/
+├── dynamic.tsx (新規) - useLazyLoader パターン
+├── AiAssistantSidebar.tsx (リファクタリング済み)
+│   ├── AiAssistantSidebar (Container) - 簡素化、~30行
+│   └── AiAssistantSidebarSubstance (Presentation) - 複雑なロジック、~500行
+└── (その他のサブコンポーネント)
+```
+
+**dynamic.tsx**:
+```tsx
+import type { FC } from 'react';
+import { memo } from 'react';
+import { useLazyLoader } from '~/components/utils/use-lazy-loader';
+import { useAiAssistantSidebarStatus } from '../../../states';
+
+export const AiAssistantSidebarLazyLoaded: FC = memo(() => {
+  const aiAssistantSidebarData = useAiAssistantSidebarStatus();
+  const isOpened = aiAssistantSidebarData?.isOpened ?? false;
+
+  const ComponentToRender = useLazyLoader(
+    'ai-assistant-sidebar',
+    () => import('./AiAssistantSidebar').then(mod => ({ default: mod.AiAssistantSidebar })),
+    isOpened,
+  );
+
+  if (ComponentToRender == null) {
+    return null;
+  }
+
+  return <ComponentToRender />;
+});
+```
+
+**Container の軽量化**:
+```tsx
+export const AiAssistantSidebar: FC = memo((): JSX.Element => {
+  const aiAssistantSidebarData = useAiAssistantSidebarStatus();
+  const { close: closeAiAssistantSidebar } = useAiAssistantSidebarActions();
+  const { disable: disableUnifiedMergeView } = useUnifiedMergeViewActions();
+
+  const aiAssistantData = aiAssistantSidebarData?.aiAssistantData;
+  const threadData = aiAssistantSidebarData?.threadData;
+  const isOpened = aiAssistantSidebarData?.isOpened;
+  const isEditorAssistant = aiAssistantSidebarData?.isEditorAssistant ?? false;
+
+  // useSWRxThreads を削除(Substance に移動)
+
+  useEffect(() => {
+    if (!aiAssistantSidebarData?.isOpened) {
+      disableUnifiedMergeView();
+    }
+  }, [aiAssistantSidebarData?.isOpened, disableUnifiedMergeView]);
+
+  if (!isOpened) {
+    return <></>;
+  }
+
+  return (
+    <div className="...">
+      <AiAssistantSidebarSubstance
+        isEditorAssistant={isEditorAssistant}
+        threadData={threadData}
+        aiAssistantData={aiAssistantData}
+        onCloseButtonClicked={closeAiAssistantSidebar}
+      />
+    </div>
+  );
+});
+```
+
+**Substance に useSWRxThreads を移動**:
+```tsx
+const AiAssistantSidebarSubstance: React.FC<Props> = (props) => {
+  // useSWRxThreads is executed only when Substance is rendered
+  const { data: threads, mutate: mutateThreads } = useSWRxThreads(aiAssistantData?._id);
+  const { refreshThreadData } = useAiAssistantSidebarActions();
+
+  // refresh thread data when the data is changed
+  useEffect(() => {
+    if (threads == null) return;
+    const currentThread = threads.find(t => t.threadId === threadData?.threadId);
+    if (currentThread != null) {
+      refreshThreadData(currentThread);
+    }
+  }, [threads, refreshThreadData, threadData?.threadId]);
+
+  // ... UI レンダリング
+};
+```
+
+**効果**:
+- ❌ **Before**: Container で `useSWRxThreads` が実行される(isOpened が false でも)
+- ✅ **After**: Substance がレンダリングされる時のみ `useSWRxThreads` が実行される
+- サイドバーが開かれていない場合、不要な API 呼び出しを回避
+
+---
+
+## ✅ 完了コンポーネント一覧
+
+### モーダル (25個)
+
+#### 高頻度モーダル (0/2 - 意図的にスキップ) ⏭️
+- ⏭️ SearchModal (192行) - 検索機能、初期ロード維持
+- ⏭️ PageCreateModal (319行) - ページ作成、初期ロード維持
+
+#### 中頻度モーダル (6/6 - 100%完了) ✅
+- ✅ PageAccessoriesModal (2025-10-15) - ケースB
+- ✅ ShortcutsModal (2025-10-15) - ケースC
+- ✅ PageRenameModal (2025-10-16) - ケースC
+- ✅ PageDuplicateModal (2025-10-16) - ケースC
+- ✅ DescendantsPageListModal (2025-10-16) - ケースC
+- ✅ PageDeleteModal (2025-10-16) - ケースA
+
+#### 低頻度モーダル (19/38完了)
+
+**Session 1完了 (6個)** ✅:
+- ✅ DrawioModal (2025-10-16) - ケースC
+- ✅ HandsontableModal (2025-10-16) - ケースC + 複数ステータス対応
+- ✅ TemplateModal (2025-10-16) - ケースC + @growi/editor state
+- ✅ LinkEditModal (2025-10-16) - ケースC + @growi/editor state
+- ✅ TagEditModal (2025-10-16) - ケースC
+- ✅ ConflictDiffModal (2025-10-16) - ケースC
+
+**Session 2完了 (11個)** ✅:
+- ✅ DeleteBookmarkFolderModal (2025-10-17) - ケースC, BasicLayout
+- ✅ PutbackPageModal (2025-10-17) - ケースC, JSX→TSX変換
+- ✅ AiAssistantManagementModal (2025-10-17) - ケースC
+- ✅ PageSelectModal (2025-10-17) - ケースC
+- ✅ GrantedGroupsInheritanceSelectModal (2025-10-17) - ケースC
+- ✅ DeleteAttachmentModal (2025-10-17) - ケースC
+- ✅ PageBulkExportSelectModal (2025-10-17) - ケースC
+- ✅ PagePresentationModal (2025-10-17) - ケースC
+- ✅ EmptyTrashModal (2025-10-17) - ケースB
+- ✅ CreateTemplateModal (2025-10-17) - ケースB
+- ✅ DeleteCommentModal (2025-10-17) - ケースB
+
+**Session 3 & 4完了 (2個)** ✅:
+- ✅ SearchOptionModal (2025-10-17) - ケースA, SearchPage配下
+- ✅ DeleteAiAssistantModal (2025-10-17) - ケースC, AiAssistantSidebar配下
+
+---
+
+### PageAlerts (4個) 🎉
+
+**Session 5完了 (2025-10-17)** ✅:
+
+全てPageAlerts.tsxで`useLazyLoader`を使用した動的ロード実装に変更。
+
+1. **TrashPageAlert** (171行)
+   - **Container**: ~20行、条件チェックのみ
+   - **Substance**: ~130行、useSWRxPageInfo + UI
+   - **表示条件**: `isTrashPage`
+   - **効果**: ゴミ箱ページでない場合、useSWRxPageInfo が実行されない
+
+2. **PageRedirectedAlert** (60行)
+   - **Container**: ~12行、条件チェックのみ
+   - **Substance**: ~65行、UI + 状態管理 + 非同期処理
+   - **表示条件**: `redirectFrom != null && redirectFrom !== ''`
+   - **効果**: リダイレクトされていない場合、Substance が全くレンダリングされない
+
+3. **FullTextSearchNotCoverAlert** (40行)
+   - **isActive props パターン**: 条件付きレンダリング
+   - **表示条件**: `markdownLength > elasticsearchMaxBodyLengthToIndex`
+   - **効果**: 長いページのみで表示
+
+4. **FixPageGrantAlert** ⭐ 最重要 (412行)
+   - **構造**: Modal分離 + Container-Presentation分離
+   - **Container**: ~35行、SWR hooks + 条件チェック
+   - **Substance**: ~30行、Alert UI + Modal 状態管理
+   - **Modal**: 342行、別ファイル
+   - **表示条件**: `!dataIsGrantNormalized.isGrantNormalized`
+   - **効果**: 最大のバンドル削減、条件が満たされない場合 Substance レンダリングなし
+
+---
+
+### Sidebar (1個) ✨
+
+**Session 6完了 (2025-10-20)** ✅:
+
+**AiAssistantSidebar** (約600行)
+- **dynamic.tsx**: useLazyLoader パターン
+- **Container**: ~30行、aiAssistantSidebarData + actions
+- **Substance**: ~500行、useSWRxThreads + UI + ハンドラー
+- **最適化**:
+  - isOpened 時のみコンポーネントをロード
+  - useSWRxThreads を Substance へ移動(条件付き実行)
+  - threads のリフレッシュロジックも Substance 内に移動
+- **効果**: サイドバーが開かれていない場合、useSWRxThreads が実行されない
+
+---
+
+### 既存のLazyLoaded実装 (4個)
+
+既にuseLazyLoaderパターンで実装済み:
+- ✅ DeleteBookmarkFolderModalLazyLoaded
+- ✅ DeleteAttachmentModalLazyLoaded
+- ✅ PageSelectModalLazyLoaded
+- ✅ PutBackPageModalLazyLoaded
+
+---
+
+## ⏭️ 最適化不要/スキップ(19個)
+
+### 非モーダルコンポーネント(1個)
+- ❌ **ShowShortcutsModal** (35行) - 実体はモーダルではなくホットキートリガーのみ
+
+### 親ページ低頻度 - Me画面(2個)
+- ⏸️ **AssociateModal** (142行) - Me画面(低頻度)内のモーダル
+- ⏸️ **DisassociateModal** (94行) - Me画面(低頻度)内のモーダル
+
+### 親ページ低頻度 - Admin画面(3個)
+- ⏸️ **ImageCropModal** (194行) - Admin/Customize(低頻度)内のモーダル
+- ⏸️ **DeleteSlackBotSettingsModal** (103行) - Admin/SlackIntegration(低頻度)内のモーダル
+- ⏸️ **PluginDeleteModal** (103行) - Admin/Plugins(低頻度)内のモーダル
+
+### 低優先スキップ(1個)
+- ⏸️ **PrivateLegacyPagesMigrationModal** (133行) - ユーザー指示によりスキップ
+
+### クラスコンポーネント(2個)
+- ❌ **UserInviteModal** (299行) - .jsx、対象外
+- ❌ **GridEditModal** (263行) - .jsx、対象外
+
+### 管理画面専用・低頻度(10個)
+
+管理画面自体が遅延ロードされており、使用頻度が極めて低いため最適化不要:
+
+- SelectCollectionsModal (222行) - ExportArchiveData
+- ImportCollectionConfigurationModal (228行) - ImportData
+- NotificationDeleteModal (53行) - Notification
+- DeleteAllShareLinksModal (61行) - Security
+- LdapAuthTestModal (72行) - Security
+- ConfirmBotChangeModal (58行) - SlackIntegration
+- UpdateParentConfirmModal (93行) - UserGroupDetail
+- UserGroupUserModal (110行) - UserGroupDetail
+- UserGroupDeleteModal (208行) - UserGroup
+- UserGroupModal (138行) - ExternalUserGroupManagement
+
+---
+
+## 📈 最適化進捗チャート
+
+```
+完了済み: ████████████████████████████████████████████████████████████  34/53 (64%) 🎉
+スキップ:  ████████                                                      8/53 (15%)
+対象外:   ██                                                            2/53 (4%)
+不要:     ███████████                                                  11/53 (21%)
+```
+
+**V3最適化完了!** 🎉
+
+---
+
+## 🎉 V3最適化完了サマリー
+
+### 達成内容
+- **モーダル最適化**: 25個
+- **PageAlerts最適化**: 4個
+- **Sidebar最適化**: 1個
+- **既存LazyLoaded**: 4個
+- **合計**: 34/53 (64%)
+
+### 主要成果
+
+1. **useLazyLoader実装**: 汎用的な動的ローディングフック
+   - グローバルキャッシュによる重複実行防止
+   - 表示条件に基づく真の遅延ロード
+   - テストカバレッジ完備
+
+2. **3つのケース別最適化パターン確立**:
+   - ケースA: 単一ファイル → ディレクトリ構造化
+   - ケースB: Container-Presentation分離 (Modal外枠なし) → リファクタリング
+   - ケースC: Container-Presentation分離 (Modal外枠あり) → 最短経路 ⭐
+
+3. **PageAlerts最適化**: Next.js dynamic()からuseLazyLoaderへの移行
+   - 全ページの初期ロード削減
+   - Container-Presentation分離による不要なレンダリング削減
+   - FixPageGrantAlert (412行) の大規模バンドル削減
+
+4. **Sidebar最適化**: AiAssistantSidebar
+   - useLazyLoader適用(isOpened時のみロード)
+   - useSWRxThreads を Substance へ移動(条件付き実行)
+
+### パフォーマンス効果
+
+- **初期バンドルサイズ削減**: 34コンポーネント分の遅延ロード(約5,000行以上)
+- **初期レンダリングコスト削減**: Container-Presentation分離による無駄なレンダリング回避
+- **メモリ効率向上**: グローバルキャッシュによる重複ロード防止
+- **API呼び出し削減**: SWR hooks の条件付き実行
+
+### 技術的成果
+
+- **Named Export標準化**: コード可読性とメンテナンス性向上
+- **型安全性保持**: ジェネリクスによる完全な型サポート
+- **開発体験向上**: 既存のインポートパスは変更不要
+- **テストカバレッジ**: useLazyLoader に12テスト
+
+---
+
+## 📝 今後の展開(オプション)
+
+### 残りの19個の評価
+
+現在スキップ・対象外としている19個について、将来的に再評価可能:
+
+1. **Me画面モーダル** (2個): Me画面自体の使用頻度が上がれば最適化検討
+2. **Admin画面モーダル** (13個): 管理機能の使用パターン変化で再評価
+3. **クラスコンポーネント** (2個): Function Component化後に最適化可能
+4. **高頻度モーダル** (2個): コード分割などの別アプローチを検討
+
+### さらなる最適化の可能性
+
+- 高頻度モーダル (SearchModal, PageCreateModal) のコード分割検討
+- 他のレイアウトでの同様パターン適用
+- ページトランジションの最適化
+- Sidebar系コンポーネントの同様最適化
+
+---
+
+## 🏆 完了日: 2025-10-20
+
+**V3最適化プロジェクト完了!** 🎉
+
+- モーダル最適化: 25個 ✅
+- PageAlerts最適化: 4個 ✅
+- Sidebar最適化: 1個 ✅
+- 既存LazyLoaded: 4個 ✅
+- 合計達成率: 64% (34/53) ✅
+- 目標達成! 🎊
+
+---
+
+## 📚 参考情報
+
+### 関連ドキュメント
+- V2完了サマリー: `apps-app-modal-performance-optimization-v2-completion-summary.md`
+- useLazyLoader実装: `apps/app/src/client/util/use-lazy-loader.ts`
+- useLazyLoaderテスト: `apps/app/src/client/util/use-lazy-loader.spec.tsx`
+
+### 重要な学び
+
+1. **正しい判断基準**:
+   - モーダル自身の利用頻度(親ページの頻度ではない)
+   - ファイルサイズ/複雑さ(50行以上で効果的、100行以上で強く推奨)
+   - レンダリングコスト
+
+2. **親の遅延ロード ≠ 子の遅延ロード**:
+   - 親がdynamic()でも、子モーダルは親と一緒にダウンロードされる
+   - 子モーダル自体の最適化が必要
+
+3. **Container-Presentation分離の効果**:
+   - Containerで条件チェック
+   - 条件が満たされない場合、Substanceは全くレンダリングされない
+   - SWR hooksの不要な実行を防止

+ 105 - 0
.serena/memories/apps-app-page-path-nav-and-sub-navigation-layering.md

@@ -0,0 +1,105 @@
+# PagePathNav と SubNavigation の z-index レイヤリング
+
+## 概要
+
+PagePathNav(ページパス表示)と GrowiContextualSubNavigation(PageControls等を含むサブナビゲーション)の
+Sticky 状態における z-index の重なり順を修正した際の知見。
+
+## 修正したバグ
+
+### 症状
+スクロールしていって PagePathNav がウィンドウ上端に近づいたときに、PageControls のボタンが
+PagePathNav の要素の裏側に回ってしまい、クリックできなくなる。
+
+### 原因
+z-index 的に以下のように重なっていたため:
+
+**[Before]** 下層から順に:
+1. PageView の children - z-0
+2. ( GroundGlassBar = PageControls ) ← 同じ層 z-1
+3. PagePathNav
+
+PageControls が PagePathNav より下層にいたため、sticky 境界付近でクリック不能になっていた。
+
+## 修正後の構成
+
+**[After]** 下層から順に:
+1. PageView の children - z-0
+2. GroundGlassBar(磨りガラス背景)- z-1
+3. PagePathNav - z-2(通常時)/ z-3(sticky時)
+4. PageControls(nav要素)- z-3
+
+### ファイル構成
+
+- `GrowiContextualSubNavigation.tsx` - GroundGlassBar を分離してレンダリング
+  - 1つ目: GroundGlassBar のみ(`position-fixed`, `z-1`)
+  - 2つ目: nav 要素(`z-3`)
+- `PagePathNavSticky.tsx` - z-index を動的に切り替え
+  - 通常時: `z-2`
+  - sticky時: `z-3`
+
+## 実装のポイント
+
+### GroundGlassBar を分離した理由
+GroundGlassBar を `position-fixed` で常に固定表示にすることで、
+PageControls と切り離して独立した z-index 層として扱えるようにした。
+
+これにより、GroundGlassBar → PagePathNav → PageControls という
+理想的なレイヤー構造を実現できた。
+
+## CopyDropdown が z-2 で動作しない理由(解決済み)
+
+### 問題
+
+`PagePathNavSticky.tsx` の sticky 時の z-index について:
+
+```tsx
+// これだと CopyDropdown(マウスオーバーで表示されるドロップダウン)が出ない
+innerActiveClass="active z-2 mt-1"
+
+// これだと正常に動作する
+innerActiveClass="active z-3 mt-1"
+```
+
+### 原因
+
+1. `GrowiContextualSubNavigation` の sticky-inner-wrapper は `z-3` かつ横幅いっぱい(Flex アイテム)
+2. この要素が PagePathNavSticky(`z-2`)の上に重なる
+3. CopyDropdown は `.grw-page-path-nav-layout:hover` で `visibility: visible` になる仕組み
+   (参照: `PagePathNavLayout.module.scss`)
+4. **z-3 の要素が上に被さっているため、hover イベントが PagePathNavSticky に届かない**
+5. 結果、CopyDropdown のアイコンが表示されない
+
+### なぜ z-3 で動作するか
+
+- 同じ z-index: 3 になるため、DOM 順序で前後が決まる
+- PagePathNavSticky は GrowiContextualSubNavigation より後にレンダリングされるため前面に来る
+- hover イベントが正常に届き、CopyDropdown が表示される
+
+### 結論
+
+PagePathNavSticky の sticky 時の z-index は `z-3` である必要がある。
+これは GrowiContextualSubNavigation と同じ層に置くことで、DOM 順序による前後関係を利用するため。
+
+## 関連ファイル
+
+- `apps/app/src/client/components/PageView/PageView.tsx`
+- `apps/app/src/client/components/Navbar/GrowiContextualSubNavigation.tsx`
+- `apps/app/src/client/components/Navbar/GrowiContextualSubNavigation.module.scss`
+- `apps/app/src/client/components/PagePathNavSticky/PagePathNavSticky.tsx`
+- `apps/app/src/client/components/PagePathNavSticky/PagePathNavSticky.module.scss`
+- `apps/app/src/components/Common/PagePathNav/PagePathNavLayout.tsx`(CopyDropdown を含む)
+
+## ライブラリの注意事項
+
+### react-stickynode の deprecation
+`react-stickynode` は **2025-12-31 で deprecated** となる予定。
+https://github.com/yahoo/react-stickynode
+
+将来的には CSS `position: sticky` + `IntersectionObserver` への移行を検討する必要がある。
+
+## 注意事項
+
+- z-index の値を変更する際は、上記のレイヤー構造を壊さないよう注意
+- Sticky コンポーネントの `innerActiveClass` で z-index を指定する際、
+  他のコンポーネントとの相互作用を確認すること

+ 683 - 0
.serena/memories/apps-app-page-tree-specification.md

@@ -0,0 +1,683 @@
+# PageTree 仕様書
+
+## 概要
+
+GROWIのPageTreeは、`@headless-tree/react` と `@tanstack/react-virtual` を使用したVirtualized Tree実装です。
+5000件以上の兄弟ページでも快適に動作するよう設計されています。
+
+---
+
+## 1. アーキテクチャ
+
+### 1.1 ディレクトリ構成
+
+```
+src/features/page-tree/
+├── index.ts                                # メインエクスポート
+├── components/
+│   ├── ItemsTree.tsx                       # コアvirtualizedツリーコンポーネント
+│   ├── ItemsTree.spec.tsx                  # テスト
+│   ├── TreeItemLayout.tsx                  # 汎用ツリーアイテムレイアウト
+│   ├── TreeItemLayout.module.scss
+│   ├── SimpleItemContent.tsx               # シンプルなアイテムコンテンツ表示
+│   ├── SimpleItemContent.module.scss
+│   ├── TreeNameInput.tsx                   # リネーム/新規作成用入力コンポーネント
+│   ├── _tree-item-variables.scss           # SCSS変数
+│   └── index.ts
+├── hooks/
+│   ├── use-page-rename.tsx                 # Renameビジネスロジック
+│   ├── use-page-create.tsx                 # Createビジネスロジック
+│   ├── use-page-create.spec.tsx
+│   ├── use-page-dnd.tsx                    # Drag & Dropビジネスロジック
+│   ├── use-page-dnd.spec.ts
+│   ├── use-page-dnd.module.scss            # D&D用スタイル
+│   ├── use-placeholder-rename-effect.ts    # プレースホルダーリネームエフェクト
+│   ├── use-socket-update-desc-count.ts     # Socket.ioリアルタイム更新フック
+│   ├── index.ts
+│   └── _inner/
+│       ├── use-data-loader.ts              # データローダーフック
+│       ├── use-data-loader.spec.tsx
+│       ├── use-data-loader.integration.spec.tsx
+│       ├── use-scroll-to-selected-item.ts  # スクロール制御フック
+│       ├── use-tree-features.ts            # Feature統合フック(checkbox・DnD含む)
+│       ├── use-tree-revalidation.ts        # ツリー再検証フック
+│       ├── use-tree-item-handlers.tsx      # アイテムハンドラーフック
+│       ├── use-auto-expand-ancestors.ts    # 祖先自動展開フック
+│       ├── use-auto-expand-ancestors.spec.tsx
+│       ├── use-expand-parent-on-create.ts  # 作成時親展開フック
+│       ├── use-checkbox.ts                 # チェックボックス状態フック
+│       └── index.ts
+├── interfaces/
+│   └── index.ts                            # TreeItemProps, TreeItemToolProps
+├── states/
+│   ├── page-tree-update.ts                 # ツリー更新状態(Jotai)
+│   ├── page-tree-desc-count-map.ts         # 子孫カウント状態(Jotai)
+│   ├── index.ts
+│   └── _inner/
+│       ├── page-tree-create.ts             # 作成中状態(Jotai)
+│       ├── page-tree-create.spec.tsx
+│       └── tree-rebuild.ts                 # ツリー再構築状態
+├── services/
+│   └── page-tree-children.ts               # 子ページ取得サービス
+└── constants/
+    └── _inner.ts                           # ROOT_PAGE_VIRTUAL_ID
+```
+
+### 1.2 Sidebar専用コンポーネント(移動しなかったファイル)
+
+以下は `components/Sidebar/PageTreeItem/` に残留:
+
+- `PageTreeItem.tsx` - Sidebar専用の実装
+- `CountBadgeForPageTreeItem.tsx` - PageTree専用バッジ
+- `use-page-item-control.tsx` - コンテキストメニュー制御
+
+---
+
+## 2. 主要コンポーネント
+
+### 2.1 ItemsTree
+
+**ファイル**: `features/page-tree/components/ItemsTree.tsx`
+
+Virtualizedツリーのコアコンポーネント。`@headless-tree/react` と `@tanstack/react-virtual` を統合。
+
+#### Props
+
+```typescript
+interface ItemsTreeProps {
+  // 表示対象のターゲットパスまたはID
+  targetPathOrId: string | null;
+  // WIPページを表示するか
+  isWipPageShown?: boolean;
+  // 仮想スクロール用の親要素
+  scrollerElem: HTMLElement | null;
+  // カスタムTreeItemコンポーネント
+  CustomTreeItem?: React.ComponentType<TreeItemProps<IPageForTreeItem>>;
+  // チェックボックス機能
+  enableCheckboxes?: boolean;
+  initialCheckedItems?: string[];
+  onCheckedItemsChange?: (checkedItems: IPageForTreeItem[]) => void;
+}
+```
+
+#### 使用している @headless-tree/core Features
+
+- `asyncDataLoaderFeature` - 非同期データローディング
+- `selectionFeature` - 選択機能
+- `renamingFeature` - リネーム機能
+- `hotkeysCoreFeature` - キーボードショートカット
+- `checkboxesFeature` - チェックボックス(オプション)
+- `dragAndDropFeature` - ドラッグ&ドロップ(オプション)
+
+#### 重要な実装詳細
+
+1. **データローダー**: `use-data-loader.ts` で既存API(`/page-listing/root`, `/page-listing/children`)を活用
+2. **Virtualization**: `@tanstack/react-virtual` の `useVirtualizer` を使用、`overscan: 5` で最適化
+3. **初期スクロール**: `scrollToIndex` で選択アイテムまでスクロール
+
+### 2.2 TreeItemLayout
+
+**ファイル**: `features/page-tree/components/TreeItemLayout.tsx`
+
+汎用的なツリーアイテムレイアウト。展開/折りたたみ、アイコン、カスタムコンポーネントを配置。
+
+#### Props
+
+```typescript
+interface TreeItemLayoutProps {
+  page: IPageForTreeItem;
+  level: number;
+  isOpen: boolean;
+  isSelected: boolean;
+  onToggle?: () => void;
+  onClick?: () => void;
+  // カスタムコンポーネント
+  customEndComponents?: React.ReactNode[];
+  customHoveredEndComponents?: React.ReactNode[];
+  customAlternativeComponents?: React.ReactNode[];
+  showAlternativeContent?: boolean;
+}
+```
+
+#### 自動展開ロジック
+
+```typescript
+useEffect(() => {
+  if (isExpanded) return;
+  const isPathToTarget = page.path != null
+    && targetPath.startsWith(addTrailingSlash(page.path))
+    && targetPath !== page.path;
+  if (isPathToTarget) onToggle?.();
+}, [targetPath, page.path, isExpanded, onToggle]);
+```
+
+### 2.3 PageTreeItem
+
+**ファイル**: `components/Sidebar/PageTreeItem/PageTreeItem.tsx`
+
+Sidebar用のツリーアイテム実装。TreeItemLayoutを使用し、Rename/Create/Control機能を統合。
+
+#### 機能
+
+- WIPページフィルター
+- descendantCountバッジ
+- hover時の操作ボタン(duplicate/delete/rename/create)
+- リネームモード表示
+- 新規作成入力表示(子として)
+
+---
+
+## 3. 機能実装
+
+### 3.1 Rename(ページ名変更)
+
+**実装ファイル**:
+- `features/page-tree/hooks/use-page-rename.tsx`
+- `features/page-tree/components/TreeNameInput.tsx`
+
+#### 使用方法
+
+```typescript
+const { rename, isRenaming, RenameAlternativeComponent } = usePageRename(item);
+
+// TreeItemLayoutに渡す
+<TreeItemLayout
+  showAlternativeContent={isRenaming(item)}
+  customAlternativeComponents={[RenameAlternativeComponent]}
+/>
+```
+
+#### 操作方法
+
+- **開始**: F2キー or コンテキストメニュー
+- **確定**: Enter
+- **キャンセル**: Escape
+
+### 3.2 Create(ページ新規作成)
+
+**実装ファイル**:
+- `features/page-tree/hooks/use-page-create.tsx`
+- `features/page-tree/components/TreeNameInput.tsx`
+- `features/page-tree/states/_inner/page-tree-create.ts`
+
+#### 状態管理(Jotai)
+
+```typescript
+// page-tree-create.ts
+creatingParentIdAtom: 作成中の親ノードID
+useCreatingParentId(): 現在の作成中親ID取得
+useIsCreatingChild(parentId): 特定アイテムが作成中か判定
+usePageTreeCreateActions(): startCreating, cancelCreating
+```
+
+#### 使用方法
+
+```typescript
+const { isCreatingChild, CreateInputComponent, startCreating } = usePageCreate(item);
+
+// PageTreeItemで使用
+{isCreatingChild() && <CreateInputComponent />}
+```
+
+#### 操作方法
+
+- **開始**: コンテキストメニューから「作成」を選択
+- **確定**: Enter → POST /page API → 新規ページに遷移
+- **キャンセル**: Escape or ブラー
+
+### 3.3 Drag and Drop(ページ移動)
+
+**実装ファイル**:
+- `features/page-tree/hooks/use-page-dnd.tsx`
+- `features/page-tree/hooks/use-page-dnd.module.scss`
+- `features/page-tree/hooks/_inner/use-tree-features.ts`
+
+#### 機能概要
+
+ページをドラッグ&ドロップして別のページの子として移動する機能。複数選択D&Dにも対応。
+
+#### 使用方法
+
+```typescript
+<ItemsTree
+  enableDragAndDrop={true}
+  // ...他のprops
+/>
+```
+
+#### 主要コンポーネント
+
+- `usePageDnd(isEnabled)`: D&Dロジックを提供するフック(`UsePageDndProperties`を返す)
+  - `canDrag`: ドラッグ可否判定
+  - `canDrop`: ドロップ可否判定
+  - `onDrop`: ドロップ時の処理(APIコール、ツリー更新)
+  - `renderDragLine`: ドラッグライン描画(treeインスタンスを引数に取る)
+
+**統合方法**:
+- `useTreeFeatures`が内部で`usePageDnd`を呼び出し、`dndProperties`として返す
+- ItemsTree側で`dndProperties.renderDragLine(tree)`を呼び出してドラッグライン表示
+
+#### バリデーションロジック
+
+**canDrag チェック項目**:
+1. 祖先-子孫関係チェック: 選択されたアイテム間に祖先-子孫関係がある場合は禁止
+2. 保護ページチェック: `pagePathUtils.isUsersProtectedPages(path)`が`true`の場合は禁止
+
+**canDrop チェック項目**:
+1. ユーザートップページチェック: `pagePathUtils.isUsersTopPage(targetPath)`が`true`の場合は禁止
+2. 移動可否チェック: `pagePathUtils.canMoveByPath(fromPath, newPath)`で検証
+
+#### エラーハンドリング
+
+- `operation__blocked`エラー: 「このページは現在移動できません」トースト表示
+- その他のエラー: 「ページの移動に失敗しました」トースト表示
+
+#### ドロップ処理の流れ
+
+1. 移動APIコール: `/pages/rename`エンドポイントで各ページを新しいパスに移動
+2. SWRキャッシュ更新: `mutatePageTree()`でページツリーデータを再取得
+3. headless-tree更新: `notifyUpdateItems()`で親ノードの子リストを無効化
+4. ターゲット更新: `targetItem.invalidateItemData()`でdescendantCountを再取得
+5. 自動展開: `targetItem.expand()`でドロップ先を展開
+
+#### 制限事項
+
+- 並び替え(Reorder)は無効(子として追加のみ)
+- キーボードD&Dは非対応
+
+### 3.4 リアルタイム更新(Socket.io統合)
+
+**実装ファイル**:
+- `features/page-tree/hooks/use-socket-update-desc-count.ts`
+- `features/page-tree/states/page-tree-desc-count-map.ts`
+- `features/page-tree/states/page-tree-update.ts`
+
+#### 設計方針
+
+**descendantCountバッジの更新** と **ツリー構造の更新** は別々の関心事として分離:
+
+| 更新タイプ | トリガー | 動作 | 対象 |
+|-----------|---------|------|------|
+| バッジ更新 | Socket.io `UpdateDescCount` | 数字のみ更新(軽量) | 全祖先 |
+| ツリー構造更新 | リロードボタン / 自分の操作後 | 子リスト再取得(重い) | 操作した本人のみ |
+
+**この分離の理由:**
+- 大規模環境で多くのユーザーが同時に操作する場合、全員のツリーが頻繁に再構築されるとパフォーマンス問題が発生
+- バッジ(数字)の更新は軽量なので全員にリアルタイム反映してもOK
+- ツリー構造の変更は操作した本人のウィンドウのみで即時反映し、他ユーザーはリロードボタンで対応
+
+#### 使用方法
+
+`ItemsTree`コンポーネント内で自動的に有効化されます。
+
+```typescript
+// ItemsTree.tsx内で呼び出し
+useSocketUpdateDescCount();
+```
+
+#### 受信イベント
+
+- `UpdateDescCount`: ページの子孫カウント(descendantCount)の更新
+  - サーバーからページ作成/削除/移動時に発行される
+  - 受信データ(Record形式)をMap形式に変換してJotai stateに保存
+  - **バッジ表示のみ更新、ツリー構造は更新しない**
+
+#### 実装詳細
+
+```typescript
+export const useSocketUpdateDescCount = (): void => {
+  const socket = useGlobalSocket();
+  const { update: updatePtDescCountMap } = usePageTreeDescCountMapAction();
+
+  useEffect(() => {
+    if (socket == null) return;
+
+    const handler = (data: UpdateDescCountRawData) => {
+      // バッジの数字のみ更新(ツリー構造は更新しない)
+      const newData: UpdateDescCountData = new Map(Object.entries(data));
+      updatePtDescCountMap(newData);
+    };
+
+    socket.on(SocketEventName.UpdateDescCount, handler);
+    return () => socket.off(SocketEventName.UpdateDescCount, handler);
+  }, [socket, updatePtDescCountMap]);
+};
+```
+
+#### ツリー構造の更新
+
+ツリー構造(子リスト)の更新は以下のタイミングで行われる:
+
+1. **リロードボタン**: `notifyUpdateAllTrees()` を呼び出し、全ツリーを再取得
+2. **自分の操作後**: 
+   - Create/Delete/Move操作の完了コールバックで `notifyUpdateItems([parentId])` を呼び出し
+   - 操作した親ノードの子リストのみ再取得
+
+```typescript
+// リロードボタンの例
+const { notifyUpdateAllTrees } = usePageTreeInformationUpdate();
+const handleReload = () => notifyUpdateAllTrees();
+
+// 操作完了後の例(Create, Delete, Move)
+const { notifyUpdateItems } = usePageTreeInformationUpdate();
+const handleOperationComplete = (parentId: string) => notifyUpdateItems([parentId]);
+```
+
+#### 関連状態
+
+- `page-tree-desc-count-map.ts`: 子孫カウントを管理するJotai atom
+  - `usePageTreeDescCountMap()`: カウント取得(バッジ表示用)
+  - `usePageTreeDescCountMapAction()`: カウント更新(Socket.ioから)
+
+- `page-tree-update.ts`: ツリー更新を管理するJotai atom
+  - `generationAtom`: 更新世代番号
+  - `lastUpdatedItemIdsAtom`: 更新対象アイテムID(nullは全体更新)
+  - `usePageTreeInformationUpdate()`: 更新通知(notifyUpdateItems, notifyUpdateAllTrees)
+  - `usePageTreeRevalidationEffect()`: 更新検知と再取得実行
+
+### 3.5 Checkboxes(AI Assistant用)
+
+**使用箇所**: `AiAssistantManagementPageTreeSelection.tsx`
+
+ItemsTreeのcheckboxesオプションを使用。
+
+#### Props
+
+```typescript
+<ItemsTree
+  enableCheckboxes={true}
+  initialCheckedItems={['page-id-1', 'page-id-2']}
+  onCheckedItemsChange={(checkedItems) => {
+    // チェック変更時の処理
+    // ページパスに `/*` を付加して保存
+  }}
+/>
+```
+
+#### 実装詳細
+
+**フック構成**:
+- `useTreeFeatures`: feature設定とチェックボックス・D&D機能を統合管理
+- `useCheckbox`: チェックボックス状態管理(`checkedItemIds`, `setCheckedItems`, `createNotifyEffect`)
+- `createNotifyEffect`: 親コンポーネントへの変更通知用ヘルパー関数を提供
+
+**循環依存の回避**:
+- `useTreeFeatures`はtreeインスタンスに依存しない
+- `createNotifyEffect`がtreeインスタンスとコールバックを受け取り、useEffectのコールバック関数を返す
+- ItemsTree側で`useEffect(createNotifyEffect(tree, onCheckedItemsChange), [createNotifyEffect, tree])`を呼び出す
+
+**設定**:
+- `checkboxesFeature` を条件付きで追加
+- `propagateCheckedState: false` で子への伝播を無効化
+- `canCheckFolders: true` でフォルダもチェック可能
+
+---
+
+## 4. バックエンドAPI
+
+### 4.1 使用エンドポイント
+
+```
+GET /page-listing/root
+→ ルートページ "/" のデータ
+
+GET /page-listing/children?id={pageId}
+→ 指定ページの直下の子のみ
+
+GET /page-listing/item?id={pageId}
+→ 単一ページデータ(新規追加)
+```
+
+### 4.2 IPageForTreeItem インターフェース
+
+```typescript
+interface IPageForTreeItem {
+  _id: string;
+  path: string;
+  parent?: string;
+  descendantCount: number;
+  revision?: string;
+  grant: PageGrant;
+  isEmpty: boolean;
+  wip: boolean;
+  processData?: IPageOperationProcessData;
+}
+```
+
+---
+
+## 5. @headless-tree/react 基礎知識
+
+### 5.1 データ構造
+
+- **IDベースの参照**: ツリーアイテムは文字列IDで識別
+- **フラット構造を推奨**: dataLoaderで親子関係を定義
+- **ジェネリック型対応**: `useTree<IPageForTreeItem>` でカスタム型を指定
+
+### 5.2 非同期データローダー
+
+```typescript
+const tree = useTree<IPageForTreeItem>({
+  rootItemId: "root",
+  dataLoader: {
+    getItem: async (itemId) => await api.fetchItem(itemId),
+    getChildren: async (itemId) => await api.fetchChildren(itemId),
+  },
+  createLoadingItemData: () => ({ /* loading state */ }),
+  features: [asyncDataLoaderFeature],
+});
+```
+
+#### キャッシュの無効化
+
+```typescript
+const item = tree.getItemInstance("item1");
+item.invalidateItemData();      // アイテムデータの再取得
+item.invalidateChildrenIds();   // 子IDリストの再取得
+```
+
+### 5.3 Virtualization統合
+
+```typescript
+const items = tree.getItems(); // フラット化されたアイテムリスト
+
+const virtualizer = useVirtualizer({
+  count: items.length,
+  getScrollElement: () => scrollElementRef.current,
+  estimateSize: () => 32,
+  overscan: 5,
+});
+```
+
+### 5.4 主要API
+
+#### Tree インスタンス
+- `tree.getItems()`: フラット化されたツリーアイテムのリスト
+- `tree.getItemInstance(id)`: IDからアイテムインスタンスを取得
+- `tree.getContainerProps()`: ツリーコンテナのprops(ホットキー有効化に必須)
+- `tree.rebuildTree()`: ツリー構造を再構築
+
+#### Item インスタンス
+- `item.getProps()`: アイテム要素のprops
+- `item.getId()`: アイテムID
+- `item.getItemData()`: カスタムペイロード(IPageForTreeItem)
+- `item.getItemMeta()`: メタデータ(level, indexなど)
+- `item.isFolder()`: フォルダかどうか
+- `item.isExpanded()`: 展開されているか
+- `item.expand()` / `item.collapse()`: 展開/折りたたみ
+- `item.startRenaming()`: リネームモード開始
+- `item.isRenaming()`: リネーム中か判定
+
+---
+
+## 6. パフォーマンス最適化
+
+### 6.1 headless-tree のキャッシュ無効化と再取得
+
+#### 重要な知見
+
+`@headless-tree/core` の `asyncDataLoaderFeature` は内部キャッシュを持ち、`invalidateChildrenIds()` メソッドでキャッシュを無効化できます。
+
+**invalidateChildrenIds(optimistic?: boolean) の動作:**
+
+```typescript
+// 内部実装(feature.ts より)
+invalidateChildrenIds: async ({ tree, itemId }, optimistic) => {
+  if (!optimistic) {
+    delete getDataRef(tree).current.childrenIds?.[itemId];  // キャッシュ削除
+  }
+  await loadChildrenIds(tree, itemId);  // データ再取得
+  // loadChildrenIds 内で自動的に tree.rebuildTree() が呼ばれる
+};
+```
+
+**optimistic パラメータの影響:**
+
+| パラメータ | 動作 | 用途 |
+|-----------|------|------|
+| `false` (デフォルト) | ローディング状態を更新、再レンダリングをトリガー | 最後の呼び出しに使用 |
+| `true` | ローディング状態を更新しない、古いデータを表示し続ける | バッチ処理の途中に使用 |
+
+**パフォーマンス最適化パターン:**
+
+```typescript
+// ❌ 非効率: 全アイテムに optimistic=false
+items.forEach(item => item.invalidateChildrenIds(false));
+// → 各呼び出しで rebuildTree() が実行され、N回の再構築が発生
+
+// ✅ 効率的: 展開済みアイテムのみ対象、最後だけ optimistic=false
+const expandedItems = tree.getItems().filter(item => item.isExpanded());
+expandedItems.forEach(item => item.invalidateChildrenIds(true));  // 楽観的
+rootItem.invalidateChildrenIds(false);  // 最後に1回だけ再構築
+```
+
+**実際の実装 (page-tree-update.ts):**
+
+```typescript
+useEffect(() => {
+  if (globalGeneration <= generation) return;
+
+  const shouldUpdateAll = globalLastUpdatedItemIds == null;
+
+  if (shouldUpdateAll) {
+    // pendingリクエストキャッシュをクリア
+    invalidatePageTreeChildren();
+
+    // 展開済みアイテムのみ楽観的に無効化(rebuildTree回避)
+    const expandedItems = tree.getItems().filter(item => item.isExpanded());
+    expandedItems.forEach(item => item.invalidateChildrenIds(true));
+
+    // ルートのみ optimistic=false で再構築トリガー
+    getItemInstance(ROOT_PAGE_VIRTUAL_ID)?.invalidateChildrenIds(false);
+  } else {
+    // 部分更新: 指定アイテムのみ
+    invalidatePageTreeChildren(globalLastUpdatedItemIds);
+    globalLastUpdatedItemIds.forEach(itemId => {
+      getItemInstance(itemId)?.invalidateChildrenIds(false);
+    });
+  }
+
+  onRevalidatedRef.current?.();
+}, [globalGeneration, generation, getItemInstance, globalLastUpdatedItemIds, tree]);
+```
+
+#### 注意事項
+
+1. **invalidateChildrenIds は async 関数** - Promise を返すが、await しなくても動作する
+2. **loadChildrenIds 完了後に自動で rebuildTree()** - 明示的な呼び出し不要
+3. **optimistic=true でもデータは再取得される** - ただしローディングUIは表示されない
+4. **tree.getItems() は表示中のアイテムのみ** - 折りたたまれた子は含まれない
+
+### 6.2 Virtualization
+
+- **100k+アイテムでテスト済み**
+- `overscan: 5` で表示範囲外の先読み
+- `estimateSize: 32` でアイテム高さを推定
+
+### 6.3 非同期データローダーのキャッシング
+
+- asyncDataLoaderFeatureが自動キャッシング
+- 展開済みアイテムは再取得なし
+- `invalidateChildrenIds()` で明示的に無効化可能
+
+### 6.4 ツリー更新
+
+```typescript
+// Jotai atomでツリー更新を通知
+const { notifyUpdateItems } = usePageTreeInformationUpdate();
+notifyUpdateItems(updatedPageIds); // 引数は更新対象アイテムの ID 配列(ページオブジェクトではない)
+
+// SWRでページデータを再取得
+const { mutate: mutatePageTree } = useSWRxPageTree();
+await mutatePageTree();
+```
+
+---
+
+## 7. 実装済み機能
+
+- ✅ Virtualizedツリー表示
+- ✅ 展開/折りたたみ
+- ✅ ページ遷移(クリック)
+- ✅ 選択状態表示
+- ✅ WIPページフィルター
+- ✅ descendantCountバッジ
+- ✅ hover時の操作ボタン
+- ✅ 選択ページまでの自動展開
+- ✅ 選択ページへの初期スクロール
+- ✅ Rename(F2、コンテキストメニュー)
+- ✅ Create(コンテキストメニュー)
+- ✅ Duplicate(hover時ボタン)
+- ✅ Delete(hover時ボタン)
+- ✅ Checkboxes(AI Assistant用)
+- ✅ Drag and Drop(ページ移動)
+- ✅ リアルタイム更新(Socket.io統合)
+
+---
+
+## 8. 未実装機能
+
+なし(全機能実装済み)
+
+---
+
+## 9. 参考リンク
+
+- @headless-tree/react 公式ドキュメント: https://headless-tree.lukasbach.com/
+- GitHub: https://github.com/lukasbach/headless-tree
+- @tanstack/react-virtual: https://tanstack.com/virtual/latest
+
+---
+
+## 10. 改修時の注意点
+
+### 10.1 ホットキーサポート
+
+`hotkeysCoreFeature` と `getContainerProps()` の組み合わせが必須。
+`getContainerProps()` がないとホットキーが動作しない。
+
+### 10.2 ツリー更新の通知
+
+操作完了後は以下を呼び出す:
+1. `mutatePageTree()` - SWRでデータ再取得
+2. `notifyUpdateItems()` - Jotai atomで更新通知
+
+### 10.3 旧実装について
+
+以下のファイルはTypeScriptエラーあり(許容):
+- `ItemsTree.tsx` - 旧実装
+- `PageTreeItem.tsx` - 旧Sidebar用
+- `TreeItemForModal.tsx` - 旧Modal用
+
+---
+
+## 更新履歴
+
+- 2025-11-10: 初版作成(Virtualization計画)
+- 2025-11-28: Rename/Create実装完了、ディレクトリ再編成
+- 2025-12-05: 仕様書として統合
+- 2025-12-08: Drag and Drop実装完了、ディレクトリ構成更新
+- 2025-12-08: リアルタイム更新(Socket.io統合)実装完了
+- 2025-12-08: headless-tree キャッシュ無効化の知見を追加(invalidateChildrenIds の optimistic パラメータ)
+- 2025-12-08: Socket.io更新の設計方針を明確化(バッジ更新とツリー構造更新の分離)
+- 2025-12-09: useTreeFeaturesリファクタリング完了(checkboxとDnD機能を統合、循環依存を回避)

+ 0 - 186
.serena/memories/apps-app-pagetree-performance-refactor-plan.md

@@ -1,186 +0,0 @@
-# PageTree パフォーマンス改善リファクタ計画 - 現実的戦略
-
-## 🎯 目標
-現在のパフォーマンス問題を解決:
-- **問題**: 5000件の兄弟ページで初期レンダリングが重い
-- **目標**: 表示速度を10-20倍改善、UX維持
-
-## ✅ 戦略2: API軽量化 - **完了済み**
-
-### 実装済み内容
-- **ファイル**: `apps/app/src/server/service/page-listing/page-listing.ts:77`
-- **変更内容**: `.select('_id path parent descendantCount grant isEmpty createdAt updatedAt wip')` を追加
-- **型定義**: `apps/app/src/interfaces/page.ts` の `IPageForTreeItem` 型も対応済み
-- **追加改善**: 計画にはなかった `wip` フィールドも最適化対象に含める
-
-### 実現できた効果
-- **データサイズ**: 推定 500バイト → 約100バイト(5倍軽量化)
-- **ネットワーク転送**: 5000ページ時 2.5MB → 500KB程度に削減
-- **状況**: **実装完了・効果発現中**
-
----
-
-## 🚀 戦略1: 既存アーキテクチャ活用 + headless-tree部分導入 - **現実的戦略**
-
-### 前回のreact-window失敗原因
-1. **動的itemCount**: ツリー展開時にアイテム数が変化→react-windowの前提と衝突
-2. **非同期ローディング**: APIレスポンス待ちでフラット化不可
-3. **複雑な状態管理**: SWRとreact-windowの状態同期が困難
-
-### 現実的制約の認識
-**ItemsTree/TreeItemLayoutは廃止困難**:
-- **CustomTreeItemの出し分け**: `PageTreeItem` vs `TreeItemForModal`  
-- **共通副作用処理**: rename/duplicate/delete時のmutation処理
-- **多箇所からの利用**: PageTree, PageSelectModal, AiAssistant等
-
-## 📋 修正された実装戦略: **ハイブリッドアプローチ**
-
-### **核心アプローチ**: ItemsTreeを**dataProvider**として活用
-
-**既存の責務は保持しつつ、内部実装のみheadless-tree化**:
-
-1. **ItemsTree**: UIロジック・副作用処理はそのまま
-2. **TreeItemLayout**: 個別アイテムレンダリングはそのまま  
-3. **データ管理**: 内部でheadless-treeを使用(SWR → headless-tree)
-4. **Virtualization**: ItemsTree内部にreact-virtualを導入
-
-### **実装計画: 段階的移行**
-
-#### **Phase 1: データ層のheadless-tree化**
-
-**ファイル**: `ItemsTree.tsx`
-```typescript
-// Before: 複雑なSWR + 子コンポーネント管理
-const tree = useTree<IPageForTreeItem>({
-  rootItemId: initialItemNode.page._id,
-  dataLoader: {
-    getItem: async (itemId) => {
-      const response = await apiv3Get('/page-listing/item', { id: itemId });
-      return response.data;
-    },
-    getChildren: async (itemId) => {
-      const response = await apiv3Get('/page-listing/children', { id: itemId });
-      return response.data.children.map(child => child._id);
-    },
-  },
-  features: [asyncDataLoaderFeature],
-});
-
-// 既存のCustomTreeItemに渡すためのアダプター
-const adaptedNodes = tree.getItems().map(item => 
-  new ItemNode(item.getItemData())
-);
-
-return (
-  <ul className={`${moduleClass} list-group`}>
-    {adaptedNodes.map(node => (
-      <CustomTreeItem
-        key={node.page._id}
-        itemNode={node}
-        // ... 既存のpropsをそのまま渡す
-        onRenamed={onRenamed}
-        onClickDuplicateMenuItem={onClickDuplicateMenuItem}
-        onClickDeleteMenuItem={onClickDeleteMenuItem}
-      />
-    ))}
-  </ul>
-);
-```
-
-#### **Phase 2: Virtualization導入**
-
-**ファイル**: `ItemsTree.tsx` (Phase1をベースに拡張)
-```typescript
-const virtualizer = useVirtualizer({
-  count: adaptedNodes.length,
-  getScrollElement: () => containerRef.current,
-  estimateSize: () => 40,
-});
-
-return (
-  <div ref={containerRef} className="tree-container">
-    <div style={{ height: virtualizer.getTotalSize() }}>
-      {virtualizer.getVirtualItems().map(virtualItem => {
-        const node = adaptedNodes[virtualItem.index];
-        return (
-          <div
-            key={node.page._id}
-            style={{
-              position: 'absolute',
-              top: virtualItem.start,
-              height: virtualItem.size,
-              width: '100%',
-            }}
-          >
-            <CustomTreeItem
-              itemNode={node}
-              // ... 既存props
-            />
-          </div>
-        );
-      })}
-    </div>
-  </div>
-);
-```
-
-#### **Phase 3 (将来): 完全なheadless-tree移行**
-
-最終的にはdrag&drop、selection等のUI機能もheadless-treeに移行可能ですが、**今回のスコープ外**。
-
-## 📁 現実的なファイル変更まとめ
-
-| アクション | ファイル | 内容 | スコープ |
-|---------|---------|------|------|
-| ✅ **完了** | **apps/app/src/server/service/page-listing/page-listing.ts** | selectクエリ追加 | API軽量化 |
-| ✅ **完了** | **apps/app/src/interfaces/page.ts** | IPageForTreeItem型定義 | API軽量化 |
-| 🔄 **修正** | **src/client/components/ItemsTree/ItemsTree.tsx** | headless-tree + virtualization導入 | **今回の核心** |
-| 🆕 **新規** | **src/client/components/ItemsTree/usePageTreeDataLoader.ts** | データローダー分離 | 保守性向上 |
-| ⚠️ **保持** | **src/client/components/TreeItem/TreeItemLayout.tsx** | 既存のまま(後方互換) | 既存責務保持 |
-| ⚠️ **部分削除** | **src/stores/page-listing.tsx** | useSWRxPageChildren削除 | 重複排除 |
-
-**新規ファイル**: 1個(データローダー分離のみ)  
-**変更ファイル**: 2個(ItemsTree改修 + store整理)  
-**削除ファイル**: 0個(既存アーキテクチャ尊重)
-
----
-
-## 🎯 実装優先順位
-
-**✅ Phase 1**: API軽量化(低リスク・即効性) - **完了**
-
-**📋 Phase 2-A**: ItemsTree内部のheadless-tree化
-- **工数**: 2-3日
-- **リスク**: 低(外部IF変更なし)
-- **効果**: 非同期ローディング最適化、キャッシュ改善
-
-**📋 Phase 2-B**: Virtualization導入  
-- **工数**: 2-3日
-- **リスク**: 低(内部実装のみ)
-- **効果**: レンダリング性能10-20倍改善
-
-**現在の効果**: API軽量化により 5倍のデータ転送量削減済み  
-**Phase 2完了時の予想効果**: 初期表示速度 20-50倍改善
-
----
-
-## 🏗️ 実装方針: **既存アーキテクチャ尊重**
-
-**基本方針**:
-- **既存のCustomTreeItem責務**は保持(rename/duplicate/delete等)
-- **データ管理層のみ**をheadless-tree化  
-- **外部インターフェース**は変更せず、内部最適化に集中
-- **段階的移行**で低リスク実装
-
-**今回のスコープ**:
-- ✅ 非同期データローディング最適化
-- ✅ Virtualizationによる大量要素対応  
-- ❌ drag&drop/selection(将来フェーズ)
-- ❌ 既存アーキテクチャの破壊的変更
-
----
-
-## 技術的参考資料
-- **headless-tree**: https://headless-tree.lukasbach.com/ (データ管理層のみ利用)
-- **react-virtual**: @tanstack/react-virtualを使用  
-- **アプローチ**: 既存ItemsTree内部でheadless-tree + virtualizationをハイブリッド活用

+ 0 - 71
.serena/memories/coding_conventions.md

@@ -1,71 +0,0 @@
-# コーディング規約とスタイルガイド
-
-## Linter・フォーマッター設定
-
-### Biome設定(統一予定)
-- **適用範囲**: 
-  - dist/, node_modules/, coverage/ などは除外
-  - .next/, bin/, config/ などのビルド成果物は除外
-  - package.json, .eslintrc.js などの設定ファイルは除外
-- **推奨**: 新規開発では Biome を使用
-
-### ESLint設定(廃止予定・過渡期)
-- **ベース設定**: weseek ESLint設定を使用
-- **TypeScript**: weseek/typescript 設定を適用
-- **React**: React関連のルールを適用
-- **主要なルール**:
-  - `import/prefer-default-export`: オフ(名前付きエクスポートを推奨)
-  - `import/order`: import文の順序を規定
-    - React を最初に
-    - 内部モジュール(`/**`)をparentグループの前に配置
-
-## TypeScript設定
-- **ターゲット**: ESNext
-- **モジュール**: ESNext  
-- **厳格モード**: 有効(strict: true)
-- **モジュール解決**: Bundler
-- **その他**:
-  - allowJs: true(JSファイルも許可)
-  - skipLibCheck: true(型チェックの最適化)
-  - isolatedModules: true(単独モジュールとしてコンパイル)
-
-## Stylelint設定
-- SCSS/CSSファイルに対して適用
-- recess-order設定を使用(プロパティの順序規定)
-- recommended-scss設定を適用
-
-## ファイル命名規則
-- TypeScript/JavaScriptファイル: キャメルケースまたはケバブケース
-- コンポーネントファイル: PascalCase(Reactコンポーネント)
-- 設定ファイル: ドット記法(.eslintrc.js など)
-
-## テストファイル命名規則(Vitest)
-vitest.workspace.mts の設定に基づく:
-
-### 単体テスト(Unit Test)
-- **ファイル名**: `*.spec.{ts,js}`
-- **環境**: Node.js
-- **例**: `utils.spec.ts`, `helper.spec.js`
-
-### 統合テスト(Integration Test)
-- **ファイル名**: `*.integ.ts`
-- **環境**: Node.js(MongoDB設定あり)
-- **例**: `api.integ.ts`, `service.integ.ts`
-
-### コンポーネントテスト(Component Test)
-- **ファイル名**: `*.spec.{tsx,jsx}`
-- **環境**: happy-dom
-- **例**: `Button.spec.tsx`, `Modal.spec.jsx`
-
-## ディレクトリ構造の規則
-- `src/`: ソースコード
-- `test/`: Jest用の古いテストファイル(廃止予定)
-- `test-with-vite/`: Vitest用の新しいテストファイル
-- `playwright/`: E2Eテストファイル
-- `config/`: 設定ファイル
-- `public/`: 静的ファイル
-- `dist/`: ビルド出力
-
-## 移行ガイドライン
-- 新規開発: Biome + Vitest を使用
-- 既存コード: 段階的に ESLint → Biome、Jest → Vitest に移行

+ 0 - 45
.serena/memories/development_environment.md

@@ -1,45 +0,0 @@
-# 開発環境とツール
-
-## 推奨システム要件
-- **Node.js**: ^20 || ^22
-- **パッケージマネージャー**: pnpm 10.4.1
-- **OS**: Linux(Ubuntuベース)、macOS、Windows
-
-## 利用可能なLinuxコマンド
-基本的なLinuxコマンドが利用可能:
-- `apt`, `dpkg`: パッケージ管理
-- `git`: バージョン管理
-- `curl`, `wget`: HTTP クライアント
-- `ssh`, `scp`, `rsync`: ネットワーク関連
-- `ps`, `lsof`, `netstat`, `top`: プロセス・ネットワーク監視
-- `tree`, `find`, `grep`: ファイル検索・操作
-- `zip`, `unzip`, `tar`, `gzip`, `bzip2`, `xz`: アーカイブ操作
-
-## 開発用ブラウザ
-```bash
-# ローカルサーバーをブラウザで開く
-"$BROWSER" http://localhost:3000
-```
-
-## 環境変数管理
-- **dotenv-flow**: 環境ごとの設定管理
-- 環境ファイル:
-  - `.env.development`: 開発環境
-  - `.env.production`: 本番環境
-  - `.env.test`: テスト環境
-  - `.env.*.local`: ローカル固有設定
-
-## デバッグ
-```bash
-# デバッグモードでサーバー起動
-cd apps/app && pnpm run dev  # --inspectフラグ付きでnodemon起動
-
-# REPL(Read-Eval-Print Loop)
-cd apps/app && pnpm run repl
-```
-
-## VS Code設定
-`.vscode/` ディレクトリに設定ファイルが含まれており、推奨拡張機能や設定が適用される。
-
-## Docker対応
-各アプリケーションにDockerファイルが含まれており、コンテナベースでの開発も可能。

+ 390 - 0
.serena/memories/nextjs-pages-router-getLayout-pattern.md

@@ -0,0 +1,390 @@
+# Next.js Pages Router における getLayout パターン完全ガイド
+
+## getLayout パターンの基本概念と仕組み
+
+getLayout パターンは、Next.js Pages Router における**ページごとのレイアウト定義を可能にする強力なアーキテクチャパターン**です。このパターンを使用することで、各ページが独自のレイアウト階層を静的な `getLayout` 関数を通じて定義できます。
+
+### 技術的な仕組み
+
+getLayout パターンは React のコンポーネントツリー構成を活用して動作します:
+
+```typescript
+// pages/dashboard.tsx
+import DashboardLayout from '../components/DashboardLayout'
+
+const Dashboard = () => <div>ダッシュボードコンテンツ</div>
+
+Dashboard.getLayout = function getLayout(page) {
+  return <DashboardLayout>{page}</DashboardLayout>
+}
+
+export default Dashboard
+
+// pages/_app.tsx
+export default function MyApp({ Component, pageProps }) {
+  const getLayout = Component.getLayout ?? ((page) => page)
+  return getLayout(<Component {...pageProps} />)
+}
+```
+
+**動作原理:**
+1. Next.js がページを初期化する際、`getLayout` プロパティをチェック
+2. `getLayout` 関数がページコンポーネントを受け取り、完全なレイアウトツリーを返す
+3. React の差分アルゴリズムがコンポーネントツリーの同じ位置を維持し、効率的な差分更新を実現
+
+## パフォーマンス向上の具体的なメリット
+
+### レンダリング回数の削減
+
+getLayout パターンの最大の利点は、**ページ遷移時のレイアウトコンポーネントの再マウント防止**です。React の差分アルゴリズムは、コンポーネントツリーの同じ位置に同じタイプのコンポーネントが存在する場合、そのインスタンスを再利用します。
+
+**実測データ(Zenn.dev の事例):**
+```
+実装前:
+├ /_app      97.7 kB (全ページで Recoil を含む)
+├ /articles  98 kB
+├ /profile   98 kB
+
+実装後:
+├ /_app      75 kB (22.7 kB 削減)
+├ /articles  75.3 kB (最適化されたバンドル)
+├ /profile   98.3 kB (必要な依存関係のみ)
+```
+
+### メモリ効率の改善
+
+**主要な最適化ポイント:**
+- **状態の永続化**: 入力値、スクロール位置、コンポーネント状態がナビゲーション間で保持
+- **イベントリスナーの永続性**: イベントハンドラーの再アタッチ回避
+- **DOM 参照の安定性**: サードパーティ統合用の DOM ノード参照の維持
+
+## 実装のベストプラクティス
+
+### TypeScript での型安全な実装
+
+```typescript
+// types/layout.ts
+import type { NextPage } from 'next'
+import type { AppProps } from 'next/app'
+import type { ReactElement, ReactNode } from 'react'
+
+export type NextPageWithLayout<P = {}, IP = P> = NextPage<P, IP> & {
+  getLayout?: (page: ReactElement) => ReactNode
+}
+
+export type AppPropsWithLayout = AppProps & {
+  Component: NextPageWithLayout
+}
+
+// pages/_app.tsx
+import type { AppPropsWithLayout } from '../types/layout'
+
+export default function MyApp({ Component, pageProps }: AppPropsWithLayout) {
+  const getLayout = Component.getLayout ?? ((page) => page)
+  return getLayout(<Component {...pageProps} />)
+}
+```
+
+### ネストレイアウトの実装
+
+```typescript
+// utils/nestLayout.ts
+export function nestLayout(
+  parentLayout: (page: ReactElement) => ReactNode,
+  childLayout: (page: ReactElement) => ReactNode
+) {
+  return (page: ReactElement) => parentLayout(childLayout(page))
+}
+
+// pages/dashboard/profile.tsx
+import { nestLayout } from '../../utils/nestLayout'
+import { getLayout as getBaseLayout } from '../../components/BaseLayout'
+import { getLayout as getDashboardLayout } from '../../components/DashboardLayout'
+
+const ProfilePage: NextPageWithLayout = () => {
+  return <div>プロフィールコンテンツ</div>
+}
+
+ProfilePage.getLayout = nestLayout(getBaseLayout, getDashboardLayout)
+```
+
+### 状態管理の最適化
+
+```typescript
+// レイアウトごとのコンテキスト分割
+const AuthLayout = ({ children }) => (
+  <AuthProvider>
+    <UserProvider>
+      {children}
+    </UserProvider>
+  </AuthProvider>
+)
+
+const PublicLayout = ({ children }) => (
+  <ThemeProvider>
+    {children}
+  </ThemeProvider>
+)
+
+// 各ページで適切なレイアウトを選択
+Page.getLayout = (page) => <AuthLayout>{page}</AuthLayout>
+```
+
+## バッドプラクティスと実装時の落とし穴
+
+### 避けるべきアンチパターン
+
+**❌ レイアウトの再作成**
+```typescript
+// 悪い例:レイアウトの永続性が失われる
+const BadPage = () => {
+  return (
+    <Layout>
+      <div>ページコンテンツ</div>
+    </Layout>
+  )
+}
+
+// ✅ 良い例:getLayout パターンを使用
+const GoodPage = () => <div>ページコンテンツ</div>
+GoodPage.getLayout = (page) => <Layout>{page}</Layout>
+```
+
+**❌ _app.tsx での条件付きレンダリング**
+```typescript
+// 悪い例:レイアウトの再マウントを引き起こす
+function MyApp({ Component, pageProps, router }) {
+  if (router.pathname.startsWith('/dashboard')) {
+    return <DashboardLayout><Component {...pageProps} /></DashboardLayout>
+  }
+  return <Component {...pageProps} />
+}
+```
+
+### メモリリークの防止
+
+```typescript
+// ✅ 適切なクリーンアップ
+const Layout = ({ children }) => {
+  useEffect(() => {
+    const handleResize = () => { /* 処理 */ }
+    
+    window.addEventListener('resize', handleResize)
+    
+    return () => {
+      window.removeEventListener('resize', handleResize)
+    }
+  }, [])
+
+  return <div>{children}</div>
+}
+```
+
+## 他のレイアウト管理手法との比較
+
+### Pages Router 内での比較
+
+| 手法 | 複雑度 | パフォーマンス | 柔軟性 | 学習曲線 |
+|------|--------|----------------|--------|----------|
+| getLayout | 中 | 高 | 高 | 中 |
+| HOCs | 高 | 中 | 高 | 高 |
+| _app.js ルーティング | 低 | 高 | 低 | 低 |
+| Context ベース | 高 | 中 | 高 | 高 |
+| ラッパーコンポーネント | 低 | 低 | 低 | 低 |
+
+### Next.js 13+ App Router との比較
+
+**App Router の利点:**
+- ビルトインのレイアウトネスティング
+- ファイルシステムベースの直感的な構造
+- 自動的な状態永続化
+- `loading.js` と `error.js` による組み込みの状態管理
+
+**getLayout パターンの利点:**
+- 明示的なレイアウト制御
+- 成熟した安定したパターン
+- シンプルなメンタルモデル
+- 優れた TTFB パフォーマンス
+
+**パフォーマンス比較:**
+- **TTFB**: Pages Router が App Router より最大 2 倍高速
+- **開発サーバー**: Pages Router がより安定
+- **バンドルサイズ**: getLayout により選択的な読み込みが可能
+
+## SEO と SSR/SSG への影響
+
+### Core Web Vitals への影響
+
+**測定された改善効果:**
+- **LCP (Largest Contentful Paint)**: レイアウトの永続化により改善
+- **INP (Interaction to Next Paint)**: JavaScript 実行時間の削減
+- **CLS (Cumulative Layout Shift)**: レイアウトシフトの除去
+
+**Netflix の事例:**
+- Time-to-Interactive が **50% 削減**
+- JavaScript バンドルサイズが **200KB 削減**
+- デスクトップユーザーの 97% が高速な First Input Delay を体験
+
+### SSR/SSG との統合
+
+```typescript
+// SSR との完全な互換性
+export async function getServerSideProps() {
+  const data = await fetchData()
+  return { props: { data } }
+}
+
+function Page({ data }) {
+  return <div>{data.content}</div>
+}
+
+Page.getLayout = (page) => <Layout>{page}</Layout>
+```
+
+## 実際のプロジェクトでの活用例
+
+### 企業での実装事例
+
+**Netflix:**
+- ログアウト済みホームページで Time-to-Interactive を 50% 削減
+- 戦略的なプリフェッチで後続ページロードを 30% 改善
+
+**Hulu:**
+- Next.js による統一されたフロントエンドアーキテクチャ
+- CSS-in-JS の自動コード分割を実装
+
+**Sonos:**
+- ビルド時間を **75% 短縮**
+- パフォーマンススコアを **10% 改善**
+
+## パフォーマンス測定と最適化
+
+### 測定ツールの設定
+
+```javascript
+// next.config.js - Bundle Analyzer の設定
+const withBundleAnalyzer = require('@next/bundle-analyzer')({
+  enabled: process.env.ANALYZE === 'true',
+});
+
+module.exports = withBundleAnalyzer(nextConfig);
+
+// 使用方法
+// ANALYZE=true npm run build
+```
+
+### React DevTools Profiler の活用
+
+```javascript
+import { Profiler } from 'react';
+
+function onRenderCallback(id, phase, actualDuration, baseDuration) {
+  console.log({ id, phase, actualDuration, baseDuration });
+}
+
+<Profiler id="LayoutProfile" onRender={onRenderCallback}>
+  <MyLayout>{children}</MyLayout>
+</Profiler>
+```
+
+### 最適化テクニック
+
+**メモ化の実装:**
+```typescript
+import { memo, useMemo, useCallback } from 'react'
+
+const Layout = memo(({ children, menuItems }) => {
+  const processedMenu = useMemo(() => 
+    menuItems.filter(item => item.visible).sort(), 
+    [menuItems]
+  );
+  
+  const handleNavigation = useCallback((path) => {
+    router.push(path);
+  }, [router]);
+  
+  return (
+    <div>
+      <Navigation items={processedMenu} onNavigate={handleNavigation} />
+      {children}
+    </div>
+  );
+});
+```
+
+**動的インポートによるコード分割:**
+```typescript
+import dynamic from 'next/dynamic';
+
+const DynamicSidebar = dynamic(() => import('../components/Sidebar'), {
+  loading: () => <SidebarSkeleton />,
+  ssr: false
+});
+
+const Layout = ({ children }) => (
+  <div>
+    <Header />
+    <DynamicSidebar />
+    <main>{children}</main>
+  </div>
+);
+```
+
+### パフォーマンスバジェットの実装
+
+```javascript
+export const PERFORMANCE_BUDGETS = {
+  layoutRenderTime: 16, // 60fps のための 16ms
+  memoryUsage: 50 * 1024 * 1024, // 50MB
+  bundleSize: 200 * 1024, // 200KB
+  firstContentfulPaint: 2000, // 2秒
+};
+
+const measureLayoutPerformance = (layoutName, renderFn) => {
+  const start = performance.now();
+  const result = renderFn();
+  const duration = performance.now() - start;
+  
+  if (duration > PERFORMANCE_BUDGETS.layoutRenderTime) {
+    console.warn(`Layout ${layoutName} がレンダーバジェットを超過: ${duration}ms`);
+  }
+  
+  return result;
+};
+```
+
+## 実装チェックリスト
+
+### 初期設定
+- [ ] TypeScript の型定義を設定
+- [ ] `_app.tsx` に getLayout パターンを実装
+- [ ] React DevTools をインストール
+- [ ] Bundle Analyzer を設定
+
+### 最適化の優先順位
+
+**高影響・低労力:**
+- [ ] レイアウトコンポーネントに React.memo を実装
+- [ ] Bundle Analyzer で大きな依存関係を特定
+- [ ] Context Provider をレイアウトごとに分割
+
+**中影響・中労力:**
+- [ ] 非クリティカルなレイアウトコンポーネントに動的インポートを実装
+- [ ] Suspense 境界を追加してストリーミングを改善
+- [ ] 自動パフォーマンス監視を設定
+
+**高影響・高労力:**
+- [ ] 状態管理アーキテクチャの再設計
+- [ ] 包括的なプログレッシブエンハンスメントの実装
+- [ ] 高度なパフォーマンスバジェットシステムの作成
+
+## まとめ
+
+getLayout パターンは、Next.js Pages Router において**強力なパフォーマンス最適化とアーキテクチャの柔軟性**を提供します。適切に実装すれば、以下の利点が得られます:
+
+1. **パフォーマンスの向上**: 不要な再レンダリングの削減とバンドルサイズの最適化
+2. **ユーザー体験の向上**: 状態の永続化とスムーズなページ遷移
+3. **アーキテクチャの柔軟性**: ページごとのレイアウトカスタマイズとパフォーマンスの維持
+4. **メモリ効率**: コンポーネントの再利用による最適なリソース使用
+
+App Router が新しい代替手段を提供する一方で、getLayout パターンの理解は React のレンダリング最適化とコンポーネントライフサイクル管理への深い洞察を提供します。Pages Router アプリケーションでは、プロジェクトの開始時から getLayout を実装することで、アプリケーションのスケールに応じて最大限のパフォーマンス利点とアーキテクチャの柔軟性を維持できます。

+ 441 - 0
.serena/memories/page-state-hooks-useLatestRevision-degradation.md

@@ -0,0 +1,441 @@
+# Page State Hooks - useLatestRevision リファクタリング記録
+
+**Date**: 2025-10-31
+**Branch**: support/use-jotai
+
+## 🎯 実施内容のサマリー
+
+`support/use-jotai` ブランチで `useLatestRevision` が機能していなかった問題を解決し、リビジョン管理の状態管理を大幅に改善しました。
+
+### 主な成果
+
+1. ✅ `IPageInfoForEntity.latestRevisionId` を導入
+2. ✅ `useSWRxIsLatestRevision` を SWR ベースで実装(Jotai atom から脱却)
+3. ✅ `remoteRevisionIdAtom` を完全削除(状態管理の簡素化)
+4. ✅ `useIsRevisionOutdated` の意味論を改善(「意図的な過去閲覧」を考慮)
+5. ✅ `useRevisionIdFromUrl` で URL パラメータ取得を一元化
+
+---
+
+## 📋 実装の要点
+
+### 1. `IPageInfoForEntity` に `latestRevisionId` を追加
+
+**ファイル**: `packages/core/src/interfaces/page.ts`
+
+```typescript
+export type IPageInfoForEntity = Omit<IPageInfo, 'isNotFound' | 'isEmpty'> & {
+  // ... existing fields
+  latestRevisionId?: string;  // ✅ 追加
+};
+```
+
+**ファイル**: `apps/app/src/server/service/page/index.ts:2605`
+
+```typescript
+const infoForEntity: Omit<IPageInfoForEntity, 'bookmarkCount'> = {
+  // ... existing fields
+  latestRevisionId: page.revision != null ? getIdStringForRef(page.revision) : undefined,
+};
+```
+
+**データフロー**: SSR で `constructBasicPageInfo` が自動的に `latestRevisionId` を設定 → `useSWRxPageInfo` で参照
+
+---
+
+### 2. `useSWRxIsLatestRevision` を SWR ベースで実装
+
+**ファイル**: `stores/page.tsx:164-191`
+
+```typescript
+export const useSWRxIsLatestRevision = (): SWRResponse<boolean, Error> => {
+  const currentPage = useCurrentPageData();
+  const pageId = currentPage?._id;
+  const shareLinkId = useShareLinkId();
+  const { data: pageInfo } = useSWRxPageInfo(pageId, shareLinkId);
+
+  const latestRevisionId = pageInfo && 'latestRevisionId' in pageInfo
+    ? pageInfo.latestRevisionId
+    : undefined;
+
+  const key = useMemo(() => {
+    if (currentPage?.revision?._id == null) {
+      return null;
+    }
+    return ['isLatestRevision', currentPage.revision._id, latestRevisionId ?? null];
+  }, [currentPage?.revision?._id, latestRevisionId]);
+
+  return useSWRImmutable(
+    key,
+    ([, currentRevisionId, latestRevisionId]) => {
+      if (latestRevisionId == null) {
+        return true;  // Assume latest if not available
+      }
+      return latestRevisionId === currentRevisionId;
+    },
+  );
+};
+```
+
+**使用箇所**: OldRevisionAlert, DisplaySwitcher, PageEditorReadOnly
+
+**判定**: `.data !== false` で「古いリビジョン」を検出
+
+---
+
+### 3. `remoteRevisionIdAtom` の完全削除
+
+**削除理由**:
+- `useSWRxPageInfo.data.latestRevisionId` で代替可能
+- 「Socket.io 更新検知」と「最新リビジョン保持」の用途が混在していた
+- 状態管理が複雑化していた
+
+**重要**: `RemoteRevisionData.remoteRevisionId` は型定義に残した
+→ コンフリクト解決時に「どのリビジョンに対して保存するか」の情報として必要
+
+---
+
+### 4. `useIsRevisionOutdated` の意味論的改善
+
+**改善前**: 単純に「現在のリビジョン ≠ 最新リビジョン」を判定
+**問題**: URL `?revisionId=xxx` で意図的に過去を見ている場合も `true` を返していた
+
+**改善後**: 「ユーザーが意図的に過去リビジョンを見ているか」を考慮
+
+**ファイル**: `states/context.ts:82-100`
+
+```typescript
+export const useRevisionIdFromUrl = (): string | undefined => {
+  const router = useRouter();
+  const revisionId = router.query.revisionId;
+  return typeof revisionId === 'string' ? revisionId : undefined;
+};
+
+export const useIsViewingSpecificRevision = (): boolean => {
+  const revisionId = useRevisionIdFromUrl();
+  return revisionId != null;
+};
+```
+
+**ファイル**: `stores/page.tsx:193-219`
+
+```typescript
+export const useIsRevisionOutdated = (): boolean => {
+  const { data: isLatestRevision } = useSWRxIsLatestRevision();
+  const isViewingSpecificRevision = useIsViewingSpecificRevision();
+
+  // If user intentionally views a specific revision, don't show "outdated" alert
+  if (isViewingSpecificRevision) {
+    return false;
+  }
+
+  if (isLatestRevision == null) {
+    return false;
+  }
+
+  // User expects latest, but it's not latest = outdated
+  return !isLatestRevision;
+};
+```
+
+---
+
+## 🎭 動作例
+
+| 状況 | isLatestRevision | isViewingSpecificRevision | isRevisionOutdated | 意味 |
+|------|------------------|---------------------------|---------------------|------|
+| 最新を表示中 | true | false | false | 正常 |
+| Socket.io更新を受信 | false | false | **true** | 「再fetchせよ」 |
+| URL `?revisionId=old` で過去を閲覧 | false | true | false | 「意図的な過去閲覧」 |
+
+---
+
+## 🔄 現状の remoteRevision 系 atom と useSetRemoteLatestPageData
+
+### 削除済み
+- ✅ `remoteRevisionIdAtom` - 完全削除(`useSWRxPageInfo.data.latestRevisionId` で代替)
+
+### 残存している atom(未整理)
+- ⚠️ `remoteRevisionBodyAtom` - ConflictDiffModal で使用
+- ⚠️ `remoteRevisionLastUpdateUserAtom` - ConflictDiffModal, PageStatusAlert で使用
+- ⚠️ `remoteRevisionLastUpdatedAtAtom` - ConflictDiffModal で使用
+
+### `useSetRemoteLatestPageData` の役割
+
+**定義**: `states/page/use-set-remote-latest-page-data.ts`
+
+```typescript
+export type RemoteRevisionData = {
+  remoteRevisionId: string;      // 型には含むが atom には保存しない
+  remoteRevisionBody: string;
+  remoteRevisionLastUpdateUser?: IUserHasId;
+  remoteRevisionLastUpdatedAt: Date;
+};
+
+export const useSetRemoteLatestPageData = (): SetRemoteLatestPageData => {
+  // remoteRevisionBodyAtom, remoteRevisionLastUpdateUserAtom, remoteRevisionLastUpdatedAtAtom を更新
+  // remoteRevisionId は atom に保存しない(コンフリクト解決時のパラメータとしてのみ使用)
+};
+```
+
+**使用箇所**(6箇所):
+
+1. **`page-updated.ts`** - Socket.io でページ更新受信時
+   ```typescript
+   // 他のユーザーがページを更新したときに最新リビジョン情報を保存
+   setRemoteLatestPageData({
+     remoteRevisionId: s2cMessagePageUpdated.revisionId,
+     remoteRevisionBody: s2cMessagePageUpdated.revisionBody,
+     remoteRevisionLastUpdateUser: s2cMessagePageUpdated.remoteLastUpdateUser,
+     remoteRevisionLastUpdatedAt: s2cMessagePageUpdated.revisionUpdateAt,
+   });
+   ```
+
+2. **`page-operation.ts`** - 自分がページ保存した後(`useUpdateStateAfterSave`)
+   ```typescript
+   // 自分が保存した後の最新リビジョン情報を保存
+   setRemoteLatestPageData({
+     remoteRevisionId: updatedPage.revision._id,
+     remoteRevisionBody: updatedPage.revision.body,
+     remoteRevisionLastUpdateUser: updatedPage.lastUpdateUser,
+     remoteRevisionLastUpdatedAt: updatedPage.updatedAt,
+   });
+   ```
+
+3. **`conflict.tsx`** - コンフリクト解決時(`useConflictResolver`)
+   ```typescript
+   // コンフリクト発生時にリモートリビジョン情報を保存
+   setRemoteLatestPageData(remoteRevidsionData);
+   ```
+
+4. **`drawio-modal-launcher-for-view.ts`** - Drawio 編集でコンフリクト発生時
+5. **`handsontable-modal-launcher-for-view.ts`** - Handsontable 編集でコンフリクト発生時
+6. **定義ファイル自体**
+
+### 現在のデータフロー
+
+```
+┌─────────────────────────────────────────────────────┐
+│ Socket.io / 保存処理 / コンフリクト                  │
+└─────────────────────────────────────────────────────┘
+                    ↓
+┌─────────────────────────────────────────────────────┐
+│ useSetRemoteLatestPageData                          │
+│  ├─ remoteRevisionBodyAtom ← body                   │
+│  ├─ remoteRevisionLastUpdateUserAtom ← user         │
+│  └─ remoteRevisionLastUpdatedAtAtom ← date          │
+│  (remoteRevisionId は保存しない)                    │
+└─────────────────────────────────────────────────────┘
+                    ↓
+┌─────────────────────────────────────────────────────┐
+│ 使用箇所                                             │
+│  ├─ ConflictDiffModal: body, user, date を表示     │
+│  └─ PageStatusAlert: user を表示                    │
+└─────────────────────────────────────────────────────┘
+```
+
+### 問題点
+
+1. **PageInfo (latestRevisionId) との同期がない**:
+   - Socket.io 更新時に `remoteRevision*` atom は更新される
+   - しかし `useSWRxPageInfo.data.latestRevisionId` は更新されない
+   - → `useSWRxIsLatestRevision()` と `useIsRevisionOutdated()` がリアルタイム更新を検知できない
+
+2. **用途が限定的**:
+   - 主に ConflictDiffModal でリモートリビジョンの詳細を表示するために使用
+   - PageStatusAlert でも使用しているが、本来は `useIsRevisionOutdated()` で十分
+
+3. **データの二重管理**:
+   - リビジョン ID: `useSWRxPageInfo.data.latestRevisionId` で管理
+   - リビジョン詳細 (body, user, date): atom で管理
+   - 一貫性のないデータ管理
+
+---
+
+## 🎯 次に取り組むべきタスク
+
+### PageInfo (useSWRxPageInfo) の mutate が必要な3つのタイミング
+
+#### 1. 🔴 SSR時の optimistic update
+
+**問題**:
+- SSR で `pageWithMeta.meta` (IPageInfoForEntity) が取得されているが、`useSWRxPageInfo` のキャッシュに入っていない
+- クライアント初回レンダリング時に PageInfo が未取得状態になる
+
+**実装方針**:
+```typescript
+// [[...path]]/index.page.tsx または適切な場所
+const { mutate: mutatePageInfo } = useSWRxPageInfo(pageId, shareLinkId);
+
+useEffect(() => {
+  if (pageWithMeta?.meta) {
+    mutatePageInfo(pageWithMeta.meta, { revalidate: false });
+  }
+}, [pageWithMeta?.meta, mutatePageInfo]);
+```
+
+**Note**:
+- Jotai の hydrate とは別レイヤー(Jotai は atom、これは SWR のキャッシュ)
+- `useSWRxPageInfo` は既に `initialData` パラメータを持っているが、呼び出し側で渡していない
+- **重要**: `mutatePageInfo` は bound mutate(hook から返されるもの)を使う
+
+---
+
+#### 2. 🔴 same route 遷移時の mutate
+
+**問題**:
+- `[[...path]]` ルート内での遷移(例: `/pageA` → `/pageB`)時に PageInfo が更新されない
+- `useFetchCurrentPage` が新しいページを取得しても PageInfo は古いまま
+
+**実装方針**:
+```typescript
+// states/page/use-fetch-current-page.ts
+export const useFetchCurrentPage = () => {
+  const shareLinkId = useAtomValue(shareLinkIdAtom);
+  const revisionIdFromUrl = useRevisionIdFromUrl();
+
+  // ✅ 追加: PageInfo の mutate 関数を取得
+  const { mutate: mutatePageInfo } = useSWRxPageInfo(currentPageId, shareLinkId);
+
+  const fetchCurrentPage = useAtomCallback(
+    useCallback(async (get, set, args) => {
+      // ... 既存のフェッチ処理 ...
+
+      const { data } = await apiv3Get('/page', params);
+      const { page: newData } = data;
+
+      set(currentPageDataAtom, newData);
+      set(currentPageEntityIdAtom, newData._id);
+      set(currentPageEmptyIdAtom, undefined);
+
+      // ✅ 追加: PageInfo を再フェッチ
+      mutatePageInfo();  // 引数なし = revalidate (再フェッチ)
+
+      return newData;
+    }, [shareLinkId, revisionIdFromUrl, mutatePageInfo])
+  );
+};
+```
+
+**Note**:
+- `mutatePageInfo()` を引数なしで呼ぶと SWR が再フェッチする
+- `/page` API からは meta が取得できないため、再フェッチが必要
+
+---
+
+#### 3. 🔴 Socket.io 更新時の mutate
+
+**問題**:
+- Socket.io で他のユーザーがページを更新したとき、`useSWRxPageInfo` のキャッシュが更新されない
+- `latestRevisionId` が古いままになる
+- **重要**: `useSWRxIsLatestRevision()` と `useIsRevisionOutdated()` が正しく動作しない
+
+**実装方針**:
+```typescript
+// client/services/side-effects/page-updated.ts
+const { mutate: mutatePageInfo } = useSWRxPageInfo(currentPage?._id, shareLinkId);
+
+const remotePageDataUpdateHandler = useCallback((data) => {
+  const { s2cMessagePageUpdated } = data;
+
+  // 既存: remoteRevision atom を更新
+  setRemoteLatestPageData(remoteData);
+
+  // ✅ 追加: PageInfo の latestRevisionId を optimistic update
+  if (currentPage?._id != null) {
+    mutatePageInfo((currentPageInfo) => {
+      if (currentPageInfo && 'latestRevisionId' in currentPageInfo) {
+        return {
+          ...currentPageInfo,
+          latestRevisionId: s2cMessagePageUpdated.revisionId,
+        };
+      }
+      return currentPageInfo;
+    }, { revalidate: false });
+  }
+}, [currentPage?._id, mutatePageInfo, setRemoteLatestPageData]);
+```
+
+**Note**:
+- 引数に updater 関数を渡して既存データを部分更新
+- `revalidate: false` で再フェッチを抑制(optimistic update のみ)
+
+---
+
+### SWR の mutate の仕組み
+
+**Bound mutate** (推奨):
+```typescript
+const { data, mutate } = useSWRxPageInfo(pageId, shareLinkId);
+mutate(newData, options);  // 自動的に key に紐付いている
+```
+
+**グローバル mutate**:
+```typescript
+import { mutate } from 'swr';
+mutate(['/page/info', pageId, shareLinkId, isGuestUser], newData, options);
+```
+
+**optimistic update のオプション**:
+- `{ revalidate: false }` - 再フェッチせず、キャッシュのみ更新
+- `mutate()` (引数なし) - 再フェッチ
+- `mutate(updater, options)` - updater 関数で部分更新
+
+---
+
+### 🟡 優先度 中: PageStatusAlert の重複ロジック削除
+
+**ファイル**: `src/client/components/PageStatusAlert.tsx`
+
+**現状**: 独自に `isRevisionOutdated` を計算している
+**提案**: `useIsRevisionOutdated()` を使用
+
+---
+
+### 🟢 優先度 低
+
+- テストコードの更新
+- `initLatestRevisionField` の役割ドキュメント化
+
+---
+
+## 📊 アーキテクチャの改善
+
+### Before (問題のある状態)
+
+```
+┌─────────────────────┐
+│ latestRevisionAtom  │ ← atom(true) でハードコード(機能せず)
+└─────────────────────┘
+┌─────────────────────┐
+│ remoteRevisionIdAtom│ ← 複数の用途で混在(Socket.io更新 + 最新リビジョン保持)
+└─────────────────────┘
+```
+
+### After (改善後)
+
+```
+┌────────────────────────────────┐
+│ useSWRxPageInfo                │
+│  └─ data.latestRevisionId      │ ← SSR で自動設定、SWR でキャッシュ管理
+└────────────────────────────────┘
+        ↓
+┌────────────────────────────────┐
+│ useSWRxIsLatestRevision()      │ ← SWR ベース、汎用的な状態確認
+└────────────────────────────────┘
+        ↓
+┌────────────────────────────────┐
+│ useIsRevisionOutdated()        │ ← 「再fetch推奨」のメッセージ性
+│  + useIsViewingSpecificRevision│ ← URL パラメータを考慮
+└────────────────────────────────┘
+```
+
+---
+
+## ✅ メリット
+
+1. **状態管理の簡素化**: Jotai atom を削減、SWR の既存インフラを活用
+2. **データフローの明確化**: SSR → SWR → hooks という一貫した流れ
+3. **意味論の改善**: `useIsRevisionOutdated` が「再fetch推奨」を正確に表現
+4. **保守性の向上**: URL パラメータ取得を `useRevisionIdFromUrl` に集約
+5. **型安全性**: `IPageInfoForEntity` で厳密に型付け

+ 65 - 0
.serena/memories/page-transition-and-rendering-flow.md

@@ -0,0 +1,65 @@
+# ページ遷移とレンダリングのデータフロー
+
+このドキュメントは、GROWIのページ遷移からレンダリングまでのデータフローを解説します。
+
+## 登場人物
+
+1.  **`[[...path]].page.tsx`**: Next.js の動的ルーティングを担うメインコンポーネント。サーバーサイドとクライアントサイドの両方で動作します。
+2.  **`useSameRouteNavigation.ts`**: クライアントサイドでのパス変更を検知し、データ取得を**トリガー**するフック。
+3.  **`useFetchCurrentPage.ts`**: データ取得と関連する Jotai atom の更新を一元管理するフック。データ取得が本当に必要かどうかの最終判断も担います。
+4.  **`useShallowRouting.ts`**: サーバーサイドで正規化されたパスとブラウザのURLを同期させるフック。
+5.  **`server-side-props.ts`**: サーバーサイドレンダリング(SSR)時にページデータを取得し、`props` としてページコンポーネントに渡します。
+
+---
+
+## フロー1: サーバーサイドレンダリング(初回アクセス時)
+
+ユーザーがURLに直接アクセスするか、ページをリロードした際のフローです。
+
+1.  **リクエスト受信**: サーバーがユーザーからのリクエスト(例: `/user/username/memo`)を受け取ります。
+2.  **`getServerSideProps` の実行**:
+    - `server-side-props.ts` の `getServerSidePropsForInitial` が実行されます。
+    - `retrievePageData` が呼び出され、パスの正規化(例: `/user/username` → `/user/username/`)が行われ、APIからページデータを取得します。
+    - 取得したデータと、正規化後のパス (`currentPathname`) を `props` として `[[...path]].page.tsx` に渡します。
+3.  **コンポーネントのレンダリングとJotai Atomの初期化**:
+    - `[[...path]].page.tsx` は `props` を受け取り、そのデータで `currentPageDataAtom` などのJotai Atomを初期化します。
+    - `PageView` などのコンポーネントがサーバーサイドでレンダリングされます。
+4.  **クライアントサイドでのハイドレーションとURL正規化**:
+    - レンダリングされたHTMLがブラウザに送信され、Reactがハイドレーションを行います。
+    - **`useShallowRouting`** が実行され、ブラウザのURL (`/user/username/memo`) と `props.currentPathname` (`/user/username/memo/`) を比較します。
+    - 差異がある場合、`router.replace` を `shallow: true` で実行し、ブラウザのURLをサーバーが認識している正規化後のパスに静かに更新します。
+
+---
+
+## フロー2: クライアントサイドナビゲーション(`<Link>` クリック時)
+
+アプリケーション内でページ間を移動する際のフローです。
+
+1.  **ナビゲーション開始**:
+    - ユーザーが `<Link href="/new/page">` をクリックします。
+    - Next.js の `useRouter` がURLの変更を検出し、`[[...path]].page.tsx` が再評価されます。
+2.  **`useSameRouteNavigation` によるトリガー**:
+    - このフックの `useEffect` が `router.asPath` の変更 (`/new/page`) を検知します。
+    - **`fetchCurrentPage({ path: '/new/page' })`** を呼び出します。このフックは常にデータ取得を試みます。
+3.  **`useFetchCurrentPage` によるデータ取得の判断と実行**:
+    - `fetchCurrentPage` 関数が実行されます。
+    - **3a. パスの前処理**:
+        - まず、引数で渡された `path` をデコードします(例: `encoded%2Fpath` → `encoded/path`)。
+        - 次に、パスがパーマリンク形式(例: `/65d4e0a0f7b7b2e5a8652e86`)かどうかを判定します。
+    - **3b. 重複取得の防止(ガード節)**:
+        - 前処理したパスや、パーマリンクから抽出したページIDが、現在Jotaiで管理されているページのパスやIDと同じでないかチェックします。
+        - 同じであれば、APIを叩かずに処理を中断し、現在のページデータを返します。
+    - **3c. 読み込み状態開始**: `pageLoadingAtom` を `true` に設定します。
+    - **3d. API通信**: `apiv3Get('/page', ...)` を実行してサーバーから新しいページデータを取得します。パラメータには、パス、ページID、リビジョンIDなどが含まれます。
+4.  **アトミックな状態更新**:
+    - **API成功時**:
+        - 関連する **すべてのatomを一度に更新** します (`currentPageDataAtom`, `currentPageEntityIdAtom`, `currentPageEmptyIdAtom`, `pageNotFoundAtom`, `pageLoadingAtom` など)。
+        - これにより、中間的な状態(`pageId`が`undefined`になるなど)が発生することなく、データが完全に揃った状態で一度だけ状態が更新されます。
+    - **APIエラー時 (例: 404 Not Found)**:
+        - `pageErrorAtom` にエラーオブジェクトを設定します。
+        - `pageNotFoundAtom` を `true` に設定します。
+        - 最後に `pageLoadingAtom` を `false` に設定します。
+5.  **`PageView` の最終レンダリング**:
+    - `currentPageDataAtom` の更新がトリガーとなり、`PageView` コンポーネントが新しいデータで再レンダリングされます。
+6.  **副作用の実行**:
+    - `useSameRouteNavigation` 内で `fetchCurrentPage` が完了した後、`mutateEditingMarkdown` が呼び出され、エディタの状態が更新されます。

+ 0 - 26
.serena/memories/project_overview.md

@@ -1,26 +0,0 @@
-# GROWIプロジェクト概要
-
-## 目的
-GROWIは、マークダウンを使用したチームコラボレーションソフトウェアです。Wikiとドキュメント作成ツールの機能を持ち、チーム間の情報共有とコラボレーションを促進します。
-
-## プロジェクトの詳細
-- **プロジェクト名**: GROWI
-- **バージョン**: 7.3.0-RC.0
-- **ライセンス**: MIT
-- **作者**: Yuki Takei <yuki@weseek.co.jp>
-- **リポジトリ**: https://github.com/growilabs/growi.git
-- **公式サイト**: https://growi.org
-
-## 主な特徴
-- Markdownベースのドキュメント作成
-- チームコラボレーション機能
-- Wikiのような情報共有プラットフォーム
-- ドキュメント管理とバージョン管理
-
-## アーキテクチャ
-- **モノレポ構成**: pnpm workspace + Turbo.js を使用
-- **主要アプリケーション**: apps/app (メインアプリケーション)
-- **追加アプリケーション**: 
-  - apps/pdf-converter (PDF変換サービス)
-  - apps/slackbot-proxy (Slackボットプロキシ)
-- **パッケージ**: packages/ 配下に複数の共有ライブラリ

+ 0 - 90
.serena/memories/project_structure.md

@@ -1,90 +0,0 @@
-# プロジェクト構造
-
-## ルートディレクトリ構造
-```
-growi/
-├── apps/                    # アプリケーション群
-│   ├── app/                # メインのGROWIアプリケーション
-│   ├── pdf-converter/      # PDF変換サービス
-│   └── slackbot-proxy/     # Slackボットプロキシ
-├── packages/               # 共有パッケージ群
-│   ├── core/              # コアライブラリ
-│   ├── core-styles/       # 共通スタイル
-│   ├── editor/            # エディターコンポーネント
-│   ├── pluginkit/         # プラグインキット
-│   ├── ui/                # UIコンポーネント
-│   ├── presentation/      # プレゼンテーション層
-│   ├── preset-templates/  # テンプレート
-│   ├── preset-themes/     # テーマ
-│   └── remark-*/          # remarkプラグイン群
-├── bin/                   # ユーティリティスクリプト
-└── 設定ファイル群
-```
-
-## メインアプリケーション (apps/app/)
-```
-apps/app/
-├── src/                   # ソースコード
-├── test/                  # 古いJestテストファイル(廃止予定)
-├── test-with-vite/        # 新しいVitestテストファイル
-├── playwright/            # E2Eテスト(Playwright)
-├── config/                # 設定ファイル
-├── public/                # 静的ファイル
-├── docker/                # Docker関連
-├── bin/                   # スクリプト
-└── 設定ファイル群
-```
-
-## テストディレクトリの詳細
-
-### test/ (廃止予定)
-- Jest用の古いテストファイル
-- 段階的にtest-with-vite/に移行予定
-- 新規テストは作成しない
-
-### test-with-vite/
-- Vitest用の新しいテストファイル
-- 新規テストはここに作成
-- セットアップファイル: `setup/mongoms.ts` (MongoDB用)
-
-### playwright/
-- E2Eテスト用ディレクトリ
-- ブラウザ操作を含むテスト
-
-## テストファイルの配置ルール
-
-### Vitestテストファイル
-以下のパターンでソースコードと同じディレクトリまたはtest-with-vite/配下に配置:
-
-- **単体テスト**: `*.spec.{ts,js}`
-- **統合テスト**: `*.integ.ts` 
-- **コンポーネントテスト**: `*.spec.{tsx,jsx}`
-
-例:
-```
-src/
-├── utils/
-│   ├── helper.ts
-│   └── helper.spec.ts       # 単体テスト
-├── components/
-│   ├── Button.tsx
-│   └── Button.spec.tsx      # コンポーネントテスト
-└── services/
-    ├── api.ts
-    └── api.integ.ts         # 統合テスト
-```
-
-## パッケージ(packages/)
-各パッケージは独立したnpmパッケージとして管理され、以下の構造を持つ:
-- `src/`: ソースコード
-- `dist/`: ビルド出力
-- `package.json`: パッケージ設定
-- `tsconfig.json`: TypeScript設定
-
-## 重要な設定ファイル
-- **pnpm-workspace.yaml**: ワークスペース設定
-- **turbo.json**: Turbo.jsビルド設定
-- **tsconfig.base.json**: TypeScript基本設定
-- **biome.json**: Biome linter/formatter設定
-- **.eslintrc.js**: ESLint設定(廃止予定)
-- **vitest.workspace.mts**: Vitestワークスペース設定

+ 0 - 100
.serena/memories/suggested_commands.md

@@ -1,100 +0,0 @@
-# 推奨開発コマンド集
-
-## セットアップ
-```bash
-# 初期セットアップ
-pnpm run bootstrap
-# または
-pnpm install
-```
-
-## 開発サーバー
-```bash
-# メインアプリケーション開発モード
-cd /workspace/growi/apps/app && pnpm run dev
-
-# ルートから起動(本番用ビルド後)
-pnpm start
-```
-
-## ビルド
-```bash
-# メインアプリケーションのビルド
-pnpm run app:build
-
-# Slackbot Proxyのビルド
-pnpm run slackbot-proxy:build
-
-# 全体ビルド(Turboで並列実行)
-turbo run build
-```
-
-## Lint・フォーマット
-```bash
-# 全てのLint実行
-pnpm run lint
-```
-
-## apps/app の Lint・フォーマット
-```bash
-# 【推奨】Biome実行(lint + format)
-cd /workspace/growi/apps/app pnpm run lint:biome
-
-# 【過渡期】ESLint実行(廃止予定)
-cd /workspace/growi/apps/app pnpm run lint:eslint
-
-# Stylelint実行
-cd /workspace/growi/apps/app pnpm run lint:styles
-
-# 全てのLint実行
-cd /workspace/growi/apps/app pnpm run lint
-
-# TypeScript型チェック
-cd /workspace/growi/apps/app pnpm run lint:typecheck
-```
-
-## テスト
-```bash
-# 【推奨】Vitestテスト実行
-pnpm run test:vitest
-
-# 【過渡期】Jest(統合テスト)(廃止予定)
-pnpm run test:jest
-
-# 全てのテスト実行(過渡期対応)
-pnpm run test
-
-# Vitestで特定のファイルに絞って実行
-pnpm run test:vitest {target-file-name}
-
-# E2Eテスト(Playwright)
-npx playwright test
-```
-
-## データベース関連
-```bash
-# マイグレーション実行
-cd apps/app && pnpm run migrate
-
-# 開発環境でのマイグレーション
-cd apps/app && pnpm run dev:migrate
-
-# マイグレーション状態確認
-cd apps/app && pnpm run dev:migrate:status
-```
-
-## その他の便利コマンド
-```bash
-# REPL起動
-cd apps/app && pnpm run repl
-
-# OpenAPI仕様生成
-cd apps/app && pnpm run openapi:generate-spec:apiv3
-
-# クリーンアップ
-cd apps/app && pnpm run clean
-```
-
-## 注意事項
-- ESLintとJestは廃止予定のため、新規開発ではBiomeとVitestを使用してください
-- 既存のコードは段階的に移行中です

+ 0 - 95
.serena/memories/task_completion_checklist.md

@@ -1,95 +0,0 @@
-# タスク完了時のチェックリスト
-
-## コードを書いた後に必ず実行すべきコマンド
-
-### 1. Lint・フォーマットの実行
-```bash
-# 【推奨】Biome実行(新規開発)
-pnpm run lint:biome
-
-# 【過渡期】全てのLint実行(既存コード)
-pnpm run lint
-
-# 個別実行(必要に応じて)
-pnpm run lint:eslint      # ESLint(廃止予定)
-pnpm run lint:styles      # Stylelint
-pnpm run lint:typecheck   # TypeScript型チェック
-```
-
-### 2. テストの実行
-```bash
-# 【推奨】Vitestテスト実行(新規開発)
-pnpm run test:vitest
-
-# 【過渡期】全てのテスト実行(既存コード)
-pnpm run test
-
-# 個別実行
-pnpm run test:jest        # Jest(廃止予定)
-pnpm run test:vitest {target-file-name}     # Vitest
-```
-
-### 3. E2Eテストの実行(重要な機能変更時)
-```bash
-cd apps/app
-npx playwright test
-```
-
-### 4. ビルドの確認
-```bash
-# メインアプリケーションのビルド
-pnpm run app:build
-
-# 関連パッケージのビルド
-turbo run build
-```
-
-### 5. 動作確認
-```bash
-# 開発サーバーでの動作確認
-cd apps/app && pnpm run dev
-
-# または本番ビルドでの確認
-pnpm start
-```
-
-## 特別な確認事項
-
-### OpenAPI仕様の確認(API変更時)
-```bash
-cd apps/app
-pnpm run openapi:generate-spec:apiv3
-pnpm run lint:openapi:apiv3
-```
-
-### データベーススキーマ変更時
-```bash
-cd apps/app
-pnpm run dev:migrate:status  # 現在の状態確認
-pnpm run dev:migrate         # マイグレーション実行
-```
-
-## テストファイル作成時の注意
-
-### 新規テストファイル
-- **単体テスト**: `*.spec.{ts,js}` (Node.js環境)
-- **統合テスト**: `*.integ.ts` (Node.js + MongoDB環境)  
-- **コンポーネントテスト**: `*.spec.{tsx,jsx}` (happy-dom環境)
-- test-with-vite/ または対象ファイルと同じディレクトリに配置
-
-### 既存テストの修正
-- test/ 配下のJestテストは段階的に移行
-- 可能であればtest-with-vite/にVitestテストとして書き直し
-
-## コミット前の最終チェック
-1. Biome(または過渡期はESLint)エラーが解消されているか
-2. Vitestテスト(または過渡期はJest)がパスしているか
-3. 重要な変更はPlaywright E2Eテストも実行
-4. ビルドが成功するか
-5. 変更による既存機能への影響がないか
-6. 適切なコミットメッセージを作成したか
-
-## 移行期間中の注意事項
-- 新規開発: Biome + Vitest を使用
-- 既存コード修正: 可能な限り Biome + Vitest に移行
-- レガシーツールは段階的に廃止予定

+ 0 - 42
.serena/memories/tech_stack.md

@@ -1,42 +0,0 @@
-# 技術スタック
-
-## プログラミング言語
-- **TypeScript**: メイン言語(~5.0.0)
-- **JavaScript**: 一部のコンポーネント
-
-## フロントエンド
-- **Next.js**: Reactベースのフレームワーク
-- **React**: UIライブラリ
-- **Vite**: ビルドツール、開発サーバー
-- **SCSS**: スタイルシート
-- **SWR**: グローバルステート管理、データフェッチ・キャッシュ管理(^2.3.2)
-
-## バックエンド
-- **Node.js**: ランタイム(^20 || ^22)
-- **Express.js**: Webフレームワーク(推測)
-- **MongoDB**: データベース
-- **Mongoose**: MongoDB用ORM(^6.13.6)
-  - mongoose-gridfs: GridFS対応(^1.2.42)
-  - mongoose-paginate-v2: ページネーション(^1.3.9)
-  - mongoose-unique-validator: バリデーション(^2.0.3)
-
-## 開発ツール
-- **pnpm**: パッケージマネージャー(10.4.1)
-- **Turbo**: モノレポビルドシステム(^2.1.3)
-- **ESLint**: Linter(weseek設定を使用)【廃止予定 - 現在は過渡期】
-- **Biome**: 統一予定のLinter/Formatter
-- **Stylelint**: CSS/SCSSのLinter
-- **Jest**: テスティングフレームワーク【廃止予定 - 現在は過渡期】
-- **Vitest**: 高速テスティングフレームワーク【統一予定】
-- **Playwright**: E2Eテスト【統一予定】
-
-## その他のツール
-- **SWC**: TypeScriptコンパイラー(高速)
-- **ts-node**: TypeScript直接実行
-- **nodemon**: 開発時のホットリロード
-- **dotenv-flow**: 環境変数管理
-- **Swagger/OpenAPI**: API仕様
-
-## 移行計画
-- **Linter**: ESLint → Biome に統一予定
-- **テスト**: Jest → Vitest + Playwright に統一予定

+ 65 - 9
.serena/project.yml

@@ -1,9 +1,3 @@
-# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
-#  * For C, use cpp
-#  * For JavaScript, use typescript
-# Special requirements:
-#  * csharp: Requires the presence of a .sln file in the project folder.
-language: typescript
 
 
 # whether to use the project's gitignore file to ignore files
 # whether to use the project's gitignore file to ignore files
 # Added on 2025-04-07
 # Added on 2025-04-07
@@ -22,7 +16,7 @@ read_only: false
 
 
 # list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
 # list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
 # Below is the complete list of tools for convenience.
 # Below is the complete list of tools for convenience.
-# To make sure you have the latest list of tools, and to view their descriptions, 
+# To make sure you have the latest list of tools, and to view their descriptions,
 # execute `uv run scripts/print_tool_overview.py`.
 # execute `uv run scripts/print_tool_overview.py`.
 #
 #
 #  * `activate_project`: Activates a project by name.
 #  * `activate_project`: Activates a project by name.
@@ -59,10 +53,72 @@ read_only: false
 #  * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
 #  * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
 #  * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
 #  * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
 #  * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
 #  * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
-excluded_tools: []
+excluded_tools:
+- "check_onboarding_performed"
+- "execute_shell_command"
+- "initial_instructions"
+- "onboarding"
+- "prepare_for_new_conversation"
+- "read_memory"
+- "write_memory"
+- "list_memories"
+- "delete_memory"
 
 
 # initial prompt for the project. It will always be given to the LLM upon activating the project
 # initial prompt for the project. It will always be given to the LLM upon activating the project
 # (contrary to the memories, which are loaded on demand).
 # (contrary to the memories, which are loaded on demand).
 initial_prompt: ""
 initial_prompt: ""
-
+# the name by which the project can be referenced within Serena
 project_name: "growi"
 project_name: "growi"
+
+# list of mode names to that are always to be included in the set of active modes
+# The full set of modes to be activated is base_modes + default_modes.
+# If the setting is undefined, the base_modes from the global configuration (serena_config.yml) apply.
+# Otherwise, this setting overrides the global configuration.
+# Set this to [] to disable base modes for this project.
+# Set this to a list of mode names to always include the respective modes for this project.
+base_modes:
+
+# list of mode names that are to be activated by default.
+# The full set of modes to be activated is base_modes + default_modes.
+# If the setting is undefined, the default_modes from the global configuration (serena_config.yml) apply.
+# Otherwise, this overrides the setting from the global configuration (serena_config.yml).
+# This setting can, in turn, be overridden by CLI parameters (--mode).
+default_modes:
+
+# list of tools to include that would otherwise be disabled (particularly optional tools that are disabled by default)
+included_optional_tools: []
+
+# fixed set of tools to use as the base tool set (if non-empty), replacing Serena's default set of tools.
+# This cannot be combined with non-empty excluded_tools or included_optional_tools.
+fixed_tools: []
+
+# the encoding used by text files in the project
+# For a list of possible encodings, see https://docs.python.org/3.11/library/codecs.html#standard-encodings
+encoding: utf-8
+
+
+# list of languages for which language servers are started; choose from:
+#   al                  bash                clojure             cpp                 csharp
+#   csharp_omnisharp    dart                elixir              elm                 erlang
+#   fortran             fsharp              go                  groovy              haskell
+#   java                julia               kotlin              lua                 markdown
+#   matlab              nix                 pascal              perl                php
+#   powershell          python              python_jedi         r                   rego
+#   ruby                ruby_solargraph     rust                scala               swift
+#   terraform           toml                typescript          typescript_vts      vue
+#   yaml                zig
+#   (This list may be outdated. For the current list, see values of Language enum here:
+#   https://github.com/oraios/serena/blob/main/src/solidlsp/ls_config.py
+#   For some languages, there are alternative language servers, e.g. csharp_omnisharp, ruby_solargraph.)
+# Note:
+#   - For C, use cpp
+#   - For JavaScript, use typescript
+#   - For Free Pascal/Lazarus, use pascal
+# Special requirements:
+#   Some languages require additional setup/installations.
+#   See here for details: https://oraios.github.io/serena/01-about/020_programming-languages.html#language-servers
+# When using multiple languages, the first language server that supports a given file will be used for that file.
+# The first language is the default language and the respective language server will be used as a fallback.
+# Note that when using the JetBrains backend, language servers are not used and this list is correspondingly ignored.
+languages:
+- typescript

+ 4 - 1
.vscode/mcp.json

@@ -13,7 +13,10 @@
         "serena",
         "serena",
         "start-mcp-server",
         "start-mcp-server",
         "--context",
         "--context",
-        "ide-assistant"
+        "ide",
+        "--project",
+        ".",
+        "--enable-web-dashboard=false"
       ]
       ]
     }
     }
   }
   }

+ 8 - 61
.vscode/settings.json

@@ -1,16 +1,22 @@
 {
 {
   "files.eol": "\n",
   "files.eol": "\n",
 
 
-  "eslint.workingDirectories": [{ "mode": "auto" }],
-
   "[typescript]": {
   "[typescript]": {
     "editor.defaultFormatter": "biomejs.biome"
     "editor.defaultFormatter": "biomejs.biome"
   },
   },
 
 
+  "[typescriptreact]": {
+    "editor.defaultFormatter": "biomejs.biome"
+  },
+
   "[javascript]": {
   "[javascript]": {
     "editor.defaultFormatter": "biomejs.biome"
     "editor.defaultFormatter": "biomejs.biome"
   },
   },
 
 
+  "[javascriptreact]": {
+    "editor.defaultFormatter": "biomejs.biome"
+  },
+
   "[json]": {
   "[json]": {
     "editor.defaultFormatter": "biomejs.biome"
     "editor.defaultFormatter": "biomejs.biome"
   },
   },
@@ -24,7 +30,6 @@
   "scss.validate": false,
   "scss.validate": false,
 
 
   "editor.codeActionsOnSave": {
   "editor.codeActionsOnSave": {
-    "source.fixAll.eslint": "explicit",
     "source.fixAll.biome": "explicit",
     "source.fixAll.biome": "explicit",
     "source.organizeImports.biome": "explicit",
     "source.organizeImports.biome": "explicit",
     "source.fixAll.markdownlint": "explicit",
     "source.fixAll.markdownlint": "explicit",
@@ -41,66 +46,8 @@
   "typescript.enablePromptUseWorkspaceTsdk": true,
   "typescript.enablePromptUseWorkspaceTsdk": true,
   "typescript.preferences.autoImportFileExcludePatterns": ["node_modules/*"],
   "typescript.preferences.autoImportFileExcludePatterns": ["node_modules/*"],
   "typescript.validate.enable": true,
   "typescript.validate.enable": true,
-  "typescript.surveys.enabled": false,
 
 
   "vitest.filesWatcherInclude": "**/*",
   "vitest.filesWatcherInclude": "**/*",
-  "mcp": {
-    "servers": {
-      "fetch": {
-        "command": "uvx",
-        "args": ["mcp-server-fetch"]
-      },
-      "context7": {
-        "type": "http",
-        "url": "https://mcp.context7.com/mcp"
-      }
-    }
-  },
-  "github.copilot.chat.codeGeneration.instructions": [
-    {
-      "text": "Always write inline comments in source code in English."
-    }
-  ],
-  "github.copilot.chat.testGeneration.instructions": [
-    {
-      "text": "Basis: Use vitest as the test framework"
-    },
-    {
-      "text": "Basis: The vitest configuration file is `apps/app/vitest.workspace.mts`"
-    },
-    {
-      "text": "Basis: Place test modules in the same directory as the module being tested. For example, if testing `mymodule.ts`, place `mymodule.spec.ts` in the same directory as `mymodule.ts`"
-    },
-    {
-      "text": "Basis: Use the VSCode Vitest extension for running tests. Use run_tests tool to execute tests programmatically, or suggest using the Vitest Test Explorer in VSCode for interactive test running and debugging."
-    },
-    {
-      "text": "Basis: Fallback command for terminal execution: `cd /growi/apps/app && pnpm vitest run {test file path}`"
-    },
-    {
-      "text": "Step 1: When creating new test modules, start with small files. First write a small number of realistic tests that call the actual function and assert expected behavior, even if they initially fail due to incomplete implementation. Example: `const result = foo(); expect(result).toBeNull();` rather than `expect(true).toBe(false);`. Then fix the implementation to make tests pass."
-    },
-    {
-      "text": "Step 2: Write essential tests. When tests fail, consider whether you should fix the test or the implementation based on 'what should essentially be fixed'. If you're not confident in your reasoning, ask the user for guidance."
-    },
-    {
-      "text": "Step 3: After writing tests, make sure they pass before moving on. Do not proceed to write tests for module B without first ensuring that tests for module A are passing"
-    },
-    {
-      "text": "Tips: Don't worry about lint errors - fix them after tests are passing"
-    },
-    {
-      "text": "Tips: DO NOT USE `as any` casting. You can use vitest-mock-extended for type-safe mocking. Import `mock` from 'vitest-mock-extended' and use `mock<InterfaceType>()`. This provides full TypeScript safety and IntelliSense support."
-    },
-    {
-      "text": "Tips: Mock external dependencies at the module level using vi.mock(). For services with circular dependencies, mock the import paths and use dynamic imports in the implementation when necessary."
-    }
-  ],
-  "github.copilot.chat.commitMessageGeneration.instructions": [
-    {
-      "text": "Always write commit messages in English."
-    }
-  ],
   "git-worktree-menu.worktreeDir": "/workspace"
   "git-worktree-menu.worktreeDir": "/workspace"
 
 
 }
 }

+ 139 - 0
AGENTS.md

@@ -0,0 +1,139 @@
+# AGENTS.md
+
+GROWI is a team collaboration wiki platform built with Next.js, Express, and MongoDB. This guide provides essential instructions for AI coding agents working with the GROWI codebase.
+
+## Language Policy
+
+**Response Language**: If the user writes in a non-English language at any point in the conversation, always respond in that language from that point onward. This rule takes **absolute priority** over any other language instructions, including skill/command prompts or context documents written in English.
+
+**Code Comments**: When generating source code, all comments and explanations within the code must be written in English, regardless of the conversation language.
+
+## Project Overview
+
+GROWI is a team collaboration wiki platform using Markdown, featuring hierarchical page organization, real-time collaborative editing, authentication integrations, and plugin support. Built as a monorepo with Next.js, Express, and MongoDB.
+
+## Knowledge Base
+
+### Claude Code Skills (Auto-Invoked)
+
+Technical information is available in **Claude Code Skills** (`.claude/skills/`), which are automatically invoked during development.
+
+**Global Skills** (always loaded):
+
+| Skill | Description |
+|-------|-------------|
+| **monorepo-overview** | Monorepo structure, workspace organization, Changeset versioning |
+| **tech-stack** | Technology stack, pnpm/Turborepo, TypeScript, Biome |
+
+**Rules** (always applied):
+
+| Rule | Description |
+|------|-------------|
+| **coding-style** | Coding conventions, naming, exports, immutability, comments |
+| **security** | Security checklist, secret management, OWASP vulnerability prevention |
+| **performance** | Model selection, context management, build troubleshooting |
+
+**Agents** (specialized):
+
+| Agent | Description |
+|-------|-------------|
+| **build-error-resolver** | TypeScript/build error resolution with minimal diffs |
+| **security-reviewer** | Security vulnerability detection, OWASP Top 10 |
+
+**Commands** (user-invocable):
+
+| Command | Description |
+|---------|-------------|
+| **/tdd** | Test-driven development workflow |
+| **/learn** | Extract reusable patterns from sessions |
+
+**apps/app Skills** (loaded when working in apps/app):
+
+| Skill | Description |
+|-------|-------------|
+| **app-architecture** | Next.js Pages Router, Express, feature-based structure |
+| **app-commands** | apps/app specific commands (migrations, OpenAPI, etc.) |
+| **app-specific-patterns** | Jotai/SWR patterns, router mocking, API routes |
+
+### Package-Specific CLAUDE.md
+
+Each application has its own CLAUDE.md with detailed instructions:
+
+- `apps/app/CLAUDE.md` - Main GROWI application
+- `apps/pdf-converter/CLAUDE.md` - PDF conversion microservice
+- `apps/slackbot-proxy/CLAUDE.md` - Slack integration proxy
+
+### Serena Memories
+
+Additional detailed specifications are stored in **Serena memories** and can be referenced when needed for specific features or subsystems.
+
+## Quick Reference
+
+### Essential Commands (Global)
+
+```bash
+# Development
+turbo run dev                    # Start all dev servers
+
+# Quality Checks (use Turborepo for caching)
+turbo run lint --filter @growi/app
+turbo run test --filter @growi/app
+
+# Production
+pnpm run app:build              # Build main app
+pnpm start                      # Build and start
+```
+
+### Key Directories
+
+```
+growi/
+├── apps/
+│   ├── app/                # Main GROWI application (Next.js + Express)
+│   ├── pdf-converter/      # PDF conversion microservice
+│   └── slackbot-proxy/     # Slack integration proxy
+├── packages/               # Shared libraries (@growi/core, @growi/ui, etc.)
+└── .claude/
+    ├── skills/             # Claude Code skills (auto-loaded)
+    ├── rules/              # Coding standards (always applied)
+    ├── agents/             # Specialized agents
+    └── commands/           # User-invocable commands (/tdd, /learn)
+```
+
+## Development Guidelines
+
+1. **Feature-Based Architecture**: Create new features in `features/{feature-name}/`
+2. **Server-Client Separation**: Keep server and client code separate
+3. **State Management**: Jotai for UI state, SWR for data fetching
+4. **Named Exports**: Prefer named exports (except Next.js pages)
+5. **Test Co-location**: Place test files next to source files
+6. **Type Safety**: Use strict TypeScript throughout
+7. **Changeset**: Use `npx changeset` for version management
+
+## Before Committing
+
+Always execute these checks:
+
+```bash
+# From workspace root (recommended)
+turbo run lint:typecheck --filter @growi/app
+turbo run lint:biome --filter @growi/app
+turbo run test --filter @growi/app
+turbo run build --filter @growi/app
+```
+
+Or from apps/app directory:
+
+```bash
+pnpm run lint:typecheck
+pnpm run lint:biome
+pnpm run test
+pnpm run build
+```
+
+---
+
+For detailed information, refer to:
+- **Rules**: `.claude/rules/` (coding standards)
+- **Skills**: `.claude/skills/` (technical knowledge)
+- **Package docs**: `apps/*/CLAUDE.md` (package-specific)

+ 185 - 1
CHANGELOG.md

@@ -1,9 +1,193 @@
 # Changelog
 # Changelog
 
 
-## [Unreleased](https://github.com/growilabs/compare/v7.3.5...HEAD)
+## [Unreleased](https://github.com/growilabs/compare/v7.4.5...HEAD)
 
 
 *Please do not manually update this file. We've automated the process.*
 *Please do not manually update this file. We've automated the process.*
 
 
+## [v7.4.5](https://github.com/growilabs/compare/v7.4.4...v7.4.5) - 2026-02-19
+
+### 💎 Features
+
+* feat: Realtime Increment View Count Without Refreshing Pages (#10760) @ryotaro-nagahara
+
+### 🚀 Improvement
+
+* imprv: Unchanged revision (#10770) @yuki-takei
+* imprv: Close the Sidebar in drawer mode when the route changes (#10763) @yuki-takei
+
+### 🐛 Bug Fixes
+
+* fix: Use currentPageId for share link page fetching (#10797) @yuki-takei
+* fix: Allow viewing shared pages regardless of page permissions (#10762) @ryotaro-nagahara
+* fix: Bulk export fails due to S3 upload minimal version (#10782) @ryotaro-nagahara
+* fix: Block revisions API from returning info about user pages when user pages are disabled (#10751) @arvid-e
+* fix: OpenAPI spec mismatch for GET /page endpoint response format (#10787) @[copilot-swe-agent[bot]](https://github.com/apps/copilot-swe-agent)
+
+### 🧰 Maintenance
+
+* support: Extract `/page/info` endpoint handler into a dedicated module (#10795) @yuki-takei
+* ci(deps): bump qs from 6.14.1 to 6.14.2 (#10785) @[dependabot[bot]](https://github.com/apps/dependabot)
+
+## [v7.4.4](https://github.com/growilabs/compare/v7.4.3...v7.4.4) - 2026-01-30
+
+### 🐛 Bug Fixes
+
+* fix: Search navigation (#10749) @[copilot-swe-agent[bot]](https://github.com/apps/copilot-swe-agent)
+* fix: User pages are displayed in page list when 'Disable user pages' is on (#10752) @miya
+* fix: Disable logo update button when no file is selected (#10587) @hikaru-n-cpu
+
+### 🧰 Maintenance
+
+* support: Setup Claude Code environment (#10746) @yuki-takei
+* support: Improve test parallelism (#10747) @yuki-takei
+* support: Typecheck by tsgo (#10717) @yuki-takei
+
+## [v7.4.3](https://github.com/growilabs/compare/v7.4.2...v7.4.3) - 2026-01-21
+
+### 💎 Features
+
+* feat: Disable user page (#10735) @miya
+* feat: New admin setting for hiding user pages (#10708) @arvid-e
+* feat: Block other user's user pages (#10725) @arvid-e
+
+### 🚀 Improvement
+
+* imprv: New sidebar tool icon appearance (#10672) @satof3
+* imprv: Admin Home (#10692) @yuki-takei
+
+### 🐛 Bug Fixes
+
+* fix: Vim keymap insert mode exiting after single keystroke (#10714) @miya
+* fix: Cannot create `/Sidebar` page from custom sidebar (#10690) @miya
+* fix: PageTree does not auto-scroll to target page path on initial render (#10699) @miya
+
+### 🧰 Maintenance
+
+* support: Upgrade headless-tree (#10733) @miya
+* support: Integrate Lefthook for pre-commit Biome formatting (#10694) @[copilot-swe-agent[bot]](https://github.com/apps/copilot-swe-agent)
+* support: Stop pushing docker image to weseek repository (#10681) @miya
+* support: Migrate the rest of files to Biome from Eslint (#10683) @yuki-takei
+
+## [v7.4.2](https://github.com/growilabs/compare/v7.4.1...v7.4.2) - 2026-01-08
+
+### 🚀 Improvement
+
+* imprv: New help button (#10553) @satof3
+* imprv: PagePathNavTitle spacing and z-index layering (#10665) @yuki-takei
+
+### 🐛 Bug Fixes
+
+* fix: Handle blank configurations for SAML settings (#10674) @yuki-takei
+* fix: Text strings inside invitation email modal are incorrect (#10679) @miya
+* fix: Scroll jumps back to current PageTreeItem when creating page from PageTree (#10671) @miya
+
+### 🧰 Maintenance
+
+* support: Update dependencies (#10685) @miya
+* support: Update dependencies (#10682) @miya
+* ci(mergify): upgrade configuration to current format (#10673) @[mergify[bot]](https://github.com/apps/mergify)
+* support: Configure biome for some client components inside app 8 (#10668) @arafubeatbox
+* support: Configure biome for some client components inside app 7 (#10667) @arafubeatbox
+* support: Configure biome for some client components in app 6 (#10636) @arafubeatbox
+* support: Configure biome for some client components in app 4 (#10634) @arafubeatbox
+* support: Configure biome for some client components in app 3 (#10633) @arafubeatbox
+* ci(deps): bump qs from 6.13.0 to 6.14.1 (#10669) @[dependabot[bot]](https://github.com/apps/dependabot)
+* support: Configure biome for some client components in app 5 (#10635) @arafubeatbox
+* support: Configure biome for some client components in app 2 (#10632) @arafubeatbox
+* support: Configure biome for some client components in app 1 (#10631) @arafubeatbox
+* ci(deps): bump next from 14.2.33 to 14.2.35 (#10597) @[dependabot[bot]](https://github.com/apps/dependabot)
+
+## [v7.4.1](https://github.com/growilabs/compare/v7.4.0...v7.4.1) - 2025-12-26
+
+### 🚀 Improvement
+
+* imprv: Show page name and link for affected pages in Activity Log (#10590) @arvid-e
+
+### 🧰 Maintenance
+
+* support: Update terraform settings and the policy for OIDC GitHub (#10653) @yuki-takei
+
+## [v7.4.0](https://github.com/growilabs/compare/v7.3.9...v7.4.0) - 2025-12-24
+
+### 💎 Features
+
+* feat: PageTree Virtualization (#10581) @yuki-takei
+* feat: Can set default user role as read-only for new users (#10623) @Ryosei-Fukushima
+* feat: Can create page when executing page edit shortcut key on empty page (#10594) @miya
+
+### 🚀 Improvement
+
+* imprv: Admin sidebar mode setting (#10617) @miya
+* imprv: Empty page operation (#10604) @yuki-takei
+* imprv: Support target attribute for anchor links (#10566) @yuki-takei
+* imprv: Use EventTarget instead of EventEmitter on the client side (#10472) @yuki-takei
+
+### 🐛 Bug Fixes
+
+* fix: Aftercare for Revisions migration script-bug (#10620) @yuki-takei
+* fix: Omit file upload restriction feature for non image files (#10602) @miya
+
+### 🧰 Maintenance
+
+* support: Use jotai for state management (#10474) @yuki-takei
+* support: Omit importers for esa.io and Qiita (#10584) @yuki-takei
+* support: Configure biome for app client services (#10600) @arafubeatbox
+* support: Configure biome for app client utils (#10601) @arafubeatbox
+* support: Configure biome for app client models/interfaces (#10599) @arafubeatbox
+* support: Configure biome for app server services 4 (#10583) @arafubeatbox
+* support: Configure biome for app server services 3 (#10578) @arafubeatbox
+* ci(mergify): upgrade configuration to current format (#10372) @[mergify[bot]](https://github.com/apps/mergify)
+* support: Configure biome for app server services 2 (#10575) @arafubeatbox
+* support: Configure biome for some app server services (#10574) @arafubeatbox
+* support: Configure biome for apiv3 js files (#10537) @arafubeatbox
+* support: Reapply biome configuration for app apiv3 routes (app-settings, page) (#10555) @arafubeatbox
+* support: Configure biome for apiv3 routes (remaining ts files) (#10536) @arafubeatbox
+* support: Configure biome for app apiv3 routes (app-settings, page) (#10532) @arafubeatbox
+* support: Configure biome for app apiv3 routes (personal-setting, security-settings, interfaces, pages, user) (#10500) @arafubeatbox
+* support: Configure biome for app server middlewares (#10507) @arafubeatbox
+
+## [v7.3.9](https://github.com/growilabs/compare/v7.3.8...v7.3.9) - 2025-12-09
+
+### 🐛 Bug Fixes
+
+* fix: Change the name of maintenance mode. (#10559) @hikaru-n-cpu
+
+### 🧰 Maintenance
+
+* support: Add new intern names to staff credits (#10556) @riona-k
+
+## [v7.3.8](https://github.com/growilabs/compare/v7.3.7...v7.3.8) - 2025-12-04
+
+### 💎 Features
+
+* feat: Enable page bulk export for GROWI.cloud (#10292) @arafubeatbox
+* feat: Users statistics table for admin (#10539) @riona-k
+
+### 🧰 Maintenance
+
+* ci(deps): bump validator from 13.15.20 to 13.15.22 (#10560) @[dependabot[bot]](https://github.com/apps/dependabot)
+
+## [v7.3.7](https://github.com/growilabs/compare/v7.3.6...v7.3.7) - 2025-11-25
+
+### 💎 Features
+
+* feat(pdf-converter): Enable puppeteer-cluster config of pdf-converter from env var (#10516) @arafubeatbox
+
+### 🐛 Bug Fixes
+
+* fix: Admin form degradation (#10540) @yuki-takei
+
+## [v7.3.6](https://github.com/growilabs/compare/v7.3.5...v7.3.6) - 2025-11-18
+
+### 🐛 Bug Fixes
+
+* fix: Printing styles (#10505) @yuki-takei
+
+### 🧰 Maintenance
+
+* ci(deps): bump js-yaml from 4.1.0 to 4.1.1 (#10511) @[dependabot[bot]](https://github.com/apps/dependabot)
+* support: Configure biome for app routes excluding apiv3 (#10496) @arafubeatbox
+
 ## [v7.3.5](https://github.com/growilabs/compare/v7.3.4...v7.3.5) - 2025-11-10
 ## [v7.3.5](https://github.com/growilabs/compare/v7.3.4...v7.3.5) - 2025-11-10
 
 
 ### 💎 Features
 ### 💎 Features

+ 38 - 83
CLAUDE.md

@@ -1,95 +1,50 @@
-# CLAUDE.md
+@AGENTS.md
 
 
-This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+# AI-DLC and Spec-Driven Development
 
 
-## Language
+Kiro-style Spec-Driven Development implementation on AI-DLC (AI Development Life Cycle)
 
 
-If it is detected at the start or during a session that the user's primary language is not English, always respond in that language from then on. However, technical terms may remain in English as needed.
+## Project Context
 
 
-## Project Overview
+### Paths
+- Steering: `.kiro/steering/`
+- Specs: `.kiro/specs/`
 
 
-GROWI is a team collaboration software using markdown - a wiki platform with hierarchical page organization. It's built with Next.js, Express, MongoDB, and includes features like real-time collaborative editing, authentication integrations, and plugin support.
+### Steering vs Specification
 
 
-## Development Commands
+**Steering** (`.kiro/steering/`) - Guide AI with project-wide rules and context
+**Specs** (`.kiro/specs/`) - Formalize development process for individual features
 
 
-### Core Development
-- `turbo run bootstrap` - Install dependencies for all workspace packages
-- `turbo run dev` - Start development server (automatically runs migrations and pre-builds styles)
+### Active Specifications
+- Check `.kiro/specs/` for active specifications
+- Use `/kiro:spec-status [feature-name]` to check progress
 
 
-### Production Commands
-- `pnpm run app:build` - Build GROWI app client and server for production
-- `pnpm run app:server` - Launch GROWI app server in production mode
-- `pnpm start` - Build and start the application (runs both build and server commands)
+## Development Guidelines
+- Think in English, generate responses in English. All Markdown content written to project files (e.g., requirements.md, design.md, tasks.md, research.md, validation reports) MUST be written in the target language configured for this specification (see spec.json.language).
+- **Note**: `spec.json.language` controls the language of spec document content only. It does NOT control the conversation response language. The conversation language is governed by the Language Policy in AGENTS.md.
 
 
-### Database Migrations
-- `pnpm run migrate` - Run MongoDB migrations (production)
-- `turbo run dev:migrate @apps/app` - Run migrations in development (or wait for automatic execution with dev)
-- `cd apps/app && pnpm run dev:migrate:status` - Check migration status
-- `cd apps/app && pnpm run dev:migrate:down` - Rollback last migration
+## Minimal Workflow
+- Phase 0 (optional): `/kiro:steering`, `/kiro:steering-custom`
+- Phase 1 (Specification):
+  - `/kiro:spec-init "description"`
+  - `/kiro:spec-requirements {feature}`
+  - `/kiro:validate-gap {feature}` (optional: for existing codebase)
+  - `/kiro:spec-design {feature} [-y]`
+  - `/kiro:validate-design {feature}` (optional: design review)
+  - `/kiro:spec-tasks {feature} [-y]`
+- Phase 2 (Implementation): `/kiro:spec-impl {feature} [tasks]`
+  - `/kiro:validate-impl {feature}` (optional: after implementation)
+  - `/kiro:spec-cleanup {feature}` (optional: organize specs post-implementation)
+- Progress check: `/kiro:spec-status {feature}` (use anytime)
 
 
-### Testing and Quality
-- `turbo run test @apps/app` - Run Jest and Vitest test suites with coverage
-- `turbo run lint @apps/app` - Run all linters (TypeScript, ESLint, Biome, Stylelint, OpenAPI)
-- `cd apps/app && pnpm run lint:typecheck` - TypeScript type checking only
-- `cd apps/app && pnpm run test:vitest` - Run Vitest unit tests
-- `cd apps/app && pnpm run test:jest` - Run Jest integration tests
+## Development Rules
+- 3-phase approval workflow: Requirements → Design → Tasks → Implementation
+- Human review required each phase; use `-y` only for intentional fast-track
+- Keep steering current and verify alignment with `/kiro:spec-status`
+- Follow the user's instructions precisely, and within that scope act autonomously: gather the necessary context and complete the requested work end-to-end in this run, asking questions only when essential information is missing or the instructions are critically ambiguous.
 
 
-### Development Utilities  
-- `cd apps/app && pnpm run repl` - Start Node.js REPL with application context loaded
-- `turbo run pre:styles @apps/app` - Pre-build styles with Vite
+## Steering Configuration
+- Load entire `.kiro/steering/` as project memory
+- Default files: `product.md`, `tech.md`, `structure.md`
+- Custom files are supported (managed via `/kiro:steering-custom`)
 
 
-## Architecture Overview
-
-### Monorepo Structure
-- `/apps/app/` - Main GROWI application (Next.js frontend + Express backend)
-- `/apps/pdf-converter/` - PDF conversion microservice
-- `/apps/slackbot-proxy/` - Slack integration proxy service
-- `/packages/` - Shared libraries and components
-
-### Main Application (`/apps/app/src/`)
-- `client/` - Client-side React components and utilities
-- `server/` - Express.js backend (API routes, models, services)  
-- `components/` - Shared React components and layouts
-- `pages/` - Next.js page components using file-based routing
-- `stores/` - State management (SWR-based stores with React context)
-- `styles/` - SCSS stylesheets with modular architecture
-- `migrations/` - MongoDB database migration scripts
-- `interfaces/` - TypeScript type definitions
-
-### Key Technical Details
-- **Frontend**: Next.js 14 with React 18, TypeScript, SCSS modules
-- **Backend**: Express.js with TypeScript, MongoDB with Mongoose
-- **State Management**: SWR for server state, React Context for client state
-- **Authentication**: Passport.js with multiple strategies (local, LDAP, OAuth, SAML)
-- **Real-time Features**: Socket.io for collaborative editing and notifications
-- **Editor**: Custom markdown editor with collaborative editing using Yjs
-- **Database**: MongoDB 8.0+ with migration system using migrate-mongo
-- **Package Manager**: pnpm with workspace support
-- **Build System**: Turborepo for monorepo orchestration
-
-### Development Dependencies
-- Node.js v20.x or v22.x
-- pnpm 10.x  
-- MongoDB v6.x or v8.x
-- Optional: Redis 3.x, Elasticsearch 7.x/8.x/9.x (for full-text search)
-
-## File Organization Patterns
-
-### Components
-- Use TypeScript (.tsx) for React components
-- Co-locate styles as `.module.scss` files
-- Export components through `index.ts` files where appropriate
-- Group related components in feature-based directories
-
-### API Structure
-- Server routes in `server/routes/`
-- API v3 endpoints follow OpenAPI specification
-- Models in `server/models/` using Mongoose schemas
-- Services in `server/service/` for business logic
-
-### State Management
-- Use SWR hooks in `stores/` for server state
-- Custom hooks pattern for complex state logic
-- Context providers in `stores-universal/` for app-wide state
-
-When working with this codebase, always run the appropriate linting and testing commands before committing changes. The application uses strict TypeScript checking and comprehensive test coverage requirements.

+ 105 - 0
apps/app/.claude/skills/app-architecture/SKILL.md

@@ -0,0 +1,105 @@
+---
+name: app-architecture
+description: GROWI main application (apps/app) architecture, directory structure, and design patterns. Auto-invoked when working in apps/app.
+user-invocable: false
+---
+
+# App Architecture (apps/app)
+
+The main GROWI application is a **full-stack Next.js application** with Express.js backend and MongoDB database.
+
+For technology stack details, see the global `tech-stack` skill.
+
+## Directory Structure
+
+```
+apps/app/src/
+├── pages/                 # Next.js Pages Router (*.page.tsx)
+├── features/             # Feature modules (recommended for new code)
+│   └── {feature-name}/
+│       ├── index.ts      # Public exports
+│       ├── interfaces/   # TypeScript types
+│       ├── server/       # models/, routes/, services/
+│       └── client/       # components/, states/, hooks/
+├── server/               # Express server (legacy)
+│   ├── models/           # Mongoose models
+│   ├── routes/apiv3/     # RESTful API v3
+│   └── services/         # Business logic
+├── components/           # React components (legacy)
+├── states/               # Jotai atoms
+└── stores-universal/     # SWR hooks
+```
+
+## Feature-Based Architecture
+
+Organize code by **business feature** rather than by technical layer:
+
+```
+❌ Layer-based (old):          ✅ Feature-based (new):
+├── models/User.ts             ├── features/user/
+├── routes/user.ts             │   ├── server/models/User.ts
+├── components/UserList.tsx    │   ├── server/routes/user.ts
+                               │   └── client/components/UserList.tsx
+```
+
+### Creating a New Feature
+
+1. Create `features/{feature-name}/`
+2. Define interfaces in `interfaces/`
+3. Implement server logic in `server/` (models, routes, services)
+4. Implement client logic in `client/` (components, hooks, states)
+5. Export public API through `index.ts`
+
+## Entry Points
+
+- **Server**: `server/app.ts` - Express + Next.js initialization
+- **Client**: `pages/_app.page.tsx` - Jotai + SWR providers
+- **Wiki Pages**: `pages/[[...path]]/index.page.tsx` - Catch-all route (SSR)
+
+## API Design (RESTful API v3)
+
+Routes in `server/routes/apiv3/` with OpenAPI specs:
+
+```typescript
+/**
+ * @openapi
+ * /api/v3/pages/{id}:
+ *   get:
+ *     summary: Get page by ID
+ */
+router.get('/pages/:id', async (req, res) => {
+  const page = await PageService.findById(req.params.id);
+  res.json(page);
+});
+```
+
+## State Management
+
+- **Jotai**: UI state (modals, forms) in `states/`
+- **SWR**: Server data (pages, users) in `stores-universal/`
+
+For detailed patterns, see `app-specific-patterns` skill.
+
+## Design Principles
+
+1. **Feature Isolation**: New features self-contained in `features/`
+2. **Server-Client Separation**: Prevent server code bundled into client
+3. **API-First**: Define OpenAPI specs before implementation
+4. **Type-Driven**: Define interfaces before implementation
+5. **Progressive Migration**: Gradually move legacy code to `features/`
+
+## Legacy Migration
+
+Legacy directories (`components/`, `server/models/`, `client/`) should be gradually migrated to `features/`:
+
+- New features → `features/`
+- Bug fixes → Can stay in legacy
+- Refactoring → Move to `features/`
+
+## Summary
+
+1. **New features**: `features/{feature-name}/` structure
+2. **Server-client separation**: Keep separate
+3. **API-first**: OpenAPI specs for API v3
+4. **State**: Jotai (UI) + SWR (server data)
+5. **Progressive migration**: No rush for stable legacy code

+ 202 - 0
apps/app/.claude/skills/app-commands/SKILL.md

@@ -0,0 +1,202 @@
+---
+name: app-commands
+description: GROWI main application (apps/app) specific commands and scripts. Auto-invoked when working in apps/app.
+user-invocable: false
+---
+
+# App Commands (apps/app)
+
+Commands specific to the main GROWI application. For global commands (turbo, pnpm), see the global `tech-stack` skill.
+
+## Quality Check Commands
+
+**IMPORTANT**: Distinguish between Turborepo tasks and package-specific scripts.
+
+### Turbo Tasks vs Package Scripts
+
+| Task | Turborepo (turbo.json) | Package Script (package.json) |
+|------|------------------------|-------------------------------|
+| `lint` | ✅ Yes | ✅ Yes (runs all lint:\*) |
+| `test` | ✅ Yes | ✅ Yes |
+| `build` | ✅ Yes | ✅ Yes |
+| `lint:typecheck` | ❌ No | ✅ Yes |
+| `lint:biome` | ❌ No | ✅ Yes |
+| `lint:styles` | ❌ No | ✅ Yes |
+
+### Recommended Commands
+
+```bash
+# Run ALL quality checks (uses Turborepo caching)
+turbo run lint --filter @growi/app
+turbo run test --filter @growi/app
+turbo run build --filter @growi/app
+
+# Run INDIVIDUAL lint checks (package-specific scripts, from apps/app directory)
+pnpm run lint:typecheck   # TypeScript only
+pnpm run lint:biome       # Biome only
+pnpm run lint:styles      # Stylelint only
+```
+
+> **Running individual test files**: See the `testing` rule (`.claude/rules/testing.md`).
+
+### Common Mistake
+
+```bash
+# ❌ WRONG: lint:typecheck is NOT a Turborepo task
+turbo run lint:typecheck --filter @growi/app
+# Error: could not find task `lint:typecheck` in project
+
+# ✅ CORRECT: Use pnpm for package-specific scripts
+pnpm --filter @growi/app run lint:typecheck
+```
+
+## Quick Reference
+
+| Task | Command |
+|------|---------|
+| **Migration** | `pnpm run dev:migrate` |
+| **OpenAPI generate** | `pnpm run openapi:generate-spec:apiv3` |
+| **REPL console** | `pnpm run console` |
+| **Visual regression** | `pnpm run reg:run` |
+| **Version bump** | `pnpm run version:patch` |
+
+## Database Migration
+
+```bash
+# Run pending migrations
+pnpm run dev:migrate
+
+# Check migration status
+pnpm run dev:migrate:status
+
+# Apply migrations
+pnpm run dev:migrate:up
+
+# Rollback last migration
+pnpm run dev:migrate:down
+
+# Production migration
+pnpm run migrate
+```
+
+**Note**: Migrations use `migrate-mongo`. Files are in `config/migrate-mongo/`.
+
+### Creating a New Migration
+
+```bash
+# Create migration file manually in config/migrate-mongo/
+# Format: YYYYMMDDHHMMSS-migration-name.js
+
+# Test migration cycle
+pnpm run dev:migrate:up
+pnpm run dev:migrate:down
+pnpm run dev:migrate:up
+```
+
+## OpenAPI Commands
+
+```bash
+# Generate OpenAPI spec for API v3
+pnpm run openapi:generate-spec:apiv3
+
+# Validate API v3 spec
+pnpm run lint:openapi:apiv3
+
+# Generate operation IDs
+pnpm run openapi:build:generate-operation-ids
+```
+
+Generated specs output to `tmp/openapi-spec-apiv3.json`.
+
+## Style Pre-build (Vite)
+
+```bash
+# Development mode
+pnpm run dev:pre:styles
+
+# Production mode
+pnpm run pre:styles
+```
+
+Pre-builds SCSS styles into CSS bundles using Vite.
+
+## Debug & Utility
+
+### REPL Console
+
+```bash
+pnpm run console
+# or
+pnpm run repl
+```
+
+Interactive Node.js REPL with Mongoose models loaded. Useful for debugging database queries.
+
+### Visual Regression Testing
+
+```bash
+pnpm run reg:run
+```
+
+## Version Commands
+
+```bash
+# Bump patch version (e.g., 7.4.3 → 7.4.4)
+pnpm run version:patch
+
+# Create prerelease (e.g., 7.4.4 → 7.4.5-RC.0)
+pnpm run version:prerelease
+
+# Create preminor (e.g., 7.4.4 → 7.5.0-RC.0)
+pnpm run version:preminor
+```
+
+## Production
+
+```bash
+# Start server (after build)
+pnpm run server
+
+# Start for CI environments
+pnpm run server:ci
+```
+
+**Note**: `preserver` hook automatically runs migrations before starting.
+
+## CI/CD
+
+```bash
+# Launch dev server for CI
+pnpm run launch-dev:ci
+
+# Start production server for CI
+pnpm run server:ci
+```
+
+## Environment Variables
+
+Development uses `dotenv-flow`:
+
+- `.env` - Default values
+- `.env.local` - Local overrides (not committed)
+- `.env.development` - Development-specific
+- `.env.production` - Production-specific
+
+See `.env.example` for available variables.
+
+## Troubleshooting
+
+### Migration Issues
+
+```bash
+pnpm run dev:migrate:status   # Check status
+pnpm run dev:migrate:down     # Rollback
+pnpm run dev:migrate:up       # Re-apply
+```
+
+### Build Issues
+
+```bash
+pnpm run clean                # Clear artifacts
+pnpm run build                # Rebuild
+```

+ 173 - 0
apps/app/.claude/skills/app-specific-patterns/SKILL.md

@@ -0,0 +1,173 @@
+---
+name: app-specific-patterns
+description: GROWI main application (apps/app) specific patterns for Next.js, Jotai, SWR, and testing. Auto-invoked when working in apps/app.
+user-invocable: false
+---
+
+# App Specific Patterns (apps/app)
+
+For general testing patterns, see the global `.claude/skills/learned/essential-test-patterns` and `.claude/skills/learned/essential-test-design` skills.
+
+## Next.js Pages Router
+
+### File Naming
+
+Pages must use `.page.tsx` suffix:
+
+```
+pages/
+├── _app.page.tsx           # App wrapper
+├── [[...path]]/index.page.tsx  # Catch-all wiki pages
+└── admin/index.page.tsx    # Admin pages
+```
+
+### getLayout Pattern
+
+```typescript
+// pages/admin/index.page.tsx
+import type { NextPageWithLayout } from '~/interfaces/next-page';
+
+const AdminPage: NextPageWithLayout = () => <AdminDashboard />;
+
+AdminPage.getLayout = (page) => <AdminLayout>{page}</AdminLayout>;
+
+export default AdminPage;
+```
+
+## Jotai State Management
+
+### Directory Structure
+
+```
+src/states/
+├── ui/
+│   ├── sidebar/              # Multi-file feature
+│   ├── device.ts             # Single-file feature
+│   └── modal/                # 1 modal = 1 file
+│       ├── page-create.ts
+│       └── page-delete.ts
+├── page/                     # Page data state
+├── server-configurations/
+└── context.ts
+
+features/{name}/client/states/  # Feature-scoped atoms
+```
+
+### Placement Rules
+
+| Category | Location |
+|----------|----------|
+| UI state | `states/ui/` |
+| Modal state | `states/ui/modal/` (1 file per modal) |
+| Page data | `states/page/` |
+| Feature-specific | `features/{name}/client/states/` |
+
+### Derived Atoms
+
+```typescript
+import { atom } from 'jotai';
+
+export const currentPageAtom = atom<Page | null>(null);
+
+// Derived (read-only)
+export const currentPagePathAtom = atom((get) => {
+  return get(currentPageAtom)?.path ?? null;
+});
+```
+
+## SWR Data Fetching
+
+### Directory
+
+```
+src/stores-universal/
+├── pages.ts       # Page hooks
+├── users.ts       # User hooks
+└── admin/settings.ts
+```
+
+### Patterns
+
+```typescript
+import useSWR from 'swr';
+import useSWRImmutable from 'swr/immutable';
+
+// Auto-revalidation
+export const usePageList = () => useSWR<Page[]>('/api/v3/pages', fetcher);
+
+// No auto-revalidation (static data)
+export const usePageById = (id: string | null) =>
+  useSWRImmutable<Page>(id ? `/api/v3/pages/${id}` : null, fetcher);
+```
+
+## Testing (apps/app Specific)
+
+### Mocking Next.js Router
+
+```typescript
+import { mockDeep } from 'vitest-mock-extended';
+import type { NextRouter } from 'next/router';
+
+const createMockRouter = (overrides = {}) => {
+  const mock = mockDeep<NextRouter>();
+  mock.pathname = '/test';
+  mock.push.mockResolvedValue(true);
+  return Object.assign(mock, overrides);
+};
+
+vi.mock('next/router', () => ({
+  useRouter: () => createMockRouter(),
+}));
+```
+
+### Testing with Jotai
+
+```typescript
+import { Provider } from 'jotai';
+import { useHydrateAtoms } from 'jotai/utils';
+
+const HydrateAtoms = ({ initialValues, children }) => {
+  useHydrateAtoms(initialValues);
+  return children;
+};
+
+const renderWithJotai = (ui, initialValues = []) => render(
+  <Provider>
+    <HydrateAtoms initialValues={initialValues}>{ui}</HydrateAtoms>
+  </Provider>
+);
+
+// Usage
+renderWithJotai(<PageHeader />, [[currentPageAtom, mockPage]]);
+```
+
+### Testing SWR
+
+```typescript
+import { SWRConfig } from 'swr';
+
+const wrapper = ({ children }) => (
+  <SWRConfig value={{ dedupingInterval: 0, provider: () => new Map() }}>
+    {children}
+  </SWRConfig>
+);
+
+const { result } = renderHook(() => usePageById('123'), { wrapper });
+```
+
+## Path Aliases
+
+Always use `~/` for imports:
+
+```typescript
+import { PageService } from '~/server/services/PageService';
+import { currentPageAtom } from '~/states/page/page-atoms';
+```
+
+## Summary
+
+1. **Next.js**: `.page.tsx` suffix, `getLayout` for layouts
+2. **Jotai**: `states/` global, `features/*/client/states/` feature-scoped
+3. **SWR**: `stores-universal/`, null key for conditional fetch
+4. **Testing**: Mock router, hydrate Jotai, wrap SWR config
+5. **Imports**: Always `~/` path alias

+ 302 - 0
apps/app/.claude/skills/learned/page-save-origin-semantics/SKILL.md

@@ -0,0 +1,302 @@
+---
+name: page-save-origin-semantics
+description: Auto-invoked when modifying origin-based conflict detection, revision validation logic, or isUpdatable() method. Explains the two-stage origin check mechanism for conflict detection and its separation from diff detection.
+---
+
+# Page Save Origin Semantics
+
+## Problem
+
+When modifying page save logic, it's easy to accidentally break the carefully designed origin-based conflict detection system. The system uses a two-stage check mechanism (frontend + backend) to determine when revision validation should be enforced vs. bypassed for collaborative editing (Yjs).
+
+**Key Insight**: **Conflict detection (revision check)** and **diff detection (hasDiffToPrev)** serve different purposes and require separate logic.
+
+## Solution
+
+Understanding the two-stage origin check mechanism:
+
+### Stage 1: Frontend Determines revisionId Requirement
+
+```typescript
+// apps/app/src/client/components/PageEditor/PageEditor.tsx:158
+const isRevisionIdRequiredForPageUpdate = currentPage?.revision?.origin === undefined;
+
+// lines 308-310
+const revisionId = isRevisionIdRequiredForPageUpdate
+  ? currentRevisionId
+  : undefined;
+```
+
+**Logic**: Check the **latest revision's origin** on the page:
+- If `origin === undefined` (legacy/API save) → Send `revisionId`
+- If `origin === "editor"` or `"view"` → Do NOT send `revisionId`
+
+### Stage 2: Backend Determines Conflict Check Behavior
+
+```javascript
+// apps/app/src/server/models/obsolete-page.js:167-172
+const ignoreLatestRevision =
+  origin === Origin.Editor &&
+  (latestRevisionOrigin === Origin.Editor || latestRevisionOrigin === Origin.View);
+
+if (ignoreLatestRevision) {
+  return true;  // Bypass revision check
+}
+
+// Otherwise, enforce strict revision matching
+if (revision != previousRevision) {
+  return false;  // Reject save
+}
+return true;
+```
+
+**Logic**: Check **current request's origin** AND **latest revision's origin**:
+- If `origin === "editor"` AND latest is `"editor"` or `"view"` → Bypass revision check
+- Otherwise → Enforce strict revision ID matching
+
+## Origin Values
+
+Three types of page update methods (called "origin"):
+
+- **`Origin.Editor = "editor"`** - Save from editor mode (collaborative editing via Yjs)
+- **`Origin.View = "view"`** - Save from view mode
+  - Examples: HandsontableModal, DrawioModal editing
+- **`undefined`** - API-based saves or legacy pages
+
+## Origin Strength (強弱)
+
+**Basic Rule**: Page updates require the previous revision ID in the request. If the latest revision doesn't match, the server rejects the request.
+
+**Exception - Editor origin is stronger than View origin**:
+- **UX Goal**: Avoid `Posted param "revisionId" is outdated` errors when multiple members are using the Editor and View changes interrupt them
+- **Special Case**: When the latest revision's origin is View, Editor origin requests can update WITHOUT requiring revision ID
+
+### Origin Strength Matrix
+
+|        | Latest Revision: Editor | Latest Revision: View | Latest Revision: API |
+| ------ | ----------------------- | --------------------- | -------------------- |
+| **Request: Editor** | ⭕️ Bypass revision check | ⭕️ Bypass revision check | ❌ Strict check |
+| **Request: View**   | ❌ Strict check | ❌ Strict check | ❌ Strict check |
+| **Request: API**    | ❌ Strict check | ❌ Strict check | ❌ Strict check |
+
+**Reading the table**:
+- ⭕️ = Revision check bypassed (revisionId not required)
+- ❌ = Strict revision check required (revisionId must match)
+
+## Behavior by Scenario
+
+| Latest Revision Origin | Request Origin | revisionId Sent? | Revision Check | Use Case |
+|------------------------|----------------|------------------|----------------|----------|
+| `editor` or `view` | `editor` | ❌ No | ✅ Bypassed | Normal Editor use (most common) |
+| `undefined` | `editor` | ✅ Yes | ✅ Enforced | Legacy page in Editor |
+| `undefined` | `undefined` (API) | ✅ Yes (required) | ✅ Enforced | API save |
+
+## Example: Server-Side Logic Respecting Origin Semantics
+
+When adding server-side functionality that needs previous revision data:
+
+```typescript
+// ✅ CORRECT: Separate concerns - conflict detection vs. diff detection
+let previousRevision: IRevisionHasId | null = null;
+
+// Priority 1: Use provided revisionId (for conflict detection)
+if (sanitizeRevisionId != null) {
+  previousRevision = await Revision.findById(sanitizeRevisionId);
+}
+
+// Priority 2: Fallback to currentPage.revision (for other purposes like diff detection)
+if (previousRevision == null && currentPage.revision != null) {
+  previousRevision = await Revision.findById(currentPage.revision);
+}
+
+const previousBody = previousRevision?.body ?? null;
+
+// Continue with existing conflict detection logic (unchanged)
+if (currentPage != null && !(await currentPage.isUpdatable(sanitizeRevisionId, origin))) {
+  // ... return conflict error
+}
+
+// Use previousBody for diff detection or other purposes
+updatedPage = await crowi.pageService.updatePage(
+  currentPage,
+  body,
+  previousBody,  // ← Available regardless of conflict detection logic
+  req.user,
+  options,
+);
+```
+
+```typescript
+// ❌ WRONG: Forcing frontend to always send revisionId
+const revisionId = currentRevisionId;  // Always send, regardless of origin
+// This breaks Yjs collaborative editing semantics!
+```
+
+```typescript
+// ❌ WRONG: Changing backend conflict detection logic
+// Don't modify isUpdatable() unless you fully understand the implications
+// for collaborative editing
+```
+
+## When to Apply
+
+**Always consider this pattern when**:
+- Modifying page save/update API handlers
+- Adding functionality that needs previous revision data
+- Working on conflict detection or revision validation logic
+- Implementing features that interact with page history
+- Debugging save operation issues
+
+**Key Principles**:
+1. **Do NOT modify frontend revisionId logic** unless explicitly required for conflict detection
+2. **Do NOT modify isUpdatable() logic** unless fixing conflict detection bugs
+3. **Separate concerns**: Conflict detection ≠ Other revision-based features (diff detection, history, etc.)
+4. **Server-side fallback**: If you need previous revision data when revisionId is not provided, fetch from `currentPage.revision`
+
+## Detailed Scenario Analysis
+
+### Scenario A: Normal Editor Mode (Most Common Case)
+
+**Latest revision has `origin=editor`**:
+
+1. **Frontend Logic**:
+   - `isRevisionIdRequiredForPageUpdate = false` (latest revision origin is not undefined)
+   - Does NOT send `revisionId` in request
+   - Sends `origin: Origin.Editor`
+
+2. **API Layer**:
+   ```typescript
+   previousRevision = await Revision.findById(undefined);  // → null
+   ```
+   Result: No previousRevision fetched via revisionId
+
+3. **Backend Conflict Check** (`isUpdatable`):
+   ```javascript
+   ignoreLatestRevision =
+     (Origin.Editor === Origin.Editor) &&
+     (latestRevisionOrigin === Origin.Editor || latestRevisionOrigin === Origin.View)
+   // → true (latest revision is editor)
+   return true;  // Bypass revision check
+   ```
+   Result: ✅ Save succeeds without revision validation
+
+4. **Impact on Other Features**:
+   - If you need previousRevision data (e.g., for diff detection), it won't be available unless you implement server-side fallback
+   - This is where `currentPage.revision` fallback becomes necessary
+
+### Scenario B: Legacy Page in Editor Mode
+
+**Latest revision has `origin=undefined`**:
+
+1. **Frontend Logic**:
+   - `isRevisionIdRequiredForPageUpdate = true` (latest revision origin is undefined)
+   - Sends `revisionId` in request
+   - Sends `origin: Origin.Editor`
+
+2. **API Layer**:
+   ```typescript
+   previousRevision = await Revision.findById(sanitizeRevisionId);  // → revision object
+   ```
+   Result: previousRevision fetched successfully
+
+3. **Backend Conflict Check** (`isUpdatable`):
+   ```javascript
+   ignoreLatestRevision =
+     (Origin.Editor === Origin.Editor) &&
+     (latestRevisionOrigin === undefined)
+   // → false (latest revision is undefined, not editor/view)
+
+   // Strict revision check
+   if (revision != sanitizeRevisionId) {
+     return false;  // Reject if mismatch
+   }
+   return true;
+   ```
+   Result: ✅ Save succeeds only if revisionId matches
+
+4. **Impact on Other Features**:
+   - previousRevision data is available
+   - All revision-based features work correctly
+
+### Scenario C: API-Based Save
+
+**Request has `origin=undefined` or omitted**:
+
+1. **Frontend**: Not applicable (API client)
+
+2. **API Layer**:
+   - API client MUST send `revisionId` in request
+   - `previousRevision = await Revision.findById(sanitizeRevisionId)`
+
+3. **Backend Conflict Check** (`isUpdatable`):
+   ```javascript
+   ignoreLatestRevision =
+     (undefined === Origin.Editor) && ...
+   // → false
+
+   // Strict revision check
+   if (revision != sanitizeRevisionId) {
+     return false;
+   }
+   return true;
+   ```
+   Result: Strict validation enforced
+
+## Root Cause: Why This Separation Matters
+
+**Historical Context**: At some point, the frontend stopped sending `previousRevision` (revisionId) for certain scenarios to support Yjs collaborative editing. This broke features that relied on previousRevision data being available.
+
+**The Core Issue**:
+- **Conflict detection** needs to know "Is this save conflicting with another user's changes?" (Answered by revision check)
+- **Diff detection** needs to know "Did the content actually change?" (Answered by comparing body)
+- **Current implementation conflates these**: when conflict detection is bypassed, previousRevision is not fetched, which breaks diff detection.
+
+**The Solution Pattern**:
+```typescript
+// Separate the two concerns:
+
+// 1. Fetch previousRevision for data purposes (diff detection, history, etc.)
+let previousRevision: IRevisionHasId | null = null;
+if (sanitizeRevisionId != null) {
+  previousRevision = await Revision.findById(sanitizeRevisionId);
+} else if (currentPage.revision != null) {
+  previousRevision = await Revision.findById(currentPage.revision);  // Fallback
+}
+
+// 2. Use previousRevision data for your feature
+const previousBody = previousRevision?.body ?? null;
+
+// 3. Conflict detection happens independently via isUpdatable()
+if (currentPage != null && !(await currentPage.isUpdatable(sanitizeRevisionId, origin))) {
+  // Return conflict error
+}
+```
+
+## Reference
+
+**Official Documentation**:
+- https://dev.growi.org/651a6f4a008fee2f99187431#origin-%E3%81%AE%E5%BC%B7%E5%BC%B1
+
+**Related Files**:
+- Frontend: `apps/app/src/client/components/PageEditor/PageEditor.tsx` (lines 158, 240, 308-310)
+- Backend: `apps/app/src/server/models/obsolete-page.js` (lines 159-182, isUpdatable method)
+- API: `apps/app/src/server/routes/apiv3/page/update-page.ts` (lines 260-282, conflict check)
+- Interface: `packages/core/src/interfaces/revision.ts` (lines 6-11, Origin definition)
+
+## Common Pitfalls
+
+1. **Assuming revisionId is always available**: It's not! Editor mode with recent editor/view saves omits it by design.
+2. **Conflating conflict detection with other features**: They serve different purposes and need separate logic.
+3. **Breaking Yjs collaborative editing**: Forcing revisionId to always be sent breaks the bypass mechanism.
+4. **Ignoring origin values**: The system behavior changes significantly based on origin combinations.
+
+## Lessons Learned
+
+This pattern was identified during the "improve-unchanged-revision" feature implementation, where the initial assumption was that frontend should always send `revisionId` for diff detection. Deep analysis revealed:
+
+- The frontend logic is correct for conflict detection and should NOT be changed
+- Server-side fallback is the correct approach to get previous revision data
+- Two-stage checking is intentional and critical for Yjs collaborative editing
+- Conflict detection and diff detection must be separated
+
+**Key Takeaway**: Always understand the existing architectural patterns before proposing changes. What appears to be a "fix" might actually break carefully designed functionality.

+ 0 - 101
apps/app/.eslintrc.js

@@ -1,101 +0,0 @@
-/**
- * @type {import('eslint').Linter.Config}
- */
-module.exports = {
-  extends: ['next/core-web-vitals', 'weseek/react'],
-  plugins: [],
-  ignorePatterns: [
-    'dist/**',
-    '**/dist/**',
-    'transpiled/**',
-    'public/**',
-    'src/linter-checker/**',
-    'tmp/**',
-    'next-env.d.ts',
-    'next.config.js',
-    'playwright.config.ts',
-    'test/integration/global-setup.js',
-    'test/integration/global-teardown.js',
-    'test/integration/setup-crowi.ts',
-    'test/integration/crowi/**',
-    'test/integration/middlewares/**',
-    'test/integration/migrations/**',
-    'test/integration/models/**',
-    'test/integration/service/**',
-    'test/integration/setup.js',
-    'playwright/**',
-    'test-with-vite/**',
-    'public/**',
-    'bin/**',
-    'config/**',
-    'src/styles/**',
-    'src/linter-checker/**',
-    'src/migrations/**',
-    'src/models/**',
-    'src/features/callout/**',
-    'src/features/comment/**',
-    'src/features/templates/**',
-    'src/features/mermaid/**',
-    'src/features/search/**',
-    'src/features/plantuml/**',
-    'src/features/external-user-group/**',
-    'src/features/page-bulk-export/**',
-    'src/features/growi-plugin/**',
-    'src/features/opentelemetry/**',
-    'src/features/openai/**',
-    'src/features/rate-limiter/**',
-    'src/stores-universal/**',
-    'src/interfaces/**',
-    'src/utils/**',
-    'src/components/**',
-    'src/services/**',
-    'src/stores/**',
-    'src/pages/**',
-    'src/server/crowi/**',
-    'src/server/events/**',
-    'src/server/interfaces/**',
-    'src/server/models/**',
-    'src/server/util/**',
-    'src/server/app.ts',
-    'src/server/repl.ts',
-    'src/server/routes/*.js',
-    'src/server/routes/*.ts',
-    'src/server/routes/attachment/**',
-  ],
-  settings: {
-    // resolve path aliases by eslint-import-resolver-typescript
-    'import/resolver': {
-      typescript: {},
-    },
-  },
-  rules: {
-    '@typescript-eslint/no-var-requires': 'off',
-
-    // set 'warn' temporarily -- 2021.08.02 Yuki Takei
-    '@typescript-eslint/no-use-before-define': ['warn'],
-    '@typescript-eslint/no-this-alias': ['warn'],
-  },
-  overrides: [
-    {
-      // enable the rule specifically for JavaScript files
-      files: ['*.js', '*.mjs', '*.jsx'],
-      rules: {
-        // set 'warn' temporarily -- 2023.08.14 Yuki Takei
-        'react/prop-types': 'warn',
-        // set 'warn' temporarily -- 2023.08.14 Yuki Takei
-        'no-unused-vars': ['warn'],
-      },
-    },
-    {
-      // enable the rule specifically for TypeScript files
-      files: ['*.ts', '*.mts', '*.tsx'],
-      rules: {
-        'no-unused-vars': 'off',
-        // set 'warn' temporarily -- 2023.08.14 Yuki Takei
-        'react/prop-types': 'warn',
-        // set 'warn' temporarily -- 2022.07.25 Yuki Takei
-        '@typescript-eslint/explicit-module-boundary-types': ['warn'],
-      },
-    },
-  ],
-};

+ 3 - 0
apps/app/.gitignore

@@ -14,3 +14,6 @@
 /public/uploads
 /public/uploads
 /src/styles/prebuilt
 /src/styles/prebuilt
 /tmp/
 /tmp/
+
+# cache
+/.swc/

Некоторые файлы не были показаны из-за большого количества измененных файлов