Просмотр исходного кода

Merge branch 'master' into fix/176753-fix-mobile-editor-page-title-display

satof3 1 месяц назад
Родитель
Commit
1a9f849475
100 измененных файлов с 8586 добавлено и 1041 удалено
  1. 391 0
      .claude/agents/build-error-resolver.md
  2. 144 0
      .claude/agents/security-reviewer.md
  3. 277 0
      .claude/commands/kiro/spec-cleanup.md
  4. 179 0
      .claude/commands/kiro/spec-design.md
  5. 110 0
      .claude/commands/kiro/spec-impl.md
  6. 65 0
      .claude/commands/kiro/spec-init.md
  7. 98 0
      .claude/commands/kiro/spec-requirements.md
  8. 87 0
      .claude/commands/kiro/spec-status.md
  9. 138 0
      .claude/commands/kiro/spec-tasks.md
  10. 127 0
      .claude/commands/kiro/steering-custom.md
  11. 143 0
      .claude/commands/kiro/steering.md
  12. 92 0
      .claude/commands/kiro/validate-design.md
  13. 88 0
      .claude/commands/kiro/validate-gap.md
  14. 138 0
      .claude/commands/kiro/validate-impl.md
  15. 116 0
      .claude/commands/learn.md
  16. 287 0
      .claude/commands/tdd.md
  17. 217 0
      .claude/rules/coding-style.md
  18. 37 0
      .claude/rules/performance.md
  19. 33 0
      .claude/rules/security.md
  20. 38 0
      .claude/rules/testing.md
  21. 23 0
      .claude/settings.json
  22. 122 0
      .claude/skills/learned/essential-test-design/SKILL.md
  23. 494 0
      .claude/skills/learned/essential-test-patterns/SKILL.md
  24. 207 0
      .claude/skills/monorepo-overview/SKILL.md
  25. 269 0
      .claude/skills/tech-stack/SKILL.md
  26. 6 1
      .devcontainer/app/devcontainer.json
  27. 4 0
      .devcontainer/pdf-converter/devcontainer.json
  28. 1 1
      .gitignore
  29. 93 0
      .kiro/settings/rules/design-discovery-full.md
  30. 49 0
      .kiro/settings/rules/design-discovery-light.md
  31. 182 0
      .kiro/settings/rules/design-principles.md
  32. 110 0
      .kiro/settings/rules/design-review.md
  33. 49 0
      .kiro/settings/rules/ears-format.md
  34. 144 0
      .kiro/settings/rules/gap-analysis.md
  35. 90 0
      .kiro/settings/rules/steering-principles.md
  36. 131 0
      .kiro/settings/rules/tasks-generation.md
  37. 34 0
      .kiro/settings/rules/tasks-parallel-analysis.md
  38. 276 0
      .kiro/settings/templates/specs/design.md
  39. 22 0
      .kiro/settings/templates/specs/init.json
  40. 9 0
      .kiro/settings/templates/specs/requirements-init.md
  41. 26 0
      .kiro/settings/templates/specs/requirements.md
  42. 61 0
      .kiro/settings/templates/specs/research.md
  43. 21 0
      .kiro/settings/templates/specs/tasks.md
  44. 69 0
      .kiro/settings/templates/steering-custom/api-standards.md
  45. 67 0
      .kiro/settings/templates/steering-custom/authentication.md
  46. 46 0
      .kiro/settings/templates/steering-custom/database.md
  47. 54 0
      .kiro/settings/templates/steering-custom/deployment.md
  48. 59 0
      .kiro/settings/templates/steering-custom/error-handling.md
  49. 55 0
      .kiro/settings/templates/steering-custom/security.md
  50. 47 0
      .kiro/settings/templates/steering-custom/testing.md
  51. 18 0
      .kiro/settings/templates/steering/product.md
  52. 41 0
      .kiro/settings/templates/steering/structure.md
  53. 45 0
      .kiro/settings/templates/steering/tech.md
  54. 764 0
      .kiro/specs/oauth2-email-support/design.md
  55. 57 0
      .kiro/specs/oauth2-email-support/requirements.md
  56. 449 0
      .kiro/specs/oauth2-email-support/research.md
  57. 23 0
      .kiro/specs/oauth2-email-support/spec.json
  58. 41 0
      .kiro/specs/oauth2-email-support/tasks.md
  59. 34 0
      .kiro/steering/product.md
  60. 8 0
      .kiro/steering/structure.md
  61. 8 0
      .kiro/steering/tdd.md
  62. 8 0
      .kiro/steering/tech.md
  63. 1 20
      .mcp.json
  64. 0 104
      .serena/memories/apps-app-detailed-architecture.md
  65. 0 162
      .serena/memories/apps-app-development-patterns.md
  66. 0 37
      .serena/memories/apps-app-google-workspace-oauth2-mail.md
  67. 0 35
      .serena/memories/apps-app-technical-specs.md
  68. 0 61
      .serena/memories/coding_conventions.md
  69. 0 26
      .serena/memories/project_overview.md
  70. 0 89
      .serena/memories/project_structure.md
  71. 0 94
      .serena/memories/task_completion_checklist.md
  72. 0 41
      .serena/memories/tech_stack.md
  73. 0 95
      .serena/memories/vitest-testing-tips-and-best-practices.md
  74. 65 9
      .serena/project.yml
  75. 0 10
      .serena/serena_config.yml
  76. 4 1
      .vscode/mcp.json
  77. 115 50
      AGENTS.md
  78. 41 1
      CHANGELOG.md
  79. 49 0
      CLAUDE.md
  80. 105 0
      apps/app/.claude/skills/app-architecture/SKILL.md
  81. 202 0
      apps/app/.claude/skills/app-commands/SKILL.md
  82. 173 0
      apps/app/.claude/skills/app-specific-patterns/SKILL.md
  83. 302 0
      apps/app/.claude/skills/learned/page-save-origin-semantics/SKILL.md
  84. 151 74
      apps/app/AGENTS.md
  85. 18 0
      apps/app/CLAUDE.md
  86. 4 0
      apps/app/config/next-i18next.config.d.ts
  87. 1 1
      apps/app/docker/README.md
  88. 0 86
      apps/app/jest.config.js
  89. 0 1
      apps/app/nodemon.json
  90. 12 17
      apps/app/package.json
  91. 2 7
      apps/app/playwright/20-basic-features/access-to-page.spec.ts
  92. 2 7
      apps/app/playwright/23-editor/saving.spec.ts
  93. 77 0
      apps/app/playwright/23-editor/vim-keymap.spec.ts
  94. 3 8
      apps/app/playwright/23-editor/with-navigation.spec.ts
  95. 67 0
      apps/app/playwright/30-search/search.spec.ts
  96. 6 3
      apps/app/playwright/50-sidebar/access-to-sidebar.spec.ts
  97. 11 0
      apps/app/playwright/utils/AppendTextToEditorUntilContains.ts
  98. 14 0
      apps/app/public/static/locales/en_US/admin.json
  99. 14 0
      apps/app/public/static/locales/fr_FR/admin.json
  100. 26 0
      apps/app/public/static/locales/ja_JP/admin.json

+ 391 - 0
.claude/agents/build-error-resolver.md

@@ -0,0 +1,391 @@
+---
+name: build-error-resolver
+description: Build and TypeScript error resolution specialist. Use PROACTIVELY when build fails or type errors occur. Fixes build/type errors only with minimal diffs, no architectural edits. Focuses on getting the build green quickly.
+tools: Read, Write, Edit, Bash, Grep, Glob
+model: opus
+---
+
+# Build Error Resolver
+
+You are an expert build error resolution specialist focused on fixing TypeScript, compilation, and build errors quickly and efficiently. Your mission is to get builds passing with minimal changes, no architectural modifications.
+
+## Core Responsibilities
+
+1. **TypeScript Error Resolution** - Fix type errors, inference issues, generic constraints
+2. **Build Error Fixing** - Resolve compilation failures, module resolution
+3. **Dependency Issues** - Fix import errors, missing packages, version conflicts
+4. **Configuration Errors** - Resolve tsconfig.json, Next.js config issues
+5. **Minimal Diffs** - Make smallest possible changes to fix errors
+6. **No Architecture Changes** - Only fix errors, don't refactor or redesign
+
+## Tools at Your Disposal
+
+### Build & Type Checking Tools
+- **tsgo** - TypeScript Go compiler for type checking
+- **pnpm** - Package management
+- **biome** - Linting and formatting (NOT ESLint)
+- **turbo** - Monorepo build orchestration
+
+### Diagnostic Commands
+```bash
+# Full lint (typecheck + biome + styles + openapi)
+turbo run lint --filter {package}
+
+# Or directly in apps/app
+cd apps/app && pnpm run lint:typecheck
+cd apps/app && pnpm run lint:biome
+
+# Check specific file
+pnpm biome check path/to/file.ts
+pnpm tsgo --noEmit path/to/file.ts
+
+# Production build
+turbo run build --filter {package}
+```
+
+## Error Resolution Workflow
+
+### 1. Collect All Errors
+```
+a) Run full type check
+   - turbo run lint --filter {package}
+   - Capture ALL errors, not just first
+
+b) Categorize errors by type
+   - Type inference failures
+   - Missing type definitions
+   - Import/export errors
+   - Configuration errors
+   - Dependency issues
+
+c) Prioritize by impact
+   - Blocking build: Fix first
+   - Type errors: Fix in order
+   - Warnings: Fix if time permits
+```
+
+### 2. Fix Strategy (Minimal Changes)
+```
+For each error:
+
+1. Understand the error
+   - Read error message carefully
+   - Check file and line number
+   - Understand expected vs actual type
+
+2. Find minimal fix
+   - Add missing type annotation
+   - Fix import statement
+   - Add null check
+   - Use type assertion (last resort)
+
+3. Verify fix doesn't break other code
+   - Run lint again after each fix
+   - Check related files
+   - Ensure no new errors introduced
+
+4. Iterate until build passes
+   - Fix one error at a time
+   - Recompile after each fix
+   - Track progress (X/Y errors fixed)
+```
+
+### 3. Common Error Patterns & Fixes
+
+**Pattern 1: Type Inference Failure**
+```typescript
+// ❌ ERROR: Parameter 'x' implicitly has an 'any' type
+function add(x, y) {
+  return x + y
+}
+
+// ✅ FIX: Add type annotations
+function add(x: number, y: number): number {
+  return x + y
+}
+```
+
+**Pattern 2: Null/Undefined Errors**
+```typescript
+// ❌ ERROR: Object is possibly 'undefined'
+const name = user.name.toUpperCase()
+
+// ✅ FIX: Optional chaining
+const name = user?.name?.toUpperCase()
+
+// ✅ OR: Null check
+const name = user && user.name ? user.name.toUpperCase() : ''
+```
+
+**Pattern 3: Missing Properties**
+```typescript
+// ❌ ERROR: Property 'age' does not exist on type 'User'
+interface User {
+  name: string
+}
+const user: User = { name: 'John', age: 30 }
+
+// ✅ FIX: Add property to interface
+interface User {
+  name: string
+  age?: number // Optional if not always present
+}
+```
+
+**Pattern 4: Import Errors**
+```typescript
+// ❌ ERROR: Cannot find module '~/lib/utils'
+import { formatDate } from '~/lib/utils'
+
+// ✅ FIX 1: Check tsconfig paths (GROWI uses ~/ for apps/app/src)
+{
+  "compilerOptions": {
+    "paths": {
+      "~/*": ["./src/*"]
+    }
+  }
+}
+
+// ✅ FIX 2: Use relative import
+import { formatDate } from '../lib/utils'
+
+// ✅ FIX 3: Install missing package
+pnpm add <package-name>
+```
+
+**Pattern 5: Type Mismatch**
+```typescript
+// ❌ ERROR: Type 'string' is not assignable to type 'number'
+const age: number = "30"
+
+// ✅ FIX: Parse string to number
+const age: number = parseInt("30", 10)
+
+// ✅ OR: Change type
+const age: string = "30"
+```
+
+**Pattern 6: Generic Constraints**
+```typescript
+// ❌ ERROR: Type 'T' is not assignable to type 'string'
+function getLength<T>(item: T): number {
+  return item.length
+}
+
+// ✅ FIX: Add constraint
+function getLength<T extends { length: number }>(item: T): number {
+  return item.length
+}
+
+// ✅ OR: More specific constraint
+function getLength<T extends string | any[]>(item: T): number {
+  return item.length
+}
+```
+
+**Pattern 7: React Hook Errors**
+```typescript
+// ❌ ERROR: React Hook "useState" cannot be called in a function
+function MyComponent() {
+  if (condition) {
+    const [state, setState] = useState(0) // ERROR!
+  }
+}
+
+// ✅ FIX: Move hooks to top level
+function MyComponent() {
+  const [state, setState] = useState(0)
+
+  if (!condition) {
+    return null
+  }
+
+  // Use state here
+}
+```
+
+**Pattern 8: Async/Await Errors**
+```typescript
+// ❌ ERROR: 'await' expressions are only allowed within async functions
+function fetchData() {
+  const data = await fetch('/api/data')
+}
+
+// ✅ FIX: Add async keyword
+async function fetchData() {
+  const data = await fetch('/api/data')
+}
+```
+
+**Pattern 9: Module Not Found**
+```typescript
+// ❌ ERROR: Cannot find module 'react' or its corresponding type declarations
+import React from 'react'
+
+// ✅ FIX: Install dependencies
+pnpm add react
+pnpm add -D @types/react
+
+// ✅ CHECK: Verify package.json has dependency
+{
+  "dependencies": {
+    "react": "^18.0.0"
+  },
+  "devDependencies": {
+    "@types/react": "^18.0.0"
+  }
+}
+```
+
+## Minimal Diff Strategy
+
+**CRITICAL: Make smallest possible changes**
+
+### DO:
+✅ Add type annotations where missing
+✅ Add null checks where needed
+✅ Fix imports/exports
+✅ Add missing dependencies
+✅ Update type definitions
+✅ Fix configuration files
+
+### DON'T:
+❌ Refactor unrelated code
+❌ Change architecture
+❌ Rename variables/functions (unless causing error)
+❌ Add new features
+❌ Change logic flow (unless fixing error)
+❌ Optimize performance
+❌ Improve code style
+
+**Example of Minimal Diff:**
+
+```typescript
+// File has 200 lines, error on line 45
+
+// ❌ WRONG: Refactor entire file
+// - Rename variables
+// - Extract functions
+// - Change patterns
+// Result: 50 lines changed
+
+// ✅ CORRECT: Fix only the error
+// - Add type annotation on line 45
+// Result: 1 line changed
+
+function processData(data) { // Line 45 - ERROR: 'data' implicitly has 'any' type
+  return data.map(item => item.value)
+}
+
+// ✅ MINIMAL FIX:
+function processData(data: any[]) { // Only change this line
+  return data.map(item => item.value)
+}
+
+// ✅ BETTER MINIMAL FIX (if type known):
+function processData(data: Array<{ value: number }>) {
+  return data.map(item => item.value)
+}
+```
+
+## Build Error Report Format
+
+```markdown
+# Build Error Resolution Report
+
+**Date:** YYYY-MM-DD
+**Build Target:** Next.js Production / TypeScript Check / Biome
+**Initial Errors:** X
+**Errors Fixed:** Y
+**Build Status:** ✅ PASSING / ❌ FAILING
+
+## Errors Fixed
+
+### 1. [Error Category - e.g., Type Inference]
+**Location:** `apps/app/src/components/PageCard.tsx:45`
+**Error Message:**
+```
+Parameter 'page' implicitly has an 'any' type.
+```
+
+**Root Cause:** Missing type annotation for function parameter
+
+**Fix Applied:**
+```diff
+- function getPagePath(page) {
++ function getPagePath(page: IPage) {
+    return page.path;
+  }
+```
+
+**Lines Changed:** 1
+**Impact:** NONE - Type safety improvement only
+
+---
+
+## Verification Steps
+
+1. ✅ TypeScript check passes: `turbo run lint --filter {package}`
+2. ✅ Next.js build succeeds: `turbo run build --filter {package}`
+3. ✅ No new errors introduced
+4. ✅ Development server runs: `turbo run dev`
+
+## Summary
+
+- Total errors resolved: X
+- Total lines changed: Y
+- Build status: ✅ PASSING
+- Blocking issues: 0 remaining
+```
+
+## When to Use This Agent
+
+**USE when:**
+- `turbo run build --filter {package}` fails
+- `turbo run lint --filter {package}` shows errors
+- Type errors blocking development
+- Import/module resolution errors
+- Configuration errors
+- Dependency version conflicts
+
+**DON'T USE when:**
+- Code needs refactoring
+- Architectural changes needed
+- New features required
+- Tests failing (run tests separately)
+- Security issues found (use security-reviewer)
+
+## Build Error Priority Levels
+
+### 🔴 CRITICAL (Fix Immediately)
+- Build completely broken
+- No development server
+- Production deployment blocked
+- Multiple files failing
+
+### 🟡 HIGH (Fix Soon)
+- Single file failing
+- Type errors in new code
+- Import errors
+- Non-critical build warnings
+
+### 🟢 MEDIUM (Fix When Possible)
+- Biome warnings
+- Deprecated API usage
+- Non-strict type issues
+- Minor configuration warnings
+
+## Success Metrics
+
+After build error resolution:
+- ✅ `turbo run lint --filter {package}` exits with code 0
+- ✅ `turbo run build --filter {package}` completes successfully
+- ✅ No new errors introduced
+- ✅ Minimal lines changed (< 5% of affected file)
+- ✅ Build time not significantly increased
+- ✅ Development server runs without errors
+- ✅ Tests still passing
+
+---
+
+**Remember**: The goal is to fix errors quickly with minimal changes. Don't refactor, don't optimize, don't redesign. Fix the error, verify the build passes, move on. Speed and precision over perfection.

+ 144 - 0
.claude/agents/security-reviewer.md

@@ -0,0 +1,144 @@
+---
+name: security-reviewer
+description: Security vulnerability detection specialist for GROWI. Use after writing code that handles user input, authentication, API endpoints, or sensitive data. Flags secrets, injection, XSS, and OWASP Top 10 vulnerabilities.
+tools: Read, Write, Edit, Bash, Grep, Glob
+model: opus
+---
+
+# Security Reviewer
+
+You are a security specialist focused on identifying vulnerabilities in the GROWI codebase. Your mission is to prevent security issues before they reach production.
+
+## GROWI Security Stack
+
+GROWI uses these security measures:
+- **helmet**: Security headers
+- **express-mongo-sanitize**: NoSQL injection prevention
+- **xss**, **rehype-sanitize**: XSS prevention
+- **Passport.js**: Authentication (Local, LDAP, SAML, OAuth)
+
+## Security Review Workflow
+
+### 1. Automated Checks
+```bash
+# Check for vulnerable dependencies
+pnpm audit
+
+# Search for potential secrets
+grep -rE "api[_-]?key|password|secret|token" --include="*.ts" --include="*.tsx" .
+```
+
+### 2. OWASP Top 10 Checklist
+
+1. **Injection (NoSQL)** - Are Mongoose queries safe? No string concatenation in queries?
+2. **Broken Authentication** - Passwords hashed? Sessions secure? Passport configured correctly?
+3. **Sensitive Data Exposure** - Secrets in env vars? HTTPS enforced? Logs sanitized?
+4. **Broken Access Control** - Authorization on all routes? CORS configured?
+5. **Security Misconfiguration** - Helmet enabled? Debug mode off in production?
+6. **XSS** - Output escaped? Content-Security-Policy set?
+7. **Components with Vulnerabilities** - `pnpm audit` clean?
+8. **Insufficient Logging** - Security events logged?
+
+## Vulnerability Patterns
+
+### Hardcoded Secrets (CRITICAL)
+```typescript
+// ❌ CRITICAL
+const apiKey = "sk-xxxxx"
+
+// ✅ CORRECT
+const apiKey = process.env.API_KEY
+```
+
+### NoSQL Injection (CRITICAL)
+```typescript
+// ❌ CRITICAL: Unsafe query
+const user = await User.findOne({ email: req.body.email, password: req.body.password })
+
+// ✅ CORRECT: Use express-mongo-sanitize middleware + validate input
+```
+
+### XSS (HIGH)
+```typescript
+// ❌ HIGH: Direct HTML insertion
+element.innerHTML = userInput
+
+// ✅ CORRECT: Use textContent or sanitize
+element.textContent = userInput
+// OR use xss library
+import xss from 'xss'
+element.innerHTML = xss(userInput)
+```
+
+### SSRF (HIGH)
+```typescript
+// ❌ HIGH: User-controlled URL
+const response = await fetch(userProvidedUrl)
+
+// ✅ CORRECT: Validate URL against allowlist
+const allowedDomains = ['api.example.com']
+const url = new URL(userProvidedUrl)
+if (!allowedDomains.includes(url.hostname)) {
+  throw new Error('Invalid URL')
+}
+```
+
+### Authorization Check (CRITICAL)
+```typescript
+// ❌ CRITICAL: No authorization
+app.get('/api/page/:id', async (req, res) => {
+  const page = await Page.findById(req.params.id)
+  res.json(page)
+})
+
+// ✅ CORRECT: Check user access
+app.get('/api/page/:id', loginRequired, async (req, res) => {
+  const page = await Page.findById(req.params.id)
+  if (!page.isAccessibleBy(req.user)) {
+    return res.status(403).json({ error: 'Forbidden' })
+  }
+  res.json(page)
+})
+```
+
+## Security Report Format
+
+```markdown
+## Security Review Summary
+- **Critical Issues:** X
+- **High Issues:** Y
+- **Risk Level:** 🔴 HIGH / 🟡 MEDIUM / 🟢 LOW
+
+### Issues Found
+1. **[SEVERITY]** Description @ `file:line`
+   - Impact: ...
+   - Fix: ...
+```
+
+## When to Review
+
+**ALWAYS review when:**
+- New API endpoints added
+- Authentication/authorization changed
+- User input handling added
+- Database queries modified
+- File upload features added
+- Dependencies updated
+
+## Best Practices
+
+1. **Defense in Depth** - Multiple security layers
+2. **Least Privilege** - Minimum permissions
+3. **Fail Securely** - Errors don't expose data
+4. **Separation of Concerns** - Isolate security-critical code
+5. **Keep it Simple** - Complex code has more vulnerabilities
+6. **Don't Trust Input** - Validate everything
+7. **Update Regularly** - Keep dependencies current
+
+## Emergency Response
+
+If CRITICAL vulnerability found:
+1. Document the issue
+2. Provide secure code fix
+3. Check if vulnerability was exploited
+4. Rotate any exposed secrets

+ 277 - 0
.claude/commands/kiro/spec-cleanup.md

@@ -0,0 +1,277 @@
+---
+description: Organize and clean up specification documents after implementation completion
+allowed-tools: Bash, Glob, Grep, Read, Write, Edit, MultiEdit, Update
+argument-hint: <feature-name>
+---
+
+# Specification Cleanup
+
+<background_information>
+- **Mission**: Organize specification documents after implementation completion, removing implementation details while preserving essential context for future refactoring
+- **Success Criteria**:
+  - Implementation details (testing procedures, deployment checklists) removed
+  - Design decisions and constraints preserved in research.md and design.md
+  - Requirements simplified (Acceptance Criteria condensed to summaries)
+  - Unimplemented features removed or documented
+  - Documents remain valuable for future refactoring work
+</background_information>
+
+<instructions>
+## Core Task
+Clean up and organize specification documents for feature **$1** after implementation is complete.
+
+## Organizing Principle
+
+**"Can we read essential context from these spec documents when refactoring this feature months later?"**
+
+- **Keep**: "Why" (design decisions, architectural constraints, limitations, trade-offs)
+- **Remove**: "How" (testing procedures, deployment steps, detailed implementation examples)
+
+## Execution Steps
+
+### Step 1: Load Context
+
+**Discover all spec files**:
+- Use Glob to find all files in `.kiro/specs/$1/` directory
+- Categorize files:
+  - **Core files** (must preserve): `spec.json`, `requirements.md`, `design.md`, `tasks.md`, `research.md`
+  - **Other files** (evaluate case-by-case): validation reports, notes, prototypes, migration guides, etc.
+
+**Read all discovered files**:
+- Read all core files first
+- Read other files to understand their content and value
+
+**Verify implementation status**:
+- Check that tasks are marked complete `[x]` in tasks.md
+- If implementation incomplete, warn user and ask to confirm cleanup
+
+### Step 2: Analyze Current State
+
+**Identify cleanup opportunities**:
+
+1. **Other files** (non-core files like validation-report.md, notes.md, etc.):
+   - Read each file to understand its content and purpose
+   - Identify valuable information that should be preserved:
+     * Implementation discoveries and lessons learned
+     * Critical constraints or design decisions
+     * Historical context for future refactoring
+   - Determine salvage strategy:
+     * Migrate valuable content to research.md or design.md
+     * Keep file if it contains essential reference information
+     * Delete if content is redundant or no longer relevant
+   - **Case-by-case evaluation required** - never assume files should be deleted
+
+2. **research.md**:
+   - Should contain production discoveries and implementation lessons learned
+   - Check if implementation revealed new constraints or patterns to document
+   - Identify content from other files that should be migrated here
+
+3. **requirements.md**:
+   - Identify verbose Acceptance Criteria that can be condensed to summaries
+   - Find unimplemented requirements (compare with tasks.md)
+   - Detect duplicate or redundant content
+
+4. **design.md**:
+   - Identify implementation-specific sections that can be removed:
+     * Detailed Testing Strategy (test procedures)
+     * Security Considerations (if covered in implementation)
+     * Error Handling code examples (if implemented)
+     * Migration Strategy (after migration complete)
+     * Deployment Checklist (after deployment)
+   - Identify sections to preserve:
+     * Architecture diagrams (essential for understanding)
+     * Component interfaces (API contracts)
+     * Design decisions and rationale
+     * Critical implementation constraints
+     * Known limitations
+   - Check if content from other files should be migrated here
+
+### Step 3: Interactive Confirmation
+
+**Present cleanup plan to user**:
+
+For each file and section identified in Step 2, ask:
+- "Should I delete/simplify/keep/salvage this section?"
+- Provide recommendations based on organizing principle
+- Show brief preview of content to aid decision
+
+**Example questions for other files**:
+- "validation-report.md found. Contains {brief summary}. Options:"
+  - "A: Migrate valuable content to research.md, then delete"
+  - "B: Keep as historical reference"
+  - "C: Delete (content no longer needed)"
+- "notes.md found. Contains {brief summary}. Salvage to research.md before deleting? [Y/n]"
+
+**Example questions for core files**:
+- "research.md: Add 'Session N: Production Discoveries' section to document implementation lessons? [Y/n]"
+- "requirements.md: Simplify Acceptance Criteria from detailed bullet points to summary paragraphs? [Y/n]"
+- "requirements.md: Remove unimplemented requirements (e.g., Req 4.4 field masking not implemented)? [Y/n]"
+- "design.md: Delete 'Testing Strategy' section (lines X-Y)? [Y/n]"
+- "design.md: Delete 'Security Considerations' section (lines X-Y)? [Y/n]"
+- "design.md: Keep Architecture diagrams (essential for refactoring)? [Y/n]"
+
+**Batch similar decisions**:
+- Group related sections (e.g., all "delete implementation details" decisions)
+- Allow user to approve categories rather than individual items
+- Present file-by-file salvage decisions for other files
+
+### Step 4: Execute Cleanup
+
+**For each approved action**:
+
+1. **Salvage and cleanup other files** (if approved):
+   - For each non-core file (validation-report.md, notes.md, etc.):
+     * Extract valuable information (implementation lessons, constraints, decisions)
+     * Migrate content to appropriate core file:
+       - Technical discoveries → research.md
+       - Design constraints → design.md
+       - Requirement clarifications → requirements.md
+     * Delete file after salvage (if approved)
+   - Document salvaged content with source reference (e.g., "From validation-report.md:")
+
+2. **Update research.md** (if new discoveries or salvaged content):
+   - Add new section "Session N: Production Implementation Discoveries" (if needed)
+   - Document critical technical constraints discovered during implementation
+   - Include code examples for critical patterns (e.g., falsy checks, credential preservation)
+   - Integrate salvaged content from other files
+   - Cross-reference requirements.md and design.md where relevant
+
+3. **Simplify requirements.md** (if approved):
+   - Transform detailed Acceptance Criteria into summary paragraphs
+   - Remove unimplemented requirements entirely
+   - Preserve requirement objectives and summaries
+   - Example transformation:
+     ```
+     Before: "1. System shall X... 2. System shall Y... [7 criteria]"
+     After: "**Summary**: System provides X and Y. Configuration includes..."
+     ```
+
+4. **Clean up design.md** (if approved):
+   - Delete approved sections (Testing Strategy, Security Considerations, etc.)
+   - Add "Critical Implementation Constraints" section if implementation revealed new constraints
+   - Integrate salvaged content from other files (if relevant)
+   - Preserve architecture diagrams and component interfaces
+   - Keep design decisions and rationale sections
+
+5. **Update spec.json metadata**:
+   - Set `phase: "implementation-complete"` (if not already set)
+   - Add `cleanup_completed: true` flag
+   - Update `updated_at` timestamp
+
+### Step 5: Generate Cleanup Summary
+
+**Provide summary report**:
+- List of files modified/deleted
+- Sections removed and lines saved
+- Critical information preserved
+- Recommendations for future refactoring
+
+**Format**:
+```markdown
+## Cleanup Summary for {feature-name}
+
+### Files Modified
+- ✅ validation-report.md: Salvaged to research.md, then deleted (730 lines removed)
+- ✅ notes.md: Salvaged to design.md, then deleted (120 lines removed)
+- ✅ research.md: Added Session 2 discoveries + salvaged content (180 lines added)
+- ✅ requirements.md: Simplified 6 requirements (350 lines → 180 lines)
+- ✅ design.md: Removed 4 sections, added constraints + salvaged content (250 lines removed, 100 added)
+
+### Information Salvaged
+- Implementation discoveries from validation-report.md → research.md
+- Design notes from notes.md → design.md
+- Historical context preserved with source attribution
+
+### Information Preserved
+- Architecture diagrams and component interfaces
+- Design decisions and rationale
+- Critical implementation constraints
+- Known limitations and trade-offs
+
+### Next Steps
+- Spec documents ready for future refactoring reference
+- Consider creating knowledge base entry if pattern is reusable
+```
+
+## Critical Constraints
+
+- **User approval required**: Never delete content without explicit confirmation
+- **Language consistency**: Use language specified in spec.json for all updates
+- **Preserve history**: Don't delete discovery rationale or design decisions
+- **Balance brevity with completeness**: Remove redundancy but keep essential context
+- **Interactive workflow**: Pause for user input rather than making assumptions
+
+## Tool Guidance
+
+- **Glob**: Discover all files in `.kiro/specs/{feature}/` directory
+- **Read**: Load all discovered files for analysis
+- **Grep**: Search for patterns (e.g., unimplemented requirements, completed tasks)
+- **Edit/Write**: Update files based on approved changes, salvage content
+- **Bash**: Delete files after salvage (if approved)
+- **MultiEdit**: For batch edits across multiple sections
+
+## Output Description
+
+Provide cleanup plan and execution report in the language specified in spec.json.
+
+**Report Structure**:
+1. **Current State Analysis**: What needs cleanup and why
+2. **Cleanup Plan**: Proposed changes with recommendations
+3. **Confirmation Prompts**: Interactive questions for user approval
+4. **Execution Summary**: What was changed and why
+5. **Preserved Context**: What critical information remains for future refactoring
+
+**Format**: Clear, scannable format with sections and bullet points
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Implementation Incomplete**:
+- **Condition**: Less than 90% of tasks marked `[x]` in tasks.md
+- **Action**: Warn user: "Implementation appears incomplete (X/Y tasks done). Continue cleanup? [y/N]"
+- **Recommendation**: Wait until implementation complete before cleanup
+
+**Spec Not Found**:
+- **Message**: "No spec found for `$1`. Check available specs in `.kiro/specs/`"
+- **Action**: List available spec directories
+
+**Missing Critical Files**:
+- **Condition**: requirements.md or design.md missing
+- **Action**: Skip cleanup for missing files, proceed with available files
+- **Warning**: "requirements.md missing - cannot simplify requirements"
+
+### Dry Run Mode (Future Enhancement)
+
+**If `-n` or `--dry-run` flag provided**:
+- Show cleanup plan without executing changes
+- Allow user to review before committing to cleanup
+
+### Backup Recommendation
+
+**Before cleanup**:
+- Recommend user create git commit or backup
+- Warning: "This will modify spec files. Commit current state first? [Y/n]"
+
+### Undo Support
+
+**If cleanup goes wrong**:
+- Use git to restore previous state: `git checkout HEAD -- .kiro/specs/{feature}/`
+- Remind user to commit before cleanup for easy rollback
+
+## Example Usage
+
+```bash
+# Basic cleanup after implementation
+/kiro:spec-cleanup oauth2-email-support
+
+# With conversation context about implementation discoveries
+# Command will prompt for Session N discoveries to document
+/kiro:spec-cleanup user-authentication
+```
+
+## Related Commands
+
+- `/kiro:spec-impl {feature}` - Implement tasks (run before cleanup)
+- `/kiro:validate-impl {feature}` - Validate implementation (run before cleanup)
+- `/kiro:spec-status {feature}` - Check implementation status

+ 179 - 0
.claude/commands/kiro/spec-design.md

@@ -0,0 +1,179 @@
+---
+description: Create comprehensive technical design for a specification
+allowed-tools: Bash, Glob, Grep, LS, Read, Write, Edit, MultiEdit, Update, WebSearch, WebFetch
+argument-hint: <feature-name> [-y]
+---
+
+# Technical Design Generator
+
+<background_information>
+- **Mission**: Generate comprehensive technical design document that translates requirements (WHAT) into architectural design (HOW)
+- **Success Criteria**:
+  - All requirements mapped to technical components with clear interfaces
+  - Appropriate architecture discovery and research completed
+  - Design aligns with steering context and existing patterns
+  - Visual diagrams included for complex architectures
+</background_information>
+
+<instructions>
+## Core Task
+Generate technical design document for feature **$1** based on approved requirements.
+
+## Execution Steps
+
+### Step 1: Load Context
+
+**Read all necessary context**:
+- `.kiro/specs/$1/spec.json`, `requirements.md`, `design.md` (if exists)
+- **Entire `.kiro/steering/` directory** for complete project memory
+- `.kiro/settings/templates/specs/design.md` for document structure
+- `.kiro/settings/rules/design-principles.md` for design principles
+- `.kiro/settings/templates/specs/research.md` for discovery log structure
+
+**Validate requirements approval**:
+- If `-y` flag provided ($2 == "-y"): Auto-approve requirements in spec.json
+- Otherwise: Verify approval status (stop if unapproved, see Safety & Fallback)
+
+### Step 2: Discovery & Analysis
+
+**Critical: This phase ensures design is based on complete, accurate information.**
+
+1. **Classify Feature Type**:
+   - **New Feature** (greenfield) → Full discovery required
+   - **Extension** (existing system) → Integration-focused discovery
+   - **Simple Addition** (CRUD/UI) → Minimal or no discovery
+   - **Complex Integration** → Comprehensive analysis required
+
+2. **Execute Appropriate Discovery Process**:
+   
+   **For Complex/New Features**:
+   - Read and execute `.kiro/settings/rules/design-discovery-full.md`
+   - Conduct thorough research using WebSearch/WebFetch:
+     - Latest architectural patterns and best practices
+     - External dependency verification (APIs, libraries, versions, compatibility)
+     - Official documentation, migration guides, known issues
+     - Performance benchmarks and security considerations
+   
+   **For Extensions**:
+   - Read and execute `.kiro/settings/rules/design-discovery-light.md`
+   - Focus on integration points, existing patterns, compatibility
+   - Use Grep to analyze existing codebase patterns
+   
+   **For Simple Additions**:
+   - Skip formal discovery, quick pattern check only
+
+3. **Retain Discovery Findings for Step 3**:
+- External API contracts and constraints
+- Technology decisions with rationale
+- Existing patterns to follow or extend
+- Integration points and dependencies
+- Identified risks and mitigation strategies
+- Potential architecture patterns and boundary options (note details in `research.md`)
+- Parallelization considerations for future tasks (capture dependencies in `research.md`)
+
+4. **Persist Findings to Research Log**:
+- Create or update `.kiro/specs/$1/research.md` using the shared template
+- Summarize discovery scope and key findings (Summary section)
+- Record investigations in Research Log topics with sources and implications
+- Document architecture pattern evaluation, design decisions, and risks using the template sections
+- Use the language specified in spec.json when writing or updating `research.md`
+
+### Step 3: Generate Design Document
+
+1. **Load Design Template and Rules**:
+- Read `.kiro/settings/templates/specs/design.md` for structure
+- Read `.kiro/settings/rules/design-principles.md` for principles
+
+2. **Generate Design Document**:
+- **Follow specs/design.md template structure and generation instructions strictly**
+- **Integrate all discovery findings**: Use researched information (APIs, patterns, technologies) throughout component definitions, architecture decisions, and integration points
+- If existing design.md found in Step 1, use it as reference context (merge mode)
+- Apply design rules: Type Safety, Visual Communication, Formal Tone
+- Use language specified in spec.json
+- Ensure sections reflect updated headings ("Architecture Pattern & Boundary Map", "Technology Stack & Alignment", "Components & Interface Contracts") and reference supporting details from `research.md`
+
+3. **Update Metadata** in spec.json:
+- Set `phase: "design-generated"`
+- Set `approvals.design.generated: true, approved: false`
+- Set `approvals.requirements.approved: true`
+- Update `updated_at` timestamp
+
+## Critical Constraints
+- **Type Safety**:
+   - Enforce strong typing aligned with the project's technology stack.
+   - For statically typed languages, define explicit types/interfaces and avoid unsafe casts.
+   - For TypeScript, never use `any`; prefer precise types and generics.
+   - For dynamically typed languages, provide type hints/annotations where available (e.g., Python type hints) and validate inputs at boundaries.
+   - Document public interfaces and contracts clearly to ensure cross-component type safety.
+- **Latest Information**: Use WebSearch/WebFetch for external dependencies and best practices
+- **Steering Alignment**: Respect existing architecture patterns from steering context
+- **Template Adherence**: Follow specs/design.md template structure and generation instructions strictly
+- **Design Focus**: Architecture and interfaces ONLY, no implementation code
+- **Requirements Traceability IDs**: Use numeric requirement IDs only (e.g. "1.1", "1.2", "3.1", "3.3") exactly as defined in requirements.md. Do not invent new IDs or use alphabetic labels.
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context before taking action (specs, steering, templates, rules)
+- **Research when uncertain**: Use WebSearch/WebFetch for external dependencies, APIs, and latest best practices
+- **Analyze existing code**: Use Grep to find patterns and integration points in codebase
+- **Write last**: Generate design.md only after all research and analysis complete
+
+## Output Description
+
+**Command execution output** (separate from design.md content):
+
+Provide brief summary in the language specified in spec.json:
+
+1. **Status**: Confirm design document generated at `.kiro/specs/$1/design.md`
+2. **Discovery Type**: Which discovery process was executed (full/light/minimal)
+3. **Key Findings**: 2-3 critical insights from `research.md` that shaped the design
+4. **Next Action**: Approval workflow guidance (see Safety & Fallback)
+5. **Research Log**: Confirm `research.md` updated with latest decisions
+
+**Format**: Concise Markdown (under 200 words) - this is the command output, NOT the design document itself
+
+**Note**: The actual design document follows `.kiro/settings/templates/specs/design.md` structure.
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Requirements Not Approved**:
+- **Stop Execution**: Cannot proceed without approved requirements
+- **User Message**: "Requirements not yet approved. Approval required before design generation."
+- **Suggested Action**: "Run `/kiro:spec-design $1 -y` to auto-approve requirements and proceed"
+
+**Missing Requirements**:
+- **Stop Execution**: Requirements document must exist
+- **User Message**: "No requirements.md found at `.kiro/specs/$1/requirements.md`"
+- **Suggested Action**: "Run `/kiro:spec-requirements $1` to generate requirements first"
+
+**Template Missing**:
+- **User Message**: "Template file missing at `.kiro/settings/templates/specs/design.md`"
+- **Suggested Action**: "Check repository setup or restore template file"
+- **Fallback**: Use inline basic structure with warning
+
+**Steering Context Missing**:
+- **Warning**: "Steering directory empty or missing - design may not align with project standards"
+- **Proceed**: Continue with generation but note limitation in output
+
+**Discovery Complexity Unclear**:
+- **Default**: Use full discovery process (`.kiro/settings/rules/design-discovery-full.md`)
+- **Rationale**: Better to over-research than miss critical context
+
+**Invalid Requirement IDs**:
+- **Stop Execution**: If requirements.md is missing numeric IDs or uses non-numeric headings (for example, "Requirement A"), stop and instruct the user to fix requirements.md before continuing.
+
+### Next Phase: Task Generation
+
+**If Design Approved**:
+- Review generated design at `.kiro/specs/$1/design.md`
+- **Optional**: Run `/kiro:validate-design $1` for interactive quality review
+- Then `/kiro:spec-tasks $1 -y` to generate implementation tasks
+
+**If Modifications Needed**:
+- Provide feedback and re-run `/kiro:spec-design $1`
+- Existing design used as reference (merge mode)
+
+**Note**: Design approval is mandatory before proceeding to task generation.
+
+think hard

+ 110 - 0
.claude/commands/kiro/spec-impl.md

@@ -0,0 +1,110 @@
+---
+description: Execute spec tasks using TDD methodology
+allowed-tools: Bash, Read, Write, Edit, MultiEdit, Grep, Glob, LS, WebFetch, WebSearch
+argument-hint: <feature-name> [task-numbers]
+---
+
+# Implementation Task Executor
+
+<background_information>
+- **Mission**: Execute implementation tasks using Test-Driven Development methodology based on approved specifications
+- **Success Criteria**:
+  - All tests written before implementation code
+  - Code passes all tests with no regressions
+  - Tasks marked as completed in tasks.md
+  - Implementation aligns with design and requirements
+</background_information>
+
+<instructions>
+## Core Task
+Execute implementation tasks for feature **$1** using Test-Driven Development.
+
+## Execution Steps
+
+### Step 1: Load Context
+
+**Read all necessary context**:
+- `.kiro/specs/$1/spec.json`, `requirements.md`, `design.md`, `tasks.md`
+- **Entire `.kiro/steering/` directory** for complete project memory
+
+**Validate approvals**:
+- Verify tasks are approved in spec.json (stop if not, see Safety & Fallback)
+
+### Step 2: Select Tasks
+
+**Determine which tasks to execute**:
+- If `$2` provided: Execute specified task numbers (e.g., "1.1" or "1,2,3")
+- Otherwise: Execute all pending tasks (unchecked `- [ ]` in tasks.md)
+
+### Step 3: Execute with TDD
+
+For each selected task, follow Kent Beck's TDD cycle:
+
+1. **RED - Write Failing Test**:
+   - Write test for the next small piece of functionality
+   - Test should fail (code doesn't exist yet)
+   - Use descriptive test names
+
+2. **GREEN - Write Minimal Code**:
+   - Implement simplest solution to make test pass
+   - Focus only on making THIS test pass
+   - Avoid over-engineering
+
+3. **REFACTOR - Clean Up**:
+   - Improve code structure and readability
+   - Remove duplication
+   - Apply design patterns where appropriate
+   - Ensure all tests still pass after refactoring
+
+4. **VERIFY - Validate Quality**:
+   - All tests pass (new and existing)
+   - No regressions in existing functionality
+   - Code coverage maintained or improved
+
+5. **MARK COMPLETE**:
+   - Update checkbox from `- [ ]` to `- [x]` in tasks.md
+
+## Critical Constraints
+- **TDD Mandatory**: Tests MUST be written before implementation code
+- **Task Scope**: Implement only what the specific task requires
+- **Test Coverage**: All new code must have tests
+- **No Regressions**: Existing tests must continue to pass
+- **Design Alignment**: Implementation must follow design.md specifications
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context before implementation
+- **Test first**: Write tests before code
+- Use **WebSearch/WebFetch** for library documentation when needed
+
+## Output Description
+
+Provide brief summary in the language specified in spec.json:
+
+1. **Tasks Executed**: Task numbers and test results
+2. **Status**: Completed tasks marked in tasks.md, remaining tasks count
+
+**Format**: Concise (under 150 words)
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Tasks Not Approved or Missing Spec Files**:
+- **Stop Execution**: All spec files must exist and tasks must be approved
+- **Suggested Action**: "Complete previous phases: `/kiro:spec-requirements`, `/kiro:spec-design`, `/kiro:spec-tasks`"
+
+**Test Failures**:
+- **Stop Implementation**: Fix failing tests before continuing
+- **Action**: Debug and fix, then re-run
+
+### Task Execution
+
+**Execute specific task(s)**:
+- `/kiro:spec-impl $1 1.1` - Single task
+- `/kiro:spec-impl $1 1,2,3` - Multiple tasks
+
+**Execute all pending**:
+- `/kiro:spec-impl $1` - All unchecked tasks
+
+think

+ 65 - 0
.claude/commands/kiro/spec-init.md

@@ -0,0 +1,65 @@
+---
+description: Initialize a new specification with detailed project description
+allowed-tools: Bash, Read, Write, Glob
+argument-hint: <project-description>
+---
+
+# Spec Initialization
+
+<background_information>
+- **Mission**: Initialize the first phase of spec-driven development by creating directory structure and metadata for a new specification
+- **Success Criteria**:
+  - Generate appropriate feature name from project description
+  - Create unique spec structure without conflicts
+  - Provide clear path to next phase (requirements generation)
+</background_information>
+
+<instructions>
+## Core Task
+Generate a unique feature name from the project description ($ARGUMENTS) and initialize the specification structure.
+
+## Execution Steps
+1. **Check Uniqueness**: Verify `.kiro/specs/` for naming conflicts (append number suffix if needed)
+2. **Create Directory**: `.kiro/specs/[feature-name]/`
+3. **Initialize Files Using Templates**:
+   - Read `.kiro/settings/templates/specs/init.json`
+   - Read `.kiro/settings/templates/specs/requirements-init.md`
+   - Replace placeholders:
+     - `{{FEATURE_NAME}}` → generated feature name
+     - `{{TIMESTAMP}}` → current ISO 8601 timestamp
+     - `{{PROJECT_DESCRIPTION}}` → $ARGUMENTS
+   - Write `spec.json` and `requirements.md` to spec directory
+
+## Important Constraints
+- DO NOT generate requirements/design/tasks at this stage
+- Follow stage-by-stage development principles
+- Maintain strict phase separation
+- Only initialization is performed in this phase
+</instructions>
+
+## Tool Guidance
+- Use **Glob** to check existing spec directories for name uniqueness
+- Use **Read** to fetch templates: `init.json` and `requirements-init.md`
+- Use **Write** to create spec.json and requirements.md after placeholder replacement
+- Perform validation before any file write operation
+
+## Output Description
+Provide output in the language specified in `spec.json` with the following structure:
+
+1. **Generated Feature Name**: `feature-name` format with 1-2 sentence rationale
+2. **Project Summary**: Brief summary (1 sentence)
+3. **Created Files**: Bullet list with full paths
+4. **Next Step**: Command block showing `/kiro:spec-requirements <feature-name>`
+5. **Notes**: Explain why only initialization was performed (2-3 sentences on phase separation)
+
+**Format Requirements**:
+- Use Markdown headings (##, ###)
+- Wrap commands in code blocks
+- Keep total output concise (under 250 words)
+- Use clear, professional language per `spec.json.language`
+
+## Safety & Fallback
+- **Ambiguous Feature Name**: If feature name generation is unclear, propose 2-3 options and ask user to select
+- **Template Missing**: If template files don't exist in `.kiro/settings/templates/specs/`, report error with specific missing file path and suggest checking repository setup
+- **Directory Conflict**: If feature name already exists, append numeric suffix (e.g., `feature-name-2`) and notify user of automatic conflict resolution
+- **Write Failure**: Report error with specific path and suggest checking permissions or disk space

+ 98 - 0
.claude/commands/kiro/spec-requirements.md

@@ -0,0 +1,98 @@
+---
+description: Generate comprehensive requirements for a specification
+allowed-tools: Bash, Glob, Grep, LS, Read, Write, Edit, MultiEdit, Update, WebSearch, WebFetch
+argument-hint: <feature-name>
+---
+
+# Requirements Generation
+
+<background_information>
+- **Mission**: Generate comprehensive, testable requirements in EARS format based on the project description from spec initialization
+- **Success Criteria**:
+  - Create complete requirements document aligned with steering context
+  - Follow the project's EARS patterns and constraints for all acceptance criteria
+  - Focus on core functionality without implementation details
+  - Update metadata to track generation status
+</background_information>
+
+<instructions>
+## Core Task
+Generate complete requirements for feature **$1** based on the project description in requirements.md.
+
+## Execution Steps
+
+1. **Load Context**:
+   - Read `.kiro/specs/$1/spec.json` for language and metadata
+   - Read `.kiro/specs/$1/requirements.md` for project description
+   - **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
+     - Default files: `structure.md`, `tech.md`, `product.md`
+     - All custom steering files (regardless of mode settings)
+     - This provides complete project memory and context
+
+2. **Read Guidelines**:
+   - Read `.kiro/settings/rules/ears-format.md` for EARS syntax rules
+   - Read `.kiro/settings/templates/specs/requirements.md` for document structure
+
+3. **Generate Requirements**:
+   - Create initial requirements based on project description
+   - Group related functionality into logical requirement areas
+   - Apply EARS format to all acceptance criteria
+   - Use language specified in spec.json
+
+4. **Update Metadata**:
+   - Set `phase: "requirements-generated"`
+   - Set `approvals.requirements.generated: true`
+   - Update `updated_at` timestamp
+
+## Important Constraints
+- Focus on WHAT, not HOW (no implementation details)
+- Requirements must be testable and verifiable
+- Choose appropriate subject for EARS statements (system/service name for software)
+- Generate initial version first, then iterate with user feedback (no sequential questions upfront)
+- Requirement headings in requirements.md MUST include a leading numeric ID only (for example: "Requirement 1", "1.", "2 Feature ..."); do not use alphabetic IDs like "Requirement A".
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context (spec, steering, rules, templates) before generation
+- **Write last**: Update requirements.md only after complete generation
+- Use **WebSearch/WebFetch** only if external domain knowledge needed
+
+## Output Description
+Provide output in the language specified in spec.json with:
+
+1. **Generated Requirements Summary**: Brief overview of major requirement areas (3-5 bullets)
+2. **Document Status**: Confirm requirements.md updated and spec.json metadata updated
+3. **Next Steps**: Guide user on how to proceed (approve and continue, or modify)
+
+**Format Requirements**:
+- Use Markdown headings for clarity
+- Include file paths in code blocks
+- Keep summary concise (under 300 words)
+
+## Safety & Fallback
+
+### Error Scenarios
+- **Missing Project Description**: If requirements.md lacks project description, ask user for feature details
+- **Ambiguous Requirements**: Propose initial version and iterate with user rather than asking many upfront questions
+- **Template Missing**: If template files don't exist, use inline fallback structure with warning
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+- **Incomplete Requirements**: After generation, explicitly ask user if requirements cover all expected functionality
+- **Steering Directory Empty**: Warn user that project context is missing and may affect requirement quality
+- **Non-numeric Requirement Headings**: If existing headings do not include a leading numeric ID (for example, they use "Requirement A"), normalize them to numeric IDs and keep that mapping consistent (never mix numeric and alphabetic labels).
+
+### Next Phase: Design Generation
+
+**If Requirements Approved**:
+- Review generated requirements at `.kiro/specs/$1/requirements.md`
+- **Optional Gap Analysis** (for existing codebases):
+  - Run `/kiro:validate-gap $1` to analyze implementation gap with current code
+  - Identifies existing components, integration points, and implementation strategy
+  - Recommended for brownfield projects; skip for greenfield
+- Then `/kiro:spec-design $1 -y` to proceed to design phase
+
+**If Modifications Needed**:
+- Provide feedback and re-run `/kiro:spec-requirements $1`
+
+**Note**: Approval is mandatory before proceeding to design phase.
+
+think

+ 87 - 0
.claude/commands/kiro/spec-status.md

@@ -0,0 +1,87 @@
+---
+description: Show specification status and progress
+allowed-tools: Bash, Read, Glob, Write, Edit, MultiEdit, Update
+argument-hint: <feature-name>
+---
+
+# Specification Status
+
+<background_information>
+- **Mission**: Display comprehensive status and progress for a specification
+- **Success Criteria**:
+  - Show current phase and completion status
+  - Identify next actions and blockers
+  - Provide clear visibility into progress
+</background_information>
+
+<instructions>
+## Core Task
+Generate status report for feature **$1** showing progress across all phases.
+
+## Execution Steps
+
+### Step 1: Load Spec Context
+- Read `.kiro/specs/$1/spec.json` for metadata and phase status
+- Read existing files: `requirements.md`, `design.md`, `tasks.md` (if they exist)
+- Check `.kiro/specs/$1/` directory for available files
+
+### Step 2: Analyze Status
+
+**Parse each phase**:
+- **Requirements**: Count requirements and acceptance criteria
+- **Design**: Check for architecture, components, diagrams
+- **Tasks**: Count completed vs total tasks (parse `- [x]` vs `- [ ]`)
+- **Approvals**: Check approval status in spec.json
+
+### Step 3: Generate Report
+
+Create report in the language specified in spec.json covering:
+1. **Current Phase & Progress**: Where the spec is in the workflow
+2. **Completion Status**: Percentage complete for each phase
+3. **Task Breakdown**: If tasks exist, show completed/remaining counts
+4. **Next Actions**: What needs to be done next
+5. **Blockers**: Any issues preventing progress
+
+## Critical Constraints
+- Use language from spec.json
+- Calculate accurate completion percentages
+- Identify specific next action commands
+</instructions>
+
+## Tool Guidance
+- **Read**: Load spec.json first, then other spec files as needed
+- **Parse carefully**: Extract completion data from tasks.md checkboxes
+- Use **Glob** to check which spec files exist
+
+## Output Description
+
+Provide status report in the language specified in spec.json:
+
+**Report Structure**:
+1. **Feature Overview**: Name, phase, last updated
+2. **Phase Status**: Requirements, Design, Tasks with completion %
+3. **Task Progress**: If tasks exist, show X/Y completed
+4. **Next Action**: Specific command to run next
+5. **Issues**: Any blockers or missing elements
+
+**Format**: Clear, scannable format with emojis (✅/⏳/❌) for status
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Spec Not Found**:
+- **Message**: "No spec found for `$1`. Check available specs in `.kiro/specs/`"
+- **Action**: List available spec directories
+
+**Incomplete Spec**:
+- **Warning**: Identify which files are missing
+- **Suggested Action**: Point to next phase command
+
+### List All Specs
+
+To see all available specs:
+- Run with no argument or use wildcard
+- Shows all specs in `.kiro/specs/` with their status
+
+think

+ 138 - 0
.claude/commands/kiro/spec-tasks.md

@@ -0,0 +1,138 @@
+---
+description: Generate implementation tasks for a specification
+allowed-tools: Read, Write, Edit, MultiEdit, Glob, Grep
+argument-hint: <feature-name> [-y] [--sequential]
+---
+
+# Implementation Tasks Generator
+
+<background_information>
+- **Mission**: Generate detailed, actionable implementation tasks that translate technical design into executable work items
+- **Success Criteria**:
+  - All requirements mapped to specific tasks
+  - Tasks properly sized (1-3 hours each)
+  - Clear task progression with proper hierarchy
+  - Natural language descriptions focused on capabilities
+</background_information>
+
+<instructions>
+## Core Task
+Generate implementation tasks for feature **$1** based on approved requirements and design.
+
+## Execution Steps
+
+### Step 1: Load Context
+
+**Read all necessary context**:
+- `.kiro/specs/$1/spec.json`, `requirements.md`, `design.md`
+- `.kiro/specs/$1/tasks.md` (if exists, for merge mode)
+- **Entire `.kiro/steering/` directory** for complete project memory
+
+**Validate approvals**:
+- If `-y` flag provided ($2 == "-y"): Auto-approve requirements and design in spec.json
+- Otherwise: Verify both approved (stop if not, see Safety & Fallback)
+- Determine sequential mode based on presence of `--sequential`
+
+### Step 2: Generate Implementation Tasks
+
+**Load generation rules and template**:
+- Read `.kiro/settings/rules/tasks-generation.md` for principles
+- If `sequential` is **false**: Read `.kiro/settings/rules/tasks-parallel-analysis.md` for parallel judgement criteria
+- Read `.kiro/settings/templates/specs/tasks.md` for format (supports `(P)` markers)
+
+**Generate task list following all rules**:
+- Use language specified in spec.json
+- Map all requirements to tasks
+- When documenting requirement coverage, list numeric requirement IDs only (comma-separated) without descriptive suffixes, parentheses, translations, or free-form labels
+- Ensure all design components included
+- Verify task progression is logical and incremental
+- Collapse single-subtask structures by promoting them to major tasks and avoid duplicating details on container-only major tasks (use template patterns accordingly)
+- Apply `(P)` markers to tasks that satisfy parallel criteria (omit markers in sequential mode)
+- Mark optional test coverage subtasks with `- [ ]*` only when they strictly cover acceptance criteria already satisfied by core implementation and can be deferred post-MVP
+- If existing tasks.md found, merge with new content
+
+### Step 3: Finalize
+
+**Write and update**:
+- Create/update `.kiro/specs/$1/tasks.md`
+- Update spec.json metadata:
+  - Set `phase: "tasks-generated"`
+  - Set `approvals.tasks.generated: true, approved: false`
+  - Set `approvals.requirements.approved: true`
+  - Set `approvals.design.approved: true`
+  - Update `updated_at` timestamp
+
+## Critical Constraints
+- **Follow rules strictly**: All principles in tasks-generation.md are mandatory
+- **Natural Language**: Describe what to do, not code structure details
+- **Complete Coverage**: ALL requirements must map to tasks
+- **Maximum 2 Levels**: Major tasks and sub-tasks only (no deeper nesting)
+- **Sequential Numbering**: Major tasks increment (1, 2, 3...), never repeat
+- **Task Integration**: Every task must connect to the system (no orphaned work)
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context, rules, and templates before generation
+- **Write last**: Generate tasks.md only after complete analysis and verification
+
+## Output Description
+
+Provide brief summary in the language specified in spec.json:
+
+1. **Status**: Confirm tasks generated at `.kiro/specs/$1/tasks.md`
+2. **Task Summary**: 
+   - Total: X major tasks, Y sub-tasks
+   - All Z requirements covered
+   - Average task size: 1-3 hours per sub-task
+3. **Quality Validation**:
+   - ✅ All requirements mapped to tasks
+   - ✅ Task dependencies verified
+   - ✅ Testing tasks included
+4. **Next Action**: Review tasks and proceed when ready
+
+**Format**: Concise (under 200 words)
+
+## Safety & Fallback
+
+### Error Scenarios
+
+**Requirements or Design Not Approved**:
+- **Stop Execution**: Cannot proceed without approved requirements and design
+- **User Message**: "Requirements and design must be approved before task generation"
+- **Suggested Action**: "Run `/kiro:spec-tasks $1 -y` to auto-approve both and proceed"
+
+**Missing Requirements or Design**:
+- **Stop Execution**: Both documents must exist
+- **User Message**: "Missing requirements.md or design.md at `.kiro/specs/$1/`"
+- **Suggested Action**: "Complete requirements and design phases first"
+
+**Incomplete Requirements Coverage**:
+- **Warning**: "Not all requirements mapped to tasks. Review coverage."
+- **User Action Required**: Confirm intentional gaps or regenerate tasks
+
+**Template/Rules Missing**:
+- **User Message**: "Template or rules files missing in `.kiro/settings/`"
+- **Fallback**: Use inline basic structure with warning
+- **Suggested Action**: "Check repository setup or restore template files"
+
+**Missing Numeric Requirement IDs**:
+- **Stop Execution**: All requirements in requirements.md MUST have numeric IDs. If any requirement lacks a numeric ID, stop and request that requirements.md be fixed before generating tasks.
+
+### Next Phase: Implementation
+
+**Before Starting Implementation**:
+- **IMPORTANT**: Clear conversation history and free up context before running `/kiro:spec-impl`
+- This applies when starting first task OR switching between tasks
+- Fresh context ensures clean state and proper task focus
+
+**If Tasks Approved**:
+- Execute specific task: `/kiro:spec-impl $1 1.1` (recommended: clear context between each task)
+- Execute multiple tasks: `/kiro:spec-impl $1 1.1,1.2` (use cautiously, clear context between tasks)
+- Without arguments: `/kiro:spec-impl $1` (executes all pending tasks - NOT recommended due to context bloat)
+
+**If Modifications Needed**:
+- Provide feedback and re-run `/kiro:spec-tasks $1`
+- Existing tasks used as reference (merge mode)
+
+**Note**: The implementation phase will guide you through executing tasks with appropriate context and validation.
+
+think

+ 127 - 0
.claude/commands/kiro/steering-custom.md

@@ -0,0 +1,127 @@
+---
+description: Create custom steering documents for specialized project contexts
+allowed-tools: Bash, Read, Write, Edit, MultiEdit, Glob, Grep, LS
+---
+
+# Kiro Custom Steering Creation
+
+<background_information>
+**Role**: Create specialized steering documents beyond core files (product, tech, structure).
+
+**Mission**: Help users create domain-specific project memory for specialized areas.
+
+**Success Criteria**:
+- Custom steering captures specialized patterns
+- Follows same granularity principles as core steering
+- Provides clear value for specific domain
+</background_information>
+
+<instructions>
+## Workflow
+
+1. **Ask user** for custom steering needs:
+   - Domain/topic (e.g., "API standards", "testing approach")
+   - Specific requirements or patterns to document
+
+2. **Check if template exists**:
+   - Load from `.kiro/settings/templates/steering-custom/{name}.md` if available
+   - Use as starting point, customize based on project
+
+3. **Analyze codebase** (JIT) for relevant patterns:
+   - **Glob** for related files
+   - **Read** for existing implementations
+   - **Grep** for specific patterns
+
+4. **Generate custom steering**:
+   - Follow template structure if available
+   - Apply principles from `.kiro/settings/rules/steering-principles.md`
+   - Focus on patterns, not exhaustive lists
+   - Keep to 100-200 lines (2-3 minute read)
+
+5. **Create file** in `.kiro/steering/{name}.md`
+
+## Available Templates
+
+Templates available in `.kiro/settings/templates/steering-custom/`:
+
+1. **api-standards.md** - REST/GraphQL conventions, error handling
+2. **testing.md** - Test organization, mocking, coverage
+3. **security.md** - Auth patterns, input validation, secrets
+4. **database.md** - Schema design, migrations, query patterns
+5. **error-handling.md** - Error types, logging, retry strategies
+6. **authentication.md** - Auth flows, permissions, session management
+7. **deployment.md** - CI/CD, environments, rollback procedures
+
+Load template when needed, customize for project.
+
+## Steering Principles
+
+From `.kiro/settings/rules/steering-principles.md`:
+
+- **Patterns over lists**: Document patterns, not every file/component
+- **Single domain**: One topic per file
+- **Concrete examples**: Show patterns with code
+- **Maintainable size**: 100-200 lines typical
+- **Security first**: Never include secrets or sensitive data
+
+</instructions>
+
+## Tool guidance
+
+- **Read**: Load template, analyze existing code
+- **Glob**: Find related files for pattern analysis
+- **Grep**: Search for specific patterns
+- **LS**: Understand relevant structure
+
+**JIT Strategy**: Load template only when creating that type of steering.
+
+## Output description
+
+Chat summary with file location (file created directly).
+
+```
+✅ Custom Steering Created
+
+## Created:
+- .kiro/steering/api-standards.md
+
+## Based On:
+- Template: api-standards.md
+- Analyzed: src/api/ directory patterns
+- Extracted: REST conventions, error format
+
+## Content:
+- Endpoint naming patterns
+- Request/response format
+- Error handling conventions
+- Authentication approach
+
+Review and customize as needed.
+```
+
+## Examples
+
+### Success: API Standards
+**Input**: "Create API standards steering"  
+**Action**: Load template, analyze src/api/, extract patterns  
+**Output**: api-standards.md with project-specific REST conventions
+
+### Success: Testing Strategy
+**Input**: "Document our testing approach"  
+**Action**: Load template, analyze test files, extract patterns  
+**Output**: testing.md with test organization and mocking strategies
+
+## Safety & Fallback
+
+- **No template**: Generate from scratch based on domain knowledge
+- **Security**: Never include secrets (load principles)
+- **Validation**: Ensure doesn't duplicate core steering content
+
+## Notes
+
+- Templates are starting points, customize for project
+- Follow same granularity principles as core steering
+- All steering files loaded as project memory
+- Custom files equally important as core files
+- Avoid documenting agent-specific tooling directories (e.g. `.cursor/`, `.gemini/`, `.claude/`)
+- Light references to `.kiro/specs/` and `.kiro/steering/` are acceptable; avoid other `.kiro/` directories

+ 143 - 0
.claude/commands/kiro/steering.md

@@ -0,0 +1,143 @@
+---
+description: Manage .kiro/steering/ as persistent project knowledge
+allowed-tools: Bash, Read, Write, Edit, MultiEdit, Glob, Grep, LS
+---
+
+# Kiro Steering Management
+
+<background_information>
+**Role**: Maintain `.kiro/steering/` as persistent project memory.
+
+**Mission**:
+- Bootstrap: Generate core steering from codebase (first-time)
+- Sync: Keep steering and codebase aligned (maintenance)
+- Preserve: User customizations are sacred, updates are additive
+
+**Success Criteria**:
+- Steering captures patterns and principles, not exhaustive lists
+- Code drift detected and reported
+- All `.kiro/steering/*.md` treated equally (core + custom)
+</background_information>
+
+<instructions>
+## Scenario Detection
+
+Check `.kiro/steering/` status:
+
+**Bootstrap Mode**: Empty OR missing core files (product.md, tech.md, structure.md)  
+**Sync Mode**: All core files exist
+
+---
+
+## Bootstrap Flow
+
+1. Load templates from `.kiro/settings/templates/steering/`
+2. Analyze codebase (JIT):
+   - `Glob` for source files
+   - `Read` for README, package.json, etc.
+   - `Grep` for patterns
+3. Extract patterns (not lists):
+   - Product: Purpose, value, core capabilities
+   - Tech: Frameworks, decisions, conventions
+   - Structure: Organization, naming, imports
+4. Generate steering files (follow templates)
+5. Load principles from `.kiro/settings/rules/steering-principles.md`
+6. Present summary for review
+
+**Focus**: Patterns that guide decisions, not catalogs of files/dependencies.
+
+---
+
+## Sync Flow
+
+1. Load all existing steering (`.kiro/steering/*.md`)
+2. Analyze codebase for changes (JIT)
+3. Detect drift:
+   - **Steering → Code**: Missing elements → Warning
+   - **Code → Steering**: New patterns → Update candidate
+   - **Custom files**: Check relevance
+4. Propose updates (additive, preserve user content)
+5. Report: Updates, warnings, recommendations
+
+**Update Philosophy**: Add, don't replace. Preserve user sections.
+
+---
+
+## Granularity Principle
+
+From `.kiro/settings/rules/steering-principles.md`:
+
+> "If new code follows existing patterns, steering shouldn't need updating."
+
+Document patterns and principles, not exhaustive lists.
+
+**Bad**: List every file in directory tree  
+**Good**: Describe organization pattern with examples
+
+</instructions>
+
+## Tool guidance
+
+- `Glob`: Find source/config files
+- `Read`: Read steering, docs, configs
+- `Grep`: Search patterns
+- `LS`: Analyze structure
+
+**JIT Strategy**: Fetch when needed, not upfront.
+
+## Output description
+
+Chat summary only (files updated directly).
+
+### Bootstrap:
+```
+✅ Steering Created
+
+## Generated:
+- product.md: [Brief description]
+- tech.md: [Key stack]
+- structure.md: [Organization]
+
+Review and approve as Source of Truth.
+```
+
+### Sync:
+```
+✅ Steering Updated
+
+## Changes:
+- tech.md: React 18 → 19
+- structure.md: Added API pattern
+
+## Code Drift:
+- Components not following import conventions
+
+## Recommendations:
+- Consider api-standards.md
+```
+
+## Examples
+
+### Bootstrap
+**Input**: Empty steering, React TypeScript project  
+**Output**: 3 files with patterns - "Feature-first", "TypeScript strict", "React 19"
+
+### Sync
+**Input**: Existing steering, new `/api` directory  
+**Output**: Updated structure.md, flagged non-compliant files, suggested api-standards.md
+
+## Safety & Fallback
+
+- **Security**: Never include keys, passwords, secrets (see principles)
+- **Uncertainty**: Report both states, ask user
+- **Preservation**: Add rather than replace when in doubt
+
+## Notes
+
+- All `.kiro/steering/*.md` loaded as project memory
+- Templates and principles are external for customization
+- Focus on patterns, not catalogs
+- "Golden Rule": New code following patterns shouldn't require steering updates
+- Avoid documenting agent-specific tooling directories (e.g. `.cursor/`, `.gemini/`, `.claude/`)
+- `.kiro/settings/` content should NOT be documented in steering files (settings are metadata, not project knowledge)
+- Light references to `.kiro/specs/` and `.kiro/steering/` are acceptable; avoid other `.kiro/` directories

+ 92 - 0
.claude/commands/kiro/validate-design.md

@@ -0,0 +1,92 @@
+---
+description: Interactive technical design quality review and validation
+allowed-tools: Read, Glob, Grep
+argument-hint: <feature-name>
+---
+
+# Technical Design Validation
+
+<background_information>
+- **Mission**: Conduct interactive quality review of technical design to ensure readiness for implementation
+- **Success Criteria**:
+  - Critical issues identified (maximum 3 most important concerns)
+  - Balanced assessment with strengths recognized
+  - Clear GO/NO-GO decision with rationale
+  - Actionable feedback for improvements if needed
+</background_information>
+
+<instructions>
+## Core Task
+Interactive design quality review for feature **$1** based on approved requirements and design document.
+
+## Execution Steps
+
+1. **Load Context**:
+   - Read `.kiro/specs/$1/spec.json` for language and metadata
+   - Read `.kiro/specs/$1/requirements.md` for requirements
+   - Read `.kiro/specs/$1/design.md` for design document
+   - **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
+     - Default files: `structure.md`, `tech.md`, `product.md`
+     - All custom steering files (regardless of mode settings)
+     - This provides complete project memory and context
+
+2. **Read Review Guidelines**:
+   - Read `.kiro/settings/rules/design-review.md` for review criteria and process
+
+3. **Execute Design Review**:
+   - Follow design-review.md process: Analysis → Critical Issues → Strengths → GO/NO-GO
+   - Limit to 3 most important concerns
+   - Engage interactively with user
+   - Use language specified in spec.json for output
+
+4. **Provide Decision and Next Steps**:
+   - Clear GO/NO-GO decision with rationale
+   - Guide user on proceeding based on decision
+
+## Important Constraints
+- **Quality assurance, not perfection seeking**: Accept acceptable risk
+- **Critical focus only**: Maximum 3 issues, only those significantly impacting success
+- **Interactive approach**: Engage in dialogue, not one-way evaluation
+- **Balanced assessment**: Recognize both strengths and weaknesses
+- **Actionable feedback**: All suggestions must be implementable
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context (spec, steering, rules) before review
+- **Grep if needed**: Search codebase for pattern validation or integration checks
+- **Interactive**: Engage with user throughout the review process
+
+## Output Description
+Provide output in the language specified in spec.json with:
+
+1. **Review Summary**: Brief overview (2-3 sentences) of design quality and readiness
+2. **Critical Issues**: Maximum 3, following design-review.md format
+3. **Design Strengths**: 1-2 positive aspects
+4. **Final Assessment**: GO/NO-GO decision with rationale and next steps
+
+**Format Requirements**:
+- Use Markdown headings for clarity
+- Follow design-review.md output format
+- Keep summary concise
+
+## Safety & Fallback
+
+### Error Scenarios
+- **Missing Design**: If design.md doesn't exist, stop with message: "Run `/kiro:spec-design $1` first to generate design document"
+- **Design Not Generated**: If design phase not marked as generated in spec.json, warn but proceed with review
+- **Empty Steering Directory**: Warn user that project context is missing and may affect review quality
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+
+### Next Phase: Task Generation
+
+**If Design Passes Validation (GO Decision)**:
+- Review feedback and apply changes if needed
+- Run `/kiro:spec-tasks $1` to generate implementation tasks
+- Or `/kiro:spec-tasks $1 -y` to auto-approve and proceed directly
+
+**If Design Needs Revision (NO-GO Decision)**:
+- Address critical issues identified
+- Re-run `/kiro:spec-design $1` with improvements
+- Re-validate with `/kiro:validate-design $1`
+
+**Note**: Design validation is recommended but optional. Quality review helps catch issues early.

+ 88 - 0
.claude/commands/kiro/validate-gap.md

@@ -0,0 +1,88 @@
+---
+description: Analyze implementation gap between requirements and existing codebase
+allowed-tools: Bash, Glob, Grep, Read, Write, Edit, MultiEdit, WebSearch, WebFetch
+argument-hint: <feature-name>
+---
+
+# Implementation Gap Validation
+
+<background_information>
+- **Mission**: Analyze the gap between requirements and existing codebase to inform implementation strategy
+- **Success Criteria**:
+  - Comprehensive understanding of existing codebase patterns and components
+  - Clear identification of missing capabilities and integration challenges
+  - Multiple viable implementation approaches evaluated
+  - Technical research needs identified for design phase
+</background_information>
+
+<instructions>
+## Core Task
+Analyze implementation gap for feature **$1** based on approved requirements and existing codebase.
+
+## Execution Steps
+
+1. **Load Context**:
+   - Read `.kiro/specs/$1/spec.json` for language and metadata
+   - Read `.kiro/specs/$1/requirements.md` for requirements
+   - **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
+     - Default files: `structure.md`, `tech.md`, `product.md`
+     - All custom steering files (regardless of mode settings)
+     - This provides complete project memory and context
+
+2. **Read Analysis Guidelines**:
+   - Read `.kiro/settings/rules/gap-analysis.md` for comprehensive analysis framework
+
+3. **Execute Gap Analysis**:
+   - Follow gap-analysis.md framework for thorough investigation
+   - Analyze existing codebase using Grep and Read tools
+   - Use WebSearch/WebFetch for external dependency research if needed
+   - Evaluate multiple implementation approaches (extend/new/hybrid)
+   - Use language specified in spec.json for output
+
+4. **Generate Analysis Document**:
+   - Create comprehensive gap analysis following the output guidelines in gap-analysis.md
+   - Present multiple viable options with trade-offs
+   - Flag areas requiring further research
+
+## Important Constraints
+- **Information over Decisions**: Provide analysis and options, not final implementation choices
+- **Multiple Options**: Present viable alternatives when applicable
+- **Thorough Investigation**: Use tools to deeply understand existing codebase
+- **Explicit Gaps**: Clearly flag areas needing research or investigation
+</instructions>
+
+## Tool Guidance
+- **Read first**: Load all context (spec, steering, rules) before analysis
+- **Grep extensively**: Search codebase for patterns, conventions, and integration points
+- **WebSearch/WebFetch**: Research external dependencies and best practices when needed
+- **Write last**: Generate analysis only after complete investigation
+
+## Output Description
+Provide output in the language specified in spec.json with:
+
+1. **Analysis Summary**: Brief overview (3-5 bullets) of scope, challenges, and recommendations
+2. **Document Status**: Confirm analysis approach used
+3. **Next Steps**: Guide user on proceeding to design phase
+
+**Format Requirements**:
+- Use Markdown headings for clarity
+- Keep summary concise (under 300 words)
+- Detailed analysis follows gap-analysis.md output guidelines
+
+## Safety & Fallback
+
+### Error Scenarios
+- **Missing Requirements**: If requirements.md doesn't exist, stop with message: "Run `/kiro:spec-requirements $1` first to generate requirements"
+- **Requirements Not Approved**: If requirements not approved, warn user but proceed (gap analysis can inform requirement revisions)
+- **Empty Steering Directory**: Warn user that project context is missing and may affect analysis quality
+- **Complex Integration Unclear**: Flag for comprehensive research in design phase rather than blocking
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+
+### Next Phase: Design Generation
+
+**If Gap Analysis Complete**:
+- Review gap analysis insights
+- Run `/kiro:spec-design $1` to create technical design document
+- Or `/kiro:spec-design $1 -y` to auto-approve requirements and proceed directly
+
+**Note**: Gap analysis is optional but recommended for brownfield projects to inform design decisions.

+ 138 - 0
.claude/commands/kiro/validate-impl.md

@@ -0,0 +1,138 @@
+---
+description: Validate implementation against requirements, design, and tasks
+allowed-tools: Bash, Glob, Grep, Read, LS
+argument-hint: [feature-name] [task-numbers]
+---
+
+# Implementation Validation
+
+<background_information>
+- **Mission**: Verify that implementation aligns with approved requirements, design, and tasks
+- **Success Criteria**:
+  - All specified tasks marked as completed
+  - Tests exist and pass for implemented functionality
+  - Requirements traceability confirmed (EARS requirements covered)
+  - Design structure reflected in implementation
+  - No regressions in existing functionality
+</background_information>
+
+<instructions>
+## Core Task
+Validate implementation for feature(s) and task(s) based on approved specifications.
+
+## Execution Steps
+
+### 1. Detect Validation Target
+
+**If no arguments provided** (`$1` empty):
+- Parse conversation history for `/kiro:spec-impl <feature> [tasks]` commands
+- Extract feature names and task numbers from each execution
+- Aggregate all implemented tasks by feature
+- Report detected implementations (e.g., "user-auth: 1.1, 1.2, 1.3")
+- If no history found, scan `.kiro/specs/` for features with completed tasks `[x]`
+
+**If feature provided** (`$1` present, `$2` empty):
+- Use specified feature
+- Detect all completed tasks `[x]` in `.kiro/specs/$1/tasks.md`
+
+**If both feature and tasks provided** (`$1` and `$2` present):
+- Validate specified feature and tasks only (e.g., `user-auth 1.1,1.2`)
+
+### 2. Load Context
+
+For each detected feature:
+- Read `.kiro/specs/<feature>/spec.json` for metadata
+- Read `.kiro/specs/<feature>/requirements.md` for requirements
+- Read `.kiro/specs/<feature>/design.md` for design structure
+- Read `.kiro/specs/<feature>/tasks.md` for task list
+- **Load ALL steering context**: Read entire `.kiro/steering/` directory including:
+  - Default files: `structure.md`, `tech.md`, `product.md`
+  - All custom steering files (regardless of mode settings)
+
+### 3. Execute Validation
+
+For each task, verify:
+
+#### Task Completion Check
+- Checkbox is `[x]` in tasks.md
+- If not completed, flag as "Task not marked complete"
+
+#### Test Coverage Check
+- Tests exist for task-related functionality
+- Tests pass (no failures or errors)
+- Use Bash to run test commands (e.g., `npm test`, `pytest`)
+- If tests fail or don't exist, flag as "Test coverage issue"
+
+#### Requirements Traceability
+- Identify EARS requirements related to the task
+- Use Grep to search implementation for evidence of requirement coverage
+- If requirement not traceable to code, flag as "Requirement not implemented"
+
+#### Design Alignment
+- Check if design.md structure is reflected in implementation
+- Verify key interfaces, components, and modules exist
+- Use Grep/LS to confirm file structure matches design
+- If misalignment found, flag as "Design deviation"
+
+#### Regression Check
+- Run full test suite (if available)
+- Verify no existing tests are broken
+- If regressions detected, flag as "Regression detected"
+
+### 4. Generate Report
+
+Provide summary in the language specified in spec.json:
+- Validation summary by feature
+- Coverage report (tasks, requirements, design)
+- Issues and deviations with severity (Critical/Warning)
+- GO/NO-GO decision
+
+## Important Constraints
+- **Conversation-aware**: Prioritize conversation history for auto-detection
+- **Non-blocking warnings**: Design deviations are warnings unless critical
+- **Test-first focus**: Test coverage is mandatory for GO decision
+- **Traceability required**: All requirements must be traceable to implementation
+</instructions>
+
+## Tool Guidance
+- **Conversation parsing**: Extract `/kiro:spec-impl` patterns from history
+- **Read context**: Load all specs and steering before validation
+- **Bash for tests**: Execute test commands to verify pass status
+- **Grep for traceability**: Search codebase for requirement evidence
+- **LS/Glob for structure**: Verify file structure matches design
+
+## Output Description
+
+Provide output in the language specified in spec.json with:
+
+1. **Detected Target**: Features and tasks being validated (if auto-detected)
+2. **Validation Summary**: Brief overview per feature (pass/fail counts)
+3. **Issues**: List of validation failures with severity and location
+4. **Coverage Report**: Requirements/design/task coverage percentages
+5. **Decision**: GO (ready for next phase) / NO-GO (needs fixes)
+
+**Format Requirements**:
+- Use Markdown headings and tables for clarity
+- Flag critical issues with ⚠️ or 🔴
+- Keep summary concise (under 400 words)
+
+## Safety & Fallback
+
+### Error Scenarios
+- **No Implementation Found**: If no `/kiro:spec-impl` in history and no `[x]` tasks, report "No implementations detected"
+- **Test Command Unknown**: If test framework unclear, warn and skip test validation (manual verification required)
+- **Missing Spec Files**: If spec.json/requirements.md/design.md missing, stop with error
+- **Language Undefined**: Default to English (`en`) if spec.json doesn't specify language
+
+### Next Steps Guidance
+
+**If GO Decision**:
+- Implementation validated and ready
+- Proceed to deployment or next feature
+
+**If NO-GO Decision**:
+- Address critical issues listed
+- Re-run `/kiro:spec-impl <feature> [tasks]` for fixes
+- Re-validate with `/kiro:validate-impl [feature] [tasks]`
+
+**Note**: Validation is recommended after implementation to ensure spec alignment and quality.

+ 116 - 0
.claude/commands/learn.md

@@ -0,0 +1,116 @@
+---
+name: learn
+description: /learn - Pattern Extraction for GROWI
+---
+
+# /learn - Pattern Extraction for GROWI
+
+Extract reusable problem-solving patterns from development sessions and save them as auto-invoked Skills.
+
+## Core Purpose
+
+Capture "non-trivial problems" solved during GROWI development, converting them into reusable skills that will be automatically applied in future sessions.
+
+## Pattern Categories to Extract
+
+Focus on four key areas:
+
+1. **Error Resolution** — Document what went wrong, root causes, and fixes applicable to similar issues (e.g., Mongoose query pitfalls, Next.js hydration errors, TypeScript strict mode issues)
+
+2. **Debugging Techniques** — Capture non-obvious diagnostic steps and tool combinations (e.g., MongoDB query profiling, React DevTools with Jotai, Vitest debugging patterns)
+
+3. **Workarounds** — Record library quirks, API limitations, and version-specific solutions (e.g., @headless-tree edge cases, Socket.io reconnection handling, SWR cache invalidation)
+
+4. **GROWI Patterns** — Note codebase conventions, architecture decisions, and integration approaches (e.g., feature-based structure, Jotai + Socket.io sync, API v3 design patterns)
+
+## Skill File Structure
+
+Extracted patterns are saved in **appropriate skill directories** based on the scope of the pattern:
+
+**Workspace-specific patterns**:
+- `apps/{workspace}/.claude/skills/learned/{topic-name}/SKILL.md`
+- `packages/{package}/.claude/skills/learned/{topic-name}/SKILL.md`
+- Examples: patterns specific to a single app or package
+
+**Global patterns** (monorepo-wide):
+- `.claude/skills/learned/{topic-name}/SKILL.md`
+- Examples: patterns applicable across all workspaces
+
+### File Template
+
+```yaml
+---
+name: descriptive-name
+description: Brief description (auto-invoked when working on related code)
+---
+
+## Problem
+[What was the issue]
+
+## Solution
+[How it was solved]
+
+## Example
+[Code snippet or scenario]
+
+## When to Apply
+[Specific conditions where this pattern is useful]
+```
+
+## GROWI-Specific Examples
+
+Topics commonly learned in GROWI development:
+
+**Apps/app-specific** (`apps/app/.claude/skills/learned/`):
+- `page-save-origin-semantics` — Origin-based conflict detection for collaborative editing
+- `socket-jotai-integration` — Real-time state synchronization patterns
+- `api-v3-error-handling` — RESTful API error response patterns
+- `mongodb-query-optimization` — Mongoose indexing and aggregation patterns
+
+**Global monorepo patterns** (`.claude/skills/learned/`):
+- `virtualized-tree-patterns` — @headless-tree + @tanstack/react-virtual optimizations (if used across apps)
+- `jotai-atom-composition` — Derived atoms and state composition (if shared pattern)
+- `turborepo-cache-invalidation` — Build cache debugging techniques
+- `pnpm-workspace-dependencies` — Workspace dependency resolution issues
+
+## Quality Guidelines
+
+**Extract:**
+- Patterns that will save time in future sessions
+- Non-obvious solutions worth remembering
+- Integration techniques between GROWI's tech stack
+- Performance optimizations with measurable impact
+
+**Avoid:**
+- Trivial fixes (typos, syntax errors)
+- One-time issues (service outages, environment-specific problems)
+- Information already documented in existing Skills
+- Feature-specific details (these stay in code comments)
+
+## Workflow
+
+1. User triggers `/learn` after solving a complex problem
+2. Review the session to identify valuable patterns
+3. Draft skill file(s) with clear structure
+4. **Autonomously determine the appropriate directory**:
+   - Analyze the pattern's scope (which files/modules were involved)
+   - If pattern is specific to a workspace in `apps/*` or `packages/*`:
+     - Save to `{workspace}/.claude/skills/learned/{topic-name}/SKILL.md`
+   - If pattern is applicable across multiple workspaces:
+     - Save to `.claude/skills/learned/{topic-name}/SKILL.md`
+5. Skills automatically apply in future sessions when working on related code
+
+### Directory Selection Logic
+
+**Workspace-specific** (save to `{workspace}/.claude/skills/learned/`):
+- Pattern involves workspace-specific concepts, models, or APIs
+- References files primarily in one `apps/*` or `packages/*` directory
+- Example: Page save logic in `apps/app` → `apps/app/.claude/skills/learned/`
+
+**Global** (save to `.claude/skills/learned/`):
+- Pattern applies across multiple workspaces
+- Involves monorepo-wide tools (Turborepo, pnpm, Biome, Vitest)
+- Shared coding patterns or architectural principles
+- Example: Turborepo caching pitfall → `.claude/skills/learned/`
+
+Learned skills are automatically invoked based on their description when working on related code.

+ 287 - 0
.claude/commands/tdd.md

@@ -0,0 +1,287 @@
+---
+name: tdd
+description: Enforce test-driven development workflow. Scaffold interfaces, generate tests FIRST, then implement minimal code to pass. Ensure 80%+ coverage.
+---
+
+# TDD Command
+
+This command enforces test-driven development methodology for GROWI.
+
+---
+
+## ⚠️ CRITICAL: MANDATORY TDD PROTOCOL
+
+**THIS IS NOT OPTIONAL. THIS IS A TEAM REQUIREMENT.**
+
+### Non-Negotiable Rules
+
+1. **ALL phases of TDD MUST be followed: RED → GREEN → REFACTOR**
+2. **Tests MUST be written BEFORE any implementation code (RED phase)**
+3. **Implementation MUST be minimal and only to pass tests (GREEN phase)**
+4. **Tests MUST remain passing during refactoring (REFACTOR phase)**
+5. **NEVER skip any phase of the TDD cycle**
+6. **If you believe you cannot follow TDD, STOP and ask the user IMMEDIATELY**
+
+### Violation Protocol
+
+If you find yourself about to deviate from the TDD cycle:
+
+1. **STOP IMMEDIATELY** - Do not proceed with any phase out of order
+2. **ASK THE USER** - Explain why you think following TDD is difficult
+3. **WAIT FOR APPROVAL** - Do not proceed without explicit user consent
+4. **DOCUMENT THE EXCEPTION** - If approved, document why TDD was skipped
+
+**Example of required user confirmation:**
+
+```
+"This task requires writing integration tests with complex setup
+(database mocks, authentication, etc.).
+
+I have two options:
+
+Option A: Follow strict TDD (recommended)
+- RED: Write failing integration test first
+- GREEN: Implement minimal code to pass
+- REFACTOR: Improve code while keeping tests green
+- May take 15-30 minutes for test setup
+- Ensures proper test coverage
+
+Option B: Skip TDD for this specific change
+- Implement directly
+- Verify with existing test suite only
+- Risk: No specific test for this change
+
+Which approach would you prefer?"
+```
+
+**Never assume. Always ask.**
+
+---
+
+## Pre-Implementation Checklist
+
+Before writing ANY code, confirm:
+
+- [ ] I have read and understood the task requirements
+- [ ] I am starting with Step 1: Write a FAILING test (RED phase)
+- [ ] I have NOT written any implementation code yet
+- [ ] I will follow all phases: RED → GREEN → REFACTOR
+- [ ] If I cannot follow the complete TDD cycle, I have ASKED THE USER for guidance
+
+**If any checkbox is unchecked, STOP and ask the user.**
+
+---
+
+## What This Command Does
+
+1. **Scaffold Interfaces** - Define types/interfaces first
+2. **Generate Tests First** - Write failing tests (RED)
+3. **Implement Minimal Code** - Write just enough to pass (GREEN)
+4. **Refactor** - Improve code while keeping tests green (REFACTOR)
+5. **Verify Coverage** - Ensure 80%+ test coverage
+
+## When to Use
+
+Use `/tdd` when:
+- Implementing new features
+- Adding new functions/components
+- Fixing bugs (write test that reproduces bug first)
+- Refactoring existing code
+- Building critical business logic
+
+## TDD Cycle
+
+```
+RED → GREEN → REFACTOR → REPEAT
+
+RED:      Write a failing test
+GREEN:    Write minimal code to pass
+REFACTOR: Improve code, keep tests passing
+REPEAT:   Next feature/scenario
+```
+
+## Example Usage
+
+```
+User: /tdd I need a function to validate page paths
+
+Agent:
+```
+
+# TDD Session: Page Path Validator
+
+## Step 1: Define Interface (SCAFFOLD)
+
+```typescript
+// src/utils/page-path-validator.ts
+export interface ValidationResult {
+  isValid: boolean;
+  errorMessage?: string;
+}
+
+export function validatePagePath(path: string): ValidationResult {
+  throw new Error('Not implemented');
+}
+```
+
+## Step 2: Write Failing Test (RED)
+
+**⚠️ CHECKPOINT: Confirm you have NOT written any implementation code yet.**
+
+```typescript
+// src/utils/page-path-validator.spec.ts
+describe('validatePagePath', () => {
+  it('should return valid for correct path', () => {
+    const result = validatePagePath('/user/docs/page');
+    expect(result.isValid).toBe(true);
+  });
+
+  it('should reject path without leading slash', () => {
+    const result = validatePagePath('user/docs');
+    expect(result.isValid).toBe(false);
+    expect(result.errorMessage).toBeDefined();
+  });
+
+  it('should reject empty path', () => {
+    const result = validatePagePath('');
+    expect(result.isValid).toBe(false);
+  });
+});
+```
+
+## Step 3: Run Tests - Verify FAIL
+
+**⚠️ MANDATORY: Tests MUST fail before proceeding to implementation.**
+
+```bash
+turbo run test --filter @growi/app -- src/utils/page-path-validator.spec.ts
+
+FAIL src/utils/page-path-validator.spec.ts
+  ✕ should return valid for correct path
+    Error: Not implemented
+```
+
+**✅ CHECKPOINT PASSED: Tests fail as expected. Ready to implement.**
+
+**❌ If tests pass or don't run: STOP. Fix the test first.**
+
+## Step 4: Implement Minimal Code (GREEN)
+
+**⚠️ CHECKPOINT: Only write the MINIMUM code needed to pass the tests.**
+
+```typescript
+export function validatePagePath(path: string): ValidationResult {
+  if (!path) {
+    return { isValid: false, errorMessage: 'Path cannot be empty' };
+  }
+  if (!path.startsWith('/')) {
+    return { isValid: false, errorMessage: 'Path must start with /' };
+  }
+  return { isValid: true };
+}
+```
+
+## Step 5: Run Tests - Verify PASS
+
+**⚠️ MANDATORY: ALL tests MUST pass before proceeding to refactoring.**
+
+```bash
+turbo run test --filter @growi/app -- src/utils/page-path-validator.spec.ts
+
+PASS  ✓ All tests passing!
+```
+
+**✅ CHECKPOINT PASSED: Ready to refactor if needed.**
+
+**❌ If tests fail: Fix implementation, do NOT move to refactoring.**
+
+## Step 6: Check Coverage
+
+**⚠️ MANDATORY: Verify test coverage meets requirements (80% minimum).**
+
+```bash
+cd {package_dir} && pnpm vitest run --coverage src/utils/page-path-validator.spec.ts
+
+Coverage: 100% ✅ (Target: 80%)
+```
+
+**✅ TDD CYCLE COMPLETE: All phases completed successfully.**
+
+- ✅ RED: Failing tests written
+- ✅ GREEN: Implementation passes tests
+- ✅ REFACTOR: Code improved (if needed)
+- ✅ COVERAGE: 80%+ achieved
+
+## TDD Best Practices
+
+**DO:**
+- ✅ Write the test FIRST, before any implementation
+- ✅ Run tests and verify they FAIL before implementing
+- ✅ Write minimal code to make tests pass
+- ✅ Refactor only after tests are green
+- ✅ Add edge cases and error scenarios
+- ✅ Aim for 80%+ coverage (100% for critical code)
+- ✅ Use `vitest-mock-extended` for type-safe mocks
+
+**DON'T:**
+- ❌ Write implementation before tests
+- ❌ Skip running tests after each change
+- ❌ Write too much code at once
+- ❌ Ignore failing tests
+- ❌ Test implementation details (test behavior)
+- ❌ Mock everything (prefer integration tests)
+
+## Test Types to Include
+
+**Unit Tests** (`*.spec.ts`):
+- Happy path scenarios
+- Edge cases (empty, null, max values)
+- Error conditions
+- Boundary values
+
+**Integration Tests** (`*.integ.ts`):
+- API endpoints
+- Database operations
+- External service calls
+
+**Component Tests** (`*.spec.tsx`):
+- React components with hooks
+- User interactions
+- Jotai state integration
+
+## Coverage Requirements
+
+- **80% minimum** for all code
+- **100% required** for:
+  - Authentication/authorization logic
+  - Security-critical code
+  - Core business logic (page operations, permissions)
+  - Data validation utilities
+
+## Important Notes
+
+**MANDATORY - NO EXCEPTIONS**: The complete TDD cycle MUST be followed:
+
+1. **RED** - Write failing test FIRST
+2. **GREEN** - Implement minimal code to pass the test
+3. **REFACTOR** - Improve code while keeping tests green
+
+**Absolute Requirements:**
+- ❌ NEVER skip the RED phase
+- ❌ NEVER skip the GREEN phase
+- ❌ NEVER skip the REFACTOR phase
+- ❌ NEVER write implementation code before tests
+- ❌ NEVER proceed without explicit user approval if you cannot follow TDD
+
+**If you violate these rules:**
+1. STOP immediately
+2. Discard any implementation code written before tests
+3. Inform the user of the violation
+4. Start over with RED phase
+
+**This is a team development standard. Violations are not acceptable.**
+
+## Related Skills
+
+This command uses patterns from:
+- **growi-testing-patterns** - Vitest, React Testing Library, vitest-mock-extended

+ 217 - 0
.claude/rules/coding-style.md

@@ -0,0 +1,217 @@
+# Coding Style
+
+General coding standards and best practices. These rules apply to all code in the GROWI monorepo.
+
+## Immutability (CRITICAL)
+
+ALWAYS create new objects, NEVER mutate:
+
+```typescript
+// ❌ WRONG: Mutation
+function updateUser(user, name) {
+  user.name = name  // MUTATION!
+  return user
+}
+
+// ✅ CORRECT: Immutability
+function updateUser(user, name) {
+  return {
+    ...user,
+    name
+  }
+}
+
+// ✅ CORRECT: Array immutable update
+const updatedPages = pages.map(p => p.id === id ? { ...p, title: newTitle } : p);
+
+// ❌ WRONG: Array mutation
+pages[index].title = newTitle;
+```
+
+## File Organization
+
+MANY SMALL FILES > FEW LARGE FILES:
+
+- High cohesion, low coupling
+- 200-400 lines typical, 800 max
+- Functions < 50 lines
+- Extract utilities from large components
+- Organize by feature/domain, not by type
+
+## Naming Conventions
+
+### Variables and Functions
+
+- **camelCase** for variables and functions
+- **PascalCase** for classes, interfaces, types, React components
+- **UPPER_SNAKE_CASE** for constants
+
+```typescript
+const pageId = '123';
+const MAX_PAGE_SIZE = 1000;
+
+function getPageById(id: string) { }
+class PageService { }
+interface PageData { }
+type PageStatus = 'draft' | 'published';
+```
+
+### Files and Directories
+
+- **PascalCase** for React components: `Button.tsx`, `PageTree.tsx`
+- **kebab-case** for utilities: `page-utils.ts`
+- **lowercase** for directories: `features/page-tree/`, `utils/`
+
+## Export Style
+
+**Prefer named exports** over default exports:
+
+```typescript
+// ✅ Good: Named exports
+export const MyComponent = () => { };
+export function myFunction() { }
+export class MyClass { }
+
+// ❌ Avoid: Default exports
+export default MyComponent;
+```
+
+**Why?**
+- Better refactoring (IDEs can reliably rename across files)
+- Better tree shaking
+- Explicit imports improve readability
+- No ambiguity (import name matches export name)
+
+**Exception**: Next.js pages require default exports.
+
+## Type Safety
+
+**Always provide explicit types** for function parameters and return values:
+
+```typescript
+// ✅ Good: Explicit types
+function createPage(path: string, body: string): Promise<Page> {
+  // ...
+}
+
+// ❌ Avoid: Implicit any
+function createPage(path, body) {
+  // ...
+}
+```
+
+Use `import type` for type-only imports:
+
+```typescript
+import type { PageData } from '~/interfaces/page';
+```
+
+## Error Handling
+
+ALWAYS handle errors comprehensively:
+
+```typescript
+try {
+  const result = await riskyOperation();
+  return result;
+} catch (error) {
+  logger.error('Operation failed:', { error, context });
+  throw new Error('Detailed user-friendly message');
+}
+```
+
+## Async/Await
+
+Prefer async/await over Promise chains:
+
+```typescript
+// ✅ Good: async/await
+async function loadPages() {
+  const pages = await fetchPages();
+  const enriched = await enrichPageData(pages);
+  return enriched;
+}
+
+// ❌ Avoid: Promise chains
+function loadPages() {
+  return fetchPages()
+    .then(pages => enrichPageData(pages))
+    .then(enriched => enriched);
+}
+```
+
+## Comments
+
+**Write comments in English** (even for Japanese developers):
+
+```typescript
+// ✅ Good: English comment
+// Calculate the total number of pages in the workspace
+
+// ❌ Avoid: Japanese comment
+// ワークスペース内のページ総数を計算
+```
+
+**When to comment**:
+- Complex algorithms or business logic
+- Non-obvious workarounds
+- Public APIs and interfaces
+
+**When NOT to comment**:
+- Self-explanatory code (good naming is better)
+- Restating what the code does
+
+## Test File Placement
+
+**Co-locate tests with source files** in the same directory:
+
+```
+src/utils/
+├── helper.ts
+└── helper.spec.ts        # Test next to source
+
+src/components/Button/
+├── Button.tsx
+└── Button.spec.tsx       # Test next to component
+```
+
+### Test File Naming
+
+- Unit tests: `*.spec.{ts,js}`
+- Integration tests: `*.integ.ts`
+- Component tests: `*.spec.{tsx,jsx}`
+
+## Git Commit Messages
+
+Follow conventional commit format:
+
+```
+<type>(<scope>): <subject>
+
+<body>
+```
+
+**Types**: `feat`, `fix`, `refactor`, `test`, `docs`, `chore`
+
+**Example**:
+```
+feat(page-tree): add virtualization for large trees
+
+Implemented react-window for virtualizing page tree
+to improve performance with 10k+ pages.
+```
+
+## Code Quality Checklist
+
+Before marking work complete:
+
+- [ ] Code is readable and well-named
+- [ ] Functions are small (<50 lines)
+- [ ] Files are focused (<800 lines)
+- [ ] No deep nesting (>4 levels)
+- [ ] Proper error handling
+- [ ] No console.log statements (use logger)
+- [ ] No mutation (immutable patterns used)
+- [ ] Named exports (except Next.js pages)
+- [ ] English comments
+- [ ] Co-located tests

+ 37 - 0
.claude/rules/performance.md

@@ -0,0 +1,37 @@
+# Performance Optimization
+
+## Model Selection Strategy
+
+**Haiku** - Lightweight tasks:
+- Frequent, simple agent invocations
+- Straightforward code generation
+- Worker agents in multi-agent systems
+
+**Sonnet** - Standard development:
+- Main development work
+- Orchestrating multi-agent workflows
+- Most coding tasks
+
+**Opus** - Complex reasoning:
+- Architectural decisions
+- Difficult debugging
+- Research and analysis
+
+## Context Window Management
+
+Avoid using the last 20% of the context window for:
+- Large-scale refactoring
+- Multi-file feature implementation
+- Complex debugging sessions
+
+Tasks with lower context sensitivity:
+- Single-file edits
+- Simple bug fixes
+- Documentation updates
+
+## Build Troubleshooting
+
+If build fails:
+1. Use **build-error-resolver** agent
+2. Run `turbo run lint --filter {package}`
+3. Fix incrementally, verify after each fix

+ 33 - 0
.claude/rules/security.md

@@ -0,0 +1,33 @@
+# Security Guidelines
+
+## Mandatory Security Checks
+
+Before ANY commit:
+- [ ] No hardcoded secrets (API keys, passwords, tokens)
+- [ ] All user inputs validated and sanitized
+- [ ] NoSQL injection prevention (use Mongoose properly)
+- [ ] XSS prevention (sanitize HTML output)
+- [ ] CSRF protection enabled
+- [ ] Authentication/authorization verified
+- [ ] Error messages don't leak sensitive data
+
+## Secret Management
+
+```typescript
+// NEVER: Hardcoded secrets
+const apiKey = "sk-xxxxx"
+
+// ALWAYS: Environment variables
+const apiKey = process.env.API_KEY
+if (!apiKey) {
+  throw new Error('API_KEY not configured')
+}
+```
+
+## Security Response Protocol
+
+If security issue found:
+1. STOP immediately
+2. Use **security-reviewer** agent
+3. Fix CRITICAL issues before continuing
+4. Rotate any exposed secrets

+ 38 - 0
.claude/rules/testing.md

@@ -0,0 +1,38 @@
+# Testing Rules
+
+## Package Manager (CRITICAL)
+
+**NEVER use `npx` to run tests. ALWAYS use `pnpm`.**
+
+```bash
+# ❌ WRONG
+npx vitest run yjs.integ
+
+# ✅ CORRECT
+pnpm vitest run yjs.integ
+```
+
+## Test Execution Commands
+
+### Individual Test File (from package directory)
+
+```bash
+# Use partial file name - Vitest auto-matches
+pnpm vitest run yjs.integ
+pnpm vitest run helper.spec
+pnpm vitest run Button.spec
+
+# Flaky test detection
+pnpm vitest run yjs.integ --repeat=10
+```
+
+- Use **partial file name** (no `src/` prefix or full path needed)
+- No `--project` flag needed (Vitest auto-detects from file extension)
+
+### All Tests for a Package (from monorepo root)
+
+```bash
+turbo run test --filter @growi/app
+```
+
+For testing patterns (mocking, assertions, structure), see the `.claude/skills/learned/essential-test-patterns` skill.

+ 23 - 0
.claude/settings.json

@@ -0,0 +1,23 @@
+{
+  "hooks": {
+    "PostToolUse": [
+      {
+        "matcher": "Write|Edit",
+        "hooks": [
+          {
+            "type": "command",
+            "command": "if [[ \"$FILE\" == */apps/* ]] || [[ \"$FILE\" == */packages/* ]]; then REPO_ROOT=$(echo \"$FILE\" | sed 's|/\\(apps\\|packages\\)/.*|/|'); cd \"$REPO_ROOT\" && pnpm biome check --write \"$FILE\" 2>/dev/null || true; fi",
+            "timeout": 30,
+            "description": "Auto-format edited files in apps/* and packages/* with Biome"
+          }
+        ]
+      }
+    ]
+  },
+  "enabledPlugins": {
+    "context7@claude-plugins-official": true,
+    "github@claude-plugins-official": true,
+    "typescript-lsp@claude-plugins-official": true,
+    "playwright@claude-plugins-official": true
+  }
+}

+ 122 - 0
.claude/skills/learned/essential-test-design/SKILL.md

@@ -0,0 +1,122 @@
+---
+name: essential-test-design
+description: Write tests that verify observable behavior (contract), not implementation details. Auto-invoked when writing or reviewing tests.
+---
+
+## Problem
+
+Tests that are tightly coupled to implementation details cause two failures:
+
+1. **False negatives** — Tests pass even when behavior is broken (e.g., delay shortened but test still passes because it only checks `setTimeout` was called)
+2. **False positives** — Tests fail even when behavior is correct (e.g., implementation switches from `setTimeout` to a `delay()` utility, spy breaks)
+
+Both undermine the purpose of testing: detecting regressions in behavior.
+
+## Principle: Test the Contract, Not the Mechanism
+
+A test is "essential" when it:
+- **Fails if the behavior degrades** (catches real bugs)
+- **Passes if the behavior is preserved** (survives refactoring)
+- **Does not depend on how the behavior is implemented** (implementation-agnostic)
+
+Ask: "What does the caller of this function experience?" — test that.
+
+## Anti-Patterns and Corrections
+
+### Anti-Pattern 1: Implementation Spy
+
+```typescript
+// BAD: Tests implementation, not behavior
+// Breaks if implementation changes from setTimeout to any other delay mechanism
+const spy = vi.spyOn(global, 'setTimeout');
+await exponentialBackoff(1);
+expect(spy).toHaveBeenCalledWith(expect.any(Function), 1000);
+```
+
+### Anti-Pattern 2: Arrange That Serves the Assert
+
+```typescript
+// BAD: The "arrange" is set up only to make the "assert" trivially pass
+// This is a self-fulfilling prophecy, not a meaningful test
+vi.advanceTimersByTime(1000);
+await promise;
+// No assertion — "it didn't throw" is not a valuable test
+```
+
+### Correct: Behavior Boundary Test
+
+```typescript
+// GOOD: Tests the observable contract
+// "Does not resolve before the expected delay, resolves at the expected delay"
+let resolved = false;
+mailService.exponentialBackoff(1).then(() => { resolved = true });
+
+await vi.advanceTimersByTimeAsync(999);
+expect(resolved).toBe(false);  // Catches: delay too short
+
+await vi.advanceTimersByTimeAsync(1);
+expect(resolved).toBe(true);   // Catches: delay too long or hangs
+```
+
+## Decision Framework
+
+When writing a test, ask these questions in order:
+
+1. **What is the contract?** — What does the caller expect to experience?
+   - e.g., "Wait for N ms before resolving"
+2. **What breakage should this test catch?** — Define the regression scenario
+   - e.g., "Someone changes the delay from 1000ms to 500ms"
+3. **Would this test still pass if I refactored the internals?** — If no, you're testing implementation
+   - e.g., Switching from `setTimeout` to `Bun.sleep()` shouldn't break the test
+4. **Would this test fail if the behavior degraded?** — If no, the test has no value
+   - e.g., If delay is halved, `expect(resolved).toBe(false)` at 999ms would catch it
+
+## Common Scenarios
+
+### Async Delay / Throttle / Debounce
+
+Use fake timers + boundary assertions (as shown above).
+
+### Data Transformation
+
+Assert on output shape/values, not on which internal helper was called.
+
+```typescript
+// BAD
+const spy = vi.spyOn(utils, 'formatDate');
+transform(input);
+expect(spy).toHaveBeenCalled();
+
+// GOOD
+const result = transform(input);
+expect(result.date).toBe('2026-01-01');
+```
+
+### Side Effects (API calls, DB writes)
+
+Mocking the boundary (API/DB) is acceptable — that IS the observable behavior.
+
+```typescript
+// OK: The contract IS "sends an email via mailer"
+expect(mockMailer.sendMail).toHaveBeenCalledWith(
+  expect.objectContaining({ to: 'user@example.com' })
+);
+```
+
+### Retry Logic
+
+Test the number of attempts and the final outcome, not the internal flow.
+
+```typescript
+// GOOD: Contract = "retries N times, then fails with specific error"
+mockMailer.sendMail.mockRejectedValue(new Error('fail'));
+await expect(sendWithRetry(config, 3)).rejects.toThrow('failed after 3 attempts');
+expect(mockMailer.sendMail).toHaveBeenCalledTimes(3);
+```
+
+## When to Apply
+
+- Writing new test cases for any function or method
+- Reviewing existing tests for flakiness or brittleness
+- Refactoring tests after fixing flaky CI failures
+- Code review of test pull requests

+ 494 - 0
.claude/skills/learned/essential-test-patterns/SKILL.md

@@ -0,0 +1,494 @@
+---
+name: essential-test-patterns
+description: GROWI testing patterns with Vitest, React Testing Library, and vitest-mock-extended.
+---
+
+# GROWI Testing Patterns
+
+GROWI uses **Vitest** for all testing (unit, integration, component). This skill covers universal testing patterns applicable across the monorepo.
+
+## Test File Placement (Global Standard)
+
+Place test files **in the same directory** as the source file:
+
+```
+src/components/Button/
+├── Button.tsx
+└── Button.spec.tsx       # Component test
+
+src/utils/
+├── helper.ts
+└── helper.spec.ts        # Unit test
+
+src/services/api/
+├── pageService.ts
+└── pageService.integ.ts  # Integration test
+```
+
+## Test Types & Environments
+
+| File Pattern | Type | Environment | Use Case |
+|--------------|------|-------------|----------|
+| `*.spec.{ts,js}` | Unit Test | Node.js | Pure functions, utilities, services |
+| `*.integ.ts` | Integration Test | Node.js + DB | API routes, database operations |
+| `*.spec.{tsx,jsx}` | Component Test | happy-dom | React components |
+
+Vitest automatically selects the environment based on file extension and configuration.
+
+## Vitest Configuration
+
+### Global APIs (No Imports Needed)
+
+All GROWI packages configure Vitest globals in `tsconfig.json`:
+
+```json
+{
+  "compilerOptions": {
+    "types": ["vitest/globals"]
+  }
+}
+```
+
+This enables auto-import of testing APIs:
+
+```typescript
+// No imports needed!
+describe('MyComponent', () => {
+  it('should render', () => {
+    expect(true).toBe(true);
+  });
+
+  beforeEach(() => {
+    // Setup
+  });
+
+  afterEach(() => {
+    // Cleanup
+  });
+});
+```
+
+**Available globals**: `describe`, `it`, `test`, `expect`, `beforeEach`, `afterEach`, `beforeAll`, `afterAll`, `vi`
+
+## Type-Safe Mocking with vitest-mock-extended
+
+### Basic Usage
+
+`vitest-mock-extended` provides **fully type-safe mocks** with TypeScript autocomplete:
+
+```typescript
+import { mockDeep, type DeepMockProxy } from 'vitest-mock-extended';
+
+// Create type-safe mock
+const mockRouter: DeepMockProxy<NextRouter> = mockDeep<NextRouter>();
+
+// TypeScript autocomplete works!
+mockRouter.asPath = '/test-path';
+mockRouter.query = { id: '123' };
+mockRouter.push.mockResolvedValue(true);
+
+// Use in tests
+expect(mockRouter.push).toHaveBeenCalledWith('/new-path');
+```
+
+### Complex Types with Optional Properties
+
+```typescript
+interface ComplexProps {
+  currentPageId?: string | null;
+  currentPathname?: string | null;
+  data?: Record<string, unknown>;
+  onSubmit?: (value: string) => void;
+}
+
+const mockProps: DeepMockProxy<ComplexProps> = mockDeep<ComplexProps>();
+mockProps.currentPageId = 'page-123';
+mockProps.data = { key: 'value' };
+mockProps.onSubmit?.mockImplementation((value) => {
+  console.log(value);
+});
+```
+
+### Why vitest-mock-extended?
+
+- ✅ **Type safety**: Catches typos at compile time
+- ✅ **Autocomplete**: IDE suggestions for all properties/methods
+- ✅ **Deep mocking**: Automatically mocks nested objects
+- ✅ **Vitest integration**: Works seamlessly with `vi.fn()`
+
+## React Testing Library Patterns
+
+### Basic Component Test
+
+```typescript
+import { render } from '@testing-library/react';
+import { Button } from './Button';
+
+describe('Button', () => {
+  it('should render with text', () => {
+    const { getByText } = render(<Button>Click me</Button>);
+    expect(getByText('Click me')).toBeInTheDocument();
+  });
+
+  it('should call onClick when clicked', async () => {
+    const onClick = vi.fn();
+    const { getByRole } = render(<Button onClick={onClick}>Click</Button>);
+
+    const button = getByRole('button');
+    await userEvent.click(button);
+
+    expect(onClick).toHaveBeenCalledTimes(1);
+  });
+});
+```
+
+### Testing with Jotai (Global Pattern)
+
+When testing components that use Jotai atoms, wrap with `<Provider>`:
+
+```typescript
+import { render } from '@testing-library/react';
+import { Provider } from 'jotai';
+
+const renderWithJotai = (ui: React.ReactElement) => {
+  const Wrapper = ({ children }: { children: React.ReactNode }) => (
+    <Provider>{children}</Provider>
+  );
+  return render(ui, { wrapper: Wrapper });
+};
+
+describe('ComponentWithJotai', () => {
+  it('should render with atom state', () => {
+    const { getByText } = renderWithJotai(<MyComponent />);
+    expect(getByText('Hello')).toBeInTheDocument();
+  });
+});
+```
+
+### Isolated Jotai Scope (For Testing)
+
+To isolate atom state between tests:
+
+```typescript
+import { createScope } from 'jotai-scope';
+
+describe('ComponentWithIsolatedState', () => {
+  it('test 1', () => {
+    const scope = createScope();
+    const { getByText } = renderWithJotai(<MyComponent />, scope);
+    // ...
+  });
+
+  it('test 2', () => {
+    const scope = createScope(); // Fresh scope
+    const { getByText } = renderWithJotai(<MyComponent />, scope);
+    // ...
+  });
+});
+```
+
+## Async Testing Patterns (Global Standard)
+
+### Using `act()` and `waitFor()`
+
+When testing async state updates:
+
+```typescript
+import { waitFor, act } from '@testing-library/react';
+import { renderHook } from '@testing-library/react';
+
+test('async hook', async () => {
+  const { result } = renderHook(() => useMyAsyncHook());
+
+  // Trigger async action
+  await act(async () => {
+    result.current.triggerAsyncAction();
+  });
+
+  // Wait for state update
+  await waitFor(() => {
+    expect(result.current.isLoading).toBe(false);
+  });
+
+  expect(result.current.data).toBeDefined();
+});
+```
+
+### Testing Async Functions
+
+```typescript
+it('should fetch data successfully', async () => {
+  const data = await fetchData();
+  expect(data).toEqual({ id: '123', name: 'Test' });
+});
+
+it('should handle errors', async () => {
+  await expect(fetchDataWithError()).rejects.toThrow('Error');
+});
+```
+
+## Advanced Assertions
+
+### Object Matching
+
+```typescript
+expect(mockFunction).toHaveBeenCalledWith(
+  expect.objectContaining({
+    pathname: '/expected-path',
+    data: expect.any(Object),
+    timestamp: expect.any(Number),
+  })
+);
+```
+
+### Array Matching
+
+```typescript
+expect(result).toEqual(
+  expect.arrayContaining([
+    expect.objectContaining({ id: '123' }),
+    expect.objectContaining({ id: '456' }),
+  ])
+);
+```
+
+### Partial Matching
+
+```typescript
+expect(user).toMatchObject({
+  name: 'John',
+  email: 'john@example.com',
+  // Other properties are ignored
+});
+```
+
+## Test Structure Best Practices
+
+### AAA Pattern (Arrange-Act-Assert)
+
+```typescript
+describe('MyComponent', () => {
+  beforeEach(() => {
+    vi.clearAllMocks(); // Clear mocks before each test
+  });
+
+  describe('rendering', () => {
+    it('should render with default props', () => {
+      // Arrange: Setup test data
+      const props = { title: 'Test' };
+
+      // Act: Render component
+      const { getByText } = render(<MyComponent {...props} />);
+
+      // Assert: Verify output
+      expect(getByText('Test')).toBeInTheDocument();
+    });
+  });
+
+  describe('user interactions', () => {
+    it('should submit form on button click', async () => {
+      // Arrange
+      const onSubmit = vi.fn();
+      const { getByRole, getByLabelText } = render(
+        <MyForm onSubmit={onSubmit} />
+      );
+
+      // Act
+      await userEvent.type(getByLabelText('Name'), 'John');
+      await userEvent.click(getByRole('button', { name: 'Submit' }));
+
+      // Assert
+      expect(onSubmit).toHaveBeenCalledWith({ name: 'John' });
+    });
+  });
+});
+```
+
+### Nested `describe` for Organization
+
+```typescript
+describe('PageService', () => {
+  describe('createPage', () => {
+    it('should create a page successfully', async () => {
+      // ...
+    });
+
+    it('should throw error if path is invalid', async () => {
+      // ...
+    });
+  });
+
+  describe('updatePage', () => {
+    it('should update page content', async () => {
+      // ...
+    });
+  });
+});
+```
+
+## Common Mocking Patterns
+
+### Mocking SWR
+
+```typescript
+vi.mock('swr', () => ({
+  default: vi.fn(() => ({
+    data: mockData,
+    error: null,
+    isLoading: false,
+    mutate: vi.fn(),
+  })),
+}));
+```
+
+### Mocking Modules
+
+```typescript
+// Mock entire module
+vi.mock('~/services/PageService', () => ({
+  PageService: {
+    findById: vi.fn().mockResolvedValue({ id: '123', title: 'Test' }),
+    create: vi.fn().mockResolvedValue({ id: '456', title: 'New' }),
+  },
+}));
+
+// Use in test
+import { PageService } from '~/services/PageService';
+
+it('should call PageService.findById', async () => {
+  await myFunction();
+  expect(PageService.findById).toHaveBeenCalledWith('123');
+});
+```
+
+### Mocking Specific Functions
+
+```typescript
+import { myFunction } from '~/utils/myUtils';
+
+vi.mock('~/utils/myUtils', () => ({
+  myFunction: vi.fn().mockReturnValue('mocked'),
+  otherFunction: vi.fn(), // Mock other exports
+}));
+```
+
+### Mocking CommonJS Modules with mock-require
+
+**IMPORTANT**: When `vi.mock()` fails with ESModule/CommonJS compatibility issues, use `mock-require` instead:
+
+```typescript
+import mockRequire from 'mock-require';
+
+describe('Service with CommonJS dependencies', () => {
+  beforeEach(() => {
+    // Mock CommonJS module before importing the code under test
+    mockRequire('legacy-module', {
+      someFunction: vi.fn().mockReturnValue('mocked'),
+      someProperty: 'mocked-value',
+    });
+  });
+
+  afterEach(() => {
+    // Clean up mocks to avoid leakage between tests
+    mockRequire.stopAll();
+  });
+
+  it('should use mocked module', async () => {
+    // Import AFTER mocking (dynamic import if needed)
+    const { MyService } = await import('~/services/MyService');
+
+    const result = MyService.doSomething();
+    expect(result).toBe('mocked');
+  });
+});
+```
+
+**When to use `mock-require`**:
+- Legacy CommonJS modules that don't work with `vi.mock()`
+- Mixed ESM/CJS environments causing module resolution issues
+- Third-party libraries with complex module systems
+- When `vi.mock()` fails with "Cannot redefine property" or "Module is not defined"
+
+**Key points**:
+- ✅ Mock **before** importing the code under test
+- ✅ Use `mockRequire.stopAll()` in `afterEach()` to prevent test leakage
+- ✅ Use dynamic imports (`await import()`) when needed
+- ✅ Works with both CommonJS and ESModule targets
+
+### Choosing the Right Mocking Strategy
+
+```typescript
+// ✅ Prefer vi.mock() for ESModules (simplest)
+vi.mock('~/modern-module', () => ({
+  myFunction: vi.fn(),
+}));
+
+// ✅ Use mock-require for CommonJS or mixed environments
+import mockRequire from 'mock-require';
+mockRequire('legacy-module', { myFunction: vi.fn() });
+
+// ✅ Use vitest-mock-extended for type-safe object mocks
+import { mockDeep } from 'vitest-mock-extended';
+const mockService = mockDeep<MyService>();
+```
+
+**Decision tree**:
+1. Can use `vi.mock()`? → Use it (simplest)
+2. CommonJS or module error? → Use `mock-require`
+3. Need type-safe object mock? → Use `vitest-mock-extended`
+
+## Integration Tests (with Database)
+
+Integration tests (*.integ.ts) can access in-memory databases:
+
+```typescript
+describe('PageService Integration', () => {
+  beforeEach(async () => {
+    // Setup: Seed test data
+    await Page.create({ path: '/test', body: 'content' });
+  });
+
+  afterEach(async () => {
+    // Cleanup: Clear database
+    await Page.deleteMany({});
+  });
+
+  it('should create a page', async () => {
+    const page = await PageService.create({
+      path: '/new-page',
+      body: 'content',
+    });
+
+    expect(page._id).toBeDefined();
+    expect(page.path).toBe('/new-page');
+  });
+});
+```
+
+## Testing Checklist
+
+Before committing tests, ensure:
+
+- ✅ **Co-location**: Test files are next to source files
+- ✅ **Descriptive names**: Test descriptions clearly state what is being tested
+- ✅ **AAA pattern**: Tests follow Arrange-Act-Assert structure
+- ✅ **Mocks cleared**: Use `beforeEach(() => vi.clearAllMocks())`
+- ✅ **Async handled**: Use `async/await` and `waitFor()` for async operations
+- ✅ **Type safety**: Use `vitest-mock-extended` for type-safe mocks
+- ✅ **Isolated state**: Jotai tests use separate scopes if needed
+
+## Running Tests
+
+See the `testing` rule (`.claude/rules/testing.md`) for test execution commands.
+
+## Summary: GROWI Testing Philosophy
+
+1. **Co-locate tests**: Keep tests close to source code
+2. **Type-safe mocks**: Use `vitest-mock-extended` for TypeScript support
+3. **React Testing Library**: Test user behavior, not implementation details
+4. **Async patterns**: Use `act()` and `waitFor()` for async state updates
+5. **Jotai integration**: Wrap components with `<Provider>` for atom state
+6. **Clear structure**: Use nested `describe` and AAA pattern
+7. **Clean mocks**: Always clear mocks between tests
+
+These patterns apply to **all GROWI packages** with React/TypeScript code.

+ 207 - 0
.claude/skills/monorepo-overview/SKILL.md

@@ -0,0 +1,207 @@
+---
+name: monorepo-overview
+description: GROWI monorepo structure, workspace organization, and architectural principles. Auto-invoked for all GROWI development work.
+user-invocable: false
+---
+
+# GROWI Monorepo Overview
+
+GROWI is a team collaboration wiki platform built as a monorepo using **pnpm workspace + Turborepo**.
+
+## Monorepo Structure
+
+```
+growi/
+├── apps/                    # Applications
+│   ├── app/                # Main GROWI application (Next.js + Express + MongoDB)
+│   ├── pdf-converter/      # PDF conversion microservice (Ts.ED + Puppeteer)
+│   └── slackbot-proxy/     # Slack integration proxy (Ts.ED + TypeORM + MySQL)
+├── packages/               # Shared libraries
+│   ├── core/              # Core utilities and shared logic
+│   ├── core-styles/       # Common styles (SCSS)
+│   ├── editor/            # Markdown editor components
+│   ├── ui/                # UI component library
+│   ├── pluginkit/         # Plugin framework
+│   ├── slack/             # Slack integration utilities
+│   ├── presentation/      # Presentation mode
+│   ├── pdf-converter-client/ # PDF converter client library
+│   └── remark-*/          # Markdown plugins (remark-lsx, remark-drawio, etc.)
+└── Configuration files
+    ├── pnpm-workspace.yaml
+    ├── turbo.json
+    ├── package.json
+    └── .changeset/
+```
+
+## Workspace Management
+
+### pnpm Workspace
+
+All packages are managed via **pnpm workspace**. Package references use the `workspace:` protocol:
+
+```json
+{
+  "dependencies": {
+    "@growi/core": "workspace:^",
+    "@growi/ui": "workspace:^"
+  }
+}
+```
+
+### Turborepo Orchestration
+
+Turborepo handles task orchestration with caching and parallelization:
+
+```bash
+# Run tasks across all workspaces
+turbo run dev
+turbo run test
+turbo run lint
+turbo run build
+
+# Filter to specific package
+turbo run test --filter @growi/app
+turbo run lint --filter @growi/core
+```
+
+## Architectural Principles
+
+### 1. Feature-Based Architecture (Recommended)
+
+**All packages should prefer feature-based organization**:
+
+```
+{package}/src/
+├── features/              # Feature modules
+│   ├── {feature-name}/
+│   │   ├── index.ts      # Main export
+│   │   ├── interfaces/   # TypeScript types
+│   │   ├── server/       # Server-side logic (if applicable)
+│   │   ├── client/       # Client-side logic (if applicable)
+│   │   └── utils/        # Shared utilities
+```
+
+**Benefits**:
+- Clear boundaries between features
+- Easy to locate related code
+- Facilitates gradual migration from legacy structure
+
+### 2. Server-Client Separation
+
+For full-stack packages (like apps/app), separate server and client logic:
+
+- **Server code**: Node.js runtime, database access, API routes
+- **Client code**: Browser runtime, React components, UI state
+
+This enables better code splitting and prevents server-only code from being bundled into client.
+
+### 3. Shared Libraries in packages/
+
+Common code should be extracted to `packages/`:
+
+- **core**: Utilities, constants, type definitions
+- **ui**: Reusable React components
+- **editor**: Markdown editor
+- **pluginkit**: Plugin system framework
+
+## Version Management with Changeset
+
+GROWI uses **Changesets** for version management and release notes:
+
+```bash
+# Add a changeset (after making changes)
+npx changeset
+
+# Version bump (generates CHANGELOGs and updates versions)
+pnpm run version-subpackages
+
+# Publish packages to npm (for @growi/core, @growi/pluginkit)
+pnpm run release-subpackages
+```
+
+### Changeset Workflow
+
+1. Make code changes
+2. Run `npx changeset` and describe the change
+3. Commit both code and `.changeset/*.md` file
+4. On release, run `pnpm run version-subpackages`
+5. Changesets automatically updates `CHANGELOG.md` and `package.json` versions
+
+### Version Schemes
+
+- **Main app** (`apps/app`): Manual versioning with RC prereleases
+  - `pnpm run version:patch`, `pnpm run version:prerelease`
+- **Shared libraries** (`packages/core`, `packages/pluginkit`): Changeset-managed
+- **Microservices** (`apps/pdf-converter`, `apps/slackbot-proxy`): Independent versioning
+
+## Package Categories
+
+### Applications (apps/)
+
+| Package | Description | Tech Stack |
+|---------|-------------|------------|
+| **@growi/app** | Main wiki application | Next.js (Pages Router), Express, MongoDB, Jotai, SWR |
+| **@growi/pdf-converter** | PDF export service | Ts.ED, Puppeteer |
+| **@growi/slackbot-proxy** | Slack bot proxy | Ts.ED, TypeORM, MySQL |
+
+### Core Libraries (packages/)
+
+| Package | Description | Published to npm |
+|---------|-------------|------------------|
+| **@growi/core** | Core utilities | ✅ |
+| **@growi/pluginkit** | Plugin framework | ✅ |
+| **@growi/ui** | UI components | ❌ (internal) |
+| **@growi/editor** | Markdown editor | ❌ (internal) |
+| **@growi/core-styles** | Common styles | ❌ (internal) |
+
+## Development Workflow
+
+### Initial Setup
+
+```bash
+# Install dependencies for all packages
+pnpm install
+
+# Bootstrap (install + build dependencies)
+turbo run bootstrap
+```
+
+### Daily Development
+
+```bash
+# Start all dev servers (apps/app + dependencies)
+turbo run dev
+
+# Run a specific test file (from package directory)
+pnpm vitest run yjs.integ
+
+# Run ALL tests / lint for a package
+turbo run test --filter @growi/app
+turbo run lint --filter @growi/core
+```
+
+### Cross-Package Development
+
+When modifying shared libraries (packages/*), ensure dependent apps reflect changes:
+
+1. Make changes to `packages/core`
+2. Turborepo automatically detects changes and rebuilds dependents
+3. Test in `apps/app` to verify
+
+## Key Configuration Files
+
+- **pnpm-workspace.yaml**: Defines workspace packages
+- **turbo.json**: Turborepo pipeline configuration
+- **.changeset/config.json**: Changeset configuration
+- **tsconfig.base.json**: Base TypeScript config for all packages
+- **vitest.workspace.mts**: Vitest workspace config
+- **biome.json**: Biome linter/formatter config
+
+## Design Principles Summary
+
+1. **Feature Isolation**: Use feature-based architecture for new code
+2. **Server-Client Separation**: Keep server and client code separate
+3. **Shared Libraries**: Extract common code to packages/
+4. **Type-Driven Development**: Define interfaces before implementation
+5. **Progressive Enhancement**: Migrate legacy code gradually
+6. **Version Control**: Use Changesets for release management

+ 269 - 0
.claude/skills/tech-stack/SKILL.md

@@ -0,0 +1,269 @@
+---
+name: tech-stack
+description: GROWI technology stack, build tools, and global commands. Auto-invoked for all GROWI development work.
+user-invocable: false
+---
+
+# GROWI Tech Stack
+
+## Core Technologies
+
+- **TypeScript** ~5.0.0
+- **Node.js** ^18 || ^20
+- **MongoDB** with **Mongoose** ^6.13.6 (apps/app)
+- **MySQL** with **TypeORM** 0.2.x (apps/slackbot-proxy)
+
+## Frontend Framework
+
+- **React** 18.x
+- **Next.js** (Pages Router) - Full-stack framework for apps/app
+
+## State Management & Data Fetching (Global Standard)
+
+- **Jotai** - Atomic state management (recommended for all packages with UI state)
+  - Use for UI state, form state, modal state, etc.
+  - Lightweight, TypeScript-first, minimal boilerplate
+
+- **SWR** ^2.3.2 - Data fetching with caching
+  - Use for API data fetching with automatic revalidation
+  - Works seamlessly with RESTful APIs
+
+### Why Jotai + SWR?
+
+- **Separation of concerns**: Jotai for UI state, SWR for server state
+- **Performance**: Fine-grained reactivity (Jotai) + intelligent caching (SWR)
+- **Type safety**: Both libraries have excellent TypeScript support
+- **Simplicity**: Minimal API surface, easy to learn
+
+## Build & Development Tools
+
+### Package Management
+- **pnpm** 10.4.1 - Package manager (faster, more efficient than npm/yarn)
+
+### Monorepo Orchestration
+- **Turborepo** ^2.1.3 - Build system with caching and parallelization
+
+### Linter & Formatter
+- **Biome** ^2.2.6 - Unified linter and formatter (recommended)
+  - Replaces ESLint + Prettier
+  - Significantly faster (10-100x)
+  - Configuration: `biome.json`
+
+```bash
+# Lint and format check
+biome check <files>
+
+# Auto-fix issues
+biome check --write <files>
+```
+
+- **Stylelint** ^16.5.0 - SCSS/CSS linter
+  - Configuration: `.stylelintrc.js`
+
+```bash
+# Lint styles
+stylelint "src/**/*.scss"
+```
+
+### Testing
+- **Vitest** ^2.1.1 - Unit and integration testing (recommended)
+  - Fast, Vite-powered
+  - Jest-compatible API
+  - Configuration: `vitest.workspace.mts`
+
+- **React Testing Library** ^16.0.1 - Component testing
+  - User-centric testing approach
+
+- **vitest-mock-extended** ^2.0.2 - Type-safe mocking
+  - TypeScript autocomplete for mocks
+
+- **Playwright** ^1.49.1 - E2E testing
+  - Cross-browser testing
+
+## Essential Commands (Global)
+
+### Development
+
+```bash
+# Start all dev servers (apps/app + dependencies)
+turbo run dev
+
+# Start dev server for specific package
+turbo run dev --filter @growi/app
+
+# Install dependencies for all packages
+pnpm install
+
+# Bootstrap (install + build dependencies)
+turbo run bootstrap
+```
+
+### Testing & Quality
+
+```bash
+# Run a specific test file (from package directory, e.g. apps/app)
+pnpm vitest run yjs.integ          # Partial file name match
+pnpm vitest run helper.spec        # Works for any test file
+pnpm vitest run yjs.integ --repeat=10  # Repeat for flaky test detection
+
+# Run ALL tests for a package (uses Turborepo caching)
+turbo run test --filter @growi/app
+
+# Run linters for specific package
+turbo run lint --filter @growi/app
+```
+
+### Building
+
+```bash
+# Build all packages
+turbo run build
+
+# Build specific package
+turbo run build --filter @growi/core
+```
+
+## Turborepo Task Filtering
+
+Turborepo uses `--filter` to target specific packages:
+
+```bash
+# Run task for single package
+turbo run test --filter @growi/app
+
+# Run task for multiple packages
+turbo run build --filter @growi/core --filter @growi/ui
+
+# Run task for package and its dependencies
+turbo run build --filter @growi/app...
+```
+
+## Important Configuration Files
+
+### Workspace Configuration
+- **pnpm-workspace.yaml** - Defines workspace packages
+  ```yaml
+  packages:
+    - 'apps/*'
+    - 'packages/*'
+  ```
+
+### Build Configuration
+- **turbo.json** - Turborepo pipeline configuration
+  - Defines task dependencies, caching, and outputs
+
+### TypeScript Configuration
+- **tsconfig.base.json** - Base TypeScript config extended by all packages
+  - **Target**: ESNext
+  - **Module**: ESNext
+  - **Strict Mode**: Enabled (`strict: true`)
+  - **Module Resolution**: Bundler
+  - **Allow JS**: true (for gradual migration)
+  - **Isolated Modules**: true (required for Vite, SWC)
+
+Package-specific tsconfig.json example:
+```json
+{
+  "extends": "../../tsconfig.base.json",
+  "compilerOptions": {
+    "outDir": "./dist",
+    "rootDir": "./src"
+  },
+  "include": ["src/**/*"],
+  "exclude": ["node_modules", "dist", "**/*.spec.ts"]
+}
+```
+
+### Testing Configuration
+- **vitest.workspace.mts** - Vitest workspace config
+  - Defines test environments (Node.js, happy-dom)
+  - Configures coverage
+
+### Linter Configuration
+- **biome.json** - Biome linter/formatter config
+  - Rules, ignore patterns, formatting options
+
+## Development Best Practices
+
+### Command Usage
+
+1. **Use Turborepo for full-package tasks** (all tests, lint, build):
+   - ✅ `turbo run test --filter @growi/app`
+   - ❌ `cd apps/app && pnpm test` (bypasses Turborepo caching)
+2. **Use vitest directly for individual test files** (from package directory):
+   - ✅ `pnpm vitest run yjs.integ` (simple, fast)
+   - ❌ `turbo run test --filter @growi/app -- yjs.integ` (unnecessary overhead)
+
+3. **Use pnpm for package management**:
+   - ✅ `pnpm install`
+   - ❌ `npm install` or `yarn install`
+
+4. **Run tasks from workspace root**:
+   - Turborepo handles cross-package dependencies
+   - Caching works best from root
+
+### State Management Guidelines
+
+1. **Use Jotai for UI state**:
+   ```typescript
+   // Example: Modal state
+   import { atom } from 'jotai';
+
+   export const isModalOpenAtom = atom(false);
+   ```
+
+2. **Use SWR for server state**:
+   ```typescript
+   // Example: Fetching pages
+   import useSWR from 'swr';
+
+   const { data, error, isLoading } = useSWR('/api/pages', fetcher);
+   ```
+
+3. **Avoid mixing concerns**:
+   - Don't store server data in Jotai atoms
+   - Don't manage UI state with SWR
+
+## Migration Notes
+
+- **New packages**: Use Biome + Vitest from the start
+- **Legacy packages**: Can continue using existing tools during migration
+- **Gradual migration**: Prefer updating to Biome + Vitest when modifying existing files
+
+## Technology Decisions
+
+### Why Next.js Pages Router (not App Router)?
+
+- GROWI started before App Router was stable
+- Pages Router is well-established and stable
+- Migration to App Router is being considered for future versions
+
+### Why Jotai (not Redux/Zustand)?
+
+- **Atomic approach**: More flexible than Redux, simpler than Recoil
+- **TypeScript-first**: Excellent type inference
+- **Performance**: Fine-grained reactivity, no unnecessary re-renders
+- **Minimal boilerplate**: Less code than Redux
+
+### Why SWR (not React Query)?
+
+- **Simplicity**: Smaller API surface
+- **Vercel integration**: Built by Vercel (same as Next.js)
+- **Performance**: Optimized for Next.js SSR/SSG
+
+### Why Biome (not ESLint + Prettier)?
+
+- **Speed**: 10-100x faster than ESLint
+- **Single tool**: Replaces both ESLint and Prettier
+- **Consistency**: No conflicts between linter and formatter
+- **Growing ecosystem**: Active development, Rust-based
+
+## Package-Specific Tech Stacks
+
+Different apps in the monorepo may use different tech stacks:
+
+- **apps/app**: Next.js + Express + MongoDB + Jotai + SWR
+- **apps/pdf-converter**: Ts.ED + Puppeteer
+- **apps/slackbot-proxy**: Ts.ED + TypeORM + MySQL
+
+See package-specific CLAUDE.md or skills for details.

+ 6 - 1
.devcontainer/app/devcontainer.json

@@ -9,7 +9,8 @@
   "features": {
   "features": {
     "ghcr.io/devcontainers/features/node:1": {
     "ghcr.io/devcontainers/features/node:1": {
       "version": "20.18.3"
       "version": "20.18.3"
-    }
+    },
+    "ghcr.io/devcontainers/features/github-cli:1": {}
   },
   },
 
 
   // Use 'forwardPorts' to make a list of ports inside the container available locally.
   // Use 'forwardPorts' to make a list of ports inside the container available locally.
@@ -30,6 +31,10 @@
         "editorconfig.editorconfig",
         "editorconfig.editorconfig",
         "shinnn.stylelint",
         "shinnn.stylelint",
         "stylelint.vscode-stylelint",
         "stylelint.vscode-stylelint",
+        // markdown
+        "bierner.markdown-mermaid",
+        // TypeScript (Native Preview)
+        "typescriptteam.native-preview",
         // Test
         // Test
         "vitest.explorer",
         "vitest.explorer",
         "ms-playwright.playwright",
         "ms-playwright.playwright",

+ 4 - 0
.devcontainer/pdf-converter/devcontainer.json

@@ -4,6 +4,10 @@
   "service": "pdf-converter",
   "service": "pdf-converter",
   "workspaceFolder": "/workspace/growi",
   "workspaceFolder": "/workspace/growi",
 
 
+  "features": {
+    "ghcr.io/devcontainers/features/github-cli:1": {}
+  },
+
   // Use 'forwardPorts' to make a list of ports inside the container available locally.
   // Use 'forwardPorts' to make a list of ports inside the container available locally.
   // "forwardPorts": [],
   // "forwardPorts": [],
 
 

+ 1 - 1
.gitignore

@@ -37,7 +37,7 @@ yarn-error.log*
 
 
 # IDE, dev #
 # IDE, dev #
 .idea
 .idea
-.claude
+**/.claude/settings.local.json
 *.orig
 *.orig
 *.code-workspace
 *.code-workspace
 *.timestamp-*.mjs
 *.timestamp-*.mjs

+ 93 - 0
.kiro/settings/rules/design-discovery-full.md

@@ -0,0 +1,93 @@
+# Full Discovery Process for Technical Design
+
+## Objective
+Conduct comprehensive research and analysis to ensure the technical design is based on complete, accurate, and up-to-date information.
+
+## Discovery Steps
+
+### 1. Requirements Analysis
+**Map Requirements to Technical Needs**
+- Extract all functional requirements from EARS format
+- Identify non-functional requirements (performance, security, scalability)
+- Determine technical constraints and dependencies
+- List core technical challenges
+
+### 2. Existing Implementation Analysis
+**Understand Current System** (if modifying/extending):
+- Analyze codebase structure and architecture patterns
+- Map reusable components, services, utilities
+- Identify domain boundaries and data flows
+- Document integration points and dependencies
+- Determine approach: extend vs refactor vs wrap
+
+### 3. Technology Research
+**Investigate Best Practices and Solutions**:
+- **Use WebSearch** to find:
+  - Latest architectural patterns for similar problems
+  - Industry best practices for the technology stack
+  - Recent updates or changes in relevant technologies
+  - Common pitfalls and solutions
+
+- **Use WebFetch** to analyze:
+  - Official documentation for frameworks/libraries
+  - API references and usage examples
+  - Migration guides and breaking changes
+  - Performance benchmarks and comparisons
+
+### 4. External Dependencies Investigation
+**For Each External Service/Library**:
+- Search for official documentation and GitHub repositories
+- Verify API signatures and authentication methods
+- Check version compatibility with existing stack
+- Investigate rate limits and usage constraints
+- Find community resources and known issues
+- Document security considerations
+- Note any gaps requiring implementation investigation
+
+### 5. Architecture Pattern & Boundary Analysis
+**Evaluate Architectural Options**:
+- Compare relevant patterns (MVC, Clean, Hexagonal, Event-driven)
+- Assess fit with existing architecture and steering principles
+- Identify domain boundaries and ownership seams required to avoid team conflicts
+- Consider scalability implications and operational concerns
+- Evaluate maintainability and team expertise
+- Document preferred pattern and rejected alternatives in `research.md`
+
+### 6. Risk Assessment
+**Identify Technical Risks**:
+- Performance bottlenecks and scaling limits
+- Security vulnerabilities and attack vectors
+- Integration complexity and coupling
+- Technical debt creation vs resolution
+- Knowledge gaps and training needs
+
+## Research Guidelines
+
+### When to Search
+**Always search for**:
+- External API documentation and updates
+- Security best practices for authentication/authorization
+- Performance optimization techniques for identified bottlenecks
+- Latest versions and migration paths for dependencies
+
+**Search if uncertain about**:
+- Architectural patterns for specific use cases
+- Industry standards for data formats/protocols
+- Compliance requirements (GDPR, HIPAA, etc.)
+- Scalability approaches for expected load
+
+### Search Strategy
+1. Start with official sources (documentation, GitHub)
+2. Check recent blog posts and articles (last 6 months)
+3. Review Stack Overflow for common issues
+4. Investigate similar open-source implementations
+
+## Output Requirements
+Capture all findings that impact design decisions in `research.md` using the shared template:
+- Key insights affecting architecture, technology alignment, and contracts
+- Constraints discovered during research
+- Recommended approaches and selected architecture pattern with rationale
+- Rejected alternatives and trade-offs (documented in the Design Decisions section)
+- Updated domain boundaries that inform Components & Interface Contracts
+- Risks and mitigation strategies
+- Gaps requiring further investigation during implementation

+ 49 - 0
.kiro/settings/rules/design-discovery-light.md

@@ -0,0 +1,49 @@
+# Light Discovery Process for Extensions
+
+## Objective
+Quickly analyze existing system and integration requirements for feature extensions.
+
+## Focused Discovery Steps
+
+### 1. Extension Point Analysis
+**Identify Integration Approach**:
+- Locate existing extension points or interfaces
+- Determine modification scope (files, components)
+- Check for existing patterns to follow
+- Identify backward compatibility requirements
+
+### 2. Dependency Check
+**Verify Compatibility**:
+- Check version compatibility of new dependencies
+- Validate API contracts haven't changed
+- Ensure no breaking changes in pipeline
+
+### 3. Quick Technology Verification
+**For New Libraries Only**:
+- Use WebSearch for official documentation
+- Verify basic usage patterns
+- Check for known compatibility issues
+- Confirm licensing compatibility
+- Record key findings in `research.md` (technology alignment section)
+
+### 4. Integration Risk Assessment
+**Quick Risk Check**:
+- Impact on existing functionality
+- Performance implications
+- Security considerations
+- Testing requirements
+
+## When to Escalate to Full Discovery
+Switch to full discovery if you find:
+- Significant architectural changes needed
+- Complex external service integrations
+- Security-sensitive implementations
+- Performance-critical components
+- Unknown or poorly documented dependencies
+
+## Output Requirements
+- Clear integration approach (note boundary impacts in `research.md`)
+- List of files/components to modify
+- New dependencies with versions
+- Integration risks and mitigations
+- Testing focus areas

+ 182 - 0
.kiro/settings/rules/design-principles.md

@@ -0,0 +1,182 @@
+# Technical Design Rules and Principles
+
+## Core Design Principles
+
+### 1. Type Safety is Mandatory
+- **NEVER** use `any` type in TypeScript interfaces
+- Define explicit types for all parameters and returns
+- Use discriminated unions for error handling
+- Specify generic constraints clearly
+
+### 2. Design vs Implementation
+- **Focus on WHAT, not HOW**
+- Define interfaces and contracts, not code
+- Specify behavior through pre/post conditions
+- Document architectural decisions, not algorithms
+
+### 3. Visual Communication
+- **Simple features**: Basic component diagram or none
+- **Medium complexity**: Architecture + data flow
+- **High complexity**: Multiple diagrams (architecture, sequence, state)
+- **Always pure Mermaid**: No styling, just structure
+
+### 4. Component Design Rules
+- **Single Responsibility**: One clear purpose per component
+- **Clear Boundaries**: Explicit domain ownership
+- **Dependency Direction**: Follow architectural layers
+- **Interface Segregation**: Minimal, focused interfaces
+- **Team-safe Interfaces**: Design boundaries that allow parallel implementation without merge conflicts
+- **Research Traceability**: Record boundary decisions and rationale in `research.md`
+
+### 5. Data Modeling Standards
+- **Domain First**: Start with business concepts
+- **Consistency Boundaries**: Clear aggregate roots
+- **Normalization**: Balance between performance and integrity
+- **Evolution**: Plan for schema changes
+
+### 6. Error Handling Philosophy
+- **Fail Fast**: Validate early and clearly
+- **Graceful Degradation**: Partial functionality over complete failure
+- **User Context**: Actionable error messages
+- **Observability**: Comprehensive logging and monitoring
+
+### 7. Integration Patterns
+- **Loose Coupling**: Minimize dependencies
+- **Contract First**: Define interfaces before implementation
+- **Versioning**: Plan for API evolution
+- **Idempotency**: Design for retry safety
+- **Contract Visibility**: Surface API and event contracts in design.md while linking extended details from `research.md`
+
+## Documentation Standards
+
+### Language and Tone
+- **Declarative**: "The system authenticates users" not "The system should authenticate"
+- **Precise**: Specific technical terms over vague descriptions
+- **Concise**: Essential information only
+- **Formal**: Professional technical writing
+
+### Structure Requirements
+- **Hierarchical**: Clear section organization
+- **Traceable**: Requirements to components mapping
+- **Complete**: All aspects covered for implementation
+- **Consistent**: Uniform terminology throughout
+- **Focused**: Keep design.md centered on architecture and contracts; move investigation logs and lengthy comparisons to `research.md`
+
+## Section Authoring Guidance
+
+### Global Ordering
+- Default flow: Overview → Goals/Non-Goals → Requirements Traceability → Architecture → Technology Stack → System Flows → Components & Interfaces → Data Models → Optional sections.
+- Teams may swap Traceability earlier or place Data Models nearer Architecture when it improves clarity, but keep section headings intact.
+- Within each section, follow **Summary → Scope → Decisions → Impacts/Risks** so reviewers can scan consistently.
+
+### Requirement IDs
+- Reference requirements as `2.1, 2.3` without prefixes (no “Requirement 2.1”).
+- All requirements MUST have numeric IDs. If a requirement lacks a numeric ID, stop and fix `requirements.md` before continuing.
+- Use `N.M`-style numeric IDs where `N` is the top-level requirement number from requirements.md (for example, Requirement 1 → 1.1, 1.2; Requirement 2 → 2.1, 2.2).
+- Every component, task, and traceability row must reference the same canonical numeric ID.
+
+### Technology Stack
+- Include ONLY layers impacted by this feature (frontend, backend, data, messaging, infra).
+- For each layer specify tool/library + version + the role it plays; push extended rationale, comparisons, or benchmarks to `research.md`.
+- When extending an existing system, highlight deviations from the current stack and list new dependencies.
+
+### System Flows
+- Add diagrams only when they clarify behavior:  
+  - **Sequence** for multi-step interactions  
+  - **Process/State** for branching rules or lifecycle  
+  - **Data/Event** for pipelines or async patterns
+- Always use pure Mermaid. If no complex flow exists, omit the entire section.
+
+### Requirements Traceability
+- Use the standard table (`Requirement | Summary | Components | Interfaces | Flows`) to prove coverage.
+- Collapse to bullet form only when a single requirement maps 1:1 to a component.
+- Prefer the component summary table for simple mappings; reserve the full traceability table for complex or compliance-sensitive requirements.
+- Re-run this mapping whenever requirements or components change to avoid drift.
+
+### Components & Interfaces Authoring
+- Group components by domain/layer and provide one block per component.
+- Begin with a summary table listing Component, Domain, Intent, Requirement coverage, key dependencies, and selected contracts.
+- Table fields: Intent (one line), Requirements (`2.1, 2.3`), Owner/Reviewers (optional).
+- Dependencies table must mark each entry as Inbound/Outbound/External and assign Criticality (`P0` blocking, `P1` high-risk, `P2` informational).
+- Summaries of external dependency research stay here; detailed investigation (API signatures, rate limits, migration notes) belongs in `research.md`.
+- design.md must remain a self-contained reviewer artifact. Reference `research.md` only for background, and restate any conclusions or decisions here.
+- Contracts: tick only the relevant types (Service/API/Event/Batch/State). Unchecked types should not appear later in the component section.
+- Service interfaces must declare method signatures, inputs/outputs, and error envelopes. API/Event/Batch contracts require schema tables or bullet lists covering trigger, payload, delivery, idempotency.
+- Use **Integration & Migration Notes**, **Validation Hooks**, and **Open Questions / Risks** to document rollout strategy, observability, and unresolved decisions.
+- Detail density rules:
+  - **Full block**: components introducing new boundaries (logic hooks, shared services, external integrations, data layers).
+  - **Summary-only**: presentational/UI components with no new boundaries (plus a short Implementation Note if needed).
+- Implementation Notes must combine Integration / Validation / Risks into a single bulleted subsection to reduce repetition.
+- Prefer lists or inline descriptors for short data (dependencies, contract selections). Use tables only when comparing multiple items.
+
+### Shared Interfaces & Props
+- Define a base interface (e.g., `BaseUIPanelProps`) for recurring UI components and extend it per component to capture only the deltas.
+- Hooks, utilities, and integration adapters that introduce new contracts should still include full TypeScript signatures.
+- When reusing a base contract, reference it explicitly (e.g., “Extends `BaseUIPanelProps` with `onSubmitAnswer` callback”) instead of duplicating the code block.
+
+### Data Models
+- Domain Model covers aggregates, entities, value objects, domain events, and invariants. Add Mermaid diagrams only when relationships are non-trivial.
+- Logical Data Model should articulate structure, indexing, sharding, and storage-specific considerations (event store, KV/wide-column) relevant to the change.
+- Data Contracts & Integration section documents API payloads, event schemas, and cross-service synchronization patterns when the feature crosses boundaries.
+- Lengthy type definitions or vendor-specific option objects should be placed in the Supporting References section within design.md, linked from the relevant section. Investigation notes stay in `research.md`.
+- Supporting References usage is optional; only create it when keeping the content in the main body would reduce readability. All decisions must still appear in the main sections so design.md stands alone.
+
+### Error/Testing/Security/Performance Sections
+- Record only feature-specific decisions or deviations. Link or reference organization-wide standards (steering) for baseline practices instead of restating them.
+
+### Diagram & Text Deduplication
+- Do not restate diagram content verbatim in prose. Use the text to highlight key decisions, trade-offs, or impacts that are not obvious from the visual.
+- When a decision is fully captured in the diagram annotations, a short “Key Decisions” bullet is sufficient.
+
+### General Deduplication
+- Avoid repeating the same information across Overview, Architecture, and Components. Reference earlier sections when context is identical.
+- If a requirement/component relationship is captured in the summary table, do not rewrite it elsewhere unless extra nuance is added.
+
+## Diagram Guidelines
+
+### When to include a diagram
+- **Architecture**: Use a structural diagram when 3+ components or external systems interact.
+- **Sequence**: Draw a sequence diagram when calls/handshakes span multiple steps.
+- **State / Flow**: Capture complex state machines or business flows in a dedicated diagram.
+- **ER**: Provide an entity-relationship diagram for non-trivial data models.
+- **Skip**: Minor one-component changes generally do not need diagrams.
+
+### Mermaid requirements
+```mermaid
+graph TB
+    Client --> ApiGateway
+    ApiGateway --> ServiceA
+    ApiGateway --> ServiceB
+    ServiceA --> Database
+```
+
+- **Plain Mermaid only** – avoid custom styling or unsupported syntax.
+- **Node IDs** – alphanumeric plus underscores only (e.g., `Client`, `ServiceA`). Do not use `@`, `/`, or leading `-`.
+- **Labels** – simple words. Do not embed parentheses `()`, square brackets `[]`, quotes `"`, or slashes `/`.
+  - ❌ `DnD[@dnd-kit/core]` → invalid ID (`@`).
+  - ❌ `UI[KanbanBoard(React)]` → invalid label (`()`).
+  - ✅ `DndKit[dnd-kit core]` → use plain text in labels, keep technology details in the accompanying description.
+  - ℹ️ Mermaid strict-mode will otherwise fail with errors like `Expecting 'SQE' ... got 'PS'`; remove punctuation from labels before rendering.
+- **Edges** – show data or control flow direction.
+- **Groups** – using Mermaid subgraphs to cluster related components is allowed; use it sparingly for clarity.
+
+## Quality Metrics
+### Design Completeness Checklist
+- All requirements addressed
+- No implementation details leaked
+- Clear component boundaries
+- Explicit error handling
+- Comprehensive test strategy
+- Security considered
+- Performance targets defined
+- Migration path clear (if applicable)
+
+### Common Anti-patterns to Avoid
+❌ Mixing design with implementation
+❌ Vague interface definitions
+❌ Missing error scenarios
+❌ Ignored non-functional requirements
+❌ Overcomplicated architectures
+❌ Tight coupling between components
+❌ Missing data consistency strategy
+❌ Incomplete dependency analysis

+ 110 - 0
.kiro/settings/rules/design-review.md

@@ -0,0 +1,110 @@
+# Design Review Process
+
+## Objective
+Conduct interactive quality review of technical design documents to ensure they are solid enough to proceed to implementation with acceptable risk.
+
+## Review Philosophy
+- **Quality assurance, not perfection seeking**
+- **Critical focus**: Limit to 3 most important concerns
+- **Interactive dialogue**: Engage with designer, not one-way evaluation
+- **Balanced assessment**: Recognize strengths and weaknesses
+- **Clear decision**: Definitive GO/NO-GO with rationale
+
+## Scope & Non-Goals
+
+- Scope: Evaluate the quality of the design document against project context and standards to decide GO/NO-GO.
+- Non-Goals: Do not perform implementation-level design, deep technology research, or finalize technology choices. Defer such items to the design phase iteration.
+
+## Core Review Criteria
+
+### 1. Existing Architecture Alignment (Critical)
+- Integration with existing system boundaries and layers
+- Consistency with established architectural patterns
+- Proper dependency direction and coupling management
+- Alignment with current module organization
+
+### 2. Design Consistency & Standards
+- Adherence to project naming conventions and code standards
+- Consistent error handling and logging strategies
+- Uniform configuration and dependency management
+- Alignment with established data modeling patterns
+
+### 3. Extensibility & Maintainability
+- Design flexibility for future requirements
+- Clear separation of concerns and single responsibility
+- Testability and debugging considerations
+- Appropriate complexity for requirements
+
+### 4. Type Safety & Interface Design
+- Proper type definitions and interface contracts
+- Avoidance of unsafe patterns (e.g., `any` in TypeScript)
+- Clear API boundaries and data structures
+- Input validation and error handling coverage
+
+## Review Process
+
+### Step 1: Analyze
+Analyze design against all review criteria, focusing on critical issues impacting integration, maintainability, complexity, and requirements fulfillment.
+
+### Step 2: Identify Critical Issues (≤3)
+For each issue:
+```
+🔴 **Critical Issue [1-3]**: [Brief title]
+**Concern**: [Specific problem]
+**Impact**: [Why it matters]
+**Suggestion**: [Concrete improvement]
+**Traceability**: [Requirement ID/section from requirements.md]
+**Evidence**: [Design doc section/heading]
+```
+
+### Step 3: Recognize Strengths
+Acknowledge 1-2 strong aspects to maintain balanced feedback.
+
+### Step 4: Decide GO/NO-GO
+- **GO**: No critical architectural misalignment, requirements addressed, clear implementation path, acceptable risks
+- **NO-GO**: Fundamental conflicts, critical gaps, high failure risk, disproportionate complexity
+
+## Traceability & Evidence
+
+- Link each critical issue to the relevant requirement(s) from `requirements.md` (ID or section).
+- Cite evidence locations in the design document (section/heading, diagram, or artifact) to support the assessment.
+- When applicable, reference constraints from steering context to justify the issue.
+
+## Output Format
+
+### Design Review Summary
+2-3 sentences on overall quality and readiness.
+
+### Critical Issues (≤3)
+For each: Issue, Impact, Recommendation, Traceability (e.g., 1.1, 1.2), Evidence (design.md section).
+
+### Design Strengths
+1-2 positive aspects.
+
+### Final Assessment
+Decision (GO/NO-GO), Rationale (1-2 sentences), Next Steps.
+
+### Interactive Discussion
+Engage on designer's perspective, alternatives, clarifications, and necessary changes.
+
+## Length & Focus
+
+- Summary: 2–3 sentences
+- Each critical issue: 5–7 lines total (including Issue/Impact/Recommendation/Traceability/Evidence)
+- Overall review: keep concise (~400 words guideline)
+
+## Review Guidelines
+
+1. **Critical Focus**: Only flag issues that significantly impact success
+2. **Constructive Tone**: Provide solutions, not just criticism
+3. **Interactive Approach**: Engage in dialogue rather than one-way evaluation
+4. **Balanced Assessment**: Recognize both strengths and weaknesses
+5. **Clear Decision**: Make definitive GO/NO-GO recommendation
+6. **Actionable Feedback**: Ensure all suggestions are implementable
+
+## Final Checklist
+
+- **Critical Issues ≤ 3** and each includes Impact and Recommendation
+- **Traceability**: Each issue references requirement ID/section
+- **Evidence**: Each issue cites design doc location
+- **Decision**: GO/NO-GO with clear rationale and next steps

+ 49 - 0
.kiro/settings/rules/ears-format.md

@@ -0,0 +1,49 @@
+# EARS Format Guidelines
+
+## Overview
+EARS (Easy Approach to Requirements Syntax) is the standard format for acceptance criteria in spec-driven development.
+
+EARS patterns describe the logical structure of a requirement (condition + subject + response) and are not tied to any particular natural language.  
+All acceptance criteria should be written in the target language configured for the specification (for example, `spec.json.language` / `en`).  
+Keep EARS trigger keywords and fixed phrases in English (`When`, `If`, `While`, `Where`, `The system shall`, `The [system] shall`) and localize only the variable parts (`[event]`, `[precondition]`, `[trigger]`, `[feature is included]`, `[response/action]`) into the target language. Do not interleave target-language text inside the trigger or fixed English phrases themselves.
+
+## Primary EARS Patterns
+
+### 1. Event-Driven Requirements
+- **Pattern**: When [event], the [system] shall [response/action]
+- **Use Case**: Responses to specific events or triggers
+- **Example**: When user clicks checkout button, the Checkout Service shall validate cart contents
+
+### 2. State-Driven Requirements
+- **Pattern**: While [precondition], the [system] shall [response/action]
+- **Use Case**: Behavior dependent on system state or preconditions
+- **Example**: While payment is processing, the Checkout Service shall display loading indicator
+
+### 3. Unwanted Behavior Requirements
+- **Pattern**: If [trigger], the [system] shall [response/action]
+- **Use Case**: System response to errors, failures, or undesired situations
+- **Example**: If invalid credit card number is entered, the website shall display error message
+
+### 4. Optional Feature Requirements
+- **Pattern**: Where [feature is included], the [system] shall [response/action]
+- **Use Case**: Requirements for optional or conditional features
+- **Example**: Where the car has a sunroof, the car shall have a sunroof control panel
+
+### 5. Ubiquitous Requirements
+- **Pattern**: The [system] shall [response/action]
+- **Use Case**: Always-active requirements and fundamental system properties
+- **Example**: The mobile phone shall have a mass of less than 100 grams
+
+## Combined Patterns
+- While [precondition], when [event], the [system] shall [response/action]
+- When [event] and [additional condition], the [system] shall [response/action]
+
+## Subject Selection Guidelines
+- **Software Projects**: Use concrete system/service name (e.g., "Checkout Service", "User Auth Module")
+- **Process/Workflow**: Use responsible team/role (e.g., "Support Team", "Review Process")
+- **Non-Software**: Use appropriate subject (e.g., "Marketing Campaign", "Documentation")
+
+## Quality Criteria
+- Requirements must be testable, verifiable, and describe a single behavior.
+- Use objective language: "shall" for mandatory behavior, "should" for recommendations; avoid ambiguous terms.
+- Follow EARS syntax: [condition], the [system] shall [response/action].

+ 144 - 0
.kiro/settings/rules/gap-analysis.md

@@ -0,0 +1,144 @@
+# Gap Analysis Process
+
+## Objective
+Analyze the gap between requirements and existing codebase to inform implementation strategy decisions.
+
+## Analysis Framework
+
+### 1. Current State Investigation
+
+- Scan for domain-related assets:
+  - Key files/modules and directory layout
+  - Reusable components/services/utilities
+  - Dominant architecture patterns and constraints
+
+- Extract conventions:
+  - Naming, layering, dependency direction
+  - Import/export patterns and dependency hotspots
+  - Testing placement and approach
+
+- Note integration surfaces:
+  - Data models/schemas, API clients, auth mechanisms
+
+### 2. Requirements Feasibility Analysis
+
+- From EARS requirements, list technical needs:
+  - Data models, APIs/services, UI/components
+  - Business rules/validation
+  - Non-functionals: security, performance, scalability, reliability
+
+- Identify gaps and constraints:
+  - Missing capabilities in current codebase
+  - Unknowns to be researched later (mark as "Research Needed")
+  - Constraints from existing architecture and patterns
+
+- Note complexity signals:
+  - Simple CRUD / algorithmic logic / workflows / external integrations
+
+### 3. Implementation Approach Options
+
+#### Option A: Extend Existing Components
+**When to consider**: Feature fits naturally into existing structure
+
+- **Which files/modules to extend**:
+  - Identify specific files requiring changes
+  - Assess impact on existing functionality
+  - Evaluate backward compatibility concerns
+
+- **Compatibility assessment**:
+  - Check if extension respects existing interfaces
+  - Verify no breaking changes to consumers
+  - Assess test coverage impact
+
+- **Complexity and maintainability**:
+  - Evaluate cognitive load of additional functionality
+  - Check if single responsibility principle is maintained
+  - Assess if file size remains manageable
+
+**Trade-offs**:
+- ✅ Minimal new files, faster initial development
+- ✅ Leverages existing patterns and infrastructure
+- ❌ Risk of bloating existing components
+- ❌ May complicate existing logic
+
+#### Option B: Create New Components
+**When to consider**: Feature has distinct responsibility or existing components are already complex
+
+- **Rationale for new creation**:
+  - Clear separation of concerns justifies new file
+  - Existing components are already complex
+  - Feature has distinct lifecycle or dependencies
+
+- **Integration points**:
+  - How new components connect to existing system
+  - APIs or interfaces exposed
+  - Dependencies on existing components
+
+- **Responsibility boundaries**:
+  - Clear definition of what new component owns
+  - Interfaces with existing components
+  - Data flow and control flow
+
+**Trade-offs**:
+- ✅ Clean separation of concerns
+- ✅ Easier to test in isolation
+- ✅ Reduces complexity in existing components
+- ❌ More files to navigate
+- ❌ Requires careful interface design
+
+#### Option C: Hybrid Approach
+**When to consider**: Complex features requiring both extension and new creation
+
+- **Combination strategy**:
+  - Which parts extend existing components
+  - Which parts warrant new components
+  - How they interact
+
+- **Phased implementation**:
+  - Initial phase: minimal viable changes
+  - Subsequent phases: refactoring or new components
+  - Migration strategy if needed
+
+- **Risk mitigation**:
+  - Incremental rollout approach
+  - Feature flags or configuration
+  - Rollback strategy
+
+**Trade-offs**:
+- ✅ Balanced approach for complex features
+- ✅ Allows iterative refinement
+- ❌ More complex planning required
+- ❌ Potential for inconsistency if not well-coordinated
+
+### 4. Out-of-Scope for Gap Analysis
+
+- Defer deep research activities to the design phase.
+- Record unknowns as concise "Research Needed" items only.
+
+### 5. Implementation Complexity & Risk
+
+  - Effort:
+    - S (1–3 days): existing patterns, minimal deps, straightforward integration
+    - M (3–7 days): some new patterns/integrations, moderate complexity
+    - L (1–2 weeks): significant functionality, multiple integrations or workflows
+    - XL (2+ weeks): architectural changes, unfamiliar tech, broad impact
+  - Risk:
+    - High: unknown tech, complex integrations, architectural shifts, unclear perf/security path
+    - Medium: new patterns with guidance, manageable integrations, known perf solutions
+    - Low: extend established patterns, familiar tech, clear scope, minimal integration
+
+### Output Checklist
+
+- Requirement-to-Asset Map with gaps tagged (Missing / Unknown / Constraint)
+- Options A/B/C with short rationale and trade-offs
+- Effort (S/M/L/XL) and Risk (High/Medium/Low) with one-line justification each
+- Recommendations for design phase:
+  - Preferred approach and key decisions
+  - Research items to carry forward
+
+## Principles
+
+- **Information over decisions**: Provide analysis and options, not final choices
+- **Multiple viable options**: Offer credible alternatives when applicable
+- **Explicit gaps and assumptions**: Flag unknowns and constraints clearly
+- **Context-aware**: Align with existing patterns and architecture limits
+- **Transparent effort and risk**: Justify labels succinctly

+ 90 - 0
.kiro/settings/rules/steering-principles.md

@@ -0,0 +1,90 @@
+# Steering Principles
+
+Steering files are **project memory**, not exhaustive specifications.
+
+---
+
+## Content Granularity
+
+### Golden Rule
+> "If new code follows existing patterns, steering shouldn't need updating."
+
+### ✅ Document
+- Organizational patterns (feature-first, layered)
+- Naming conventions (PascalCase rules)
+- Import strategies (absolute vs relative)
+- Architectural decisions (state management)
+- Technology standards (key frameworks)
+
+### ❌ Avoid
+- Complete file listings
+- Every component description
+- All dependencies
+- Implementation details
+- Agent-specific tooling directories (e.g. `.cursor/`, `.gemini/`, `.claude/`)
+- Detailed documentation of `.kiro/` metadata directories (settings, automation)
+
+### Example Comparison
+
+**Bad** (Specification-like):
+```markdown
+- /components/Button.tsx - Primary button with variants
+- /components/Input.tsx - Text input with validation
+- /components/Modal.tsx - Modal dialog
+... (50+ files)
+```
+
+**Good** (Project Memory):
+```markdown
+## UI Components (`/components/ui/`)
+Reusable, design-system aligned primitives
+- Named by function (Button, Input, Modal)
+- Export component + TypeScript interface
+- No business logic
+```
+
+---
+
+## Security
+
+Never include:
+- API keys, passwords, credentials
+- Database URLs, internal IPs
+- Secrets or sensitive data
+
+---
+
+## Quality Standards
+
+- **Single domain**: One topic per file
+- **Concrete examples**: Show patterns with code
+- **Explain rationale**: Why decisions were made
+- **Maintainable size**: 100-200 lines typical
+
+---
+
+## Preservation (when updating)
+
+- Preserve user sections and custom examples
+- Additive by default (add, don't replace)
+- Add `updated_at` timestamp
+- Note why changes were made
+
+---
+
+## Notes
+
+- Templates are starting points, customize as needed
+- Follow same granularity principles as core steering
+- All steering files loaded as project memory
+- Light references to `.kiro/specs/` and `.kiro/steering/` are acceptable; avoid other `.kiro/` directories
+- Custom files equally important as core files
+
+---
+
+## File-Specific Focus
+
+- **product.md**: Purpose, value, business context (not exhaustive features)
+- **tech.md**: Key frameworks, standards, conventions (not all dependencies)
+- **structure.md**: Organization patterns, naming rules (not directory trees)
+- **Custom files**: Specialized patterns (API, testing, security, etc.)

+ 131 - 0
.kiro/settings/rules/tasks-generation.md

@@ -0,0 +1,131 @@
+# Task Generation Rules
+
+## Core Principles
+
+### 1. Natural Language Descriptions
+Focus on capabilities and outcomes, not code structure.
+
+**Describe**:
+- What functionality to achieve
+- Business logic and behavior
+- Features and capabilities
+- Domain language and concepts
+- Data relationships and workflows
+
+**Avoid**:
+- File paths and directory structure
+- Function/method names and signatures
+- Type definitions and interfaces
+- Class names and API contracts
+- Specific data structures
+
+**Rationale**: Implementation details (files, methods, types) are defined in design.md. Tasks describe the functional work to be done.
+
+### 2. Task Integration & Progression
+
+**Every task must**:
+- Build on previous outputs (no orphaned code)
+- Connect to the overall system (no hanging features)
+- Progress incrementally (no big jumps in complexity)
+- Validate core functionality early in sequence
+- Respect architecture boundaries defined in design.md (Architecture Pattern & Boundary Map)
+- Honor interface contracts documented in design.md
+- Use major task summaries sparingly—omit detail bullets if the work is fully captured by child tasks.
+
+**End with integration tasks** to wire everything together.
+
+### 3. Flexible Task Sizing
+
+**Guidelines**:
+- **Major tasks**: As many sub-tasks as logically needed (group by cohesion)
+- **Sub-tasks**: 1-3 hours each, 3-10 details per sub-task
+- Balance between too granular and too broad
+
+**Don't force arbitrary numbers** - let logical grouping determine structure.
+
+### 4. Requirements Mapping
+
+**End each task detail section with**:
+- `_Requirements: X.X, Y.Y_` listing **only numeric requirement IDs** (comma-separated). Never append descriptive text, parentheses, translations, or free-form labels.
+- For cross-cutting requirements, list every relevant requirement ID. All requirements MUST have numeric IDs in requirements.md. If an ID is missing, stop and correct requirements.md before generating tasks.
+- Reference components/interfaces from design.md when helpful (e.g., `_Contracts: AuthService API`)
+
+### 5. Code-Only Focus
+
+**Include ONLY**:
+- Coding tasks (implementation)
+- Testing tasks (unit, integration, E2E)
+- Technical setup tasks (infrastructure, configuration)
+
+**Exclude**:
+- Deployment tasks
+- Documentation tasks
+- User testing
+- Marketing/business activities
+
+### Optional Test Coverage Tasks
+
+- When the design already guarantees functional coverage and rapid MVP delivery is prioritized, mark purely test-oriented follow-up work (e.g., baseline rendering/unit tests) as **optional** using the `- [ ]*` checkbox form.
+- Only apply the optional marker when the sub-task directly references acceptance criteria from requirements.md in its detail bullets.
+- Never mark implementation work or integration-critical verification as optional—reserve `*` for auxiliary/deferrable test coverage that can be revisited post-MVP.
+
+## Task Hierarchy Rules
+
+### Maximum 2 Levels
+- **Level 1**: Major tasks (1, 2, 3, 4...)
+- **Level 2**: Sub-tasks (1.1, 1.2, 2.1, 2.2...)
+- **No deeper nesting** (no 1.1.1)
+- If a major task would contain only a single actionable item, collapse the structure and promote the sub-task to the major level (e.g., replace `1.1` with `1.`).
+- When a major task exists purely as a container, keep the checkbox description concise and avoid duplicating detailed bullets—reserve specifics for its sub-tasks.
+
+### Sequential Numbering
+- Major tasks MUST increment: 1, 2, 3, 4, 5...
+- Sub-tasks reset per major task: 1.1, 1.2, then 2.1, 2.2...
+- Never repeat major task numbers
+
+### Parallel Analysis (default)
+- Assume parallel analysis is enabled unless explicitly disabled (e.g. `--sequential` flag).
+- Identify tasks that can run concurrently when **all** conditions hold:
+  - No data dependency on other pending tasks
+  - No shared file or resource contention
+  - No prerequisite review/approval from another task
+- Validate that identified parallel tasks operate within separate boundaries defined in the Architecture Pattern & Boundary Map.
+- Confirm API/event contracts from design.md do not overlap in ways that cause conflicts.
+- Append `(P)` immediately after the task number for each parallel-capable task:
+  - Example: `- [ ] 2.1 (P) Build background worker`
+  - Apply to both major tasks and sub-tasks when appropriate.
+- If sequential mode is requested, omit `(P)` markers entirely.
+- Group parallel tasks logically (same parent when possible) and highlight any ordering caveats in detail bullets.
+- Explicitly call out dependencies that prevent `(P)` even when tasks look similar.
+
+### Checkbox Format
+```markdown
+- [ ] 1. Major task description
+- [ ] 1.1 Sub-task description
+  - Detail item 1
+  - Detail item 2
+  - _Requirements: X.X_
+
+- [ ] 1.2 Sub-task description
+  - Detail items...
+  - _Requirements: Y.Y_
+
+- [ ] 1.3 Sub-task description
+  - Detail items...
+  - _Requirements: Z.Z, W.W_
+
+- [ ] 2. Next major task (NOT 1 again!)
+- [ ] 2.1 Sub-task...
+```
+
+## Requirements Coverage
+
+**Mandatory Check**:
+- ALL requirements from requirements.md MUST be covered
+- Cross-reference every requirement ID with task mappings
+- If gaps found: Return to requirements or design phase
+- No requirement should be left without corresponding tasks
+
+Use `N.M`-style numeric requirement IDs where `N` is the top-level requirement number from requirements.md (for example, Requirement 1 → 1.1, 1.2; Requirement 2 → 2.1, 2.2), and `M` is a local index within that requirement group.
+
+Document any intentionally deferred requirements with rationale.

+ 34 - 0
.kiro/settings/rules/tasks-parallel-analysis.md

@@ -0,0 +1,34 @@
+# Parallel Task Analysis Rules
+
+## Purpose
+Provide a consistent way to identify implementation tasks that can be safely executed in parallel while generating `tasks.md`.
+
+## When to Consider Tasks Parallel
+Only mark a task as parallel-capable when **all** of the following are true:
+
+1. **No data dependency** on pending tasks.
+2. **No conflicting files or shared mutable resources** are touched.
+3. **No prerequisite review/approval** from another task is required beforehand.
+4. **Environment/setup work** needed by this task is already satisfied or covered within the task itself.
+
+## Marking Convention
+- Append `(P)` immediately after the numeric identifier for each qualifying task.
+  - Example: `- [ ] 2.1 (P) Build background worker for emails`
+- Apply `(P)` to both major tasks and sub-tasks when appropriate.
+- If sequential execution is requested (e.g. via `--sequential` flag), omit `(P)` markers entirely.
+- Keep `(P)` **outside** of checkbox brackets to avoid confusion with completion state.
+
+## Grouping & Ordering Guidelines
+- Group parallel tasks under the same parent whenever the work belongs to the same theme.
+- List obvious prerequisites or caveats in the detail bullets (e.g., "Requires schema migration from 1.2").
+- When two tasks look similar but are not parallel-safe, call out the blocking dependency explicitly.
+- Skip marking container-only major tasks (those without their own actionable detail bullets) with `(P)`—evaluate parallel execution at the sub-task level instead.
+
+## Quality Checklist
+Before marking a task with `(P)`, ensure you have:
+
+- Verified that running this task concurrently will not create merge or deployment conflicts.
+- Captured any shared state expectations in the detail bullets.
+- Confirmed that the implementation can be tested independently.
+
+If any check fails, **do not** mark the task with `(P)` and explain the dependency in the task details.

+ 276 - 0
.kiro/settings/templates/specs/design.md

@@ -0,0 +1,276 @@
+# Design Document Template
+
+---
+**Purpose**: Provide sufficient detail to ensure implementation consistency across different implementers, preventing interpretation drift.
+
+**Approach**:
+- Include essential sections that directly inform implementation decisions
+- Omit optional sections unless critical to preventing implementation errors
+- Match detail level to feature complexity
+- Use diagrams and tables over lengthy prose
+
+**Warning**: Approaching 1000 lines indicates excessive feature complexity that may require design simplification.
+---
+
+> Sections may be reordered (e.g., surfacing Requirements Traceability earlier or moving Data Models nearer Architecture) when it improves clarity. Within each section, keep the flow **Summary → Scope → Decisions → Impacts/Risks** so reviewers can scan consistently.
+
+## Overview 
+2-3 paragraphs max
+**Purpose**: This feature delivers [specific value] to [target users].
+**Users**: [Target user groups] will utilize this for [specific workflows].
+**Impact** (if applicable): Changes the current [system state] by [specific modifications].
+
+
+### Goals
+- Primary objective 1
+- Primary objective 2  
+- Success criteria
+
+### Non-Goals
+- Explicitly excluded functionality
+- Future considerations outside current scope
+- Integration points deferred
+
+## Architecture
+
+> Reference detailed discovery notes in `research.md` only for background; keep design.md self-contained for reviewers by capturing all decisions and contracts here.
+> Capture key decisions in text and let diagrams carry structural detail—avoid repeating the same information in prose.
+
+### Existing Architecture Analysis (if applicable)
+When modifying existing systems:
+- Current architecture patterns and constraints
+- Existing domain boundaries to be respected
+- Integration points that must be maintained
+- Technical debt addressed or worked around
+
+### Architecture Pattern & Boundary Map
+**RECOMMENDED**: Include Mermaid diagram showing the chosen architecture pattern and system boundaries (required for complex features, optional for simple additions)
+
+**Architecture Integration**:
+- Selected pattern: [name and brief rationale]
+- Domain/feature boundaries: [how responsibilities are separated to avoid conflicts]
+- Existing patterns preserved: [list key patterns]
+- New components rationale: [why each is needed]
+- Steering compliance: [principles maintained]
+
+### Technology Stack
+
+| Layer | Choice / Version | Role in Feature | Notes |
+|-------|------------------|-----------------|-------|
+| Frontend / CLI | | | |
+| Backend / Services | | | |
+| Data / Storage | | | |
+| Messaging / Events | | | |
+| Infrastructure / Runtime | | | |
+
+> Keep rationale concise here and, when more depth is required (trade-offs, benchmarks), add a short summary plus pointer to the Supporting References section and `research.md` for raw investigation notes.
+
+## System Flows
+
+Provide only the diagrams needed to explain non-trivial flows. Use pure Mermaid syntax. Common patterns:
+- Sequence (multi-party interactions)
+- Process / state (branching logic or lifecycle)
+- Data / event flow (pipelines, async messaging)
+
+Skip this section entirely for simple CRUD changes.
+> Describe flow-level decisions (e.g., gating conditions, retries) briefly after the diagram instead of restating each step.
+
+## Requirements Traceability
+
+Use this section for complex or compliance-sensitive features where requirements span multiple domains. Straightforward 1:1 mappings can rely on the Components summary table.
+
+Map each requirement ID (e.g., `2.1`) to the design elements that realize it.
+
+| Requirement | Summary | Components | Interfaces | Flows |
+|-------------|---------|------------|------------|-------|
+| 1.1 | | | | |
+| 1.2 | | | | |
+
+> Omit this section only when a single component satisfies a single requirement without cross-cutting concerns.
+
+## Components and Interfaces
+
+Provide a quick reference before diving into per-component details.
+
+- Summaries can be a table or compact list. Example table:
+  | Component | Domain/Layer | Intent | Req Coverage | Key Dependencies (P0/P1) | Contracts |
+  |-----------|--------------|--------|--------------|--------------------------|-----------|
+  | ExampleComponent | UI | Displays XYZ | 1, 2 | GameProvider (P0), MapPanel (P1) | Service, State |
+- Only components introducing new boundaries (e.g., logic hooks, external integrations, persistence) require full detail blocks. Simple presentation components can rely on the summary row plus a short Implementation Note.
+
+Group detailed blocks by domain or architectural layer. For each detailed component, list requirement IDs as `2.1, 2.3` (omit “Requirement”). When multiple UI components share the same contract, reference a base interface/props definition instead of duplicating code blocks.
+
+### [Domain / Layer]
+
+#### [Component Name]
+
+| Field | Detail |
+|-------|--------|
+| Intent | 1-line description of the responsibility |
+| Requirements | 2.1, 2.3 |
+| Owner / Reviewers | (optional) |
+
+**Responsibilities & Constraints**
+- Primary responsibility
+- Domain boundary and transaction scope
+- Data ownership / invariants
+
+**Dependencies**
+- Inbound: Component/service name — purpose (Criticality)
+- Outbound: Component/service name — purpose (Criticality)
+- External: Service/library — purpose (Criticality)
+
+Summarize external dependency findings here; deeper investigation (API signatures, rate limits, migration notes) lives in `research.md`.
+
+**Contracts**: Service [ ] / API [ ] / Event [ ] / Batch [ ] / State [ ]  ← check only the ones that apply.
+
+##### Service Interface
+```typescript
+interface [ComponentName]Service {
+  methodName(input: InputType): Result<OutputType, ErrorType>;
+}
+```
+- Preconditions:
+- Postconditions:
+- Invariants:
+
+##### API Contract
+| Method | Endpoint | Request | Response | Errors |
+|--------|----------|---------|----------|--------|
+| POST | /api/resource | CreateRequest | Resource | 400, 409, 500 |
+
+##### Event Contract
+- Published events:  
+- Subscribed events:  
+- Ordering / delivery guarantees:
+
+##### Batch / Job Contract
+- Trigger:  
+- Input / validation:  
+- Output / destination:  
+- Idempotency & recovery:
+
+##### State Management
+- State model:  
+- Persistence & consistency:  
+- Concurrency strategy:
+
+**Implementation Notes**
+- Integration: 
+- Validation: 
+- Risks:
+
+## Data Models
+
+Focus on the portions of the data landscape that change with this feature.
+
+### Domain Model
+- Aggregates and transactional boundaries
+- Entities, value objects, domain events
+- Business rules & invariants
+- Optional Mermaid diagram for complex relationships
+
+### Logical Data Model
+
+**Structure Definition**:
+- Entity relationships and cardinality
+- Attributes and their types
+- Natural keys and identifiers
+- Referential integrity rules
+
+**Consistency & Integrity**:
+- Transaction boundaries
+- Cascading rules
+- Temporal aspects (versioning, audit)
+
+### Physical Data Model
+**When to include**: When implementation requires specific storage design decisions
+
+**For Relational Databases**:
+- Table definitions with data types
+- Primary/foreign keys and constraints
+- Indexes and performance optimizations
+- Partitioning strategy for scale
+
+**For Document Stores**:
+- Collection structures
+- Embedding vs referencing decisions
+- Sharding key design
+- Index definitions
+
+**For Event Stores**:
+- Event schema definitions
+- Stream aggregation strategies
+- Snapshot policies
+- Projection definitions
+
+**For Key-Value/Wide-Column Stores**:
+- Key design patterns
+- Column families or value structures
+- TTL and compaction strategies
+
+### Data Contracts & Integration
+
+**API Data Transfer**
+- Request/response schemas
+- Validation rules
+- Serialization format (JSON, Protobuf, etc.)
+
+**Event Schemas**
+- Published event structures
+- Schema versioning strategy
+- Backward/forward compatibility rules
+
+**Cross-Service Data Management**
+- Distributed transaction patterns (Saga, 2PC)
+- Data synchronization strategies
+- Eventual consistency handling
+
+Skip subsections that are not relevant to this feature.
+
+## Error Handling
+
+### Error Strategy
+Concrete error handling patterns and recovery mechanisms for each error type.
+
+### Error Categories and Responses
+**User Errors** (4xx): Invalid input → field-level validation; Unauthorized → auth guidance; Not found → navigation help
+**System Errors** (5xx): Infrastructure failures → graceful degradation; Timeouts → circuit breakers; Exhaustion → rate limiting  
+**Business Logic Errors** (422): Rule violations → condition explanations; State conflicts → transition guidance
+
+**Process Flow Visualization** (when complex business logic exists):
+Include Mermaid flowchart only for complex error scenarios with business workflows.
+
+### Monitoring
+Error tracking, logging, and health monitoring implementation.
+
+## Testing Strategy
+
+### Default sections (adapt names/sections to fit the domain)
+- Unit Tests: 3–5 items from core functions/modules (e.g., auth methods, subscription logic)
+- Integration Tests: 3–5 cross-component flows (e.g., webhook handling, notifications)
+- E2E/UI Tests (if applicable): 3–5 critical user paths (e.g., forms, dashboards)
+- Performance/Load (if applicable): 3–4 items (e.g., concurrency, high-volume ops)
+
+## Optional Sections (include when relevant)
+
+### Security Considerations
+_Use this section for features handling auth, sensitive data, external integrations, or user permissions. Capture only decisions unique to this feature; defer baseline controls to steering docs._
+- Threat modeling, security controls, compliance requirements
+- Authentication and authorization patterns
+- Data protection and privacy considerations
+
+### Performance & Scalability
+_Use this section when performance targets, high load, or scaling concerns exist. Record only feature-specific targets or trade-offs and rely on steering documents for general practices._
+- Target metrics and measurement strategies
+- Scaling approaches (horizontal/vertical)
+- Caching strategies and optimization techniques
+
+### Migration Strategy
+Include a Mermaid flowchart showing migration phases when schema/data movement is required.
+- Phase breakdown, rollback triggers, validation checkpoints
+
+## Supporting References (Optional)
+- Create this section only when keeping the information in the main body would hurt readability (e.g., very long TypeScript definitions, vendor option matrices, exhaustive schema tables). Keep decision-making context in the main sections so the design stays self-contained.
+- Link to the supporting references from the main text instead of inlining large snippets.
+- Background research notes and comparisons continue to live in `research.md`, but their conclusions must be summarized in the main design.

+ 22 - 0
.kiro/settings/templates/specs/init.json

@@ -0,0 +1,22 @@
+{
+  "feature_name": "{{FEATURE_NAME}}",
+  "created_at": "{{TIMESTAMP}}",
+  "updated_at": "{{TIMESTAMP}}",
+  "language": "en",
+  "phase": "initialized",
+  "approvals": {
+    "requirements": {
+      "generated": false,
+      "approved": false
+    },
+    "design": {
+      "generated": false,
+      "approved": false
+    },
+    "tasks": {
+      "generated": false,
+      "approved": false
+    }
+  },
+  "ready_for_implementation": false
+}

+ 9 - 0
.kiro/settings/templates/specs/requirements-init.md

@@ -0,0 +1,9 @@
+# Requirements Document
+
+## Project Description (Input)
+{{PROJECT_DESCRIPTION}}
+
+## Requirements
+<!-- Will be generated in /kiro:spec-requirements phase -->
+
+

+ 26 - 0
.kiro/settings/templates/specs/requirements.md

@@ -0,0 +1,26 @@
+# Requirements Document
+
+## Introduction
+{{INTRODUCTION}}
+
+## Requirements
+
+### Requirement 1: {{REQUIREMENT_AREA_1}}
+<!-- Requirement headings MUST include a leading numeric ID only (for example: "Requirement 1: ...", "1. Overview", "2 Feature: ..."). Alphabetic IDs like "Requirement A" are not allowed. -->
+**Objective:** As a {{ROLE}}, I want {{CAPABILITY}}, so that {{BENEFIT}}
+
+#### Acceptance Criteria
+1. When [event], the [system] shall [response/action]
+2. If [trigger], the [system] shall [response/action]
+3. While [precondition], the [system] shall [response/action]
+4. Where [feature is included], the [system] shall [response/action]
+5. The [system] shall [response/action]
+
+### Requirement 2: {{REQUIREMENT_AREA_2}}
+**Objective:** As a {{ROLE}}, I want {{CAPABILITY}}, so that {{BENEFIT}}
+
+#### Acceptance Criteria
+1. When [event], the [system] shall [response/action]
+2. When [event] and [condition], the [system] shall [response/action]
+
+<!-- Additional requirements follow the same pattern -->

+ 61 - 0
.kiro/settings/templates/specs/research.md

@@ -0,0 +1,61 @@
+# Research & Design Decisions Template
+
+---
+**Purpose**: Capture discovery findings, architectural investigations, and rationale that inform the technical design.
+
+**Usage**:
+- Log research activities and outcomes during the discovery phase.
+- Document design decision trade-offs that are too detailed for `design.md`.
+- Provide references and evidence for future audits or reuse.
+---
+
+## Summary
+- **Feature**: `<feature-name>`
+- **Discovery Scope**: New Feature / Extension / Simple Addition / Complex Integration
+- **Key Findings**:
+  - Finding 1
+  - Finding 2
+  - Finding 3
+
+## Research Log
+Document notable investigation steps and their outcomes. Group entries by topic for readability.
+
+### [Topic or Question]
+- **Context**: What triggered this investigation?
+- **Sources Consulted**: Links, documentation, API references, benchmarks
+- **Findings**: Concise bullet points summarizing the insights
+- **Implications**: How this affects architecture, contracts, or implementation
+
+_Repeat the subsection for each major topic._
+
+## Architecture Pattern Evaluation
+List candidate patterns or approaches that were considered. Use the table format where helpful.
+
+| Option | Description | Strengths | Risks / Limitations | Notes |
+|--------|-------------|-----------|---------------------|-------|
+| Hexagonal | Ports & adapters abstraction around core domain | Clear boundaries, testable core | Requires adapter layer build-out | Aligns with existing steering principle X |
+
+## Design Decisions
+Record major decisions that influence `design.md`. Focus on choices with significant trade-offs.
+
+### Decision: `<Title>`
+- **Context**: Problem or requirement driving the decision
+- **Alternatives Considered**:
+  1. Option A — short description
+  2. Option B — short description
+- **Selected Approach**: What was chosen and how it works
+- **Rationale**: Why this approach fits the current project context
+- **Trade-offs**: Benefits vs. compromises
+- **Follow-up**: Items to verify during implementation or testing
+
+_Repeat the subsection for each decision._
+
+## Risks & Mitigations
+- Risk 1 — Proposed mitigation
+- Risk 2 — Proposed mitigation
+- Risk 3 — Proposed mitigation
+
+## References
+Provide canonical links and citations (official docs, standards, ADRs, internal guidelines).
+- [Title](https://example.com) — brief note on relevance
+- ...

+ 21 - 0
.kiro/settings/templates/specs/tasks.md

@@ -0,0 +1,21 @@
+# Implementation Plan
+
+## Task Format Template
+
+Use whichever pattern fits the work breakdown:
+
+### Major task only
+- [ ] {{NUMBER}}. {{TASK_DESCRIPTION}}{{PARALLEL_MARK}}
+  - {{DETAIL_ITEM_1}} *(Include details only when needed. If the task stands alone, omit bullet items.)*
+  - _Requirements: {{REQUIREMENT_IDS}}_
+
+### Major + Sub-task structure
+- [ ] {{MAJOR_NUMBER}}. {{MAJOR_TASK_SUMMARY}}
+- [ ] {{MAJOR_NUMBER}}.{{SUB_NUMBER}} {{SUB_TASK_DESCRIPTION}}{{SUB_PARALLEL_MARK}}
+  - {{DETAIL_ITEM_1}}
+  - {{DETAIL_ITEM_2}}
+  - _Requirements: {{REQUIREMENT_IDS}}_ *(IDs only; do not add descriptions or parentheses.)*
+
+> **Parallel marker**: Append ` (P)` only to tasks that can be executed in parallel. Omit the marker when running in `--sequential` mode.
+>
+> **Optional test coverage**: When a sub-task is deferrable test work tied to acceptance criteria, mark the checkbox as `- [ ]*` and explain the referenced requirements in the detail bullets.

+ 69 - 0
.kiro/settings/templates/steering-custom/api-standards.md

@@ -0,0 +1,69 @@
+# API Standards
+
+[Purpose: consistent API patterns for naming, structure, auth, versioning, and errors]
+
+## Philosophy
+- Prefer predictable, resource-oriented design
+- Be explicit in contracts; minimize breaking changes
+- Secure by default (auth first, least privilege)
+
+## Endpoint Pattern
+```
+/api/{version}/{resource}[/{id}][/{sub-resource}]
+```
+Examples:
+- `/api/v1/users`
+- `/api/v1/users/:id`
+- `/api/v1/users/:id/posts`
+
+HTTP verbs:
+- GET (read, safe, idempotent)
+- POST (create)
+- PUT/PATCH (update)
+- DELETE (remove, idempotent)
+
+## Request/Response
+
+Request (typical):
+```json
+{ "data": { ... }, "metadata": { "requestId": "..." } }
+```
+
+Success:
+```json
+{ "data": { ... }, "meta": { "timestamp": "...", "version": "..." } }
+```
+
+Error:
+```json
+{ "error": { "code": "ERROR_CODE", "message": "...", "field": "optional" } }
+```
+(See error-handling for rules.)
+
+## Status Codes (pattern)
+- 2xx: Success (200 read, 201 create, 204 delete)
+- 4xx: Client issues (400 validation, 401/403 auth, 404 missing)
+- 5xx: Server issues (500 generic, 503 unavailable)
+Choose the status that best reflects the outcome.
+
+## Authentication
+- Credentials in standard location
+```
+Authorization: Bearer {token}
+```
+- Reject unauthenticated before business logic
+
+## Versioning
+- Version via URL/header/media-type
+- Breaking change → new version
+- Non-breaking → same version
+- Provide deprecation window and comms
+
+## Pagination/Filtering (if applicable)
+- Pagination: `page`, `pageSize` or cursor-based
+- Filtering: explicit query params
+- Sorting: `sort=field:asc|desc`
+Return pagination metadata in `meta`.
+
+---
+_Focus on patterns and decisions, not endpoint catalogs._

+ 67 - 0
.kiro/settings/templates/steering-custom/authentication.md

@@ -0,0 +1,67 @@
+# Authentication & Authorization Standards
+
+[Purpose: unify auth model, token/session lifecycle, permission checks, and security]
+
+## Philosophy
+- Clear separation: authentication (who) vs authorization (what)
+- Secure by default: least privilege, fail closed, short-lived tokens
+- UX-aware: friction where risk is high, smooth otherwise
+
+## Authentication
+
+### Method (choose + rationale)
+- Options: JWT, Session, OAuth2, hybrid
+- Choice: [our method] because [reason]
+
+### Flow (high-level)
+```
+1) User proves identity (credentials or provider)
+2) Server verifies and issues token/session
+3) Client sends token per request
+4) Server verifies token and proceeds
+```
+
+### Token/Session Lifecycle
+- Storage: httpOnly cookie or Authorization header
+- Expiration: short-lived access, longer refresh (if used)
+- Refresh: rotate tokens; respect revocation
+- Revocation: blacklist/rotate on logout/compromise
+
+### Security Pattern
+- Enforce TLS; never expose tokens to JS when avoidable
+- Bind token to audience/issuer; include minimal claims
+- Consider device binding and IP/risk checks for sensitive actions
+
+## Authorization
+
+### Permission Model
+- Choose one: RBAC / ABAC / ownership-based / hybrid
+- Define roles/attributes centrally; avoid hardcoding across codebase
+
+### Checks (where to enforce)
+- Route/middleware: coarse-grained gate
+- Domain/service: fine-grained decisions
+- UI: conditional rendering (no security reliance)
+
+Example pattern:
+```typescript
+requirePermission('resource:action'); // route
+if (!user.can('resource:action')) throw ForbiddenError(); // domain
+```
+
+### Ownership
+- Pattern: owner OR privileged role can act
+- Verify on entity boundary before mutation
+
+## Passwords & MFA
+- Passwords: strong policy, hashed (bcrypt/argon2), never plaintext
+- Reset: time-limited token, single-use, notify user
+- MFA: step-up for risky operations (policy-driven)
+
+## API-to-API Auth
+- Use API keys or OAuth client credentials
+- Scope keys minimally; rotate and audit usage
+- Rate limit by identity (user/key)
+
+---
+_Focus on patterns and decisions. No library-specific code._

+ 46 - 0
.kiro/settings/templates/steering-custom/database.md

@@ -0,0 +1,46 @@
+# Database Standards
+
+[Purpose: guide schema design, queries, migrations, and integrity]
+
+## Philosophy
+- Model the domain first; optimize after correctness
+- Prefer explicit constraints; let database enforce invariants
+- Query only what you need; measure before optimizing
+
+## Naming & Types
+- Tables: `snake_case`, plural (`users`, `order_items`)
+- Columns: `snake_case` (`created_at`, `user_id`)
+- FKs: `{table}_id` referencing `{table}.id`
+- Types: timezone-aware timestamps; strong IDs; precise money types
+
+## Relationships
+- 1:N: FK in child
+- N:N: join table with compound key
+- 1:1: FK + UNIQUE
+
+## Migrations
+- Immutable migrations; always add rollback
+- Small, focused steps; test on non-prod first
+- Naming: `{seq}_{action}_{object}` (e.g., `002_add_email_index`)
+
+## Query Patterns
+- ORM for simple CRUD and safety; raw SQL for complex/perf-critical
+- Avoid N+1 (eager load/batching); paginate large sets
+- Index FKs and frequently filtered/sorted columns
+
+## Connection & Transactions
+- Use pooling (size/timeouts based on workload)
+- One connection per unit of work; close/return promptly
+- Wrap multi-step changes in transactions
+
+## Data Integrity
+- Use NOT NULL/UNIQUE/CHECK/FK constraints
+- Validate at DB when appropriate (defense in depth)
+- Prefer generated columns for consistent derivations
+
+## Backup & Recovery
+- Regular backups with retention; test restores
+- Document RPO/RTO targets; monitor backup jobs
+
+---
+_Focus on patterns and decisions. No environment-specific settings._

+ 54 - 0
.kiro/settings/templates/steering-custom/deployment.md

@@ -0,0 +1,54 @@
+# Deployment Standards
+
+[Purpose: safe, repeatable releases with clear environment and pipeline patterns]
+
+## Philosophy
+- Automate; test before deploy; verify after deploy
+- Prefer incremental rollout with fast rollback
+- Production changes must be observable and reversible
+
+## Environments
+- Dev: fast iteration; debugging enabled
+- Staging: mirrors prod; release validation
+- Prod: hardened; monitored; least privilege
+
+## CI/CD Flow
+```
+Code → Test → Build → Scan → Deploy (staged) → Verify
+```
+Principles:
+- Fail fast on tests/scans; block deploy
+- Artifact builds are reproducible (lockfiles, pinned versions)
+- Manual approval for prod; auditable trail
+
+## Deployment Strategies
+- Rolling: gradual instance replacement
+- Blue-Green: switch traffic between two pools
+- Canary: small % users first, expand on health
+Choose per risk profile; document default.
+
+## Zero-Downtime & Migrations
+- Health checks gate traffic; graceful shutdown
+- Backwards-compatible DB changes during rollout
+- Separate migration step; test rollback paths
+
+## Rollback
+- Keep previous version ready; automate revert
+- Rollback faster than fix-forward; document triggers
+
+## Configuration & Secrets
+- 12-factor config via env; never commit secrets
+- Secret manager; rotate; least privilege; audit access
+- Validate required env vars at startup
+
+## Health & Monitoring
+- Endpoints: `/health`, `/health/live`, `/health/ready`
+- Monitor latency, error rate, throughput, saturation
+- Alerts on SLO breaches/spikes; tune to avoid fatigue
+
+## Incident Response & DR
+- Standard playbook: detect → assess → mitigate → communicate → resolve → post-mortem
+- Backups with retention; test restore; defined RPO/RTO
+
+---
+_Focus on rollout patterns and safeguards. No provider-specific steps._

+ 59 - 0
.kiro/settings/templates/steering-custom/error-handling.md

@@ -0,0 +1,59 @@
+# Error Handling Standards
+
+[Purpose: unify how errors are classified, shaped, propagated, logged, and monitored]
+
+## Philosophy
+- Fail fast where possible; degrade gracefully at system boundaries
+- Consistent error shape across the stack (human + machine readable)
+- Handle known errors close to source; surface unknowns to a global handler
+
+## Classification (decide handling by source)
+- Client: Input/validation/user action issues → 4xx
+- Server: System failures/unexpected exceptions → 5xx
+- Business: Rule/state violations → 4xx (e.g., 409)
+- External: 3rd-party/network failures → map to 5xx or 4xx with context
+
+## Error Shape (single canonical format)
+```json
+{
+  "error": {
+    "code": "ERROR_CODE",
+    "message": "Human-readable message",
+    "requestId": "trace-id",
+    "timestamp": "ISO-8601"
+  }
+}
+```
+Principles: stable code enums, no secrets, include trace info.
+
+## Propagation (where to convert)
+- API layer: Convert domain errors → HTTP status + canonical body
+- Service layer: Throw typed business errors, avoid stringly-typed errors
+- Data/external layer: Wrap provider errors with safe, actionable codes
+- Unknown errors: Bubble to global handler → 500 + generic message
+
+Example pattern:
+```typescript
+try { return await useCase(); }
+catch (e) {
+  if (e instanceof BusinessError) return respondMapped(e);
+  logError(e); return respondInternal();
+}
+```
+
+## Logging (context over noise)
+Log: operation, userId (if available), code, message, stack, requestId, minimal context.
+Do not log: passwords, tokens, secrets, full PII, full bodies with sensitive data.
+Levels: ERROR (failures), WARN (recoverable/edge), INFO (key events), DEBUG (diagnostics).
+
+## Retry (only when safe)
+Retry when: network/timeouts/transient 5xx AND operation is idempotent.
+Do not retry: 4xx, business errors, non-idempotent flows.
+Strategy: exponential backoff + jitter, capped attempts; require idempotency keys.
+
+## Monitoring & Health
+Track: error rates by code/category, latency, saturation; alert on spikes/SLO breaches.
+Expose health: `/health` (live), `/health/ready` (ready). Link errors to traces.
+
+---
+_Focus on patterns and decisions. No implementation details or exhaustive lists._

+ 55 - 0
.kiro/settings/templates/steering-custom/security.md

@@ -0,0 +1,55 @@
+# Security Standards
+
+[Purpose: define security posture with patterns for validation, authz, secrets, and data]
+
+## Philosophy
+- Defense in depth; least privilege; secure by default; fail closed
+- Validate at boundaries; sanitize for context; never trust input
+- Separate authentication (who) and authorization (what)
+
+## Input & Output
+- Validate at API boundaries and UI forms; enforce types and constraints
+- Sanitize/escape based on destination (HTML, SQL, shell, logs)
+- Prefer allow-lists over block-lists; reject early with minimal detail
+
+## Authentication & Authorization
+- Authentication: verify identity; issue short-lived tokens/sessions
+- Authorization: check permissions before actions; deny by default
+- Centralize policies; avoid duplicating checks across code
+
+Pattern:
+```typescript
+if (!user.hasPermission('resource:action')) throw ForbiddenError();
+```
+
+## Secrets & Configuration
+- Never commit secrets; store in secret manager or env
+- Rotate regularly; audit access; scope minimal
+- Validate required env vars at startup; fail fast on missing
+
+## Sensitive Data
+- Minimize collection; mask/redact in logs; encrypt at rest and in transit
+- Restrict access by role/need-to-know; track access to sensitive records
+
+## Session/Token Security
+- httpOnly + secure cookies where possible; TLS everywhere
+- Short expiration; rotate on refresh; revoke on logout/compromise
+- Bind tokens to audience/issuer; include minimal claims
+
+## Logging (security-aware)
+- Log auth attempts, permission denials, and sensitive operations
+- Never log passwords, tokens, secrets, full PII; avoid full bodies
+- Include requestId and context to correlate events
+
+## Headers & Transport
+- Enforce TLS; HSTS
+- Set security headers (CSP, X-Frame-Options, X-Content-Type-Options)
+- Prefer modern crypto; disable weak protocols/ciphers
+
+## Vulnerability Posture
+- Prefer secure libraries; keep dependencies updated
+- Static/dynamic scans in CI; track and remediate
+- Educate team on common classes; encode as patterns above
+
+---
+_Focus on patterns and principles. Link concrete configs to ops docs._

+ 47 - 0
.kiro/settings/templates/steering-custom/testing.md

@@ -0,0 +1,47 @@
+# Testing Standards
+
+[Purpose: guide what to test, where tests live, and how to structure them]
+
+## Philosophy
+- Test behavior, not implementation
+- Prefer fast, reliable tests; minimize brittle mocks
+- Cover critical paths deeply; breadth over 100% pursuit
+
+## Organization
+Options:
+- Co-located: `component.tsx` + `component.test.tsx`
+- Separate: `/src/...` and `/tests/...`
+Pick one as default; allow exceptions with rationale.
+
+Naming:
+- Files: `*.test.*` or `*.spec.*`
+- Suites: what is under test; Cases: expected behavior
+
+## Test Types
+- Unit: single unit, mocked dependencies, very fast
+- Integration: multiple units together, mock externals only
+- E2E: full flows, minimal mocks, only for critical journeys
+
+## Structure (AAA)
+```typescript
+it('does X when Y', () => {
+  // Arrange
+  const input = setup();
+  // Act
+  const result = act(input);
+  // Assert
+  expect(result).toEqual(expected);
+});
+```
+
+## Mocking & Data
+- Mock externals (API/DB); never mock the system under test
+- Use factories/fixtures; reset state between tests
+- Keep test data minimal and intention-revealing
+
+## Coverage
+- Target: [% overall]; higher for critical domains
+- Enforce thresholds in CI; exceptions require review rationale
+
+---
+_Focus on patterns and decisions. Tool-specific config lives elsewhere._

+ 18 - 0
.kiro/settings/templates/steering/product.md

@@ -0,0 +1,18 @@
+# Product Overview
+
+[Brief description of what this product does and who it serves]
+
+## Core Capabilities
+
+[3-5 key capabilities, not exhaustive features]
+
+## Target Use Cases
+
+[Primary scenarios this product addresses]
+
+## Value Proposition
+
+[What makes this product unique or valuable]
+
+---
+_Focus on patterns and purpose, not exhaustive feature lists_

+ 41 - 0
.kiro/settings/templates/steering/structure.md

@@ -0,0 +1,41 @@
+# Project Structure
+
+## Organization Philosophy
+
+[Describe approach: feature-first, layered, domain-driven, etc.]
+
+## Directory Patterns
+
+### [Pattern Name]
+**Location**: `/path/`  
+**Purpose**: [What belongs here]  
+**Example**: [Brief example]
+
+### [Pattern Name]
+**Location**: `/path/`  
+**Purpose**: [What belongs here]  
+**Example**: [Brief example]
+
+## Naming Conventions
+
+- **Files**: [Pattern, e.g., PascalCase, kebab-case]
+- **Components**: [Pattern]
+- **Functions**: [Pattern]
+
+## Import Organization
+
+```typescript
+// Example import patterns
+import { Something } from '@/path'  // Absolute
+import { Local } from './local'     // Relative
+```
+
+**Path Aliases**:
+- `@/`: [Maps to]
+
+## Code Organization Principles
+
+[Key architectural patterns and dependency rules]
+
+---
+_Document patterns, not file trees. New files following patterns shouldn't require updates_

+ 45 - 0
.kiro/settings/templates/steering/tech.md

@@ -0,0 +1,45 @@
+# Technology Stack
+
+## Architecture
+
+[High-level system design approach]
+
+## Core Technologies
+
+- **Language**: [e.g., TypeScript, Python]
+- **Framework**: [e.g., React, Next.js, Django]
+- **Runtime**: [e.g., Node.js 20+]
+
+## Key Libraries
+
+[Only major libraries that influence development patterns]
+
+## Development Standards
+
+### Type Safety
+[e.g., TypeScript strict mode, no `any`]
+
+### Code Quality
+[e.g., ESLint, Prettier rules]
+
+### Testing
+[e.g., Jest, coverage requirements]
+
+## Development Environment
+
+### Required Tools
+[Key tools and version requirements]
+
+### Common Commands
+```bash
+# Dev: [command]
+# Build: [command]
+# Test: [command]
+```
+
+## Key Technical Decisions
+
+[Important architectural choices and rationale]
+
+---
+_Document standards and patterns, not every dependency_

+ 764 - 0
.kiro/specs/oauth2-email-support/design.md

@@ -0,0 +1,764 @@
+# OAuth 2.0 Email Support - Technical Design
+
+## Overview
+
+This feature adds OAuth 2.0 authentication support for sending emails through Google Workspace accounts in GROWI. Administrators can configure email transmission using OAuth 2.0 credentials (Client ID, Client Secret, Refresh Token) instead of traditional SMTP passwords. This integration extends the existing mail service architecture while maintaining full backward compatibility with SMTP and SES configurations.
+
+**Purpose**: Enable secure, token-based email authentication for Google Workspace accounts, improving security by eliminating password-based SMTP authentication and following Google's recommended practices for application email integration.
+
+**Users**: GROWI administrators configuring email transmission settings will use the new OAuth 2.0 option alongside existing SMTP and SES methods.
+
+**Impact**: Extends the mail service to support a third transmission method (oauth2) without modifying existing SMTP or SES functionality. No breaking changes to existing deployments.
+
+### Goals
+
+- Add OAuth 2.0 as a transmission method option in mail settings
+- Support Google Workspace email sending via Gmail API with OAuth 2.0 credentials
+- Maintain backward compatibility with existing SMTP and SES configurations
+- Provide consistent admin UI experience following SMTP/SES patterns
+- Implement automatic OAuth 2.0 token refresh using nodemailer's built-in support
+- Ensure secure storage and handling of OAuth 2.0 credentials
+
+### Non-Goals
+
+- OAuth 2.0 providers beyond Google Workspace (Microsoft 365, generic OAuth 2.0 servers)
+- Migration tool from SMTP to OAuth 2.0 (administrators manually reconfigure)
+- Authorization flow UI for obtaining refresh tokens (documented external process via Google Cloud Console)
+- Multi-account or account rotation support (single OAuth 2.0 account per instance)
+- Email queuing or rate limiting specific to OAuth 2.0 (relies on existing mail service behavior)
+
+## Architecture
+
+### Existing Architecture Analysis
+
+**Current Mail Service Implementation**:
+- **Service Location**: `apps/app/src/server/service/mail.ts` (MailService class)
+- **Initialization**: MailService instantiated from Crowi container, loaded on app startup
+- **Transmission Methods**: Currently supports 'smtp' and 'ses' via `mail:transmissionMethod` config
+- **Factory Pattern**: `createSMTPClient()` and `createSESClient()` create nodemailer transports
+- **Configuration**: ConfigManager loads settings from MongoDB via `mail:*` namespace keys
+- **S2S Messaging**: Supports distributed config updates via `mailServiceUpdated` events
+- **Test Email**: SMTP-only test email functionality in admin UI
+
+**Current Admin UI Structure**:
+- **Main Component**: `MailSetting.tsx` - form container with transmission method radio buttons
+- **Sub-Components**: `SmtpSetting.tsx`, `SesSetting.tsx` - conditional rendering based on selected method
+- **State Management**: AdminAppContainer (unstated) manages form state and API calls
+- **Form Library**: react-hook-form for validation and submission
+- **API Integration**: `updateMailSettingHandler()` saves all mail settings via REST API
+
+**Integration Points**:
+- Config definition in `config-definition.ts` (add OAuth 2.0 keys)
+- MailService initialize() method (add OAuth 2.0 branch)
+- MailSetting.tsx transmission method array (add 'oauth2' option)
+- AdminAppContainer state methods (add OAuth 2.0 credential methods)
+
+### Architecture Pattern & Boundary Map
+
+```mermaid
+graph TB
+    subgraph "Client Layer"
+        MailSettingUI[MailSetting Component]
+        OAuth2SettingUI[OAuth2Setting Component]
+        SmtpSettingUI[SmtpSetting Component]
+        SesSettingUI[SesSetting Component]
+        AdminContainer[AdminAppContainer]
+    end
+
+    subgraph "API Layer"
+        AppSettingsAPI[App Settings API]
+        MailTestAPI[Mail Test API]
+    end
+
+    subgraph "Service Layer"
+        MailService[MailService]
+        ConfigManager[ConfigManager]
+        S2SMessaging[S2S Messaging]
+    end
+
+    subgraph "External Services"
+        GoogleOAuth[Google OAuth 2.0 API]
+        GmailAPI[Gmail API]
+        SMTPServer[SMTP Server]
+        SESAPI[AWS SES API]
+    end
+
+    subgraph "Data Layer"
+        MongoDB[(MongoDB Config)]
+    end
+
+    MailSettingUI --> AdminContainer
+    OAuth2SettingUI --> AdminContainer
+    SmtpSettingUI --> AdminContainer
+    SesSettingUI --> AdminContainer
+
+    AdminContainer --> AppSettingsAPI
+    AdminContainer --> MailTestAPI
+
+    AppSettingsAPI --> ConfigManager
+    MailTestAPI --> MailService
+
+    MailService --> ConfigManager
+    MailService --> S2SMessaging
+
+    ConfigManager --> MongoDB
+
+    MailService --> GoogleOAuth
+    MailService --> GmailAPI
+    MailService --> SMTPServer
+    MailService --> SESAPI
+
+    S2SMessaging -.->|mailServiceUpdated| MailService
+```
+
+**Architecture Integration**:
+- **Selected Pattern**: Factory Method Extension - adds `createOAuth2Client()` to existing MailService factory methods
+- **Domain Boundaries**:
+  - **Client**: Admin UI components for OAuth 2.0 configuration (follows existing SmtpSetting/SesSetting pattern)
+  - **Service**: MailService handles all transmission methods; OAuth 2.0 isolated in new factory method
+  - **Config**: ConfigManager persists OAuth 2.0 credentials using `mail:oauth2*` namespace
+  - **External**: Google OAuth 2.0 API for token management; Gmail API for email transmission
+- **Existing Patterns Preserved**:
+  - Transmission method selection pattern (radio buttons, conditional rendering)
+  - Factory method pattern for transport creation
+  - Config namespace pattern (`mail:*` keys)
+  - Unstated container state management
+  - S2S messaging for distributed config updates
+- **New Components Rationale**:
+  - **OAuth2Setting Component**: Maintains UI consistency with SMTP/SES; enables modular development
+  - **createOAuth2Client() Method**: Isolates OAuth 2.0 transport logic; follows existing factory pattern
+  - **Four Config Keys**: Minimal set for OAuth 2.0 (user, clientId, clientSecret, refreshToken)
+- **Steering Compliance**:
+  - Feature-based organization (mail service domain)
+  - Named exports throughout
+  - Type safety with explicit TypeScript interfaces
+  - Immutable config updates
+  - Security-first credential handling
+
+### Technology Stack
+
+| Layer | Choice / Version | Role in Feature | Notes |
+|-------|------------------|-----------------|-------|
+| Frontend | React 18.x + TypeScript | OAuth2Setting UI component | Existing stack, no new dependencies |
+| Frontend | react-hook-form | Form validation and state | Existing dependency, consistent with SmtpSetting/SesSetting |
+| Backend | Node.js + TypeScript | MailService OAuth 2.0 integration | Existing runtime, no version changes |
+| Backend | nodemailer 6.x | OAuth 2.0 transport creation | Existing dependency with built-in OAuth 2.0 support |
+| Data | MongoDB | Config storage for OAuth 2.0 credentials | Existing database, new config keys only |
+| External | Google OAuth 2.0 API | Token refresh endpoint | Standard Google API, https://oauth2.googleapis.com/token |
+| External | Gmail API | Email transmission via OAuth 2.0 | Accessed via nodemailer Gmail transport |
+
+**Key Technology Decisions**:
+- **Nodemailer OAuth 2.0**: Built-in support eliminates need for additional OAuth 2.0 libraries; automatic token refresh reduces complexity
+- **No New Dependencies**: Feature fully implemented with existing packages; zero dependency risk
+- **MongoDB Encryption**: Credentials stored using existing ConfigManager encryption (same as SMTP passwords)
+- **Gmail Service Shortcut**: Nodemailer's `service: "gmail"` simplifies configuration and handles Gmail API specifics
+
+## System Flows
+
+### OAuth 2.0 Configuration Flow
+
+```mermaid
+sequenceDiagram
+    participant Admin as Administrator
+    participant UI as MailSetting UI
+    participant Container as AdminAppContainer
+    participant API as App Settings API
+    participant Config as ConfigManager
+    participant DB as MongoDB
+
+    Admin->>UI: Select "oauth2" transmission method
+    UI->>UI: Render OAuth2Setting component
+    Admin->>UI: Enter OAuth 2.0 credentials
+    Admin->>UI: Click Update button
+    UI->>Container: handleSubmit formData
+    Container->>API: POST app-settings
+    API->>API: Validate OAuth 2.0 fields
+    alt Validation fails
+        API-->>Container: 400 Bad Request
+        Container-->>UI: Display error toast
+    else Validation passes
+        API->>Config: setConfig mail:oauth2*
+        Config->>DB: Save encrypted credentials
+        DB-->>Config: Success
+        Config-->>API: Success
+        API-->>Container: 200 OK
+        Container-->>UI: Display success toast
+    end
+```
+
+### Email Sending with OAuth 2.0 Flow
+
+```mermaid
+sequenceDiagram
+    participant App as GROWI Application
+    participant Mail as MailService
+    participant Nodemailer as Nodemailer Transport
+    participant Google as Google OAuth 2.0 API
+    participant Gmail as Gmail API
+
+    App->>Mail: send emailConfig
+    Mail->>Mail: Check mailer setup
+    alt Mailer not setup
+        Mail-->>App: Error Mailer not set up
+    else Mailer setup oauth2
+        Mail->>Nodemailer: sendMail mailConfig
+        Nodemailer->>Nodemailer: Check access token validity
+        alt Access token expired
+            Nodemailer->>Google: POST token refresh
+            Google-->>Nodemailer: New access token
+            Nodemailer->>Nodemailer: Cache access token
+        end
+        Nodemailer->>Gmail: POST send message
+        alt Authentication failure
+            Gmail-->>Nodemailer: 401 Unauthorized
+            Nodemailer-->>Mail: Error Invalid credentials
+            Mail-->>App: Error with OAuth 2.0 details
+        else Success
+            Gmail-->>Nodemailer: 200 OK message ID
+            Nodemailer-->>Mail: Success
+            Mail->>Mail: Log transmission success
+            Mail-->>App: Email sent successfully
+        end
+    end
+```
+
+**Flow-Level Decisions**:
+- **Token Refresh**: Handled entirely by nodemailer; MailService does not implement custom refresh logic
+- **Error Handling**: OAuth 2.0 errors logged with specific Google API error codes for admin troubleshooting
+- **Credential Validation**: Performed at API layer before persisting to database; prevents invalid config states
+- **S2S Sync**: OAuth 2.0 config changes trigger `mailServiceUpdated` event for distributed deployments (existing pattern)
+
+## Requirements Traceability
+
+| Requirement | Summary | Components | Interfaces | Flows |
+|-------------|---------|------------|------------|-------|
+| 1.1 | Add OAuth 2.0 transmission method option | MailSetting.tsx, config-definition.ts | ConfigDefinition | Configuration |
+| 1.2 | Display OAuth 2.0 config fields when selected | OAuth2Setting.tsx, MailSetting.tsx | React Props | Configuration |
+| 1.3 | Validate email address format | AdminAppContainer, App Settings API | API Contract | Configuration |
+| 1.4 | Validate non-empty OAuth 2.0 credentials | AdminAppContainer, App Settings API | API Contract | Configuration |
+| 1.5 | Securely store OAuth 2.0 credentials with encryption | ConfigManager, MongoDB | Data Model | Configuration |
+| 1.6 | Confirm successful configuration save | AdminAppContainer, MailSetting.tsx | API Contract | Configuration |
+| 1.7 | Display descriptive error messages on save failure | AdminAppContainer, MailSetting.tsx | API Contract | Configuration |
+| 2.1 | Use nodemailer Gmail OAuth 2.0 transport | MailService.createOAuth2Client() | Service Interface | Email Sending |
+| 2.2 | Authenticate to Gmail API with OAuth 2.0 | MailService.createOAuth2Client() | External API | Email Sending |
+| 2.3 | Set FROM address to configured email | MailService.setupMailConfig() | Service Interface | Email Sending |
+| 2.4 | Log successful email transmission | MailService.send() | Service Interface | Email Sending |
+| 2.5 | Support all email content types | MailService.send() (existing) | Service Interface | Email Sending |
+| 2.6 | Process email queue sequentially | MailService.send() (existing) | Service Interface | Email Sending |
+| 3.1 | Use nodemailer automatic token refresh | Nodemailer OAuth 2.0 transport | External Library | Email Sending |
+| 3.2 | Request new access token with refresh token | Nodemailer OAuth 2.0 transport | External API | Email Sending |
+| 3.3 | Continue email sending after token refresh | Nodemailer OAuth 2.0 transport | External Library | Email Sending |
+| 3.4 | Log error and notify admin on refresh failure | MailService.send(), Error Handler | Service Interface | Email Sending |
+| 3.5 | Cache access tokens in memory | Nodemailer OAuth 2.0 transport | External Library | Email Sending |
+| 3.6 | Invalidate cached tokens on config update | MailService.initialize() | Service Interface | Configuration |
+| 4.1 | Display OAuth 2.0 form with consistent styling | OAuth2Setting.tsx | React Component | Configuration |
+| 4.2 | Preserve OAuth 2.0 credentials when switching methods | AdminAppContainer state | State Management | Configuration |
+| 4.3 | Provide field-level help text | OAuth2Setting.tsx | React Component | Configuration |
+| 4.4 | Mask sensitive fields (last 4 characters) | OAuth2Setting.tsx | React Component | Configuration |
+| 4.5 | Provide test email button | MailSetting.tsx | API Contract | Email Sending |
+| 4.6 | Display test email result with detailed errors | AdminAppContainer, MailSetting.tsx | API Contract | Email Sending |
+| 5.1 | Log specific OAuth 2.0 error codes | MailService error handler | Service Interface | Email Sending |
+| 5.2 | Retry email sending with exponential backoff | MailService.send() | Service Interface | Email Sending |
+| 5.3 | Store failed emails after all retries | MailService.send() | Service Interface | Email Sending |
+| 5.4 | Never log credentials in plain text | MailService, ConfigManager | Security Pattern | All flows |
+| 5.5 | Require admin authentication for config page | App Settings API | API Contract | Configuration |
+| 5.6 | Stop OAuth 2.0 sending when credentials deleted | MailService.initialize() | Service Interface | Email Sending |
+| 5.7 | Validate SSL/TLS for OAuth 2.0 endpoints | Nodemailer OAuth 2.0 transport | External Library | Email Sending |
+| 6.1 | Maintain backward compatibility with SMTP/SES | MailService, config-definition.ts | All Interfaces | All flows |
+| 6.2 | Use only active transmission method | MailService.initialize() | Service Interface | Email Sending |
+| 6.3 | Allow switching transmission methods without data loss | AdminAppContainer, ConfigManager | State Management | Configuration |
+| 6.4 | Display configuration error if no method set | MailService, MailSetting.tsx | Service Interface | Configuration |
+| 6.5 | Expose OAuth 2.0 status via admin API | App Settings API | API Contract | Configuration |
+
+## Components and Interfaces
+
+### Component Summary
+
+| Component | Domain/Layer | Intent | Req Coverage | Key Dependencies (P0/P1) | Contracts |
+|-----------|--------------|--------|--------------|--------------------------|-----------|
+| MailService | Server/Service | Email transmission with OAuth 2.0 support | 2.1-2.6, 3.1-3.6, 5.1-5.4, 5.6-5.7, 6.2, 6.4 | ConfigManager (P0), Nodemailer (P0), S2SMessaging (P1) | Service |
+| OAuth2Setting | Client/UI | OAuth 2.0 credential input form | 1.2, 4.1, 4.3, 4.4 | AdminAppContainer (P0), react-hook-form (P0) | State |
+| AdminAppContainer | Client/State | State management for mail settings | 1.3, 1.4, 1.6, 1.7, 4.2, 6.3 | App Settings API (P0) | API |
+| ConfigManager | Server/Service | Persist OAuth 2.0 credentials | 1.5, 6.1, 6.3 | MongoDB (P0) | Service, State |
+| App Settings API | Server/API | Mail settings CRUD operations | 1.3-1.7, 4.5-4.6, 5.5, 6.5 | ConfigManager (P0), MailService (P1) | API |
+| Config Definition | Server/Config | OAuth 2.0 config schema | 1.1, 6.1 | None | State |
+
+### Server / Service Layer
+
+#### MailService
+
+| Field | Detail |
+|-------|--------|
+| Intent | Extend email transmission service with OAuth 2.0 support using Gmail API |
+| Requirements | 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 5.1, 5.2, 5.3, 5.4, 5.6, 5.7, 6.2, 6.4 |
+| Owner / Reviewers | Backend team |
+
+**Responsibilities & Constraints**
+- Create OAuth 2.0 nodemailer transport using Gmail service with credentials from ConfigManager
+- Handle OAuth 2.0 authentication failures and token refresh errors with specific error logging
+- Implement retry logic with exponential backoff (1s, 2s, 4s) for transient failures
+- Store failed emails after all retry attempts for manual review
+- Maintain single active transmission method (smtp, ses, or oauth2) per instance
+- Invalidate cached OAuth 2.0 tokens when configuration changes via S2S messaging
+
+**Dependencies**
+- Inbound: Crowi container — service initialization (P0)
+- Inbound: Application modules — email sending requests (P0)
+- Inbound: S2S Messaging — config update notifications (P1)
+- Outbound: ConfigManager — load OAuth 2.0 credentials (P0)
+- Outbound: Nodemailer — create transport and send emails (P0)
+- External: Google OAuth 2.0 API — token refresh (P0)
+- External: Gmail API — email transmission (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+interface MailServiceOAuth2Extension {
+  /**
+   * Create OAuth 2.0 nodemailer transport for Gmail
+   */
+  createOAuth2Client(option?: OAuth2TransportOptions): Transporter | null;
+
+  /**
+   * Send email with retry logic and error handling
+   */
+  sendWithRetry(config: EmailConfig, maxRetries?: number): Promise<SendResult>;
+
+  /**
+   * Store failed email for manual review
+   */
+  storeFailedEmail(config: EmailConfig, error: Error): Promise<void>;
+
+  /**
+   * Wait with exponential backoff
+   */
+  exponentialBackoff(attempt: number): Promise<void>;
+}
+
+interface OAuth2TransportOptions {
+  user: string;
+  clientId: string;
+  clientSecret: string;
+  refreshToken: string;
+}
+
+interface MailService {
+  send(config: EmailConfig): Promise<void>;
+  initialize(): void;
+  isMailerSetup: boolean;
+}
+
+interface EmailConfig {
+  to: string;
+  from?: string;
+  subject?: string;
+  template: string;
+  vars?: Record<string, unknown>;
+}
+
+interface SendResult {
+  messageId: string;
+  response: string;
+  envelope: {
+    from: string;
+    to: string[];
+  };
+}
+```
+
+- **Preconditions**:
+  - ConfigManager loaded with valid `mail:oauth2*` configuration values
+  - Nodemailer package version supports OAuth 2.0 (v6.x+)
+  - Google OAuth 2.0 refresh token has `https://mail.google.com/` scope
+
+- **Postconditions**:
+  - OAuth 2.0 transport created with automatic token refresh enabled
+  - `isMailerSetup` flag set to true when transport successfully created
+  - Failed transport creation returns null and logs error
+  - Successful email sends logged with messageId and recipient
+  - Failed emails stored after retry exhaustion
+
+- **Invariants**:
+  - Only one transmission method active at a time
+  - Credentials never logged in plain text
+  - Token refresh handled transparently by nodemailer
+  - Retry backoff: 1s, 2s, 4s
+
+
+#### ConfigManager
+
+| Field | Detail |
+|-------|--------|
+| Intent | Persist and retrieve OAuth 2.0 credentials with encryption |
+| Requirements | 1.5, 6.1, 6.3 |
+
+**Responsibilities & Constraints**
+- Store four new OAuth 2.0 config keys with encryption
+- Support transmission method value 'oauth2'
+- Maintain all SMTP and SES config values when OAuth 2.0 is configured
+
+**Dependencies**
+- Inbound: MailService, App Settings API (P0)
+- Outbound: MongoDB, Encryption Service (P0)
+
+**Contracts**: Service [x] / State [x]
+
+##### Service Interface
+
+```typescript
+interface ConfigManagerOAuth2Extension {
+  getConfig(key: 'mail:oauth2User'): string | undefined;
+  getConfig(key: 'mail:oauth2ClientId'): string | undefined;
+  getConfig(key: 'mail:oauth2ClientSecret'): string | undefined;
+  getConfig(key: 'mail:oauth2RefreshToken'): string | undefined;
+  getConfig(key: 'mail:transmissionMethod'): 'smtp' | 'ses' | 'oauth2' | undefined;
+
+  setConfig(key: 'mail:oauth2User', value: string): Promise<void>;
+  setConfig(key: 'mail:oauth2ClientId', value: string): Promise<void>;
+  setConfig(key: 'mail:oauth2ClientSecret', value: string): Promise<void>;
+  setConfig(key: 'mail:oauth2RefreshToken', value: string): Promise<void>;
+  setConfig(key: 'mail:transmissionMethod', value: 'smtp' | 'ses' | 'oauth2'): Promise<void>;
+}
+```
+
+##### State Management
+
+- **State Model**: OAuth 2.0 credentials stored as separate config documents in MongoDB
+- **Persistence**: Encrypted at write time; decrypted at read time
+- **Consistency**: Atomic writes per config key
+- **Concurrency**: Last-write-wins; S2S messaging for eventual consistency
+
+
+### Client / UI Layer
+
+#### OAuth2Setting Component
+
+| Field | Detail |
+|-------|--------|
+| Intent | Render OAuth 2.0 credential input form with help text and field masking |
+| Requirements | 1.2, 4.1, 4.3, 4.4 |
+
+**Responsibilities & Constraints**
+- Display four input fields with help text
+- Mask saved Client Secret and Refresh Token (show last 4 characters)
+- Follow SMTP/SES visual patterns
+- Use react-hook-form register
+
+**Dependencies**
+- Inbound: MailSetting component (P0)
+- Outbound: AdminAppContainer (P1)
+- External: react-hook-form (P0)
+
+**Contracts**: State [x]
+
+##### State Management
+
+```typescript
+interface OAuth2SettingProps {
+  register: UseFormRegister<MailSettingsFormData>;
+  adminAppContainer?: AdminAppContainer;
+}
+
+interface MailSettingsFormData {
+  fromAddress: string;
+  transmissionMethod: 'smtp' | 'ses' | 'oauth2';
+  smtpHost: string;
+  smtpPort: string;
+  smtpUser: string;
+  smtpPassword: string;
+  sesAccessKeyId: string;
+  sesSecretAccessKey: string;
+  oauth2User: string;
+  oauth2ClientId: string;
+  oauth2ClientSecret: string;
+  oauth2RefreshToken: string;
+}
+```
+
+
+#### AdminAppContainer (Extension)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Manage OAuth 2.0 credential state and API interactions |
+| Requirements | 1.3, 1.4, 1.6, 1.7, 4.2, 6.3 |
+
+**Responsibilities & Constraints**
+- Add four state properties and setter methods
+- Include OAuth 2.0 credentials in API payload
+- Validate email format before API call
+- Display success/error toasts
+
+**Dependencies**
+- Inbound: MailSetting, OAuth2Setting (P0)
+- Outbound: App Settings API (P0)
+
+**Contracts**: State [x] / API [x]
+
+##### State Management
+
+```typescript
+interface AdminAppContainerOAuth2State {
+  fromAddress?: string;
+  transmissionMethod?: 'smtp' | 'ses' | 'oauth2';
+  smtpHost?: string;
+  smtpPort?: string;
+  smtpUser?: string;
+  smtpPassword?: string;
+  sesAccessKeyId?: string;
+  sesSecretAccessKey?: string;
+  isMailerSetup: boolean;
+  oauth2User?: string;
+  oauth2ClientId?: string;
+  oauth2ClientSecret?: string;
+  oauth2RefreshToken?: string;
+}
+
+interface AdminAppContainerOAuth2Methods {
+  changeOAuth2User(oauth2User: string): Promise<void>;
+  changeOAuth2ClientId(oauth2ClientId: string): Promise<void>;
+  changeOAuth2ClientSecret(oauth2ClientSecret: string): Promise<void>;
+  changeOAuth2RefreshToken(oauth2RefreshToken: string): Promise<void>;
+  updateMailSettingHandler(): Promise<void>;
+}
+```
+
+
+### Server / API Layer
+
+#### App Settings API (Extension)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Handle OAuth 2.0 credential CRUD operations with validation |
+| Requirements | 1.3, 1.4, 1.5, 1.6, 1.7, 4.5, 4.6, 5.5, 6.5 |
+
+**Responsibilities & Constraints**
+- Accept OAuth 2.0 credentials in PUT request
+- Validate email format and non-empty credentials
+- Persist via ConfigManager
+- Trigger S2S messaging
+- Require admin authentication
+
+**Dependencies**
+- Inbound: AdminAppContainer (P0)
+- Outbound: ConfigManager, MailService, S2S Messaging (P0/P1)
+
+**Contracts**: API [x]
+
+##### API Contract
+
+| Method | Endpoint | Request | Response | Errors |
+|--------|----------|---------|----------|--------|
+| PUT | /api/v3/app-settings | UpdateMailSettingsRequest | AppSettingsResponse | 400, 401, 500 |
+| GET | /api/v3/app-settings | - | AppSettingsResponse | 401, 500 |
+| POST | /api/v3/mail/send-test | - | TestEmailResponse | 400, 401, 500 |
+
+**Request/Response Schemas**:
+
+```typescript
+interface UpdateMailSettingsRequest {
+  'mail:from'?: string;
+  'mail:transmissionMethod'?: 'smtp' | 'ses' | 'oauth2';
+  'mail:smtpHost'?: string;
+  'mail:smtpPort'?: string;
+  'mail:smtpUser'?: string;
+  'mail:smtpPassword'?: string;
+  'mail:sesAccessKeyId'?: string;
+  'mail:sesSecretAccessKey'?: string;
+  'mail:oauth2User'?: string;
+  'mail:oauth2ClientId'?: string;
+  'mail:oauth2ClientSecret'?: string;
+  'mail:oauth2RefreshToken'?: string;
+}
+
+interface AppSettingsResponse {
+  appSettings: {
+    'mail:from'?: string;
+    'mail:transmissionMethod'?: 'smtp' | 'ses' | 'oauth2';
+    'mail:smtpHost'?: string;
+    'mail:smtpPort'?: string;
+    'mail:smtpUser'?: string;
+    'mail:sesAccessKeyId'?: string;
+    'mail:oauth2User'?: string;
+    'mail:oauth2ClientId'?: string;
+  };
+  isMailerSetup: boolean;
+}
+
+interface TestEmailResponse {
+  success: boolean;
+  message?: string;
+  error?: {
+    code: string;
+    message: string;
+  };
+}
+```
+
+
+### Server / Config Layer
+
+#### Config Definition (Extension)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Define OAuth 2.0 configuration schema with type safety |
+| Requirements | 1.1, 6.1 |
+
+**Config Schema**:
+
+```typescript
+const CONFIG_KEYS = [
+  'mail:oauth2User',
+  'mail:oauth2ClientId',
+  'mail:oauth2ClientSecret',
+  'mail:oauth2RefreshToken',
+];
+
+'mail:transmissionMethod': defineConfig<'smtp' | 'ses' | 'oauth2' | undefined>({
+  defaultValue: undefined,
+}),
+
+'mail:oauth2User': defineConfig<string | undefined>({
+  defaultValue: undefined,
+}),
+'mail:oauth2ClientId': defineConfig<string | undefined>({
+  defaultValue: undefined,
+}),
+'mail:oauth2ClientSecret': defineConfig<string | undefined>({
+  defaultValue: undefined,
+  isSecret: true,
+}),
+'mail:oauth2RefreshToken': defineConfig<string | undefined>({
+  defaultValue: undefined,
+  isSecret: true,
+}),
+```
+
+## Data Models
+
+### Domain Model
+
+**Mail Configuration Aggregate**:
+- **Root Entity**: MailConfiguration
+- **Value Objects**: TransmissionMethod, OAuth2Credentials, SmtpCredentials, SesCredentials
+- **Business Rules**: Only one transmission method active; OAuth2Credentials complete when all fields present
+- **Invariants**: Credentials encrypted; FROM address required
+
+### Logical Data Model
+
+**Structure Definition**:
+- **Entity**: Config (MongoDB document)
+- **Attributes**: ns, key, value, createdAt, updatedAt
+- **Natural Keys**: ns + key composite (unique per config entry)
+
+**Consistency & Integrity**:
+- **Transaction Boundaries**: Each config key saved independently
+- **Temporal Aspects**: updatedAt tracked per entry
+
+### Physical Data Model
+
+- Config documents stored in MongoDB with ns/key/value pattern
+- FailedEmail documents track failed email attempts with error context
+- **Encryption**: AES-256 for clientSecret and refreshToken via environment-provided key
+
+### Data Contracts & Integration
+
+**API Data Transfer**:
+- OAuth 2.0 credentials via JSON in PUT /api/v3/app-settings
+- Client Secret and Refresh Token never returned in GET responses
+
+**Cross-Service Data Management**:
+- S2S messaging broadcasts mailServiceUpdated event
+- Eventual consistency across instances
+
+
+## Critical Implementation Constraints
+
+### Nodemailer XOAuth2 Compatibility (CRITICAL)
+
+**Constraint**: OAuth 2.0 credential validation **must use falsy checks** (`!value`) not null checks (`value != null`) to match nodemailer's internal XOAuth2 handler behavior.
+
+**Rationale**: Nodemailer's XOAuth2.generateToken() method uses `!this.options.refreshToken` at line 184, which rejects empty strings as invalid. Using `!= null` checks in GROWI would allow empty strings through validation, causing runtime failures when nodemailer rejects them.
+
+**Implementation Pattern**:
+```typescript
+// ✅ CORRECT: Falsy check matches nodemailer behavior
+if (!clientId || !clientSecret || !refreshToken || !user) {
+  return null;
+}
+```
+
+**Impact**: Affects MailService.createOAuth2Client(), ConfigManager validation, and API validators. All OAuth 2.0 credential checks must follow this pattern.
+
+**Reference**: [mail.ts:219-226](../../../apps/app/src/server/service/mail.ts#L219-L226), [research.md](research.md#1-nodemailer-xoauth2-falsy-check-requirement)
+
+---
+
+### Credential Preservation Pattern (CRITICAL)
+
+**Constraint**: PUT requests updating OAuth 2.0 configuration **must only include secret fields (clientSecret, refreshToken) when non-empty values are provided**, preventing accidental credential overwrites.
+
+**Rationale**: Standard PUT pattern sending all form fields would overwrite secrets with empty strings when administrators update non-secret fields (from address, user email). GET endpoint returns `undefined` for secrets (not masked placeholders) to prevent re-submission of placeholder text.
+
+**Implementation Pattern**:
+```typescript
+// Build params with non-secret fields
+const params = {
+  'mail:oauth2ClientId': req.body.oauth2ClientId,
+  'mail:oauth2User': req.body.oauth2User,
+};
+
+// Only include secrets if non-empty
+if (req.body.oauth2ClientSecret) {
+  params['mail:oauth2ClientSecret'] = req.body.oauth2ClientSecret;
+}
+```
+
+**Impact**: Affects App Settings API PUT handler and any future API that updates OAuth 2.0 credentials.
+
+**Reference**: [apiv3/app-settings/index.ts:293-306](../../../apps/app/src/server/routes/apiv3/app-settings/index.ts#L293-L306), [research.md](research.md#3-credential-preservation-pattern)
+
+---
+
+### Gmail API FROM Address Behavior (LIMITATION)
+
+**Limitation**: Gmail API **rewrites FROM addresses to the authenticated account email** unless send-as aliases are configured in Google Workspace.
+
+**Example**:
+```
+Configured: mail:from = "notifications@example.com"
+Authenticated: oauth2User = "admin@company.com"
+Actual sent FROM: "admin@company.com"
+```
+
+**Workaround**: Google Workspace administrators must configure send-as aliases in Gmail Settings → Accounts and Import → Send mail as, then verify domain ownership.
+
+**Why This Happens**: Gmail API security policy prevents email spoofing by restricting FROM addresses to authenticated accounts or verified aliases.
+
+**Impact**: GROWI's `mail:from` configuration has limited effect with OAuth 2.0. Custom FROM addresses require Google Workspace configuration. This is expected Gmail behavior, not a GROWI limitation.
+
+**Reference**: [research.md](research.md#2-gmail-api-from-address-rewriting)
+
+---
+
+### OAuth 2.0 Retry Integration (DESIGN DECISION)
+
+**Decision**: OAuth 2.0 transmission uses `sendWithRetry()` with exponential backoff (1s, 2s, 4s), while SMTP/SES use direct `sendMail()` without retries.
+
+**Rationale**: OAuth 2.0 token refresh can fail transiently due to network issues or Google API rate limiting. Exponential backoff provides resilience without overwhelming the API.
+
+**Implementation**:
+```typescript
+if (transmissionMethod === 'oauth2') {
+  return this.sendWithRetry(mailConfig);
+}
+return this.mailer.sendMail(mailConfig);
+```
+
+**Impact**: OAuth 2.0 email failures are automatically retried, improving reliability for production deployments.
+
+**Reference**: [mail.ts:392-400](../../../apps/app/src/server/service/mail.ts#L392-L400)

+ 57 - 0
.kiro/specs/oauth2-email-support/requirements.md

@@ -0,0 +1,57 @@
+# Requirements Document
+
+## Project Description (Input)
+OAuth 2.0 authentication で Google Workspace を利用し email を送信する機能を追加したい
+
+### Context from User
+This implementation adds OAuth 2.0 authentication support for sending emails using Google Workspace accounts. The feature is fully integrated into the admin settings UI and follows the existing patterns for SMTP and SES configuration.
+
+Key configuration parameters:
+- Email Address: The authorized Google account email
+- Client ID: OAuth 2.0 Client ID from Google Cloud Console
+- Client Secret: OAuth 2.0 Client Secret
+- Refresh Token: OAuth 2.0 Refresh Token obtained from authorization flow
+
+The implementation uses nodemailer's built-in Gmail OAuth 2.0 support, which handles token refresh automatically.
+
+## Introduction
+
+This specification defines the requirements for adding OAuth 2.0 authentication support for email transmission using Google Workspace accounts in GROWI. The feature enables administrators to configure email sending through Google's Gmail API using OAuth 2.0 credentials instead of traditional SMTP authentication. This provides enhanced security through token-based authentication and follows Google's recommended practices for application email integration.
+
+## Requirements
+
+### Requirement 1: OAuth 2.0 Configuration Management
+
+**Objective:** As a GROWI administrator, I want to configure OAuth 2.0 credentials for Google Workspace email sending, so that the system can securely send emails without using SMTP passwords.
+
+**Summary**: The Admin Settings UI provides OAuth 2.0 as a transmission method option alongside SMTP and SES. The configuration form includes fields for Email Address, Client ID, Client Secret, and Refresh Token. All fields are validated (email format, non-empty strings using falsy checks), and secrets are encrypted before database storage. Configuration updates preserve existing secrets when empty values are submitted, preventing accidental credential overwrites. Success and error feedback is displayed to administrators.
+
+### Requirement 2: Email Sending Functionality
+
+**Objective:** As a GROWI system, I want to send emails using OAuth 2.0 authenticated Google Workspace accounts, so that notifications and system emails can be delivered securely without SMTP credentials.
+
+**Summary**: The Email Service uses nodemailer with Gmail OAuth 2.0 transport for email sending when OAuth 2.0 is configured. Authentication to Gmail API is automatic using configured credentials. The service supports all email content types (plain text, HTML, attachments, standard headers). Successful transmissions are logged with timestamp and recipient information. OAuth 2.0 sends use retry logic with exponential backoff (1s, 2s, 4s) to handle transient failures. Note: Gmail API rewrites FROM address to the authenticated account unless send-as aliases are configured in Google Workspace.
+
+### Requirement 3: Token Management
+
+**Objective:** As a GROWI system, I want to automatically manage OAuth 2.0 access token lifecycle, so that email sending continues without manual intervention when tokens expire.
+
+**Summary**: Token refresh is handled automatically by nodemailer's built-in OAuth 2.0 support. Access tokens are cached in memory and reused until expiration. When refresh tokens are used, nodemailer requests new access tokens from Google's OAuth 2.0 endpoint transparently. Token refresh failures are logged with specific error codes for troubleshooting. When OAuth 2.0 configuration is updated, cached tokens are invalidated via service reinitialization triggered by S2S messaging.
+
+### Requirement 4: Admin UI Integration
+
+**Objective:** As a GROWI administrator, I want OAuth 2.0 email configuration to follow the same UI patterns as SMTP and SES, so that I can configure it consistently with existing mail settings.
+
+**Summary**: The Mail Settings page displays OAuth 2.0 configuration form with consistent visual styling, preserves credentials when switching transmission methods, and shows configuration status. A test email button lets administrators verify the active configuration, with detailed success or error feedback displayed after the attempt. Browser autofill is prevented for secret fields, and placeholder text indicates that blank fields will preserve existing values.
+
+### Requirement 5: Error Handling and Security
+
+**Objective:** As a GROWI administrator, I want clear error messages and secure credential handling, so that I can troubleshoot configuration issues and ensure credentials are protected.
+
+**Summary**: Authentication failures are logged with specific OAuth 2.0 error codes from Google's API for troubleshooting. Email sending failures trigger automatic retry with exponential backoff (3 attempts: 1s, 2s, 4s). Failed emails after retry exhaustion are stored in the database for manual review. Credentials are never logged in plain text (Client ID masked to last 4 characters). Admin authentication is required to access configuration. SSL/TLS validation is enforced by nodemailer. When OAuth 2.0 credentials are incomplete or deleted, the Email Service stops sending and displays configuration errors via isMailerSetup flag.
+
+### Requirement 6: Migration and Compatibility
+
+**Objective:** As a GROWI system, I want OAuth 2.0 email support to coexist with existing SMTP and SES configurations, so that administrators can choose the most appropriate transmission method for their deployment.
+
+**Summary**: OAuth 2.0 is added as a third transmission method option without breaking changes to existing SMTP and SES functionality. Only the active transmission method is used for sending emails. Administrators can switch between methods without data loss (credentials for all methods are preserved). Configuration errors are displayed when no transmission method is properly configured (via isMailerSetup flag). OAuth 2.0 configuration status is exposed through existing admin API endpoints following the same pattern as SMTP/SES.

+ 449 - 0
.kiro/specs/oauth2-email-support/research.md

@@ -0,0 +1,449 @@
+# Research & Design Decisions
+
+---
+**Purpose**: Capture discovery findings, architectural investigations, and rationale that inform the technical design for OAuth 2.0 email support.
+
+**Usage**:
+- Log research activities and outcomes during the discovery phase.
+- Document design decision trade-offs that are too detailed for `design.md`.
+- Provide references and evidence for future audits or reuse.
+---
+
+## Summary
+- **Feature**: `oauth2-email-support`
+- **Discovery Scope**: Extension (integrating OAuth2 into existing mail service architecture)
+- **Key Findings**:
+  - Existing mail service supports SMTP and SES via transmission method pattern
+  - Nodemailer has built-in OAuth2 support for Gmail with automatic token refresh
+  - Admin UI follows modular pattern with separate setting components per transmission method
+  - Config management uses `mail:*` namespace with type-safe definitions
+
+## Research Log
+
+### Existing Mail Service Architecture
+
+- **Context**: Need to understand integration points for OAuth2 support
+- **Sources Consulted**:
+  - `apps/app/src/server/service/mail.ts` (MailService implementation)
+  - `apps/app/src/client/components/Admin/App/MailSetting.tsx` (Admin UI)
+  - `apps/app/src/server/service/config-manager/config-definition.ts` (Config schema)
+- **Findings**:
+  - MailService uses factory pattern: `createSMTPClient()`, `createSESClient()`
+  - Transmission method determined by `mail:transmissionMethod` config value ('smtp' | 'ses')
+  - `initialize()` method called on service startup and S2S message updates
+  - Nodemailer transporter created based on transmission method
+  - Admin UI uses conditional rendering for SMTP vs SES settings
+  - State management via AdminAppContainer (unstated pattern)
+  - Test email functionality exists for SMTP only
+- **Implications**:
+  - OAuth2 follows same pattern: add `createOAuth2Client()` method
+  - Extend `mail:transmissionMethod` type to `'smtp' | 'ses' | 'oauth2'`
+  - Create new `OAuth2Setting.tsx` component following SMTP/SES pattern
+  - Add OAuth2-specific config keys following `mail:*` namespace
+
+### Nodemailer OAuth2 Integration
+
+- **Context**: Verify OAuth2 support in nodemailer and configuration requirements
+- **Sources Consulted**:
+  - [OAuth2 | Nodemailer](https://nodemailer.com/smtp/oauth2)
+  - [Using Gmail | Nodemailer](https://nodemailer.com/usage/using-gmail)
+  - [Sending Emails Securely Using Node.js, Nodemailer, SMTP, Gmail, and OAuth2](https://dev.to/chandrapantachhetri/sending-emails-securely-using-node-js-nodemailer-smtp-gmail-and-oauth2-g3a)
+  - Web search: "nodemailer gmail oauth2 configuration 2026"
+- **Findings**:
+  - Nodemailer has first-class OAuth2 support with type `'OAuth2'`
+  - Configuration structure:
+    ```javascript
+    {
+      service: "gmail",
+      auth: {
+        type: "OAuth2",
+        user: "user@gmail.com",
+        clientId: process.env.GOOGLE_CLIENT_ID,
+        clientSecret: process.env.GOOGLE_CLIENT_SECRET,
+        refreshToken: process.env.GOOGLE_REFRESH_TOKEN
+      }
+    }
+    ```
+  - Automatic access token refresh handled by nodemailer
+  - Requires `https://mail.google.com/` OAuth scope
+  - Gmail service shortcut available (simplifies configuration)
+  - Production consideration: Gmail designed for individual users, not automated services
+- **Implications**:
+  - No additional dependencies needed (nodemailer already installed)
+  - Four config values required: user email, clientId, clientSecret, refreshToken
+  - Token refresh is automatic - no manual refresh logic needed
+  - Should validate credentials before saving to config
+  - Security: clientSecret and refreshToken must be encrypted in database
+
+### Config Manager Pattern Analysis
+
+- **Context**: Understand how to add new config keys for OAuth2 credentials
+- **Sources Consulted**:
+  - `apps/app/src/server/service/config-manager/config-definition.ts`
+  - Existing mail config keys: `mail:from`, `mail:transmissionMethod`, `mail:smtpHost`, etc.
+- **Findings**:
+  - Config keys use namespace pattern: `mail:*`
+  - Type-safe definitions using `defineConfig<T>()`
+  - Existing transmission method: `defineConfig<'smtp' | 'ses' | undefined>()`
+  - Config values stored in database via ConfigManager
+  - No explicit encryption layer visible in config definition (handled elsewhere)
+- **Implications**:
+  - Add four new keys: `mail:oauth2User`, `mail:oauth2ClientId`, `mail:oauth2ClientSecret`, `mail:oauth2RefreshToken`
+  - Update `mail:transmissionMethod` type to `'smtp' | 'ses' | 'oauth2' | undefined`
+  - Encryption should be handled at persistence layer (ConfigManager or database model)
+  - Follow same pattern as SMTP/SES for consistency
+
+### Admin UI State Management Pattern
+
+- **Context**: Understand how to integrate OAuth2 settings into admin UI
+- **Sources Consulted**:
+  - `apps/app/src/client/components/Admin/App/SmtpSetting.tsx`
+  - `apps/app/src/client/components/Admin/App/SesSetting.tsx`
+  - `apps/app/src/client/services/AdminAppContainer.js`
+- **Findings**:
+  - Separate component per transmission method (SmtpSetting, SesSetting)
+  - Components receive `register` from react-hook-form
+  - Unstated container pattern for state management
+  - Container methods: `changeSmtpHost()`, `changeFromAddress()`, etc.
+  - `updateMailSettingHandler()` saves all settings via API
+  - Test email button only shown for SMTP
+- **Implications**:
+  - Create `OAuth2Setting.tsx` component following same structure
+  - Add four state methods to AdminAppContainer: `changeOAuth2User()`, `changeOAuth2ClientId()`, etc.
+  - Include OAuth2 credentials in `updateMailSettingHandler()` API call
+  - Test email functionality should work for OAuth2 (same as SMTP)
+  - Field masking needed for clientSecret and refreshToken
+
+### Security Considerations
+
+- **Context**: Ensure secure handling of OAuth2 credentials
+- **Sources Consulted**:
+  - GROWI security guidelines (`.claude/rules/security.md`)
+  - Existing SMTP/SES credential handling
+- **Findings**:
+  - Credentials stored in MongoDB via ConfigManager
+  - Input fields use `type="password"` for sensitive values
+  - No explicit encryption visible in UI layer
+  - Logging should not expose credentials
+- **Implications**:
+  - Use `type="password"` for clientSecret and refreshToken fields
+  - Mask values when displaying saved configuration (show last 4 characters)
+  - Never log credentials in plain text
+  - Validate SSL/TLS when connecting to Google OAuth endpoints
+  - Ensure admin authentication required before accessing config page
+
+## Architecture Pattern Evaluation
+
+| Option | Description | Strengths | Risks / Limitations | Notes |
+|--------|-------------|-----------|---------------------|-------|
+| Factory Method Extension | Add `createOAuth2Client()` to existing MailService | Follows existing pattern, minimal changes, consistent with SMTP/SES | None significant | Recommended - aligns with current architecture |
+| Separate OAuth2Service | Create dedicated service for OAuth2 mail | Better separation of concerns | Over-engineering for simple extension, breaks existing pattern | Not recommended - unnecessary complexity |
+| Adapter Pattern | Wrap OAuth2 in adapter implementing mail interface | More flexible for future auth methods | Premature abstraction, more code to maintain | Not needed for single OAuth2 implementation |
+
+## Design Decisions
+
+### Decision: Extend Existing MailService with OAuth2 Support
+
+- **Context**: Need to add OAuth2 email sending without breaking existing SMTP/SES functionality
+- **Alternatives Considered**:
+  1. Create separate OAuth2MailService - more modular but introduces service management complexity
+  2. Refactor to plugin architecture - future-proof but over-engineered for current needs
+  3. Extend existing MailService with factory method - follows current pattern
+- **Selected Approach**: Extend existing MailService with `createOAuth2Client()` method
+- **Rationale**:
+  - Maintains consistency with existing architecture
+  - Minimal code changes reduce risk
+  - Clear migration path (no breaking changes)
+  - GROWI already uses this pattern successfully for SMTP/SES
+- **Trade-offs**:
+  - Benefits: Low risk, fast implementation, familiar pattern
+  - Compromises: All transmission methods in single service (acceptable given simplicity)
+- **Follow-up**: Ensure test coverage for OAuth2 path alongside existing SMTP/SES tests
+
+### Decision: Use Nodemailer's Built-in OAuth2 Support
+
+- **Context**: Need reliable OAuth2 implementation with automatic token refresh
+- **Alternatives Considered**:
+  1. Manual OAuth2 implementation with googleapis library - more control but complex
+  2. Third-party OAuth2 wrapper - additional dependency
+  3. Nodemailer built-in OAuth2 - zero additional dependencies
+- **Selected Approach**: Use nodemailer's native OAuth2 support with Gmail service
+- **Rationale**:
+  - No additional dependencies (nodemailer already installed)
+  - Automatic token refresh reduces complexity
+  - Well-documented and actively maintained
+  - Matches user's original plan (stated in requirements)
+- **Trade-offs**:
+  - Benefits: Simple, reliable, no new dependencies
+  - Compromises: Limited to Gmail/Google Workspace (acceptable per requirements)
+- **Follow-up**: Document Google Cloud Console setup steps for administrators
+
+### Decision: Preserve Existing Transmission Method Pattern
+
+- **Context**: Maintain backward compatibility while adding OAuth2 option
+- **Alternatives Considered**:
+  1. Deprecate transmission method concept - breaking change
+  2. Add OAuth2 as transmission method option - extends existing pattern
+  3. Support multiple simultaneous methods - unnecessary complexity
+- **Selected Approach**: Add 'oauth2' as third transmission method option
+- **Rationale**:
+  - Zero breaking changes for existing users
+  - Consistent admin UI experience
+  - Clear mutual exclusivity (one method active at a time)
+  - Easy to test and validate
+- **Trade-offs**:
+  - Benefits: Backward compatible, simple mental model
+  - Compromises: Only one transmission method active (acceptable per requirements)
+- **Follow-up**: Ensure switching between methods preserves all config values
+
+### Decision: Component-Based UI Following SMTP/SES Pattern
+
+- **Context**: Need consistent admin UI for OAuth2 configuration
+- **Alternatives Considered**:
+  1. Inline OAuth2 fields in main form - cluttered UI
+  2. Modal dialog for OAuth2 setup - breaks existing pattern
+  3. Separate OAuth2Setting component - matches SMTP/SES pattern
+- **Selected Approach**: Create `OAuth2Setting.tsx` component rendered conditionally
+- **Rationale**:
+  - Maintains visual consistency across transmission methods
+  - Reuses existing form patterns (react-hook-form, unstated)
+  - Easy for admins familiar with SMTP/SES setup
+  - Supports incremental development (component isolation)
+- **Trade-offs**:
+  - Benefits: Consistent UX, modular code, easy testing
+  - Compromises: Minor code duplication in form field rendering (acceptable)
+- **Follow-up**: Add help text for each OAuth2 field explaining Google Cloud Console setup
+
+## Risks & Mitigations
+
+- **Risk**: OAuth2 credentials stored in plain text in database
+  - **Mitigation**: Implement encryption at ConfigManager persistence layer; use same encryption as SMTP passwords
+
+- **Risk**: Refresh token expiration or revocation not handled
+  - **Mitigation**: Nodemailer handles refresh automatically; log specific error codes for troubleshooting; document token refresh in admin help text
+
+- **Risk**: Google rate limiting or account suspension
+  - **Mitigation**: Document production usage considerations; implement exponential backoff retry logic; log detailed error responses from Gmail API
+
+- **Risk**: Incomplete credential configuration causing service failure
+  - **Mitigation**: Validate all four required fields before saving; display clear error messages; maintain isMailerSetup flag for health checks
+
+- **Risk**: Breaking changes to existing SMTP/SES functionality
+  - **Mitigation**: Preserve all existing code paths; add OAuth2 as isolated branch; comprehensive integration tests for all three methods
+
+## Session 2: Production Implementation Discoveries (2026-02-10)
+
+### Critical Technical Constraints Identified
+
+#### 1. Nodemailer XOAuth2 Falsy Check Requirement
+
+**Discovery**: Production testing revealed "Can't create new access token for user" errors from nodemailer's XOAuth2 handler.
+
+**Root Cause**: Nodemailer's XOAuth2 implementation uses **falsy checks** (`!this.options.refreshToken`) at line 184, not null checks, rejecting empty strings as invalid credentials.
+
+**Implementation Requirement**:
+```typescript
+// ❌ WRONG: Allows empty strings to pass validation
+if (clientId != null && clientSecret != null && refreshToken != null) {
+  // This passes validation but nodemailer will reject it
+}
+
+// ✅ CORRECT: Matches nodemailer's falsy check behavior
+if (!clientId || !clientSecret || !refreshToken || !user) {
+  logger.warn('OAuth 2.0 credentials incomplete, skipping transport creation');
+  return null;
+}
+```
+
+**Why This Matters**: Empty strings (`""`) are falsy in JavaScript. Using `!= null` in GROWI would allow empty strings through validation, but nodemailer's falsy check would then reject them, causing runtime failures.
+
+**Impact**: All credential validation logic in MailService and ConfigManager **must use falsy checks** for OAuth 2.0 credentials to maintain compatibility with nodemailer.
+
+**Reference**: [mail.ts:219-226](../../../apps/app/src/server/service/mail.ts#L219-L226)
+
+---
+
+#### 2. Gmail API FROM Address Rewriting
+
+**Discovery**: Gmail API rewrites the FROM address to the authenticated account email, ignoring GROWI's configured `mail:from` address.
+
+**Gmail API Behavior**: Gmail API enforces that emails are sent FROM the authenticated account unless send-as aliases are explicitly configured in Google Workspace.
+
+**Example**:
+```
+Configured: mail:from = "notifications@example.com"
+Authenticated: oauth2User = "admin@company.com"
+Actual sent FROM: "admin@company.com"
+```
+
+**Workaround**: Google Workspace administrators must configure **send-as aliases**:
+1. Gmail Settings → Accounts and Import → Send mail as
+2. Add desired FROM address as an alias
+3. Verify domain ownership
+
+**Why This Happens**: Gmail API security policy prevents email spoofing by restricting FROM addresses to authenticated account or verified aliases.
+
+**Impact**:
+- GROWI's `mail:from` configuration has **limited effect** with OAuth 2.0
+- Custom FROM addresses require Google Workspace send-as alias configuration
+- This is **expected Gmail behavior**, not a GROWI limitation
+
+**Documentation Note**: This behavior must be documented in admin UI help text and user guides.
+
+---
+
+#### 3. Credential Preservation Pattern
+
+**Discovery**: Initial implementation allowed secret credentials to be accidentally overwritten with empty strings or masked placeholder values when updating non-secret fields.
+
+**Problem**: Standard PUT request pattern sending all form fields would overwrite secrets with empty values when administrators only wanted to update non-secret fields like `from` address or `oauth2User`.
+
+**Solution**: Conditional secret inclusion pattern:
+
+```typescript
+// Build request params with non-secret fields
+const requestOAuth2SettingParams: Record<string, any> = {
+  'mail:from': req.body.fromAddress,
+  'mail:transmissionMethod': req.body.transmissionMethod,
+  'mail:oauth2ClientId': req.body.oauth2ClientId,
+  'mail:oauth2User': req.body.oauth2User,
+};
+
+// Only include secrets if non-empty values provided
+if (req.body.oauth2ClientSecret) {
+  requestOAuth2SettingParams['mail:oauth2ClientSecret'] = req.body.oauth2ClientSecret;
+}
+if (req.body.oauth2RefreshToken) {
+  requestOAuth2SettingParams['mail:oauth2RefreshToken'] = req.body.oauth2RefreshToken;
+}
+```
+
+**Frontend Consideration**: GET endpoint returns `undefined` for secrets (not masked values) to prevent accidental re-submission:
+
+```typescript
+// ❌ WRONG: Returns masked value that could be saved back
+oauth2ClientSecret: '(set)',
+
+// ✅ CORRECT: Returns undefined, frontend shows placeholder
+oauth2ClientSecret: undefined,
+```
+
+**Why This Pattern**: Allows administrators to update non-secret OAuth 2.0 settings without re-entering sensitive credentials every time.
+
+**Impact**: This pattern must be followed for **any API that updates OAuth 2.0 credentials** to prevent accidental secret overwrites.
+
+**Reference**:
+- PUT handler: [apiv3/app-settings/index.ts:293-306](../../../apps/app/src/server/routes/apiv3/app-settings/index.ts#L293-L306)
+- GET response: [apiv3/app-settings/index.ts:273-276](../../../apps/app/src/server/routes/apiv3/app-settings/index.ts#L273-L276)
+
+---
+
+### Type Safety Enhancements
+
+**NonBlankString Type**: OAuth 2.0 config definitions use `NonBlankString | undefined` for compile-time protection against empty string assignments:
+
+```typescript
+'mail:oauth2ClientSecret': defineConfig<NonBlankString | undefined>({
+  defaultValue: undefined,
+  isSecret: true,
+}),
+```
+
+This provides **compile-time protection** complementing runtime falsy checks.
+
+---
+
+### Integration Pattern Discovered
+
+**OAuth 2.0 Retry Logic**: OAuth 2.0 requires retry logic with exponential backoff due to potential token refresh failures:
+
+```typescript
+// OAuth 2.0 uses sendWithRetry() for automatic retry
+if (transmissionMethod === 'oauth2') {
+  return this.sendWithRetry(mailConfig as EmailConfig);
+}
+
+// SMTP/SES use direct sendMail()
+return this.mailer.sendMail(mailConfig);
+```
+
+**Rationale**: OAuth 2.0 token refresh can fail transiently due to network issues or Google API rate limiting. Exponential backoff (1s, 2s, 4s) provides resilience.
+
+---
+
+## Session 3: Post-Refactoring Architecture (2026-02-10)
+
+### MailService Modular Structure
+
+The MailService was refactored from a single monolithic file (`mail.ts`, ~408 lines) into a feature-based directory structure with separate transport modules. This is the current production architecture.
+
+#### Directory Structure
+
+```
+src/server/service/mail/
+├── index.ts              # Barrel export (default: MailService, backward-compatible)
+├── mail.ts               # MailService class (orchestration, S2S, retry logic)
+├── mail.spec.ts          # MailService tests
+├── smtp.ts               # SMTP transport factory: createSMTPClient()
+├── smtp.spec.ts          # SMTP transport tests
+├── ses.ts                # SES transport factory: createSESClient()
+├── ses.spec.ts           # SES transport tests
+├── oauth2.ts             # OAuth2 transport factory: createOAuth2Client()
+├── oauth2.spec.ts        # OAuth2 transport tests
+└── types.ts              # Shared types (StrictOAuth2Options, MailConfig, etc.)
+```
+
+#### Transport Factory Pattern
+
+Each transport module exports a factory function with a consistent signature:
+
+```typescript
+export function create[Transport]Client(
+  configManager: IConfigManagerForApp,
+  option?: TransportOptions
+): Transporter | null;
+```
+
+- Returns `null` if required credentials are missing (logs warning)
+- MailService delegates transport creation based on `mail:transmissionMethod` config
+
+#### StrictOAuth2Options Type
+
+Defined in `types.ts`, this branded type prevents empty string credentials at compile time:
+
+```typescript
+import type { NonBlankString } from '@growi/core/dist/interfaces';
+
+export type StrictOAuth2Options = {
+  service: 'gmail';
+  auth: {
+    type: 'OAuth2';
+    user: NonBlankString;
+    clientId: NonBlankString;
+    clientSecret: NonBlankString;
+    refreshToken: NonBlankString;
+  };
+};
+```
+
+This is stricter than nodemailer's default `XOAuth2.Options` which allows `string | undefined`. The branded type ensures compile-time validation complementing runtime falsy checks.
+
+#### Backward Compatibility
+
+The barrel export at `mail/index.ts` maintains the existing import pattern:
+```typescript
+import MailService from '~/server/service/mail';  // Still works
+```
+
+**Source**: Migrated from `.kiro/specs/refactor-mailer-service/` (spec deleted after implementation completion).
+
+---
+
+## References
+
+- [OAuth2 | Nodemailer](https://nodemailer.com/smtp/oauth2) - Official OAuth2 configuration documentation
+- [Using Gmail | Nodemailer](https://nodemailer.com/usage/using-gmail) - Gmail-specific integration guide
+- [Sending Emails Securely Using Node.js, Nodemailer, SMTP, Gmail, and OAuth2](https://dev.to/chandrapantachhetri/sending-emails-securely-using-node-js-nodemailer-smtp-gmail-and-oauth2-g3a) - Implementation tutorial
+- [Using OAuth2 with Nodemailer for Secure Email Sending](https://shazaali.substack.com/p/using-oauth2-with-nodemailer-for) - Security best practices
+- Internal: `apps/app/src/server/service/mail.ts` - Existing mail service implementation
+- Internal: `apps/app/src/client/components/Admin/App/MailSetting.tsx` - Admin UI patterns

+ 23 - 0
.kiro/specs/oauth2-email-support/spec.json

@@ -0,0 +1,23 @@
+{
+  "feature_name": "oauth2-email-support",
+  "created_at": "2026-02-06T11:43:56Z",
+  "updated_at": "2026-02-13T00:00:00Z",
+  "language": "en",
+  "phase": "implementation-complete",
+  "cleanup_completed": true,
+  "approvals": {
+    "requirements": {
+      "generated": true,
+      "approved": true
+    },
+    "design": {
+      "generated": true,
+      "approved": true
+    },
+    "tasks": {
+      "generated": true,
+      "approved": true
+    }
+  },
+  "ready_for_implementation": true
+}

+ 41 - 0
.kiro/specs/oauth2-email-support/tasks.md

@@ -0,0 +1,41 @@
+# Implementation Tasks - OAuth 2.0 Email Support
+
+## Status Overview
+
+**Final Status**: Production-Ready (2026-02-10)
+**Requirements Coverage**: 35/37 (95%)
+
+## Completed Tasks
+
+### Phase A: Critical Production Requirements (3 tasks)
+
+- [x] 1. Retry logic with exponential backoff (1s, 2s, 4s) - Req: 5.1, 5.2
+- [x] 2. Failed email storage after retry exhaustion - Req: 5.3
+- [x] 3. Enhanced OAuth 2.0 error logging - Req: 5.4, 5.7
+
+Session 2 additional fixes:
+- Credential validation changed to falsy check (nodemailer XOAuth2 compatibility)
+- PUT handler preserves secrets when empty values submitted
+- Config types changed to `NonBlankString | undefined`
+- GET response returns `undefined` for secrets
+- Browser autofill prevention (`autoComplete="new-password"`)
+- Static IDs replaced with `useId()` hook (Biome lint compliance)
+
+### Baseline Implementation (12 tasks)
+
+- [x] Configuration schema (4 config keys, encryption, NonBlankString types) - Req: 1.1, 1.5, 6.1
+- [x] OAuth 2.0 transport creation (nodemailer Gmail service) - Req: 2.1, 2.2, 3.1-3.3, 3.5, 6.2
+- [x] Service initialization and token management (S2S integration) - Req: 2.3, 2.5, 2.6, 3.6, 5.6, 6.2, 6.4
+- [x] API validation and persistence (PUT/GET endpoints) - Req: 1.3, 1.4, 1.5, 1.6, 5.5, 6.5
+- [x] Field-specific validation error messages - Req: 1.7
+- [x] OAuth2Setting UI component (react-hook-form integration) - Req: 1.2, 4.1
+- [x] AdminAppContainer state management (4 state properties) - Req: 4.2, 6.3
+- [x] Mail settings form submission - Req: 1.3, 1.6, 1.7
+- [x] Transmission method selection ('oauth2' option) - Req: 1.1, 1.2
+- [x] Multi-language translations (en, ja, fr, ko, zh) - Req: 1.2, 4.1, 4.3
+
+## Not Implemented (Optional Enhancements)
+
+- Help text for 2 of 4 fields incomplete (Req 4.3)
+- Credential field masking in UI (Req 4.4)
+- Test email button for OAuth 2.0 (Req 4.5)

+ 34 - 0
.kiro/steering/product.md

@@ -0,0 +1,34 @@
+# Product Overview
+
+GROWI is a team collaboration wiki platform using Markdown, designed to help teams document, share, and organize knowledge effectively.
+
+## Core Capabilities
+
+1. **Hierarchical Wiki Pages**: Tree-structured page organization with path-based navigation (`/path/to/page`)
+2. **Markdown-First Editing**: Rich Markdown support with extensions (drawio, lsx, math) and real-time collaborative editing
+3. **Authentication Integrations**: Multiple auth methods (LDAP, SAML, OAuth, Passkey) for enterprise environments
+4. **Plugin System**: Extensible architecture via `@growi/pluginkit` for custom remark plugins and functionality
+5. **Multi-Service Architecture**: Modular services (PDF export, Slack integration) deployed independently
+
+## Target Use Cases
+
+- **Team Documentation**: Technical documentation, meeting notes, project wikis
+- **Knowledge Management**: Searchable, organized information repository
+- **Enterprise Deployment**: Self-hosted wiki with SSO/LDAP integration
+- **Developer Teams**: Markdown-native, Git-friendly documentation workflow
+
+## Value Proposition
+
+- **Open Source**: MIT licensed, self-hostable, community-driven
+- **Markdown Native**: First-class Markdown support with powerful extensions
+- **Hierarchical Organization**: Intuitive path-based page structure (unlike flat wikis)
+- **Enterprise Ready**: Authentication integrations, access control, scalability
+- **Extensible**: Plugin system for customization without forking
+
+## Deployment Models
+
+- **Self-Hosted**: Docker, Kubernetes, or bare metal deployment
+- **Microservices**: Optional services (pdf-converter, slackbot-proxy) for enhanced functionality
+
+---
+_Focus on patterns and purpose, not exhaustive feature lists_

+ 8 - 0
.kiro/steering/structure.md

@@ -0,0 +1,8 @@
+# Project Structure
+
+See: `.claude/skills/monorepo-overview/SKILL.md` (auto-loaded by Claude Code)
+
+## cc-sdd Specific Notes
+
+Currently, there are no additional instructions specific to Kiro.
+If instructions specific to the cc-sdd workflow are needed in the future, add them to this section.

+ 8 - 0
.kiro/steering/tdd.md

@@ -0,0 +1,8 @@
+# Test-Driven Development
+
+See: `.claude/commands/tdd.md`, `.claude/skills/learned/essential-test-patterns/SKILL.md` and `.claude/skills/learned/essential-test-design/SKILL.md`
+
+## cc-sdd Specific Notes
+
+Currently, there are no additional instructions specific to Kiro.
+If instructions specific to the cc-sdd workflow are needed in the future, add them to this section.

+ 8 - 0
.kiro/steering/tech.md

@@ -0,0 +1,8 @@
+# Technology Stack
+
+See: `.claude/skills/tech-stack/SKILL.md` (auto-loaded by Claude Code)
+
+## cc-sdd Specific Notes
+
+Currently, there are no additional instructions specific to Kiro.
+If instructions specific to the cc-sdd workflow are needed in the future, add them to this section.

+ 1 - 20
.mcp.json

@@ -1,22 +1,3 @@
 {
-  "mcpServers": {
-    "context7": {
-      "type": "http",
-      "url": "https://mcp.context7.com/mcp"
-    },
-    "serena": {
-      "type": "stdio",
-      "command": "uvx",
-      "args": [
-        "--from",
-        "git+https://github.com/oraios/serena",
-        "serena-mcp-server",
-        "--context",
-        "ide-assistant",
-        "--project",
-        "."
-      ],
-      "env": {}
-    }
-  }
+  "mcpServers": {}
 }

+ 0 - 104
.serena/memories/apps-app-detailed-architecture.md

@@ -1,104 +0,0 @@
-# apps/app アーキテクチャ詳細ガイド
-
-## 概要
-`apps/app` は GROWI のメインアプリケーションで、Next.js ベースのフルスタック Web アプリケーションです。
-
-## エントリーポイント
-- **サーバーサイド**: `server/app.ts` - OpenTelemetry 初期化と Crowi サーバー起動を担当
-- **クライアントサイド**: `pages/_app.page.tsx` - Next.js アプリのルートコンポーネント
-
-## ディレクトリ構成の方針
-
-### フィーチャーベース(新しい方針)
-`features/` ディレクトリは機能ごとに整理され、各フィーチャーは以下の構造を持つ:
-- `interfaces/` - TypeScript 型定義
-- `server/` - サーバーサイドロジック(models, routes, services)
-- `client/` - クライアントサイドロジック(components, stores, services)
-- `utils/` - 共通ユーティリティ
-
-#### 主要フィーチャー
-- `openai/` - AI アシスタント機能(OpenAI 統合)
-- `external-user-group/` - 外部ユーザーグループ管理
-- `page-bulk-export/` - ページ一括エクスポート
-- `growi-plugin/` - プラグインシステム
-- `search/` - 検索機能
-- `mermaid/` - Mermaid 図表レンダリング
-- `plantuml/` - PlantUML 図表レンダリング
-- `callout/` - コールアウト(注意書き)機能
-- `comment/` - コメント機能
-- `templates/` - テンプレート機能
-- `rate-limiter/` - レート制限
-- `opentelemetry/` - テレメトリ・監視
-
-### レガシー構造(段階的移行予定)
-
-#### ユニバーサル(サーバー・クライアント共通)
-- `components/` - React コンポーネント(ページレベル、レイアウト、共通)
-- `interfaces/` - TypeScript インターフェース
-- `models/` - データモデル定義
-- `services/` - ビジネスロジック(レンダラーなど)
-- `stores-universal/` - ユニバーサル状態管理(SWR コンテキスト等)
-
-#### サーバーサイド専用
-- `server/` - サーバーサイドコード
-  - `models/` - Mongoose モデル
-  - `routes/` - Express ルート(API v3含む)
-  - `service/` - サーバーサイドサービス
-  - `middlewares/` - Express ミドルウェア
-  - `util/` - サーバーサイドユーティリティ
-  - `events/` - イベントエミッター
-  - `crowi/` - アプリケーション初期化
-
-#### クライアントサイド専用
-- `client/` - クライアントサイドコード
-  - `components/` - React コンポーネント
-  - `services/` - クライアントサイドサービス
-  - `util/` - クライアントサイドユーティリティ
-  - `interfaces/` - クライアント固有の型定義
-  - `models/` - クライアントサイドモデル
-
-#### Next.js Pages Router
-- `pages/` - Next.js ページルート
-  - `admin/` - 管理画面ページ
-  - `me/` - ユーザー設定ページ
-  - `[[...path]]/` - 動的ページルート(Catch-all)
-  - `share/` - 共有ページ
-  - `login/` - ログインページ
-
-#### 状態管理・UI
-- `states/` - Jotai 状態管理(ページ、UI、サーバー設定)
-- `stores/` - レガシー状態管理(段階的に states/ に移行)
-- `styles/` - SCSS スタイル
-
-#### その他
-- `utils/` - 汎用ユーティリティ
-- `migrations/` - データベースマイグレーション
-- `@types/` - TypeScript 型拡張
-
-## 開発指針
-
-### 新機能開発
-新しい機能は `features/` ディレクトリにフィーチャーベースで実装し、以下を含める:
-1. インターフェース定義
-2. サーバーサイド実装(必要に応じて)
-3. クライアントサイド実装(必要に応じて)
-4. 共通ユーティリティ
-
-### 既存機能の改修
-既存のレガシー構造は段階的に features/ に移行することが推奨される。
-
-### 重要な技術スタック
-- **フレームワーク**: Next.js (Pages Router)
-- **状態管理**: Jotai (新), SWR (データフェッチング)
-- **スタイル**: SCSS, CSS Modules
-- **サーバー**: Express.js
-- **データベース**: MongoDB (Mongoose)
-- **型システム**: TypeScript
-- **監視**: OpenTelemetry
-
-## 特記事項
-- AI 統合機能(OpenAI)は最も複雑なフィーチャーの一つ
-- プラグインシステムにより機能拡張可能
-- 多言語対応(i18next)
-- 複数の認証方式サポート
-- レート制限・セキュリティ機能内蔵

+ 0 - 162
.serena/memories/apps-app-development-patterns.md

@@ -1,162 +0,0 @@
-# apps/app 開発ワークフロー・パターン集
-
-## よくある開発パターン
-
-### 新しいページ作成
-1. `pages/` にページファイル作成(`.page.tsx`)
-2. 必要に応じてレイアウト定義
-3. サーバーサイドプロパティ設定 (`getServerSideProps`)
-4. 状態管理セットアップ
-5. スタイル追加
-
-### 新しい API エンドポイント
-1. `server/routes/apiv3/` にルートファイル作成
-2. バリデーション定義
-3. サービス層実装
-4. レスポンス形式定義
-5. OpenAPI 仕様更新
-
-### 新しいフィーチャー実装
-1. `features/新機能名/` ディレクトリ作成
-2. `interfaces/` で型定義
-3. `server/` でバックエンド実装
-4. `client/` でフロントエンド実装
-5. `utils/` で共通ロジック
-
-### コンポーネント作成
-1. 適切なディレクトリに配置
-2. TypeScript プロパティ定義
-3. CSS Modules でスタイル
-4. JSDoc コメント追加
-5. テストファイル作成
-
-## 重要な設計パターン
-
-### SWR データフェッチング
-```typescript
-const { data, error, mutate } = useSWR('/api/v3/pages', fetcher);
-```
-
-### Jotai 状態管理
-```typescript
-const pageAtom = atom(initialPageState);
-const [page, setPage] = useAtom(pageAtom);
-```
-
-### CSS Modules スタイリング
-```scss
-.componentName {
-  @extend %some-placeholder;
-  @include some-mixin;
-}
-```
-
-### API ルート実装
-```typescript
-export const getPageHandler = async(req: NextApiRequest, res: NextApiResponse) => {
-  // バリデーション
-  // ビジネスロジック
-  // レスポンス
-};
-```
-
-## ファイル構成のベストプラクティス
-
-### フィーチャーディレクトリ例
-```
-features/my-feature/
-├── interfaces/
-│   └── my-feature.ts
-├── server/
-│   ├── models/
-│   ├── routes/
-│   └── services/
-├── client/
-│   ├── components/
-│   ├── stores/
-│   └── services/
-└── utils/
-    └── common-logic.ts
-```
-
-### コンポーネントディレクトリ例
-```
-components/MyComponent/
-├── MyComponent.tsx
-├── MyComponent.module.scss
-├── MyComponent.spec.tsx
-├── index.ts
-└── sub-components/
-```
-
-## 開発時のチェックリスト
-
-### コード品質
-- [ ] TypeScript エラーなし
-- [ ] テストケース作成
-- [ ] 型安全性確保
-- [ ] パフォーマンス影響確認
-
-### 機能要件
-- [ ] 国際化対応(i18n)
-- [ ] セキュリティチェック
-- [ ] アクセシビリティ対応
-- [ ] レスポンシブデザイン
-- [ ] エラーハンドリング
-
-### API 設計
-- [ ] RESTful 設計原則
-- [ ] 適切な HTTP ステータスコード
-- [ ] バリデーション実装
-- [ ] レート制限対応
-- [ ] ドキュメント更新
-
-## デバッグ・トラブルシューティング
-
-### よくある問題
-1. **型エラー**: tsconfig.json 設定確認
-2. **スタイル適用されない**: CSS Modules インポート確認
-3. **API エラー**: ミドルウェア順序確認
-4. **状態同期問題**: SWR キー重複確認
-5. **ビルドエラー**: 依存関係バージョン確認
-
-### デバッグツール
-- Next.js Dev Tools
-- React Developer Tools
-- Network タブ(API 監視)
-- Console ログ
-- Lighthouse(パフォーマンス)
-
-## パフォーマンス最適化
-
-### フロントエンド
-- コンポーネント lazy loading
-- 画像最適化
-- Bundle サイズ監視
-- メモ化(useMemo, useCallback)
-
-### バックエンド
-- データベースクエリ最適化
-- キャッシュ戦略
-- 非同期処理
-- リソース使用量監視
-
-## セキュリティ考慮事項
-
-### 実装時の注意
-- 入力サニタイゼーション
-- CSRF 対策
-- XSS 防止
-- 認証・認可チェック
-- 機密情報の適切な取り扱い
-
-## デプロイ・運用
-
-### 環境設定
-- 環境変数管理
-- データベース接続
-- 外部サービス連携
-- ログ設定
-- 監視設定
-
-このガイドは apps/app の開発を効率的に進めるための包括的な情報源として活用してください。

+ 0 - 37
.serena/memories/apps-app-google-workspace-oauth2-mail.md

@@ -1,37 +0,0 @@
-# Google Workspace OAuth 2.0 メール送信機能実装計画
-
-## 概要
-
-Google Workspace (Gmail) の OAuth 2.0 (XOAUTH2) 認証を使ったメール送信機能を実装する。2025年5月1日以降、Gmail SMTP ではユーザー名とパスワード認証がサポートされなくなったため、OAuth 2.0 への移行が必要。
-
-## 背景
-
-- **問題**: Gmail SMTP でのユーザー名・パスワード認証が2025年5月1日にサポート終了
-- **解決策**: OAuth 2.0 (XOAUTH2) 認証方式の実装
-- **参考**: https://support.google.com/a/answer/2956491?hl=ja
-- **ライブラリ**: nodemailer v6.9.15 は OAuth 2.0 をサポート済み(バージョンアップ不要)
-
-## 技術仕様
-
-### 必須設定パラメータ
-
-| パラメータ | 説明 | セキュリティ |
-|-----------|------|------------|
-| `mail:oauth2ClientId` | Google Cloud Console で取得する OAuth 2.0 クライアント ID | 通常 |
-| `mail:oauth2ClientSecret` | OAuth 2.0 クライアントシークレット | `isSecret: true` |
-| `mail:oauth2RefreshToken` | OAuth 2.0 リフレッシュトークン | `isSecret: true` |
-| `mail:oauth2User` | 送信者のGmailアドレス | 通常 |
-
-### nodemailer 設定例
-
-```typescript
-const transportOptions = {
-  service: 'gmail',
-  auth: {
-    type: 'OAuth2',
-    user: 'user@example.com',
-    clientId: 'CLIENT_ID',
-    clientSecret: 'CLIENT_SECRET',
-    refreshToken: 'REFRESH_TOKEN',
-  },
-};

+ 0 - 35
.serena/memories/apps-app-technical-specs.md

@@ -1,35 +0,0 @@
-# apps/app 技術仕様
-
-## ファイル構造・命名
-- Next.js: `*.page.tsx`
-- テスト: `*.spec.ts`, `*.integ.ts`
-- コンポーネント: `ComponentName.tsx`
-
-## API構造
-- **API v3**: `server/routes/apiv3/` (RESTful + OpenAPI準拠)
-- **Features API**: `features/*/server/routes/`
-
-## 状態管理
-- **Jotai** (推奨): `states/` - アトミック分離
-- **SWR**: `stores/` - データフェッチ・キャッシュ
-
-## データベース
-- **Mongoose**: `server/models/` (スキーマ定義)
-- **Serializers**: `serializers/` (レスポンス変換)
-
-## セキュリティ・i18n
-- **認証**: 複数プロバイダー + アクセストークン
-- **XSS対策**: `services/general-xss-filter/`
-- **i18n**: next-i18next (サーバー・クライアント両対応)
-
-## システム機能
-- **検索**: Elasticsearch統合
-- **監視**: OpenTelemetry (`features/opentelemetry/`)
-- **プラグイン**: 動的読み込み (`features/growi-plugin/`)
-
-## 開発ガイドライン
-1. 新機能は `features/` 実装
-2. TypeScript strict準拠
-3. Jotai状態管理優先
-4. API v3形式
-5. セキュリティ・i18n・テスト必須

+ 0 - 61
.serena/memories/coding_conventions.md

@@ -1,61 +0,0 @@
-# コーディング規約とスタイルガイド
-
-## Linter・フォーマッター設定
-
-### Biome設定(統一予定)
-- **適用範囲**: 
-  - dist/, node_modules/, coverage/ などは除外
-  - .next/, bin/, config/ などのビルド成果物は除外
-  - package.json などの設定ファイルは除外
-- **推奨**: 新規開発では Biome を使用
-
-## TypeScript設定
-- **ターゲット**: ESNext
-- **モジュール**: ESNext  
-- **厳格モード**: 有効(strict: true)
-- **モジュール解決**: Bundler
-- **その他**:
-  - allowJs: true(JSファイルも許可)
-  - skipLibCheck: true(型チェックの最適化)
-  - isolatedModules: true(単独モジュールとしてコンパイル)
-
-## Stylelint設定
-- SCSS/CSSファイルに対して適用
-- recess-order設定を使用(プロパティの順序規定)
-- recommended-scss設定を適用
-
-## ファイル命名規則
-- TypeScript/JavaScriptファイル: キャメルケースまたはケバブケース
-- コンポーネントファイル: PascalCase(Reactコンポーネント)
-- 設定ファイル: ドット記法(.biome.json など)
-
-## テストファイル命名規則(Vitest)
-vitest.workspace.mts の設定に基づく:
-
-### 単体テスト(Unit Test)
-- **ファイル名**: `*.spec.{ts,js}`
-- **環境**: Node.js
-- **例**: `utils.spec.ts`, `helper.spec.js`
-
-### 統合テスト(Integration Test)
-- **ファイル名**: `*.integ.ts`
-- **環境**: Node.js(MongoDB設定あり)
-- **例**: `api.integ.ts`, `service.integ.ts`
-
-### コンポーネントテスト(Component Test)
-- **ファイル名**: `*.spec.{tsx,jsx}`
-- **環境**: happy-dom
-- **例**: `Button.spec.tsx`, `Modal.spec.jsx`
-
-## ディレクトリ構造の規則
-- `src/`: ソースコード
-- `test/`: Jest用の古いテストファイル(廃止予定)
-- `test-with-vite/`: Vitest用の新しいテストファイル
-- `playwright/`: E2Eテストファイル
-- `config/`: 設定ファイル
-- `public/`: 静的ファイル
-- `dist/`: ビルド出力
-
-## 移行ガイドライン
-- 新規開発: Biome + Vitest を使用
-- 既存コード: 段階的に Jest → Vitest に移行

+ 0 - 26
.serena/memories/project_overview.md

@@ -1,26 +0,0 @@
-# GROWIプロジェクト概要
-
-## 目的
-GROWIは、マークダウンを使用したチームコラボレーションソフトウェアです。Wikiとドキュメント作成ツールの機能を持ち、チーム間の情報共有とコラボレーションを促進します。
-
-## プロジェクトの詳細
-- **プロジェクト名**: GROWI
-- **バージョン**: 7.3.0-RC.0
-- **ライセンス**: MIT
-- **作者**: Yuki Takei <yuki@weseek.co.jp>
-- **リポジトリ**: https://github.com/growilabs/growi.git
-- **公式サイト**: https://growi.org
-
-## 主な特徴
-- Markdownベースのドキュメント作成
-- チームコラボレーション機能
-- Wikiのような情報共有プラットフォーム
-- ドキュメント管理とバージョン管理
-
-## アーキテクチャ
-- **モノレポ構成**: pnpm workspace + Turbo.js を使用
-- **主要アプリケーション**: apps/app (メインアプリケーション)
-- **追加アプリケーション**: 
-  - apps/pdf-converter (PDF変換サービス)
-  - apps/slackbot-proxy (Slackボットプロキシ)
-- **パッケージ**: packages/ 配下に複数の共有ライブラリ

+ 0 - 89
.serena/memories/project_structure.md

@@ -1,89 +0,0 @@
-# プロジェクト構造
-
-## ルートディレクトリ構造
-```
-growi/
-├── apps/                    # アプリケーション群
-│   ├── app/                # メインのGROWIアプリケーション
-│   ├── pdf-converter/      # PDF変換サービス
-│   └── slackbot-proxy/     # Slackボットプロキシ
-├── packages/               # 共有パッケージ群
-│   ├── core/              # コアライブラリ
-│   ├── core-styles/       # 共通スタイル
-│   ├── editor/            # エディターコンポーネント
-│   ├── pluginkit/         # プラグインキット
-│   ├── ui/                # UIコンポーネント
-│   ├── presentation/      # プレゼンテーション層
-│   ├── preset-templates/  # テンプレート
-│   ├── preset-themes/     # テーマ
-│   └── remark-*/          # remarkプラグイン群
-├── bin/                   # ユーティリティスクリプト
-└── 設定ファイル群
-```
-
-## メインアプリケーション (apps/app/)
-```
-apps/app/
-├── src/                   # ソースコード
-├── test/                  # 古いJestテストファイル(廃止予定)
-├── test-with-vite/        # 新しいVitestテストファイル
-├── playwright/            # E2Eテスト(Playwright)
-├── config/                # 設定ファイル
-├── public/                # 静的ファイル
-├── docker/                # Docker関連
-├── bin/                   # スクリプト
-└── 設定ファイル群
-```
-
-## テストディレクトリの詳細
-
-### test/ (廃止予定)
-- Jest用の古いテストファイル
-- 段階的にtest-with-vite/に移行予定
-- 新規テストは作成しない
-
-### test-with-vite/
-- Vitest用の新しいテストファイル
-- 新規テストはここに作成
-- セットアップファイル: `setup/mongoms.ts` (MongoDB用)
-
-### playwright/
-- E2Eテスト用ディレクトリ
-- ブラウザ操作を含むテスト
-
-## テストファイルの配置ルール
-
-### Vitestテストファイル
-以下のパターンでソースコードと同じディレクトリまたはtest-with-vite/配下に配置:
-
-- **単体テスト**: `*.spec.{ts,js}`
-- **統合テスト**: `*.integ.ts` 
-- **コンポーネントテスト**: `*.spec.{tsx,jsx}`
-
-例:
-```
-src/
-├── utils/
-│   ├── helper.ts
-│   └── helper.spec.ts       # 単体テスト
-├── components/
-│   ├── Button.tsx
-│   └── Button.spec.tsx      # コンポーネントテスト
-└── services/
-    ├── api.ts
-    └── api.integ.ts         # 統合テスト
-```
-
-## パッケージ(packages/)
-各パッケージは独立したnpmパッケージとして管理され、以下の構造を持つ:
-- `src/`: ソースコード
-- `dist/`: ビルド出力
-- `package.json`: パッケージ設定
-- `tsconfig.json`: TypeScript設定
-
-## 重要な設定ファイル
-- **pnpm-workspace.yaml**: ワークスペース設定
-- **turbo.json**: Turbo.jsビルド設定
-- **tsconfig.base.json**: TypeScript基本設定
-- **biome.json**: Biome linter/formatter設定
-- **vitest.workspace.mts**: Vitestワークスペース設定

+ 0 - 94
.serena/memories/task_completion_checklist.md

@@ -1,94 +0,0 @@
-# タスク完了時のチェックリスト
-
-## コードを書いた後に必ず実行すべきコマンド
-
-### 1. Lint・フォーマットの実行
-```bash
-# 【推奨】Biome実行(新規開発)
-pnpm run lint:biome
-
-# 【過渡期】全てのLint実行(既存コード)
-pnpm run lint
-
-# 個別実行(必要に応じて)
-pnpm run lint:styles      # Stylelint
-pnpm run lint:typecheck   # TypeScript型チェック
-```
-
-### 2. テストの実行
-```bash
-# 【推奨】Vitestテスト実行(新規開発)
-pnpm run test:vitest
-
-# 【過渡期】全てのテスト実行(既存コード)
-pnpm run test
-
-# 個別実行
-pnpm run test:jest        # Jest(廃止予定)
-pnpm run test:vitest {target-file-name}     # Vitest
-```
-
-### 3. E2Eテストの実行(重要な機能変更時)
-```bash
-cd apps/app
-npx playwright test
-```
-
-### 4. ビルドの確認
-```bash
-# メインアプリケーションのビルド
-pnpm run app:build
-
-# 関連パッケージのビルド
-turbo run build
-```
-
-### 5. 動作確認
-```bash
-# 開発サーバーでの動作確認
-cd apps/app && pnpm run dev
-
-# または本番ビルドでの確認
-pnpm start
-```
-
-## 特別な確認事項
-
-### OpenAPI仕様の確認(API変更時)
-```bash
-cd apps/app
-pnpm run openapi:generate-spec:apiv3
-pnpm run lint:openapi:apiv3
-```
-
-### データベーススキーマ変更時
-```bash
-cd apps/app
-pnpm run dev:migrate:status  # 現在の状態確認
-pnpm run dev:migrate         # マイグレーション実行
-```
-
-## テストファイル作成時の注意
-
-### 新規テストファイル
-- **単体テスト**: `*.spec.{ts,js}` (Node.js環境)
-- **統合テスト**: `*.integ.ts` (Node.js + MongoDB環境)  
-- **コンポーネントテスト**: `*.spec.{tsx,jsx}` (happy-dom環境)
-- test-with-vite/ または対象ファイルと同じディレクトリに配置
-
-### 既存テストの修正
-- test/ 配下のJestテストは段階的に移行
-- 可能であればtest-with-vite/にVitestテストとして書き直し
-
-## コミット前の最終チェック
-1. Biome エラーが解消されているか
-2. Vitestテスト(または過渡期はJest)がパスしているか
-3. 重要な変更はPlaywright E2Eテストも実行
-4. ビルドが成功するか
-5. 変更による既存機能への影響がないか
-6. 適切なコミットメッセージを作成したか
-
-## 移行期間中の注意事項
-- 新規開発: Biome + Vitest を使用
-- 既存コード修正: 可能な限り Biome + Vitest に移行
-- レガシーツールは段階的に廃止予定

+ 0 - 41
.serena/memories/tech_stack.md

@@ -1,41 +0,0 @@
-# 技術スタック & 開発環境
-
-## コア技術
-- **TypeScript** ~5.0.0 + **Next.js** (React)
-- **Node.js** ^20||^22 + **MongoDB** + **Mongoose** ^6.13.6
-- **pnpm** 10.4.1 + **Turbo** ^2.1.3 (モノレポ)
-
-## 状態管理・データ
-- **Jotai**: アトミック状態管理(推奨)
-- **SWR** ^2.3.2: データフェッチ・キャッシュ
-
-## 開発ツール移行状況
-| 従来 | 移行先 | 状況 |
-|------|--------|------|
-| ESLint | **Biome** | 新規推奨 |
-| Jest | **Vitest** + **Playwright** | 新規推奨 |
-
-## 主要コマンド
-```bash
-# 開発
-cd apps/app && pnpm run dev
-
-# 品質チェック
-pnpm run lint:biome        # 新規推奨
-pnpm run lint:typecheck    # 型チェック正式コマンド
-pnpm run test:vitest       # 新規推奨
-
-# ビルド
-pnpm run app:build
-turbo run build           # 並列ビルド
-```
-
-## ファイル命名規則
-- Next.js: `*.page.tsx`
-- テスト: `*.spec.ts` (Vitest), `*.integ.ts`
-- コンポーネント: `ComponentName.tsx`
-
-## API・アーキテクチャ
-- **API v3**: `server/routes/apiv3/` (RESTful + OpenAPI)
-- **Features**: `features/*/` (機能別分離)
-- **SCSS**: CSS Modules使用

+ 0 - 95
.serena/memories/vitest-testing-tips-and-best-practices.md

@@ -1,95 +0,0 @@
-# Vitest + TypeScript Testing Guide
-
-## 核心技術要素
-
-### tsconfig.json最適設定
-```json
-{
-  "compilerOptions": {
-    "types": ["vitest/globals"]  // グローバルAPI: describe, it, expect等をインポート不要化
-  }
-}
-```
-
-### vitest-mock-extended: 型安全モッキング
-```typescript
-import { mockDeep, type DeepMockProxy } from 'vitest-mock-extended';
-
-// 完全型安全なNext.js Routerモック
-const mockRouter: DeepMockProxy<NextRouter> = mockDeep<NextRouter>();
-mockRouter.asPath = '/test-path';  // TypeScript補完・型チェック有効
-
-// 複雑なUnion型も完全サポート
-interface ComplexProps {
-  currentPageId?: string | null;
-  currentPathname?: string | null;
-}
-const mockProps: DeepMockProxy<ComplexProps> = mockDeep<ComplexProps>();
-```
-
-### React Testing Library + Jotai統合
-```typescript
-const renderWithProvider = (ui: React.ReactElement, scope?: Scope) => {
-  const Wrapper = ({ children }: { children: React.ReactNode }) => (
-    <Provider scope={scope}>{children}</Provider>
-  );
-  return render(ui, { wrapper: Wrapper });
-};
-```
-
-## 実践パターン
-
-### 非同期テスト
-```typescript
-import { waitFor, act } from '@testing-library/react';
-
-await act(async () => {
-  result.current.triggerAsyncAction();
-});
-
-await waitFor(() => {
-  expect(result.current.isLoading).toBe(false);
-});
-```
-
-### 詳細アサーション
-```typescript
-expect(mockFunction).toHaveBeenCalledWith(
-  expect.objectContaining({
-    pathname: '/expected-path',
-    data: expect.any(Object)
-  })
-);
-```
-
-## 実行コマンド
-
-### 基本テスト実行
-```bash
-# Vitest単体
-pnpm run test:vitest
-
-# Vitest単体(coverageあり)
-pnpm run test:vitest:coverage
-
-# 特定ファイルのみ実行(coverageあり)
-pnpm run test:vitest src/path/to/test.spec.tsx
-```
-
-### package.jsonスクリプト参照
-```json
-{
-  "scripts": {
-    "test": "run-p test:*",
-    "test:jest": "cross-env NODE_ENV=test TS_NODE_PROJECT=test/integration/tsconfig.json jest",
-    "test:vitest": "vitest run --coverage"
-  }
-}
-```
-
-## Jest→Vitest移行要点
-- `jest.config.js` → `vitest.config.ts`
-- `@types/jest` → `vitest/globals`
-- ESModulesネイティブサポート → 高速起動・実行
-
-この設定により型安全性と保守性を両立した高品質テストが可能。

+ 65 - 9
.serena/project.yml

@@ -1,9 +1,3 @@
-# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
-#  * For C, use cpp
-#  * For JavaScript, use typescript
-# Special requirements:
-#  * csharp: Requires the presence of a .sln file in the project folder.
-language: typescript
 
 
 # whether to use the project's gitignore file to ignore files
 # whether to use the project's gitignore file to ignore files
 # Added on 2025-04-07
 # Added on 2025-04-07
@@ -22,7 +16,7 @@ read_only: false
 
 
 # list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
 # list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
 # Below is the complete list of tools for convenience.
 # Below is the complete list of tools for convenience.
-# To make sure you have the latest list of tools, and to view their descriptions, 
+# To make sure you have the latest list of tools, and to view their descriptions,
 # execute `uv run scripts/print_tool_overview.py`.
 # execute `uv run scripts/print_tool_overview.py`.
 #
 #
 #  * `activate_project`: Activates a project by name.
 #  * `activate_project`: Activates a project by name.
@@ -59,10 +53,72 @@ read_only: false
 #  * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
 #  * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
 #  * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
 #  * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
 #  * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
 #  * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
-excluded_tools: []
+excluded_tools:
+- "check_onboarding_performed"
+- "execute_shell_command"
+- "initial_instructions"
+- "onboarding"
+- "prepare_for_new_conversation"
+- "read_memory"
+- "write_memory"
+- "list_memories"
+- "delete_memory"
 
 
 # initial prompt for the project. It will always be given to the LLM upon activating the project
 # initial prompt for the project. It will always be given to the LLM upon activating the project
 # (contrary to the memories, which are loaded on demand).
 # (contrary to the memories, which are loaded on demand).
 initial_prompt: ""
 initial_prompt: ""
-
+# the name by which the project can be referenced within Serena
 project_name: "growi"
 project_name: "growi"
+
+# list of mode names to that are always to be included in the set of active modes
+# The full set of modes to be activated is base_modes + default_modes.
+# If the setting is undefined, the base_modes from the global configuration (serena_config.yml) apply.
+# Otherwise, this setting overrides the global configuration.
+# Set this to [] to disable base modes for this project.
+# Set this to a list of mode names to always include the respective modes for this project.
+base_modes:
+
+# list of mode names that are to be activated by default.
+# The full set of modes to be activated is base_modes + default_modes.
+# If the setting is undefined, the default_modes from the global configuration (serena_config.yml) apply.
+# Otherwise, this overrides the setting from the global configuration (serena_config.yml).
+# This setting can, in turn, be overridden by CLI parameters (--mode).
+default_modes:
+
+# list of tools to include that would otherwise be disabled (particularly optional tools that are disabled by default)
+included_optional_tools: []
+
+# fixed set of tools to use as the base tool set (if non-empty), replacing Serena's default set of tools.
+# This cannot be combined with non-empty excluded_tools or included_optional_tools.
+fixed_tools: []
+
+# the encoding used by text files in the project
+# For a list of possible encodings, see https://docs.python.org/3.11/library/codecs.html#standard-encodings
+encoding: utf-8
+
+
+# list of languages for which language servers are started; choose from:
+#   al                  bash                clojure             cpp                 csharp
+#   csharp_omnisharp    dart                elixir              elm                 erlang
+#   fortran             fsharp              go                  groovy              haskell
+#   java                julia               kotlin              lua                 markdown
+#   matlab              nix                 pascal              perl                php
+#   powershell          python              python_jedi         r                   rego
+#   ruby                ruby_solargraph     rust                scala               swift
+#   terraform           toml                typescript          typescript_vts      vue
+#   yaml                zig
+#   (This list may be outdated. For the current list, see values of Language enum here:
+#   https://github.com/oraios/serena/blob/main/src/solidlsp/ls_config.py
+#   For some languages, there are alternative language servers, e.g. csharp_omnisharp, ruby_solargraph.)
+# Note:
+#   - For C, use cpp
+#   - For JavaScript, use typescript
+#   - For Free Pascal/Lazarus, use pascal
+# Special requirements:
+#   Some languages require additional setup/installations.
+#   See here for details: https://oraios.github.io/serena/01-about/020_programming-languages.html#language-servers
+# When using multiple languages, the first language server that supports a given file will be used for that file.
+# The first language is the default language and the respective language server will be used as a fallback.
+# Note that when using the JetBrains backend, language servers are not used and this list is correspondingly ignored.
+languages:
+- typescript

+ 0 - 10
.serena/serena_config.yml

@@ -1,10 +0,0 @@
-web_dashboard: false
-# whether to open the Serena web dashboard (which will be accessible through your web browser) that
-# shows Serena's current session logs - as an alternative to the GUI log window which
-# is supported on all platforms.
-
-web_dashboard_open_on_launch: false
-# whether to open a browser window with the web dashboard when Serena starts (provided that web_dashboard
-# is enabled). If set to False, you can still open the dashboard manually by navigating to
-# http://localhost:24282/dashboard/ in your web browser (24282 = 0x5EDA, SErena DAshboard).
-# If you have multiple instances running, a higher port will be used; try port 24283, 24284, etc.

+ 4 - 1
.vscode/mcp.json

@@ -13,7 +13,10 @@
         "serena",
         "serena",
         "start-mcp-server",
         "start-mcp-server",
         "--context",
         "--context",
-        "ide-assistant"
+        "ide",
+        "--project",
+        ".",
+        "--enable-web-dashboard=false"
       ]
       ]
     }
     }
   }
   }

+ 115 - 50
AGENTS.md

@@ -1,74 +1,139 @@
 # AGENTS.md
 # AGENTS.md
 
 
-GROWI is a team collaboration wiki platform built with Next.js, Express, and MongoDB. This guide helps AI coding agents navigate the monorepo and work effectively with GROWI's architecture.
+GROWI is a team collaboration wiki platform built with Next.js, Express, and MongoDB. This guide provides essential instructions for AI coding agents working with the GROWI codebase.
 
 
-## Language
+## Language Policy
 
 
-If we detect at the beginning of a conversation that the user's primary language is not English, we will always respond in that language. However, we may retain technical terms in English if necessary.
+**Response Language**: If the user writes in a non-English language at any point in the conversation, always respond in that language from that point onward. This rule takes **absolute priority** over any other language instructions, including skill/command prompts or context documents written in English.
 
 
-When generating source code, all comments and explanations within the code will be written in English.
+**Code Comments**: When generating source code, all comments and explanations within the code must be written in English, regardless of the conversation language.
 
 
 ## Project Overview
 ## Project Overview
 
 
-GROWI is a team collaboration software using markdown - a wiki platform with hierarchical page organization. It's built with Next.js, Express, MongoDB, and includes features like real-time collaborative editing, authentication integrations, and plugin support.
+GROWI is a team collaboration wiki platform using Markdown, featuring hierarchical page organization, real-time collaborative editing, authentication integrations, and plugin support. Built as a monorepo with Next.js, Express, and MongoDB.
 
 
-## Development Tools
-- **Package Manager**: pnpm with workspace support
-- **Build System**: Turborepo for monorepo orchestration
-- **Code Quality**: 
-  - Biome for linting and formatting
-  - Stylelint for SCSS/CSS
+## Knowledge Base
 
 
-## Development Commands
+### Claude Code Skills (Auto-Invoked)
 
 
-### Core Development
-- `turbo run bootstrap` - Install dependencies for all workspace packages
-- `turbo run dev` - Start development server (automatically runs migrations and pre-builds styles)
+Technical information is available in **Claude Code Skills** (`.claude/skills/`), which are automatically invoked during development.
 
 
-### Production Commands
-- `pnpm run app:build` - Build GROWI app client and server for production
-- `pnpm run app:server` - Launch GROWI app server in production mode
-- `pnpm start` - Build and start the application (runs both build and server commands)
+**Global Skills** (always loaded):
 
 
-### Database Migrations
-- `pnpm run migrate` - Run MongoDB migrations (production)
-- `turbo run dev:migrate @apps/app` - Run migrations in development (or wait for automatic execution with dev)
-- `cd apps/app && pnpm run dev:migrate:status` - Check migration status
-- `cd apps/app && pnpm run dev:migrate:down` - Rollback last migration
+| Skill | Description |
+|-------|-------------|
+| **monorepo-overview** | Monorepo structure, workspace organization, Changeset versioning |
+| **tech-stack** | Technology stack, pnpm/Turborepo, TypeScript, Biome |
 
 
-### Testing and Quality
-- `turbo run test @apps/app` - Run Jest and Vitest test suites with coverage
-- `turbo run lint @apps/app` - Run all linters (TypeScript, Biome, Stylelint, OpenAPI)
-- `cd apps/app && pnpm run lint:typecheck` - TypeScript type checking only
-- `cd apps/app && pnpm run test:vitest` - Run Vitest unit tests
-- `cd apps/app && pnpm run test:jest` - Run Jest integration tests
+**Rules** (always applied):
 
 
-### Development Utilities  
-- `cd apps/app && pnpm run repl` - Start Node.js REPL with application context loaded
-- `turbo run pre:styles @apps/app` - Pre-build styles with Vite
+| Rule | Description |
+|------|-------------|
+| **coding-style** | Coding conventions, naming, exports, immutability, comments |
+| **security** | Security checklist, secret management, OWASP vulnerability prevention |
+| **performance** | Model selection, context management, build troubleshooting |
 
 
-## Architecture Overview
+**Agents** (specialized):
 
 
-### Monorepo Structure
-- `/apps/app/` - Main GROWI application (Next.js frontend + Express backend)
-- `/apps/pdf-converter/` - PDF conversion microservice
-- `/apps/slackbot-proxy/` - Slack integration proxy service
-- `/packages/` - Shared libraries and components
+| Agent | Description |
+|-------|-------------|
+| **build-error-resolver** | TypeScript/build error resolution with minimal diffs |
+| **security-reviewer** | Security vulnerability detection, OWASP Top 10 |
 
 
-## File Organization Patterns
+**Commands** (user-invocable):
 
 
-### Components
-- Use TypeScript (.tsx) for React components
-- Co-locate styles as `.module.scss` files
-- Export components through `index.ts` files where appropriate
-- Group related components in feature-based directories
+| Command | Description |
+|---------|-------------|
+| **/tdd** | Test-driven development workflow |
+| **/learn** | Extract reusable patterns from sessions |
 
 
-### Tests
-- Unit Test: `*.spec.ts`
-- Integration Test: `*.integ.ts`
-- Component Test: `*.spec.tsx`
+**apps/app Skills** (loaded when working in apps/app):
 
 
+| Skill | Description |
+|-------|-------------|
+| **app-architecture** | Next.js Pages Router, Express, feature-based structure |
+| **app-commands** | apps/app specific commands (migrations, OpenAPI, etc.) |
+| **app-specific-patterns** | Jotai/SWR patterns, router mocking, API routes |
+
+### Package-Specific CLAUDE.md
+
+Each application has its own CLAUDE.md with detailed instructions:
+
+- `apps/app/CLAUDE.md` - Main GROWI application
+- `apps/pdf-converter/CLAUDE.md` - PDF conversion microservice
+- `apps/slackbot-proxy/CLAUDE.md` - Slack integration proxy
+
+### Serena Memories
+
+Additional detailed specifications are stored in **Serena memories** and can be referenced when needed for specific features or subsystems.
+
+## Quick Reference
+
+### Essential Commands (Global)
+
+```bash
+# Development
+turbo run dev                    # Start all dev servers
+
+# Quality Checks (use Turborepo for caching)
+turbo run lint --filter @growi/app
+turbo run test --filter @growi/app
+
+# Production
+pnpm run app:build              # Build main app
+pnpm start                      # Build and start
+```
+
+### Key Directories
+
+```
+growi/
+├── apps/
+│   ├── app/                # Main GROWI application (Next.js + Express)
+│   ├── pdf-converter/      # PDF conversion microservice
+│   └── slackbot-proxy/     # Slack integration proxy
+├── packages/               # Shared libraries (@growi/core, @growi/ui, etc.)
+└── .claude/
+    ├── skills/             # Claude Code skills (auto-loaded)
+    ├── rules/              # Coding standards (always applied)
+    ├── agents/             # Specialized agents
+    └── commands/           # User-invocable commands (/tdd, /learn)
+```
+
+## Development Guidelines
+
+1. **Feature-Based Architecture**: Create new features in `features/{feature-name}/`
+2. **Server-Client Separation**: Keep server and client code separate
+3. **State Management**: Jotai for UI state, SWR for data fetching
+4. **Named Exports**: Prefer named exports (except Next.js pages)
+5. **Test Co-location**: Place test files next to source files
+6. **Type Safety**: Use strict TypeScript throughout
+7. **Changeset**: Use `npx changeset` for version management
+
+## Before Committing
+
+Always execute these checks:
+
+```bash
+# From workspace root (recommended)
+turbo run lint:typecheck --filter @growi/app
+turbo run lint:biome --filter @growi/app
+turbo run test --filter @growi/app
+turbo run build --filter @growi/app
+```
+
+Or from apps/app directory:
+
+```bash
+pnpm run lint:typecheck
+pnpm run lint:biome
+pnpm run test
+pnpm run build
+```
 
 
 ---
 ---
 
 
-When working with this codebase, always run the appropriate linting and testing commands before committing changes. The application uses strict TypeScript checking and comprehensive test coverage requirements.
+For detailed information, refer to:
+- **Rules**: `.claude/rules/` (coding standards)
+- **Skills**: `.claude/skills/` (technical knowledge)
+- **Package docs**: `apps/*/CLAUDE.md` (package-specific)

+ 41 - 1
CHANGELOG.md

@@ -1,9 +1,49 @@
 # Changelog
 # Changelog
 
 
-## [Unreleased](https://github.com/growilabs/compare/v7.4.2...HEAD)
+## [Unreleased](https://github.com/growilabs/compare/v7.4.4...HEAD)
 
 
 *Please do not manually update this file. We've automated the process.*
 *Please do not manually update this file. We've automated the process.*
 
 
+## [v7.4.4](https://github.com/growilabs/compare/v7.4.3...v7.4.4) - 2026-01-30
+
+### 🐛 Bug Fixes
+
+* fix: Search navigation (#10749) @[copilot-swe-agent[bot]](https://github.com/apps/copilot-swe-agent)
+* fix: User pages are displayed in page list when 'Disable user pages' is on (#10752) @miya
+* fix: Disable logo update button when no file is selected (#10587) @hikaru-n-cpu
+
+### 🧰 Maintenance
+
+* support: Setup Claude Code environment (#10746) @yuki-takei
+* support: Improve test parallelism (#10747) @yuki-takei
+* support: Typecheck by tsgo (#10717) @yuki-takei
+
+## [v7.4.3](https://github.com/growilabs/compare/v7.4.2...v7.4.3) - 2026-01-21
+
+### 💎 Features
+
+* feat: Disable user page (#10735) @miya
+* feat: New admin setting for hiding user pages (#10708) @arvid-e
+* feat: Block other user's user pages (#10725) @arvid-e
+
+### 🚀 Improvement
+
+* imprv: New sidebar tool icon appearance (#10672) @satof3
+* imprv: Admin Home (#10692) @yuki-takei
+
+### 🐛 Bug Fixes
+
+* fix: Vim keymap insert mode exiting after single keystroke  (#10714) @miya
+* fix: Cannot create `/Sidebar` page from custom sidebar (#10690) @miya
+* fix: PageTree does not auto-scroll to target page path on initial render (#10699) @miya
+
+### 🧰 Maintenance
+
+* support: Upgrade headless-tree (#10733) @miya
+* support: Integrate Lefthook for pre-commit Biome formatting (#10694) @[copilot-swe-agent[bot]](https://github.com/apps/copilot-swe-agent)
+* support: Stop pushing docker image to weseek repository (#10681) @miya
+* support: Migrate the rest of files to Biome from Eslint (#10683) @yuki-takei
+
 ## [v7.4.2](https://github.com/growilabs/compare/v7.4.1...v7.4.2) - 2026-01-08
 ## [v7.4.2](https://github.com/growilabs/compare/v7.4.1...v7.4.2) - 2026-01-08
 
 
 ### 🚀 Improvement
 ### 🚀 Improvement

+ 49 - 0
CLAUDE.md

@@ -1 +1,50 @@
 @AGENTS.md
 @AGENTS.md
+
+# AI-DLC and Spec-Driven Development
+
+Kiro-style Spec Driven Development implementation on AI-DLC (AI Development Life Cycle)
+
+## Project Context
+
+### Paths
+- Steering: `.kiro/steering/`
+- Specs: `.kiro/specs/`
+
+### Steering vs Specification
+
+**Steering** (`.kiro/steering/`) - Guide AI with project-wide rules and context
+**Specs** (`.kiro/specs/`) - Formalize development process for individual features
+
+### Active Specifications
+- Check `.kiro/specs/` for active specifications
+- Use `/kiro:spec-status [feature-name]` to check progress
+
+## Development Guidelines
+- Think in English, generate responses in English. All Markdown content written to project files (e.g., requirements.md, design.md, tasks.md, research.md, validation reports) MUST be written in the target language configured for this specification (see spec.json.language).
+- **Note**: `spec.json.language` controls the language of spec document content only. It does NOT control the conversation response language. The conversation language is governed by the Language Policy in AGENTS.md.
+
+## Minimal Workflow
+- Phase 0 (optional): `/kiro:steering`, `/kiro:steering-custom`
+- Phase 1 (Specification):
+  - `/kiro:spec-init "description"`
+  - `/kiro:spec-requirements {feature}`
+  - `/kiro:validate-gap {feature}` (optional: for existing codebase)
+  - `/kiro:spec-design {feature} [-y]`
+  - `/kiro:validate-design {feature}` (optional: design review)
+  - `/kiro:spec-tasks {feature} [-y]`
+- Phase 2 (Implementation): `/kiro:spec-impl {feature} [tasks]`
+  - `/kiro:validate-impl {feature}` (optional: after implementation)
+  - `/kiro:spec-cleanup {feature}` (optional: organize specs post-implementation)
+- Progress check: `/kiro:spec-status {feature}` (use anytime)
+
+## Development Rules
+- 3-phase approval workflow: Requirements → Design → Tasks → Implementation
+- Human review required each phase; use `-y` only for intentional fast-track
+- Keep steering current and verify alignment with `/kiro:spec-status`
+- Follow the user's instructions precisely, and within that scope act autonomously: gather the necessary context and complete the requested work end-to-end in this run, asking questions only when essential information is missing or the instructions are critically ambiguous.
+
+## Steering Configuration
+- Load entire `.kiro/steering/` as project memory
+- Default files: `product.md`, `tech.md`, `structure.md`
+- Custom files are supported (managed via `/kiro:steering-custom`)
+

+ 105 - 0
apps/app/.claude/skills/app-architecture/SKILL.md

@@ -0,0 +1,105 @@
+---
+name: app-architecture
+description: GROWI main application (apps/app) architecture, directory structure, and design patterns. Auto-invoked when working in apps/app.
+user-invocable: false
+---
+
+# App Architecture (apps/app)
+
+The main GROWI application is a **full-stack Next.js application** with Express.js backend and MongoDB database.
+
+For technology stack details, see the global `tech-stack` skill.
+
+## Directory Structure
+
+```
+apps/app/src/
+├── pages/                 # Next.js Pages Router (*.page.tsx)
+├── features/             # Feature modules (recommended for new code)
+│   └── {feature-name}/
+│       ├── index.ts      # Public exports
+│       ├── interfaces/   # TypeScript types
+│       ├── server/       # models/, routes/, services/
+│       └── client/       # components/, states/, hooks/
+├── server/               # Express server (legacy)
+│   ├── models/           # Mongoose models
+│   ├── routes/apiv3/     # RESTful API v3
+│   └── services/         # Business logic
+├── components/           # React components (legacy)
+├── states/               # Jotai atoms
+└── stores-universal/     # SWR hooks
+```
+
+## Feature-Based Architecture
+
+Organize code by **business feature** rather than by technical layer:
+
+```
+❌ Layer-based (old):          ✅ Feature-based (new):
+├── models/User.ts             ├── features/user/
+├── routes/user.ts             │   ├── server/models/User.ts
+├── components/UserList.tsx    │   ├── server/routes/user.ts
+                               │   └── client/components/UserList.tsx
+```
+
+### Creating a New Feature
+
+1. Create `features/{feature-name}/`
+2. Define interfaces in `interfaces/`
+3. Implement server logic in `server/` (models, routes, services)
+4. Implement client logic in `client/` (components, hooks, states)
+5. Export public API through `index.ts`
+
+## Entry Points
+
+- **Server**: `server/app.ts` - Express + Next.js initialization
+- **Client**: `pages/_app.page.tsx` - Jotai + SWR providers
+- **Wiki Pages**: `pages/[[...path]]/index.page.tsx` - Catch-all route (SSR)
+
+## API Design (RESTful API v3)
+
+Routes in `server/routes/apiv3/` with OpenAPI specs:
+
+```typescript
+/**
+ * @openapi
+ * /api/v3/pages/{id}:
+ *   get:
+ *     summary: Get page by ID
+ */
+router.get('/pages/:id', async (req, res) => {
+  const page = await PageService.findById(req.params.id);
+  res.json(page);
+});
+```
+
+## State Management
+
+- **Jotai**: UI state (modals, forms) in `states/`
+- **SWR**: Server data (pages, users) in `stores-universal/`
+
+For detailed patterns, see `app-specific-patterns` skill.
+
+## Design Principles
+
+1. **Feature Isolation**: New features self-contained in `features/`
+2. **Server-Client Separation**: Prevent server code bundled into client
+3. **API-First**: Define OpenAPI specs before implementation
+4. **Type-Driven**: Define interfaces before implementation
+5. **Progressive Migration**: Gradually move legacy code to `features/`
+
+## Legacy Migration
+
+Legacy directories (`components/`, `server/models/`, `client/`) should be gradually migrated to `features/`:
+
+- New features → `features/`
+- Bug fixes → Can stay in legacy
+- Refactoring → Move to `features/`
+
+## Summary
+
+1. **New features**: `features/{feature-name}/` structure
+2. **Server-client separation**: Keep separate
+3. **API-first**: OpenAPI specs for API v3
+4. **State**: Jotai (UI) + SWR (server data)
+5. **Progressive migration**: No rush for stable legacy code

+ 202 - 0
apps/app/.claude/skills/app-commands/SKILL.md

@@ -0,0 +1,202 @@
+---
+name: app-commands
+description: GROWI main application (apps/app) specific commands and scripts. Auto-invoked when working in apps/app.
+user-invocable: false
+---
+
+# App Commands (apps/app)
+
+Commands specific to the main GROWI application. For global commands (turbo, pnpm), see the global `tech-stack` skill.
+
+## Quality Check Commands
+
+**IMPORTANT**: Distinguish between Turborepo tasks and package-specific scripts.
+
+### Turbo Tasks vs Package Scripts
+
+| Task | Turborepo (turbo.json) | Package Script (package.json) |
+|------|------------------------|-------------------------------|
+| `lint` | ✅ Yes | ✅ Yes (runs all lint:\*) |
+| `test` | ✅ Yes | ✅ Yes |
+| `build` | ✅ Yes | ✅ Yes |
+| `lint:typecheck` | ❌ No | ✅ Yes |
+| `lint:biome` | ❌ No | ✅ Yes |
+| `lint:styles` | ❌ No | ✅ Yes |
+
+### Recommended Commands
+
+```bash
+# Run ALL quality checks (uses Turborepo caching)
+turbo run lint --filter @growi/app
+turbo run test --filter @growi/app
+turbo run build --filter @growi/app
+
+# Run INDIVIDUAL lint checks (package-specific scripts, from apps/app directory)
+pnpm run lint:typecheck   # TypeScript only
+pnpm run lint:biome       # Biome only
+pnpm run lint:styles      # Stylelint only
+```
+
+> **Running individual test files**: See the `testing` rule (`.claude/rules/testing.md`).
+
+### Common Mistake
+
+```bash
+# ❌ WRONG: lint:typecheck is NOT a Turborepo task
+turbo run lint:typecheck --filter @growi/app
+# Error: could not find task `lint:typecheck` in project
+
+# ✅ CORRECT: Use pnpm for package-specific scripts
+pnpm --filter @growi/app run lint:typecheck
+```
+
+## Quick Reference
+
+| Task | Command |
+|------|---------|
+| **Migration** | `pnpm run dev:migrate` |
+| **OpenAPI generate** | `pnpm run openapi:generate-spec:apiv3` |
+| **REPL console** | `pnpm run console` |
+| **Visual regression** | `pnpm run reg:run` |
+| **Version bump** | `pnpm run version:patch` |
+
+## Database Migration
+
+```bash
+# Run pending migrations
+pnpm run dev:migrate
+
+# Check migration status
+pnpm run dev:migrate:status
+
+# Apply migrations
+pnpm run dev:migrate:up
+
+# Rollback last migration
+pnpm run dev:migrate:down
+
+# Production migration
+pnpm run migrate
+```
+
+**Note**: Migrations use `migrate-mongo`. Files are in `config/migrate-mongo/`.
+
+### Creating a New Migration
+
+```bash
+# Create migration file manually in config/migrate-mongo/
+# Format: YYYYMMDDHHMMSS-migration-name.js
+
+# Test migration cycle
+pnpm run dev:migrate:up
+pnpm run dev:migrate:down
+pnpm run dev:migrate:up
+```
+
+## OpenAPI Commands
+
+```bash
+# Generate OpenAPI spec for API v3
+pnpm run openapi:generate-spec:apiv3
+
+# Validate API v3 spec
+pnpm run lint:openapi:apiv3
+
+# Generate operation IDs
+pnpm run openapi:build:generate-operation-ids
+```
+
+Generated specs output to `tmp/openapi-spec-apiv3.json`.
+
+## Style Pre-build (Vite)
+
+```bash
+# Development mode
+pnpm run dev:pre:styles
+
+# Production mode
+pnpm run pre:styles
+```
+
+Pre-builds SCSS styles into CSS bundles using Vite.
+
+## Debug & Utility
+
+### REPL Console
+
+```bash
+pnpm run console
+# or
+pnpm run repl
+```
+
+Interactive Node.js REPL with Mongoose models loaded. Useful for debugging database queries.
+
+### Visual Regression Testing
+
+```bash
+pnpm run reg:run
+```
+
+## Version Commands
+
+```bash
+# Bump patch version (e.g., 7.4.3 → 7.4.4)
+pnpm run version:patch
+
+# Create prerelease (e.g., 7.4.4 → 7.4.5-RC.0)
+pnpm run version:prerelease
+
+# Create preminor (e.g., 7.4.4 → 7.5.0-RC.0)
+pnpm run version:preminor
+```
+
+## Production
+
+```bash
+# Start server (after build)
+pnpm run server
+
+# Start for CI environments
+pnpm run server:ci
+```
+
+**Note**: `preserver` hook automatically runs migrations before starting.
+
+## CI/CD
+
+```bash
+# Launch dev server for CI
+pnpm run launch-dev:ci
+
+# Start production server for CI
+pnpm run server:ci
+```
+
+## Environment Variables
+
+Development uses `dotenv-flow`:
+
+- `.env` - Default values
+- `.env.local` - Local overrides (not committed)
+- `.env.development` - Development-specific
+- `.env.production` - Production-specific
+
+See `.env.example` for available variables.
+
+## Troubleshooting
+
+### Migration Issues
+
+```bash
+pnpm run dev:migrate:status   # Check status
+pnpm run dev:migrate:down     # Rollback
+pnpm run dev:migrate:up       # Re-apply
+```
+
+### Build Issues
+
+```bash
+pnpm run clean                # Clear artifacts
+pnpm run build                # Rebuild
+```

+ 173 - 0
apps/app/.claude/skills/app-specific-patterns/SKILL.md

@@ -0,0 +1,173 @@
+---
+name: app-specific-patterns
+description: GROWI main application (apps/app) specific patterns for Next.js, Jotai, SWR, and testing. Auto-invoked when working in apps/app.
+user-invocable: false
+---
+
+# App Specific Patterns (apps/app)
+
+For general testing patterns, see the global `.claude/skills/learned/essential-test-patterns` and `.claude/skills/learned/essential-test-design` skills.
+
+## Next.js Pages Router
+
+### File Naming
+
+Pages must use `.page.tsx` suffix:
+
+```
+pages/
+├── _app.page.tsx           # App wrapper
+├── [[...path]]/index.page.tsx  # Catch-all wiki pages
+└── admin/index.page.tsx    # Admin pages
+```
+
+### getLayout Pattern
+
+```typescript
+// pages/admin/index.page.tsx
+import type { NextPageWithLayout } from '~/interfaces/next-page';
+
+const AdminPage: NextPageWithLayout = () => <AdminDashboard />;
+
+AdminPage.getLayout = (page) => <AdminLayout>{page}</AdminLayout>;
+
+export default AdminPage;
+```
+
+## Jotai State Management
+
+### Directory Structure
+
+```
+src/states/
+├── ui/
+│   ├── sidebar/              # Multi-file feature
+│   ├── device.ts             # Single-file feature
+│   └── modal/                # 1 modal = 1 file
+│       ├── page-create.ts
+│       └── page-delete.ts
+├── page/                     # Page data state
+├── server-configurations/
+└── context.ts
+
+features/{name}/client/states/  # Feature-scoped atoms
+```
+
+### Placement Rules
+
+| Category | Location |
+|----------|----------|
+| UI state | `states/ui/` |
+| Modal state | `states/ui/modal/` (1 file per modal) |
+| Page data | `states/page/` |
+| Feature-specific | `features/{name}/client/states/` |
+
+### Derived Atoms
+
+```typescript
+import { atom } from 'jotai';
+
+export const currentPageAtom = atom<Page | null>(null);
+
+// Derived (read-only)
+export const currentPagePathAtom = atom((get) => {
+  return get(currentPageAtom)?.path ?? null;
+});
+```
+
+## SWR Data Fetching
+
+### Directory
+
+```
+src/stores-universal/
+├── pages.ts       # Page hooks
+├── users.ts       # User hooks
+└── admin/settings.ts
+```
+
+### Patterns
+
+```typescript
+import useSWR from 'swr';
+import useSWRImmutable from 'swr/immutable';
+
+// Auto-revalidation
+export const usePageList = () => useSWR<Page[]>('/api/v3/pages', fetcher);
+
+// No auto-revalidation (static data)
+export const usePageById = (id: string | null) =>
+  useSWRImmutable<Page>(id ? `/api/v3/pages/${id}` : null, fetcher);
+```
+
+## Testing (apps/app Specific)
+
+### Mocking Next.js Router
+
+```typescript
+import { mockDeep } from 'vitest-mock-extended';
+import type { NextRouter } from 'next/router';
+
+const createMockRouter = (overrides = {}) => {
+  const mock = mockDeep<NextRouter>();
+  mock.pathname = '/test';
+  mock.push.mockResolvedValue(true);
+  return Object.assign(mock, overrides);
+};
+
+vi.mock('next/router', () => ({
+  useRouter: () => createMockRouter(),
+}));
+```
+
+### Testing with Jotai
+
+```typescript
+import { Provider } from 'jotai';
+import { useHydrateAtoms } from 'jotai/utils';
+
+const HydrateAtoms = ({ initialValues, children }) => {
+  useHydrateAtoms(initialValues);
+  return children;
+};
+
+const renderWithJotai = (ui, initialValues = []) => render(
+  <Provider>
+    <HydrateAtoms initialValues={initialValues}>{ui}</HydrateAtoms>
+  </Provider>
+);
+
+// Usage
+renderWithJotai(<PageHeader />, [[currentPageAtom, mockPage]]);
+```
+
+### Testing SWR
+
+```typescript
+import { SWRConfig } from 'swr';
+
+const wrapper = ({ children }) => (
+  <SWRConfig value={{ dedupingInterval: 0, provider: () => new Map() }}>
+    {children}
+  </SWRConfig>
+);
+
+const { result } = renderHook(() => usePageById('123'), { wrapper });
+```
+
+## Path Aliases
+
+Always use `~/` for imports:
+
+```typescript
+import { PageService } from '~/server/services/PageService';
+import { currentPageAtom } from '~/states/page/page-atoms';
+```
+
+## Summary
+
+1. **Next.js**: `.page.tsx` suffix, `getLayout` for layouts
+2. **Jotai**: `states/` global, `features/*/client/states/` feature-scoped
+3. **SWR**: `stores-universal/`, null key for conditional fetch
+4. **Testing**: Mock router, hydrate Jotai, wrap SWR config
+5. **Imports**: Always `~/` path alias

+ 302 - 0
apps/app/.claude/skills/learned/page-save-origin-semantics/SKILL.md

@@ -0,0 +1,302 @@
+---
+name: page-save-origin-semantics
+description: Auto-invoked when modifying origin-based conflict detection, revision validation logic, or isUpdatable() method. Explains the two-stage origin check mechanism for conflict detection and its separation from diff detection.
+---
+
+# Page Save Origin Semantics
+
+## Problem
+
+When modifying page save logic, it's easy to accidentally break the carefully designed origin-based conflict detection system. The system uses a two-stage check mechanism (frontend + backend) to determine when revision validation should be enforced vs. bypassed for collaborative editing (Yjs).
+
+**Key Insight**: **Conflict detection (revision check)** and **diff detection (hasDiffToPrev)** serve different purposes and require separate logic.
+
+## Solution
+
+Understanding the two-stage origin check mechanism:
+
+### Stage 1: Frontend Determines revisionId Requirement
+
+```typescript
+// apps/app/src/client/components/PageEditor/PageEditor.tsx:158
+const isRevisionIdRequiredForPageUpdate = currentPage?.revision?.origin === undefined;
+
+// lines 308-310
+const revisionId = isRevisionIdRequiredForPageUpdate
+  ? currentRevisionId
+  : undefined;
+```
+
+**Logic**: Check the **latest revision's origin** on the page:
+- If `origin === undefined` (legacy/API save) → Send `revisionId`
+- If `origin === "editor"` or `"view"` → Do NOT send `revisionId`
+
+### Stage 2: Backend Determines Conflict Check Behavior
+
+```javascript
+// apps/app/src/server/models/obsolete-page.js:167-172
+const ignoreLatestRevision =
+  origin === Origin.Editor &&
+  (latestRevisionOrigin === Origin.Editor || latestRevisionOrigin === Origin.View);
+
+if (ignoreLatestRevision) {
+  return true;  // Bypass revision check
+}
+
+// Otherwise, enforce strict revision matching
+if (revision != previousRevision) {
+  return false;  // Reject save
+}
+return true;
+```
+
+**Logic**: Check **current request's origin** AND **latest revision's origin**:
+- If `origin === "editor"` AND latest is `"editor"` or `"view"` → Bypass revision check
+- Otherwise → Enforce strict revision ID matching
+
+## Origin Values
+
+Three types of page update methods (called "origin"):
+
+- **`Origin.Editor = "editor"`** - Save from editor mode (collaborative editing via Yjs)
+- **`Origin.View = "view"`** - Save from view mode
+  - Examples: HandsontableModal, DrawioModal editing
+- **`undefined`** - API-based saves or legacy pages
+
+## Origin Strength (強弱)
+
+**Basic Rule**: Page updates require the previous revision ID in the request. If the latest revision doesn't match, the server rejects the request.
+
+**Exception - Editor origin is stronger than View origin**:
+- **UX Goal**: Avoid `Posted param "revisionId" is outdated` errors when multiple members are using the Editor and View changes interrupt them
+- **Special Case**: When the latest revision's origin is View, Editor origin requests can update WITHOUT requiring revision ID
+
+### Origin Strength Matrix
+
+|        | Latest Revision: Editor | Latest Revision: View | Latest Revision: API |
+| ------ | ----------------------- | --------------------- | -------------------- |
+| **Request: Editor** | ⭕️ Bypass revision check | ⭕️ Bypass revision check | ❌ Strict check |
+| **Request: View**   | ❌ Strict check | ❌ Strict check | ❌ Strict check |
+| **Request: API**    | ❌ Strict check | ❌ Strict check | ❌ Strict check |
+
+**Reading the table**:
+- ⭕️ = Revision check bypassed (revisionId not required)
+- ❌ = Strict revision check required (revisionId must match)
+
+## Behavior by Scenario
+
+| Latest Revision Origin | Request Origin | revisionId Sent? | Revision Check | Use Case |
+|------------------------|----------------|------------------|----------------|----------|
+| `editor` or `view` | `editor` | ❌ No | ✅ Bypassed | Normal Editor use (most common) |
+| `undefined` | `editor` | ✅ Yes | ✅ Enforced | Legacy page in Editor |
+| `undefined` | `undefined` (API) | ✅ Yes (required) | ✅ Enforced | API save |
+
+## Example: Server-Side Logic Respecting Origin Semantics
+
+When adding server-side functionality that needs previous revision data:
+
+```typescript
+// ✅ CORRECT: Separate concerns - conflict detection vs. diff detection
+let previousRevision: IRevisionHasId | null = null;
+
+// Priority 1: Use provided revisionId (for conflict detection)
+if (sanitizeRevisionId != null) {
+  previousRevision = await Revision.findById(sanitizeRevisionId);
+}
+
+// Priority 2: Fallback to currentPage.revision (for other purposes like diff detection)
+if (previousRevision == null && currentPage.revision != null) {
+  previousRevision = await Revision.findById(currentPage.revision);
+}
+
+const previousBody = previousRevision?.body ?? null;
+
+// Continue with existing conflict detection logic (unchanged)
+if (currentPage != null && !(await currentPage.isUpdatable(sanitizeRevisionId, origin))) {
+  // ... return conflict error
+}
+
+// Use previousBody for diff detection or other purposes
+updatedPage = await crowi.pageService.updatePage(
+  currentPage,
+  body,
+  previousBody,  // ← Available regardless of conflict detection logic
+  req.user,
+  options,
+);
+```
+
+```typescript
+// ❌ WRONG: Forcing frontend to always send revisionId
+const revisionId = currentRevisionId;  // Always send, regardless of origin
+// This breaks Yjs collaborative editing semantics!
+```
+
+```typescript
+// ❌ WRONG: Changing backend conflict detection logic
+// Don't modify isUpdatable() unless you fully understand the implications
+// for collaborative editing
+```
+
+## When to Apply
+
+**Always consider this pattern when**:
+- Modifying page save/update API handlers
+- Adding functionality that needs previous revision data
+- Working on conflict detection or revision validation logic
+- Implementing features that interact with page history
+- Debugging save operation issues
+
+**Key Principles**:
+1. **Do NOT modify frontend revisionId logic** unless explicitly required for conflict detection
+2. **Do NOT modify isUpdatable() logic** unless fixing conflict detection bugs
+3. **Separate concerns**: Conflict detection ≠ Other revision-based features (diff detection, history, etc.)
+4. **Server-side fallback**: If you need previous revision data when revisionId is not provided, fetch from `currentPage.revision`
+
+## Detailed Scenario Analysis
+
+### Scenario A: Normal Editor Mode (Most Common Case)
+
+**Latest revision has `origin=editor`**:
+
+1. **Frontend Logic**:
+   - `isRevisionIdRequiredForPageUpdate = false` (latest revision origin is not undefined)
+   - Does NOT send `revisionId` in request
+   - Sends `origin: Origin.Editor`
+
+2. **API Layer**:
+   ```typescript
+   previousRevision = await Revision.findById(undefined);  // → null
+   ```
+   Result: No previousRevision fetched via revisionId
+
+3. **Backend Conflict Check** (`isUpdatable`):
+   ```javascript
+   ignoreLatestRevision =
+     (Origin.Editor === Origin.Editor) &&
+     (latestRevisionOrigin === Origin.Editor || latestRevisionOrigin === Origin.View)
+   // → true (latest revision is editor)
+   return true;  // Bypass revision check
+   ```
+   Result: ✅ Save succeeds without revision validation
+
+4. **Impact on Other Features**:
+   - If you need previousRevision data (e.g., for diff detection), it won't be available unless you implement server-side fallback
+   - This is where `currentPage.revision` fallback becomes necessary
+
+### Scenario B: Legacy Page in Editor Mode
+
+**Latest revision has `origin=undefined`**:
+
+1. **Frontend Logic**:
+   - `isRevisionIdRequiredForPageUpdate = true` (latest revision origin is undefined)
+   - Sends `revisionId` in request
+   - Sends `origin: Origin.Editor`
+
+2. **API Layer**:
+   ```typescript
+   previousRevision = await Revision.findById(sanitizeRevisionId);  // → revision object
+   ```
+   Result: previousRevision fetched successfully
+
+3. **Backend Conflict Check** (`isUpdatable`):
+   ```javascript
+   ignoreLatestRevision =
+     (Origin.Editor === Origin.Editor) &&
+     (latestRevisionOrigin === undefined)
+   // → false (latest revision is undefined, not editor/view)
+
+   // Strict revision check
+   if (revision != sanitizeRevisionId) {
+     return false;  // Reject if mismatch
+   }
+   return true;
+   ```
+   Result: ✅ Save succeeds only if revisionId matches
+
+4. **Impact on Other Features**:
+   - previousRevision data is available
+   - All revision-based features work correctly
+
+### Scenario C: API-Based Save
+
+**Request has `origin=undefined` or omitted**:
+
+1. **Frontend**: Not applicable (API client)
+
+2. **API Layer**:
+   - API client MUST send `revisionId` in request
+   - `previousRevision = await Revision.findById(sanitizeRevisionId)`
+
+3. **Backend Conflict Check** (`isUpdatable`):
+   ```javascript
+   ignoreLatestRevision =
+     (undefined === Origin.Editor) && ...
+   // → false
+
+   // Strict revision check
+   if (revision != sanitizeRevisionId) {
+     return false;
+   }
+   return true;
+   ```
+   Result: Strict validation enforced
+
+## Root Cause: Why This Separation Matters
+
+**Historical Context**: At some point, the frontend stopped sending `previousRevision` (revisionId) for certain scenarios to support Yjs collaborative editing. This broke features that relied on previousRevision data being available.
+
+**The Core Issue**:
+- **Conflict detection** needs to know "Is this save conflicting with another user's changes?" (Answered by revision check)
+- **Diff detection** needs to know "Did the content actually change?" (Answered by comparing body)
+- **Current implementation conflates these**: When conflict detection is bypassed, previousRevision is not fetched, breaking diff detection
+
+**The Solution Pattern**:
+```typescript
+// Separate the two concerns:
+
+// 1. Fetch previousRevision for data purposes (diff detection, history, etc.)
+let previousRevision: IRevisionHasId | null = null;
+if (sanitizeRevisionId != null) {
+  previousRevision = await Revision.findById(sanitizeRevisionId);
+} else if (currentPage.revision != null) {
+  previousRevision = await Revision.findById(currentPage.revision);  // Fallback
+}
+
+// 2. Use previousRevision data for your feature
+const previousBody = previousRevision?.body ?? null;
+
+// 3. Conflict detection happens independently via isUpdatable()
+if (currentPage != null && !(await currentPage.isUpdatable(sanitizeRevisionId, origin))) {
+  // Return conflict error
+}
+```
+
+## Reference
+
+**Official Documentation**:
+- https://dev.growi.org/651a6f4a008fee2f99187431#origin-%E3%81%AE%E5%BC%B7%E5%BC%B1
+
+**Related Files**:
+- Frontend: `apps/app/src/client/components/PageEditor/PageEditor.tsx` (lines 158, 240, 308-310)
+- Backend: `apps/app/src/server/models/obsolete-page.js` (lines 159-182, isUpdatable method)
+- API: `apps/app/src/server/routes/apiv3/page/update-page.ts` (lines 260-282, conflict check)
+- Interface: `packages/core/src/interfaces/revision.ts` (lines 6-11, Origin definition)
+
+## Common Pitfalls
+
+1. **Assuming revisionId is always available**: It's not! Editor mode with recent editor/view saves omits it by design.
+2. **Conflating conflict detection with other features**: They serve different purposes and need separate logic.
+3. **Breaking Yjs collaborative editing**: Forcing revisionId to always be sent breaks the bypass mechanism.
+4. **Ignoring origin values**: The system behavior changes significantly based on origin combinations.
+
+## Lessons Learned
+
+This pattern was identified during the "improve-unchanged-revision" feature implementation, where the initial assumption was that frontend should always send `revisionId` for diff detection. Deep analysis revealed:
+
+- The frontend logic is correct for conflict detection and should NOT be changed
+- Server-side fallback is the correct approach to get previous revision data
+- Two-stage checking is intentional and critical for Yjs collaborative editing
+- Conflict detection and diff detection must be separated
+
+**Key Takeaway**: Always understand the existing architectural patterns before proposing changes. What appears to be a "fix" might actually break carefully designed functionality.

+ 151 - 74
apps/app/AGENTS.md

@@ -1,84 +1,161 @@
-# GROWI Main Application Development Guide
-
-## Overview
-
-This guide provides comprehensive documentation for AI coding agents working on the GROWI main application (`/apps/app/`). GROWI is a team collaboration wiki platform built with Next.js, Express, and MongoDB.
-
-## Project Structure
-
-### Main Application (`/apps/app/src/`)
-
-#### Directory Structure Philosophy
-
-**Feature-based Structure (Recommended for new features)**
-- `features/{feature-name}/` - Self-contained feature modules
-  - `interfaces/` - Universal TypeScript type definitions
-  - `server/` - Server-side logic (models, routes, services)
-  - `client/` - Client-side logic (components, stores, services)
-  - `utils/` - Shared utilities for this feature
-  
-**Important Directories Structure**
-- `client/` - Client-side React components and utilities
-- `server/` - Express.js backend
-- `components/` - Universal React components
-- `pages/` - Next.js Pages Router
-- `states/` - Jotai state management
-- `stores/` - SWR-based state stores
-- `stores-universal/` - Universal SWR-based state stores
-- `styles/` - SCSS stylesheets with modular architecture
-- `migrations/` - MongoDB database migration scripts
-- `interfaces/` - Universal TypeScript type definitions
-- `models/` - Universal Data model definitions
-
-### Key Technical Details
-
-**Frontend Stack**
-- **Framework**: Next.js (Pages Router) with React
-- **Language**: TypeScript (strict mode enabled)
-- **Styling**: SCSS with CSS Modules by Bootstrap 5
-- **State Management**:
-  - **Jotai** (Primary, Recommended): Atomic state management for UI and application state
-  - **SWR**: Data fetching, caching, and revalidation
-  - **Unstated**: Legacy (being phased out, replaced by Jotai)
-- **Testing**: 
-  - Vitest for unit tests (`*.spec.ts`, `*.spec.tsx`)
-  - Jest for integration tests (`*.integ.ts`)
-  - React Testing Library for component testing
-  - Playwright for E2E testing
-- **i18n**: next-i18next for internationalization
-
-**Backend Stack**
-- **Runtime**: Node.js
-- **Framework**: Express.js with TypeScript
-- **Database**: MongoDB with Mongoose ODM
-- **Migration System**: migrate-mongo
-- **Authentication**: Passport.js with multiple strategies (local, LDAP, OAuth, SAML)
-- **Real-time**: Socket.io for collaborative editing and notifications
-- **Search**: Elasticsearch integration (optional)
-- **Observability**: OpenTelemetry integration
-
-**Common Commands**
+# GROWI Main Application (apps/app)
+
+The main GROWI wiki application - a full-stack Next.js application with Express.js backend and MongoDB database.
+
+## Technology Stack
+
+| Layer | Technology |
+|-------|------------|
+| **Frontend** | Next.js 14 (Pages Router), React 18 |
+| **Backend** | Express.js with custom server |
+| **Database** | MongoDB with Mongoose ^6.13.6 |
+| **State** | Jotai (UI state) + SWR (server state) |
+| **API** | RESTful API v3 with OpenAPI specs |
+| **Testing** | Vitest, React Testing Library |
+
+## Quick Reference
+
+### Essential Commands
+
 ```bash
 ```bash
-# Type checking only
-cd apps/app && pnpm run lint:typecheck
+# Development
+pnpm run dev                    # Start dev server (or turbo run dev from root)
+pnpm run dev:migrate            # Run database migrations
+
+# Quality Checks
+pnpm run lint:typecheck         # TypeScript type check
+pnpm run lint:biome             # Biome linter
+pnpm run test                   # Run tests
+
+# Build
+pnpm run build                  # Build for production
+
+# Run Specific Tests
+pnpm vitest run yjs.integ       # Use partial file name
+pnpm vitest run helper.spec     # Vitest auto-finds matching files
+```
+
+### Key Directories
+
+```
+src/
+├── pages/                 # Next.js Pages Router (*.page.tsx)
+├── features/             # Feature modules (recommended for new code)
+│   └── {feature-name}/
+│       ├── server/       # Server-side (models, routes, services)
+│       └── client/       # Client-side (components, hooks, states)
+├── server/               # Express server (legacy structure)
+│   ├── models/           # Mongoose models
+│   ├── routes/apiv3/     # API v3 routes
+│   └── services/         # Business logic
+├── components/           # React components (legacy)
+├── states/               # Jotai atoms
+└── stores-universal/     # SWR hooks
+```
+
+## Development Guidelines
+
+### 1. Feature-Based Architecture (New Code)
+
+Create new features in `features/{feature-name}/`:
+
+```
+features/my-feature/
+├── index.ts              # Public exports
+├── interfaces/           # TypeScript types
+├── server/
+│   ├── models/           # Mongoose models
+│   ├── routes/           # Express routes
+│   └── services/         # Business logic
+└── client/
+    ├── components/       # React components
+    ├── hooks/            # Custom hooks
+    └── states/           # Jotai atoms
+```
 
 
-# Run specific test file
-turbo run test:vitest @apps/app -- src/path/to/test.spec.tsx
+### 2. State Management
 
 
-# Check migration status
-cd apps/app && pnpm run dev:migrate:status
+- **Jotai**: UI state (modals, forms, selections)
+- **SWR**: Server data (pages, users, API responses)
 
 
-# Start REPL with app context
-cd apps/app && pnpm run repl
+```typescript
+// Jotai for UI state
+import { atom } from 'jotai';
+export const isModalOpenAtom = atom(false);
+
+// SWR for server data
+import useSWR from 'swr';
+export const usePageById = (id: string) => useSWR(`/api/v3/pages/${id}`);
+```
+
+### 3. Next.js Pages
+
+Use `.page.tsx` suffix and `getLayout` pattern:
+
+```typescript
+// pages/admin/index.page.tsx
+import type { NextPageWithLayout } from '~/interfaces/next-page';
+
+const AdminPage: NextPageWithLayout = () => <AdminDashboard />;
+AdminPage.getLayout = (page) => <AdminLayout>{page}</AdminLayout>;
+export default AdminPage;
+```
+
+### 4. API Routes (Express)
+
+Add routes to `server/routes/apiv3/` with OpenAPI docs:
+
+```typescript
+/**
+ * @openapi
+ * /api/v3/pages/{id}:
+ *   get:
+ *     summary: Get page by ID
+ */
+router.get('/pages/:id', async (req, res) => {
+  const page = await PageService.findById(req.params.id);
+  res.json(page);
+});
+```
+
+### 5. Path Aliases
+
+Use `~/` for absolute imports:
+
+```typescript
+import { PageService } from '~/server/services/PageService';
+import { Button } from '~/components/Button';
+```
+
+## Before Committing
+
+```bash
+pnpm run lint:typecheck   # 1. Type check
+pnpm run lint:biome       # 2. Lint
+pnpm run test             # 3. Run tests
+pnpm run build            # 4. Verify build (optional)
 ```
 ```
 
 
-### Important Technical Specifications
+## Key Features
+
+| Feature | Directory | Description |
+|---------|-----------|-------------|
+| Page Tree | `features/page-tree/` | Hierarchical page navigation |
+| OpenAI | `features/openai/` | AI assistant integration |
+| Search | `features/search/` | Elasticsearch full-text search |
+| Plugins | `features/growi-plugin/` | Plugin system |
+| OpenTelemetry | `features/opentelemetry/` | Monitoring/telemetry |
+
+## Skills (Auto-Loaded)
+
+When working in this directory, these skills are automatically loaded:
+
+- **app-architecture** - Directory structure, feature-based patterns
+- **app-commands** - apps/app specific commands (migrations, OpenAPI, etc.)
+- **app-specific-patterns** - Jotai/SWR/Next.js patterns, testing
 
 
-**Entry Points**
-- **Server**: `server/app.ts` - Handles OpenTelemetry initialization and Crowi server startup
-- **Client**: `pages/_app.page.tsx` - Root Next.js application component
-  - `pages/[[...path]]/` - Dynamic catch-all page routes
+Plus all global skills (monorepo-overview, tech-stack).
 
 
 ---
 ---
 
 
-*This guide was compiled from project memory files to assist AI coding agents in understanding the GROWI application architecture and development practices.*
+For detailed patterns and examples, refer to the Skills in `.claude/skills/`.

+ 18 - 0
apps/app/CLAUDE.md

@@ -1 +1,19 @@
 @AGENTS.md
 @AGENTS.md
+
+# apps/app Specific Knowledge
+
+## Critical Architectural Patterns
+
+### Page Save Origin Semantics
+
+**IMPORTANT**: When working on page save, update, or revision operations, always consult the **page-save-origin-semantics** skill for understanding the two-stage origin check mechanism.
+
+**Key Concept**: Origin-based conflict detection uses a two-stage check (frontend + backend) to determine when revision validation should be enforced vs. bypassed for Yjs collaborative editing.
+
+**Critical Rule**: **Conflict detection (revision check)** and **other revision-based features (diff detection, history, etc.)** serve different purposes and require separate logic. Do NOT conflate them.
+
+**Documentation**:
+- Skill (auto-invoked): `.claude/skills/learned/page-save-origin-semantics/SKILL.md`
+
+**Common Pitfall**: Assuming `revisionId` is always available or forcing frontend to always send it will break Yjs collaborative editing.
+

+ 4 - 0
apps/app/config/next-i18next.config.d.ts

@@ -0,0 +1,4 @@
+import type { UserConfig } from 'next-i18next';
+
+declare const config: UserConfig;
+export = config;

+ 1 - 1
apps/app/docker/README.md

@@ -10,7 +10,7 @@ GROWI Official docker image
 Supported tags and respective Dockerfile links
 Supported tags and respective Dockerfile links
 ------------------------------------------------
 ------------------------------------------------
 
 
-* [`7.4.2`, `7.4`, `7`, `latest` (Dockerfile)](https://github.com/growilabs/growi/blob/v7.4.2/apps/app/docker/Dockerfile)
+* [`7.4.4`, `7.4`, `7`, `latest` (Dockerfile)](https://github.com/growilabs/growi/blob/v7.4.4/apps/app/docker/Dockerfile)
 * [`7.3.0`, `7.3` (Dockerfile)](https://github.com/growilabs/growi/blob/v7.3.0/apps/app/docker/Dockerfile)
 * [`7.3.0`, `7.3` (Dockerfile)](https://github.com/growilabs/growi/blob/v7.3.0/apps/app/docker/Dockerfile)
 * [`7.2.0`, `7.2` (Dockerfile)](https://github.com/growilabs/growi/blob/v7.2.0/apps/app/docker/Dockerfile)
 * [`7.2.0`, `7.2` (Dockerfile)](https://github.com/growilabs/growi/blob/v7.2.0/apps/app/docker/Dockerfile)
 
 

+ 0 - 86
apps/app/jest.config.js

@@ -1,86 +0,0 @@
-// For a detailed explanation regarding each configuration property, visit:
-// https://jestjs.io/docs/en/configuration.html
-
-const MODULE_NAME_MAPPING = {
-  '^\\^/(.+)$': '<rootDir>/$1',
-  '^~/(.+)$': '<rootDir>/src/$1',
-};
-
-module.exports = {
-  // Indicates whether each individual test should be reported during the run
-  verbose: true,
-
-  moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx'],
-
-  projects: [
-    {
-      displayName: 'server',
-
-      transform: {
-        '^.+\\.(t|j)sx?$': '@swc-node/jest',
-      },
-
-      rootDir: '.',
-      roots: ['<rootDir>'],
-      testMatch: [
-        '<rootDir>/test/integration/**/*.test.ts',
-        '<rootDir>/test/integration/**/*.test.js',
-      ],
-      // https://regex101.com/r/jTaxYS/1
-      modulePathIgnorePatterns: [
-        '<rootDir>/test/integration/*.*/v5(..*)*.[t|j]s',
-      ],
-      testEnvironment: 'node',
-      globalSetup: '<rootDir>/test/integration/global-setup.js',
-      globalTeardown: '<rootDir>/test/integration/global-teardown.js',
-      setupFilesAfterEnv: ['<rootDir>/test/integration/setup.js'],
-
-      // Automatically clear mock calls and instances between every test
-      clearMocks: true,
-      moduleNameMapper: MODULE_NAME_MAPPING,
-    },
-    {
-      displayName: 'server-v5',
-
-      transform: {
-        '^.+\\.(t|j)sx?$': '@swc-node/jest',
-      },
-
-      rootDir: '.',
-      roots: ['<rootDir>'],
-      testMatch: [
-        '<rootDir>/test/integration/**/v5.*.test.ts',
-        '<rootDir>/test/integration/**/v5.*.test.js',
-      ],
-
-      testEnvironment: 'node',
-      globalSetup: '<rootDir>/test/integration/global-setup.js',
-      globalTeardown: '<rootDir>/test/integration/global-teardown.js',
-      setupFilesAfterEnv: ['<rootDir>/test/integration/setup.js'],
-
-      // Automatically clear mock calls and instances between every test
-      clearMocks: true,
-      moduleNameMapper: MODULE_NAME_MAPPING,
-    },
-  ],
-
-  // Automatically clear mock calls and instances between every test
-  clearMocks: true,
-
-  // Indicates whether the coverage information should be collected while executing the test
-  collectCoverage: true,
-
-  // An array of glob patterns indicating a set of files for which coverage information should be collected
-  // collectCoverageFrom: undefined,
-
-  // The directory where Jest should output its coverage files
-  coverageDirectory: 'coverage',
-
-  // An array of regexp pattern strings used to skip coverage collection
-  coveragePathIgnorePatterns: [
-    'index.ts',
-    '/config/',
-    '/resource/',
-    '/node_modules/',
-  ],
-};

+ 0 - 1
apps/app/nodemon.json

@@ -9,7 +9,6 @@
     "src/client",
     "src/client",
     "src/**/client",
     "src/**/client",
     "test",
     "test",
-    "test-with-vite",
     "tmp",
     "tmp",
     "*.mongodb.js"
     "*.mongodb.js"
   ]
   ]

+ 12 - 17
apps/app/package.json

@@ -1,6 +1,6 @@
 {
 {
   "name": "@growi/app",
   "name": "@growi/app",
-  "version": "7.4.3-RC.0",
+  "version": "7.4.5-RC.0",
   "license": "MIT",
   "license": "MIT",
   "private": "true",
   "private": "true",
   "scripts": {
   "scripts": {
@@ -26,7 +26,7 @@
     "dev:migrate:down": "pnpm run dev:migrate-mongo down -f config/migrate-mongo-config.js",
     "dev:migrate:down": "pnpm run dev:migrate-mongo down -f config/migrate-mongo-config.js",
     "//// for CI": "",
     "//// for CI": "",
     "launch-dev:ci": "cross-env NODE_ENV=development pnpm run dev:migrate && pnpm run ts-node src/server/app.ts --ci",
     "launch-dev:ci": "cross-env NODE_ENV=development pnpm run dev:migrate && pnpm run ts-node src/server/app.ts --ci",
-    "lint:typecheck": "vue-tsc --noEmit",
+    "lint:typecheck": "tsgo --noEmit",
     "lint:biome": "biome check --diagnostic-level=error",
     "lint:biome": "biome check --diagnostic-level=error",
     "lint:styles": "stylelint \"src/**/*.scss\"",
     "lint:styles": "stylelint \"src/**/*.scss\"",
     "lint:openapi:apiv3": "node node_modules/swagger2openapi/oas-validate tmp/openapi-spec-apiv3.json",
     "lint:openapi:apiv3": "node node_modules/swagger2openapi/oas-validate tmp/openapi-spec-apiv3.json",
@@ -34,12 +34,11 @@
     "lint": "run-p lint:**",
     "lint": "run-p lint:**",
     "prelint:openapi:apiv3": "pnpm run openapi:generate-spec:apiv3",
     "prelint:openapi:apiv3": "pnpm run openapi:generate-spec:apiv3",
     "prelint:openapi:apiv1": "pnpm run openapi:generate-spec:apiv1",
     "prelint:openapi:apiv1": "pnpm run openapi:generate-spec:apiv1",
-    "test": "run-p test:jest test:vitest:coverage",
-    "test:jest": "cross-env NODE_ENV=test TS_NODE_PROJECT=test/integration/tsconfig.json jest",
-    "test:vitest": "vitest run",
-    "test:vitest:coverage": "COLUMNS=200 vitest run --coverage",
-    "jest:run": "cross-env NODE_ENV=test TS_NODE_PROJECT=test/integration/tsconfig.json jest --passWithNoTests -- ",
-    "reg:run": "reg-suit run",
+    "test": "vitest run",
+    "test:coverage": "run-p test:coverage:* test:integ",
+    "test:coverage:unit": "COLUMNS=200 vitest run --coverage --project=app-unit",
+    "test:coverage:components": "COLUMNS=200 vitest run --coverage --project=app-components",
+    "test:integ": "vitest run --project=app-integration",
     "//// misc": "",
     "//// misc": "",
     "console": "npm run repl",
     "console": "npm run repl",
     "repl": "cross-env NODE_ENV=development npm run ts-node src/server/repl.ts",
     "repl": "cross-env NODE_ENV=development npm run ts-node src/server/repl.ts",
@@ -192,7 +191,7 @@
     "passport-saml": "^3.2.0",
     "passport-saml": "^3.2.0",
     "pathe": "^2.0.3",
     "pathe": "^2.0.3",
     "prop-types": "^15.8.1",
     "prop-types": "^15.8.1",
-    "qs": "^6.14.1",
+    "qs": "^6.14.2",
     "rate-limiter-flexible": "^2.3.7",
     "rate-limiter-flexible": "^2.3.7",
     "react": "^18.2.0",
     "react": "^18.2.0",
     "react-bootstrap-typeahead": "^6.3.2",
     "react-bootstrap-typeahead": "^6.3.2",
@@ -270,12 +269,10 @@
     "@growi/editor": "workspace:^",
     "@growi/editor": "workspace:^",
     "@growi/ui": "workspace:^",
     "@growi/ui": "workspace:^",
     "@handsontable/react": "=2.1.0",
     "@handsontable/react": "=2.1.0",
-    "@headless-tree/core": "^1.5.1",
-    "@headless-tree/react": "^1.5.1",
+    "@headless-tree/core": "^1.5.3",
+    "@headless-tree/react": "^1.5.3",
     "@next/bundle-analyzer": "^14.1.3",
     "@next/bundle-analyzer": "^14.1.3",
     "@popperjs/core": "^2.11.8",
     "@popperjs/core": "^2.11.8",
-    "@swc-node/jest": "^1.8.1",
-    "@swc/jest": "^0.2.36",
     "@tanstack/react-virtual": "^3.13.12",
     "@tanstack/react-virtual": "^3.13.12",
     "@testing-library/jest-dom": "^6.5.0",
     "@testing-library/jest-dom": "^6.5.0",
     "@testing-library/user-event": "^14.5.2",
     "@testing-library/user-event": "^14.5.2",
@@ -283,11 +280,11 @@
     "@types/bunyan": "^1.8.11",
     "@types/bunyan": "^1.8.11",
     "@types/express": "^4.17.21",
     "@types/express": "^4.17.21",
     "@types/hast": "^3.0.4",
     "@types/hast": "^3.0.4",
-    "@types/jest": "^29.5.2",
     "@types/js-cookie": "^3.0.6",
     "@types/js-cookie": "^3.0.6",
     "@types/ldapjs": "^2.2.5",
     "@types/ldapjs": "^2.2.5",
     "@types/mdast": "^4.0.4",
     "@types/mdast": "^4.0.4",
     "@types/node-cron": "^3.0.11",
     "@types/node-cron": "^3.0.11",
+    "@types/nodemailer": "6.4.22",
     "@types/react": "^18.2.14",
     "@types/react": "^18.2.14",
     "@types/react-dom": "^18.2.6",
     "@types/react-dom": "^18.2.6",
     "@types/react-input-autosize": "^2.2.4",
     "@types/react-input-autosize": "^2.2.4",
@@ -316,14 +313,12 @@
     "i18next-hmr": "^3.1.3",
     "i18next-hmr": "^3.1.3",
     "i18next-http-backend": "^2.6.2",
     "i18next-http-backend": "^2.6.2",
     "i18next-localstorage-backend": "^4.2.0",
     "i18next-localstorage-backend": "^4.2.0",
-    "jest": "^29.5.0",
-    "jest-date-mock": "^1.0.8",
-    "jest-localstorage-mock": "^2.4.14",
     "jotai-devtools": "^0.11.0",
     "jotai-devtools": "^0.11.0",
     "load-css-file": "^1.0.0",
     "load-css-file": "^1.0.0",
     "material-icons": "^1.11.3",
     "material-icons": "^1.11.3",
     "mdast-util-directive": "^3.0.0",
     "mdast-util-directive": "^3.0.0",
     "mdast-util-find-and-replace": "^3.0.1",
     "mdast-util-find-and-replace": "^3.0.1",
+    "mongodb-connection-string-url": "^7.0.0",
     "mongodb-memory-server-core": "^9.1.1",
     "mongodb-memory-server-core": "^9.1.1",
     "morgan": "^1.10.0",
     "morgan": "^1.10.0",
     "null-loader": "^4.0.1",
     "null-loader": "^4.0.1",

+ 2 - 7
apps/app/playwright/20-basic-features/access-to-page.spec.ts

@@ -1,11 +1,6 @@
-import { expect, type Page, test } from '@playwright/test';
+import { expect, test } from '@playwright/test';
 
 
-const appendTextToEditorUntilContains = async (page: Page, text: string) => {
-  await page.locator('.cm-content').fill(text);
-  await expect(page.getByTestId('page-editor-preview-body')).toContainText(
-    text,
-  );
-};
+import { appendTextToEditorUntilContains } from '../utils/AppendTextToEditorUntilContains';
 
 
 test('has title', async ({ page }) => {
 test('has title', async ({ page }) => {
   await page.goto('/Sandbox');
   await page.goto('/Sandbox');

+ 2 - 7
apps/app/playwright/23-editor/saving.spec.ts

@@ -1,12 +1,7 @@
-import { expect, type Page, test } from '@playwright/test';
+import { expect, test } from '@playwright/test';
 import path from 'path';
 import path from 'path';
 
 
-const appendTextToEditorUntilContains = async (page: Page, text: string) => {
-  await page.locator('.cm-content').fill(text);
-  await expect(page.getByTestId('page-editor-preview-body')).toContainText(
-    text,
-  );
-};
+import { appendTextToEditorUntilContains } from '../utils/AppendTextToEditorUntilContains';
 
 
 test('Successfully create page under specific path', async ({ page }) => {
 test('Successfully create page under specific path', async ({ page }) => {
   const newPagePath = '/child';
   const newPagePath = '/child';

+ 77 - 0
apps/app/playwright/23-editor/vim-keymap.spec.ts

@@ -0,0 +1,77 @@
+import { expect, type Page, test } from '@playwright/test';
+
+import { appendTextToEditorUntilContains } from '../utils/AppendTextToEditorUntilContains';
+
+/**
+ * Tests for Vim keymap functionality in the editor
+ * @see https://github.com/growilabs/growi/issues/8814
+ * @see https://github.com/growilabs/growi/issues/10701
+ */
+
+const changeKeymap = async (page: Page, keymap: string) => {
+  // Open OptionsSelector
+  await expect(page.getByTestId('options-selector-btn')).toBeVisible();
+  await page.getByTestId('options-selector-btn').click();
+  await expect(page.getByTestId('options-selector-menu')).toBeVisible();
+
+  // Click keymap selection button to navigate to keymap selector
+  await expect(page.getByTestId('keymap_current_selection')).toBeVisible();
+  await page.getByTestId('keymap_current_selection').click();
+
+  // Select Vim keymap
+  await expect(page.getByTestId(`keymap_radio_item_${keymap}`)).toBeVisible();
+  await page.getByTestId(`keymap_radio_item_${keymap}`).click();
+
+  // Close OptionsSelector
+  await page.getByTestId('options-selector-btn').click();
+  await expect(page.getByTestId('options-selector-menu')).not.toBeVisible();
+};
+
+test.describe
+  .serial('Vim keymap mode', () => {
+    test.beforeEach(async ({ page }) => {
+      await page.goto('/Sandbox/vim-keymap-test-page');
+
+      // Open Editor
+      await expect(page.getByTestId('editor-button')).toBeVisible();
+      await page.getByTestId('editor-button').click();
+      await expect(page.locator('.cm-content')).toBeVisible();
+      await expect(page.getByTestId('grw-editor-navbar-bottom')).toBeVisible();
+    });
+
+    test('Insert mode should persist while typing multiple characters', async ({
+      page,
+    }) => {
+      const testText = 'Hello World';
+
+      // Change to Vim keymap
+      await changeKeymap(page, 'vim');
+
+      // Focus the editor
+      await page.locator('.cm-content').click();
+
+      // Enter insert mode
+      await page.keyboard.type('i');
+
+      // Append text
+      await appendTextToEditorUntilContains(page, testText);
+    });
+
+    test('Write command (:w) should save the page successfully', async ({
+      page,
+    }) => {
+      // Enter command mode
+      await page.keyboard.type(':');
+      await expect(page.locator('.cm-vim-panel')).toBeVisible();
+
+      // Type write command and execute
+      await page.keyboard.type('w');
+      await page.keyboard.press('Enter');
+
+      // Expect a success toaster to be displayed
+      await expect(page.locator('.Toastify__toast--success')).toBeVisible();
+
+      // Restore keymap to default
+      await changeKeymap(page, 'default');
+    });
+  });

+ 3 - 8
apps/app/playwright/23-editor/with-navigation.spec.ts

@@ -1,7 +1,9 @@
-import { expect, type Page, test } from '@playwright/test';
+import { expect, test } from '@playwright/test';
 import { readFileSync } from 'fs';
 import { readFileSync } from 'fs';
 import path from 'path';
 import path from 'path';
 
 
+import { appendTextToEditorUntilContains } from '../utils/AppendTextToEditorUntilContains';
+
 /**
 /**
  * for the issues:
  * for the issues:
  * @see https://redmine.weseek.co.jp/issues/122040
  * @see https://redmine.weseek.co.jp/issues/122040
@@ -61,13 +63,6 @@ test('should not be cleared and should prevent GrantSelector from modified', asy
   );
   );
 });
 });
 
 
-const appendTextToEditorUntilContains = async (page: Page, text: string) => {
-  await page.locator('.cm-content').fill(text);
-  await expect(page.getByTestId('page-editor-preview-body')).toContainText(
-    text,
-  );
-};
-
 /**
 /**
  * for the issue:
  * for the issue:
  * @see https://redmine.weseek.co.jp/issues/115285
  * @see https://redmine.weseek.co.jp/issues/115285

+ 67 - 0
apps/app/playwright/30-search/search.spect.ts → apps/app/playwright/30-search/search.spec.ts

@@ -239,3 +239,70 @@ test('Search current tree by word is successfully loaded', async ({ page }) => {
   await expect(page.getByTestId('search-result-list')).toBeVisible();
   await expect(page.getByTestId('search-result-list')).toBeVisible();
   await expect(page.getByTestId('search-result-content')).toBeVisible();
   await expect(page.getByTestId('search-result-content')).toBeVisible();
 });
 });
+
+test.describe('Search result navigation and repeated search', () => {
+  test('Repeated search works', async ({ page }) => {
+    // Step 1: Start from the home page and reload to clear any state
+    await page.goto('/');
+    await page.reload();
+
+    // Step 2: Open search modal and search for "sandbox"
+    await page.getByTestId('open-search-modal-button').click();
+    await expect(page.getByTestId('search-modal')).toBeVisible();
+    await page.locator('.form-control').fill('sandbox');
+
+    // Step 3: Submit the search by clicking on "search in all" menu item
+    await expect(page.getByTestId('search-all-menu-item')).toBeVisible();
+    await page.getByTestId('search-all-menu-item').click();
+
+    // Step 4: Verify that the search page is displayed with results
+    await expect(page.getByTestId('search-result-base')).toBeVisible();
+    await expect(page.getByTestId('search-result-list')).toBeVisible();
+    await expect(page.getByTestId('search-result-content')).toBeVisible();
+    await expect(page).toHaveURL(/\/_search\?q=sandbox/);
+
+    // Step 5: Click on the first search result to navigate to a page
+    const sandboxPageLink = page
+      .getByTestId('search-result-list')
+      .getByRole('link', { name: 'Sandbox' })
+      .first();
+    await sandboxPageLink.click();
+    await expect(page).toHaveTitle(/Sandbox/);
+
+    // Step 6: Wait for leaving search results and verify page content is displayed
+    await expect(page.getByTestId('search-result-base')).not.toBeVisible();
+    // Verify page body is rendered (not empty due to stale atom data)
+    await expect(page.locator('.wiki')).toBeVisible();
+    await expect(page.locator('.wiki')).not.toBeEmpty();
+
+    // Step 7: From the navigated page, open search modal again
+    await page.getByTestId('open-search-modal-button').click();
+    await expect(page.getByTestId('search-modal')).toBeVisible();
+
+    // Step 8: Search for the same keyword ("sandbox")
+    await page.locator('.form-control').fill('sandbox');
+
+    // Step 9: Submit the search by clicking on "search in all" menu item
+    await expect(page.getByTestId('search-all-menu-item')).toBeVisible();
+    await page.getByTestId('search-all-menu-item').click();
+
+    // Step 10: Verify that the search page is displayed with results
+    await expect(page.getByTestId('search-result-base')).toBeVisible();
+    await expect(page.getByTestId('search-result-list')).toBeVisible();
+    await expect(page.getByTestId('search-result-content')).toBeVisible();
+    await expect(page).toHaveURL(/\/_search\?q=sandbox/);
+
+    // Step 11: Click on the second search result to navigate to a page
+    const mathPageLink = page
+      .getByTestId('search-result-list')
+      .getByRole('link', { name: 'Math' })
+      .first();
+    await mathPageLink.click();
+    // and verify the page that is not Sandbox is loaded
+    await expect(page).not.toHaveTitle(/Sandbox/);
+
+    // Step 12: Verify page body is rendered (not empty due to stale atom data)
+    await expect(page.locator('.wiki')).toBeVisible();
+    await expect(page.locator('.wiki')).not.toBeEmpty();
+  });
+});

+ 6 - 3
apps/app/playwright/50-sidebar/access-to-sidebar.spec.ts

@@ -29,9 +29,12 @@ test.describe('Access to sidebar', () => {
   test('Successfully access to custom sidebar', async ({ page }) => {
   test('Successfully access to custom sidebar', async ({ page }) => {
     await page.getByTestId('grw-sidebar-nav-primary-custom-sidebar').click();
     await page.getByTestId('grw-sidebar-nav-primary-custom-sidebar').click();
     await expect(page.getByTestId('grw-sidebar-contents')).toBeVisible();
     await expect(page.getByTestId('grw-sidebar-contents')).toBeVisible();
-    await expect(
-      page.locator('.grw-sidebar-content-header > h3').locator('a'),
-    ).toBeVisible();
+
+    // Check if edit_note icon is visible within the button
+    const editNoteIcon = page
+      .locator('.grw-custom-sidebar-content button .material-symbols-outlined')
+      .filter({ hasText: 'edit_note' });
+    await expect(editNoteIcon).toBeVisible();
   });
   });
 
 
   test('Successfully access to GROWI Docs page', async ({ page }) => {
   test('Successfully access to GROWI Docs page', async ({ page }) => {

+ 11 - 0
apps/app/playwright/utils/AppendTextToEditorUntilContains.ts

@@ -0,0 +1,11 @@
+import { expect, type Page } from '@playwright/test';
+
+export const appendTextToEditorUntilContains = async (
+  page: Page,
+  text: string,
+) => {
+  await page.locator('.cm-content').fill(text);
+  await expect(page.getByTestId('page-editor-preview-body')).toContainText(
+    text,
+  );
+};

+ 14 - 0
apps/app/public/static/locales/en_US/admin.json

@@ -56,6 +56,11 @@
       "enable_force_delete_user_homepage_on_user_deletion": "When you delete a user, the user's homepage and all its sub pages will be completely deleted",
       "enable_force_delete_user_homepage_on_user_deletion": "When you delete a user, the user's homepage and all its sub pages will be completely deleted",
       "desc": "You will be able to delete a deleted user's homepage."
       "desc": "You will be able to delete a deleted user's homepage."
     },
     },
+    "disable_user_pages": {
+      "disable_user_pages": "Disable user pages",
+      "disable_user_pages_label": "Disable user pages",
+      "desc": "By disabling user pages, creating, viewing, editing, and duplicating all user pages will be disabled.<br>Additionally, user pages will not appear in page trees, recent changes, or search results."
+    },
     "session": "Session",
     "session": "Session",
     "max_age": "Max age (msec)",
     "max_age": "Max age (msec)",
     "max_age_desc": "Specifies the number (in milliseconds) to expire users session.<br>Default: 2592000000 (30days)",
     "max_age_desc": "Specifies the number (in milliseconds) to expire users session.<br>Default: 2592000000 (30days)",
@@ -371,6 +376,15 @@
     "transmission_method": "Transmission Method",
     "transmission_method": "Transmission Method",
     "smtp_label": "SMTP",
     "smtp_label": "SMTP",
     "ses_label": "SES(AWS)",
     "ses_label": "SES(AWS)",
+    "oauth2_label": "OAuth 2.0 (Google Workspace)",
+    "oauth2_description": "Configure OAuth 2.0 authentication for sending emails using Google Workspace. You need to create OAuth 2.0 credentials in Google Cloud Console and obtain a refresh token.",
+    "oauth2_user": "Email Address",
+    "oauth2_user_help": "The email address of the authorized Google account",
+    "oauth2_client_id": "Client ID",
+    "oauth2_client_secret": "Client Secret",
+    "oauth2_refresh_token": "Refresh Token",
+    "oauth2_refresh_token_help": "The refresh token obtained from OAuth 2.0 authorization flow",
+    "placeholder_leave_blank": "Leave blank to keep existing value",
     "send_test_email": "Send a test-email",
     "send_test_email": "Send a test-email",
     "success_to_send_test_email": "Success to send a test-email",
     "success_to_send_test_email": "Success to send a test-email",
     "smtp_settings": "SMTP settings",
     "smtp_settings": "SMTP settings",

+ 14 - 0
apps/app/public/static/locales/fr_FR/admin.json

@@ -56,6 +56,11 @@
       "enable_force_delete_user_homepage_on_user_deletion": "Supprimer la page d'accueil et ses pages enfants",
       "enable_force_delete_user_homepage_on_user_deletion": "Supprimer la page d'accueil et ses pages enfants",
       "desc": "Les pages d'accueil utilisateurs pourront être supprimées."
       "desc": "Les pages d'accueil utilisateurs pourront être supprimées."
     },
     },
+    "disable_user_pages": {
+      "disable_user_pages": "Désactiver les pages utilisateur",
+      "disable_user_pages_label": "Désactiver les pages utilisateur",
+      "desc": "En désactivant les pages utilisateur, la création, la consultation, la modification et la duplication de toutes les pages utilisateur seront désactivées.<br>De plus, les pages utilisateur n'apparaîtront pas dans l'arborescence des pages, les modifications récentes ou les résultats de recherche."
+    },
     "session": "Session",
     "session": "Session",
     "max_age": "Âge maximal (ms)",
     "max_age": "Âge maximal (ms)",
     "max_age_desc": "Spécifie (en milliseconde) l'âge maximal d'une session <br>Par défaut: 2592000000 (30 jours)",
     "max_age_desc": "Spécifie (en milliseconde) l'âge maximal d'une session <br>Par défaut: 2592000000 (30 jours)",
@@ -371,6 +376,15 @@
     "transmission_method": "Mode",
     "transmission_method": "Mode",
     "smtp_label": "SMTP",
     "smtp_label": "SMTP",
     "ses_label": "SES(AWS)",
     "ses_label": "SES(AWS)",
+    "oauth2_label": "OAuth 2.0 (Google Workspace)",
+    "oauth2_description": "Configurez l'authentification OAuth 2.0 pour envoyer des courriels en utilisant Google Workspace. Vous devez créer des identifiants OAuth 2.0 dans la console Google Cloud et obtenir un jeton de rafraîchissement.",
+    "oauth2_user": "Adresse courriel",
+    "oauth2_user_help": "L'adresse courriel du compte Google autorisé",
+    "oauth2_client_id": "ID client",
+    "oauth2_client_secret": "Secret client",
+    "oauth2_refresh_token": "Jeton de rafraîchissement",
+    "oauth2_refresh_token_help": "Le jeton de rafraîchissement obtenu à partir du flux d'autorisation OAuth 2.0",
+    "placeholder_leave_blank": "Laisser vide pour conserver la valeur existante",
     "send_test_email": "Courriel d'essai",
     "send_test_email": "Courriel d'essai",
     "success_to_send_test_email": "Courriel d'essai envoyé",
     "success_to_send_test_email": "Courriel d'essai envoyé",
     "smtp_settings": "Configuration SMTP",
     "smtp_settings": "Configuration SMTP",

+ 26 - 0
apps/app/public/static/locales/ja_JP/admin.json

@@ -65,6 +65,11 @@
       "enable_force_delete_user_homepage_on_user_deletion": "ユーザーを削除したとき、ユーザーホームページとその配下のページを完全削除する",
       "enable_force_delete_user_homepage_on_user_deletion": "ユーザーを削除したとき、ユーザーホームページとその配下のページを完全削除する",
       "desc": "削除済みユーザーのユーザーホームページを削除できるようになります。"
       "desc": "削除済みユーザーのユーザーホームページを削除できるようになります。"
     },
     },
+    "disable_user_pages": {
+      "disable_user_pages": "ユーザーページの無効化",
+      "disable_user_pages_label": "ユーザーページを無効にする",
+      "desc": "ユーザーページを無効にすることで、すべてのユーザーページに対する作成・閲覧・編集・複製ができなくなります。<br>また、ページツリーや最近の変更、検索結果などでもユーザーページが表示されなくなります。"
+    },
     "session": "セッション",
     "session": "セッション",
     "max_age": "有効期間 (ミリ秒)",
     "max_age": "有効期間 (ミリ秒)",
     "max_age_desc": "ユーザーのセッション情報の有効期間をミリ秒で指定できます。<br>デフォルト値: 2592000000 (30日間)",
     "max_age_desc": "ユーザーのセッション情報の有効期間をミリ秒で指定できます。<br>デフォルト値: 2592000000 (30日間)",
@@ -380,6 +385,15 @@
     "transmission_method": "送信方法",
     "transmission_method": "送信方法",
     "smtp_label": "SMTP",
     "smtp_label": "SMTP",
     "ses_label": "SES(AWS)",
     "ses_label": "SES(AWS)",
+    "oauth2_label": "OAuth 2.0 (Google Workspace)",
+    "oauth2_description": "Google Workspaceを使用してメールを送信するためのOAuth 2.0認証を設定します。Google Cloud ConsoleでOAuth 2.0認証情報を作成し、リフレッシュトークンを取得する必要があります。",
+    "oauth2_user": "メールアドレス",
+    "oauth2_user_help": "認証されたGoogleアカウントのメールアドレス",
+    "oauth2_client_id": "クライアントID",
+    "oauth2_client_secret": "クライアントシークレット",
+    "oauth2_refresh_token": "リフレッシュトークン",
+    "oauth2_refresh_token_help": "OAuth 2.0認証フローから取得したリフレッシュトークン",
+    "placeholder_leave_blank": "既存の値を保持する場合は空白のままにしてください",
     "send_test_email": "テストメールを送信",
     "send_test_email": "テストメールを送信",
     "success_to_send_test_email": "テストメールを送信しました。",
     "success_to_send_test_email": "テストメールを送信しました。",
     "smtp_settings": "SMTP設定",
     "smtp_settings": "SMTP設定",
@@ -451,6 +465,18 @@
       "tag_names": "タグ名",
       "tag_names": "タグ名",
       "tag_attributes": "タグ属性",
       "tag_attributes": "タグ属性",
       "import_recommended": "{{target}} のおすすめをインポート"
       "import_recommended": "{{target}} のおすすめをインポート"
+    },
+    "content-disposition_header": "Content-Disposition MIMEタイプ設定",
+    "content-disposition_options": {
+      "add_header": "MIMEタイプを追加する",
+      "note": "注意: MIMEタイプを追加すると、もう一方のリストからは自動的に削除されます。",
+      "inline_header": "インライン MIMEタイプ",
+      "attachment_header": "添付ファイル MIMEタイプ",
+      "inline_button": "インライン",
+      "no_inline": "設定済みのインラインタイプはありません。",
+      "no_attachment": "設定済みの添付ファイルタイプはありません。",
+      "attachment_button": "添付ファイル",
+      "remove_button": "削除"
     }
     }
   },
   },
   "customize_settings": {
   "customize_settings": {

Некоторые файлы не были показаны из-за большого количества измененных файлов