Просмотр исходного кода

Merge remote-tracking branch 'origin/master' into prisma

mizozobu 2 дня назад
Родитель
Commit
441cf674ce
100 изменённых файлов: 8246 добавлений и 936 удалений
  1. 21 0
      .claude/rules/coding-style.md
  2. 28 0
      .claude/skills/monorepo-overview/SKILL.md
  3. 0 2
      .github/dependabot.yml
  4. 0 2
      .github/workflows/ci-app-prod.yml
  5. 5 10
      .github/workflows/release-subpackages.yml
  6. 3 3
      .github/workflows/reusable-app-prod.yml
  7. 0 88
      .github/workflows/reusable-app-reg-suit.yml
  8. 466 0
      .kiro/specs/auto-scroll/design.md
  9. 91 0
      .kiro/specs/auto-scroll/requirements.md
  10. 246 0
      .kiro/specs/auto-scroll/research.md
  11. 24 0
      .kiro/specs/auto-scroll/spec.json
  12. 154 0
      .kiro/specs/auto-scroll/tasks.md
  13. 670 0
      .kiro/specs/migrate-logger-to-pino/design.md
  14. 156 0
      .kiro/specs/migrate-logger-to-pino/requirements.md
  15. 224 0
      .kiro/specs/migrate-logger-to-pino/research.md
  16. 23 0
      .kiro/specs/migrate-logger-to-pino/spec.json
  17. 263 0
      .kiro/specs/migrate-logger-to-pino/tasks.md
  18. 108 0
      .kiro/specs/news-inappnotification/requirements.md
  19. 22 0
      .kiro/specs/news-inappnotification/spec.json
  20. 359 0
      .kiro/specs/suggest-path/design.md
  21. 77 0
      .kiro/specs/suggest-path/requirements.md
  22. 145 0
      .kiro/specs/suggest-path/research.md
  23. 4 3
      .kiro/specs/suggest-path/spec.json
  24. 82 0
      .kiro/specs/suggest-path/tasks.md
  25. 0 262
      .kiro/specs/upgrade-fixed-packages/design.md
  26. 0 75
      .kiro/specs/upgrade-fixed-packages/requirements.md
  27. 0 183
      .kiro/specs/upgrade-fixed-packages/research.md
  28. 0 89
      .kiro/specs/upgrade-fixed-packages/tasks.md
  29. 39 1
      CHANGELOG.md
  30. 2 3
      README.md
  31. 2 3
      README_JP.md
  32. 1 4
      apps/app/.claude/skills/build-optimization/SKILL.md
  33. 2 0
      apps/app/.gitignore
  34. 4 0
      apps/app/bin/openapi/definition-apiv3.js
  35. 1 0
      apps/app/bin/openapi/generate-spec-apiv3.sh
  36. 42 0
      apps/app/bin/postbuild-server.ts
  37. 5 1
      apps/app/config/logger/config.dev.ts
  38. 5 1
      apps/app/config/logger/config.prod.ts
  39. 1 1
      apps/app/docker/README.md
  40. 0 3
      apps/app/next.config.ts
  41. 4 11
      apps/app/package.json
  42. 15 3
      apps/app/public/static/locales/en_US/admin.json
  43. 25 1
      apps/app/public/static/locales/en_US/translation.json
  44. 15 3
      apps/app/public/static/locales/fr_FR/admin.json
  45. 25 1
      apps/app/public/static/locales/fr_FR/translation.json
  46. 15 3
      apps/app/public/static/locales/ja_JP/admin.json
  47. 25 1
      apps/app/public/static/locales/ja_JP/translation.json
  48. 15 3
      apps/app/public/static/locales/ko_KR/admin.json
  49. 25 1
      apps/app/public/static/locales/ko_KR/translation.json
  50. 15 3
      apps/app/public/static/locales/zh_CN/admin.json
  51. 25 1
      apps/app/public/static/locales/zh_CN/translation.json
  52. 0 26
      apps/app/regconfig.json
  53. 21 11
      apps/app/src/client/components/Admin/App/AzureSetting.tsx
  54. 64 39
      apps/app/src/client/components/Admin/App/FileUploadSetting.tsx
  55. 18 11
      apps/app/src/client/components/Admin/App/GcsSetting.tsx
  56. 214 0
      apps/app/src/client/components/Admin/AuditLog/AuditLogExportModal.tsx
  57. 39 0
      apps/app/src/client/components/Admin/AuditLog/DuplicateExportConfirmModal.tsx
  58. 12 1
      apps/app/src/client/components/Admin/AuditLog/SearchUsernameTypeahead.tsx
  59. 67 0
      apps/app/src/client/components/Admin/AuditLog/useAuditLogExport.ts
  60. 23 0
      apps/app/src/client/components/Admin/AuditLogManagement.tsx
  61. 1 0
      apps/app/src/client/components/Admin/UserManagement.tsx
  62. 8 1
      apps/app/src/client/components/Admin/Users/UserTable.tsx
  63. 98 0
      apps/app/src/client/components/InAppNotification/ModelNotification/AuditLogBulkExportJobModelNotification.tsx
  64. 5 1
      apps/app/src/client/components/InAppNotification/ModelNotification/ModelNotification.tsx
  65. 5 1
      apps/app/src/client/components/InAppNotification/ModelNotification/index.tsx
  66. 13 0
      apps/app/src/client/components/InAppNotification/ModelNotification/useActionAndMsg.ts
  67. 15 5
      apps/app/src/client/components/NotAvailable.tsx
  68. 1 1
      apps/app/src/client/components/PageComment/Comment.module.scss
  69. 20 2
      apps/app/src/client/components/PageCreateModal.tsx
  70. 6 8
      apps/app/src/client/components/PageEditor/PageEditor.tsx
  71. 1 1
      apps/app/src/client/components/RecentActivity/RecentActivity.tsx
  72. 8 0
      apps/app/src/client/components/Sidebar/PageCreateButton/CreateButton.module.scss
  73. 4 2
      apps/app/src/client/components/Sidebar/PageCreateButton/DropendToggle.module.scss
  74. 5 6
      apps/app/src/client/components/Sidebar/PageCreateButton/Hexagon.tsx
  75. 41 1
      apps/app/src/client/components/Sidebar/SidebarNav/PrimaryItems.tsx
  76. 2 2
      apps/app/src/client/components/StickyStretchableScroller.tsx
  77. 4 4
      apps/app/src/client/services/AdminUsersContainer.js
  78. 6 0
      apps/app/src/client/services/renderer/renderer.tsx
  79. 201 0
      apps/app/src/client/util/watch-rendering-and-rescroll.spec.tsx
  80. 84 0
      apps/app/src/client/util/watch-rendering-and-rescroll.ts
  81. 4 45
      apps/app/src/components/PageView/PageView.tsx
  82. 329 0
      apps/app/src/components/PageView/use-hash-auto-scroll.spec.tsx
  83. 106 0
      apps/app/src/components/PageView/use-hash-auto-scroll.ts
  84. 7 3
      apps/app/src/features/admin/states/socket-io.ts
  85. 10 0
      apps/app/src/features/ai-tools/server/routes/apiv3/index.ts
  86. 62 0
      apps/app/src/features/ai-tools/suggest-path/interfaces/suggest-path-types.ts
  87. 909 0
      apps/app/src/features/ai-tools/suggest-path/server/integration-tests/suggest-path-integration.spec.ts
  88. 182 0
      apps/app/src/features/ai-tools/suggest-path/server/routes/apiv3/index.spec.ts
  89. 176 0
      apps/app/src/features/ai-tools/suggest-path/server/routes/apiv3/index.ts
  90. 390 0
      apps/app/src/features/ai-tools/suggest-path/server/services/analyze-content.spec.ts
  91. 51 0
      apps/app/src/features/ai-tools/suggest-path/server/services/analyze-content.ts
  92. 60 0
      apps/app/src/features/ai-tools/suggest-path/server/services/call-llm-for-json.ts
  93. 511 0
      apps/app/src/features/ai-tools/suggest-path/server/services/evaluate-candidates.spec.ts
  94. 115 0
      apps/app/src/features/ai-tools/suggest-path/server/services/evaluate-candidates.ts
  95. 170 0
      apps/app/src/features/ai-tools/suggest-path/server/services/generate-category-suggestion.spec.ts
  96. 37 0
      apps/app/src/features/ai-tools/suggest-path/server/services/generate-category-suggestion.ts
  97. 136 0
      apps/app/src/features/ai-tools/suggest-path/server/services/generate-memo-suggestion.spec.ts
  98. 41 0
      apps/app/src/features/ai-tools/suggest-path/server/services/generate-memo-suggestion.ts
  99. 383 0
      apps/app/src/features/ai-tools/suggest-path/server/services/generate-suggestions.spec.ts
  100. 102 0
      apps/app/src/features/ai-tools/suggest-path/server/services/generate-suggestions.ts

+ 21 - 0
.claude/rules/coding-style.md

@@ -201,6 +201,27 @@ Implemented react-window for virtualizing page tree
 to improve performance with 10k+ pages.
 ```
 
+## Cross-Platform Compatibility
+
+GROWI must work on Windows, macOS, and Linux. Never use platform-specific shell commands in npm scripts.
+
+```json
+// ❌ WRONG: Unix-only commands in npm scripts
+"clean": "rm -rf dist",
+"copy": "cp src/foo.ts dist/foo.ts",
+"move": "mv src dist"
+
+// ✅ CORRECT: Cross-platform tools
+"clean": "rimraf dist",
+"copy": "node -e \"require('fs').cpSync('src/foo.ts','dist/foo.ts')\"",
+"move": "node -e \"require('fs').renameSync('src','dist')\""
+```
+
+**Rules**:
+- Use `rimraf` instead of `rm -rf`
+- Use Node.js one-liners or cross-platform tools (`cpy-cli`, `cpx2`) instead of `cp`, `mv`, `echo`, `ls`
+- Never assume a POSIX shell in npm scripts
+
 ## Code Quality Checklist
 
 Before marking work complete:

+ 28 - 0
.claude/skills/monorepo-overview/SKILL.md

@@ -64,6 +64,34 @@ turbo run test --filter @growi/app
 turbo run lint --filter @growi/core
 ```
 
+### Build Order Management
+
+Build dependencies in this monorepo are **not** declared with `dependsOn: ["^build"]` (the automatic workspace-dependency mode). Instead, they are declared **explicitly** — either in the root `turbo.json` for legacy entries, or in per-package `turbo.json` files for newer packages.
+
+**When to update**: whenever a package gains a new workspace dependency on another buildable package (one that produces a `dist/`), declare the build-order dependency explicitly. Without it, Turborepo may build in the wrong order, causing missing `dist/` files or type errors.
+
+**Pattern — per-package `turbo.json`** (preferred for new dependencies):
+
+```json
+// packages/my-package/turbo.json
+{
+  "extends": ["//"],
+  "tasks": {
+    "build": { "dependsOn": ["@growi/some-dep#build"] },
+    "dev":   { "dependsOn": ["@growi/some-dep#dev"] }
+  }
+}
+```
+
+- `"extends": ["//"]` inherits all root task definitions; only add the extra `dependsOn`
+- Keep root `turbo.json` clean — package-level overrides live with the package that owns the dependency
+- For packages with multiple tasks (watch, lint, test), mirror the dependency in each relevant task
+
+**Existing examples**:
+- `packages/slack/turbo.json` — `build`/`dev` depend on `@growi/logger`
+- `packages/remark-attachment-refs/turbo.json` — all tasks depend on `@growi/core`, `@growi/logger`, `@growi/remark-growi-directive`, `@growi/ui`
+- Root `turbo.json` — `@growi/ui#build` depends on `@growi/core#build` (pre-dates the per-package pattern)
+
 ## Architectural Principles
 
 ### 1. Feature-Based Architecture (Recommended)

+ 0 - 2
.github/dependabot.yml

@@ -24,8 +24,6 @@ updates:
       prefix: ci
       include: scope
     ignore:
-      - dependency-name: escape-string-regexp
-      - dependency-name: string-width
       - dependency-name: "@handsontable/react"
       - dependency-name: handsontable
       - dependency-name: typeorm

+ 0 - 2
.github/workflows/ci-app-prod.yml

@@ -9,7 +9,6 @@ on:
       - .github/mergify.yml
       - .github/workflows/ci-app-prod.yml
       - .github/workflows/reusable-app-prod.yml
-      - .github/workflows/reusable-app-reg-suit.yml
       - tsconfig.base.json
       - turbo.json
       - pnpm-lock.yaml
@@ -23,7 +22,6 @@ on:
       - .github/mergify.yml
       - .github/workflows/ci-app-prod.yml
       - .github/workflows/reusable-app-prod.yml
-      - .github/workflows/reusable-app-reg-suit.yml
       - tsconfig.base.json
       - pnpm-lock.yaml
       - turbo.json

+ 5 - 10
.github/workflows/release-subpackages.yml

@@ -14,6 +14,11 @@ on:
     branches:
       - master
 
+permissions:
+  id-token: write
+  contents: write
+  pull-requests: write
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
@@ -40,14 +45,6 @@ jobs:
         pnpm add turbo --global
         pnpm install --frozen-lockfile
 
-    - name: Setup .npmrc
-      run: |
-        cat << EOF > "$HOME/.npmrc"
-          //registry.npmjs.org/:_authToken=$NPM_TOKEN
-        EOF
-      env:
-        NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
-
     - name: Retrieve changesets information
       id: changesets-status
       run: |
@@ -61,7 +58,6 @@ jobs:
         pnpm run release-subpackages:snapshot
       env:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
 
 
   release-subpackages:
@@ -92,4 +88,3 @@ jobs:
         publish: pnpm run release-subpackages
       env:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        NPM_TOKEN: ${{ secrets.NPM_TOKEN }}

+ 3 - 3
.github/workflows/reusable-app-prod.yml

@@ -236,7 +236,7 @@ jobs:
         HOME: /root # ref: https://github.com/microsoft/playwright/issues/6500
         GROWI_WEBSERVER_COMMAND: 'cd /tmp/growi-prod/apps/app && pnpm run server'
         MONGO_URI: mongodb://mongodb:27017/growi-playwright-installer
-        ELASTICSEARCH_URI: http://localhost:${{ job.services.elasticsearch.ports['9200'] }}/growi
+        ELASTICSEARCH_URI: http://elasticsearch:9200/growi
 
     - name: Copy dotenv file for automatic installation
       run: |
@@ -251,7 +251,7 @@ jobs:
         HOME: /root # ref: https://github.com/microsoft/playwright/issues/6500
         GROWI_WEBSERVER_COMMAND: 'cd /tmp/growi-prod/apps/app && pnpm run server'
         MONGO_URI: mongodb://mongodb:27017/growi-playwright
-        ELASTICSEARCH_URI: http://localhost:${{ job.services.elasticsearch.ports['9200'] }}/growi
+        ELASTICSEARCH_URI: http://elasticsearch:9200/growi
 
     - name: Copy dotenv file for automatic installation with allowing guest mode
       run: |
@@ -266,7 +266,7 @@ jobs:
         HOME: /root # ref: https://github.com/microsoft/playwright/issues/6500
         GROWI_WEBSERVER_COMMAND: 'cd /tmp/growi-prod/apps/app && pnpm run server'
         MONGO_URI: mongodb://mongodb:27017/growi-playwright-guest-mode
-        ELASTICSEARCH_URI: http://localhost:${{ job.services.elasticsearch.ports['9200'] }}/growi
+        ELASTICSEARCH_URI: http://elasticsearch:9200/growi
 
     - name: Generate shard ID
       id: shard-id

+ 0 - 88
.github/workflows/reusable-app-reg-suit.yml

@@ -1,88 +0,0 @@
-name: Reusable VRT reporting workflow for production
-
-on:
-  workflow_call:
-    inputs:
-      node-version:
-        required: true
-        type: string
-      checkout-ref:
-        type: string
-        default: ${{ github.head_ref }}
-      skip-reg-suit:
-        type: boolean
-      cypress-report-artifact-name-pattern:
-        required: true
-        type: string
-    secrets:
-      REG_NOTIFY_GITHUB_PLUGIN_CLIENTID:
-        required: true
-      AWS_ACCESS_KEY_ID:
-        required: true
-      AWS_SECRET_ACCESS_KEY:
-        required: true
-      SLACK_WEBHOOK_URL:
-        required: true
-    outputs:
-      EXPECTED_IMAGES_EXIST:
-        value: ${{ jobs.run-reg-suit.outputs.EXPECTED_IMAGES_EXIST }}
-
-
-jobs:
-
-  run-reg-suit:
-    # use secrets for "VRT" environment
-    # https://github.com/growilabs/growi/settings/environments/376165508/edit
-    environment: VRT
-
-    if: ${{ !inputs.skip-reg-suit }}
-
-    env:
-      REG_NOTIFY_GITHUB_PLUGIN_CLIENTID: ${{ secrets.REG_NOTIFY_GITHUB_PLUGIN_CLIENTID }}
-      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
-      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-
-    runs-on: ubuntu-latest
-
-    outputs:
-      EXPECTED_IMAGES_EXIST: ${{ steps.check-expected-images.outputs.EXPECTED_IMAGES_EXIST }}
-
-    steps:
-    - uses: actions/checkout@v4
-      with:
-        ref: ${{ inputs.checkout-ref }}
-        fetch-depth: 0
-
-    - uses: pnpm/action-setup@v4
-
-    - uses: actions/setup-node@v4
-      with:
-        node-version: ${{ inputs.node-version }}
-        cache: 'pnpm'
-
-    - name: Install dependencies
-      run: |
-        pnpm install --frozen-lockfile
-
-    - name: Download screenshots taken by cypress
-      uses: actions/download-artifact@v4
-      with:
-        path: apps/app/test/cypress
-        pattern: ${{ inputs.cypress-report-artifact-name-pattern }}
-        merge-multiple: true
-
-    - name: Run reg-suit
-      working-directory: ./apps/app
-      run: |
-        pnpm run reg:run
-
-    - name: Slack Notification
-      uses: weseek/ghaction-slack-notification@master
-      if: failure()
-      with:
-        type: ${{ job.status }}
-        job_name: '*Node CI for growi - run-reg-suit (${{ inputs.node-version }})*'
-        channel: '#ci'
-        isCompactMode: true
-        url: ${{ secrets.SLACK_WEBHOOK_URL }}

+ 466 - 0
.kiro/specs/auto-scroll/design.md

@@ -0,0 +1,466 @@
+# Design Document: auto-scroll
+
+## Overview
+
+**Purpose**: This feature provides a reusable hash-based auto-scroll mechanism that handles lazy-rendered content across GROWI's Markdown views. It compensates for layout shifts caused by asynchronous component rendering (e.g., Drawio diagrams, Mermaid charts, PlantUML images) by detecting in-progress renders and re-scrolling to the target.
+
+**Users**: End users navigating to hash-linked sections benefit from reliable scroll positioning. Developers integrating the hook into new views (PageView, SearchResultContent, future views) benefit from a standardized, configurable API.
+
+**Impact**: Refactors the existing `useHashAutoScroll` hook from a PageView-specific implementation into a shared, configurable hook. Renames and updates the rendering status attribute protocol for clarity and declarative usage. Also integrates hash-based auto-scroll into `SearchResultContent`, where the content pane has an independent scroll container.
+
+### Goals
+- Provide a single reusable hook for hash-based auto-scroll across all content views
+- Support customizable target resolution and scroll behavior per caller
+- Establish a clear, declarative rendering-status attribute protocol for async-rendering components
+- Maintain robust resource cleanup with timeout-based safety bounds
+- Integrate `SearchResultContent` as a second consumer with container-relative scroll strategy
+
+### Non-Goals
+- Adding `data-growi-is-content-rendering` to attachment-refs (Ref/Refs/RefImg/RefsImg/Gallery), or RichAttachment — these also cause layout shifts but require more complex integration; deferred to follow-up
+- Replacing SearchResultContent's keyword-highlight scroll with hash-based scroll (search pages have no URL hash)
+- Supporting non-browser environments (SSR) — this is a client-only hook
+
+## Architecture
+
+### Existing Architecture Analysis
+
+The current implementation lives in `apps/app/src/components/PageView/use-hash-auto-scroll.tsx`, tightly coupled to PageView via:
+- Hardcoded `document.getElementById(targetId)` for target resolution
+- Hardcoded `element.scrollIntoView()` for scroll execution
+- First parameter named `pageId` implying page-specific usage
+
+The rendering attribute `data-growi-rendering` is defined in `@growi/core` and consumed by:
+- `remark-drawio` (sets attribute on render start, removes on completion)
+- `use-hash-auto-scroll` (observes attribute presence via MutationObserver)
+
+### Architecture Pattern & Boundary Map
+
+> **Note**: This diagram reflects the final architecture after Task 8 module reorganization. See "Task 8 Design" section below for the migration details.
+
+```mermaid
+graph TB
+    subgraph growi_core[growi core]
+        CONST[Rendering Status Constants]
+    end
+
+    subgraph shared_util[src/client/util]
+        WATCH[watchRenderingAndReScroll]
+    end
+
+    subgraph page_view[src/components/PageView]
+        UHAS[useHashAutoScroll]
+        PV[PageView]
+    end
+
+    subgraph search[features/search/.../SearchPage]
+        UKR[useKeywordRescroll]
+        SRC[SearchResultContent]
+    end
+
+    subgraph renderers[Async Renderers]
+        DV[DrawioViewer]
+        MV[MermaidViewer]
+        PUV[PlantUmlViewer]
+        LSX[Lsx]
+    end
+
+    PV -->|calls| UHAS
+    UHAS -->|imports| WATCH
+    SRC -->|calls| UKR
+    UKR -->|imports| WATCH
+    WATCH -->|queries| CONST
+    DV -->|sets/toggles| CONST
+    MV -->|sets/toggles| CONST
+    PUV -->|sets/toggles| CONST
+    LSX -->|sets/toggles| CONST
+```
+
+**Architecture Integration**:
+- Selected pattern: Co-located hooks per consumer + shared utility function — idiomatic React, testable, minimal coupling
+- Domain boundaries: `watchRenderingAndReScroll` (shared pure function) in `src/client/util/`, consumer-specific hooks co-located with their components, constants in `@growi/core`, attribute lifecycle in each renderer package
+- Existing patterns preserved: MutationObserver + polling hybrid, timeout-based safety bounds
+- Steering compliance: Named exports, immutable patterns, co-located tests
+
+**Co-location rationale**: `watchRenderingAndReScroll` lives in `src/client/util/` (not `hooks/`) because it is a plain function, not a React hook — co-located with `smooth-scroll.ts` as both are DOM scroll utilities. `useHashAutoScroll` lives next to `PageView.tsx` because it is hash-navigation–specific (`window.location.hash`) and PageView is its only consumer. `useKeywordRescroll` lives next to `SearchResultContent.tsx` for the same reason. The old `src/client/hooks/use-content-auto-scroll/` shared directory was removed because the hook was never truly shared — only the underlying utility function was.
+
+### Technology Stack
+
+| Layer | Choice / Version | Role in Feature | Notes |
+|-------|------------------|-----------------|-------|
+| Frontend | React 18 hooks (`useEffect`) | Hook lifecycle management | No new dependencies |
+| Browser API | MutationObserver, `setTimeout`, `requestAnimationFrame` | DOM observation, polling, and layout timing | Standard Web APIs |
+| Shared Constants | `@growi/core` | Rendering attribute definitions | Existing package |
+
+No new external dependencies are introduced.
+
+## System Flows
+
+### Auto-Scroll Lifecycle
+
+```mermaid
+sequenceDiagram
+    participant Caller as Content View (PageView)
+    participant Hook as useHashAutoScroll
+    participant DOM as DOM
+    participant Watch as watchRenderingAndReScroll
+
+    Caller->>Hook: useHashAutoScroll options
+    Hook->>Hook: Guard checks key, hash, container
+
+    alt Target exists in DOM
+        Hook->>DOM: resolveTarget
+        DOM-->>Hook: HTMLElement
+        Hook->>DOM: scrollTo target
+        Hook->>Watch: start rendering watch (always)
+    else Target not yet in DOM
+        Hook->>DOM: MutationObserver on container
+        DOM-->>Hook: target appears
+        Hook->>DOM: scrollTo target
+        Hook->>Watch: start rendering watch (always)
+    end
+
+    Note over Watch: MutationObserver detects rendering elements,<br/>including those that mount after the initial scroll
+
+    loop While rendering elements exist and within timeout
+        Watch->>DOM: query rendering-status attr
+        DOM-->>Watch: elements found
+        Watch-->>Watch: wait 5s
+        Watch->>DOM: scrollTo target
+    end
+
+    Note over Watch: Auto-cleanup after 10s timeout
+```
+
+Key decisions:
+- The two-phase approach (target observation → rendering watch) runs sequentially.
+- The rendering watch uses a non-resetting timer to prevent starvation from rapid DOM mutations.
+- **The rendering watch always starts after the initial scroll**, regardless of whether rendering elements exist at that moment. This is necessary because async renderers (Mermaid loaded via `dynamic()`, PlantUML images) may mount into the DOM *after* the hook's effect runs. The MutationObserver inside `watchRenderingAndReScroll` (`childList: true, subtree: true`) detects these late-mounting elements.
+
+## Requirements Traceability
+
+| Requirement | Summary | Components | Interfaces | Flows |
+|-------------|---------|------------|------------|-------|
+| 1.1, 1.2 | Immediate scroll to hash target | useHashAutoScroll | UseHashAutoScrollOptions.resolveTarget | Auto-Scroll Lifecycle |
+| 1.3, 1.4, 1.5 | Guard conditions | useHashAutoScroll | UseHashAutoScrollOptions.key, contentContainerId | — |
+| 2.1, 2.2, 2.3 | Deferred scroll for lazy targets | useHashAutoScroll (target observer) | — | Auto-Scroll Lifecycle |
+| 3.1–3.6 | Re-scroll after rendering | watchRenderingAndReScroll | scrollToTarget callback | Auto-Scroll Lifecycle |
+| 4.1–4.7 | Rendering attribute protocol | Rendering Status Constants, DrawioViewer, MermaidViewer, PlantUmlViewer, Lsx | GROWI_IS_CONTENT_RENDERING_ATTR | — |
+| 4.8 | ResizeObserver re-render cycle | DrawioViewer | GROWI_IS_CONTENT_RENDERING_ATTR | — |
+| 5.1–5.5 | Page-type agnostic design | watchRenderingAndReScroll (shared), useHashAutoScroll (PageView), useKeywordRescroll (Search) | — | — |
+| 5.6, 5.7, 6.1–6.3 | Cleanup and safety | useHashAutoScroll, useKeywordRescroll, watchRenderingAndReScroll | cleanup functions | — |
+
+## Components and Interfaces
+
+| Component | Domain/Layer | Intent | Req Coverage | Key Dependencies | Contracts |
+|-----------|--------------|--------|--------------|------------------|-----------|
+| useHashAutoScroll | src/components/PageView | Hash-based auto-scroll hook for PageView with configurable target resolution and scroll behavior | 1, 2, 5, 6 | watchRenderingAndReScroll (P0), Rendering Status Constants (P1) | Service |
+| useKeywordRescroll | features/search/.../SearchPage | Keyword-highlight scroll hook with rendering watch integration for SearchResultContent | 5, 6 | watchRenderingAndReScroll (P0), scrollWithinContainer (P0) | Service |
+| watchRenderingAndReScroll | src/client/util | Shared utility: polls for rendering-status attributes and re-scrolls until complete or timeout | 3, 6 | Rendering Status Constants (P0) | Service |
+| Rendering Status Constants | @growi/core | Shared attribute name, value, and selector constants | 4 | None | State |
+| DrawioViewer (modification) | remark-drawio | Declarative rendering-status attribute toggle | 4.3, 4.4, 4.8 | Rendering Status Constants (P0) | State |
+| MermaidViewer (modification) | features/mermaid | Add rendering-status attribute lifecycle to async SVG render | 4.3, 4.4, 4.7 | Rendering Status Constants (P0) | State |
+| PlantUmlViewer (new) | features/plantuml | Wrap PlantUML `<img>` to provide rendering-status attribute lifecycle | 4.3, 4.4, 4.7 | Rendering Status Constants (P0) | State |
+| Lsx (modification) | remark-lsx | Add rendering-status attribute lifecycle to async page list fetch | 4.3, 4.4, 4.7 | Rendering Status Constants (P0) | State |
+
+### Client Hooks
+
+#### useHashAutoScroll
+
+| Field | Detail |
+|-------|--------|
+| Intent | Hash-based auto-scroll hook for PageView that scrolls to a target element identified by URL hash, with support for lazy-rendered content and customizable scroll behavior |
+| Requirements | 1.1–1.5, 2.1–2.3, 5.1–5.7, 6.1–6.3 |
+
+**Responsibilities & Constraints**
+- Orchestrates the full hash-based auto-scroll lifecycle: guard → resolve target → scroll → watch rendering
+- Always delegates to `watchRenderingAndReScroll` after the initial scroll — does **not** skip the watch even when no rendering elements are present at scroll time, because async renderers may mount later
+- Co-located with `PageView.tsx` — this hook is hash-navigation–specific (`window.location.hash`)
+
+**Dependencies**
+- Outbound: `watchRenderingAndReScroll` from `~/client/util/watch-rendering-and-rescroll` (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+/** Configuration for the hash-based auto-scroll hook */
+interface UseHashAutoScrollOptions {
+  /**
+   * Unique key that triggers re-execution when changed.
+   * When null/undefined, all scroll processing is skipped.
+   */
+  key: string | undefined | null;
+
+  /** DOM id of the content container element to observe */
+  contentContainerId: string;
+
+  /**
+   * Optional function to resolve the scroll target element.
+   * Receives the decoded hash string (without '#').
+   * Defaults to: (hash) => document.getElementById(hash)
+   */
+  resolveTarget?: (decodedHash: string) => HTMLElement | null;
+
+  /**
+   * Optional function to scroll to the target element.
+   * Defaults to: (el) => el.scrollIntoView()
+   */
+  scrollTo?: (target: HTMLElement) => void;
+}
+
+/** Hook signature */
+function useHashAutoScroll(options: UseHashAutoScrollOptions): void;
+```
+
+- Preconditions: Called within a React component; browser environment with `window.location.hash` available
+- Postconditions: On unmount or key change, all observers and timers are cleaned up
+- Invariants: At most one target observer and one rendering watch active per hook instance
+
+**Implementation Notes**
+- File location: `apps/app/src/components/PageView/use-hash-auto-scroll.ts`
+- Test file: `apps/app/src/components/PageView/use-hash-auto-scroll.spec.tsx`
+- The `resolveTarget` and `scrollTo` callbacks should be wrapped in `useRef` to avoid re-triggering the effect when callback identity changes
+
+---
+
+#### useKeywordRescroll
+
+| Field | Detail |
+|-------|--------|
+| Intent | Keyword-highlight scroll hook for SearchResultContent that scrolls to the first `.highlighted-keyword` element and re-scrolls after async renderers settle |
+| Requirements | 5.1–5.7, 6.1–6.3 |
+
+**Responsibilities & Constraints**
+- MutationObserver on container for keyword highlight detection (debounced 500ms)
+- `watchRenderingAndReScroll` integration for async renderer layout shift compensation
+- Cleanup of both MO and rendering watch on key change or unmount
+- Co-located with `SearchResultContent.tsx`
+
+**Dependencies**
+- Outbound: `watchRenderingAndReScroll` from `~/client/util/watch-rendering-and-rescroll` (P0)
+- Outbound: `scrollWithinContainer` from `~/client/util/smooth-scroll` (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+interface UseKeywordRescrollOptions {
+  /** Ref to the scrollable container element */
+  scrollElementRef: RefObject<HTMLElement | null>;
+  /** Unique key that triggers re-execution (typically page._id) */
+  key: string;
+}
+
+function useKeywordRescroll(options: UseKeywordRescrollOptions): void;
+```
+
+- Preconditions: `scrollElementRef.current` is a mounted scroll container
+- Postconditions: On unmount or key change, MO disconnected, rendering watch cleaned up, debounce cancelled
+
+**Implementation Notes**
+- File location: `apps/app/src/features/search/client/components/SearchPage/use-keyword-rescroll.ts`
+- Test file: `apps/app/src/features/search/client/components/SearchPage/use-keyword-rescroll.spec.tsx`
+- Helper functions (`scrollToKeyword`, `scrollToTargetWithinContainer`) are defined in the hook file since only this hook uses them
+
+---
+
+#### watchRenderingAndReScroll
+
+| Field | Detail |
+|-------|--------|
+| Intent | Pure function (not a hook) that monitors rendering-status attributes and periodically re-scrolls until rendering completes or timeout. Shared utility consumed by both `useHashAutoScroll` and `useKeywordRescroll`. |
+| Requirements | 3.1–3.6, 6.1–6.3 |
+
+**Responsibilities & Constraints**
+- Sets up MutationObserver to detect rendering-status attribute changes **and** new rendering elements added to the DOM (childList + subtree)
+- Manages a non-resetting poll timer (5s interval)
+- Enforces a hard timeout (10s) to prevent unbounded observation
+- Returns a cleanup function
+
+**Dependencies**
+- External: `@growi/core` rendering status constants — attribute selector (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+/**
+ * Watches for elements with in-progress rendering status in the container.
+ * Periodically calls scrollToTarget while rendering elements remain.
+ * Returns a cleanup function that stops observation and clears timers.
+ */
+function watchRenderingAndReScroll(
+  contentContainer: HTMLElement,
+  scrollToTarget: () => boolean,
+): () => void;
+```
+
+- Preconditions: `contentContainer` is a mounted DOM element
+- Postconditions: Cleanup function disconnects observer, clears all timers
+- Invariants: At most one poll timer active at any time; stopped flag prevents post-cleanup execution
+
+**Implementation Notes**
+- File location: `apps/app/src/client/util/watch-rendering-and-rescroll.ts` (co-located with `smooth-scroll.ts`)
+- Test file: `apps/app/src/client/util/watch-rendering-and-rescroll.spec.tsx`
+- Add a `stopped` boolean flag checked inside timer callbacks to prevent race conditions between cleanup and queued timer execution
+- When `checkAndSchedule` detects that no rendering elements remain and a timer is currently active, cancel the active timer immediately — avoids a redundant re-scroll after rendering has already completed
+- The MutationObserver watches `childList`, `subtree`, and `attributes` (filtered to the rendering-status attribute) — the `childList` + `subtree` combination is what detects late-mounting async renderers
+- **Performance trade-off**: The function is always started regardless of whether rendering elements exist at call time. This means one MutationObserver + one 10s cleanup timeout run for every hash navigation, even on pages with no async renderers. The initial `checkAndSchedule()` call returns early if no rendering elements are present, so no poll timer is ever scheduled in that case — the only cost is the MutationObserver observation and the 10s cleanup timeout itself, which is acceptable.
+- **`querySelector` frequency**: The `checkAndSchedule` callback fires on every `childList` mutation (in addition to attribute changes). Each invocation runs `querySelector(GROWI_IS_CONTENT_RENDERING_SELECTOR)` on the container. This call is O(n) on the subtree but stops at the first match and is bounded by the 10s timeout, making it acceptable even for content-heavy pages.
+
+---
+
+### @growi/core Constants
+
+#### Rendering Status Constants
+
+| Field | Detail |
+|-------|--------|
+| Intent | Centralized constants for the rendering-status attribute name, values, and CSS selector |
+| Requirements | 4.1, 4.2, 4.6 |
+
+**Contracts**: State [x]
+
+##### State Management
+
+```typescript
+/** Attribute name applied to elements during async content rendering */
+const GROWI_IS_CONTENT_RENDERING_ATTR = 'data-growi-is-content-rendering' as const;
+
+/**
+ * CSS selector matching elements currently rendering.
+ * Matches only the "true" state, not completed ("false").
+ */
+const GROWI_IS_CONTENT_RENDERING_SELECTOR =
+  `[${GROWI_IS_CONTENT_RENDERING_ATTR}="true"]` as const;
+```
+
+- File location: `packages/core/src/consts/renderer.ts` (replaces existing constants)
+- Old constants (`GROWI_RENDERING_ATTR`, `GROWI_RENDERING_ATTR_SELECTOR`) are removed and replaced — no backward compatibility shim needed since all consumers are updated in the same change
+
+---
+
+### remark-drawio Modifications
+
+#### DrawioViewer (modification)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Adopt declarative attribute value toggling instead of imperative add/remove |
+| Requirements | 4.3, 4.4, 4.8 |
+
+**Implementation Notes**
+- Replace `removeAttribute(GROWI_RENDERING_ATTR)` calls with `setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'false')`
+- Initial JSX: `{[GROWI_IS_CONTENT_RENDERING_ATTR]: 'true'}` (unchanged pattern, new constant name)
+- Update `SUPPORTED_ATTRIBUTES` in `remark-drawio.ts` to use new constant name
+- Update sanitize option to allow the new attribute name
+- **ResizeObserver re-render cycle** (req 4.8): In the ResizeObserver handler, call `drawioContainerRef.current?.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true')` before `renderDrawioWithDebounce()`. The existing inner MutationObserver (childList) completion path already sets the attribute back to `"false"` after each render.
+
+---
+
+### MermaidViewer Modification
+
+#### MermaidViewer (modification)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Add rendering-status attribute lifecycle to async `mermaid.render()` SVG rendering |
+| Requirements | 4.3, 4.4, 4.7 |
+
+**Implementation Notes**
+- Set `data-growi-is-content-rendering="true"` on the container element at initial render (via JSX spread before `mermaid.render()` is called)
+- After `mermaid.render()` completes and SVG is injected via `innerHTML`, delay the `"false"` signal using **`requestAnimationFrame`** so that the browser can compute the SVG layout before the auto-scroll system re-scrolls. Setting `"false"` synchronously after `innerHTML` assignment would signal completion before the browser has determined the element's final dimensions.
+- Set attribute to `"false"` immediately (without rAF) in the error/catch path, since no layout shift is expected on error
+- Cancel the pending rAF on effect cleanup to prevent state updates on unmounted components
+- File: `apps/app/src/features/mermaid/components/MermaidViewer.tsx`
+- The mermaid remark plugin sanitize options must be updated to include the new attribute name
+
+---
+
+### PlantUmlViewer (new component)
+
+#### PlantUmlViewer
+
+| Field | Detail |
+|-------|--------|
+| Intent | Wrap PlantUML image rendering in a component that signals rendering status, enabling the auto-scroll system to compensate for the layout shift when the external image loads |
+| Requirements | 4.3, 4.4, 4.7 |
+
+**Background**: PlantUML diagrams are rendered as `<img>` tags pointing to an external PlantUML server. The image load is asynchronous and causes a layout shift. The previous implementation had no `data-growi-is-content-rendering` support, so layout shifts from PlantUML images were never compensated.
+
+**Implementation Notes**
+- New component at `apps/app/src/features/plantuml/components/PlantUmlViewer.tsx`
+- Wraps `<img>` in a `<div>` container with `data-growi-is-content-rendering="true"` initially
+- Sets attribute to `"false"` via `onLoad` and `onError` handlers on the `<img>` element
+- The plantuml remark plugin (`plantuml.ts`) is updated to output a custom `<plantuml src="...">` HAST element instead of a plain `<img>`. This allows the renderer to map the `plantuml` element to the `PlantUmlViewer` React component.
+- `sanitizeOption` is exported from the plantuml service and merged in `renderer.tsx` (same pattern as drawio and mermaid)
+- `PlantUmlViewer` is registered as `components.plantuml` in all view option generators (`generateViewOptions`, `generateSimpleViewOptions`, `generatePreviewOptions`)
+
+---
+
+### remark-lsx Modification
+
+#### Lsx (modification)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Add rendering-status attribute lifecycle to async SWR page list fetching |
+| Requirements | 4.3, 4.4, 4.7 |
+
+**Implementation Notes**
+- Set `data-growi-is-content-rendering="true"` on the outermost container element while `isLoading === true` (SWR fetch in progress)
+- Set attribute to `"false"` when data arrives — whether success, error, or empty result
+- Use declarative attribute binding via the existing `isLoading` state (no imperative DOM manipulation needed)
+- File: `packages/remark-lsx/src/client/components/Lsx.tsx`
+- The lsx remark plugin sanitize options must be updated to include the new attribute name
+- `@growi/core` must be added as a dependency of `remark-lsx` (same pattern as `remark-drawio`)
+- **SWR cache hit behavior**: When SWR returns a cached result immediately (`isLoading=false` on first render), the attribute starts at `"false"` and no re-scroll is triggered. This is correct: a cached result means the list renders without a layout shift, so no compensation is needed. The re-scroll mechanism only activates when `isLoading` starts as `"true"` (no cache) and transitions to `"false"` after the fetch completes.
+
+---
+
+### SearchResultContent Integration
+
+#### SearchResultContent (modification)
+
+| Field | Detail |
+|-------|--------|
+| Intent | Integrate rendering-watch into SearchResultContent's keyword scroll so that layout shifts from async renderers are compensated |
+| Requirements | 5.1, 5.4, 5.5, 6.1 |
+
+**Background**: `SearchResultContent` renders page content inside a div with `overflow-y-scroll` (`#search-result-content-body-container`). The keyword-highlight scroll mechanism was originally inlined as a `useEffect` with no dependency array and no cleanup.
+
+**Post-Implementation Correction**: The initial design (tasks 6.1–6.3) attempted to integrate `useContentAutoScroll` (hash-based) into SearchResultContent. This was architecturally incorrect — search pages use `/search?q=foo` with no URL hash, so the hash-driven hook would never activate. See `research.md` "Post-Implementation Finding" for details.
+
+**Final Architecture**: The keyword scroll effect was extracted into a dedicated `useKeywordRescroll` hook (co-located with SearchResultContent), which directly integrates `watchRenderingAndReScroll` for rendering compensation. No hash-based scroll is used in SearchResultContent.
+
+**Hook Call Site**
+
+```typescript
+useKeywordRescroll({ scrollElementRef, key: page._id });
+```
+
+- `scrollElementRef` is the existing React ref pointing to the scroll container
+- `key: page._id` triggers re-execution when the selected page changes
+- The hook internally handles MutationObserver setup, debounced keyword scroll, rendering watch, and full cleanup
+
+**File**: `apps/app/src/features/search/client/components/SearchPage/SearchResultContent.tsx`
+
+---
+
+## Error Handling
+
+### Error Strategy
+
+This feature operates entirely in the browser DOM layer with no server interaction. Errors are limited to DOM state mismatches.
+
+### Error Categories and Responses
+
+**Target Not Found** (2.3): If the hash target never appears within 10s, the observer disconnects silently. No error is surfaced to the user — this matches browser-native behavior for invalid hash links.
+
+**Container Not Found** (1.5): If the container element ID does not resolve, the hook returns immediately with no side effects.
+
+**Rendering Watch Timeout** (3.6): After 10s, all observers and timers are cleaned up regardless of remaining rendering elements. This prevents resource leaks from components that fail to signal completion.
+

+ 91 - 0
.kiro/specs/auto-scroll/requirements.md

@@ -0,0 +1,91 @@
+# Requirements Document
+
+## Introduction
+
+This specification defines the behavior of the **hash-based auto-scroll** mechanism used across GROWI's content pages. When a user navigates to a URL containing a fragment hash (e.g., `#section-title`), the system scrolls to the corresponding element in the rendered content. Because GROWI pages contain lazily-rendered elements (Drawio diagrams, Mermaid charts, etc.) that cause layout shifts after initial paint, the system must detect in-progress renders and re-scroll to compensate.
+
+This hook is designed to be **page-type agnostic** — it must work in any view that renders Markdown content with a hash-addressable container (PageView, search result previews, etc.).
+
+## Review Feedback (from yuki-takei, PR #10853)
+
+The following reviewer feedback is incorporated into these requirements:
+
+1. **Rendering attribute value**: Use declarative `true`/`false` toggling instead of `setAttribute`/`removeAttribute` — the attribute should always be present with a boolean-like value, not added/removed.
+2. **Attribute naming**: The attribute name should more clearly convey "rendering in progress" status. The name will be finalized in the design phase but must be more descriptive than `data-growi-rendering`.
+3. **Hook generalization**: Move to `src/client/hooks/` for shared use; accept a target-resolving closure instead of hardcoded `getElementById`; support customizable scroll behavior (e.g., `scrollIntoView` for PageView vs. a different method for SearchResultContent); rename the hook accordingly.
+
+## Requirements
+
+### Requirement 1: Immediate Scroll to Hash Target
+
+**Objective:** As a user, I want to be scrolled to the section referenced by the URL hash when I open a page, so that I can directly access the content I was linked to.
+
+#### Acceptance Criteria
+
+1. When the page loads with a URL hash and the target element already exists in the DOM, the hook shall scroll the target element into view immediately.
+2. When the page loads with a URL hash containing encoded characters (e.g., `%E6%97%A5%E6%9C%AC%E8%AA%9E`), the hook shall decode the hash and locate the corresponding element by its `id` attribute.
+3. If the key parameter is null or undefined, the hook shall skip all scroll processing.
+4. If the URL hash is empty, the hook shall skip all scroll processing.
+5. If the content container element is not found in the DOM, the hook shall skip all scroll processing.
+
+### Requirement 2: Deferred Scroll for Lazy-Rendered Targets
+
+**Objective:** As a user, I want the page to scroll to my target section even when the content is rendered after initial page load, so that dynamically rendered headings are still reachable via URL hash.
+
+#### Acceptance Criteria
+
+1. When the page loads with a URL hash and the target element does not yet exist in the DOM, the hook shall observe the content container for DOM mutations until the target appears.
+2. When the target element appears in the DOM during observation, the hook shall immediately scroll it into view.
+3. If the target element does not appear within the watch timeout period (default: 10 seconds), the hook shall stop observing and give up without error.
+
+### Requirement 3: Re-Scroll After Rendering Completion
+
+**Objective:** As a user, I want the view to re-adjust after lazy-rendered content (e.g., Drawio diagrams) finishes rendering, so that layout shifts do not push my target section out of view.
+
+#### Acceptance Criteria
+
+1. When an initial scroll completes and elements whose rendering-status attribute indicates "in progress" exist in the content container, the hook shall schedule a re-scroll after a poll interval (default: 5 seconds).
+2. While elements with in-progress rendering status remain in the container after a re-scroll, the hook shall repeat the poll-and-re-scroll cycle.
+3. When no elements with in-progress rendering status remain after a re-scroll check, the hook shall stop re-scrolling.
+4. When new elements with in-progress rendering status appear in the container (detected via MutationObserver), the hook shall schedule a re-scroll if one is not already pending.
+5. The hook shall not reset a running poll timer when additional DOM mutations occur — only schedule a new timer when no timer is active.
+6. The rendering watch shall automatically terminate after the watch timeout period (default: 10 seconds) regardless of remaining rendering elements.
+
+### Requirement 4: Rendering Status Attribute Protocol
+
+**Objective:** As a developer, I want a standardized attribute for components to signal their rendering status declaratively, so that the auto-scroll system can detect layout-shifting content generically.
+
+#### Acceptance Criteria
+
+1. The attribute name and its CSS selector for the "in progress" state shall be defined as shared constants in `@growi/core`.
+2. The attribute name shall clearly convey that rendering is in progress (e.g., more descriptive than a generic `data-growi-rendering`). The final name will be determined in the design phase.
+3. When a component begins rendering content that will change its dimensions (e.g., Drawio diagram initialization), the component shall set the attribute value to indicate "in progress" (e.g., `"true"`).
+4. When the component finishes rendering or encounters an error, the component shall set the attribute value to indicate "completed" (e.g., `"false"`) rather than removing the attribute entirely — the attribute lifecycle shall be declarative (value toggle), not imperative (add/remove).
+5. The attribute shall be included in the component's HTML sanitization allowlist so that it survives remark/rehype processing.
+6. The CSS selector used by the auto-scroll system shall match only the "in progress" state (e.g., `[attr="true"]`), not the completed state.
+7. The following async-rendering components shall adopt the attribute protocol in this scope: DrawioViewer, MermaidViewer, PlantUmlViewer (new wrapper component), and lsx (Lsx). Other async renderers (attachment-refs, RichAttachment) are deferred to follow-up work.
+8. When a component triggers a secondary re-render that will cause a layout shift (e.g., via ResizeObserver detecting container size changes after initial render), the component shall reset the attribute value to `"true"` before the re-render begins and allow the existing completion path to set it back to `"false"` when done. This ensures the auto-scroll system tracks all layout-shifting render cycles, not only the initial one.
+
+### Requirement 5: Page-Type Agnostic Design
+
+**Objective:** As a developer, I want the auto-scroll hook to be reusable across different page types (wiki pages, search results, etc.), so that hash-based scrolling behaves consistently throughout the application.
+
+#### Acceptance Criteria
+
+1. The hook shall accept a generic key parameter (not limited to page IDs) and a content container element ID as its inputs.
+2. The hook shall accept an optional target-resolving function (closure) that returns the target `HTMLElement | null`. When not provided, the hook shall default to resolving the target via `document.getElementById` using the decoded hash.
+3. The hook shall accept an optional scroll function that defines how to scroll to the target element. When not provided, the hook shall default to `element.scrollIntoView()`. This allows callers (e.g., SearchResultContent) to supply a custom scroll strategy.
+4. The hook shall not import or depend on any page-specific state (Jotai atoms, SWR hooks, or page models).
+5. The shared rendering-watch utility (`watchRenderingAndReScroll`) shall be located in a shared directory (e.g., `src/client/util/`). Each consumer-specific hook shall be co-located with its consumer component and named to reflect its purpose (e.g., hash-based scroll for PageView, keyword-based re-scroll for SearchResultContent).
+6. When the key parameter changes, the hook shall clean up any active observers and timers from the previous run and re-execute the scroll logic.
+7. When the component using the hook unmounts, the hook shall clean up all MutationObservers, timers, and rendering watch resources.
+
+### Requirement 6: Resource Cleanup and Safety
+
+**Objective:** As a developer, I want the hook to be safe against memory leaks and runaway timers, so that it can be used in any component lifecycle without side effects.
+
+#### Acceptance Criteria
+
+1. When the hook's effect cleanup runs, the hook shall disconnect all MutationObservers, clear all pending timers, and invoke any rendering watch cleanup functions.
+2. The hook shall enforce a maximum watch duration (default: 10 seconds) for both target observation and rendering watch, preventing indefinite resource consumption.
+3. While multiple elements with the rendering-status attribute (in-progress state) exist simultaneously, the hook shall execute only one re-scroll (not one per element).

+ 246 - 0
.kiro/specs/auto-scroll/research.md

@@ -0,0 +1,246 @@
+# Research & Design Decisions
+
+## Summary
+- **Feature**: `auto-scroll`
+- **Discovery Scope**: Extension (refactoring existing hook for reusability)
+- **Key Findings**:
+  - `src/client/hooks/` does not exist; hooks are collocated with features — a new shared hooks directory is needed
+  - SearchResultContent has independent scroll-to-highlighted-keyword logic using MutationObserver; coordination needed
+  - MermaidViewer does not implement the rendering attribute protocol; DrawioViewer is the only adopter
+
+## Research Log
+
+### Hook Location and Existing Patterns
+- **Context**: Requirement 5.5 specifies placing the hook in `src/client/hooks/`
+- **Findings**:
+  - `apps/app/src/client/hooks/` does not exist
+  - Existing hooks are collocated: `features/page-tree/hooks/`, `features/openai/client/components/.../hooks/`
+  - No precedent for a top-level shared hooks directory in `src/client/`
+- **Implications**: Creating `src/client/hooks/` establishes a new pattern for cross-feature hooks
+
+### SearchResultContent Scroll Behavior
+- **Context**: Requirement 5 mandates reusability for search result pages
+- **Sources**: `apps/app/src/features/search/client/components/SearchPage/SearchResultContent.tsx`
+- **Findings**:
+  - Container ID: `search-result-content-body-container`
+  - Container has `overflow-y-scroll` — is the scroll unit, not the viewport
+  - Uses MutationObserver to find `.highlighted-keyword` elements and scroll to the first one using `scrollWithinContainer`
+  - Debounced at 500ms; `SCROLL_OFFSET_TOP = 30`
+  - Does NOT use URL hash — scrolls to highlighted search terms
+  - `useEffect` has no dependency array (fires on every render); no cleanup (intentional per inline comment)
+- **Implications (updated)**:
+  - `scrollIntoView()` default is inappropriate; custom `scrollTo` using `scrollWithinContainer` is required
+  - When `window.location.hash` is non-empty, the keyword scroll overrides hash scroll after 500ms debounce — must be suppressed via early return guard
+  - The `resolveTarget` default (`document.getElementById`) works correctly; heading `id` attributes are set by the remark pipeline
+
+### DrawioViewer Rendering Attribute Pattern
+- **Context**: Requirement 4.4 mandates declarative true/false toggling
+- **Sources**: `packages/remark-drawio/src/components/DrawioViewer.tsx`
+- **Findings**:
+  - Initial render: `{[GROWI_RENDERING_ATTR]: 'true'}` in JSX spread (line 188)
+  - On error: `removeAttribute(GROWI_RENDERING_ATTR)` (line 131)
+  - On complete: `removeAttribute(GROWI_RENDERING_ATTR)` (line 148)
+  - This is imperative add/remove, not declarative value toggle
+- **Implications**: Needs refactoring to `setAttribute(attr, 'false')` on completion/error instead of `removeAttribute`
+
+### MermaidViewer Status
+- **Context**: Could benefit from the rendering attribute protocol
+- **Sources**: `apps/app/src/features/mermaid/components/MermaidViewer.tsx`
+- **Findings**:
+  - Does NOT use `GROWI_RENDERING_ATTR`
+  - Uses `mermaid.render()` async with direct `innerHTML` assignment
+  - Mermaid sanitize options only allow `value` attribute
+- **Implications**: Adding Mermaid support is a separate task, not in scope for this spec, but the design should be compatible
+
+### Rendering Attribute Naming
+- **Context**: Reviewer feedback requests a more descriptive name
+- **Findings**:
+  - Current: `data-growi-rendering` — ambiguous (rendering what?)
+  - Candidates considered:
+    - `data-growi-is-rendering-in-progress` — explicit but verbose
+    - `data-growi-rendering-status` — implies multiple states
+    - `data-growi-content-rendering` — slightly more specific
+  - With declarative true/false, a boolean-style name like `data-growi-is-content-rendering` works well
+- **Implications**: Selected `data-growi-is-content-rendering` — clearly a boolean predicate, reads naturally as `is-content-rendering="true"/"false"`
+
+## Architecture Pattern Evaluation
+
+| Option | Description | Strengths | Risks / Limitations | Notes |
+|--------|-------------|-----------|---------------------|-------|
+| Custom hook with options object | Single hook with configurable resolveTarget and scrollTo callbacks | Clean API, single import, testable | Options object may grow over time | Selected approach |
+| Separate hooks per page type | usePageHashScroll, useSearchScroll | Type-specific optimization | Duplicated watch/cleanup logic | Rejected — violates DRY |
+| HOC wrapper | Higher-order component wrapping scroll behavior | Framework-agnostic | Harder to compose, less idiomatic React | Rejected — hooks are idiomatic |
+
+## Design Decisions
+
+### Decision: Hook API Shape
+- **Context**: Hook must support PageView (hash-based) and SearchResultContent (keyword-based) with different scroll strategies
+- **Alternatives Considered**:
+  1. Positional parameters — `useAutoScroll(key, containerId, resolveTarget?, scrollFn?)`
+  2. Options object — `useAutoScroll(options)`
+- **Selected Approach**: Options object with required `key` and `contentContainerId`, optional `resolveTarget` and `scrollTo`
+- **Rationale**: Options object is extensible without breaking existing call sites and self-documents parameter intent
+- **Trade-offs**: Slightly more verbose at call site; mitigated by clear defaults
+
+### Decision: Attribute Name
+- **Context**: Reviewer feedback: name should clearly convey "rendering in progress"
+- **Selected Approach**: `data-growi-is-content-rendering` with values `"true"` / `"false"`
+- **Rationale**: Boolean predicate naming (`is-*`) is natural for a two-state attribute; `content-rendering` disambiguates from other rendering concepts
+- **Follow-up**: Update `@growi/core` constant and all consumers
+
+### Decision: CSS Selector for In-Progress State
+- **Context**: Requirement 4.6 — selector must match only in-progress state
+- **Selected Approach**: `[data-growi-is-content-rendering="true"]` instead of bare attribute selector
+- **Rationale**: With declarative true/false toggling, bare `[attr]` matches both states; value selector is required
+
+## Risks & Mitigations
+- **Risk**: SearchResultContent's existing keyword-highlight scroll may conflict with hash-based scroll — **Mitigation**: Guard the keyword-scroll `useEffect` with `if (window.location.hash.length > 0) return;` so hash scroll takes priority when a hash is present; keyword scroll proceeds unchanged otherwise
+- **Risk**: `scrollIntoView()` default scrolls the viewport when SearchResultContent's container has `overflow-y-scroll` — **Mitigation**: Provide a custom `scrollTo` closure using `scrollWithinContainer` with offset from the container's bounding rect
+- **Risk**: Renaming the attribute requires coordinated changes across `@growi/core`, `remark-drawio`, and consuming apps — **Mitigation**: Constants are centralized; single constant rename propagates via imports
+- **Risk**: MutationObserver on `subtree: true` may be expensive on large pages — **Mitigation**: Retained 10s maximum watch timeout from current implementation
+
+## Post-Implementation Finding: SearchResultContent Integration Misalignment
+
+**Discovered after task 6 implementation** during code review conversation.
+
+### Problem
+
+The task 6 implementation added `useContentAutoScroll` to `SearchResultContent`, but this was architecturally incorrect. `useContentAutoScroll` is URL-hash–driven (`if (hash.length === 0) return`) and will never activate in the search results context — the search page URL (`/search?q=foo`) carries no fragment identifier.
+
+### Actual Requirement
+
+The real requirement for SearchResultContent is:
+1. **Keyword scroll** (already working): scroll to the first `.highlighted-keyword` element when content loads, via MutationObserver + 500ms debounce
+2. **Re-scroll after rendering** (missing): when drawio / mermaid diagrams render asynchronously after the initial keyword scroll, the layout shifts and the keyword moves out of view — `watchRenderingAndReScroll` should re-scroll to the keyword once rendering settles
+
+### Current Code State (as of this writing)
+
+`apps/app/src/features/search/client/components/SearchPage/SearchResultContent.tsx` contains:
+- `useContentAutoScroll(...)` call — **should be removed**
+- keyword scroll `useEffect` with hash guard (`if (window.location.hash.length > 0) return`) — the guard may also be removable depending on how the hook is refactored
+- `scrollToTargetWithinContainer` local helper (shared distance calculation) — **keep**
+
+### Proposed Refactoring Direction
+
+Two-phase refactor, designed for the next session:
+
+**Phase 1 — Immediate fix (SearchResultContent)**
+
+Wire `watchRenderingAndReScroll` directly into the keyword scroll `useEffect`:
+
+```typescript
+useEffect(() => {
+  const scrollElement = scrollElementRef.current;
+  if (scrollElement == null) return;
+
+  const scrollToKeyword = (): boolean => {
+    const toElem = scrollElement.querySelector('.highlighted-keyword') as HTMLElement | null;
+    if (toElem == null) return false;
+    scrollToTargetWithinContainer(toElem, scrollElement);
+    return true;
+  };
+
+  // MutationObserver for incremental content loading (debounced)
+  const observer = new MutationObserver(() => {
+    scrollToFirstHighlightedKeywordDebounced(scrollElement);
+  });
+  observer.observe(scrollElement, MUTATION_OBSERVER_CONFIG);
+
+  // Rendering watch: re-scroll after drawio/mermaid layout shifts
+  const cleanupWatch = watchRenderingAndReScroll(scrollElement, scrollToKeyword);
+  return cleanupWatch;
+}, [page._id]);
+```
+
+Remove the `useContentAutoScroll` import and call entirely.
+
+**Phase 2 — Architecture improvement (shared hook)**
+
+Reorganize the relationship between `useContentAutoScroll` and `watchRenderingAndReScroll`:
+
+- `watchRenderingAndReScroll` (pure function) is the core shared primitive — **promote it to a named export** so callers other than `useContentAutoScroll` can use it directly
+- Consider introducing a thin React wrapper hook `useRenderingRescroll(scrollToTarget, deps)` that manages the `useEffect` lifecycle for `watchRenderingAndReScroll`, making it composable
+- `useContentAutoScroll` becomes the **hash-navigation–specific** hook: hash guard → target resolution → initial scroll → delegates to `useRenderingRescroll`
+- `SearchResultContent` keyword scroll becomes: MutationObserver debounce → initial scroll → delegates to `useRenderingRescroll`
+- PageView-specific logic (default `scrollIntoView`, `getElementById` resolver) stays in PageView or in `useContentAutoScroll`
+
+Resulting dependency graph:
+
+```
+useContentAutoScroll  ─┐
+                        ├── useRenderingRescroll ── watchRenderingAndReScroll
+SearchResultContent   ─┘
+```
+
+### Key Questions for Next Session Design
+
+1. Should `useRenderingRescroll` be a hook (managing `useEffect` internally) or should callers be responsible for calling it inside their own effect? A hook is more ergonomic; a plain function is more flexible.
+2. The current keyword-scroll `useEffect` has no dependency array (fires every render) and no cleanup — intentional per inline comment. Adding `[page._id]` deps and a cleanup changes this behavior. Is that safe?
+3. Should the hash guard on the keyword-scroll `useEffect` be removed once `useContentAutoScroll` is also removed from `SearchResultContent`?
+
+## Task 8 Analysis: useRenderingRescroll Hook Extraction
+
+### Investigation (2026-04-06)
+
+**Objective**: Determine whether extracting a shared `useRenderingRescroll` hook is architecturally beneficial after tasks 1–7 completion.
+
+**Method**: Code review of current implementations — `useContentAutoScroll` (108 lines), `watchRenderingAndReScroll` (85 lines), `SearchResultContent` keyword-scroll effect (lines 133–161).
+
+### Findings
+
+**1. Hook extraction is architecturally infeasible for `useContentAutoScroll`**
+
+`useContentAutoScroll` calls `watchRenderingAndReScroll` conditionally inside its `useEffect`:
+- On the immediate path: only after `scrollToTarget()` returns true (line 77)
+- On the deferred path: only after the MutationObserver detects the target element (line 91)
+
+React hooks cannot be called conditionally or inside callbacks. A `useRenderingRescroll` hook would need an "enabled" flag pattern, adding complexity without simplification.
+
+**2. Co-located cleanup in SearchResultContent prevents separation**
+
+The keyword-scroll `useEffect` in `SearchResultContent` (lines 135–160) combines:
+- MutationObserver for keyword highlight detection
+- `watchRenderingAndReScroll` for async renderer compensation
+- Single cleanup return that handles both
+
+Extracting the watch into a separate hook would split cleanup across two effects, making the lifecycle harder to reason about.
+
+**3. All three design questions from the original research are resolved**
+
+| Question | Resolution | How |
+|----------|------------|-----|
+| Hook vs. function | Plain function | Conditional call inside effect prevents hook usage |
+| `[page._id]` deps + cleanup safe? | Yes, safe | Implemented in task 7.1, working correctly |
+| Hash guard removal | Already done | Removed in task 7.1 alongside `useContentAutoScroll` removal |
+
+**4. Current architecture is already optimal**
+
+`watchRenderingAndReScroll` as a plain function returning a cleanup closure is the correct abstraction level:
+- Composable into any `useEffect` (conditional or unconditional)
+- No React runtime coupling (testable without `renderHook`)
+- Clean dependency graph with two independent consumers
+
+### Initial Recommendation (superseded)
+
+Initially recommended closing Task 8 without code changes. However, after discussion the scope was revised from "hook extraction" to "module reorganization" — see below.
+
+### Revised Direction: Module Reorganization (2026-04-06)
+
+**Context**: The user observed that while a shared `useRenderingRescroll` hook adds no value (confirmed by analysis above), the current file layout is inconsistent:
+
+1. `useContentAutoScroll` lives in `src/client/hooks/` (shared) but is PageView-specific (hash-dependent)
+2. `watchRenderingAndReScroll` lives next to that hook as if internal, but is the actual shared primitive
+3. SearchResultContent's scroll logic is inlined rather than extracted
+
+**Revised approach**:
+- Move `watchRenderingAndReScroll` to `src/client/util/` — co-located with `smooth-scroll.ts` (both are DOM scroll utilities)
+- Rename `useContentAutoScroll` → `useHashAutoScroll` and move next to `PageView.tsx`
+- Extract keyword-scroll effect from `SearchResultContent` into co-located `useKeywordRescroll` hook
+- Delete `src/client/hooks/use-content-auto-scroll/` directory
+
+**Rationale**: Module co-location over shared directory. Each hook lives next to its only consumer. Only the truly shared primitive (`watchRenderingAndReScroll`) stays in a shared directory — and it moves from `hooks/` to `util/` since it's a plain function, not a hook.
+
+## References
+- [MutationObserver API](https://developer.mozilla.org/en-US/docs/Web/API/MutationObserver) — core browser API used for DOM observation
+- [Element.scrollIntoView()](https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView) — default scroll behavior
+- PR #10853 reviewer feedback from yuki-takei — driving force for this refactoring

+ 24 - 0
.kiro/specs/auto-scroll/spec.json

@@ -0,0 +1,24 @@
+{
+  "feature_name": "auto-scroll",
+  "created_at": "2026-04-02T00:00:00.000Z",
+  "updated_at": "2026-04-07T12:00:00.000Z",
+  "cleanup_completed": true,
+  "language": "en",
+  "phase": "implementation-complete",
+  "approvals": {
+    "requirements": {
+      "generated": true,
+      "approved": true
+    },
+    "design": {
+      "generated": true,
+      "approved": true
+    },
+    "tasks": {
+      "generated": true,
+      "approved": true,
+      "notes": "All tasks (1–8) complete. Design updated to reflect final architecture after module reorganization."
+    }
+  },
+  "ready_for_implementation": true
+}

+ 154 - 0
.kiro/specs/auto-scroll/tasks.md

@@ -0,0 +1,154 @@
+# Implementation Plan
+
+- [x] 1. Update rendering status constants in @growi/core
+  - Rename the attribute constant from the current name to `data-growi-is-content-rendering` to clearly convey boolean rendering-in-progress semantics
+  - Update the CSS selector constant to match only the in-progress state (`="true"`) rather than bare attribute presence
+  - Remove the old constants — no backward-compatibility aliases since all consumers are updated in the same change
+  - _Requirements: 4.1, 4.2, 4.6_
+
+- [x] 2. Update remark-drawio for declarative rendering attribute protocol
+- [x] 2.1 (P) Adopt declarative value toggling in DrawioViewer component
+  - Change rendering-complete and error paths to set the attribute value to `"false"` instead of removing the attribute entirely
+  - Update the initial JSX spread to use the renamed constant while keeping `"true"` as the initial value
+  - Verify that the wrapper component (DrawioViewerWithEditButton) continues to function without changes
+  - In the ResizeObserver handler, set `attr="true"` before `renderDrawioWithDebounce()` to signal re-render cycles to the auto-scroll system (req 4.8)
+  - _Requirements: 4.3, 4.4, 4.8_
+- [x] 2.2 (P) Update remark-drawio plugin sanitization and node rewriting
+  - Replace the old constant in the supported-attributes array with the new constant name
+  - Update node rewriting to set the new attribute name with `"true"` value on drawio nodes
+  - Confirm the sanitize export still passes the new attribute through HTML sanitization
+  - _Requirements: 4.5_
+
+- [x] 3. Add rendering attribute to MermaidViewer and Lsx
+- [x] 3.1 (P) Add rendering-status attribute lifecycle to MermaidViewer
+  - Set the rendering-status attribute to `"true"` on the container element at initial render before the async SVG render starts
+  - Set the attribute to `"false"` after `mermaid.render()` completes and the SVG is injected into the DOM
+  - Set the attribute to `"false"` in the error/catch path as well
+  - Update the mermaid remark plugin sanitize options to include the new attribute name in the allowlist
+  - _Requirements: 4.3, 4.4, 4.5, 4.7_
+- [x] 3.2 (P) Add rendering-status attribute lifecycle to Lsx component
+  - Set the rendering-status attribute to `"true"` on the outermost container while the SWR page list fetch is loading
+  - Set the attribute to `"false"` when data arrives — success, error, or empty result — using declarative binding from the existing `isLoading` state
+  - Update the lsx remark plugin sanitize options to include the new attribute name in the allowlist
+  - Add `@growi/core` as a dependency of `remark-lsx` (same pattern as `remark-drawio`)
+  - _Requirements: 4.3, 4.4, 4.5, 4.7_
+
+- [x] 4. Implement shared auto-scroll hook
+- [x] 4.1 Implement rendering watch function with safety improvements
+  - Create the `watchRenderingAndReScroll` function in the new shared hooks directory using the updated rendering-status selector
+  - Add a `stopped` boolean flag checked inside timer callbacks to prevent execution after cleanup (race condition fix from PR review)
+  - Maintain the existing non-resetting timer pattern: skip scheduling when a timer is already active
+  - When `checkAndSchedule` detects no rendering elements remain while a timer is still active, cancel the active timer immediately to avoid a redundant re-scroll after rendering has completed
+  - Enforce the 10-second hard timeout that cleans up observer and all timers regardless of remaining rendering elements
+  - _Requirements: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 6.1, 6.2, 6.3_
+  - _Contracts: watchRenderingAndReScroll service interface_
+- [x] 4.2 Implement useContentAutoScroll hook with options object API
+  - Create the hook accepting an options object with `key`, `contentContainerId`, optional `resolveTarget`, and optional `scrollTo`
+  - Implement guard logic: skip processing when key is null/undefined, hash is empty, or container element not found
+  - Implement immediate scroll path: resolve target via provided closure (default: `getElementById`), scroll via provided function (default: `scrollIntoView`), then check for rendering elements before delegating to rendering watch — skip watch entirely if no rendering elements exist
+  - Implement deferred scroll path: MutationObserver on container until target appears, then scroll and conditionally delegate to rendering watch (same check), with 10-second timeout
+  - Store `resolveTarget` and `scrollTo` callbacks in refs to avoid re-triggering the effect on callback identity changes
+  - Wire cleanup to disconnect all observers, clear all timers, and invoke rendering watch cleanup
+  - _Requirements: 1.1, 1.2, 1.3, 1.4, 1.5, 2.1, 2.2, 2.3, 5.1, 5.2, 5.3, 5.4, 5.6, 5.7, 6.1, 6.2_
+  - _Contracts: UseContentAutoScrollOptions, useContentAutoScroll service interface_
+- [x] 4.3 (P) Write tests for watchRenderingAndReScroll
+  - Test that no timer is scheduled when no rendering elements exist
+  - Test that a re-scroll fires after the 5-second poll interval when rendering elements are present
+  - Test that the timer is not reset by intermediate DOM mutations
+  - Test that late-appearing rendering elements are detected by the observer and trigger a timer
+  - Test that only one re-scroll executes per cycle even with multiple rendering elements
+  - Test that the 10-second watch timeout cleans up all resources
+  - Test that the stopped flag prevents timer callbacks from executing after cleanup
+  - Test that an active timer is cancelled when rendering elements are removed before the timer fires
+  - _Requirements: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 6.1, 6.2, 6.3_
+- [x] 4.4 (P) Write tests for useContentAutoScroll
+  - Test guard conditions: no-op when key is null, hash is empty, or container not found
+  - Test immediate scroll when target already exists in DOM
+  - Test deferred scroll when target appears after initial render via MutationObserver
+  - Test that encoded hash values are decoded correctly before target resolution
+  - Test that a custom `resolveTarget` closure is called instead of the default
+  - Test that a custom `scrollTo` function is called instead of the default
+  - Test cleanup on key change: observers and timers from previous run are released
+  - Test cleanup on unmount: all resources are released
+  - Test rendering watch integration: re-scroll fires when rendering elements exist after initial scroll
+  - Test that rendering watch is skipped when no rendering elements exist after initial scroll
+  - Test 10-second timeout for target observation when target never appears
+  - _Requirements: 1.1, 1.2, 1.3, 1.4, 1.5, 2.1, 2.2, 2.3, 5.1, 5.2, 5.3, 5.6, 5.7, 6.1, 6.2_
+
+- [x] 5. Integrate hook into PageView and remove old implementation
+  - Replace the import of the old hook with the new shared hook in PageView
+  - Update the call site to use the options object API with `key: currentPageId` and `contentContainerId` — no custom `resolveTarget` or `scrollTo` needed (defaults match PageView's behavior)
+  - Delete the old hook file and its test file from the PageView directory
+  - Verify that PageView auto-scroll behavior is preserved with manual testing or existing test coverage
+  - _Requirements: 5.1, 5.4, 5.5_
+
+- [x] 6. Integrate useContentAutoScroll into SearchResultContent
+- [x] 6.1 (P) Add hash-based auto-scroll with container-relative scroll strategy
+  - Call `useContentAutoScroll` with `key: page._id` and `contentContainerId: 'search-result-content-body-container'`
+  - Provide a custom `scrollTo` closure that calculates the target element's offset relative to the container's bounding rect and calls `scrollWithinContainer` with the same `SCROLL_OFFSET_TOP` constant already used for keyword scroll
+  - Capture the container via the existing `scrollElementRef` in the closure to avoid a redundant `getElementById` lookup
+  - Do not provide a custom `resolveTarget` — heading elements have `id` attributes set by the remark pipeline, so the default `getElementById` resolver works correctly
+  - _Requirements: 5.1, 5.2, 5.3, 5.5_
+
+- [x] 6.2 (P) Suppress keyword-highlight scroll when a URL hash is present
+  - Add an early return guard at the top of the existing keyword-scroll `useEffect`: if `window.location.hash` is non-empty, return immediately so hash-based scroll is not overridden by the debounced keyword scroll
+  - Preserve the existing keyword-scroll behavior fully when no hash is present — the MutationObserver, debounce interval, and `scrollWithinContainer` call remain unchanged
+  - _Requirements: 5.1, 5.5_
+
+- [x] 6.3 Write tests for SearchResultContent auto-scroll integration
+  - Test that `useContentAutoScroll` is called with the correct `key` and `contentContainerId` when the component mounts
+  - Test that the custom `scrollTo` scrolls within the container (not the viewport) by verifying `scrollWithinContainer` is called with the correct distance
+  - Test that the keyword-scroll `useEffect` skips observation when `window.location.hash` is non-empty
+  - Test that the keyword-scroll `useEffect` sets up the MutationObserver normally when no hash is present
+  - _Requirements: 5.1, 5.2, 5.3, 5.5_
+
+---
+
+## Phase 2: Module Reorganization
+
+> **Context**: Tasks 1–7 delivered all functional requirements. Task 8 reorganizes modules for co-location: each hook moves next to its consumer, and the shared rendering watch utility moves to `src/client/util/`. No behavior changes — pure structural improvement.
+
+- [x] 7. Fix SearchResultContent: replace `useContentAutoScroll` with `watchRenderingAndReScroll`
+- [x] 7.1 Wire `watchRenderingAndReScroll` into keyword-scroll effect
+  - Remove `useContentAutoScroll` import and call from `SearchResultContent.tsx`
+  - Import `watchRenderingAndReScroll` (already exported from `watch-rendering-and-rescroll.ts`)
+  - Inside the keyword-scroll `useEffect`, after setting up the MutationObserver, call `watchRenderingAndReScroll(scrollElement, scrollToKeyword)` where `scrollToKeyword` calls `scrollToTargetWithinContainer` on the first `.highlighted-keyword` element
+  - Add `[page._id]` to the dependency array (currently has no deps) and return the watch cleanup function
+  - Remove the hash guard (`if (window.location.hash.length > 0) return`) — no longer needed once `useContentAutoScroll` is removed
+  - _See research.md for proposed code sketch_
+
+- [x] 7.2 Update SearchResultContent tests
+  - Remove tests that assert `useContentAutoScroll` is called
+  - Add tests that `watchRenderingAndReScroll` re-scrolls to `.highlighted-keyword` after a rendering element settles
+  - Update MutationObserver suppression test: remove the hash-guard test (guard will be gone)
+
+- [x] 8. Reorganize auto-scroll modules by co-locating hooks with their consumers
+- [x] 8.1 Move the rendering watch utility to the shared utility directory
+  - Move the rendering watch function and its test file from the shared hooks directory to the client utility directory, alongside the existing smooth-scroll utility
+  - Update the import path in the hash-based auto-scroll hook to reference the new location
+  - Update the import path in SearchResultContent to reference the new location
+  - Run existing tests to verify no regressions from the path change
+  - _Requirements: 5.4, 5.5_
+- [x] 8.2 Rename and move the hash-based auto-scroll hook next to PageView
+  - Rename the hook and its options type to reflect its hash-navigation–specific purpose (not a generic "content auto-scroll")
+  - Move the hook file and its test file to the PageView component directory
+  - Update PageView's import to use the co-located hook with the new name
+  - Update the hook's internal import of the rendering watch utility to use the path established in 8.1
+  - Run existing tests to verify the rename and move introduce no regressions
+  - _Requirements: 5.4, 5.5_
+- [x] 8.3 Extract the keyword-scroll effect from SearchResultContent into a co-located hook
+  - Create a new hook that encapsulates the MutationObserver-based keyword detection, debounced scroll, and rendering watch integration currently inlined in the component
+  - Accept a ref to the scrollable container and a trigger key as inputs
+  - Move the scroll helper functions (container-relative scroll calculation, first-highlighted-keyword lookup) into the hook file if they are used only by this logic
+  - Replace the inline useEffect in SearchResultContent with a single call to the new hook
+  - _Requirements: 5.4, 5.5, 6.1_
+- [x] 8.4 (P) Write tests for the extracted keyword-rescroll hook
+  - Migrate rendering watch assertions from SearchResultContent tests into the new hook's test file
+  - Add tests for keyword scroll behavior: MutationObserver setup, debounced scroll to the first highlighted keyword, cleanup on key change and unmount
+  - Simplify SearchResultContent tests to verify the hook is called with the correct container ref and key, without re-testing internal scroll behavior
+  - _Requirements: 6.1, 6.2_
+- [x] 8.5 (P) Remove the old shared hooks directory and verify no stale imports
+  - Delete the now-empty auto-scroll hooks directory
+  - Search the codebase for any remaining references to the old directory path and fix them
+  - Run the full test suite and type check to confirm the reorganization is complete
+  - _Requirements: 5.5_

+ 670 - 0
.kiro/specs/migrate-logger-to-pino/design.md

@@ -0,0 +1,670 @@
+# Design Document: migrate-logger-to-pino
+
+## Overview
+
+**Purpose**: This feature migrates GROWI's logging infrastructure from bunyan (with the custom `universal-bunyan` wrapper) to pino, delivering faster structured logging with a smaller dependency footprint.
+
+**Users**: All GROWI developers (logger consumers), operators (log level configuration), and the CI/CD pipeline (dependency management).
+
+**Impact**: Replaces 7 logging-related packages (`bunyan`, `universal-bunyan`, `bunyan-format`, `express-bunyan-logger`, `morgan`, `browser-bunyan`, `@browser-bunyan/console-formatted-stream`) with 3 (`pino`, `pino-pretty`, `pino-http`) plus a new shared package `@growi/logger`. Consumer applications import only `@growi/logger`; `pino-http` is encapsulated within the package.
+
+### Goals
+- Replace bunyan with pino across all apps and packages without functional degradation
+- Preserve namespace-based log level control (config files + env var overrides)
+- Eliminate morgan by consolidating HTTP logging into pino-http
+- Maintain OpenTelemetry diagnostic logger integration
+- Provide a shared `@growi/logger` package as the single logging entry point
+
+### Non-Goals
+- Changing log output semantics (field names, message format) beyond what pino naturally produces
+- Adding new logging capabilities (structured context propagation, remote log shipping)
+- Migrating to pino v10 (deferred until OTel instrumentation supports it)
+- Changing the namespace naming convention (e.g., `growi:service:page`)
+
+## Architecture
+
+### Existing Architecture Analysis
+
+The current logging stack has these layers:
+
+1. **universal-bunyan** — custom wrapper providing: namespace-based level control via config + env vars, platform detection (Node.js/browser), stream selection (bunyan-format for Node.js, ConsoleFormattedStream for browser), logger caching
+2. **Per-app loggerFactory** — thin wrapper that loads dev/prod config and delegates to universal-bunyan
+3. **bunyan / browser-bunyan** — underlying logger implementations
+4. **express-bunyan-logger / morgan** — HTTP request logging middleware
+
+Key patterns to preserve:
+- `loggerFactory(name: string): Logger` as the sole logger creation API
+- Hierarchical colon-delimited namespaces with glob pattern matching
+- Environment variables (`DEBUG`, `TRACE`, etc.) overriding config file levels
+- Dev: human-readable output; Prod: JSON output (toggleable via `FORMAT_NODE_LOG`)
+- Browser: console output with error-level default in production
+
+### Architecture Pattern & Boundary Map
+
+```mermaid
+graph TB
+    subgraph ConsumerApps[Consumer Applications]
+        App[apps/app]
+        Slackbot[apps/slackbot-proxy]
+    end
+
+    subgraph ConsumerPkgs[Consumer Packages]
+        Slack[packages/slack]
+        Remark[packages/remark-attachment-refs]
+    end
+
+    subgraph GrowiLogger[@growi/logger]
+        Factory[LoggerFactory]
+        LevelResolver[LevelResolver]
+        EnvParser[EnvVarParser]
+        TransportSetup[TransportFactory]
+        HttpLogger[HttpLoggerFactory]
+    end
+
+    subgraph External[External Packages]
+        Pino[pino v9.x]
+        PinoPretty[pino-pretty]
+        PinoHttp[pino-http]
+        Minimatch[minimatch]
+    end
+
+    App --> Factory
+    App --> HttpLogger
+    Slackbot --> Factory
+    Slackbot --> HttpLogger
+    Slack --> Factory
+    Remark --> Factory
+
+    Factory --> LevelResolver
+    Factory --> TransportSetup
+    LevelResolver --> EnvParser
+    LevelResolver --> Minimatch
+
+    Factory --> Pino
+    TransportSetup --> PinoPretty
+
+    HttpLogger --> Factory
+    HttpLogger --> PinoHttp
+```
+
+**Architecture Integration**:
+- Selected pattern: Wrapper package (`@growi/logger`) encapsulating pino configuration — mirrors universal-bunyan's role
+- Domain boundary: `@growi/logger` owns all logger creation, level resolution, and transport setup; consumer apps only call `loggerFactory(name)`
+- Existing patterns preserved: factory function signature, namespace conventions, config file structure
+- New components: `LevelResolver` (namespace-to-level matching), `TransportFactory` (dev/prod stream setup), `EnvVarParser` (env variable parsing)
+- Steering compliance: shared package in `packages/` follows monorepo conventions
+- **Dev-only isolation**: modules that are only used in development (`bunyan-format`, `morgan-like-format-options`) reside under `src/dev/` to make the boundary explicit; all are loaded via dynamic import, never statically bundled in production
+
+### Technology Stack
+
+| Layer | Choice / Version | Role in Feature | Notes |
+|-------|------------------|-----------------|-------|
+| Logging Core | pino v9.x | Structured JSON logger for Node.js and browser | Pinned to v9.x for OTel compatibility; see research.md |
+| Dev Formatting | pino-pretty v13.x | Human-readable log output in development | Used as transport (worker thread) |
+| HTTP Logging | pino-http v11.x | Express middleware for request/response logging | Dependency of @growi/logger; not directly imported by consumer apps |
+| Glob Matching | minimatch (existing) | Namespace pattern matching for level config | Already a transitive dependency via universal-bunyan |
+| Shared Package | @growi/logger | Logger factory with namespace/config/env support and HTTP middleware | New package in packages/logger/ |
+
+## System Flows
+
+### Logger Creation Flow
+
+```mermaid
+sequenceDiagram
+    participant App as Application Startup
+    participant Factory as LoggerFactory
+    participant Transport as pino.transport (Worker)
+    participant Root as Root pino Logger
+
+    App->>Factory: initializeLoggerFactory(options)
+    Factory->>Transport: pino.transport(config) — spawns ONE Worker thread
+    Transport-->>Factory: transport stream
+    Factory->>Root: pino({ level: 'trace' }, transport)
+    Root-->>Factory: rootLogger stored in module scope
+```
+
+```mermaid
+sequenceDiagram
+    participant Consumer as Consumer Module
+    participant Factory as LoggerFactory
+    participant Cache as Logger Cache
+    participant Resolver as LevelResolver
+    participant Root as Root pino Logger
+
+    Consumer->>Factory: loggerFactory(namespace)
+    Factory->>Cache: lookup(namespace)
+    alt Cache hit
+        Cache-->>Factory: cached child logger
+    else Cache miss
+        Factory->>Resolver: resolveLevel(namespace, config, envOverrides)
+        Resolver-->>Factory: resolved level
+        Factory->>Root: rootLogger.child({ name: namespace })
+        Root-->>Factory: child logger (shares Worker thread)
+        Factory->>Factory: childLogger.level = resolved level
+        Factory->>Cache: store(namespace, childLogger)
+    end
+    Factory-->>Consumer: Logger
+```
+
+### Level Resolution Flow
+
+```mermaid
+flowchart TD
+    Start[resolveLevel namespace] --> EnvCheck{Env var match?}
+    EnvCheck -->|Yes| EnvLevel[Use env var level]
+    EnvCheck -->|No| ConfigCheck{Config pattern match?}
+    ConfigCheck -->|Yes| ConfigLevel[Use config level]
+    ConfigCheck -->|No| DefaultLevel[Use config default level]
+
+    EnvLevel --> Done[Return level]
+    ConfigLevel --> Done
+    DefaultLevel --> Done
+```
+
+## Requirements Traceability
+
+| Requirement | Summary | Components | Interfaces | Flows |
+|-------------|---------|------------|------------|-------|
+| 1.1–1.4 | Logger factory with namespace support | LoggerFactory, LoggerCache | `loggerFactory()` | Logger Creation |
+| 2.1–2.4 | Config-file level control | LevelResolver, ConfigLoader | `LoggerConfig` type | Level Resolution |
+| 3.1–3.5 | Env var level override | EnvVarParser, LevelResolver | `parseEnvLevels()` | Level Resolution |
+| 4.1–4.4 | Platform-aware logger | LoggerFactory, TransportFactory | `createTransport()` | Logger Creation |
+| 5.1–5.4 | Dev/prod output formatting | TransportFactory | `TransportOptions` | Logger Creation |
+| 6.1–6.4 | HTTP request logging | HttpLoggerFactory | `createHttpLoggerMiddleware()` | — |
+| 7.1–7.3 | OpenTelemetry integration | DiagLoggerPinoAdapter | `DiagLogger` interface | — |
+| 8.1–8.5 | Multi-app consistency | @growi/logger package | Package exports | — |
+| 9.1–9.3 | Dependency cleanup | — (removal task) | — | — |
+| 10.1–10.3 | Backward-compatible API | LoggerFactory | `Logger` type export | — |
+| 11.1–11.4 | Pino performance preservation | LoggerFactory | `initializeLoggerFactory`, shared root logger | Logger Creation |
+| 12.1–12.8 | Bunyan-like output format | BunyanFormatTransport, TransportFactory | Custom transport target | Logger Creation |
+| 13.1–13.5 | HTTP logger encapsulation | HttpLoggerFactory | `createHttpLoggerMiddleware()` | — |
+
+## Components and Interfaces
+
+| Component | Domain/Layer | Intent | Req Coverage | Key Dependencies | Contracts |
+|-----------|-------------|--------|--------------|-----------------|-----------|
+| LoggerFactory | @growi/logger / Core | Create and cache namespace-bound pino loggers | 1, 4, 8, 10, 11 | pino (P0), LevelResolver (P0), TransportFactory (P0) | Service |
+| LevelResolver | @growi/logger / Core | Resolve log level for a namespace from config + env | 2, 3 | minimatch (P0), EnvVarParser (P0) | Service |
+| EnvVarParser | @growi/logger / Core | Parse env vars into namespace-level map | 3 | — | Service |
+| TransportFactory | @growi/logger / Core | Create pino transport/options for Node.js and browser | 4, 5, 12 | pino-pretty (P1) | Service |
+| BunyanFormatTransport | @growi/logger / Transport | Custom pino transport producing bunyan-format "short" output | 12 | pino-pretty (P1) | Transport |
+| HttpLoggerFactory | @growi/logger / Core | Factory for pino-http Express middleware | 6, 13 | pino-http (P0), LoggerFactory (P0) | Service |
+| DiagLoggerPinoAdapter | apps/app / OpenTelemetry | Wrap pino logger as OTel DiagLogger | 7 | pino (P0) | Service |
+| ConfigLoader | Per-app | Load dev/prod config files | 2 | — | — |
+
+### @growi/logger Package
+
+#### LoggerFactory
+
+| Field | Detail |
+|-------|--------|
+| Intent | Central entry point for creating namespace-bound pino loggers with level resolution and caching |
+| Requirements | 1.1, 1.2, 1.3, 1.4, 4.1, 8.5, 10.1, 10.3 |
+
+**Responsibilities & Constraints**
+- Create pino logger instances with resolved level and transport configuration
+- Cache logger instances per namespace to ensure singleton behavior
+- Detect platform (Node.js vs browser) and apply appropriate configuration
+- Expose `loggerFactory(name: string): pino.Logger` as the public API
+
+**Dependencies**
+- Outbound: LevelResolver — resolve level for namespace (P0)
+- Outbound: TransportFactory — create transport options (P0)
+- External: pino v9.x — logger creation (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+import type { Logger } from 'pino';
+
+interface LoggerConfig {
+  [namespacePattern: string]: string; // pattern → level ('info', 'debug', etc.)
+}
+
+interface LoggerFactoryOptions {
+  config: LoggerConfig;
+}
+
+/**
+ * Initialize the logger factory module with configuration.
+ * Must be called once at application startup before any loggerFactory() calls.
+ */
+function initializeLoggerFactory(options: LoggerFactoryOptions): void;
+
+/**
+ * Create or retrieve a cached pino logger for the given namespace.
+ */
+function loggerFactory(name: string): Logger;
+```
+
+- Preconditions: `initializeLoggerFactory()` called before first `loggerFactory()` call
+- Postconditions: Returns a pino.Logger bound to the namespace with resolved level
+- Invariants: Same namespace always returns the same logger instance
+
+**Implementation Notes**
+- The `initializeLoggerFactory` is called once per app at startup, receiving the merged dev/prod config
+- Browser detection: `typeof window !== 'undefined' && typeof window.document !== 'undefined'`
+- In browser mode, skip transport setup and use pino's built-in `browser` option
+- The factory is a module-level singleton (module scope cache + config)
+- **Performance critical**: `pino.transport()` spawns a Worker thread. It MUST be called **once** inside `initializeLoggerFactory`, not inside `loggerFactory`. Each `loggerFactory(name)` call creates a child logger via `rootLogger.child({ name })` which shares the single Worker thread. Calling `pino.transport()` per namespace would spawn N Worker threads for N namespaces, negating pino's core performance advantage.
+
+#### LevelResolver
+
+| Field | Detail |
+|-------|--------|
+| Intent | Determine the effective log level for a given namespace by matching against config patterns and env var overrides |
+| Requirements | 2.1, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4, 3.5 |
+
+**Responsibilities & Constraints**
+- Match namespace against glob patterns in config (using minimatch)
+- Match namespace against env var-derived patterns (env vars take precedence)
+- Return the most specific matching level, or the `default` level as fallback
+- Parsing is done once at module initialization; resolution is per-namespace at logger creation time
+
+**Dependencies**
+- Outbound: EnvVarParser — get env-derived level map (P0)
+- External: minimatch — glob pattern matching (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+interface LevelResolver {
+  /**
+   * Resolve the log level for a namespace.
+   * Priority: env var match > config pattern match > config default.
+   */
+  resolveLevel(
+    namespace: string,
+    config: LoggerConfig,
+    envOverrides: LoggerConfig,
+  ): string;
+}
+```
+
+- Preconditions: `config` contains a `default` key
+- Postconditions: Returns a valid pino log level string
+- Invariants: Env overrides always take precedence over config
+
+#### EnvVarParser
+
+| Field | Detail |
+|-------|--------|
+| Intent | Parse environment variables (DEBUG, TRACE, INFO, WARN, ERROR, FATAL) into a namespace-to-level map |
+| Requirements | 3.1, 3.4, 3.5 |
+
+**Responsibilities & Constraints**
+- Read `process.env.DEBUG`, `process.env.TRACE`, etc.
+- Split comma-separated values into individual namespace patterns
+- Return a flat `LoggerConfig` map: `{ 'growi:*': 'debug', 'growi:service:page': 'trace' }`
+- Parsed once at module load time (not per-logger)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+/**
+ * Parse log-level environment variables into a namespace-to-level map.
+ * Reads: DEBUG, TRACE, INFO, WARN, ERROR, FATAL from process.env.
+ */
+function parseEnvLevels(): LoggerConfig;
+```
+
+- Preconditions: Environment is available (`process.env`)
+- Postconditions: Returns a map where each key is a namespace pattern and value is a level string
+- Invariants: Only the six known env vars are read; unknown vars are ignored
+
+#### TransportFactory
+
+| Field | Detail |
+|-------|--------|
+| Intent | Create pino transport configuration appropriate for the current environment |
+| Requirements | 4.1, 4.2, 4.3, 4.4, 5.1, 5.2, 5.3, 5.4, 12.1, 12.6, 12.7, 12.8 |
+
+**Responsibilities & Constraints**
+- Node.js development: return BunyanFormatTransport config (`singleLine: false`) — **dev only, not imported in production**
+- Node.js production + `FORMAT_NODE_LOG`: return standard `pino-pretty` transport with `singleLine: true` (not bunyan-format)
+- Node.js production default: return raw JSON (stdout) — no transport
+- Browser: return pino `browser` option config (console output, production error-level default)
+- Include `name` field in all output via pino's `name` option
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+import type { LoggerOptions } from 'pino';
+
+interface TransportConfig {
+  /** Pino options for Node.js environment */
+  nodeOptions: Partial<LoggerOptions>;
+  /** Pino options for browser environment */
+  browserOptions: Partial<LoggerOptions>;
+}
+
+/**
+ * Create transport configuration based on environment.
+ * @param isProduction - Whether NODE_ENV is 'production'
+ */
+function createTransportConfig(isProduction: boolean): TransportConfig;
+```
+
+- Preconditions: Called during logger factory initialization
+- Postconditions: Returns valid pino options for the detected environment
+- Invariants: Browser options never include Node.js transports
+
+**Implementation Notes**
+- Dev transport: `{ target: '<resolved-path>/dev/bunyan-format.js' }` — target path resolved via `path.join(path.dirname(fileURLToPath(import.meta.url)), 'dev', 'bunyan-format.js')`; no `options` passed (singleLine defaults to false inside the module)
+- Prod with FORMAT_NODE_LOG: `{ target: 'pino-pretty', options: { translateTime: 'SYS:standard', ignore: 'pid,hostname', singleLine: true } }` — standard pino-pretty, no custom prettifiers
+- Prod without FORMAT_NODE_LOG (or false): raw JSON to stdout (no transport)
+- Browser production: `{ browser: { asObject: false }, level: 'error' }`
+- Browser development: `{ browser: { asObject: false } }` (inherits resolved level)
+- **Important**: The bunyan-format transport path is only resolved/referenced in the dev branch, ensuring the module is never imported in production
+
+#### BunyanFormatTransport
+
+| Field | Detail |
+|-------|--------|
+| Intent | Custom pino transport that produces bunyan-format "short" mode output (development only) |
+| Requirements | 12.1, 12.2, 12.3, 12.4, 12.5, 12.6, 12.7 |
+
+**Responsibilities & Constraints**
+- Loaded by `pino.transport()` in a Worker thread — must be a module file, not inline functions
+- Uses pino-pretty internally with `customPrettifiers` to match bunyan-format "short" layout
+- **Development only**: This module is only referenced by TransportFactory in the dev branch; never imported in production
+
+**Dependencies**
+- External: pino-pretty v13.x (P1) — used internally for colorization and base formatting
+
+**Contracts**: Transport [x]
+
+##### Transport Module
+
+```typescript
+// packages/logger/src/dev/bunyan-format.ts
+// Default export: function(opts) → Writable stream (pino transport protocol)
+
+interface BunyanFormatOptions {
+  singleLine?: boolean;
+  colorize?: boolean;
+  destination?: NodeJS.WritableStream;
+}
+```
+
+**Implementation Notes**
+- Uses `messageFormat` in pino-pretty to produce the full line: timestamp + level + name + message
+- `ignore: 'pid,hostname,name,req,res,responseTime'` — suppresses pino-http's verbose req/res objects in dev; the morgan-like `customSuccessMessage` already provides method/URL/status/time on the same line
+- `customPrettifiers: { time: () => '', level: () => '' }` — suppresses pino-pretty's default time/level rendering (handled inside `messageFormat`)
+- Level right-alignment and colorization are implemented inside `messageFormat` using ANSI codes
+- `singleLine` defaults to `false` inside the module; no `options` need to be passed from TransportFactory
+- Since the transport is a separate module loaded by the Worker thread, function options work (no serialization issue)
+- Vite's `preserveModules` ensures `src/dev/bunyan-format.ts` → `dist/dev/bunyan-format.js`
+- `NO_COLOR` environment variable is respected to disable colorization
+
+##### Output Examples
+
+**Dev** (bunyan-format, singleLine: false):
+```
+10:06:30.419Z DEBUG growi:service:PassportService: LdapStrategy: serverUrl is invalid
+10:06:30.420Z  WARN growi:service:PassportService: SamlStrategy: cert is not set.
+    extra: {"field":"value"}
+```
+
+**Dev HTTP log** (bunyan-format + morgan-like format, req/res suppressed):
+```
+10:06:30.730Z  INFO express: GET /applicable-grant?pageId=abc 304 - 16ms
+```
+
+**Prod + FORMAT_NODE_LOG** (standard pino-pretty, singleLine: true):
+```
+[2026-03-30 12:00:00.000] INFO (growi:service:search): Elasticsearch is enabled
+```
+
+**Prod default**: raw JSON (no transport, unchanged)
+
+### HTTP Logging Layer
+
+#### HttpLoggerFactory
+
+| Field | Detail |
+|-------|--------|
+| Intent | Encapsulate pino-http middleware creation within @growi/logger so consumers don't depend on pino-http |
+| Requirements | 6.1, 6.2, 6.3, 6.4, 13.1, 13.2, 13.3, 13.4, 13.5, 13.6 |
+
+**Responsibilities & Constraints**
+- Create pino-http middleware using a logger from LoggerFactory
+- In development mode: dynamically import and apply `morganLikeFormatOptions` (customSuccessMessage, customErrorMessage, customLogLevel)
+- In production mode: use pino-http's default message format (no morgan-like module imported)
+- Accept optional `autoLogging` configuration for route filtering
+- Return Express-compatible middleware
+- Encapsulate `pino-http` as an internal dependency of `@growi/logger`
+
+**Dependencies**
+- External: pino-http v11.x (P0)
+- Inbound: LoggerFactory — provides base logger (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+import type { RequestHandler } from 'express';
+
+interface HttpLoggerOptions {
+  /** Logger namespace, defaults to 'express' */
+  namespace?: string;
+  /** Auto-logging configuration (e.g., route ignore patterns) */
+  autoLogging?: {
+    ignore: (req: { url?: string }) => boolean;
+  };
+}
+
+/**
+ * Create Express middleware for HTTP request logging.
+ * In dev: uses pino-http with morgan-like formatting (dynamically imported).
+ * In prod: uses pino-http with default formatting.
+ */
+async function createHttpLoggerMiddleware(options?: HttpLoggerOptions): Promise<RequestHandler>;
+```
+
+- Preconditions: LoggerFactory initialized
+- Postconditions: Returns Express middleware that logs HTTP requests
+- Invariants: morganLikeFormatOptions applied only in dev; static file paths skipped when autoLogging.ignore provided
+
+**Implementation Notes**
+- The type assertion for Logger<string> → pino-http's Logger is handled internally, hidden from consumers
+- `pino-http` moves from apps' dependencies to `@growi/logger`'s dependencies
+- **Browser compatibility**: `pino-http` is imported lazily inside the function body (`const { default: pinoHttp } = await import('pino-http')`) rather than at the module top-level. This prevents bundlers (Turbopack/webpack) from pulling the Node.js-only `pino-http` into browser bundles when `@growi/logger` is imported by shared code
+- `morganLikeFormatOptions` is dynamically imported (`await import('./dev/morgan-like-format-options')`) only when `NODE_ENV !== 'production'`, ensuring the module is not loaded in production
+- The function is `async` to support the dynamic imports; consumers call: `express.use(await createHttpLoggerMiddleware({ autoLogging: { ignore: ... } }))`
+
+### OpenTelemetry Layer
+
+#### DiagLoggerPinoAdapter
+
+| Field | Detail |
+|-------|--------|
+| Intent | Adapt a pino logger to the OpenTelemetry DiagLogger interface |
+| Requirements | 7.1, 7.2, 7.3 |
+
+**Responsibilities & Constraints**
+- Implement the OTel `DiagLogger` interface (`error`, `warn`, `info`, `debug`, `verbose`)
+- Map `verbose()` to pino's `trace()` level
+- Parse JSON strings in message arguments (preserving current behavior)
+- Disable `@opentelemetry/instrumentation-pino` if enabled by default
+
+**Dependencies**
+- External: pino v9.x (P0)
+- External: @opentelemetry/api (P0)
+
+**Contracts**: Service [x]
+
+##### Service Interface
+
+```typescript
+import type { DiagLogger } from '@opentelemetry/api';
+
+/**
+ * Create a DiagLogger that delegates to a pino logger.
+ * Maps OTel verbose level to pino trace level.
+ */
+function createDiagLoggerAdapter(): DiagLogger;
+```
+
+- Preconditions: LoggerFactory initialized, pino logger available for OTel namespace
+- Postconditions: Returns a valid DiagLogger implementation
+- Invariants: All DiagLogger methods delegate to the corresponding pino level
+
+**Implementation Notes**
+- Minimal change from current `DiagLoggerBunyanAdapter` — rename class, update import from bunyan to pino
+- `parseMessage` helper can remain largely unchanged
+- In OTel SDK configuration, replace `'@opentelemetry/instrumentation-bunyan': { enabled: false }` with `'@opentelemetry/instrumentation-pino': { enabled: false }` if the instrumentation package is present
+
+## Data Models
+
+Not applicable. This feature modifies runtime logging behavior and does not introduce or change persisted data models.
+
+## Error Handling
+
+### Error Strategy
+Logging infrastructure must be resilient — a logger failure must never crash the application.
+
+### Error Categories and Responses
+- **Missing config file**: Fall back to `{ default: 'info' }` and emit a console warning
+- **Invalid log level in config/env**: Ignore the entry and log a warning to stderr
+- **Transport initialization failure** (pino-pretty not available): Fall back to raw JSON output
+- **Logger creation failure**: Return a no-op logger that silently discards messages
+
+### Monitoring
+- Logger initialization errors are written to `process.stderr` directly (cannot use the logger itself)
+- No additional monitoring infrastructure required — this is the monitoring infrastructure
+
+## Addendum: Formatting Improvements (Post-Migration)
+
+> Added 2026-03-30. The core migration is complete. This section covers log output readability improvements based on operator feedback.
+
+### Background
+
+- Morgan was used in dev because bunyan's express logging was too verbose
+- Morgan's one-liner format (`GET /path 200 12ms`) was valued for readability
+- `FORMAT_NODE_LOG=true` should produce concise one-liner logs suitable for quick-glance monitoring
+- Production default should remain structured JSON (already working via `.env.production`)
+
+### Gap Summary
+
+| Gap | Issue | Resolution |
+|-----|-------|------------|
+| A | `singleLine: false` in prod FORMAT_NODE_LOG path | Change to `singleLine: true` |
+| B | `FORMAT_NODE_LOG` defaults to formatted when unset | Defer to separate PR (`.env.production` handles this) |
+| C | pino-http uses default verbose messages | Add `customSuccessMessage` / `customErrorMessage` / `customLogLevel` |
+| D | Dev and prod pino-pretty configs identical | Differentiate via `singleLine` |
+
+### Change 1: TransportFactory — Differentiated `singleLine`
+
+**File**: `packages/logger/src/transport-factory.ts`
+
+Current production + FORMAT_NODE_LOG branch uses `singleLine: false`. Change to `singleLine: true`:
+
+```
+Dev:                    singleLine: false  (unchanged — full context)
+Prod + FORMAT_NODE_LOG: singleLine: true   (concise one-liners)
+Prod default:           raw JSON           (unchanged)
+```
+
+The dev branch remains multi-line so developers see full object context. The production formatted path becomes single-line for operator readability.
+
+### Change 2: HttpLoggerMiddleware — Custom Message Format
+
+**Files**: `apps/app/src/server/crowi/index.ts`, `apps/slackbot-proxy/src/Server.ts`
+
+Add pino-http message customization to produce morgan-like output:
+
+```typescript
+const customSuccessMessage: PinoHttpOptions['customSuccessMessage'] = (req, res, responseTime) => {
+  return `${req.method} ${req.url} ${res.statusCode} - ${Math.round(responseTime)}ms`;
+};
+
+const customErrorMessage: PinoHttpOptions['customErrorMessage'] = (req, res, error) => {
+  return `${req.method} ${req.url} ${res.statusCode} - ${error.message}`;
+};
+
+const customLogLevel: PinoHttpOptions['customLogLevel'] = (_req, res, error) => {
+  if (error != null || res.statusCode >= 500) return 'error';
+  if (res.statusCode >= 400) return 'warn';
+  return 'info';
+};
+```
+
+### Output Examples (Updated with dev-only bunyan-like format)
+
+**Dev** (bunyan-format transport + morgan-like HTTP messages):
+```
+10:06:30.419Z  INFO express: GET /page/path 200 - 12ms
+    req: {"method":"GET","url":"/page/path"}
+    res: {"statusCode":200}
+```
+
+> Note: the `req`/`res` lines shown above were subsequently suppressed in development output — see the 2026-04-06 addendum "Dev-Only Module Isolation and Browser Compatibility" and Requirement 13.8.
+
+**Prod + FORMAT_NODE_LOG=true** (standard pino-pretty, default pino-http messages):
+```
+[2026-03-30 12:00:00.000] INFO (express): request completed
+```
+
+**Prod default** (JSON, default pino-http messages):
+```json
+{"level":30,"time":1774872000000,"name":"express","msg":"request completed","req":{"method":"GET","url":"/page/path"},"res":{"statusCode":200},"responseTime":12}
+```
+
+### Testing
+
+- `transport-factory.spec.ts`: Verify transport target contains `bunyan-format` (not pino-pretty directly); dev transport passes no options (singleLine handled inside bunyan-format); prod + FORMAT_NODE_LOG returns pino-pretty with `singleLine: true`
+- `bunyan-format.spec.ts`: Verify transport module produces `HH:mm:ss.SSSZ LEVEL name: message` format; verify req/res are excluded from output
+- `http-logger.spec.ts`: Verify `createHttpLoggerMiddleware` returns middleware, applies morganLikeFormatOptions in dev, passes autoLogging options
+- `morgan-like-format-options.spec.ts`: Verify message formats using `strip()` to remove ANSI codes before assertion; verify customLogLevel returns correct levels for 2xx/4xx/5xx
+
+---
+
+## Addendum: HTTP Logger Encapsulation (Post-Migration)
+
+> Added 2026-04-02. Moves pino-http usage from consumer apps into @growi/logger.
+
+### Background
+
+- Consumer apps (`apps/app`, `apps/slackbot-proxy`) currently import `pino-http` directly
+- This leaks implementation details and requires each app to configure morgan-like format options
+- Encapsulating in `@growi/logger` provides a single configuration point and cleaner dependency graph
+
+### Changes
+
+1. **New file**: `packages/logger/src/http-logger.ts` — exports `createHttpLoggerMiddleware(options)`
+2. **Package.json**: Add `pino-http` to `@growi/logger` dependencies
+3. **apps/app**: Replace direct `pino-http` import with `createHttpLoggerMiddleware` from `@growi/logger`
+4. **apps/slackbot-proxy**: Same as apps/app
+5. **Cleanup**: Remove `pino-http` from apps' direct dependencies (keep in @growi/logger)
+
+---
+
+## Addendum: Dev-Only Module Isolation and Browser Compatibility (Post-Migration)
+
+> Added 2026-04-06. Restructures dev-only modules and fixes browser bundle compatibility.
+
+### Background
+
+- `bunyan-format` and `morgan-like-format-options` were mixed with production modules at the `src/` root level
+- `pino-http` imported at the module top-level caused browser bundle errors (Turbopack: `TypeError: __turbopack_context__.r(...).symbols is undefined`) when `@growi/logger` was imported by shared page code
+- HTTP request logs in dev were verbose (multi-line `req`/`res` JSON objects)
+- HTTP status codes in dev lacked visual differentiation
+
+### Changes
+
+1. **`src/dev/` directory**: All dev-only modules moved under `src/dev/`
+   - `src/transports/bunyan-format.ts` → `src/dev/bunyan-format.ts`
+   - `src/morgan-like-format-options.ts` → `src/dev/morgan-like-format-options.ts`
+   - `src/transports/` directory removed
+2. **`index.ts`**: Removed static `export { morganLikeFormatOptions }` — dev-only module must not appear in production-facing package exports
+3. **`http-logger.ts`**: `pino-http` import moved from module top-level into the async function body (`const { default: pinoHttp } = await import('pino-http')`) — prevents browser bundlers from including the Node.js-only package
+4. **`bunyan-format.ts`**: `ignore` extended to `'pid,hostname,name,req,res,responseTime'` — suppresses verbose pino-http req/res objects; morgan-like `customSuccessMessage` already provides all relevant HTTP metadata on one line
+5. **`morgan-like-format-options.ts`**: ANSI color codes added for status code (2xx=green, 3xx=cyan, 4xx=yellow, 5xx=red) and dim response time; `NO_COLOR` env var respected

+ 156 - 0
.kiro/specs/migrate-logger-to-pino/requirements.md

@@ -0,0 +1,156 @@
+# Requirements Document
+
+## Introduction
+
+GROWI currently uses bunyan as its logging library, wrapped by the custom `universal-bunyan` package (developed by WeSeek). The system provides namespace-based hierarchical logging with environment variable-driven log level control, platform detection (Node.js/Browser), and different output formatting for development and production environments. Morgan is used for HTTP request logging in development mode while `express-bunyan-logger` handles production HTTP logging.
+
+This specification covers the complete migration from bunyan to pino, replacing `universal-bunyan` with an equivalent pino-based solution, and eliminating morgan by consolidating HTTP request logging under pino. The migration must preserve all existing functionality without degradation.
+
+### Current Components to Replace
+- `bunyan` → `pino`
+- `universal-bunyan` (custom) → pino-based equivalent (official packages preferred, custom wrapper where needed)
+- `bunyan-format` → pino transport equivalent (e.g., `pino-pretty`)
+- `express-bunyan-logger` → `pino-http` or equivalent
+- `morgan` (dev only) → consolidated into pino-http
+- `browser-bunyan` / `@browser-bunyan/console-formatted-stream` → pino browser mode or equivalent
+- `@types/bunyan` → pino's built-in types
+
+## Requirements
+
+### Requirement 1: Logger Factory with Namespace Support
+
+**Objective:** As a developer, I want to create loggers with hierarchical namespace identifiers (e.g., `growi:service:page`), so that I can identify the source of log messages and control granularity per module.
+
+#### Acceptance Criteria
+1. The Logger Factory shall provide a `loggerFactory(name: string)` function that returns a logger instance bound to the given namespace.
+2. When `loggerFactory` is called multiple times with the same namespace, the Logger Factory shall return the same cached logger instance.
+3. The Logger Factory shall support colon-delimited hierarchical namespaces (e.g., `growi:crowi`, `growi:routes:login`).
+4. The Logger Factory shall maintain API compatibility so that callers use `logger.info()`, `logger.debug()`, `logger.warn()`, `logger.error()`, `logger.trace()`, and `logger.fatal()` without changes to call sites.
+
+### Requirement 2: Namespace-Based Log Level Configuration via Config Files
+
+**Objective:** As a developer, I want to define per-namespace log levels in configuration files (separate for dev and prod), so that I can fine-tune verbosity for specific modules without restarting with different env vars.
+
+#### Acceptance Criteria
+1. The Logger Factory shall load a configuration object mapping namespace patterns to log levels (e.g., `{ 'growi:service:*': 'debug', 'default': 'info' }`).
+2. The Logger Factory shall select the dev or prod configuration based on the `NODE_ENV` environment variable.
+3. The Logger Factory shall support glob pattern matching (e.g., `growi:service:*`) for namespace-to-level mapping using minimatch-compatible syntax.
+4. When no specific namespace match exists, the Logger Factory shall fall back to the `default` level defined in the configuration.
+
+### Requirement 3: Environment Variable-Based Log Level Override
+
+**Objective:** As an operator, I want to override log levels at runtime via environment variables, so that I can enable debug/trace logging for specific namespaces without modifying code or config files.
+
+#### Acceptance Criteria
+1. The Logger Factory shall read the environment variables `DEBUG`, `TRACE`, `INFO`, `WARN`, `ERROR`, and `FATAL` to parse namespace patterns.
+2. When an environment variable (e.g., `DEBUG=growi:routes:*,growi:service:page`) is set, the Logger Factory shall apply the corresponding log level to all matching namespaces.
+3. When both a config file entry and an environment variable match the same namespace, the environment variable shall take precedence.
+4. The Logger Factory shall support comma-separated namespace patterns within a single environment variable value.
+5. The Logger Factory shall support glob wildcard patterns (e.g., `growi:*`) in environment variable values.
+
+### Requirement 4: Platform-Aware Logger (Node.js and Browser)
+
+**Objective:** As a developer, I want the logger to work seamlessly in both Node.js (server) and browser (client) environments, so that I can use the same `loggerFactory` import in universal/shared code.
+
+#### Acceptance Criteria
+1. The Logger Factory shall detect the runtime environment (Node.js vs browser) and instantiate the appropriate logger implementation.
+2. While running in a browser environment, the Logger Factory shall output logs to the browser's developer console with readable formatting.
+3. While running in a browser production environment, the Logger Factory shall default to `error` level to minimize console noise.
+4. While running in a Node.js environment, the Logger Factory shall output structured logs suitable for machine parsing or human-readable formatting depending on configuration.
+
+### Requirement 5: Output Formatting (Development vs Production)
+
+**Objective:** As a developer/operator, I want distinct log output formats for development and production, so that dev logs are human-readable while production logs are structured and parseable.
+
+#### Acceptance Criteria
+1. While `NODE_ENV` is not `production`, the Logger Factory shall output human-readable formatted logs (equivalent to bunyan-format `short` mode) using pino-pretty or an equivalent transport.
+2. While `NODE_ENV` is `production`, the Logger Factory shall output structured JSON logs by default.
+3. Where the `FORMAT_NODE_LOG` environment variable is set, the Logger Factory shall respect it to toggle between formatted and raw JSON output in production (formatted by default when `FORMAT_NODE_LOG` is unset or truthy).
+4. The Logger Factory shall include the logger namespace in all log output so that the source module is identifiable.
+
+### Requirement 6: HTTP Request Logging
+
+**Objective:** As a developer/operator, I want HTTP request logging integrated with pino, so that request/response metadata is captured in a consistent format alongside application logs, eliminating the need for morgan.
+
+#### Acceptance Criteria
+1. The GROWI Server shall log HTTP requests using `pino-http` or an equivalent pino-based middleware, replacing both `morgan` (dev) and `express-bunyan-logger` (prod).
+2. While in development mode, the HTTP Logger shall skip logging for Next.js static file requests (paths starting with `/_next/static/`).
+3. The HTTP Logger shall use a logger instance obtained from the Logger Factory with the namespace `express` (or equivalent) for consistency with existing log namespaces.
+4. The HTTP Logger shall include standard HTTP metadata (method, URL, status code, response time) in log entries.
+
+### Requirement 7: OpenTelemetry Integration
+
+**Objective:** As a developer, I want the pino-based logger to integrate with OpenTelemetry diagnostics, so that observability tooling continues to function after migration.
+
+#### Acceptance Criteria
+1. The OpenTelemetry DiagLogger adapter shall be updated to wrap pino instead of bunyan.
+2. The OpenTelemetry DiagLogger adapter shall map OpenTelemetry verbose level to pino trace level.
+3. The OpenTelemetry SDK configuration shall disable pino instrumentation if an equivalent auto-instrumentation exists (analogous to the current bunyan instrumentation disable).
+
+### Requirement 8: Multi-App Consistency
+
+**Objective:** As a developer, I want all GROWI monorepo applications to use the same pino-based logging solution, so that logging behavior and configuration are consistent across the platform.
+
+#### Acceptance Criteria
+1. The `apps/app` application shall use the pino-based Logger Factory.
+2. The `apps/slackbot-proxy` application shall use the pino-based Logger Factory.
+3. The `packages/slack` package shall use the pino-based Logger Factory.
+4. The `packages/remark-attachment-refs` package shall use the pino-based Logger Factory.
+5. The Logger Factory shall be published as a shared package within the monorepo so that all consumers import from a single source.
+
+### Requirement 9: Dependency Cleanup
+
+**Objective:** As a maintainer, I want all bunyan-related and morgan dependencies removed after migration, so that the dependency tree is clean and there is no dead code.
+
+#### Acceptance Criteria
+1. When migration is complete, the monorepo shall have no references to `bunyan`, `universal-bunyan`, `bunyan-format`, `express-bunyan-logger`, `browser-bunyan`, `@browser-bunyan/console-formatted-stream`, or `@types/bunyan` in any `package.json`.
+2. When migration is complete, the monorepo shall have no references to `morgan` or `@types/morgan` in any `package.json`.
+3. When migration is complete, no source file shall contain imports or requires of the removed packages.
+
+### Requirement 11: Preserve Pino's Performance Characteristics
+
+**Objective:** As a developer, I want the logger implementation to honour pino's design philosophy of minimal overhead in the main thread, so that migrating from bunyan does not introduce performance regressions.
+
+#### Acceptance Criteria
+1. The Logger Factory shall create pino's worker-thread transport (`pino.transport()`) **at most once** per application lifetime (i.e., during `initializeLoggerFactory`), regardless of the number of unique namespaces.
+2. The Logger Factory shall create per-namespace loggers by calling `.child()` on a shared root pino instance, not by calling `pino()` and `pino.transport()` independently for each namespace.
+3. The Logger Factory shall not perform any blocking I/O or expensive computation on the hot path of each log method call (level-checking is performed by pino's internal mechanism and is acceptable).
+4. The number of active Worker threads used by the logger subsystem shall remain constant after the first call to `loggerFactory()`, regardless of how many distinct namespaces are subsequently requested.
+
+### Requirement 10: Backward-Compatible Log API
+
+**Objective:** As a developer, I want the new logger to expose the same method signatures as the current bunyan logger, so that existing log call sites require minimal or no changes.
+
+#### Acceptance Criteria
+1. The pino logger shall support `.info()`, `.debug()`, `.warn()`, `.error()`, `.trace()`, and `.fatal()` methods with the same argument patterns as bunyan (message string, optional object, optional error).
+2. If bunyan-specific APIs (e.g., `logger.child()`, serializers) are used at any call sites, the pino equivalent shall be provided or the call site shall be adapted.
+3. The Logger Factory shall export a TypeScript type for the logger instance that is compatible with the pino Logger type.
+
+### Requirement 12: Bunyan-Like Output Format (Development Only)
+
+**Objective:** As a developer, I want the log output in development mode to resemble bunyan-format's "short" mode, so that the visual experience remains familiar after migration.
+
+#### Acceptance Criteria
+1. While in development mode (`NODE_ENV !== 'production'`), the Logger Factory shall output each log line in the format: `HH:mm:ss.SSSZ LEVEL name: message` (e.g., `10:06:30.419Z DEBUG growi:service:page: some message`).
+2. The level label shall be right-aligned to 5 characters (e.g., `DEBUG`, ` INFO`, ` WARN`).
+3. The timestamp shall be UTC time-only in ISO 8601 format (`HH:mm:ss.SSSZ`), without date or surrounding brackets.
+4. The logger namespace (`name` field) shall appear directly after the level label, followed by a colon and the message, without parentheses.
+5. Log lines shall be colorized by level (cyan for DEBUG, green for INFO, yellow for WARN, red for ERROR).
+6. The bunyan-like format shall be implemented as a custom pino transport module within `@growi/logger`, so that `pino.transport()` can load it in a worker thread without function serialization issues.
+7. The bunyan-format transport module shall only be imported in development mode. In production, the module shall not be imported or bundled.
+8. While in production mode with `FORMAT_NODE_LOG` enabled, the Logger Factory shall use standard pino-pretty (not the bunyan-format transport) for formatted output.
+
+### Requirement 13: HTTP Logger Middleware Encapsulation
+
+**Objective:** As a developer, I want the HTTP request logging middleware encapsulated within `@growi/logger`, so that consumer applications do not need to depend on or import `pino-http` directly.
+
+#### Acceptance Criteria
+1. The `@growi/logger` package shall export a `createHttpLoggerMiddleware(options)` function that returns Express-compatible middleware for HTTP request logging.
+2. The middleware factory shall accept options for the logger namespace (defaulting to `'express'`) and optional `autoLogging` configuration (e.g., route ignore patterns).
+3. While in development mode, the middleware shall apply morgan-like formatting (custom success/error messages, custom log levels) via dynamic import. In production mode, the morgan-like format module shall not be imported; pino-http's default message format shall be used.
+4. After the encapsulation, `apps/app` and `apps/slackbot-proxy` shall not import `pino-http` directly; all HTTP logging shall go through `@growi/logger`.
+5. The `pino-http` dependency shall move from consumer applications to `@growi/logger`'s `dependencies`.
+6. The `morganLikeFormatOptions` module shall only be imported in development mode (dynamic import). In production, the module shall not be imported or bundled.
+7. The `pino-http` module shall be imported lazily inside the `createHttpLoggerMiddleware` function body (not at module top-level), so that bundlers (e.g., Turbopack, webpack) do not include the Node.js-only `pino-http` in browser bundles when `@growi/logger` is imported by shared/universal code.
+8. While in development mode with morgan-like formatting enabled, the HTTP log output shall suppress the verbose `req` and `res` serialized objects; the `customSuccessMessage` output (method, URL, status code, response time) is sufficient for development readability.
+9. While in development mode, the morgan-like format shall colorize the HTTP status code by range (2xx=green, 3xx=cyan, 4xx=yellow, 5xx=red) and dim the response time, respecting the `NO_COLOR` environment variable.

+ 224 - 0
.kiro/specs/migrate-logger-to-pino/research.md

@@ -0,0 +1,224 @@
+# Research & Design Decisions
+
+---
+**Purpose**: Capture discovery findings, architectural investigations, and rationale that inform the technical design.
+---
+
+## Summary
+- **Feature**: `migrate-logger-to-pino`
+- **Discovery Scope**: Complex Integration
+- **Key Findings**:
+  - Pino and bunyan share identical argument patterns (`logger.info(obj, msg)`) — no call-site changes needed
+  - No `logger.child()` or custom serializers used in GROWI — simplifies migration significantly
+  - `@opentelemetry/instrumentation-pino` supports pino `<10`; need to verify v9.x or v10 compatibility
+  - No off-the-shelf pino package replicates universal-bunyan's namespace-based level control; custom wrapper required
+
+## Research Log
+
+### Pino Core API Compatibility with Bunyan
+- **Context**: Need to confirm argument pattern compatibility to minimize call-site changes
+- **Sources Consulted**: pino GitHub docs (api.md), npm pino@10.3.1
+- **Findings**:
+  - Log level numeric values are identical: trace=10, debug=20, info=30, warn=40, error=50, fatal=60
+  - Method signature: `logger[level]([mergingObject], [message], [...interpolationValues])` — same as bunyan
+  - `name` option adds a `"name"` field to JSON output, same as bunyan
+  - `msg` is the default message key (same as bunyan), configurable via `messageKey`
+  - `pino.child(bindings, options)` works similarly to bunyan's `child()`
+- **Implications**: Call sites using `logger.info('msg')`, `logger.info({obj}, 'msg')`, `logger.error(err)` require no changes
+
+### Pino Browser Support
+- **Context**: universal-bunyan uses browser-bunyan + ConsoleFormattedStream for client-side logging
+- **Sources Consulted**: pino GitHub docs (browser.md)
+- **Findings**:
+  - Pino has built-in browser mode activated via package.json `browser` field
+  - Maps to console methods: `console.error` (fatal/error), `console.warn`, `console.info`, `console.debug`, `console.trace`
+  - `browser.asObject: true` outputs structured objects
+  - `browser.write` allows custom per-level handlers
+  - Level control works the same as Node.js (`level` option)
+  - No separate package needed (unlike browser-bunyan)
+- **Implications**: Eliminates browser-bunyan and @browser-bunyan/console-formatted-stream dependencies entirely
+
+### Pino-Pretty as Bunyan-Format Replacement
+- **Context**: universal-bunyan uses bunyan-format with `short` (dev) and `long` (prod) output modes
+- **Sources Consulted**: pino-pretty npm (v13.1.3)
+- **Findings**:
+  - Can be used as transport (worker thread) or stream (main thread)
+  - Short mode equivalent: `singleLine: true` + `ignore: 'pid,hostname'`
+  - Long mode equivalent: default multi-line output
+  - `translateTime: 'SYS:standard'` for human-readable timestamps
+  - TTY-only pattern: conditionally enable based on `process.stdout.isTTY`
+- **Implications**: Direct replacement for bunyan-format with equivalent modes
+
+### Pino-HTTP as Morgan/Express-Bunyan-Logger Replacement
+- **Context**: GROWI uses morgan (dev) and express-bunyan-logger (prod) for HTTP request logging
+- **Sources Consulted**: pino-http npm (v11.0.0)
+- **Findings**:
+  - Express middleware with `autoLogging.ignore` for route skipping (replaces morgan's `skip`)
+  - Accepts custom pino logger instance via `logger` option
+  - `customLogLevel` for status-code-based level selection
+  - `req.log` provides child logger with request context
+  - Replaces both morgan and express-bunyan-logger in a single package
+- **Implications**: Unified HTTP logging for both dev and prod, with route filtering support
+
+### Namespace-Based Level Control
+- **Context**: universal-bunyan provides namespace-to-level mapping with minimatch glob patterns and env var overrides
+- **Sources Consulted**: pino-debug (v4.0.2), pino ecosystem search
+- **Findings**:
+  - pino-debug bridges the `debug` module but doesn't provide general namespace-level control
+  - No official pino package replicates universal-bunyan's behavior
+  - Custom implementation needed: wrapper that caches pino instances per namespace, reads config + env vars, applies minimatch matching
+  - Can use pino's `level` option per-instance (set at creation time)
+- **Implications**: Must build `@growi/logger` package as a custom wrapper around pino, replacing universal-bunyan
+
+### OpenTelemetry Instrumentation
+- **Context**: GROWI has a custom DiagLogger adapter wrapping bunyan, and disables @opentelemetry/instrumentation-bunyan
+- **Sources Consulted**: @opentelemetry/instrumentation-pino npm (v0.59.0)
+- **Findings**:
+  - Supports pino `>=5.14.0 <10` — pino v10 may not be supported yet
+  - Provides trace correlation (trace_id, span_id injection) and log sending to OTel SDK
+  - GROWI's DiagLoggerBunyanAdapter pattern maps cleanly to pino (same method names)
+  - Current code disables bunyan instrumentation; equivalent disable for pino instrumentation may be needed
+- **Implications**: Pin pino to v9.x for OTel compatibility, or verify v10 support. DiagLogger adapter changes are minimal.
+
+### Existing Call-Site Analysis
+- **Context**: Need to understand what API surface is actually used to minimize migration risk
+- **Sources Consulted**: Codebase grep across all apps and packages
+- **Findings**:
+  - **No `logger.child()` usage** anywhere in the codebase
+  - **No custom serializers** registered or used
+  - **No `logger.fields` access** or other bunyan-specific APIs
+  - Call patterns: ~30% simple string, ~50% string+object, ~10% error-only, ~10% string+error
+  - All loggers created via `loggerFactory(name)` — single entry point
+- **Implications**: Migration is primarily a factory-level change; call sites need no modification
+
+## Architecture Pattern Evaluation
+
+| Option | Description | Strengths | Risks / Limitations | Notes |
+|--------|-------------|-----------|---------------------|-------|
+| Drop-in wrapper (`@growi/logger`) | Shared package providing `loggerFactory()` over pino with namespace/config/env support | Minimal call-site changes, single source of truth, testable in isolation | Must implement namespace matching (minimatch) | Mirrors universal-bunyan's role |
+| Direct pino usage per app | Each app creates pino instances directly | No wrapper overhead | Duplicated config logic, inconsistent behavior across apps | Rejected: violates Req 8 |
+| pino-debug bridge | Use pino-debug for namespace control | Leverages existing package | Only works with `debug()` calls, not general logging | Rejected: wrong abstraction |
+
+## Design Decisions
+
+### Decision: Create `@growi/logger` as Shared Package
+- **Context**: universal-bunyan is a custom wrapper; need equivalent for pino
+- **Alternatives Considered**:
+  1. Direct pino usage in each app — too much duplication
+  2. Fork/patch universal-bunyan for pino — complex, hard to maintain
+  3. New shared package `@growi/logger` — clean, purpose-built
+- **Selected Approach**: New `@growi/logger` package in `packages/logger/`
+- **Rationale**: Single source of truth, testable, follows monorepo patterns (like @growi/core)
+- **Trade-offs**: One more package to maintain, but replaces external dependency
+- **Follow-up**: Define package exports, ensure tree-shaking for browser builds
+
+### Decision: Pin Pino to v9.x for OpenTelemetry Compatibility
+- **Context**: @opentelemetry/instrumentation-pino supports `<10`
+- **Alternatives Considered**:
+  1. Use pino v10 and skip OTel auto-instrumentation — loses correlation
+  2. Use pino v9 for compatibility — safe choice
+  3. Use pino v10 and verify latest instrumentation support — risky
+- **Selected Approach**: Start with pino v9.x; upgrade to v10 when OTel adds support
+- **Rationale**: OTel trace correlation is valuable for production observability
+- **Trade-offs**: Miss latest pino features temporarily
+- **Follow-up**: Monitor @opentelemetry/instrumentation-pino releases for v10 support
+
+### Decision: Use pino-pretty as Transport in Development
+- **Context**: Need human-readable output for dev, JSON for prod
+- **Alternatives Considered**:
+  1. pino-pretty as transport (worker thread) — standard approach
+  2. pino-pretty as sync stream — simpler but blocks main thread
+- **Selected Approach**: Transport for async dev logging; raw JSON in production
+- **Rationale**: Transport keeps main thread clear; dev perf is less critical but the pattern is correct
+- **Trade-offs**: Slightly more complex setup
+- **Follow-up**: Verify transport works correctly with Next.js dev server
+
+### Decision: Unified HTTP Logging with pino-http
+- **Context**: Currently uses morgan (dev) and express-bunyan-logger (prod) — two different middlewares
+- **Alternatives Considered**:
+  1. Separate dev/prod middleware (maintain split) — unnecessary complexity
+  2. Single pino-http middleware for both — clean, consistent
+- **Selected Approach**: pino-http with route filtering replaces both
+- **Rationale**: Single middleware, consistent output format, built-in request context
+- **Trade-offs**: Dev output slightly different from morgan's compact format (mitigated by pino-pretty)
+- **Follow-up**: Configure `autoLogging.ignore` for `/_next/static/` paths
+
+## Risks & Mitigations
+- **OTel instrumentation compatibility with pino version** — Mitigated by pinning to v9.x
+- **Browser bundle size increase** — Pino browser mode is lightweight; monitor with build metrics
+- **Subtle log format differences** — Acceptance test comparing output before/after
+- **Missing env var behavior** — Port minimatch logic carefully with unit tests
+- **Express middleware ordering** — Ensure pino-http is added at the same point in middleware chain
+
+### Phase 2: Formatting Improvement Research
+
+#### pino-http Custom Message API (v11.0.0)
+- **Context**: Need morgan-like concise HTTP log messages instead of pino-http's verbose default
+- **Sources Consulted**: pino-http v11.0.0 type definitions (index.d.ts), source code (logger.js)
+- **Findings**:
+  - `customSuccessMessage: (req: IM, res: SR, responseTime: number) => string` — called on successful response (statusCode < 500)
+  - `customErrorMessage: (req: IM, res: SR, error: Error) => string` — called on error response
+  - `customReceivedMessage: (req: IM, res: SR) => string` — called when request received (optional, only if autoLogging enabled)
+  - `customLogLevel: (req: IM, res: SR, error?: Error) => LevelWithSilent` — dynamic log level based on status code
+  - `customSuccessObject: (req, res, val) => any` — custom fields for successful response log
+  - `customErrorObject: (req, res, error, val) => any` — custom fields for error response log
+  - `customAttributeKeys: { req?, res?, err?, reqId?, responseTime? }` — rename default keys
+  - Response time is calculated as `Date.now() - res[startTime]` in milliseconds
+  - Error conditions: error passed to handler, `res.err` set, or `res.statusCode >= 500`
+- **Implications**: `customSuccessMessage` + `customErrorMessage` + `customLogLevel` are sufficient to achieve morgan-like output format
+
+#### pino-pretty singleLine Option
+- **Context**: User wants one-liner readable logs when FORMAT_NODE_LOG=true
+- **Sources Consulted**: pino-pretty v13.x documentation
+- **Findings**:
+  - `singleLine: true` forces all log properties onto a single line
+  - `singleLine: false` (default) outputs properties on separate indented lines
+  - Combined with `ignore: 'pid,hostname'`, singleLine produces concise output
+  - The `messageFormat` option can further customize the format string
+- **Implications**: Changing `singleLine` from `false` to `true` in the production FORMAT_NODE_LOG path directly addresses the user's readability concern
+
+#### FORMAT_NODE_LOG Default Semantics Analysis
+- **Context**: `isFormattedOutputEnabled()` returns `true` when env var is unset; production JSON depends on `.env.production`
+- **Analysis**:
+  - `.env.production` sets `FORMAT_NODE_LOG=false` — this is the mechanism that ensures JSON in production
+  - CI sets `FORMAT_NODE_LOG=true` explicitly — not affected by default change
+  - If `.env.production` fails to load in a Docker override scenario, production would silently get pino-pretty
+  - However, inverting the default is a behavioral change with broader implications
+- **Decision**: Defer to separate PR. Current behavior is correct in practice (`.env.production` always loaded by Next.js dotenv-flow).
+
+## Phase 3: Implementation Discoveries
+
+### Browser Bundle Compatibility — pino-http Top-Level Import
+- **Context**: `pino-http` was initially imported at the module top-level in `http-logger.ts`. This caused Turbopack to include the Node.js-only module in browser bundles, producing `TypeError: __turbopack_context__.r(...).symbols is undefined`.
+- **Root cause**: `@growi/logger` is imported by shared page code that runs in both browser and server contexts. Any top-level import of a Node.js-only module (like pino-http) gets pulled into the browser bundle.
+- **Fix**: Move the `pino-http` import inside the async function body using dynamic import: `const { default: pinoHttp } = await import('pino-http')`. This defers the import to runtime when the function is actually called (server-side only).
+- **Pattern**: This is the standard pattern for Node.js-only modules in packages shared with browser code. Apply the same treatment to any future Node.js-only additions to `@growi/logger`.
+
+### Dev-Only Module Physical Isolation (`src/dev/`)
+- **Context**: `bunyan-format.ts` (custom pino transport) and `morgan-like-format-options.ts` were initially placed at `src/transports/` and `src/` root respectively, mixed with production modules.
+- **Problem**: No clear boundary between dev-only and production-safe modules; risk of accidentally importing dev modules in production paths.
+- **Fix**: Created `src/dev/` directory as the explicit boundary for development-only modules. `TransportFactory` references `./dev/bunyan-format.js` only in the dev branch — the path is never constructed in production code paths.
+- **Vite config**: `preserveModules: true` ensures `src/dev/bunyan-format.ts` builds to `dist/dev/bunyan-format.js` with the exact path that `pino.transport({ target: ... })` references at runtime.
+
+### Single Worker Thread Model — Critical Implementation Detail
+- **Context**: Initial implementation called `pino.transport()` inside `loggerFactory(name)`, spawning a new Worker thread for each namespace.
+- **Fix**: Refactored so `pino.transport()` is called **once** in `initializeLoggerFactory`, and `loggerFactory(name)` calls `rootLogger.child({ name })` to create namespace-bound loggers sharing the single Worker thread.
+- **Root logger level**: Must be set to `'trace'` (not `'info'`) so child loggers can independently set their resolved level without being silenced by the root. If the root is `'info'`, a child with `level: 'debug'` will still be filtered at the root level.
+- **Constraint for future changes**: Never call `pino.transport()` or `pino()` inside `loggerFactory()`. All transport setup belongs in `initializeLoggerFactory()`.
+
+### pino Logger Type Compatibility with pino-http
+- **Context**: `loggerFactory()` returned `pino.Logger<never>` (the default), which is not assignable to pino-http's expected `Logger` type.
+- **Fix**: Export `Logger<string>` from `@growi/logger` and type `loggerFactory` to return `Logger<string>`. This is compatible with pino-http's `logger` option.
+- **Why `<string>` not `<never>`**: pino's default generic `CustomLevels` is `never`, which makes the type incompatible with APIs expecting custom levels to potentially be strings. `Logger<string>` is the correct type for external APIs.
+
+### `@growi/logger` Package Visibility
+- **Decision**: `"private": true` is correct and intentional.
+- **Rationale**: All consumers (`apps/app`, `apps/slackbot-proxy`, `packages/slack`, etc.) are monorepo-internal packages that reference `@growi/logger` via `workspace:*` protocol. The `private` flag only prevents npm publish, not workspace usage. `@growi/logger` is logging infrastructure — there is no reason to expose it externally (unlike `@growi/core` or `@growi/pluginkit` which are published for external plugin developers).
+
+## References
+- [pino API docs](https://github.com/pinojs/pino/blob/main/docs/api.md)
+- [pino browser docs](https://github.com/pinojs/pino/blob/main/docs/browser.md)
+- [pino-pretty npm](https://www.npmjs.com/package/pino-pretty)
+- [pino-http npm](https://www.npmjs.com/package/pino-http)
+- [@opentelemetry/instrumentation-pino](https://www.npmjs.com/package/@opentelemetry/instrumentation-pino)
+- [universal-bunyan source](https://github.com/weseek/universal-bunyan) — current implementation reference

+ 23 - 0
.kiro/specs/migrate-logger-to-pino/spec.json

@@ -0,0 +1,23 @@
+{
+  "feature_name": "migrate-logger-to-pino",
+  "created_at": "2026-03-23T00:00:00.000Z",
+  "updated_at": "2026-04-06T00:00:00.000Z",
+  "language": "en",
+  "phase": "implementation-complete",
+  "cleanup_completed": true,
+  "approvals": {
+    "requirements": {
+      "generated": true,
+      "approved": true
+    },
+    "design": {
+      "generated": true,
+      "approved": true
+    },
+    "tasks": {
+      "generated": true,
+      "approved": false
+    }
+  },
+  "ready_for_implementation": true
+}

+ 263 - 0
.kiro/specs/migrate-logger-to-pino/tasks.md

@@ -0,0 +1,263 @@
+# Implementation Plan
+
+- [x] 1. Scaffold the @growi/logger shared package
+- [x] 1.1 Initialize the package directory, package.json, and TypeScript configuration within the monorepo packages directory
+  - Create the workspace entry as `@growi/logger` with pino v9.x and minimatch as dependencies, pino-pretty as an optional peer dependency
+  - Configure TypeScript with strict mode, ESM output, and appropriate path aliases
+  - Set up the package entry points (main, types, browser) so that bundlers resolve the correct build for Node.js vs browser
+  - Add vitest configuration for unit testing within the package
+  - _Requirements: 8.5_
+
+- [x] 1.2 Define the shared type contracts and configuration interface
+  - Define the `LoggerConfig` type representing a namespace-pattern-to-level mapping (including a `default` key)
+  - Define the `LoggerFactoryOptions` type accepted by the initialization function
+  - Export the pino `Logger` type so consumers can type-annotate their logger variables without importing pino directly
+  - _Requirements: 10.3_
+
+- [x] 2. Implement environment variable parsing and level resolution
+- [x] 2.1 (P) Build the environment variable parser
+  - Read the six log-level environment variables (`DEBUG`, `TRACE`, `INFO`, `WARN`, `ERROR`, `FATAL`) from the process environment
+  - Split each variable's value by commas and trim whitespace to extract individual namespace patterns
+  - Return a flat config map where each namespace pattern maps to its corresponding level string
+  - Handle edge cases: empty values, missing variables, duplicate patterns (last wins)
+  - Write unit tests covering: single variable with multiple patterns, all six variables set, no variables set, whitespace handling
+  - _Requirements: 3.1, 3.4, 3.5_
+
+- [x] 2.2 (P) Build the level resolver with glob pattern matching
+  - Accept a namespace string, a config map, and an env-override map; return the resolved level
+  - Check env-override map first (using minimatch for glob matching), then config map, then fall back to the config `default` entry
+  - When multiple patterns match, prefer the most specific (longest non-wildcard prefix) match
+  - Write unit tests covering: exact match, glob wildcard match, env override precedence over config, fallback to default, no matching pattern
+  - _Requirements: 2.1, 2.3, 2.4, 3.2, 3.3_
+
+- [x] 3. Implement the transport factory for dev, prod, and browser environments
+- [x] 3.1 (P) Build the Node.js transport configuration
+  - In development mode, produce pino-pretty transport options with human-readable timestamps, hidden pid/hostname fields, and multi-line output
+  - In production mode with `FORMAT_NODE_LOG` explicitly disabled, produce raw JSON output to stdout
+  - When the `FORMAT_NODE_LOG` environment variable is unset or truthy in production, produce pino-pretty transport options with long-format output instead of raw JSON
+  - Include the logger namespace (`name` field) in all output configurations
+  - Write unit tests verifying correct options for each combination of NODE_ENV and FORMAT_NODE_LOG
+  - _Requirements: 5.1, 5.2, 5.3, 5.4_
+
+- [x] 3.2 (P) Build the browser transport configuration
+  - Detect the browser environment using window/document checks
+  - In browser development mode, produce pino browser options that output to the developer console with the resolved namespace level
+  - In browser production mode, produce pino browser options that default to `error` level to suppress non-critical console output
+  - Write unit tests verifying browser options for dev and prod scenarios
+  - _Requirements: 4.1, 4.2, 4.3, 4.4_
+
+- [x] 4. Implement the logger factory with caching and platform detection
+- [x] 4.1 Build the initialization and factory functions
+  - Implement `initializeLoggerFactory(options)` that stores the merged configuration, pre-parses environment overrides, and prepares the transport config
+  - Implement `loggerFactory(name)` that checks the cache for an existing logger, resolves the level via the level resolver, creates a pino instance with appropriate transport options, caches it, and returns it
+  - Detect the runtime platform (Node.js vs browser) and apply the corresponding transport configuration from the transport factory
+  - Ensure the module exports `loggerFactory` as the default export and `initializeLoggerFactory` as a named export for backward compatibility with existing import patterns
+  - Write unit tests covering: cache hit returns same instance, different namespaces return different instances, initialization stores config correctly
+  - _Requirements: 1.1, 1.2, 1.3, 1.4, 4.1, 10.1_
+
+- [x] 5. Migrate shared packages to @growi/logger (small scope first)
+- [x] 5.1 (P) Update packages/slack logger to use @growi/logger
+  - Replace the logger factory implementation to import from `@growi/logger` instead of universal-bunyan
+  - Update the inline config (`{ default: 'info' }`) to use the @growi/logger initialization pattern
+  - Replace bunyan type imports with the @growi/logger Logger type
+  - Add `@growi/logger` to packages/slack dependencies
+  - Run TypeScript compilation to verify no type errors
+  - _Requirements: 8.3_
+
+- [x] 5.2 (P) Update packages/remark-attachment-refs logger to use @growi/logger
+  - Replace the logger factory implementation to import from `@growi/logger`
+  - Update configuration and type imports to match the new package
+  - Add `@growi/logger` to packages/remark-attachment-refs dependencies
+  - Run TypeScript compilation to verify no type errors
+  - _Requirements: 8.4_
+
+- [x] 5.3 Fix pino-style logger call sites in packages/slack
+  - In the following files, convert all `logger.method('message', obj)` calls to the pino-canonical form `logger.method({ obj }, 'message')` (object first, message second)
+  - `src/middlewares/verify-growi-to-slack-request.ts` (lines 25, 34)
+  - `src/middlewares/verify-slack-request.ts` (lines 25, 36, 45, 76)
+  - `src/utils/interaction-payload-accessor.ts` (line 104)
+  - Run `pnpm --filter @growi/slack lint:typecheck` and confirm zero TS2769 errors
+  - _Requirements: 10.1_
+
+- [x] 5.4 Fix pino-style logger call site in packages/remark-attachment-refs
+  - In `src/client/services/renderer/refs.ts` (line 107), convert `logger.debug('message', attributes)` to `logger.debug({ attributes }, 'message')`
+  - Run `pnpm --filter @growi/remark-attachment-refs lint:typecheck` and confirm the TS2769 error is gone
+  - _Requirements: 10.1_
+
+- [x] 5.5 Migrate packages/remark-lsx server routes to use @growi/logger
+  - Add `@growi/logger` to packages/remark-lsx dependencies
+  - Create `src/utils/logger/index.ts` following the same pattern as remark-attachment-refs (import from `@growi/logger`, call `initializeLoggerFactory`, re-export `loggerFactory`)
+  - Replace `console.error` calls in `src/server/routes/list-pages/index.ts` (lines 89, 145-148) with proper logger calls using `loggerFactory('growi:remark-lsx:routes:list-pages')`
+  - Remove the `biome-ignore lint/suspicious/noConsole` comments from the replaced call sites
+  - Run `pnpm --filter @growi/remark-lsx lint:typecheck` to confirm no type errors
+  - _Requirements: 8.5_
+
+- [x] 6. Migrate apps/slackbot-proxy to @growi/logger
+- [x] 6.1 Replace the logger factory and HTTP middleware in slackbot-proxy
+  - Update the slackbot-proxy logger utility to import from `@growi/logger` and call `initializeLoggerFactory` with its existing dev/prod config
+  - Replace express-bunyan-logger and morgan usage in the server setup with pino-http middleware
+  - Replace all `import type Logger from 'bunyan'` references with the @growi/logger Logger type
+  - Add `@growi/logger` and `pino-http` to slackbot-proxy dependencies
+  - Run TypeScript compilation to verify no type errors
+  - _Requirements: 8.2, 6.1_
+
+- [x] 6.6 Fix pino-style logger call sites in apps/slackbot-proxy
+  - In the following files, convert all `logger.method('message', obj)` calls to `logger.method({ obj }, 'message')`
+  - `src/controllers/growi-to-slack.ts` (lines 109, 179, 231, 243, 359)
+  - `src/controllers/slack.ts` (lines 388, 586)
+  - `src/services/RegisterService.ts` (line 165)
+  - Run `pnpm --filter @growi/slackbot-proxy lint:typecheck` and confirm zero TS2769 errors
+  - _Requirements: 10.1_
+
+- [x] 6.7 Fix @growi/logger Logger type export and remove `as any` cast in slackbot-proxy
+  - In `packages/logger`, update the `loggerFactory` return type so it is compatible with `pino-http`'s `logger` option (i.e., `pino.Logger` without `<never>` narrowing, or by exporting `Logger<string>`)
+  - After the type export is fixed, remove the `as any` cast from `apps/slackbot-proxy/src/Server.ts` (line 166) and the associated `biome-ignore` comment
+  - Run `pnpm --filter @growi/slackbot-proxy lint:typecheck` to confirm no residual type errors
+  - _Requirements: 10.3_
+
+- [x] 6.5 Fix logger factory to preserve pino's single-worker-thread performance model
+  - Refactor `initializeLoggerFactory` to create the pino transport (`pino.transport()`) and root pino logger **once**, storing them in module scope
+  - Set the root logger's level to `'trace'` so that individual child loggers can apply their own resolved level without being silenced by the root
+  - Refactor `loggerFactory(name)` to call `rootLogger.child({ name })` and then set `childLogger.level = resolvedLevel` instead of calling `pino()` + `pino.transport()` per namespace
+  - Handle browser mode separately: the root browser logger is created once in `initializeLoggerFactory`; `loggerFactory` still calls `.child({ name })` and applies the resolved level
+  - Update unit tests in `logger-factory.spec.ts` to verify that calling `loggerFactory` for N distinct namespaces does not create N independent pino instances (all children share the root transport)
+  - _Requirements: 11.1, 11.2, 11.3, 11.4_
+
+- [x] 7. Migrate apps/app to @growi/logger (largest scope)
+- [x] 7.1 Replace the logger factory module in apps/app
+  - Update the apps/app logger utility to import from `@growi/logger` instead of `universal-bunyan`
+  - Call `initializeLoggerFactory` at application startup with the existing dev/prod config files (preserve current config content)
+  - Re-export `loggerFactory` as the default export so all existing consumer imports continue to work unchanged
+  - Add `@growi/logger` to apps/app dependencies and ensure pino-pretty is available for development formatting
+  - _Requirements: 8.1, 2.2_
+
+- [x] 7.2 Replace HTTP request logging middleware in apps/app
+  - Remove the morgan middleware (development mode) and express-bunyan-logger middleware (production mode) from the Express initialization
+  - Add pino-http middleware configured with a logger from the factory using the `express` namespace
+  - Configure route skipping to exclude `/_next/static/` paths in non-production mode
+  - Verify the middleware produces log entries containing method, URL, status code, and response time
+  - _Requirements: 6.1, 6.2, 6.3, 6.4_
+
+- [x] 7.3 Update the OpenTelemetry diagnostic logger adapter
+  - Rename the adapter class from `DiagLoggerBunyanAdapter` to `DiagLoggerPinoAdapter` and update the import to use pino types
+  - Preserve the existing `parseMessage` helper logic that parses JSON strings and merges argument objects
+  - Confirm the verbose-to-trace level mapping continues to work with pino's trace level
+  - Update the OpenTelemetry SDK configuration to disable `@opentelemetry/instrumentation-pino` instead of `@opentelemetry/instrumentation-bunyan`
+  - _Requirements: 7.1, 7.2, 7.3_
+
+- [x] 7.4 Update all bunyan type references in apps/app source files
+  - Replace `import type Logger from 'bunyan'` with the Logger type exported from `@growi/logger` across all source files in apps/app
+  - Verify that pino's Logger type is compatible with all existing usage patterns (info, debug, warn, error, trace, fatal method calls)
+  - Run the TypeScript compiler to confirm no type errors
+  - _Requirements: 10.1, 10.2, 10.3_
+
+- [x] 8. Remove old logging dependencies and verify cleanup
+- [x] 8.1 Remove bunyan-related packages from all package.json files
+  - Remove `bunyan`, `universal-bunyan`, `bunyan-format`, `express-bunyan-logger`, `browser-bunyan`, `@browser-bunyan/console-formatted-stream`, `@types/bunyan` from every package.json in the monorepo
+  - Remove `morgan` and `@types/morgan` from every package.json in the monorepo
+  - Run `pnpm install` to update the lockfile and verify no broken peer dependency warnings
+  - _Requirements: 9.1, 9.2_
+
+- [x] 8.2 Verify no residual references to removed packages
+  - Search all source files for any remaining imports or requires of the removed packages (bunyan, universal-bunyan, browser-bunyan, express-bunyan-logger, morgan, bunyan-format)
+  - Search all configuration and type definition files for stale bunyan references
+  - Fix any remaining references found during the search
+  - _Requirements: 9.3_
+
+- [x] 9. Run full monorepo validation
+- [x] 9.1 Execute lint, type-check, test, and build across the monorepo
+  - Run `turbo run lint --filter @growi/app` and fix any lint errors related to the migration
+  - Run `turbo run test --filter @growi/app` and verify all existing tests pass
+  - Run `turbo run build --filter @growi/app` and confirm the production build succeeds
+  - Run the same checks for slackbot-proxy and any other affected packages
+  - Verify the @growi/logger package's own tests pass
+  - _Requirements: 1.4, 8.1, 8.2, 8.3, 8.4, 10.1, 10.2_
+
+- [x] 10. Improve log output formatting for readability
+- [x] 10.1 (P) Differentiate pino-pretty singleLine between dev and production FORMAT_NODE_LOG
+  - In the transport factory, change the production + FORMAT_NODE_LOG path to use `singleLine: true` for concise one-liner output
+  - Keep the development path at `singleLine: false` so developers see full multi-line context
+  - Update unit tests to verify: dev returns `singleLine: false`, production + FORMAT_NODE_LOG returns `singleLine: true`, production with FORMAT_NODE_LOG explicitly disabled still returns no transport
+  - _Requirements: 5.1, 5.3_
+
+- [x] 10.2 (P) Add morgan-like HTTP request message formatting to pino-http in apps/app
+  - Configure `customSuccessMessage` to produce `METHOD /url STATUS - TIMEms` format (e.g., `GET /page/path 200 - 12ms`)
+  - Configure `customErrorMessage` to include the error message alongside method, URL, and status code
+  - Configure `customLogLevel` to return `warn` for 4xx responses and `error` for 5xx or error responses, keeping `info` for successful requests
+  - Verify that `/_next/static/` path skipping in dev mode still works after the changes
+  - _Requirements: 6.1, 6.4_
+
+- [x] 10.3 (P) Add morgan-like HTTP request message formatting to pino-http in apps/slackbot-proxy
+  - Apply the same `customSuccessMessage`, `customErrorMessage`, and `customLogLevel` configuration as apps/app
+  - _Requirements: 6.1, 6.4_
+
+- [x] 11. Validate formatting improvements
+- [x] 11.1 Run tests and build for affected packages
+  - Run the @growi/logger package tests to confirm transport factory changes pass
+  - Run lint and type-check for apps/app and apps/slackbot-proxy
+  - Verify the production build succeeds
+  - _Requirements: 5.1, 5.3, 6.1, 6.4_
+
+- [x] 12. Implement bunyan-like output format (development only)
+- [x] 12.1 Create the bunyan-format custom transport module
+  - Create `packages/logger/src/transports/bunyan-format.ts` that default-exports a function returning a pino-pretty stream
+  - Use `customPrettifiers.time` to format epoch as `HH:mm:ss.SSSZ` (UTC time-only, no brackets)
+  - Use `customPrettifiers.level` to return `${label.padStart(5)} ${log.name}` (right-aligned 5-char level + namespace)
+  - Set `ignore: 'pid,hostname,name'` so name appears via the level prettifier, not in pino-pretty's default parens
+  - Accept `singleLine` option to pass through to pino-pretty
+  - Verify the module is built to `dist/transports/bunyan-format.js` by vite's `preserveModules` config
+  - _Requirements: 12.1, 12.2, 12.3, 12.4, 12.5, 12.6_
+
+- [x] 12.2 Update TransportFactory to use bunyan-format transport in dev only
+  - In the **development** branch of `createNodeTransportOptions`, change the transport target from `'pino-pretty'` to the resolved path of `bunyan-format.js` (via `import.meta.url`)
+  - Remove `translateTime` and `ignore` options from the dev transport config (now handled inside the custom transport)
+  - Pass `singleLine: false` for dev
+  - In the **production + FORMAT_NODE_LOG** branch, keep `target: 'pino-pretty'` with standard options (`translateTime: 'SYS:standard'`, `ignore: 'pid,hostname'`, `singleLine: true`) — do NOT use bunyan-format
+  - The bunyan-format module path is only resolved in the dev code path, ensuring it is never imported in production
+  - Update unit tests in `transport-factory.spec.ts`: dev target contains `bunyan-format`; prod + FORMAT_NODE_LOG target is `'pino-pretty'`
+  - _Requirements: 12.1, 12.6, 12.7, 12.8_
+
+- [x] 12.3 Verify bunyan-format output
+  - Run the dev server and confirm log output matches the bunyan-format "short" style: `HH:mm:ss.SSSZ LEVEL name: message`
+  - Confirm colorization works (DEBUG=cyan, INFO=green, WARN=yellow, ERROR=red)
+  - Confirm multi-line output in dev (extra fields on subsequent lines)
+  - _Requirements: 12.1, 12.2, 12.3, 12.4, 12.5_
+
+- [x] 13. Encapsulate pino-http in @growi/logger
+- [x] 13.1 Create HTTP logger middleware factory in @growi/logger
+  - Create `packages/logger/src/http-logger.ts` exporting `async createHttpLoggerMiddleware(options?)`
+  - The function creates `pinoHttp` middleware internally with `loggerFactory(namespace)`
+  - In development mode (`NODE_ENV !== 'production'`): dynamically import `morganLikeFormatOptions` via `await import('./morgan-like-format-options')` and apply to pino-http options
+  - In production mode: use pino-http with default message formatting (no morgan-like module imported)
+  - Accept optional `namespace` (default: `'express'`) and `autoLogging` options
+  - Handle the `Logger<string>` → pino-http's expected Logger type assertion internally
+  - Add `pino-http` to `@growi/logger` package.json dependencies
+  - Export `createHttpLoggerMiddleware` from `packages/logger/src/index.ts`
+  - _Requirements: 13.1, 13.2, 13.3, 13.5, 13.6_
+
+- [x] 13.2 (P) Migrate apps/app to use createHttpLoggerMiddleware
+  - Replace the direct `pinoHttp` import and configuration in `apps/app/src/server/crowi/index.ts` with `await createHttpLoggerMiddleware(...)` from `@growi/logger`
+  - Pass the `/_next/static/` autoLogging ignore function via the options
+  - Remove `pino-http` and its type imports from the file
+  - Remove `morganLikeFormatOptions` import (now applied internally in dev only)
+  - Remove `pino-http` from `apps/app/package.json` if no longer directly used
+  - Run `pnpm --filter @growi/app lint:typecheck` to confirm no type errors
+  - _Requirements: 13.4_
+
+- [x] 13.3 (P) Migrate apps/slackbot-proxy to use createHttpLoggerMiddleware
+  - Replace the direct `pinoHttp` import and configuration in `apps/slackbot-proxy/src/Server.ts` with `await createHttpLoggerMiddleware(...)` from `@growi/logger`
+  - Remove `pino-http` and its type imports from the file
+  - Remove `morganLikeFormatOptions` import (now applied internally in dev only)
+  - Remove the `as unknown as` type assertion (now handled internally)
+  - Remove `pino-http` from `apps/slackbot-proxy/package.json` if no longer directly used
+  - Run `pnpm --filter @growi/slackbot-proxy lint:typecheck` to confirm no type errors
+  - _Requirements: 13.4_
+
+- [x] 14. Validate bunyan-format and HTTP encapsulation
+- [x] 14.1 Run full validation
+  - Run `@growi/logger` package tests
+  - Run lint and type-check for apps/app and apps/slackbot-proxy
+  - Run `turbo run build --filter @growi/app` to verify production build succeeds
+  - Verify no remaining direct `pino-http` imports in apps/app or apps/slackbot-proxy source files
+  - Verify that bunyan-format transport and morganLikeFormatOptions are NOT imported in production (grep for dynamic import pattern)
+  - _Requirements: 12.1, 12.6, 12.7, 13.4, 13.5, 13.6_

+ 108 - 0
.kiro/specs/news-inappnotification/requirements.md

@@ -0,0 +1,108 @@
+# Requirements Document
+
+## Introduction
+
+GROWI の InAppNotification にニュース配信・表示機能を追加する。外部の静的 JSON フィード(GitHub Pages)を GROWI 本体が cron で定期取得し、ローカル MongoDB にキャッシュした上で、InAppNotificationパネルおよび通知一覧ページにニュースとして表示する。
+
+ニュースは既存の InAppNotification とは別モデル(NewsItem)として管理する。InAppNotification はユーザーアクション起因で関係者のみに配信されるのに対し、ニュースは全ユーザー(またはロール単位)に配信されるため、1件のニュースを全ユーザーで共有する設計が SaaS 規模で効率的である。UI ではクライアント側で両データを時系列マージして統合表示する。
+
+## Requirements
+
+### Requirement 1: ニュースフィードの定期取得
+
+**Objective:** As a GROWI 運営者, I want GROWI が外部フィードからニュースを自動取得する, so that 各 GROWI インスタンスに最新のニュースが配信される
+
+#### Acceptance Criteria
+
+1. When cron スケジュールの実行時刻に達した場合, the News Cron Service shall 設定された URL から JSON フィードを HTTP GET で取得する
+2. When フィードの取得に成功した場合, the News Cron Service shall 取得したニュースアイテムをローカル MongoDB に upsert(`externalId` で重複排除)する
+3. When フィードに含まれなくなったニュースアイテムがある場合, the News Cron Service shall 該当アイテムをローカル DB から削除する
+4. When 複数の GROWI インスタンスが同時に取得を試みる場合, the News Cron Service shall ランダムスリープにより配信元へのリクエストを時間分散する
+5. If フィードの取得に失敗した場合, then the News Cron Service shall エラーをログに記録し、既存のキャッシュデータを維持する
+6. Where `NEWS_FEED_URL` が未設定または空の場合, the News Cron Service shall フィード取得をスキップしエラーなく動作する
+7. When ニュースアイテムに `growiVersionRegExps` 条件が設定されている場合, the News Cron Service shall 現在の GROWI バージョンと照合し、一致しないアイテムを除外する
+
+### Requirement 2: ニュースアイテムのローカルキャッシュ
+
+**Objective:** As a GROWI システム, I want 取得したニュースをローカル DB にキャッシュする, so that フィード配信元に障害が起きてもニュースを表示できる
+
+**Note:** NewsItem を既存の InAppNotification モデルで代替できない理由:①外部フィード由来コンテンツの重複排除に必要な `externalId`(ユニークインデックス)が InAppNotification に存在しない。②InAppNotification は per-user ドキュメント設計のため、ニュースに適用すると配信時点で全ユーザー分のドキュメントを強制生成する必要がある(例: 1000ユーザー × 10件 = 10,000件、さらに `snapshot` にニュース本文がユーザー数分コピーされる)。NewsItem は全ユーザーで1件を共有するため、SaaS規模で効率的である。③TTL管理(90日)はニュース固有の要件。
+
+#### Acceptance Criteria
+
+1. The NewsItem モデル shall `externalId` にユニークインデックスを持ち、重複登録を防止する
+2. The NewsItem モデル shall `publishedAt` にインデックスを持ち、公開日時順のソートを効率的に行う
+3. The NewsItem モデル shall `fetchedAt` に TTL インデックス(90日)を持ち、古いニュースを自動削除する
+4. The NewsItem モデル shall 多言語対応のタイトル・本文(`ja_JP`, `en_US`)を格納できる
+
+### Requirement 3: 既読/未読管理
+
+**Objective:** As a GROWI ユーザー, I want ニュースの既読/未読状態を管理したい, so that 新しいニュースを見逃さない
+
+**Note:** NewsReadStatus を既存の InAppNotification モデルで代替できない理由:InAppNotification の `status` フィールドは per-user ドキュメントに依存しており、ニュースの既読状態を管理するには配信時に全ユーザー分のドキュメントを作成しなければならない(1000ユーザー × 10件 = 配信時点で強制的に 10,000件)。NewsReadStatus はユーザーが実際に既読アクションを起こした時のみ作成される(未読はレコードなし)。全員が全件読まない限り実際のレコード数は常に 10,000件を下回り、SaaS規模でのストレージ効率が高い。
+
+#### Acceptance Criteria
+
+1. When ユーザーがニュースアイテムをクリックした場合, the News API shall 該当ユーザーとニュースアイテムの組み合わせで `NewsReadStatus` レコードを作成する
+2. While `NewsReadStatus` レコードが存在しない場合, the News API shall 該当ニュースを未読として扱う
+3. The NewsReadStatus モデル shall `userId + newsItemId` の複合ユニークインデックスにより重複登録を防止する
+4. When ニュース一覧を取得する場合, the News API shall 各ニュースアイテムに `isRead: true/false` を付与して返却する
+5. The News API shall ログインユーザーの未読ニュース数を返却するエンドポイントを提供する
+
+### Requirement 4: ロール別表示制御
+
+**Objective:** As a GROWI 運営者, I want ニュースの表示対象をロールで制御したい, so that 管理者向け情報を一般ユーザーに見せない
+
+**Note:** 表示制御はニュース配信側(GROWI運営)がフィードJSON内の `conditions.targetRoles` で指定する。インスタンス側(GROWI管理者)による制御は設けない。
+
+#### Acceptance Criteria
+
+1. When ニュースアイテムに `conditions.targetRoles` が設定されている場合, the News API shall ユーザーのロール(admin/general)に基づいてフィルタリングする
+2. When ニュースアイテムに `conditions.targetRoles` が未設定の場合, the News API shall 全ユーザーにニュースを表示する
+
+### Requirement 5: InAppNotification UI 統合表示
+
+**Objective:** As a GROWI ユーザー, I want 既存の InAppNotification UI でニュースを確認したい, so that 通知と同じ導線でニュースにアクセスできる
+
+**Note:** NewsItem と InAppNotification は別モデルとして維持する。UI のみクライアント側で両データを時系列マージして表示する。
+
+#### Acceptance Criteria
+
+1. The InAppNotificationパネル shall 通知とニュースを公開日時/作成日時の降順で混合した1つのリストとして表示する
+2. The InAppNotificationパネル shall 上部にフィルタボタン(「すべて」「通知」「お知らせ」)を配置し、デフォルトは「すべて」とする。「お知らせ」選択時はニュースのみ、「通知」選択時はニュース以外のすべての通知を表示する
+3. The InAppNotificationパネル shall 既存の「未読のみ」トグルスイッチを維持し、種別フィルタと組み合わせた2重フィルタリングを提供する。種別フィルタ(すべて/通知/お知らせ)で表示対象を絞り込んだ上で、トグルON時は未読アイテムのみをさらに絞り込む
+4. The InAppNotificationパネル shall リスト領域に最大高さを設定し、超過分はスクロールで表示する。スクロールが末端に達した場合は次のページを自動で読み込む無限スクロールとする
+5. The InAppNotificationパネル shall ニュースアイテムの `type` に応じた絵文字アイコンをタイトル前に表示する(`release`→🎉, `security`→⚠️, `tips`→💡, `maintenance`→🔧, `announcement`→📢, 未設定→📢)
+6. When ユーザーがニュースアイテムをクリックした場合, the InAppNotification UI shall ニュースの詳細 URL を新しいタブで開く
+7. When ユーザーがニュースアイテムをクリックした場合, the InAppNotification UI shall 該当ニュースを既読としてマークし、未読インジケータを更新する
+
+### Requirement 6: 既読/未読の視覚表示
+
+**Objective:** As a GROWI ユーザー, I want 未読のニュース・通知を視覚的に区別したい, so that 未確認の項目をすぐに見分けられる
+
+#### Acceptance Criteria
+
+1. The 未読アイテム shall タイトルを太字(`fw-bold`)で表示する
+2. The 未読アイテム shall 左端に青色の丸ドット(8px, `bg-primary`)を表示する
+3. The 既読アイテム shall タイトルを通常ウェイト(`fw-normal`)で表示する
+4. The 既読アイテム shall ドットと同じ幅の透明スペーサーを配置し、インデントを統一する
+
+### Requirement 7: 未読バッジ表示
+
+**Objective:** As a GROWI ユーザー, I want 未読ニュースの存在をバッジで把握したい, so that 新しいニュースがあることに気づける
+
+#### Acceptance Criteria
+
+1. The サイドバー通知アイコン shall 通知の未読数とニュースの未読数を合算してバッジに表示する
+2. When 全てのニュースが既読の場合, the バッジ shall ニュース分のカウントを含めない
+
+### Requirement 8: 多言語対応
+
+**Objective:** As a GROWI ユーザー, I want ニュースを自分の言語で読みたい, so that 内容を正しく理解できる
+
+#### Acceptance Criteria
+
+1. When ニュースアイテムに複数言語のテキストが含まれる場合, the NewsItem コンポーネント shall ブラウザの言語設定に応じたテキストを表示する
+2. If ブラウザの言語に対応するテキストが存在しない場合, then the NewsItem コンポーネント shall `ja_JP` → `en_US` の順にフォールバックする
+3. The UI ラベル(「ニュース」「ニュースはありません。」等) shall `ja_JP`, `en_US`, `zh_CN`, `ko_KR`, `fr_FR` の i18n ロケールファイルで提供する
+4. The フィルタボタン用ラベル(「通知」「お知らせ」)shall 全対応言語のロケールファイルに追加する

+ 22 - 0
.kiro/specs/news-inappnotification/spec.json

@@ -0,0 +1,22 @@
+{
+  "feature_name": "news-inappnotification",
+  "created_at": "2026-03-24T00:00:00Z",
+  "updated_at": "2026-03-24T01:00:00Z",
+  "language": "ja",
+  "phase": "requirements-generated",
+  "approvals": {
+    "requirements": {
+      "generated": true,
+      "approved": false
+    },
+    "design": {
+      "generated": false,
+      "approved": false
+    },
+    "tasks": {
+      "generated": false,
+      "approved": false
+    }
+  },
+  "ready_for_implementation": false
+}

+ 359 - 0
.kiro/specs/suggest-path/design.md

@@ -0,0 +1,359 @@
+# Design Document
+
+## Overview
+
+**Purpose**: AI-powered path suggestion API that helps AI clients (e.g., Claude via MCP) determine optimal save locations for page content in GROWI. The system analyzes content, searches for related pages, evaluates candidates, and returns directory path suggestions with metadata.
+
+**Users**: AI clients (Claude via MCP) call this endpoint on behalf of GROWI users during the "save to GROWI" workflow.
+
+### Goals
+
+- Single POST endpoint returning path suggestions with metadata (type, path, label, description, grant)
+- Memo path: guaranteed fallback with fixed metadata
+- Search-based suggestions: AI-powered with flow/stock classification, multi-candidate evaluation, and intelligent path proposals (including new paths)
+- Independent access control via separate `ai-tools` namespace from `/page`
+
+### Design Principles
+
+- **Client LLM independence**: Heavy reasoning (content analysis, candidate evaluation, path proposal, description generation) is centralized in GROWI AI on the server side. The API response includes structured data fields (`informationType`, `type`, `grant`) alongside natural language (`description`) so that even less capable LLM clients can make correct decisions.
+
+### Non-Goals
+
+- Page creation/saving (existing `POST /_api/v3/page` handles this)
+- Page title suggestion (Claude handles this via user dialogue)
+- Client-side "enter manually" option (Agent Skill responsibility)
+
+## Architecture
+
+### Boundary Map
+
+```mermaid
+graph TB
+    subgraph Client
+        MCP[MCP Server]
+    end
+
+    subgraph GROWI_API[GROWI API]
+        Router[ai-tools Router]
+        Handler[suggest-path Handler]
+        MemoGen[Memo Suggestion]
+        Analyzer[Content Analyzer - 1st AI Call]
+        Retriever[Search Candidate Retriever]
+        Evaluator[Candidate Evaluator - 2nd AI Call]
+        CategoryGen[Category Suggestion - Under Review]
+    end
+
+    subgraph Existing[Existing Services]
+        SearchSvc[Search Service]
+        GrantSvc[Page Grant Service]
+        AIFeature[GROWI AI - OpenAI Feature]
+    end
+
+    subgraph Data
+        ES[Elasticsearch]
+        Mongo[MongoDB - Pages]
+    end
+
+    MCP -->|POST suggest-path| Router
+    Router --> Handler
+    Handler --> MemoGen
+    Handler --> Analyzer
+    Analyzer --> AIFeature
+    Handler --> Retriever
+    Retriever --> SearchSvc
+    Handler --> Evaluator
+    Evaluator --> AIFeature
+    Handler --> CategoryGen
+    CategoryGen --> SearchSvc
+    SearchSvc --> ES
+    Evaluator --> GrantSvc
+    CategoryGen --> GrantSvc
+    GrantSvc --> Mongo
+```
+
+**Integration notes**:
+
+- Layered handler following existing GROWI route conventions
+- Domain boundaries: Route layer owns the endpoint, delegates to existing services (search, grant, AI) without modifying them
+- Existing patterns preserved: Handler factory pattern, middleware chain, `res.apiv3()` response format
+
+### Code Organization
+
+All suggest-path code resides in `features/ai-tools/suggest-path/` following the project's feature-based architecture pattern.
+
+```text
+apps/app/src/features/ai-tools/
+├── server/routes/apiv3/
+│   └── index.ts                              # Aggregation router for ai-tools namespace
+└── suggest-path/
+    ├── interfaces/
+    │   └── suggest-path-types.ts              # Shared types (PathSuggestion, ContentAnalysis, etc.)
+    └── server/
+        ├── routes/apiv3/
+        │   ├── index.ts                       # Route factory, handler + middleware chain
+        │   └── index.spec.ts
+        ├── services/
+        │   ├── generate-suggestions.ts        # Orchestrator
+        │   ├── generate-memo-suggestion.ts
+        │   ├── analyze-content.ts             # AI call #1: keyword extraction + flow/stock
+        │   ├── retrieve-search-candidates.ts  # ES search with score filtering
+        │   ├── evaluate-candidates.ts         # AI call #2: candidate evaluation + path proposal
+        │   ├── call-llm-for-json.ts           # Shared LLM call utility
+        │   ├── generate-category-suggestion.ts # Under review
+        │   ├── resolve-parent-grant.ts
+        │   └── *.spec.ts                      # Co-located tests
+        └── integration-tests/
+            └── suggest-path-integration.spec.ts
+```
+
+**Key decisions**:
+
+- **No barrel export**: Consumers import directly from subpaths (following `features/openai/` convention)
+- **Aggregation router retained**: The `ai-tools` router at `features/ai-tools/server/routes/apiv3/` imports the suggest-path route factory. This allows future ai-tools features to register under the same namespace
+- **R4 (CategorySuggestionGenerator)**: Under review — may be merged into AI evaluation approach post-discussion
+
+### Implementation Paradigm
+
+All components are pure functions with immutable data. No classes — no component currently meets class adoption criteria (shared dependency management or singleton state).
+
+### Request Flow
+
+```mermaid
+sequenceDiagram
+    participant Client as MCP Client
+    participant Handler as Orchestrator
+    participant AI1 as Content Analyzer
+    participant Search as Search Service
+    participant AI2 as Candidate Evaluator
+    participant Grant as Grant Resolver
+    participant CatGen as Category Generator
+
+    Client->>Handler: POST with body content
+    Handler->>Handler: Generate memo suggestion
+
+    Handler->>AI1: Analyze content body
+    Note over AI1: 1st AI Call
+    AI1-->>Handler: keywords + informationType
+
+    par Search and evaluate
+        Handler->>Search: Search by keywords
+        Search-->>Handler: Raw results with scores
+        Handler->>Handler: Filter by score threshold
+        Handler->>AI2: body + analysis + candidates
+        Note over AI2: 2nd AI Call
+        AI2-->>Handler: Evaluated suggestions with paths and descriptions
+        loop For each evaluated suggestion
+            Handler->>Grant: Resolve grant for proposed path
+            Grant-->>Handler: Grant value
+        end
+    and Category suggestion
+        Handler->>CatGen: Generate from keywords
+        CatGen->>Search: Scoped keyword search
+        Search-->>CatGen: Top-level pages
+        CatGen->>Grant: Resolve parent grant
+        Grant-->>CatGen: Grant value
+        CatGen-->>Handler: Category suggestion or null
+    end
+
+    Handler-->>Client: 200 suggestions array
+```
+
+**Key decisions**:
+
+- Content analysis and candidate evaluation are structurally sequential — Elasticsearch sits between them
+- Search-evaluate flow and category generation run in parallel
+- If content analysis fails → memo-only response
+- If candidate evaluation fails → memo + category (if available)
+- Category generator runs independently (under review)
+
+## Component Interfaces
+
+### Orchestrator
+
+```typescript
+function generateSuggestions(
+  user: IUserHasId,
+  body: string,
+  userGroups: ObjectIdLike[],
+  searchService: SearchService,
+): Promise<PathSuggestion[]>;
+```
+
+- **No DI pattern**: Imports service functions directly; only `searchService` is passed as a parameter (the sole external dependency that cannot be statically imported)
+- **Invariant**: Returns array with at least one suggestion (memo type), regardless of failures
+- **informationType mapping**: Attaches `ContentAnalysis.informationType` to each search-type suggestion (Req 13.1)
+
+### Content Analyzer (1st AI Call)
+
+```typescript
+type ContentAnalysis = {
+  keywords: string[];            // 1-5 keywords, proper nouns prioritized
+  informationType: 'flow' | 'stock';
+};
+
+function analyzeContent(body: string): Promise<ContentAnalysis>;
+```
+
+### Search Candidate Retriever
+
+```typescript
+type SearchCandidate = {
+  pagePath: string;
+  snippet: string;
+  score: number;
+};
+
+function retrieveSearchCandidates(
+  keywords: string[],
+  user: IUserHasId,
+  userGroups: ObjectIdLike[],
+  searchService: SearchService,
+): Promise<SearchCandidate[]>;
+```
+
+- `searchService` is a direct positional argument (not wrapped in an options object)
+- Score threshold is a module-level constant (`SCORE_THRESHOLD = 5.0`)
+- Filters by ES score threshold; returns empty array if no results pass
+
+### Candidate Evaluator (2nd AI Call)
+
+```typescript
+type EvaluatedSuggestion = {
+  path: string;        // Proposed directory path with trailing /
+  label: string;
+  description: string; // AI-generated rationale
+};
+
+function evaluateCandidates(
+  body: string,
+  analysis: ContentAnalysis,
+  candidates: SearchCandidate[],
+): Promise<EvaluatedSuggestion[]>;
+```
+
+- Proposes paths using 3 structural patterns: (a) parent directory, (b) subdirectory, (c) sibling (may generate new paths at the same hierarchy level)
+- Flow/stock alignment is a ranking factor, not a hard filter
+- Grant resolution performed by orchestrator after this returns
+
+### Category Suggestion Generator
+
+```typescript
+function generateCategorySuggestion(
+  candidates: SearchCandidate[],
+): Promise<PathSuggestion | null>;
+```
+
+- Under review — may be merged into AI evaluation approach post-discussion
+- Returns `null` when no matching top-level pages are found
+
+### Grant Resolver
+
+```typescript
+function resolveParentGrant(dirPath: string): Promise<number>;
+```
+
+- Traverses upward through ancestors for new paths (sibling pattern)
+- Returns `GRANT_OWNER` (4) as safe default if no ancestor found
+
+## Data Contracts
+
+### API Contract
+
+| Method | Endpoint | Request | Response | Errors |
+|--------|----------|---------|----------|--------|
+| POST | `/_api/v3/ai-tools/suggest-path` | `SuggestPathRequest` | `SuggestPathResponse` | 400, 401, 403, 500 |
+
+### Request / Response Types
+
+```typescript
+// Request
+interface SuggestPathRequest {
+  body: string; // Page content for analysis (required, non-empty)
+}
+
+// Response
+type SuggestionType = 'memo' | 'search' | 'category';
+type InformationType = 'flow' | 'stock';
+
+interface PathSuggestion {
+  type: SuggestionType;
+  path: string;                        // Directory path with trailing '/'
+  label: string;
+  description: string;                 // Fixed for memo, AI-generated for search
+  grant: number;                       // Parent page grant (PageGrant value)
+  informationType?: InformationType;   // Search-based only
+}
+
+interface SuggestPathResponse {
+  suggestions: PathSuggestion[];       // Always ≥1 element (memo)
+}
+```
+
+**Invariants**: `path` ends with `/`, `grant` is a valid PageGrant value (1, 2, 4, or 5)
+
+### Response Example
+
+```json
+{
+  "suggestions": [
+    {
+      "type": "memo",
+      "path": "/user/alice/memo/",
+      "label": "Save as memo",
+      "description": "Save to your personal memo area",
+      "grant": 4
+    },
+    {
+      "type": "search",
+      "path": "/tech-notes/React/state-management/",
+      "label": "Save near related pages",
+      "description": "This area contains pages about React state management. Your stock content fits well alongside this existing reference material.",
+      "grant": 1,
+      "informationType": "stock"
+    },
+    {
+      "type": "category",
+      "path": "/tech-notes/",
+      "label": "Save under category",
+      "description": "Top-level category: tech-notes",
+      "grant": 1
+    }
+  ]
+}
+```
+
+## Error Handling & Graceful Degradation
+
+### User Errors (4xx)
+
+| Error | Status | Requirement |
+|-------|--------|-------------|
+| Missing or empty `body` | 400 | 9.1 |
+| No authentication | 401 | 8.2 |
+| AI service not enabled | 403 | 1.4 |
+
+### Graceful Degradation (returns 200)
+
+| Failure | Fallback |
+|---------|----------|
+| Content analysis (1st AI call) | Memo only (skips entire search pipeline) |
+| Search service | Memo + category (if available) |
+| Candidate evaluation (2nd AI call) | Memo + category (if available) |
+| Category generation | Memo + search-based (if available) |
+
+Each component fails independently. Memo is always generated first as guaranteed fallback.
+
+## Security Considerations
+
+- **Authentication**: All requests require valid API token or login session (standard middleware)
+- **Authorization**: Search results are permission-scoped via `searchKeyword()` user/group parameters
+- **Input safety**: Content body is passed to GROWI AI, not directly to Elasticsearch — no NoSQL injection risk
+- **AI prompt injection**: System prompt and user content are separated to minimize prompt injection risk
+- **Information leakage**: Error responses use generic messages (Req 9.2)
+
+## Performance Considerations
+
+- Content analysis and candidate evaluation are sequential (ES sits between) — 2 AI roundtrips minimum
+- Search-evaluate pipeline and category generation run in parallel to minimize total latency
+- ES snippets (not full page bodies) are passed to AI to manage context budget
+- Score threshold filtering reduces the number of candidates passed to the 2nd AI call

+ 77 - 0
.kiro/specs/suggest-path/requirements.md

@@ -0,0 +1,77 @@
+# Requirements Document
+
+## Introduction
+
+The suggest-path feature provides an AI-powered API endpoint for GROWI that suggests optimal page save locations. When an AI client (e.g., Claude via MCP) sends page content, the endpoint analyzes it and returns directory path suggestions with metadata including descriptions and grant (permission) constraints.
+
+The feature was delivered in two phases:
+
+- **Phase 1 (MVP)**: Personal memo path suggestion — endpoint, authentication, and response structure.
+- **Phase 2 (Full)**: AI-powered search-based path suggestions with flow/stock information classification, multi-candidate evaluation, and intelligent path proposal (including new paths).
+
+### Phase 2 Revision History
+
+Phase 2 was revised based on reviewer feedback: (1) flow/stock information classification, (2) multi-candidate AI evaluation instead of top-1 selection, (3) three-pattern path proposals (parent/subdirectory/sibling), (4) AI-generated descriptions.
+
+## Out of Scope
+
+- **Page creation/saving**: Uses existing `POST /_api/v3/page`. This feature only suggests *where* to save.
+- **Page title determination**: Handled via AI client-user dialogue.
+
+## Requirements
+
+### Requirement 1: Path Suggestion API Endpoint
+
+**Summary**: POST endpoint at `/_api/v3/ai-tools/suggest-path` accepts a `body` field and returns an array of path suggestions. Each suggestion includes `type`, `path` (directory with trailing `/`), `label`, `description`, and `grant`. Endpoint is under a separate namespace from `/_api/v3/page/` for independent access control.
+
+### Requirement 2: Memo Path Suggestion (Phase 1)
+
+**Summary**: Always includes a `memo` type suggestion as guaranteed fallback. Path is `/user/{username}/memo/` when user pages are enabled, or `/memo/{username}/` when disabled. Grant is `4` (owner only). Description is fixed text.
+
+### Requirement 3: Search-Based Path Suggestion (Phase 2)
+
+**Summary**: Searches for related pages using extracted keywords, filters them by Elasticsearch score threshold, then forwards every candidate that passes the threshold to AI-based evaluation (Req 11). Includes the parent page's grant. Omitted if no candidates pass the threshold.
+
+### Requirement 4: Category-Based Path Suggestion (Phase 2) — Under Review
+
+**Summary**: Extracts top-level path segment from keyword-matched pages as a `category` type suggestion. Includes parent grant. Omitted if no match found.
+
+> **Note**: May overlap with the AI-based evaluation approach (Reqs 11, 12). Whether to retain, merge, or remove will be determined after reviewer discussion.
+
+### Requirement 5: Content Analysis via GROWI AI (Phase 2)
+
+**Summary**: Single AI call performs keyword extraction (1-5 keywords, proper nouns prioritized) and flow/stock information type classification. Keywords (not raw content) are used for search. On failure, falls back to memo-only response.
+
+### Requirement 6: Suggestion Description Generation
+
+**Summary**: Each suggestion includes a `description` field. Memo uses fixed text. Search-based suggestions use AI-generated descriptions from candidate evaluation (Req 11).
+
+### Requirement 7: Grant Constraint Information
+
+**Summary**: Each suggestion includes a `grant` field representing the parent page's grant value — the upper bound of settable permissions for child pages (a constraint, not a recommendation).
+
+### Requirement 8: Authentication and Authorization
+
+**Summary**: Requires valid API token or login session. Returns authentication error if missing. Uses authenticated user's identity for user-specific suggestions.
+
+### Requirement 9: Input Validation and Error Handling
+
+**Summary**: Returns validation error for missing/empty `body`. Internal errors return appropriate responses without exposing system details.
+
+### Requirement 10: Flow/Stock Information Type Awareness (Phase 2)
+
+**Summary**: Candidate evaluation considers flow/stock alignment between content and candidate locations. Flow = time-bound (date-based paths, meeting terms). Stock = reference (topic-based paths). Used as a ranking factor, not a hard filter.
+
+### Requirement 11: AI-Based Candidate Evaluation and Ranking (Phase 2)
+
+**Summary**: GROWI AI evaluates each candidate's suitability using content body, candidate path, and snippet. Ranks by content-destination fit considering relevance and flow/stock alignment. Generates description per suggestion. Falls back to memo-only on failure.
+
+### Requirement 12: Path Proposal Patterns (Phase 2)
+
+**Summary**: Three structural patterns relative to each matching page: (a) parent directory, (b) subdirectory, (c) sibling directory. Sibling pattern generates new directory names at the same hierarchy level as the candidate. AI determines the most appropriate pattern.
+
+### Requirement 13: Client LLM Independence (Phase 2)
+
+**Summary**: Response includes both structured metadata (`informationType`, `type`, `grant`) and natural language (`description`) so any LLM client can use it regardless of reasoning capability. All reasoning-intensive operations are server-side.
+
+**Design Rationale**: MCP clients are powered by varying LLM models. Heavy reasoning is centralized in GROWI AI to prevent quality degradation with less capable clients.

+ 145 - 0
.kiro/specs/suggest-path/research.md

@@ -0,0 +1,145 @@
+# Research & Design Decisions
+
+## Summary
+
+- **Feature**: `suggest-path`
+- **Discovery Scope**: Extension (new endpoint added to existing API infrastructure)
+- **Key Findings**:
+  - GROWI uses a handler factory pattern (`(crowi: Crowi) => RequestHandler[]`) for API routes
+  - The `ai-tools` namespace does not exist yet; closest is `/openai` under `features/openai/`
+  - Grant parent-child constraints are enforced by `page-grant.ts` — GRANT_OWNER children must share the same owner
+  - `searchService.searchKeyword()` accepts keyword string and returns scored results with page metadata
+  - User home path utilities exist in `@growi/core` (`userHomepagePath`, `isUsersHomepage`)
+
+## Research Log
+
+### GROWI API Route Patterns
+
+- **Context**: Need to understand how to add a new route namespace
+- **Sources Consulted**: `apps/app/src/server/routes/apiv3/index.js`, `page/create-page.ts`, `features/openai/server/routes/index.ts`
+- **Findings**:
+  - Three router types: standard, admin, auth. New endpoints go on standard router
+  - Route registration: `router.use('/namespace', require('./namespace')(crowi))` or factory import
+  - Handler factory pattern: exports `(crowi: Crowi) => RequestHandler[]` returning middleware chain
+  - Middleware ordering: `accessTokenParser` → `loginRequiredStrictly` → validators → `apiV3FormValidator` → handler
+  - Response helpers: `res.apiv3(data)` for success, `res.apiv3Err(error, status)` for errors
+  - Feature-based routes use dynamic import pattern (see openai routes)
+- **Implications**: suggest-path follows the handler factory pattern. Route factory in `features/ai-tools/suggest-path/server/routes/apiv3/`, aggregation router in `features/ai-tools/server/routes/apiv3/`
+
+### OpenAI Feature Structure
+
+- **Context**: Understanding existing AI feature patterns for alignment
+- **Sources Consulted**: `features/openai/server/routes/index.ts`, `middlewares/certify-ai-service.ts`
+- **Findings**:
+  - AI routes gate on `aiEnabled` config via `certifyAiService` middleware
+  - Dynamic imports used for route handlers
+  - Dedicated middleware directory for AI-specific checks
+  - Routes organized under `features/openai/` not `routes/apiv3/`
+- **Implications**: suggest-path gates on AI-enabled config via `certifyAiService`. Code lives under `features/ai-tools/suggest-path/` with an aggregation router at `features/ai-tools/server/routes/apiv3/`.
+
+### Grant System Constraints
+
+- **Context**: Need to return accurate grant constraints for suggested paths
+- **Sources Consulted**: `@growi/core` PageGrant enum, `apps/app/src/server/service/page-grant.ts`
+- **Findings**:
+  - PageGrant values: PUBLIC(1), RESTRICTED(2), SPECIFIED(3-deprecated), OWNER(4), USER_GROUP(5)
+  - Parent constrains child: OWNER parent → child must be OWNER by same user; USER_GROUP parent → child cannot be PUBLIC
+  - `calcApplicableGrantData(page, user)` returns allowed grant types for a page
+  - For memo path (`/user/{username}/memo/`), the user homepage `/user/{username}` is GRANT_OWNER(4) by default → memo path grant is fixed at 4
+- **Implications**: Phase 1 memo grant is trivially 4. Phase 2 needs to look up actual parent page grant via Page model
+
+### Search Service Integration
+
+- **Context**: Phase 2 requires keyword-based search for related pages
+- **Sources Consulted**: `apps/app/src/server/service/search.ts`
+- **Findings**:
+  - `searchKeyword(keyword, nqName, user, userGroups, searchOpts)` → `[ISearchResult, delegatorName]`
+  - Results include `_id`, `_score`, `_source`, `_highlight`
+  - Supports `prefix:` queries for path-scoped search
+  - User groups needed for permission-scoped search results
+- **Implications**: Phase 2 uses `searchKeyword` with extracted keywords. Category search uses `prefix:/` to scope to top-level. Need `getUserRelatedGroups()` for permission-correct results.
+
+### User Home Path Utilities
+
+- **Context**: Memo path generation needs user home path
+- **Sources Consulted**: `@growi/core` `page-path-utils/index.ts`
+- **Findings**:
+  - `userHomepagePath(user)` → `/user/{username}`
+  - `isUsersHomepage(path)` → boolean check
+  - `getUsernameByPath(path)` → extract username from path
+- **Implications**: Use `userHomepagePath(req.user)` + `/memo/` for memo suggestion path
+
+## Architecture Pattern Evaluation
+
+| Option | Description | Strengths | Risks / Limitations | Notes |
+|--------|-------------|-----------|---------------------|-------|
+| Route under `features/ai-tools/` | Feature-based directory with aggregation router | Clean separation, follows features pattern and `ai-tools` naming | — | **Selected** — aligns with project architecture and independent access control |
+| Route under `features/openai/` | Extend existing AI feature module | Reuses AI infrastructure, minimal setup | Provider-specific name, harder to separate for independent access control | Rejected in review — namespace should be provider-agnostic |
+| Route under `routes/apiv3/page/` | Add to existing page routes | Close to page creation | Cannot gate independently for access control | Rejected in review — yuki requested separation |
+
+## Design Decisions
+
+### Decision: Route Namespace Placement
+
+- **Context**: Endpoint needs independent access control
+- **Alternatives Considered**:
+  1. `/openai/suggest-path` — groups with AI features but provider-specific
+  2. `/page/suggest-path` — close to page creation but cannot gate independently
+  3. `/ai-tools/suggest-path` — new provider-agnostic namespace
+- **Selected Approach**: `/_api/v3/ai-tools/suggest-path` under `features/ai-tools/suggest-path/`
+- **Rationale**: Provider-agnostic, enables independent access control, follows features directory pattern
+- **Trade-offs**: Requires an extra aggregation router at `features/ai-tools/server/routes/apiv3/` (one more layer of indirection), but this allows future ai-tools features to be added under the same namespace
+
+### Decision: Phase 1 Handler Simplicity
+
+- **Context**: Phase 1 (MVP) only returns memo path — very simple logic
+- **Alternatives Considered**:
+  1. Full service layer from the start (SuggestionService class)
+  2. Inline logic in handler, extract to service when Phase 2 arrives
+- **Selected Approach**: Inline logic in handler for Phase 1, extract to service for Phase 2
+- **Rationale**: Avoid over-engineering. Phase 1 is ~10 lines of logic. Service abstraction added when needed
+- **Trade-offs**: Phase 2 will require refactoring handler → service extraction
+- **Follow-up**: Define service interface in design for Phase 2 readiness
+
+### Decision: GROWI AI Keyword Extraction Approach
+
+- **Context**: Phase 2 needs keyword extraction from content body
+- **Alternatives Considered**:
+  1. New dedicated keyword extraction service
+  2. Extend existing OpenAI feature module
+  3. Client-side keyword extraction (fallback option)
+- **Selected Approach**: Leverage existing `features/openai/` infrastructure for keyword extraction
+- **Rationale**: GROWI already has OpenAI integration. Keyword extraction is a new capability within the existing AI feature
+- **Trade-offs**: Couples suggest-path to OpenAI feature availability. Mitigated by fallback to memo-only response
+- **Follow-up**: Detailed keyword extraction implementation is out of scope for this spec (separate design)
+
+## Risks & Mitigations
+
+- **Large content body performance**: Sending the full content for AI keyword extraction may be slow. Mitigation: fall back to the memo-only response if extraction fails or does not complete in time
+- **Search service dependency**: Depends on Elasticsearch being available. Mitigation: graceful degradation — return only the memo suggestion if search fails
+
+## Post-Implementation Discoveries
+
+### Lesson: Avoid Testability-Motivated DI in Feature Services
+
+- **Context**: Initial Phase 2 implementation used a `GenerateSuggestionsDeps` pattern — a `deps` parameter containing 5 callback functions injected into the orchestrator for testability
+- **Problem**: The pattern was inconsistent with the rest of the codebase (other modules use `vi.mock()` for testing), added route handler boilerplate (10 lines wiring callbacks), and forced unnecessary abstractions like `RetrieveSearchCandidatesOptions`
+- **Resolution**: Removed `deps` pattern; service functions are imported directly. Only `searchService` is passed as a parameter (the sole external dependency that cannot be statically imported). Tests use `vi.mock()` — consistent with `generate-memo-suggestion` and other modules
+- **Guideline**: In this codebase, prefer `vi.mock()` over DI patterns for feature-specific service layers. Reserve DI for true cross-cutting concerns or when the dependency is a runtime-varying service instance (like `searchService`)
+
+### Lesson: Type Propagation from Legacy Code
+
+- **Context**: `searchService.searchKeyword()` in `src/server/service/search.ts` has untyped parameters (legacy JS-to-TS migration), so the suggest-path code initially used `userGroups: unknown` as a safe catch-all
+- **Resolution**: Traced the actual type from `findAllUserGroupIdsRelatedToUser()` which returns `ObjectIdLike[]` (from `@growi/core`), and propagated it through the `SearchService` interface and all service functions
+- **Guideline**: When integrating with legacy untyped services, trace the actual runtime type from the call site rather than defaulting to `unknown`
+
+## References
+
+- [GROWI Search Internals](https://dev.growi.org/69842ea0cb3a20a69b0a1985) — Search feature internal architecture
+- `apps/app/src/server/routes/apiv3/index.js` — Route registration entry point
+- `apps/app/src/server/routes/apiv3/page/create-page.ts` — Reference handler pattern
+- `apps/app/src/features/openai/server/routes/index.ts` — AI feature route pattern
+- `packages/core/src/interfaces/page.ts` — PageGrant enum definition
+- `apps/app/src/server/service/page-grant.ts` — Grant validation logic
+- `apps/app/src/server/service/search.ts` — Search service interface
+- `packages/core/src/utils/page-path-utils/index.ts` — User path utilities

+ 4 - 3
.kiro/specs/upgrade-fixed-packages/spec.json → .kiro/specs/suggest-path/spec.json

@@ -1,6 +1,6 @@
 {
-  "feature_name": "upgrade-fixed-packages",
-  "created_at": "2026-03-23T00:00:00Z",
+  "feature_name": "suggest-path",
+  "created_at": "2026-02-10T12:00:00Z",
   "updated_at": "2026-03-23T00:00:00Z",
   "language": "en",
   "phase": "implementation-complete",
@@ -18,5 +18,6 @@
       "approved": true
     }
   },
-  "ready_for_implementation": true
+  "ready_for_implementation": true,
+  "cleanup_completed": true
 }

+ 82 - 0
.kiro/specs/suggest-path/tasks.md

@@ -0,0 +1,82 @@
+# Implementation Plan
+
+## Phase 1 (MVP) — Implemented
+
+- [x] 1. Phase 1 MVP — Shared types, memo path suggestion, and endpoint registration
+- [x] 1.1 Define suggestion types and implement memo path generation
+- [x] 1.2 Register route endpoint with authentication and validation
+- [x] 1.3 Phase 1 integration verification
+
+## Phase 2 — Revised
+
+- [x] 2. (P) Enhance grant resolver for ancestor path traversal
+- [x] 3. (P) Content analysis via GROWI AI (1st AI call)
+- [x] 4. (P) Search candidate retrieval with score threshold filtering
+- [x] 5. (P) AI-based candidate evaluation and path proposal (2nd AI call)
+- [x] 6. (P) Category-based path suggestion (under review — prior implementation retained)
+- [x] 7. Phase 2 revised orchestration and integration
+- [x] 7.1 Rewrite orchestration for revised Phase 2 pipeline
+- [x] 7.2 Phase 2 integration verification
+
+## Post-Implementation Refactoring (from code review)
+
+See `gap-analysis.md` for detailed rationale.
+
+- [x] 8. Simplify service layer abstractions
+- [x] 8.1 Remove `GenerateSuggestionsDeps` DI pattern from `generate-suggestions.ts`
+- [x] 8.2 Remove `RetrieveSearchCandidatesOptions` from `retrieve-search-candidates.ts`
+- [x] 8.3 Add JSDoc to `call-llm-for-json.ts`
+- [x] 8.4 Narrow `userGroups: unknown` to `ObjectIdLike[]`
+
+## Requirements Coverage
+
+| Requirement | Task(s) |
+|-------------|---------|
+| 1.1 | 1.2, 1.3, 7.1 |
+| 1.2 | 1.1, 1.3, 7.1 |
+| 1.3 | 1.1, 1.3, 7.1 |
+| 1.4 | 1.2, 1.3 |
+| 2.1 | 1.1, 1.3 |
+| 2.2 | 1.1 |
+| 2.3 | 1.1 |
+| 2.4 | 1.1 |
+| 2.5 | 1.1 |
+| 3.1 | 4, 7.2 |
+| 3.2 | 4, 7.2 |
+| 3.3 | 5, 7.1, 7.2 |
+| 3.4 | 7.1, 7.2 |
+| 3.5 | 4, 7.2 |
+| 4.1 | 6 |
+| 4.2 | 6 |
+| 4.3 | 6 |
+| 4.4 | 6 |
+| 5.1 | 3, 7.2 |
+| 5.2 | 3 |
+| 5.3 | 4, 7.1 |
+| 5.4 | 3, 7.2 |
+| 5.5 | 7.1, 7.2 |
+| 6.1 | 1.1, 7.2 |
+| 6.2 | 1.1 |
+| 6.3 | 5, 7.2 |
+| 7.1 | 2 |
+| 7.2 | 2 |
+| 8.1 | 1.2, 1.3 |
+| 8.2 | 1.2, 1.3 |
+| 8.3 | 1.2, 7.1 |
+| 9.1 | 1.2, 1.3 |
+| 9.2 | 1.2, 7.1 |
+| 10.1 | 5, 7.2 |
+| 10.2 | 5 |
+| 10.3 | 5 |
+| 10.4 | 5 |
+| 11.1 | 5, 7.2 |
+| 11.2 | 5 |
+| 11.3 | 5 |
+| 11.4 | 7.1, 7.2 |
+| 12.1 | 5, 7.2 |
+| 12.2 | 5 |
+| 12.3 | 5 |
+| 12.4 | 5 |
+| 13.1 | 7.1, 7.2 |
+| 13.2 | 7.1, 7.2 |
+| 13.3 | 7.1 |

+ 0 - 262
.kiro/specs/upgrade-fixed-packages/design.md

@@ -1,262 +0,0 @@
-# Design Document: upgrade-fixed-packages
-
-## Overview
-
-**Purpose**: This feature audits and upgrades version-pinned packages in `apps/app/package.json` that were frozen due to upstream bugs, ESM-only migrations, or licensing constraints. The build environment has shifted from webpack to Turbopack, and the runtime now targets Node.js 24 with stable `require(esm)` support, invalidating several original pinning reasons.
-
-**Users**: Maintainers and developers benefit from up-to-date dependencies with bug fixes, security patches, and reduced technical debt.
-
-**Impact**: Modifies `apps/app/package.json` dependency versions and comment blocks; touches source files where `escape-string-regexp` is replaced by native `RegExp.escape()`.
-
-### Goals
-- Verify each pinning reason against current upstream status
-- Upgrade packages where the original constraint no longer applies
-- Replace `escape-string-regexp` with native `RegExp.escape()` (Node.js 24)
-- Update or remove comment blocks to reflect current state
-- Produce audit documentation for future reference
-
-### Non-Goals
-- Replacing handsontable with an alternative library (license constraint remains; replacement is a separate initiative)
-- Upgrading `@keycloak/keycloak-admin-client` to v19+ (significant API breaking changes; deferred to separate task)
-- Major version upgrades of unrelated packages
-- Modifying the build pipeline or Turbopack configuration
-
-## Architecture
-
-This is a dependency maintenance task, not a feature implementation. No new components or architectural changes are introduced.
-
-### Existing Architecture Analysis
-
-The pinned packages fall into distinct categories by their usage context:
-
-| Category | Packages | Build Context |
-|----------|----------|---------------|
-| Server-only (tsc → CJS) | `escape-string-regexp`, `@aws-sdk/*`, `@keycloak/*` | Express server compiled by tsc |
-| Client-only (Turbopack) | `string-width` (via @growi/editor), `bootstrap` | Bundled by Turbopack/Vite |
-| Client + SSR | `next-themes` | Turbopack + SSR rendering |
-| License-pinned | `handsontable`, `@handsontable/react` | Client-only |
-
-Key enabler: Node.js ^24 provides stable `require(esm)` support, removing the fundamental CJS/ESM incompatibility that caused several pins.
-
-### Technology Stack
-
-| Layer | Choice / Version | Role in Feature | Notes |
-|-------|------------------|-----------------|-------|
-| Runtime | Node.js ^24 | Enables `require(esm)` and `RegExp.escape()` | ES2026 Stage 4 features available |
-| Build (client) | Turbopack (Next.js 16) | Bundles ESM-only packages without issues | No changes needed |
-| Build (server) | tsc (CommonJS output) | `require(esm)` handles ESM-only imports | Node.js 24 native support |
-| Package manager | pnpm v10 | Manages dependency resolution | No changes needed |
-
-## System Flows
-
-### Upgrade Verification Flow
-
-```mermaid
-flowchart TD
-    Start[Select package to upgrade] --> Update[Update version in package.json]
-    Update --> Install[pnpm install]
-    Install --> Build{turbo run build}
-    Build -->|Pass| Lint{turbo run lint}
-    Build -->|Fail| Revert[Revert package change]
-    Lint -->|Pass| Test{turbo run test}
-    Lint -->|Fail| Revert
-    Test -->|Pass| Verify[Verify .next/node_modules symlinks]
-    Test -->|Fail| Revert
-    Verify --> Next[Proceed to next package]
-    Revert --> Document[Document failure reason]
-    Document --> Next
-```
-
-Each package is upgraded and verified independently. Failures are isolated and reverted without affecting other upgrades.
-
-## Requirements Traceability
-
-| Requirement | Summary | Components | Action |
-|-------------|---------|------------|--------|
-| 1.1 | Bootstrap bug investigation | PackageAudit | Verify #39798 fixed in v5.3.4 |
-| 1.2 | next-themes issue investigation | PackageAudit | Verify #122 resolved; check v0.4.x compatibility |
-| 1.3 | @aws-sdk constraint verification | PackageAudit | Confirm mongodb constraint is on different package |
-| 1.4 | Document investigation results | AuditReport | Summary table in research.md |
-| 2.1 | ESM compatibility per package | PackageAudit | Assess escape-string-regexp, string-width, @keycloak |
-| 2.2 | Server build ESM support | PackageAudit | Verify Node.js 24 require(esm) for server context |
-| 2.3 | Client build ESM support | PackageAudit | Confirm Turbopack handles ESM-only packages |
-| 2.4 | Compatibility matrix | AuditReport | Table in research.md |
-| 3.1 | Handsontable license check | PackageAudit | Confirm v7+ still non-MIT |
-| 3.2 | Document pinning requirement | AuditReport | Note in audit summary |
-| 4.1 | Update package.json versions and comments | UpgradeExecution | Modify versions and comment blocks |
-| 4.2 | Build verification | UpgradeExecution | `turbo run build --filter @growi/app` |
-| 4.3 | Lint verification | UpgradeExecution | `turbo run lint --filter @growi/app` |
-| 4.4 | Test verification | UpgradeExecution | `turbo run test --filter @growi/app` |
-| 4.5 | Revert on failure | UpgradeExecution | Git revert per package |
-| 4.6 | Update comment blocks | UpgradeExecution | Remove or update comments |
-| 5.1 | Audit summary table | AuditReport | Final summary with decisions |
-| 5.2 | Document continued pinning | AuditReport | Reasons for remaining pins |
-| 5.3 | Document upgrade rationale | AuditReport | What changed upstream |
-
-## Components and Interfaces
-
-| Component | Domain | Intent | Req Coverage | Key Dependencies |
-|-----------|--------|--------|--------------|------------------|
-| PackageAudit | Investigation | Research upstream status for each pinned package | 1.1–1.4, 2.1–2.4, 3.1–3.2 | GitHub issues, npm registry |
-| UpgradeExecution | Implementation | Apply version changes and verify build | 4.1–4.6 | pnpm, turbo, tsc |
-| SourceMigration | Implementation | Replace escape-string-regexp with RegExp.escape() | 4.1 | 9 source files |
-| AuditReport | Documentation | Produce summary of all decisions | 5.1–5.3 | research.md |
-
-### Investigation Layer
-
-#### PackageAudit
-
-| Field | Detail |
-|-------|--------|
-| Intent | Investigate upstream status of each pinned package and determine upgrade feasibility |
-| Requirements | 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2 |
-
-**Responsibilities & Constraints**
-- Check upstream issue trackers for bug fix status
-- Verify ESM compatibility against Node.js 24 `require(esm)` and Turbopack
-- Confirm license status for handsontable
-- Produce actionable recommendation per package
-
-**Audit Decision Matrix**
-
-| Package | Current | Action | Target | Risk | Rationale |
-|---------|---------|--------|--------|------|-----------|
-| `bootstrap` | `=5.3.2` | Upgrade | `^5.3.4` | Low | Bug #39798 fixed in v5.3.4 |
-| `next-themes` | `^0.2.1` | Upgrade | `^0.4.4` | Medium | Original issue was misattributed; v0.4.x works with Pages Router |
-| `escape-string-regexp` | `^4.0.0` | Replace | Remove dep | Low | Native `RegExp.escape()` in Node.js 24 |
-| `string-width` | `=4.2.2` | Upgrade | `^7.0.0` | Low | Used only in ESM context (@growi/editor) |
-| `@aws-sdk/client-s3` | `3.454.0` | Relax | `^3.454.0` | Low | Pinning comment was misleading |
-| `@aws-sdk/s3-request-presigner` | `3.454.0` | Relax | `^3.454.0` | Low | Same as above |
-| `@keycloak/keycloak-admin-client` | `^18.0.0` | Defer | No change | N/A | API breaking changes; separate task |
-| `handsontable` | `=6.2.2` | Keep | No change | N/A | License constraint (non-MIT since v7) |
-| `@handsontable/react` | `=2.1.0` | Keep | No change | N/A | Requires handsontable >= 7 |
-
-### Implementation Layer
-
-#### UpgradeExecution
-
-| Field | Detail |
-|-------|--------|
-| Intent | Apply version changes incrementally with build verification |
-| Requirements | 4.1, 4.2, 4.3, 4.4, 4.5, 4.6 |
-
-**Responsibilities & Constraints**
-- Upgrade one package at a time to isolate failures
-- Run full verification suite (build, lint, test) after each change
-- Revert and document any package that causes failures
-- Update `// comments for dependencies` block to reflect new state
-
-**Upgrade Order** (lowest risk first):
-1. `@aws-sdk/*` — relax version range (no code changes)
-2. `string-width` — upgrade in @growi/editor (isolated ESM package)
-3. `bootstrap` — upgrade to ^5.3.4 (verify SCSS compilation)
-4. `escape-string-regexp` → `RegExp.escape()` — source code changes across 9 files
-5. `next-themes` — upgrade to ^0.4.x (review API changes across 12 files)
-
-**Implementation Notes**
-- After each upgrade, verify `.next/node_modules/` symlinks for Turbopack externalisation compliance (per `package-dependencies` rule)
-- For bootstrap: run `pnpm run pre:styles-commons` and `pnpm run pre:styles-components` to verify SCSS compilation
-- For next-themes: review v0.3.0 and v0.4.0 changelogs for breaking API changes before modifying code
-
-#### SourceMigration
-
-| Field | Detail |
-|-------|--------|
-| Intent | Replace all `escape-string-regexp` usage with native `RegExp.escape()` |
-| Requirements | 4.1 |
-
-**Files to Modify**:
-
-`apps/app/src/` (6 files):
-- `server/models/page.ts`
-- `server/service/page/index.ts`
-- `server/service/page-grant.ts`
-- `server/routes/apiv3/users.js`
-- `server/models/obsolete-page.js`
-- `features/openai/server/services/openai.ts`
-
-`packages/` (3 files):
-- `packages/core/src/utils/page-path-utils/` (2 files)
-- `packages/remark-lsx/src/server/routes/list-pages/index.ts`
-
-**Migration Pattern**:
-```typescript
-// Before
-import escapeStringRegexp from 'escape-string-regexp';
-const pattern = new RegExp(escapeStringRegexp(input));
-
-// After
-const pattern = new RegExp(RegExp.escape(input));
-```
-
-**Implementation Notes**
-- Remove `escape-string-regexp` from `apps/app/package.json` dependencies after migration
-- Remove from `packages/core/package.json` and `packages/remark-lsx/package.json` if listed
-- Verify `RegExp.escape()` TypeScript types are available (may need `@types/node` update or lib config)
-
-### Documentation Layer
-
-#### AuditReport
-
-| Field | Detail |
-|-------|--------|
-| Intent | Document all audit decisions for future maintainers |
-| Requirements | 5.1, 5.2, 5.3 |
-
-**Deliverables**:
-- Updated `// comments for dependencies` in package.json (only retained pins with current reasons)
-- Updated `// comments for defDependencies` (handsontable entries unchanged)
-- Summary in research.md with final decision per package
-
-**Updated Comment Blocks** (target state):
-
-```json
-{
-  "// comments for dependencies": {
-    "@keycloak/keycloak-admin-client": "19.0.0 or above exports only ESM. API breaking changes require separate migration effort.",
-    "next-themes": "(if upgrade fails) Document specific failure reason here"
-  },
-  "// comments for defDependencies": {
-    "@handsontable/react": "v3 requires handsontable >= 7.0.0.",
-    "handsontable": "v7.0.0 or above is no longer MIT license."
-  }
-}
-```
-
-Note: The exact final state depends on which upgrades succeed. If all planned upgrades pass, only `@keycloak` and `handsontable` entries remain.
-
-## Testing Strategy
-
-### Build Verification (per package)
-- `turbo run build --filter @growi/app` — Turbopack client build + tsc server build
-- `ls apps/app/.next/node_modules/ | grep <package>` — Externalisation check
-- `pnpm run pre:styles-commons` — SCSS compilation (bootstrap only)
-
-### Lint Verification (per package)
-- `turbo run lint --filter @growi/app` — TypeScript type check + Biome
-
-### Unit/Integration Tests (per package)
-- `turbo run test --filter @growi/app` — Full test suite
-- For `RegExp.escape()` migration: run tests for page model, page service, page-grant service specifically
-
-### Regression Verification (final)
-- Full build + lint + test after all upgrades applied together
-- Verify `.next/node_modules/` symlink integrity via `check-next-symlinks.sh` (if available locally)
-
-## Migration Strategy
-
-```mermaid
-flowchart LR
-    Phase1[Phase 1: Low Risk] --> Phase2[Phase 2: Medium Risk]
-    Phase1 --> P1a[aws-sdk relax range]
-    Phase1 --> P1b[string-width upgrade]
-    Phase2 --> P2a[bootstrap upgrade]
-    Phase2 --> P2b[escape-string-regexp replace]
-    Phase2 --> P2c[next-themes upgrade]
-```
-
-- **Phase 1** (low risk): @aws-sdk range relaxation, string-width upgrade — minimal code changes
-- **Phase 2** (medium risk): bootstrap, escape-string-regexp replacement, next-themes — requires code review and/or source changes
-- Each upgrade is independently revertible
-- Deferred: @keycloak (high risk, separate task)
-- No change: handsontable (license constraint)

+ 0 - 75
.kiro/specs/upgrade-fixed-packages/requirements.md

@@ -1,75 +0,0 @@
-# Requirements Document
-
-## Introduction
-
-The `apps/app/package.json` file contains several packages whose versions are intentionally pinned due to ESM-only upgrades, upstream bugs, or licensing concerns. These pinning reasons were documented in `// comments for dependencies` and `// comments for defDependencies` comment blocks. Since the build environment has significantly changed (webpack → Turbopack), and upstream issues may have been resolved, a systematic audit is needed to determine which packages can now be safely upgraded.
-
-### Pinned Packages Inventory
-
-| # | Package | Current Version | Pinning Reason |
-|---|---------|----------------|----------------|
-| 1 | `@aws-sdk/client-s3`, `@aws-sdk/s3-request-presigner` | `3.454.0` | Fix version above 3.186.0 required by mongodb@4.16.0 |
-| 2 | `@keycloak/keycloak-admin-client` | `^18.0.0` | 19.0.0+ exports only ESM |
-| 3 | `bootstrap` | `=5.3.2` | v5.3.3 has a bug (twbs/bootstrap#39798) |
-| 4 | `escape-string-regexp` | `^4.0.0` | 5.0.0+ exports only ESM |
-| 5 | `next-themes` | `^0.2.1` | 0.3.0 causes type error (pacocoursey/next-themes#122) |
-| 6 | `string-width` | `=4.2.2` | 5.0.0+ exports only ESM |
-| 7 | `@handsontable/react` | `=2.1.0` | v3 requires handsontable >= 7.0.0 |
-| 8 | `handsontable` | `=6.2.2` | v7.0.0+ is no longer MIT license |
-
-## Requirements
-
-### Requirement 1: Upstream Bug and Issue Investigation
-
-**Objective:** As a maintainer, I want to verify whether upstream bugs and issues that originally caused version pinning have been resolved, so that I can make informed upgrade decisions.
-
-#### Acceptance Criteria
-
-1. When investigating the bootstrap pinning, the audit process shall check the current status of https://github.com/twbs/bootstrap/issues/39798 and determine whether v5.3.3+ has fixed the reported bug.
-2. When investigating the next-themes pinning, the audit process shall check the current status of https://github.com/pacocoursey/next-themes/issues/122 and determine whether v0.3.0+ has resolved the type error.
-3. When investigating the @aws-sdk pinning, the audit process shall verify whether the mongodb version used in GROWI still requires the `>=3.186.0` constraint and whether the latest @aws-sdk versions are compatible.
-4. The audit process shall document the investigation result for each package, including: current upstream status, whether the original issue is resolved, and the recommended action (upgrade/keep/replace).
-
-### Requirement 2: ESM-Only Package Compatibility Assessment
-
-**Objective:** As a maintainer, I want to assess whether ESM-only versions of pinned packages are now compatible with the current Turbopack-based build environment, so that outdated CJS-only constraints can be removed.
-
-#### Acceptance Criteria
-
-1. When assessing ESM compatibility, the audit process shall evaluate each ESM-pinned package (`escape-string-regexp`, `string-width`, `@keycloak/keycloak-admin-client`) against the current build pipeline (Turbopack for client, tsc for server).
-2. When a package is used in server-side code (transpiled via tsc with `tsconfig.build.server.json`), the audit process shall verify whether the server build output format (CJS or ESM) supports importing ESM-only packages.
-3. When a package is used only in client-side code (bundled via Turbopack), the audit process shall confirm that Turbopack can resolve ESM-only packages without issues.
-4. The audit process shall produce a compatibility matrix showing each ESM-pinned package, its usage context (server/client/both), and whether upgrading to the ESM-only version is feasible.
-
-### Requirement 3: License Compliance Verification
-
-**Objective:** As a maintainer, I want to confirm that the handsontable/`@handsontable/react` licensing situation has not changed, so that I can determine whether these packages must remain pinned or can be replaced.
-
-#### Acceptance Criteria
-
-1. When evaluating handsontable, the audit process shall verify the current license of handsontable v7.0.0+ and confirm whether it remains non-MIT.
-2. If handsontable v7.0.0+ is still non-MIT, the audit process shall document that `handsontable` (`=6.2.2`) and `@handsontable/react` (`=2.1.0`) must remain pinned or an alternative library must be identified.
-3. If a MIT-licensed alternative to handsontable exists, the audit process shall note it as a potential replacement candidate (out of scope for this spec but documented for future work).
-
-### Requirement 4: Safe Upgrade Execution
-
-**Objective:** As a maintainer, I want to upgrade packages that are confirmed safe to update, so that the project benefits from bug fixes, security patches, and new features.
-
-#### Acceptance Criteria
-
-1. When upgrading a pinned package, the upgrade process shall update the version specifier in `apps/app/package.json` and remove or update the corresponding entry in the `// comments for dependencies` or `// comments for defDependencies` block.
-2. When a package is upgraded, the upgrade process shall verify that `turbo run build --filter @growi/app` completes successfully.
-3. When a package is upgraded, the upgrade process shall verify that `turbo run lint --filter @growi/app` completes without new errors.
-4. When a package is upgraded, the upgrade process shall verify that `turbo run test --filter @growi/app` passes without new failures.
-5. If a package upgrade causes build, lint, or test failures, the upgrade process shall revert that specific package change and document the failure reason.
-6. When all upgrades are complete, the `// comments for dependencies` and `// comments for defDependencies` blocks shall accurately reflect only the packages that remain pinned, with updated reasons if applicable.
-
-### Requirement 5: Audit Documentation
-
-**Objective:** As a maintainer, I want a clear record of the audit results, so that future maintainers understand which packages were evaluated and why decisions were made.
-
-#### Acceptance Criteria
-
-1. The audit process shall produce a summary table documenting each pinned package with: package name, previous version, new version (or "unchanged"), and rationale for the decision.
-2. When a package remains pinned, the documentation shall include the verified reason for continued pinning.
-3. When a package is upgraded, the documentation shall note what changed upstream that made the upgrade possible.

+ 0 - 183
.kiro/specs/upgrade-fixed-packages/research.md

@@ -1,183 +0,0 @@
-# Research & Design Decisions
-
----
-**Purpose**: Capture discovery findings for the pinned package audit and upgrade initiative.
-**Usage**: Inform design.md decisions; provide evidence for future maintainers.
----
-
-## Summary
-- **Feature**: `upgrade-fixed-packages`
-- **Discovery Scope**: Extension (auditing existing dependency constraints)
-- **Key Findings**:
-  - Bootstrap bug (#39798) fixed in v5.3.4 — safe to upgrade to latest 5.3.x
-  - next-themes original issue (#122) was resolved long ago; upgrade to v0.4.x feasible but has Next.js 16 `cacheComponents` caveat
-  - Node.js ^24 enables stable `require(esm)`, unlocking ESM-only package upgrades for server code
-  - `escape-string-regexp` can be replaced entirely by native `RegExp.escape()` (ES2026, Node.js 24)
-  - handsontable license situation unchanged — must remain pinned at 6.2.2
-  - @aws-sdk pinning comment is misleading; packages can be freely upgraded
-
-## Research Log
-
-### Bootstrap v5.3.3 Bug (#39798)
-- **Context**: bootstrap pinned at `=5.3.2` due to modal header regression in v5.3.3
-- **Sources Consulted**: https://github.com/twbs/bootstrap/issues/39798, https://github.com/twbs/bootstrap/pull/41336
-- **Findings**:
-  - Issue CLOSED on 2025-04-03
-  - Fixed in v5.3.4 via PR #41336 (Fix modal and offcanvas header collapse)
-  - Bug: `.modal-header` lost `justify-content: space-between`, causing content collapse
-  - Latest stable: v5.3.8 (August 2025)
-- **Implications**: Safe to upgrade from `=5.3.2` to `^5.3.4`. Skip v5.3.3 entirely. Recommend `^5.3.4` or pin to latest `=5.3.8`.
-
-### next-themes Type Error (#122)
-- **Context**: next-themes pinned at `^0.2.1` due to reported type error in v0.3.0
-- **Sources Consulted**: https://github.com/pacocoursey/next-themes/issues/122, https://github.com/pacocoursey/next-themes/issues/375
-- **Findings**:
-  - Issue #122 CLOSED on 2022-06-02 — was specific to an old beta version (v0.0.13-beta.3), not v0.3.0
-  - The pinning reason was based on incomplete information; v0.2.0+ already had the fix
-  - Latest: v0.4.6 (March 2025). Peers: `react ^16.8 || ^17 || ^18 || ^19`
-  - **Caveat**: Issue #375 reports a bug with Next.js 16's `cacheComponents` feature — stale theme values when cached components reactivate
-  - PR #377 in progress to fix via `useSyncExternalStore`
-  - Without `cacheComponents`, v0.4.6 works fine with Next.js 16
-- **Implications**: Upgrade to v0.4.x is feasible. GROWI uses Pages Router (not App Router), so `cacheComponents` is likely not relevant. Breaking API changes between v0.2 → v0.4 need review. Used in 12 files across apps/app.
-
-### ESM-only Package Compatibility (escape-string-regexp, string-width, @keycloak)
-- **Context**: Three packages pinned to CJS-compatible versions because newer versions are ESM-only
-- **Sources Consulted**: Node.js v22.12.0 release notes (require(esm) enabled by default), TC39 RegExp.escape Stage 4, sindresorhus ESM guidance, npm package pages
-- **Findings**:
-
-  **escape-string-regexp** (^4.0.0):
-  - Used in 6 server-side files + 3 shared package files (all server context)
-  - Node.js 24 has stable `require(esm)` — ESM-only v5 would work
-  - **Better**: `RegExp.escape()` is ES2026 Stage 4, natively available in Node.js 24 (V8 support)
-  - Can eliminate the dependency entirely
-
-  **string-width** (=4.2.2):
-  - Used only in `packages/editor/src/models/markdown-table.js`
-  - `@growi/editor` has `"type": "module"` and builds with Vite (ESM context)
-  - No server-side value imports (only type imports in `sync-ydoc.ts`, erased at compile)
-  - Safe to upgrade to v7.x
-
-  **@keycloak/keycloak-admin-client** (^18.0.0):
-  - Used in 1 server-side file: `features/external-user-group/server/service/keycloak-user-group-sync.ts`
-  - Latest: v26.5.5 (February 2026)
-  - `require(esm)` in Node.js 24 should handle it, but API has significant breaking changes (v18 → v26)
-  - Sub-path exports need verification
-  - Higher risk upgrade — API surface changes expected
-
-- **Implications**: string-width is the easiest upgrade. escape-string-regexp should be replaced by native `RegExp.escape()`. @keycloak requires careful API migration and is higher risk.
-
-### @aws-sdk Pinning Analysis
-- **Context**: @aws-sdk/client-s3 and @aws-sdk/s3-request-presigner pinned at 3.454.0
-- **Sources Consulted**: mongodb package.json, npm registry, GROWI source code
-- **Findings**:
-  - Pinning comment says "required by mongodb@4.16.0" but is misleading
-  - mongodb@4.17.2 has `@aws-sdk/credential-providers: ^3.186.0` as **optional** dependency — a different package
-  - The S3 client packages are used directly by GROWI for file upload (server/service/file-uploader/aws/)
-  - Latest: @aws-sdk/client-s3@3.1014.0 (March 2026) — over 500 versions behind
-  - AWS SDK v3 follows semver; any 3.x should be compatible
-- **Implications**: Remove the misleading comment. Change from exact `3.454.0` to `^3.454.0` or update to latest. Low risk.
-
-### Handsontable License Status
-- **Context**: handsontable pinned at =6.2.2 (last MIT version), @handsontable/react at =2.1.0
-- **Sources Consulted**: handsontable.com/docs/software-license, npm, Hacker News discussion
-- **Findings**:
-  - v7.0.0+ (March 2019) switched from MIT to proprietary license — unchanged as of 2026
-  - Free "Hobby" license exists but restricted to non-commercial personal use
-  - Commercial use requires paid subscription
-  - MIT alternatives: AG Grid Community (most mature), Jspreadsheet CE, Univer (Apache 2.0)
-- **Implications**: Must remain pinned. No action possible without license purchase or library replacement. Library replacement is out of scope for this spec.
-
-## Design Decisions
-
-### Decision: Replace escape-string-regexp with native RegExp.escape()
-- **Context**: escape-string-regexp v5 is ESM-only; used in 9 files across server code
-- **Alternatives Considered**:
-  1. Upgrade to v5 with require(esm) support — works but adds unnecessary dependency
-  2. Replace with native `RegExp.escape()` — zero dependencies, future-proof
-- **Selected Approach**: Replace with `RegExp.escape()`
-- **Rationale**: Node.js 24 supports `RegExp.escape()` natively (ES2026 Stage 4). Eliminates a dependency entirely.
-- **Trade-offs**: Requires touching 9 files, but changes are mechanical (find-and-replace)
-- **Follow-up**: Verify `RegExp.escape()` is available in the project's Node.js 24 target
-
-### Decision: Upgrade string-width directly to v7.x
-- **Context**: Used only in @growi/editor (ESM package, Vite-bundled, client-only)
-- **Selected Approach**: Direct upgrade to latest v7.x
-- **Rationale**: Consumer is already ESM; zero CJS concern
-- **Trade-offs**: None significant; API is stable
-
-### Decision: Upgrade bootstrap to ^5.3.4
-- **Context**: Bug fixed in v5.3.4; latest is 5.3.8
-- **Selected Approach**: Change from `=5.3.2` to `^5.3.4`
-- **Rationale**: Original bug resolved; skip v5.3.3
-- **Trade-offs**: Need to verify GROWI's custom SCSS and modal usage against 5.3.4+ changes
-
-### Decision: Upgrade next-themes to latest 0.4.x
-- **Context**: Original issue was a misunderstanding; latest is v0.4.6
-- **Selected Approach**: Upgrade to `^0.4.4` (or latest)
-- **Rationale**: Issue #122 was specific to old beta, not v0.3.0. GROWI uses Pages Router, so cacheComponents bug is not relevant.
-- **Trade-offs**: Breaking API changes between v0.2 → v0.4 need review. 12 files import from next-themes.
-- **Follow-up**: Review v0.3.0 and v0.4.0 changelogs for breaking changes
-
-### Decision: Relax @aws-sdk version to caret range
-- **Context**: Pinning was based on misleading comment; packages are independent of mongodb constraint
-- **Selected Approach**: Change from `3.454.0` to `^3.454.0`
-- **Rationale**: AWS SDK v3 follows semver; the comment conflated credential-providers with S3 client
-- **Trade-offs**: Low risk. Conservative approach keeps minimum at 3.454.0.
-
-### Decision: Defer @keycloak upgrade (high risk)
-- **Context**: v18 → v26 has significant API breaking changes; only 1 file affected
-- **Selected Approach**: Document as upgradeable but defer to a separate task
-- **Rationale**: API migration requires Keycloak server compatibility testing; out of proportion for a batch upgrade task
-- **Trade-offs**: Remains on old version longer, but isolated to one feature
-
-### Decision: Keep handsontable pinned (license constraint)
-- **Context**: v7+ is proprietary; no free alternative that's drop-in
-- **Selected Approach**: No change. Document for future reference.
-- **Rationale**: License constraint is permanent unless library is replaced entirely
-- **Trade-offs**: None — this is a business/legal decision, not technical
-
-## Risks & Mitigations
-- **Bootstrap SCSS breakage**: v5.3.4+ may have SCSS variable changes → Run `pre:styles-commons` and `pre:styles-components` builds to verify
-- **next-themes API changes**: v0.2 → v0.4 has breaking changes → Review changelog; test all 12 consuming files
-- **RegExp.escape() availability**: Ensure Node.js 24 V8 includes it → Verify with simple runtime test
-- **@aws-sdk transitive dependency changes**: Newer AWS SDK may pull different transitive deps → Monitor bundle size
-- **Build regression**: Any upgrade could break Turbopack build → Follow incremental upgrade strategy with build verification per package
-
-## Future Considerations (Out of Scope)
-
-### transpilePackages cleanup in next.config.ts
-- **Context**: `next.config.ts` defines `getTranspilePackages()` listing 60+ ESM-only packages to force Turbopack to bundle them instead of externalising. The original comment says: "listing ESM packages until experimental.esmExternals works correctly to avoid ERR_REQUIRE_ESM".
-- **Relationship to require(esm)**: `transpilePackages` and `require(esm)` solve different problems. `transpilePackages` prevents Turbopack from externalising packages during SSR; `require(esm)` allows Node.js to load ESM packages via `require()` at runtime. With Node.js 24's stable `require(esm)`, externalised ESM packages *should* load correctly in SSR, meaning some `transpilePackages` entries may become unnecessary.
-- **Why not now**: (1) Turbopack's `esmExternals` handling is still `experimental`; (2) removing entries shifts packages from bundled to externalised, which means they appear in `.next/node_modules/` and must be classified as `dependencies` per the `package-dependencies` rule; (3) 60+ packages need individual verification. This is a separate investigation with a large blast radius.
-- **Recommendation**: Track as a separate task. Test by removing a few low-risk entries (e.g., `bail`, `ccount`, `zwitch`) and checking whether SSR still works with Turbopack externalisation + Node.js 24 `require(esm)`.
-
-## References
-- [Bootstrap issue #39798](https://github.com/twbs/bootstrap/issues/39798) — modal header regression, fixed in v5.3.4
-- [next-themes issue #122](https://github.com/pacocoursey/next-themes/issues/122) — type error, resolved in v0.2.0
-- [next-themes issue #375](https://github.com/pacocoursey/next-themes/issues/375) — Next.js 16 cacheComponents bug
-- [TC39 RegExp.escape() Stage 4](https://socket.dev/blog/tc39-advances-3-proposals-to-stage-4-regexp-escaping-float16array-and-redeclarable-global-eval) — ES2026
-- [Node.js require(esm) stability](https://joyeecheung.github.io/blog/2025/12/30/require-esm-in-node-js-from-experiment-to-stability/) — stable since Node.js 22.12.0
-- [Handsontable license change](https://handsontable.com/docs/javascript-data-grid/software-license/) — proprietary since v7.0.0
-
-## Final Audit Summary (2026-03-23)
-
-| Package | Previous Version | New Version | Action | Rationale |
-|---------|-----------------|-------------|--------|-----------|
-| `@aws-sdk/client-s3` | `3.454.0` | `^3.1014.0` | Upgraded | Pinning comment was misleading; S3 client is independent of mongodb constraint |
-| `@aws-sdk/s3-request-presigner` | `3.454.0` | `^3.1014.0` | Upgraded | Same as above |
-| `bootstrap` | `=5.3.2` | `^5.3.8` | Upgraded | Bug #39798 fixed in v5.3.4; SCSS compilation verified |
-| `escape-string-regexp` | `^4.0.0` | Removed | Replaced | Native `RegExp.escape()` (ES2026, Node.js 24) eliminates the dependency |
-| `string-width` | `=4.2.2` | `^7.0.0` | Upgraded | Used only in @growi/editor (ESM context, Vite-bundled) |
-| `next-themes` | `^0.2.1` | `^0.4.6` | Upgraded | Original issue #122 was misattributed; only change needed: type import path |
-| `@keycloak/keycloak-admin-client` | `^18.0.0` | Unchanged | Deferred | API breaking changes (v18→v26) require separate migration effort |
-| `handsontable` | `=6.2.2` | Unchanged | Kept | v7.0.0+ is proprietary (non-MIT license) |
-| `@handsontable/react` | `=2.1.0` | Unchanged | Kept | Requires handsontable >= 7.0.0 |
-
-### Additional Changes
-
-- Added `RegExp.escape()` TypeScript type declarations in `apps/app/src/@types/`, `packages/core/src/@types/`, and `packages/remark-lsx/src/@types/` (awaiting TypeScript built-in support)
-- Updated `tsconfig.build.client.json` to include `src/@types/**/*.d.ts` for Next.js build compatibility
-- Updated `generate-children-regexp.spec.ts` test expectations for `RegExp.escape()` output (escapes spaces as `\x20`)
-- Removed `escape-string-regexp` from `transpilePackages` in `next.config.ts`
-- Updated `bootstrap` version across 5 packages: apps/app, packages/editor, packages/core-styles, packages/preset-themes, apps/slackbot-proxy
-- Updated `// comments for dependencies` to retain only `@keycloak` entry with updated reason

+ 0 - 89
.kiro/specs/upgrade-fixed-packages/tasks.md

@@ -1,89 +0,0 @@
-# Implementation Plan
-
-- [x] 1. Pre-implementation verification
-- [x] 1.1 Verify RegExp.escape() availability and TypeScript support
-  - Confirm `RegExp.escape()` is available at runtime in the project's Node.js 24 target
-  - Check whether TypeScript recognizes `RegExp.escape()` — may need `lib` config update or `@types/node` update
-  - If unavailable, fall back to upgrading `escape-string-regexp` to v5 with `require(esm)` instead
-  - _Requirements: 2.2_
-
-- [x] 1.2 Review next-themes v0.3.0 and v0.4.0 breaking API changes
-  - Read changelogs for v0.3.0 and v0.4.0 releases to identify breaking changes
-  - Map breaking changes to the 12 consuming files in apps/app
-  - Determine migration effort and document required code changes
-  - Confirm GROWI's Pages Router usage is unaffected by the cacheComponents bug (issue #375)
-  - _Requirements: 1.2_
-
-- [x] 2. Low-risk package upgrades
-- [x] 2.1 (P) Relax @aws-sdk version range
-  - Change `@aws-sdk/client-s3` from `3.454.0` to `^3.1014.0` in apps/app/package.json
-  - Change `@aws-sdk/s3-request-presigner` from `3.454.0` to `^3.1014.0`
-  - Update the misleading `"@aws-skd/*"` comment to reflect the actual reason or remove it
-  - Run `pnpm install` and verify build with `turbo run build --filter @growi/app`
-  - Run `turbo run test --filter @growi/app` to confirm no regressions
-  - _Requirements: 1.3, 4.1, 4.2, 4.4_
-
-- [x] 2.2 (P) Upgrade string-width in @growi/editor
-  - Update `string-width` from `=4.2.2` to `^7.0.0` in packages/editor/package.json
-  - Verify @growi/editor builds successfully (Vite, ESM context)
-  - Run `turbo run build --filter @growi/app` to confirm downstream build passes
-  - Run `turbo run test --filter @growi/app` to confirm no regressions
-  - Remove the `string-width` comment from apps/app/package.json `// comments for dependencies`
-  - _Requirements: 2.1, 2.3, 4.1, 4.2, 4.4_
-
-- [x] 3. Upgrade bootstrap to ^5.3.8
-  - Change `bootstrap` from `=5.3.2` to `^5.3.8` in apps/app/package.json and all other packages
-  - Run `pnpm install` to resolve the new version
-  - Run `pnpm run pre:styles-commons` and `pnpm run pre:styles-components` to verify SCSS compilation
-  - Run `turbo run build --filter @growi/app` to confirm Turbopack build passes
-  - Run `turbo run lint --filter @growi/app` to check for type or lint errors
-  - Run `turbo run test --filter @growi/app` to confirm no regressions
-  - Visually inspect modal headers if a dev server is available (original bug was modal header layout)
-  - Remove the `bootstrap` comment from `// comments for dependencies`
-  - If build or SCSS fails, revert and document the failure reason
-  - _Requirements: 1.1, 4.1, 4.2, 4.3, 4.4, 4.5_
-
-- [x] 4. Replace escape-string-regexp with native RegExp.escape()
-- [x] 4.1 Migrate all source files from escape-string-regexp to RegExp.escape()
-  - Replace `import escapeStringRegexp from 'escape-string-regexp'` and corresponding calls with `RegExp.escape()` in each file
-  - Files in apps/app/src: page.ts, page/index.ts, page-grant.ts, users.js, obsolete-page.js, openai.ts (6 files)
-  - Files in packages: core/src/utils/page-path-utils (2 files), remark-lsx/src/server/routes/list-pages/index.ts (1 file)
-  - Ensure each replacement preserves the exact same escaping behavior
-  - _Requirements: 4.1_
-
-- [x] 4.2 Remove escape-string-regexp dependency and verify
-  - Remove `escape-string-regexp` from apps/app/package.json dependencies
-  - Remove from packages/core and packages/remark-lsx package.json if listed
-  - Remove the `escape-string-regexp` comment from `// comments for dependencies`
-  - Remove `escape-string-regexp` entry from `transpilePackages` in next.config.ts
-  - Run `pnpm install` to update lockfile
-  - Run `turbo run build --filter @growi/app` to verify build
-  - Run `turbo run lint --filter @growi/app` to verify no type errors
-  - Run `turbo run test --filter @growi/app` to verify no regressions
-  - If RegExp.escape() has TypeScript issues, add type declaration or adjust lib config
-  - _Requirements: 2.1, 2.2, 4.1, 4.2, 4.3, 4.4, 4.5_
-
-- [x] 5. Upgrade next-themes to ^0.4.x
-- [x] 5.1 Update next-themes and adapt consuming code
-  - Change `next-themes` from `^0.2.1` to `^0.4.6` in apps/app/package.json
-  - Apply required API migration changes across the 12 consuming files identified in design
-  - Pay attention to any renamed exports, changed hook signatures, or provider prop changes
-  - Ensure `useTheme()` and `ThemeProvider` usage is compatible with v0.4.x API
-  - _Requirements: 1.2, 4.1_
-
-- [x] 5.2 Verify next-themes upgrade
-  - Run `turbo run build --filter @growi/app` to confirm build passes
-  - Run `turbo run lint --filter @growi/app` to check for type errors (original pinning was about types)
-  - Run `turbo run test --filter @growi/app` to confirm no regressions
-  - Remove the `next-themes` comment from `// comments for dependencies`
-  - If build or type errors occur, investigate whether the issue is the same as #122 or a new problem
-  - If upgrade fails, revert and document the reason; keep the pin with an updated comment
-  - _Requirements: 4.2, 4.3, 4.4, 4.5, 4.6_
-
-- [x] 6. Finalize audit documentation and comment blocks
-  - Verify `// comments for dependencies` block contains only packages that remain pinned (@keycloak if unchanged)
-  - Verify `// comments for defDependencies` block is accurate (handsontable entries unchanged)
-  - Update comment text to reflect current reasons where applicable
-  - Produce a final summary table in research.md documenting: package name, previous version, new version or "unchanged", and rationale
-  - Confirm all requirements are satisfied by reviewing the checklist against actual changes made
-  - _Requirements: 3.1, 3.2, 4.6, 5.1, 5.2, 5.3_

+ 39 - 1
CHANGELOG.md

@@ -1,9 +1,47 @@
 # Changelog
 
-## [Unreleased](https://github.com/growilabs/compare/v7.4.7...HEAD)
+## [Unreleased](https://github.com/growilabs/growi/compare/v7.5.0...HEAD)
 
 *Please do not manually update this file. We've automated the process.*
 
+## [v7.5.0](https://github.com/growilabs/growi/compare/v7.4.7...v7.5.0) - 2026-04-07
+
+### 💎 Features
+
+* feat(ai): Suggest path to save (#10777) @tomoyuki-t-weseek
+* feat: Audit log bulk export (#10874) @Ryosei-Fukushima
+* feat(page-create-modal): add template help link icon (#10899) @tomoyuki-t-weseek
+* feat: add tooltips to editor toolbar (#10938) @Ryosei-Fukushima
+* feat: Add growi cloud link to audit log settings (#10881) @ryota-t0401
+
+### 🚀 Improvement
+
+* imprv: Staff credit (#10839) @yuki-takei
+* imprv(lsx): Allow spaces in attribute names (#10931) @NJisEverywhere
+* imprv: pre-fill export modal with current filter values (#10944) @NJisEverywhere
+* imprv(presentation): Decouple Marp from GrowiSlides (#10840) @yuki-takei
+* imprv(ui): Implement the improved New button. (#10937) @yuyaiwahori
+
+### 🐛 Bug Fixes
+
+* fix: re-scroll to hash target after lazy-rendered content completes (#10853) @miya
+* fix: Bulk export fails due to S3 upload complete version (#10833) @ryotaro-nagahara
+* fix: Duplicate user data is appearing in the user table at /user/admin (#10940) @miya
+* fix: Deleted users are not displayed in the user list on the user management page (/admin/users) (#10934) @miya
+* fix: Resolve React warnings during page rendering (#10913) @yuki-takei
+
+### 🧰 Maintenance
+
+* support: Upgrade Next.js to v16 (#10831) @yuki-takei
+* support(yjs): Migrate collaborative editing transport from y-socket.io to y-websocket (#10889) @yuki-takei
+* support: Upgrade vite v6, vitest v3, and related packages (#10945) @yuki-takei
+* ci(deps-dev): bump vite from 6.4.1 to 6.4.2 (#10960) @[dependabot[bot]](https://github.com/apps/dependabot)
+* support: Upgrade version-pinned packages and replace escape-string-regexp with RegExp.escape() (#10920) @yuki-takei
+* support: Migrate to Turbopack (#10838) @yuki-takei
+* support: Modernize Dockerfile (#10809) @yuki-takei
+* support: Reclassify deps (#10873) @yuki-takei
+* support(dev): Reduce modules loaded (#10822) @yuki-takei
+
 ## [v7.4.7](https://github.com/growilabs/compare/v7.4.6...v7.4.7) - 2026-03-23
 
 ### 💎 Features

+ 2 - 3
README.md

@@ -82,15 +82,14 @@ See [GROWI Docs: Environment Variables](https://docs.growi.org/en/admin-guide/ad
 ## Dependencies
 
 - Node.js v24.x
-- npm 6.x
-- pnpm 9.x
+- pnpm 10.x
 - [Turborepo](https://turbo.build/repo)
 - MongoDB v6.x or v8.x
 
 ### Optional Dependencies
 
 - Redis 3.x
-- ElasticSearch 7.x or 8.x (needed when using Full-text search)
+- ElasticSearch 7.x or 8.x or 9.x (needed when using Full-text search)
   - **CAUTION: Following plugins are required**
     - [Japanese (kuromoji) Analysis plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/analysis-kuromoji.html)
     - [ICU Analysis Plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/analysis-icu.html)

+ 2 - 3
README_JP.md

@@ -82,15 +82,14 @@ Crowi からの移行は **[こちら](https://docs.growi.org/en/admin-guide/mig
 ## 依存関係
 
 - Node.js v24.x
-- npm 6.x
-- pnpm 9.x
+- pnpm 10.x
 - [Turborepo](https://turbo.build/repo)
 - MongoDB v6.x or v8.x
 
 ### オプションの依存関係
 
 - Redis 3.x
-- ElasticSearch 7.x or 8.x (needed when using Full-text search)
+- ElasticSearch 7.x or 8.x or 9.x (needed when using Full-text search)
   - **注意: 次のプラグインが必要です**
     - [Japanese (kuromoji) Analysis plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/analysis-kuromoji.html)
     - [ICU Analysis Plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/analysis-icu.html)

+ 1 - 4
apps/app/.claude/skills/build-optimization/SKILL.md

@@ -27,16 +27,13 @@ user-invocable: false
 
 ### Resolve Aliases (`turbopack.resolveAlias`)
 
-7 server-only packages + `fs` are aliased to `./src/lib/empty-module.ts` in browser context:
+4 server-only packages + `fs` are aliased to `./src/lib/empty-module.ts` in browser context:
 
 | Package | Reason |
 |---------|--------|
 | `fs` | Node.js built-in, not available in browser |
-| `dtrace-provider` | Native module, server-only |
 | `mongoose` | MongoDB driver, server-only |
 | `i18next-fs-backend` | File-system i18n loader, server-only |
-| `bunyan` | Server-side logger |
-| `bunyan-format` | Server-side logger formatter |
 | `core-js` | Server-side polyfills |
 
 - Uses conditional `{ browser: './src/lib/empty-module.ts' }` syntax so server-side resolution is unaffected

+ 2 - 0
apps/app/.gitignore

@@ -11,6 +11,8 @@ next.config.js
 /build/
 /dist/
 /transpiled/
+/config/**/*.js
+/config/**/*.d.ts
 /public/static/fonts
 /public/static/js
 /public/static/styles

+ 4 - 0
apps/app/bin/openapi/definition-apiv3.js

@@ -116,6 +116,10 @@ module.exports = {
         'Install',
       ],
     },
+    {
+      name: 'AI API',
+      tags: ['AI Tools'],
+    },
     {
       name: 'Public API',
       tags: ['Healthcheck', 'Statistics', '', '', '', '', '', ''],

+ 1 - 0
apps/app/bin/openapi/generate-spec-apiv3.sh

@@ -12,6 +12,7 @@ swagger-jsdoc \
   -d "${APP_PATH}/bin/openapi/definition-apiv3.js" \
   "${APP_PATH}/src/features/external-user-group/server/routes/apiv3/*.ts" \
   "${APP_PATH}/src/features/templates/server/routes/apiv3/*.ts" \
+  "${APP_PATH}/src/features/ai-tools/**/server/routes/apiv3/*.ts" \
   "${APP_PATH}/src/features/growi-plugin/server/routes/apiv3/**/*.ts" \
   "${APP_PATH}/src/server/routes/apiv3/**/*.{js,ts}" \
   "${APP_PATH}/src/server/routes/login.js" \

+ 42 - 0
apps/app/bin/postbuild-server.ts

@@ -0,0 +1,42 @@
+/**
+ * Post-build script for server compilation.
+ *
+ * tspc compiles both `src/` and `config/` (TypeScript files under config/),
+ * so the output directory (`transpiled/`) mirrors the source tree structure
+ * (e.g. `transpiled/src/`, `transpiled/config/`).
+ *
+ * Setting `rootDir: "src"` and `outDir: "dist"` in tsconfig would eliminate this script,
+ * but that would break once `config/` is included in the compilation.
+ *
+ * This script:
+ * 1. Extracts `transpiled/src/` into `dist/`
+ * 2. Copies compiled `transpiled/config/` files into `config/` so that
+ *    relative imports from `dist/` (e.g. `../../../config/logger/config.dev`)
+ *    resolve correctly at runtime.
+ */
+import { cpSync, existsSync, readdirSync, renameSync, rmSync } from 'node:fs';
+
+const TRANSPILED_DIR = 'transpiled';
+const DIST_DIR = 'dist';
+const SRC_SUBDIR = `${TRANSPILED_DIR}/src`;
+const CONFIG_SUBDIR = `${TRANSPILED_DIR}/config`;
+
+// List transpiled contents for debugging
+// biome-ignore lint/suspicious/noConsole: This is a build script, console output is expected.
+console.log('Listing files under transpiled:');
+// biome-ignore lint/suspicious/noConsole: This is a build script, console output is expected.
+console.log(readdirSync(TRANSPILED_DIR).join('\n'));
+
+// Remove old dist
+rmSync(DIST_DIR, { recursive: true, force: true });
+
+// Move transpiled/src -> dist
+renameSync(SRC_SUBDIR, DIST_DIR);
+
+// Copy compiled config files to app root config/ so runtime imports resolve
+if (existsSync(CONFIG_SUBDIR)) {
+  cpSync(CONFIG_SUBDIR, 'config', { recursive: true, force: true });
+}
+
+// Remove leftover transpiled directory
+rmSync(TRANSPILED_DIR, { recursive: true, force: true });

+ 5 - 1
apps/app/config/logger/config.dev.js → apps/app/config/logger/config.dev.ts

@@ -1,4 +1,6 @@
-module.exports = {
+import type { LoggerConfig } from '@growi/logger';
+
+const config: LoggerConfig = {
   default: 'info',
 
   // 'express-session': 'debug',
@@ -47,3 +49,5 @@ module.exports = {
   'growi:service:openai': 'debug',
   'growi:middleware:access-token-parser:access-token': 'debug',
 };
+
+export default config;

+ 5 - 1
apps/app/config/logger/config.prod.js → apps/app/config/logger/config.prod.ts

@@ -1,6 +1,10 @@
-module.exports = {
+import type { LoggerConfig } from '@growi/logger';
+
+const config: LoggerConfig = {
   default: 'info',
 
   'growi:routes:login-passport': 'debug',
   'growi:service:PassportService': 'debug',
 };
+
+export default config;

+ 1 - 1
apps/app/docker/README.md

@@ -10,7 +10,7 @@ GROWI Official docker image
 Supported tags and respective Dockerfile links
 ------------------------------------------------
 
-* [`7.4.7`, `7.4`, `7`, `latest` (Dockerfile)](https://github.com/growilabs/growi/blob/v7.4.7/apps/app/docker/Dockerfile)
+* [`7.5.0`, `7.4`, `7`, `latest` (Dockerfile)](https://github.com/growilabs/growi/blob/v7.5.0/apps/app/docker/Dockerfile)
 * [`7.3.0`, `7.3` (Dockerfile)](https://github.com/growilabs/growi/blob/v7.3.0/apps/app/docker/Dockerfile)
 * [`7.2.0`, `7.2` (Dockerfile)](https://github.com/growilabs/growi/blob/v7.2.0/apps/app/docker/Dockerfile)
 

+ 0 - 3
apps/app/next.config.ts

@@ -133,11 +133,8 @@ const nextConfig: NextConfig = {
       // Exclude fs from client bundle
       fs: { browser: './src/lib/empty-module.ts' },
       // Exclude server-only packages from client bundle
-      'dtrace-provider': { browser: './src/lib/empty-module.ts' },
       mongoose: { browser: './src/lib/empty-module.ts' },
       'i18next-fs-backend': { browser: './src/lib/empty-module.ts' },
-      bunyan: { browser: './src/lib/empty-module.ts' },
-      'bunyan-format': { browser: './src/lib/empty-module.ts' },
       'core-js': { browser: './src/lib/empty-module.ts' },
     },
   },

+ 4 - 11
apps/app/package.json

@@ -1,6 +1,6 @@
 {
   "name": "@growi/app",
-  "version": "7.5.0-RC.0",
+  "version": "7.5.1-RC.0",
   "license": "MIT",
   "private": true,
   "scripts": {
@@ -9,8 +9,8 @@
     "start": "next start",
     "build:client": "next build",
     "build:server": "cross-env NODE_ENV=production tspc -p tsconfig.build.server.json",
-    "postbuild:server": "shx echo \"Listing files under transpiled\" && shx ls transpiled && shx rm -rf dist && shx mv transpiled/src dist && shx rm -rf transpiled",
-    "clean": "shx rm -rf dist transpiled .next next.config.js",
+    "postbuild:server": "node bin/postbuild-server.ts",
+    "clean": "rimraf dist transpiled .next next.config.js",
     "server": "cross-env NODE_ENV=production node -r dotenv-flow/config dist/server/app.js",
     "server:ci": "pnpm run server --ci",
     "preserver": "cross-env NODE_ENV=production pnpm run migrate",
@@ -71,7 +71,6 @@
     "@azure/identity": "^4.4.1",
     "@azure/openai": "^2.0.0",
     "@azure/storage-blob": "^12.16.0",
-    "@browser-bunyan/console-formatted-stream": "^1.8.0",
     "@codemirror/autocomplete": "^6.18.4",
     "@codemirror/commands": "^6.8.0",
     "@codemirror/lang-markdown": "^6.3.2",
@@ -88,6 +87,7 @@
     "@google-cloud/storage": "^5.8.5",
     "@growi/core": "workspace:^",
     "@growi/emoji-mart-data": "workspace:^",
+    "@growi/logger": "workspace:*",
     "@growi/pdf-converter-client": "workspace:^",
     "@growi/pluginkit": "workspace:^",
     "@growi/presentation": "workspace:^",
@@ -131,9 +131,7 @@
     "babel-plugin-superjson-next": "^0.4.2",
     "body-parser": "^1.20.3",
     "bootstrap": "^5.3.8",
-    "browser-bunyan": "^1.8.0",
     "bson-objectid": "^2.0.4",
-    "bunyan": "^1.8.15",
     "cm6-theme-basic-light": "^0.2.0",
     "codemirror": "^6.0.1",
     "compression": "^1.7.4",
@@ -155,7 +153,6 @@
     "ejs": "^3.1.10",
     "expose-gc": "^1.0.0",
     "express": "^4.20.0",
-    "express-bunyan-logger": "^1.3.3",
     "express-mongo-sanitize": "^2.1.0",
     "express-session": "^1.16.1",
     "express-validator": "^6.14.0",
@@ -280,7 +277,6 @@
     "uid-safe": "^2.1.5",
     "unified": "^11.0.0",
     "unist-util-visit": "^5.0.0",
-    "universal-bunyan": "^0.9.2",
     "unstated": "^2.1.1",
     "unzip-stream": "^0.3.2",
     "url-join": "^4.0.0",
@@ -310,7 +306,6 @@
     "@testing-library/jest-dom": "^6.5.0",
     "@testing-library/user-event": "^14.5.2",
     "@types/archiver": "^6.0.2",
-    "@types/bunyan": "^1.8.11",
     "@types/express": "^4.17.21",
     "@types/hast": "^3.0.4",
     "@types/js-cookie": "^3.0.6",
@@ -330,7 +325,6 @@
     "@types/url-join": "^4.0.2",
     "@types/uuid": "^10.0.0",
     "@types/ws": "^8.18.1",
-    "babel-loader": "^8.2.5",
     "commander": "^14.0.0",
     "connect-browser-sync": "^2.1.0",
     "eazy-logger": "^3.1.0",
@@ -345,7 +339,6 @@
     "mdast-util-find-and-replace": "^3.0.1",
     "mongodb-connection-string-url": "^7.0.0",
     "mongodb-memory-server-core": "^9.1.1",
-    "morgan": "^1.10.0",
     "openapi-typescript": "^7.8.0",
     "rehype-rewrite": "^4.0.2",
     "remark-github-admonitions-to-directives": "^2.0.0",

+ 15 - 3
apps/app/public/static/locales/en_US/admin.json

@@ -413,6 +413,7 @@
     "azure_storage_account_name": "Storage Account Name",
     "azure_storage_container_name": "Container Name",
     "azure_note_for_the_only_env_option": "The Azure Settings is limited by the value of environment variable.<br>To change this setting, please change to false or delete the value of the environment variable <code>{{env}}</code> .",
+    "azure_note_for_the_only_env_option_cloud": "Azure settings can be changed from the GROWI.cloud admin panel.",
     "file_upload": "This is for uploading file settings. If you complete file upload settings, file upload function, profile picture function etc will be enabled.",
     "test_connection": "Test connection to mail",
     "change_setting": "Caution:if you change this setting not completed, you will not be able to access files you have uploaded so far.",
@@ -425,7 +426,8 @@
     "enable": "Enable",
     "disable": "Disable",
     "use_env_var_if_empty": "If the value in the database is empty, the value of the environment variable <code>{{variable}}</code> is used.",
-    "note_for_the_only_env_option": "The GCS Settings is limited by the value of environment variable.<br>To change this setting, please change to false or delete the value of the environment variable <code>{{env}}</code> ."
+    "note_for_the_only_env_option": "The GCS Settings is limited by the value of environment variable.<br>To change this setting, please change to false or delete the value of the environment variable <code>{{env}}</code> .",
+    "note_for_the_only_env_option_cloud": "GCS settings can be changed from the GROWI.cloud admin panel."
   },
   "markdown_settings": {
     "markdown_settings": "Markdown Settings",
@@ -781,7 +783,8 @@
       "revoke_read_only_access": "Revoke read only access",
       "grant_read_only_access": "Grant read only access",
       "send_invitation_email": "Send invitation email",
-      "resend_invitation_email": "Resend invitation email"
+      "resend_invitation_email": "Resend invitation email",
+      "deleted_user": "(Deleted User)"
     },
     "reset_password": "Reset Password",
     "reset_password_modal": {
@@ -881,6 +884,13 @@
     "available_action_list_explanation": "List of actions that can be searched/viewed in the current settings",
     "action_list": "Action List",
     "disable_mode_explanation": "Audit log is currently disabled. To enable it, set the environment variable <code>AUDIT_LOG_ENABLED</code> to true.",
+    "export": "Export",
+    "export_audit_log": "Export Audit Log",
+    "export_requested": "Export request accepted. You will be notified when the export is complete.",
+    "export_failed": "Failed to start export",
+    "duplicate_export_confirm": "An export with the same conditions is already in progress. Do you want to restart it?",
+    "restart_export": "Restart Export",
+    "confirm_export": "Confirm Export",
     "disable_mode_explanation_cloud": "Audit log is currently disabled. To enable it, please update the app settings from the GROWI.cloud management screen.",
     "docs_url": {
       "log_type": "https://docs.growi.org/en/admin-guide/admin-cookbook/audit-log-setup.html#log-types"
@@ -907,7 +917,9 @@
     "confirm": "Delete plugin?"
   },
   "cloud_setting_management": {
-    "to_cloud_settings": "Open GROWI.cloud Settings"
+    "to_cloud_settings": "Open GROWI.cloud Settings",
+    "change_from_cloud": "You can change the settings from the GROWI.cloud admin panel.",
+    "storage_change_from_cloud": "The current file upload method is {{fileUploadType}}. Settings can be changed from the GROWI.cloud admin panel."
   },
   "audit_log_action_category": {
     "Page": "Page",

+ 25 - 1
apps/app/public/static/locales/en_US/translation.json

@@ -660,7 +660,9 @@
     }
   },
   "default_ai_assistant": {
-    "not_set": "Default assistant is not set"
+    "not_set": "Default assistant is not set",
+    "open_cloud_settings_to_enable": "Please check the GROWI.cloud management screen to enable AI integration",
+    "to_cloud_settings": "Open GROWI.cloud Settings"
   },
   "ai_assistant_substance": {
     "add_assistant": "Add Assistant",
@@ -861,6 +863,11 @@
     "started_on": "Started on",
     "file_upload_not_configured": "File upload settings are not configured"
   },
+  "audit_log_bulk_export": {
+    "download_expired": "Download period has expired",
+    "job_expired": "Export process was canceled because it took too long",
+    "no_results": "No audit logs matched the specified filters"
+  },
   "message": {
     "successfully_connected": "Successfully Connected!",
     "fail_to_save_access_token": "Failed to save access_token. Please try again.",
@@ -1077,5 +1084,22 @@
     "success-toaster": "Latest text synchronized",
     "skipped-toaster": "Skipped synchronizing since the editor is not activated. Please open the editor and try again.",
     "error-toaster": "Synchronization of the latest text failed"
+  },
+  "toolbar": {
+    "attachments": "Attachments",
+    "bold": "Bold",
+    "bullet_list": "Bullet List",
+    "checklist": "Checklist",
+    "code": "Code",
+    "diagram": "Diagram",
+    "emoji": "Emoji",
+    "heading": "Heading",
+    "italic": "Italic",
+    "numbered_list": "Numbered List",
+    "quote": "Quote",
+    "strikethrough": "Strikethrough",
+    "table": "Table",
+    "template": "Template",
+    "text_formatting": "Text Formatting"
   }
 }

+ 15 - 3
apps/app/public/static/locales/fr_FR/admin.json

@@ -413,6 +413,7 @@
     "azure_storage_account_name": "Nom du compte de stockage",
     "azure_storage_container_name": "Nom du conteneur",
     "azure_note_for_the_only_env_option": "Les paramètres sont définis par des variables d'environnement.<br>Pour modifier ce paramètre, supprimer la variable d'environnement <code>{{env}}</code> .",
+    "azure_note_for_the_only_env_option_cloud": "Les paramètres Azure peuvent être modifiés depuis le panneau d'administration GROWI.cloud.",
     "file_upload": "Téléversement de fichiers",
     "test_connection": "Essai de la connection e-mail",
     "change_setting": "Si ce paramètre n'est pas complètement configuré, les fichiers existants seront inaccessibles.",
@@ -425,7 +426,8 @@
     "enable": "Activer",
     "disable": "Désactiver",
     "use_env_var_if_empty": "Si la valeur dans la base de données est vide, la valeur de variable d'environnement <code>{{variable}}</code> est utilisé.",
-    "note_for_the_only_env_option": "Les paramètres sont définis par des variables d'environnement.<br>Pour modifier ce paramètre, supprimer la variable d'environnement <code>{{env}}</code> ."
+    "note_for_the_only_env_option": "Les paramètres sont définis par des variables d'environnement.<br>Pour modifier ce paramètre, supprimer la variable d'environnement <code>{{env}}</code> .",
+    "note_for_the_only_env_option_cloud": "Les paramètres GCS peuvent être modifiés depuis le panneau d'administration GROWI.cloud."
   },
   "markdown_settings": {
     "markdown_settings": "Markdown",
@@ -781,7 +783,8 @@
       "revoke_read_only_access": "Révoquer permission de lecture",
       "grant_read_only_access": "Permission de lecture-seule",
       "send_invitation_email": "Envoyer courriel d'invitation",
-      "resend_invitation_email": "Renvoyer courriel d'invitation"
+      "resend_invitation_email": "Renvoyer courriel d'invitation",
+      "deleted_user": "(Utilisateur supprimé)"
     },
     "reset_password": "Réinitialiser mot de passe",
     "reset_password_modal": {
@@ -880,6 +883,13 @@
     "available_action_list_explanation": "Liste des actions pouvant être recherchées/vues",
     "action_list": "Liste d'actions",
     "disable_mode_explanation": "Cette fonctionnalité est désactivée. Afin de l'activer, mettre à jour <code>AUDIT_LOG_ENABLED</code> pour true.",
+    "export": "Exporter",
+    "export_audit_log": "Exporter le journal d'audit",
+    "export_requested": "Demande d'exportation acceptée. Vous serez averti lorsque l'exportation sera terminée.",
+    "export_failed": "Échec du démarrage de l'exportation",
+    "duplicate_export_confirm": "Une exportation avec les mêmes conditions est déjà en cours. Voulez-vous la redémarrer ?",
+    "restart_export": "Redémarrer l'exportation",
+    "confirm_export": "Confirmer l'exportation",
     "disable_mode_explanation_cloud": "Le journal d'audit est actuellement désactivé. Pour l'activer, veuillez modifier les paramètres de l'application depuis l'écran de gestion GROWI.cloud.",
     "docs_url": {
       "log_type": "https://docs.growi.org/en/admin-guide/admin-cookbook/audit-log-setup.html#log-types"
@@ -906,7 +916,9 @@
     "confirm": "Supprimer le plugin?"
   },
   "cloud_setting_management": {
-    "to_cloud_settings": "Ouvrir paramètres GROWI.cloud"
+    "to_cloud_settings": "Ouvrir paramètres GROWI.cloud",
+    "change_from_cloud": "Vous pouvez modifier les paramètres depuis le panneau d'administration GROWI.cloud.",
+    "storage_change_from_cloud": "La méthode de téléversement actuelle est {{fileUploadType}}. Les paramètres peuvent être modifiés depuis le panneau d'administration GROWI.cloud."
   },
   "audit_log_action_category": {
     "Page": "Page",

+ 25 - 1
apps/app/public/static/locales/fr_FR/translation.json

@@ -655,7 +655,9 @@
     }
   },
   "default_ai_assistant": {
-    "not_set": "L'assistant par défaut n'est pas configuré"
+    "not_set": "L'assistant par défaut n'est pas configuré",
+    "open_cloud_settings_to_enable": "Veuillez consulter l'écran de gestion GROWI.cloud pour activer l'intégration IA",
+    "to_cloud_settings": "Ouvrir les paramètres GROWI.cloud"
   },
   "ai_assistant_substance": {
     "add_assistant": "Ajouter un assistant",
@@ -856,6 +858,11 @@
     "started_on": "Commencé le",
     "file_upload_not_configured": "Les paramètres de téléchargement de fichiers ne sont pas configurés"
   },
+  "audit_log_bulk_export": {
+    "download_expired": "La période de téléchargement a expiré",
+    "job_expired": "Le processus d'exportation a été annulé car il a pris trop de temps",
+    "no_results": "Aucun journal d'audit ne correspondait aux filtres spécifiés"
+  },
   "message": {
     "successfully_connected": "Connecté!",
     "fail_to_save_access_token": "Échec de la sauvegarde de access_token.",
@@ -1069,5 +1076,22 @@
     "success-toaster": "Dernière révision synchronisée",
     "skipped-toaster": "Le mode édition doit être activé pour déclencher la synchronisation. Synchronisation annulée.",
     "error-toaster": "Synchronisation échouée"
+  },
+  "toolbar": {
+    "attachments": "Pièces jointes",
+    "bold": "Gras",
+    "bullet_list": "Liste à puces",
+    "checklist": "Liste de contrôle",
+    "code": "Code",
+    "diagram": "Diagramme",
+    "emoji": "Emoji",
+    "heading": "Titre",
+    "italic": "Italique",
+    "numbered_list": "Liste numérotée",
+    "quote": "Citation",
+    "strikethrough": "Barré",
+    "table": "Tableau",
+    "template": "Modèle",
+    "text_formatting": "Mise en forme du texte"
   }
 }

+ 15 - 3
apps/app/public/static/locales/ja_JP/admin.json

@@ -421,6 +421,7 @@
     "azure_storage_account_name": "ストレージアカウント名",
     "azure_storage_container_name": "コンテナ名",
     "azure_note_for_the_only_env_option": "現在Azure設定は環境変数の値によって制限されています<br>この設定を変更する場合は環境変数 <code>{{env}}</code> の値をfalseに変更もしくは削除してください",
+    "azure_note_for_the_only_env_option_cloud": "Azure の設定は GROWI.cloud の管理画面から変更できます",
     "fixed_by_env_var": "環境変数 <code>{{envKey}}={{envVar}}</code> により固定されています。",
     "file_upload": "ファイルをアップロードするための設定を行います。ファイルアップロードの設定を完了させると、ファイルアップロード機能、プロフィール写真機能などが有効になります。",
     "test_connection": "接続テスト",
@@ -434,7 +435,8 @@
     "enable": "有効",
     "disable": "無効",
     "use_env_var_if_empty": "データベース側の値が空の場合、環境変数 <code>{{variable}}</code> の値を利用します",
-    "note_for_the_only_env_option": "現在GCS設定は環境変数の値によって制限されています<br>この設定を変更する場合は環境変数 <code>{{env}}</code> の値をfalseに変更もしくは削除してください"
+    "note_for_the_only_env_option": "現在GCS設定は環境変数の値によって制限されています<br>この設定を変更する場合は環境変数 <code>{{env}}</code> の値をfalseに変更もしくは削除してください",
+    "note_for_the_only_env_option_cloud": "GCS の設定は GROWI.cloud の管理画面から変更できます"
   },
   "markdown_settings": {
     "markdown_settings": "マークダウン設定",
@@ -790,7 +792,8 @@
       "revoke_read_only_access": "閲覧のみアクセス権を外す",
       "grant_read_only_access": "閲覧のみアクセス権を付与する",
       "send_invitation_email": "招待メールの送信",
-      "resend_invitation_email": "招待メールの再送信"
+      "resend_invitation_email": "招待メールの再送信",
+      "deleted_user": "(削除されたユーザー)"
     },
     "reset_password": "パスワードのリセット",
     "reset_password_modal": {
@@ -890,6 +893,13 @@
     "available_action_list_explanation": "現在の設定で検索 / 表示 可能なアクション一覧です",
     "action_list": "アクション一覧",
     "disable_mode_explanation": "現在、監査ログは無効になっています。有効にする場合は環境変数 <code>AUDIT_LOG_ENABLED</code> を true に設定してください。",
+    "export": "エクスポート",
+    "export_audit_log": "監査ログのエクスポート",
+    "export_requested": "エクスポートリクエストを受け付けました。完了後に通知されます。",
+    "export_failed": "エクスポートの開始に失敗しました",
+    "duplicate_export_confirm": "同じ条件のエクスポートが進行中です。やり直しますか?",
+    "restart_export": "やり直す",
+    "confirm_export": "エクスポートの確認",
     "disable_mode_explanation_cloud": "現在、監査ログは無効になっています。有効にするには、GROWI.cloud の管理画面からアプリの設定を変更してください。",
     "docs_url": {
       "log_type": "https://docs.growi.org/ja/admin-guide/admin-cookbook/audit-log-setup.html#log-types"
@@ -916,7 +926,9 @@
     "confirm": "プラグインを削除しますか?"
   },
   "cloud_setting_management": {
-    "to_cloud_settings": "GROWI.cloud の管理画面へ"
+    "to_cloud_settings": "GROWI.cloud の管理画面へ",
+    "change_from_cloud": "GROWI.cloud の管理画面から設定を変更できます。",
+    "storage_change_from_cloud": "現在のファイルアップロード方法は {{fileUploadType}} です。変更は GROWI.cloud の管理画面から行えます。"
   },
   "audit_log_action_category": {
     "Page": "ページ",

+ 25 - 1
apps/app/public/static/locales/ja_JP/translation.json

@@ -693,7 +693,9 @@
     }
   },
   "default_ai_assistant": {
-    "not_set": "デフォルトアシスタントが設定されていません"
+    "not_set": "デフォルトアシスタントが設定されていません",
+    "open_cloud_settings_to_enable": "AI 連携を有効にするには GROWI.cloud の管理画面をご確認ください",
+    "to_cloud_settings": "GROWI.cloud の管理画面へ"
   },
   "ai_assistant_substance": {
     "add_assistant": "アシスタントを追加する",
@@ -894,6 +896,11 @@
     "started_on": "開始日時",
     "file_upload_not_configured": "ファイルアップロード設定が完了していません"
   },
+  "audit_log_bulk_export": {
+    "download_expired": "ダウンロード期限が切れました",
+    "job_expired": "エクスポート時間が長すぎるため、処理が中断されました",
+    "no_results": "指定されたフィルターに一致する監査ログはありませんでした"
+  },
   "message": {
     "successfully_connected": "接続に成功しました!",
     "fail_to_save_access_token": "アクセストークンの保存に失敗しました、再度お試しください。",
@@ -1110,5 +1117,22 @@
     "success-toaster": "最新の本文を同期しました",
     "skipped-toaster": "エディターがアクティブではないため、同期をスキップしました。エディターを開いて再度お試しください。",
     "error-toaster": "最新の本文の同期に失敗しました"
+  },
+  "toolbar": {
+    "attachments": "添付ファイル",
+    "bold": "太字",
+    "bullet_list": "箇条書きリスト",
+    "checklist": "チェックリスト",
+    "code": "コード",
+    "diagram": "ダイアグラム",
+    "emoji": "絵文字",
+    "heading": "見出し",
+    "italic": "イタリック",
+    "numbered_list": "番号付きリスト",
+    "quote": "引用",
+    "strikethrough": "取り消し線",
+    "table": "テーブル",
+    "template": "テンプレート",
+    "text_formatting": "テキスト書式"
   }
 }

+ 15 - 3
apps/app/public/static/locales/ko_KR/admin.json

@@ -413,6 +413,7 @@
     "azure_storage_account_name": "스토리지 계정 이름",
     "azure_storage_container_name": "컨테이너 이름",
     "azure_note_for_the_only_env_option": "Azure 설정은 환경 변수 값에 의해 제한됩니다.<br>이 설정을 변경하려면 환경 변수 <code>{{env}}</code>의 값을 false로 변경하거나 삭제하십시오.",
+    "azure_note_for_the_only_env_option_cloud": "Azure 설정은 GROWI.cloud 관리 화면에서 변경할 수 있습니다.",
     "file_upload": "파일 업로드 설정용입니다. 파일 업로드 설정을 완료하면 파일 업로드 기능, 프로필 사진 기능 등이 활성화됩니다.",
     "test_connection": "메일 연결 테스트",
     "change_setting": "주의: 이 설정을 완료하지 않으면 지금까지 업로드한 파일에 접근할 수 없습니다.",
@@ -425,7 +426,8 @@
     "enable": "활성화",
     "disable": "비활성화",
     "use_env_var_if_empty": "데이터베이스 값이 비어 있으면 환경 변수 <code>{{variable}}</code>의 값이 사용됩니다.",
-    "note_for_the_only_env_option": "GCS 설정은 환경 변수 값에 의해 제한됩니다.<br>이 설정을 변경하려면 환경 변수 <code>{{env}}</code>의 값을 false로 변경하거나 삭제하십시오."
+    "note_for_the_only_env_option": "GCS 설정은 환경 변수 값에 의해 제한됩니다.<br>이 설정을 변경하려면 환경 변수 <code>{{env}}</code>의 값을 false로 변경하거나 삭제하십시오.",
+    "note_for_the_only_env_option_cloud": "GCS 설정은 GROWI.cloud 관리 화면에서 변경할 수 있습니다."
   },
   "markdown_settings": {
     "markdown_settings": "마크다운 설정",
@@ -781,7 +783,8 @@
       "revoke_read_only_access": "읽기 전용 권한 취소",
       "grant_read_only_access": "읽기 전용 권한 부여",
       "send_invitation_email": "초대 이메일 전송",
-      "resend_invitation_email": "초대 이메일 재전송"
+      "resend_invitation_email": "초대 이메일 재전송",
+      "deleted_user": "(삭제된 사용자)"
     },
     "reset_password": "비밀번호 재설정",
     "reset_password_modal": {
@@ -881,6 +884,13 @@
     "available_action_list_explanation": "현재 설정에서 검색/볼 수 있는 작업 목록",
     "action_list": "작업 목록",
     "disable_mode_explanation": "감사 로그가 현재 비활성화되어 있습니다. 활성화하려면 환경 변수 <code>AUDIT_LOG_ENABLED</code>를 true로 설정하십시오.",
+    "export": "내보내기",
+    "export_audit_log": "감사 로그 내보내기",
+    "export_requested": "내보내기 요청이 접수되었습니다. 내보내기가 완료되면 알림을 받게 됩니다.",
+    "export_failed": "내보내기 시작에 실패했습니다",
+    "duplicate_export_confirm": "동일한 조건의 내보내기가 이미 진행 중입니다. 다시 시작하시겠습니까?",
+    "restart_export": "내보내기 다시 시작",
+    "confirm_export": "내보내기 확인",
     "disable_mode_explanation_cloud": "현재 감사 로그가 비활성화되어 있습니다. 활성화하려면 GROWI.cloud 관리 화면에서 앱 설정을 변경하십시오.",
     "docs_url": {
       "log_type": "https://docs.growi.org/en/admin-guide/admin-cookbook/audit-log-setup.html#log-types"
@@ -907,7 +917,9 @@
     "confirm": "플러그인 삭제?"
   },
   "cloud_setting_management": {
-    "to_cloud_settings": "GROWI.cloud 설정 열기"
+    "to_cloud_settings": "GROWI.cloud 설정 열기",
+    "change_from_cloud": "GROWI.cloud 관리 화면에서 설정을 변경할 수 있습니다.",
+    "storage_change_from_cloud": "현재 파일 업로드 방법은 {{fileUploadType}} 입니다. 설정은 GROWI.cloud 관리 화면에서 변경할 수 있습니다."
   },
   "audit_log_action_category": {
     "Page": "페이지",

+ 25 - 1
apps/app/public/static/locales/ko_KR/translation.json

@@ -620,7 +620,9 @@
     }
   },
   "default_ai_assistant": {
-    "not_set": "기본 어시스턴트가 설정되지 않았습니다."
+    "not_set": "기본 어시스턴트가 설정되지 않았습니다.",
+    "open_cloud_settings_to_enable": "AI 통합을 활성화하려면 GROWI.cloud 관리 화면을 확인하십시오",
+    "to_cloud_settings": "GROWI.cloud 관리 화면 열기"
   },
   "ai_assistant_substance": {
     "add_assistant": "어시스턴트 추가",
@@ -821,6 +823,11 @@
     "started_on": "시작일",
     "file_upload_not_configured": "파일 업로드 설정이 구성되지 않았습니다."
   },
+  "audit_log_bulk_export": {
+    "download_expired": "다운로드 기간이 만료되었습니다",
+    "job_expired": "내보내기 프로세스가 너무 오래 걸려 취소되었습니다",
+    "no_results": "지정된 필터에 일치하는 감사 로그가 없습니다"
+  },
   "message": {
     "successfully_connected": "성공적으로 연결되었습니다!",
     "fail_to_save_access_token": "액세스 토큰 저장 실패. 다시 시도하십시오.",
@@ -1037,5 +1044,22 @@
     "success-toaster": "최신 텍스트 동기화됨",
     "skipped-toaster": "편집기가 활성화되지 않아 동기화 건너뜀. 편집기를 열고 다시 시도하십시오.",
     "error-toaster": "최신 텍스트 동기화 실패"
+  },
+  "toolbar": {
+    "attachments": "첨부 파일",
+    "bold": "굵게",
+    "bullet_list": "글머리 기호 목록",
+    "checklist": "체크리스트",
+    "code": "코드",
+    "diagram": "다이어그램",
+    "emoji": "이모지",
+    "heading": "제목",
+    "italic": "기울임꼴",
+    "numbered_list": "번호 매기기 목록",
+    "quote": "인용",
+    "strikethrough": "취소선",
+    "table": "표",
+    "template": "템플릿",
+    "text_formatting": "텍스트 서식"
   }
 }

+ 15 - 3
apps/app/public/static/locales/zh_CN/admin.json

@@ -421,6 +421,7 @@
     "azure_storage_account_name": "Storage Account Name",
     "azure_storage_container_name": "Container Name",
     "azure_note_for_the_only_env_option": "The Azure Settings is limited by the value of environment variable.<br>To change this setting, please change to false or delete the value of the environment variable <code>{{env}}</code> .",
+    "azure_note_for_the_only_env_option_cloud": "Azure 设置可以从 GROWI.cloud 管理页面进行更改。",
     "fixed_by_env_var": "此设置由环境变量 <code>{{envKey}}={{envVar}}</code> 固定。",
     "file_upload": "This is for uploading file settings. If you complete file upload settings, file upload function, profile picture function etc will be enabled.",
     "test_connection": "测试邮件服务器连接",
@@ -434,7 +435,8 @@
     "enable": "启用",
     "disable": "停用",
     "use_env_var_if_empty": "如果数据库中的值为空,则环境变量的值 <code>{{variable}}</code> 启用。",
-    "note_for_the_only_env_option": "The GCS settings is limited by the value of environment variable.<br>To change this setting, please change to false or delete the value of the environment variable <code>{{env}}</code> ."
+    "note_for_the_only_env_option": "The GCS settings is limited by the value of environment variable.<br>To change this setting, please change to false or delete the value of the environment variable <code>{{env}}</code> .",
+    "note_for_the_only_env_option_cloud": "GCS 设置可以从 GROWI.cloud 管理页面进行更改。"
   },
   "markdown_settings": {
     "markdown_settings": "Markdown设置",
@@ -790,7 +792,8 @@
       "revoke_read_only_access": "取消只读访问",
       "grant_read_only_access": "给予只读权限",
       "send_invitation_email": "发送邀请邮件",
-      "resend_invitation_email": "重发邀请函"
+      "resend_invitation_email": "重发邀请函",
+      "deleted_user": "(已删除的用户)"
     },
     "reset_password": "重置密码",
     "reset_password_modal": {
@@ -890,6 +893,13 @@
     "available_action_list_explanation": "在当前配置中可以搜索/查看的行动列表",
     "action_list": "行动清单",
     "disable_mode_explanation": "审计日志当前已禁用。 要启用它,请将环境变量 <code>AUDIT_LOG_ENABLED</code> 设置为 true。",
+    "export": "导出",
+    "export_audit_log": "导出审核日志",
+    "export_requested": "导出请求已接受。导出完成后将通知您。",
+    "export_failed": "导出启动失败",
+    "duplicate_export_confirm": "已有相同条件的导出正在进行中。是否要重新启动它?",
+    "restart_export": "重新启动导出",
+    "confirm_export": "确认导出",
     "disable_mode_explanation_cloud": "审计日志当前已禁用。要启用它,请从 GROWI.cloud 管理界面更改应用程序设置。",
     "docs_url": {
       "log_type": "https://docs.growi.org/en/admin-guide/admin-cookbook/audit-log-setup.html#log-types"
@@ -916,7 +926,9 @@
     "confirm": "Delete plugin?"
   },
   "cloud_setting_management": {
-    "to_cloud_settings": "進入 GROWI.cloud 的管理界面"
+    "to_cloud_settings": "进入 GROWI.cloud 的管理界面",
+    "change_from_cloud": "您可以从 GROWI.cloud 管理界面更改设置。",
+    "storage_change_from_cloud": "当前文件上传方式为 {{fileUploadType}}。设置可以从 GROWI.cloud 管理页面进行更改。"
   },
   "audit_log_action_category": {
     "Page": "页面",

+ 25 - 1
apps/app/public/static/locales/zh_CN/translation.json

@@ -651,7 +651,9 @@
     }
   },
   "default_ai_assistant": {
-    "not_set": "未设置默认助手"
+    "not_set": "未设置默认助手",
+    "open_cloud_settings_to_enable": "请查看 GROWI.cloud 管理界面以启用 AI 集成",
+    "to_cloud_settings": "前往 GROWI.cloud 管理界面"
   },
   "ai_assistant_substance": {
     "add_assistant": "添加助手",
@@ -866,6 +868,11 @@
     "started_on": "开始于",
     "file_upload_not_configured": "未配置文件上传设置"
   },
+  "audit_log_bulk_export": {
+    "download_expired": "下载期限已过期",
+    "job_expired": "导出过程因耗时过长被取消",
+    "no_results": "没有审计日志符合指定筛选条件"
+  },
   "message": {
     "successfully_connected": "连接成功!",
     "fail_to_save_access_token": "无法保存访问令牌。请再试一次。",
@@ -1082,5 +1089,22 @@
     "success-toaster": "同步最新文本",
     "skipped-toaster": "由于编辑器未激活,因此跳过同步。 请打开编辑器并重试。",
     "error-toaster": "同步最新文本失败"
+  },
+  "toolbar": {
+    "attachments": "附件",
+    "bold": "粗体",
+    "bullet_list": "无序列表",
+    "checklist": "清单",
+    "code": "代码",
+    "diagram": "图表",
+    "emoji": "表情符号",
+    "heading": "标题",
+    "italic": "斜体",
+    "numbered_list": "有序列表",
+    "quote": "引用",
+    "strikethrough": "删除线",
+    "table": "表格",
+    "template": "模板",
+    "text_formatting": "文本格式"
   }
 }

+ 0 - 26
apps/app/regconfig.json

@@ -1,26 +0,0 @@
-{
-  "core": {
-    "workingDir": ".reg",
-    "actualDir": "test/playwright/screenshots",
-    "thresholdRate": 0.001,
-    "addIgnore": true,
-    "ximgdiff": {
-      "invocationType": "client"
-    }
-  },
-  "plugins": {
-    "reg-keygen-git-hash-plugin": true,
-    "reg-notify-github-plugin": {
-      "prCommentBehavior": "new",
-      "setCommitStatus": false,
-      "shortDescription": true,
-      "clientId": "$REG_NOTIFY_GITHUB_PLUGIN_CLIENTID"
-    },
-    "reg-notify-slack-plugin": {
-      "webhookUrl": "$SLACK_WEBHOOK_URL"
-    },
-    "reg-publish-s3-plugin": {
-      "bucketName": "growi-vrt-snapshots"
-    }
-  }
-}

+ 21 - 11
apps/app/src/client/components/Admin/App/AzureSetting.tsx

@@ -15,6 +15,7 @@ export type AzureSettingMoleculeProps = {
   envAzureStorageAccountName?: string;
   envAzureStorageContainerName?: string;
   onChangeAzureReferenceFileWithRelayMode: (val: boolean) => void;
+  isCloud: boolean;
 };
 
 export const AzureSettingMolecule = (
@@ -30,6 +31,7 @@ export const AzureSettingMolecule = (
     envAzureClientSecret,
     envAzureStorageAccountName,
     envAzureStorageContainerName,
+    isCloud,
   } = props;
 
   return (
@@ -84,17 +86,25 @@ export const AzureSettingMolecule = (
         </div>
       </div>
 
-      {azureUseOnlyEnvVars && (
-        <p
-          className="alert alert-info"
-          // biome-ignore lint/security/noDangerouslySetInnerHtml: includes <br> and <code> from i18n strings
-          dangerouslySetInnerHTML={{
-            __html: t('admin:app_setting.azure_note_for_the_only_env_option', {
-              env: 'AZURE_USES_ONLY_ENV_VARS_FOR_SOME_OPTIONS',
-            }),
-          }}
-        />
-      )}
+      {azureUseOnlyEnvVars &&
+        (isCloud ? (
+          <p className="alert alert-info">
+            {t('admin:app_setting.azure_note_for_the_only_env_option_cloud')}
+          </p>
+        ) : (
+          <p
+            className="alert alert-info"
+            // biome-ignore lint/security/noDangerouslySetInnerHtml: includes <br> and <code> from i18n strings
+            dangerouslySetInnerHTML={{
+              __html: t(
+                'admin:app_setting.azure_note_for_the_only_env_option',
+                {
+                  env: 'AZURE_USES_ONLY_ENV_VARS_FOR_SOME_OPTIONS',
+                },
+              ),
+            }}
+          />
+        ))}
       <table
         className={`table settings-table ${azureUseOnlyEnvVars && 'use-only-env-vars'}`}
       >

+ 64 - 39
apps/app/src/client/components/Admin/App/FileUploadSetting.tsx

@@ -5,6 +5,7 @@ import { useController, useForm } from 'react-hook-form';
 
 import { toastError, toastSuccess } from '~/client/util/toastr';
 import { FileUploadType } from '~/interfaces/file-uploader';
+import { useGrowiAppIdForGrowiCloud, useGrowiCloudUri } from '~/states/global';
 
 import AdminUpdateButtonRow from '../Common/AdminUpdateButtonRow';
 import { AwsSettingMolecule } from './AwsSetting';
@@ -15,6 +16,9 @@ import { useFileUploadSettings } from './useFileUploadSettings';
 
 const FileUploadSetting = (): JSX.Element => {
   const { t } = useTranslation(['admin', 'commons']);
+  const growiCloudUri = useGrowiCloudUri();
+  const growiAppIdForGrowiCloud = useGrowiAppIdForGrowiCloud();
+  const isCloud = growiCloudUri != null && growiAppIdForGrowiCloud != null;
   const { data, isLoading, error, updateSettings } = useFileUploadSettings();
 
   const { register, handleSubmit, control, watch, formState } =
@@ -107,44 +111,63 @@ const FileUploadSetting = (): JSX.Element => {
           {t('admin:app_setting.file_upload_method')}
         </span>
 
-        <div className="col-md-6 py-2">
-          {Object.values(FileUploadType).map((type) => {
-            return (
-              <div key={type} className="form-check form-check-inline">
-                <input
-                  type="radio"
-                  className="form-check-input"
-                  name="file-upload-type"
-                  id={`file-upload-type-radio-${type}`}
-                  checked={fileUploadTypeField.value === type}
-                  disabled={data.isFixedFileUploadByEnvVar}
-                  onChange={() => fileUploadTypeField.onChange(type)}
-                />
-                <label
-                  className="form-label form-check-label"
-                  htmlFor={`file-upload-type-radio-${type}`}
-                >
-                  {t(`admin:app_setting.${type}_label`)}
-                </label>
-              </div>
-            );
-          })}
-        </div>
-        {data.isFixedFileUploadByEnvVar && (
-          <p className="alert alert-warning mt-2 text-start offset-3 col-6">
-            <span className="material-symbols-outlined">help</span>
-            <b>FIXED</b>
-            <br />
-            <b
-              // biome-ignore lint/security/noDangerouslySetInnerHtml: includes markup from i18n strings
-              dangerouslySetInnerHTML={{
-                __html: t('admin:app_setting.fixed_by_env_var', {
-                  envKey: 'FILE_UPLOAD',
-                  envVar: data.envFileUploadType,
-                }),
-              }}
-            />
-          </p>
+        {!isCloud && (
+          <div className="col-md-6 py-2">
+            {Object.values(FileUploadType).map((type) => {
+              return (
+                <div key={type} className="form-check form-check-inline">
+                  <input
+                    type="radio"
+                    className="form-check-input"
+                    name="file-upload-type"
+                    id={`file-upload-type-radio-${type}`}
+                    checked={fileUploadTypeField.value === type}
+                    disabled={data.isFixedFileUploadByEnvVar}
+                    onChange={() => fileUploadTypeField.onChange(type)}
+                  />
+                  <label
+                    className="form-label form-check-label"
+                    htmlFor={`file-upload-type-radio-${type}`}
+                  >
+                    {t(`admin:app_setting.${type}_label`)}
+                  </label>
+                </div>
+              );
+            })}
+          </div>
+        )}
+        {isCloud ? (
+          <div className="alert alert-warning mt-2 text-start offset-3 col-6">
+            <p>
+              {t('admin:cloud_setting_management.storage_change_from_cloud', {
+                fileUploadType: t(`admin:app_setting.${fileUploadType}_label`),
+              })}
+            </p>
+            <a
+              href={`${growiCloudUri}/my/apps/${growiAppIdForGrowiCloud}`}
+              className="btn btn-outline-secondary"
+            >
+              <span className="material-symbols-outlined me-1">share</span>
+              {t('admin:cloud_setting_management.to_cloud_settings')}
+            </a>
+          </div>
+        ) : (
+          data.isFixedFileUploadByEnvVar && (
+            <p className="alert alert-warning mt-2 text-start offset-3 col-6">
+              <span className="material-symbols-outlined">help</span>
+              <b>FIXED</b>
+              <br />
+              <b
+                // biome-ignore lint/security/noDangerouslySetInnerHtml: includes markup from i18n strings
+                dangerouslySetInnerHTML={{
+                  __html: t('admin:app_setting.fixed_by_env_var', {
+                    envKey: 'FILE_UPLOAD',
+                    envVar: data.envFileUploadType,
+                  }),
+                }}
+              />
+            </p>
+          )
         )}
       </div>
 
@@ -165,6 +188,7 @@ const FileUploadSetting = (): JSX.Element => {
           envGcsBucket={data.envGcsBucket}
           envGcsUploadNamespace={data.envGcsUploadNamespace}
           onChangeGcsReferenceFileWithRelayMode={gcsRelayModeField.onChange}
+          isCloud={isCloud}
         />
       )}
 
@@ -179,10 +203,11 @@ const FileUploadSetting = (): JSX.Element => {
           envAzureStorageAccountName={data.envAzureStorageAccountName}
           envAzureStorageContainerName={data.envAzureStorageContainerName}
           onChangeAzureReferenceFileWithRelayMode={azureRelayModeField.onChange}
+          isCloud={isCloud}
         />
       )}
 
-      <AdminUpdateButtonRow type="submit" disabled={isLoading} />
+      {!isCloud && <AdminUpdateButtonRow type="submit" disabled={isLoading} />}
     </form>
   );
 };

+ 18 - 11
apps/app/src/client/components/Admin/App/GcsSetting.tsx

@@ -12,6 +12,7 @@ export type GcsSettingMoleculeProps = {
   envGcsBucket?: string;
   envGcsUploadNamespace?: string;
   onChangeGcsReferenceFileWithRelayMode: (val: boolean) => void;
+  isCloud: boolean;
 };
 
 export const GcsSettingMolecule = (
@@ -25,6 +26,7 @@ export const GcsSettingMolecule = (
     envGcsApiKeyJsonPath,
     envGcsBucket,
     envGcsUploadNamespace,
+    isCloud,
   } = props;
 
   return (
@@ -79,17 +81,22 @@ export const GcsSettingMolecule = (
         </div>
       </div>
 
-      {gcsUseOnlyEnvVars && (
-        <p
-          className="alert alert-info"
-          // biome-ignore lint/security/noDangerouslySetInnerHtml: includes markup from i18n strings
-          dangerouslySetInnerHTML={{
-            __html: t('admin:app_setting.note_for_the_only_env_option', {
-              env: 'GCS_USES_ONLY_ENV_VARS_FOR_SOME_OPTIONS',
-            }),
-          }}
-        />
-      )}
+      {gcsUseOnlyEnvVars &&
+        (isCloud ? (
+          <p className="alert alert-info">
+            {t('admin:app_setting.note_for_the_only_env_option_cloud')}
+          </p>
+        ) : (
+          <p
+            className="alert alert-info"
+            // biome-ignore lint/security/noDangerouslySetInnerHtml: includes markup from i18n strings
+            dangerouslySetInnerHTML={{
+              __html: t('admin:app_setting.note_for_the_only_env_option', {
+                env: 'GCS_USES_ONLY_ENV_VARS_FOR_SOME_OPTIONS',
+              }),
+            }}
+          />
+        ))}
       <table
         className={`table settings-table ${gcsUseOnlyEnvVars && 'use-only-env-vars'}`}
       >

+ 214 - 0
apps/app/src/client/components/Admin/AuditLog/AuditLogExportModal.tsx

@@ -0,0 +1,214 @@
+import { useCallback, useState } from 'react';
+import { LoadingSpinner } from '@growi/ui/dist/components';
+import { useAtomValue } from 'jotai';
+import { useTranslation } from 'react-i18next';
+import { Modal, ModalBody, ModalFooter, ModalHeader } from 'reactstrap';
+
+import type { IAuditLogBulkExportRequestFilters } from '~/features/audit-log-bulk-export/interfaces/audit-log-bulk-export';
+import type { SupportedActionType } from '~/interfaces/activity';
+import { auditLogAvailableActionsAtom } from '~/states/server-configurations';
+
+import { DateRangePicker } from './DateRangePicker';
+import { DuplicateExportConfirmModal } from './DuplicateExportConfirmModal';
+import { SearchUsernameTypeahead } from './SearchUsernameTypeahead';
+import { SelectActionDropdown } from './SelectActionDropdown';
+import { useAuditLogExport } from './useAuditLogExport';
+
+type Props = {
+  isOpen: boolean;
+  onClose: () => void;
+  initialStartDate?: Date | null;
+  initialEndDate?: Date | null;
+  initialSelectedUsernames?: string[];
+  initialActionMap?: Map<SupportedActionType, boolean>;
+};
+
+const AuditLogExportModalSubstance = ({
+  onClose,
+  initialStartDate,
+  initialEndDate,
+  initialSelectedUsernames,
+  initialActionMap,
+}: {
+  onClose: () => void;
+  initialStartDate?: Date | null;
+  initialEndDate?: Date | null;
+  initialSelectedUsernames?: string[];
+  initialActionMap?: Map<SupportedActionType, boolean>;
+}): JSX.Element => {
+  const { t } = useTranslation('admin');
+
+  const auditLogAvailableActionsData = useAtomValue(
+    auditLogAvailableActionsAtom,
+  );
+
+  const [startDate, setStartDate] = useState<Date | null>(
+    initialStartDate ?? null,
+  );
+  const [endDate, setEndDate] = useState<Date | null>(initialEndDate ?? null);
+  const [selectedUsernames, setSelectedUsernames] = useState<string[]>(
+    initialSelectedUsernames ?? [],
+  );
+  const [actionMap, setActionMap] = useState(() =>
+    initialActionMap != null
+      ? new Map(initialActionMap)
+      : new Map<SupportedActionType, boolean>(
+          auditLogAvailableActionsData?.map((action) => [action, true]) ?? [],
+        ),
+  );
+
+  const datePickerChangedHandler = useCallback((dateList: Date[] | null[]) => {
+    setStartDate(dateList[0]);
+    setEndDate(dateList[1]);
+  }, []);
+
+  const actionCheckboxChangedHandler = useCallback(
+    (action: SupportedActionType) => {
+      setActionMap((prev) => {
+        const next = new Map(prev);
+        next.set(action, !next.get(action));
+        return next;
+      });
+    },
+    [],
+  );
+
+  const multipleActionCheckboxChangedHandler = useCallback(
+    (actions: SupportedActionType[], isChecked: boolean) => {
+      setActionMap((prev) => {
+        const next = new Map(prev);
+        actions.forEach((action) => {
+          next.set(action, isChecked);
+        });
+        return next;
+      });
+    },
+    [],
+  );
+
+  const setUsernamesHandler = useCallback((usernames: string[]) => {
+    setSelectedUsernames(usernames);
+  }, []);
+
+  const buildFilters = useCallback(() => {
+    const selectedActionList = Array.from(actionMap.entries())
+      .filter((v) => v[1])
+      .map((v) => v[0]);
+
+    const filters: IAuditLogBulkExportRequestFilters = {};
+
+    if (selectedUsernames.length > 0) {
+      filters.usernames = selectedUsernames;
+    }
+    if (selectedActionList.length > 0) {
+      filters.actions = selectedActionList;
+    }
+    if (startDate != null) {
+      filters.dateFrom = startDate;
+    }
+    if (endDate != null) {
+      const endOfDay = new Date(endDate);
+      endOfDay.setHours(23, 59, 59, 999);
+      filters.dateTo = endOfDay;
+    }
+
+    return filters;
+  }, [actionMap, selectedUsernames, startDate, endDate]);
+
+  const {
+    isExporting,
+    isDuplicateConfirmOpen,
+    exportHandler,
+    restartExportHandler,
+    closeDuplicateConfirm,
+  } = useAuditLogExport(buildFilters, onClose);
+
+  return (
+    <>
+      <ModalHeader tag="h4" toggle={onClose}>
+        {t('audit_log_management.export_audit_log')}
+      </ModalHeader>
+
+      <ModalBody>
+        <div className="mb-3">
+          <div className="form-label">{t('audit_log_management.username')}</div>
+          <SearchUsernameTypeahead
+            onChange={setUsernamesHandler}
+            initialUsernames={initialSelectedUsernames}
+          />
+        </div>
+
+        <div className="mb-3">
+          <div className="form-label">{t('audit_log_management.date')}</div>
+          <DateRangePicker
+            startDate={startDate}
+            endDate={endDate}
+            onChange={datePickerChangedHandler}
+          />
+        </div>
+
+        <div className="mb-3">
+          <div className="form-label">{t('audit_log_management.action')}</div>
+          <SelectActionDropdown
+            actionMap={actionMap}
+            availableActions={auditLogAvailableActionsData || []}
+            onChangeAction={actionCheckboxChangedHandler}
+            onChangeMultipleAction={multipleActionCheckboxChangedHandler}
+          />
+        </div>
+      </ModalBody>
+
+      <ModalFooter>
+        <button
+          type="button"
+          className="btn btn-outline-secondary"
+          onClick={onClose}
+        >
+          {t('export_management.cancel')}
+        </button>
+        <button
+          type="button"
+          className="btn btn-primary"
+          onClick={exportHandler}
+          disabled={isExporting}
+        >
+          {isExporting ? (
+            <LoadingSpinner className="me-1 fs-3" />
+          ) : (
+            <span className="material-symbols-outlined me-1">download</span>
+          )}
+          {t('audit_log_management.export')}
+        </button>
+      </ModalFooter>
+
+      <DuplicateExportConfirmModal
+        isOpen={isDuplicateConfirmOpen}
+        onClose={closeDuplicateConfirm}
+        onRestart={restartExportHandler}
+      />
+    </>
+  );
+};
+
+export const AuditLogExportModal = ({
+  isOpen,
+  onClose,
+  initialStartDate,
+  initialEndDate,
+  initialSelectedUsernames,
+  initialActionMap,
+}: Props): JSX.Element => {
+  return (
+    <Modal isOpen={isOpen} toggle={onClose}>
+      {isOpen && (
+        <AuditLogExportModalSubstance
+          onClose={onClose}
+          initialStartDate={initialStartDate}
+          initialEndDate={initialEndDate}
+          initialSelectedUsernames={initialSelectedUsernames}
+          initialActionMap={initialActionMap}
+        />
+      )}
+    </Modal>
+  );
+};

+ 39 - 0
apps/app/src/client/components/Admin/AuditLog/DuplicateExportConfirmModal.tsx

@@ -0,0 +1,39 @@
+import { useTranslation } from 'react-i18next';
+import { Modal, ModalBody, ModalFooter, ModalHeader } from 'reactstrap';
+
+type Props = {
+  isOpen: boolean;
+  onClose: () => void;
+  onRestart: () => void;
+};
+
+export const DuplicateExportConfirmModal = ({
+  isOpen,
+  onClose,
+  onRestart,
+}: Props): JSX.Element => {
+  const { t } = useTranslation('admin');
+
+  return (
+    <Modal isOpen={isOpen} toggle={onClose}>
+      <ModalHeader tag="h4" toggle={onClose}>
+        {t('audit_log_management.confirm_export')}
+      </ModalHeader>
+      <ModalBody>
+        {t('audit_log_management.duplicate_export_confirm')}
+      </ModalBody>
+      <ModalFooter>
+        <button
+          type="button"
+          className="btn btn-outline-secondary"
+          onClick={onClose}
+        >
+          {t('export_management.cancel')}
+        </button>
+        <button type="button" className="btn btn-primary" onClick={onRestart}>
+          {t('audit_log_management.restart_export')}
+        </button>
+      </ModalFooter>
+    </Modal>
+  );
+};

+ 12 - 1
apps/app/src/client/components/Admin/AuditLog/SearchUsernameTypeahead.tsx

@@ -29,21 +29,30 @@ type UserDataType = {
 
 type Props = {
   onChange: (text: string[]) => void;
+  initialUsernames?: string[];
 };
 
 const SearchUsernameTypeaheadSubstance: ForwardRefRenderFunction<
   IClearable,
   Props
 > = (props: Props, ref) => {
-  const { onChange } = props;
+  const { onChange, initialUsernames } = props;
   const { t } = useTranslation();
 
   const typeaheadRef = useRef<TypeaheadRef>(null);
 
+  const toUserDataItem = (username: string): UserDataType => ({
+    username,
+    category: Categories.activeUser,
+  });
+
   /*
    * State
    */
   const [searchKeyword, setSearchKeyword] = useState<string>('');
+  const [selectedItems, setSelectedItems] = useState<UserDataType[]>(() =>
+    (initialUsernames ?? []).map(toUserDataItem),
+  );
 
   /*
    * Fetch
@@ -87,6 +96,7 @@ const SearchUsernameTypeaheadSubstance: ForwardRefRenderFunction<
    */
   const changeHandler = useCallback(
     (userData: UserDataType[]) => {
+      setSelectedItems(userData);
       const usernames = userData.map((user) => user.username);
       onChange(usernames);
     },
@@ -148,6 +158,7 @@ const SearchUsernameTypeaheadSubstance: ForwardRefRenderFunction<
         placeholder={t('admin:audit_log_management.username')}
         isLoading={isLoading}
         options={allUser}
+        selected={selectedItems}
         onSearch={searchHandler}
         onChange={changeHandler}
         renderMenu={renderMenu}

+ 67 - 0
apps/app/src/client/components/Admin/AuditLog/useAuditLogExport.ts

@@ -0,0 +1,67 @@
+import { useCallback, useState } from 'react';
+import { useTranslation } from 'react-i18next';
+
+import { apiv3Post } from '~/client/util/apiv3-client';
+import { toastError, toastSuccess } from '~/client/util/toastr';
+import type { IAuditLogBulkExportFilters } from '~/features/audit-log-bulk-export/interfaces/audit-log-bulk-export';
+
+export const useAuditLogExport = (
+  buildFilters: () => IAuditLogBulkExportFilters,
+  onClose: () => void,
+) => {
+  const { t } = useTranslation('admin');
+
+  const [isExporting, setIsExporting] = useState(false);
+  const [isDuplicateConfirmOpen, setIsDuplicateConfirmOpen] = useState(false);
+
+  const exportHandler = useCallback(async () => {
+    setIsExporting(true);
+    try {
+      const filters = buildFilters();
+      await apiv3Post('/audit-log-bulk-export', { filters });
+      toastSuccess(t('audit_log_management.export_requested'));
+      onClose();
+    } catch (errs) {
+      const isDuplicate =
+        Array.isArray(errs) &&
+        errs.some(
+          (e) => e.code === 'audit_log_bulk_export.duplicate_export_job_error',
+        );
+
+      if (isDuplicate) {
+        setIsDuplicateConfirmOpen(true);
+      } else {
+        toastError(t('audit_log_management.export_failed'));
+      }
+    } finally {
+      setIsExporting(false);
+    }
+  }, [buildFilters, t, onClose]);
+
+  const restartExportHandler = useCallback(async () => {
+    setIsDuplicateConfirmOpen(false);
+    setIsExporting(true);
+    try {
+      const filters = buildFilters();
+      await apiv3Post('/audit-log-bulk-export', { filters, restartJob: true });
+      toastSuccess(t('audit_log_management.export_requested'));
+      onClose();
+    } catch {
+      toastError(t('audit_log_management.export_failed'));
+    } finally {
+      setIsExporting(false);
+    }
+  }, [buildFilters, t, onClose]);
+
+  const closeDuplicateConfirm = useCallback(() => {
+    setIsDuplicateConfirmOpen(false);
+  }, []);
+
+  return {
+    isExporting,
+    isDuplicateConfirmOpen,
+    exportHandler,
+    restartExportHandler,
+    closeDuplicateConfirm,
+  };
+};

+ 23 - 0
apps/app/src/client/components/Admin/AuditLogManagement.tsx

@@ -19,6 +19,7 @@ import { useSWRxActivity } from '~/stores/activity';
 import PaginationWrapper from '../PaginationWrapper';
 import { ActivityTable } from './AuditLog/ActivityTable';
 import { AuditLogDisableMode } from './AuditLog/AuditLogDisableMode';
+import { AuditLogExportModal } from './AuditLog/AuditLogExportModal';
 import { AuditLogSettings } from './AuditLog/AuditLogSettings';
 import { DateRangePicker } from './AuditLog/DateRangePicker';
 import { SearchUsernameTypeahead } from './AuditLog/SearchUsernameTypeahead';
@@ -191,6 +192,8 @@ export const AuditLogManagement: FC = () => {
     setActivePageNumber(jumpPageNumber);
   }, [jumpPageNumber]);
 
+  const [isExportModalOpen, setIsExportModalOpen] = useState<boolean>(false);
+
   const startIndex = activityList.length === 0 ? 0 : offset + 1;
   const endIndex = activityList.length === 0 ? 0 : offset + activityList.length;
 
@@ -283,6 +286,17 @@ export const AuditLogManagement: FC = () => {
                 {t('admin:audit_log_management.clear')}
               </button>
             </div>
+
+            <div className="col-12">
+              <button
+                type="button"
+                className="btn btn-outline-secondary"
+                onClick={() => setIsExportModalOpen(true)}
+              >
+                <span className="material-symbols-outlined me-1">download</span>
+                {t('admin:audit_log_management.export')}
+              </button>
+            </div>
           </div>
 
           <p className="ms-2">
@@ -331,6 +345,15 @@ export const AuditLogManagement: FC = () => {
               </button>
             </div>
           </div>
+
+          <AuditLogExportModal
+            isOpen={isExportModalOpen}
+            onClose={() => setIsExportModalOpen(false)}
+            initialStartDate={startDate}
+            initialEndDate={endDate}
+            initialSelectedUsernames={selectedUsernames}
+            initialActionMap={actionMap}
+          />
         </>
       )}
     </div>

+ 1 - 0
apps/app/src/client/components/Admin/UserManagement.tsx

@@ -183,6 +183,7 @@ const UserManagement = (props: UserManagementProps) => {
               {renderCheckbox('active', 'Active', 'success')}
               {renderCheckbox('suspended', 'Suspended', 'warning')}
               {renderCheckbox('invited', 'Invited', 'secondary')}
+              {renderCheckbox('deleted', 'Deleted', 'danger')}
             </div>
             <div>
               {isNotifyCommentShow && (

+ 8 - 1
apps/app/src/client/components/Admin/Users/UserTable.tsx

@@ -5,6 +5,7 @@ import { format as dateFnsFormat } from 'date-fns/format';
 import { useTranslation } from 'next-i18next';
 
 import AdminUsersContainer from '~/client/services/AdminUsersContainer';
+import { UserStatus } from '~/server/models/user/conts';
 
 import { withUnstatedContainers } from '../../UnstatedUtils';
 import { SortIcons } from './SortIcons';
@@ -164,7 +165,13 @@ const UserTable = (props: UserTableProps) => {
                   )}
                 </td>
                 <td>
-                  <strong>{user.username}</strong>
+                  {user.status === UserStatus.STATUS_DELETED ? (
+                    <p className="text-secondary">
+                      {t('admin:user_management.user_table.deleted_user')}
+                    </p>
+                  ) : (
+                    <strong>{user.username}</strong>
+                  )}
                 </td>
                 <td>{user.name}</td>
                 <td>{user.email}</td>

+ 98 - 0
apps/app/src/client/components/InAppNotification/ModelNotification/AuditLogBulkExportJobModelNotification.tsx

@@ -0,0 +1,98 @@
import React from 'react';
import { type HasObjectId, isPopulated } from '@growi/core';
import { useTranslation } from 'react-i18next';

import type { IAuditLogBulkExportJobHasId } from '~/features/audit-log-bulk-export/interfaces/audit-log-bulk-export';
import { SupportedAction, SupportedTargetModel } from '~/interfaces/activity';
import type { IInAppNotification } from '~/interfaces/in-app-notification';

import type { ModelNotificationUtils } from '.';
import { ModelNotification } from './ModelNotification';
import { useActionMsgAndIconForModelNotification } from './useActionAndMsg';

/**
 * Builds the rendering utilities for in-app notifications whose target is
 * an audit-log bulk-export job, or returns null when the notification
 * targets a different model so the caller can fall through to the other
 * model-notification hooks.
 */
export const useAuditLogBulkExportJobModelNotification = (
  notification: IInAppNotification & HasObjectId,
): ModelNotificationUtils | null => {
  // Hooks are called unconditionally, before the model-type early return,
  // to satisfy the Rules of Hooks.
  const { t } = useTranslation();
  const { actionMsg, actionIcon } =
    useActionMsgAndIconForModelNotification(notification);

  // Type guard: narrows the generic notification to one carrying an
  // audit-log bulk-export job as its target.
  const isAuditLogBulkExportJobModelNotification = (
    notification: IInAppNotification & HasObjectId,
  ): notification is IInAppNotification<IAuditLogBulkExportJobHasId> &
    HasObjectId => {
    return (
      notification.targetModel ===
      SupportedTargetModel.MODEL_AUDIT_LOG_BULK_EXPORT_JOB
    );
  };

  if (!isAuditLogBulkExportJobModelNotification(notification)) {
    return null;
  }

  const actionUsers = notification.user.username;

  // Supplementary warning line shown under the main notification text for
  // expired or empty exports; an empty fragment when none applies.
  const getSubMsg = (): JSX.Element => {
    // Completed export whose target job document no longer exists —
    // presumably the download has expired (NOTE(review): confirm against
    // the server-side job cleanup behavior).
    if (
      notification.action ===
        SupportedAction.ACTION_AUDIT_LOG_BULK_EXPORT_COMPLETED &&
      notification.target == null
    ) {
      return (
        <div className="text-danger">
          <small>{t('audit_log_bulk_export.download_expired')}</small>
        </div>
      );
    }
    // The export job itself expired before completion.
    if (
      notification.action ===
      SupportedAction.ACTION_AUDIT_LOG_BULK_EXPORT_JOB_EXPIRED
    ) {
      return (
        <div className="text-danger">
          <small>{t('audit_log_bulk_export.job_expired')}</small>
        </div>
      );
    }
    // The export ran but matched no audit-log entries.
    if (
      notification.action ===
      SupportedAction.ACTION_AUDIT_LOG_BULK_EXPORT_NO_RESULTS
    ) {
      return (
        <div className="text-danger">
          <small>{t('audit_log_bulk_export.no_results')}</small>
        </div>
      );
    }
    return <></>;
  };

  // Rendered notification row. The acting user and page path are hidden
  // because they are not meaningful for an export-job notification.
  const Notification = () => {
    return (
      <ModelNotification
        notification={notification}
        actionMsg={actionMsg}
        actionIcon={actionIcon}
        actionUsers={actionUsers}
        hideActionUsers
        hidePath
        subMsg={getSubMsg()}
      />
    );
  };

  // Link to the exported file only when the job completed and its
  // attachment reference is populated; otherwise the row has no link.
  const clickLink =
    notification.action ===
      SupportedAction.ACTION_AUDIT_LOG_BULK_EXPORT_COMPLETED &&
    notification.target?.attachment != null &&
    isPopulated(notification.target?.attachment)
      ? notification.target.attachment.downloadPathProxied
      : undefined;

  return {
    Notification,
    clickLink,
    // Disable the row entirely when the target job no longer exists.
    isDisabled: notification.target == null,
  };
};

+ 5 - 1
apps/app/src/client/components/InAppNotification/ModelNotification/ModelNotification.tsx

@@ -15,6 +15,7 @@ type Props = {
   actionIcon: string;
   actionUsers: string;
   hideActionUsers?: boolean;
+  hidePath?: boolean;
   subMsg?: JSX.Element;
 };
 
@@ -24,6 +25,7 @@ export const ModelNotification: FC<Props> = ({
   actionIcon,
   actionUsers,
   hideActionUsers = false,
+  hidePath = false,
   subMsg,
 }: Props) => {
   return (
@@ -31,7 +33,9 @@ export const ModelNotification: FC<Props> = ({
       <div className="text-truncate page-title">
         {hideActionUsers ? <></> : <b>{actionUsers}</b>}
         {` ${actionMsg}`}
-        <PagePathLabel path={notification.parsedSnapshot?.path ?? ''} />
+        {!hidePath && (
+          <PagePathLabel path={notification.parsedSnapshot?.path ?? ''} />
+        )}
       </div>
       {subMsg}
       <span className="material-symbols-outlined me-2">{actionIcon}</span>

+ 5 - 1
apps/app/src/client/components/InAppNotification/ModelNotification/index.tsx

@@ -3,6 +3,7 @@ import type { HasObjectId } from '@growi/core';
 
 import type { IInAppNotification } from '~/interfaces/in-app-notification';
 
+import { useAuditLogBulkExportJobModelNotification } from './AuditLogBulkExportJobModelNotification';
 import { usePageBulkExportJobModelNotification } from './PageBulkExportJobModelNotification';
 import { usePageModelNotification } from './PageModelNotification';
 import { useUserModelNotification } from './UserModelNotification';
@@ -23,11 +24,14 @@ export const useModelNotification = (
   const userModelNotificationUtils = useUserModelNotification(notification);
   const pageBulkExportResultModelNotificationUtils =
     usePageBulkExportJobModelNotification(notification);
+  const auditLogBulkExportJobModelNotificationUtils =
+    useAuditLogBulkExportJobModelNotification(notification);
 
   const modelNotificationUtils =
     pageModelNotificationUtils ??
     userModelNotificationUtils ??
-    pageBulkExportResultModelNotificationUtils;
+    pageBulkExportResultModelNotificationUtils ??
+    auditLogBulkExportJobModelNotificationUtils;
 
   return modelNotificationUtils;
 };

+ 13 - 0
apps/app/src/client/components/InAppNotification/ModelNotification/useActionAndMsg.ts

@@ -81,6 +81,19 @@ export const useActionMsgAndIconForModelNotification = (
       actionMsg = 'export failed for';
       actionIcon = 'error';
       break;
+    case SupportedAction.ACTION_AUDIT_LOG_BULK_EXPORT_COMPLETED:
+      actionMsg = 'audit log export completed';
+      actionIcon = 'download';
+      break;
+    case SupportedAction.ACTION_AUDIT_LOG_BULK_EXPORT_FAILED:
+    case SupportedAction.ACTION_AUDIT_LOG_BULK_EXPORT_JOB_EXPIRED:
+      actionMsg = 'audit log export failed';
+      actionIcon = 'error';
+      break;
+    case SupportedAction.ACTION_AUDIT_LOG_BULK_EXPORT_NO_RESULTS:
+      actionMsg = 'audit log export had no results';
+      actionIcon = 'error';
+      break;
     default:
       actionMsg = '';
       actionIcon = '';

+ 15 - 5
apps/app/src/client/components/NotAvailable.tsx

@@ -1,12 +1,16 @@
 import React, { type JSX } from 'react';
 import { Disable } from 'react-disable';
 import type { UncontrolledTooltipProps } from 'reactstrap';
-import { UncontrolledTooltip } from 'reactstrap';
+import {
+  PopoverBody,
+  UncontrolledPopover,
+  UncontrolledTooltip,
+} from 'reactstrap';
 
 type NotAvailableProps = {
   children: JSX.Element;
   isDisabled: boolean;
-  title: string;
+  title: string | JSX.Element;
   classNamePrefix?: string;
   placement?: UncontrolledTooltipProps['placement'];
 };
@@ -29,9 +33,15 @@ export const NotAvailable = ({
       <div id={id}>
         <Disable disabled={isDisabled}>{children}</Disable>
       </div>
-      <UncontrolledTooltip placement={placement} target={id}>
-        {title}
-      </UncontrolledTooltip>
+      {typeof title === 'string' ? (
+        <UncontrolledTooltip placement={placement} target={id}>
+          {title}
+        </UncontrolledTooltip>
+      ) : (
+        <UncontrolledPopover trigger="hover" placement={placement} target={id}>
+          <PopoverBody>{title}</PopoverBody>
+        </UncontrolledPopover>
+      )}
     </>
   );
 };

+ 1 - 1
apps/app/src/client/components/PageComment/Comment.module.scss

@@ -33,7 +33,7 @@
         visibility: hidden;
       }
 
-      &:hover > .page-comment-control {
+      &:hover > :global(.page-comment-control) {
         visibility: visible;
       }
     }

+ 20 - 2
apps/app/src/client/components/PageCreateModal.tsx

@@ -21,7 +21,7 @@ import { debounce } from 'throttle-debounce';
 import { useCreateTemplatePage } from '~/client/services/create-page';
 import { useCreatePage } from '~/client/services/create-page/use-create-page';
 import { useToastrOnError } from '~/client/services/use-toastr-on-error';
-import { useCurrentUser } from '~/states/global';
+import { useCurrentUser, useGrowiCloudUri } from '~/states/global';
 import { isSearchServiceReachableAtom } from '~/states/server-configurations';
 import {
   usePageCreateModalActions,
@@ -35,9 +35,10 @@ import styles from './PageCreateModal.module.scss';
 const { isCreatablePage, isUsersHomepage } = pagePathUtils;
 
 const PageCreateModal: React.FC = () => {
-  const { t } = useTranslation();
+  const { t, i18n } = useTranslation();
 
   const currentUser = useCurrentUser();
+  const growiCloudUri = useGrowiCloudUri();
 
   const { isOpened, path: pathname = '' } = usePageCreateModalStatus();
   const { close: closeCreateModal } = usePageCreateModalActions();
@@ -71,6 +72,12 @@ const PageCreateModal: React.FC = () => {
     [userHomepagePath, t, now],
   );
 
+  const templateHelpLang = i18n.language === 'ja' ? 'ja' : 'en';
+  const templateHelpUrl =
+    growiCloudUri != null
+      ? `https://growi.cloud/help/${templateHelpLang}/guide/features/template.html`
+      : `https://docs.growi.org/${templateHelpLang}/guide/features/template.html`;
+
   const [todayInput, setTodayInput] = useState('');
   const [pageNameInput, setPageNameInput] = useState(pageNameInputInitialValue);
   const [template, setTemplate] = useState(null);
@@ -295,6 +302,16 @@ const PageCreateModal: React.FC = () => {
         <fieldset className="col-12">
           <h3 className="pb-2">
             {t('template.modal_label.Create template under')}
+            <a
+              href={templateHelpUrl}
+              target="_blank"
+              rel="noopener noreferrer"
+              className="ms-1"
+            >
+              <span className="material-symbols-outlined fs-6 text-secondary">
+                help
+              </span>
+            </a>
             <br />
             <code className="h6" data-testid="grw-page-create-modal-path-name">
               {pathname}
@@ -353,6 +370,7 @@ const PageCreateModal: React.FC = () => {
     isOpened,
     pathname,
     template,
+    templateHelpUrl,
     onChangeTemplateHandler,
     createTemplateWithToastr,
     t,

+ 6 - 8
apps/app/src/client/components/PageEditor/PageEditor.tsx

@@ -221,10 +221,10 @@ export const PageEditorSubstance = (props: Props): JSX.Element => {
   const save: Save = useCallback(
     async (revisionId, markdown, opts, onConflict) => {
       if (pageId == null || selectedGrant == null) {
-        logger.error('Some materials to save are invalid', {
-          pageId,
-          selectedGrant,
-        });
+        logger.error(
+          { pageId, selectedGrant },
+          'Some materials to save are invalid',
+        );
         throw new Error('Some materials to save are invalid');
       }
 
@@ -251,7 +251,7 @@ export const PageEditorSubstance = (props: Props): JSX.Element => {
 
         return page;
       } catch (error) {
-        logger.error('failed to save', error);
+        logger.error({ err: error }, 'failed to save');
 
         const remoteRevisionData = extractRemoteRevisionDataFromErrorObj(error);
         if (remoteRevisionData != null) {
@@ -329,9 +329,7 @@ export const PageEditorSubstance = (props: Props): JSX.Element => {
   const uploadHandler = useCallback(
     (files: File[]) => {
       if (pageId == null) {
-        logger.error('pageId is invalid', {
-          pageId,
-        });
+        logger.error({ pageId }, 'pageId is invalid');
         throw new Error('pageId is invalid');
       }
 

+ 1 - 1
apps/app/src/client/components/RecentActivity/RecentActivity.tsx

@@ -54,7 +54,7 @@ export const RecentActivity = (props: RecentActivityProps): JSX.Element => {
 
   useEffect(() => {
     if (error) {
-      logger.error('Failed to fetch recent activity data', error);
+      logger.error({ err: error }, 'Failed to fetch recent activity data');
       toastError(error);
       return;
     }

+ 8 - 0
apps/app/src/client/components/Sidebar/PageCreateButton/CreateButton.module.scss

@@ -19,6 +19,12 @@
   :global(svg .background) {
     pointer-events: fill;
   }
+
+  :global(.hexagon-main),
+  :global(.pencil-icon) {
+    transition: transform 0.2s ease-in-out;
+    transform-origin: 22.5px 22px;
+  }
 }
 
 // == Colors
@@ -35,11 +41,13 @@
 .btn-create:hover {
   :global(svg) {
     fill: var(--bs-btn-hover-bg);
+    transform: scale(1.1);
   }
 }
 
 .btn-create:active {
   :global(svg) {
     fill: var(--bs-btn-active-bg);
+    transform: scale(1.1);
   }
 }

+ 4 - 2
apps/app/src/client/components/Sidebar/PageCreateButton/DropendToggle.module.scss

@@ -4,7 +4,7 @@
 .btn-toggle {
   @extend %btn-basis;
 
-  left: 12px;
+  left: 14px;
   padding: 0;
 
   :global(.icon) {
@@ -18,6 +18,7 @@
 // no caret
 .btn-toggle {
   & {
+
     // no caret
     &::after {
       display: none !important;
@@ -28,7 +29,7 @@
 // hitarea
 .btn-toggle {
   :global(.hitarea) {
-    inset: 0 -10px 0 0;
+    inset: 0 -14px 0 0;
   }
 }
 
@@ -54,3 +55,4 @@
     fill: var(--grw-primary-600);
   }
 }
+

+ 5 - 6
apps/app/src/client/components/Sidebar/PageCreateButton/Hexagon.tsx

@@ -7,17 +7,16 @@ type Props = {
 export const Hexagon = React.memo(
   (props: Props): JSX.Element => (
     <svg
+      width="41"
+      height="36"
+      viewBox="0 0 41 36"
+      fill="none"
       xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 27.691 23.999"
-      height="36px"
       className={props.className}
     >
       <title>Create</title>
       <g className="background" transform="translate(0 0)">
-        <path
-          d="M20.768,0l6.923,12L20.768,24H6.923L0,12,6.923,0Z"
-          transform="translate(0)"
-        ></path>
+        <path d="M28.5717 0C29.9943 0 31.3099 0.755645 32.0268 1.98452L40.1934 15.9845C40.92 17.23 40.92 18.77 40.1934 20.0155L32.0268 34.0155C31.3099 35.2444 29.9943 36 28.5717 36H12.1666C10.744 36 9.42835 35.2444 8.71151 34.0155L0.544839 20.0155C-0.181674 18.77 -0.181673 17.23 0.54484 15.9845L8.71151 1.98451C9.42835 0.755643 10.744 0 12.1666 0H28.5717Z" />
       </g>
     </svg>
   ),

+ 41 - 1
apps/app/src/client/components/Sidebar/SidebarNav/PrimaryItems.tsx

@@ -1,9 +1,12 @@
 import { memo } from 'react';
 import dynamic from 'next/dynamic';
 import { useAtomValue } from 'jotai';
+import { useTranslation } from 'react-i18next';
 
+import { NotAvailable } from '~/client/components/NotAvailable';
 import { SidebarContentsType } from '~/interfaces/ui';
 import { useIsGuestUser } from '~/states/context';
+import { useGrowiAppIdForGrowiCloud, useGrowiCloudUri } from '~/states/global';
 import { aiEnabledAtom } from '~/states/server-configurations';
 import { useSidebarMode } from '~/states/ui/sidebar';
 
@@ -27,14 +30,35 @@ type Props = {
 export const PrimaryItems = memo((props: Props) => {
   const { onItemHover } = props;
 
+  const { t } = useTranslation();
   const { sidebarMode } = useSidebarMode();
   const isAiEnabled = useAtomValue(aiEnabledAtom);
   const isGuestUser = useIsGuestUser();
+  const growiCloudUri = useGrowiCloudUri();
+  const growiAppIdForGrowiCloud = useGrowiAppIdForGrowiCloud();
+  const isCloud = growiCloudUri != null && growiAppIdForGrowiCloud != null;
 
   if (sidebarMode == null) {
     return <></>;
   }
 
+  const aiAssistantNotAvailableTitle = (
+    <>
+      <p className="mb-2">
+        {t('default_ai_assistant.open_cloud_settings_to_enable')}
+      </p>
+      <a href={`${growiCloudUri}/my/apps/${growiAppIdForGrowiCloud}`}>
+        <span
+          className="material-symbols-outlined me-1"
+          style={{ fontSize: '1rem', verticalAlign: 'middle' }}
+        >
+          share
+        </span>
+        {t('default_ai_assistant.to_cloud_settings')}
+      </a>
+    </>
+  );
+
   return (
     <div className={`${styles['grw-primary-items']} mt-1`}>
       <PrimaryItem
@@ -78,7 +102,7 @@ export const PrimaryItems = memo((props: Props) => {
           onHover={onItemHover}
         />
       )}
-      {isAiEnabled && (
+      {isAiEnabled ? (
         <PrimaryItem
           sidebarMode={sidebarMode}
           contents={SidebarContentsType.AI_ASSISTANT}
@@ -87,6 +111,22 @@ export const PrimaryItems = memo((props: Props) => {
           isCustomIcon
           onHover={onItemHover}
         />
+      ) : (
+        isCloud && (
+          <NotAvailable
+            isDisabled
+            title={aiAssistantNotAvailableTitle}
+            placement="right"
+          >
+            <PrimaryItem
+              sidebarMode={sidebarMode}
+              contents={SidebarContentsType.AI_ASSISTANT}
+              label="AI Assistant"
+              iconName="growi_ai"
+              isCustomIcon
+            />
+          </NotAvailable>
+        )
       )}
     </div>
   );

+ 2 - 2
apps/app/src/client/components/StickyStretchableScroller.tsx

@@ -1,5 +1,5 @@
 import type { RefObject } from 'react';
-import React, {
+import {
   type JSX,
   useCallback,
   useEffect,
@@ -73,7 +73,7 @@ export const StickyStretchableScroller = (
     const scrollElement = simplebarRef.current.getScrollElement();
     const newHeight = calcViewHeight(scrollElement);
 
-    logger.debug('Set new height to simplebar', newHeight);
+    logger.debug({ newHeight }, 'Set new height to simplebar');
 
     // set new height
     setSimplebarMaxHeight(newHeight);

+ 4 - 4
apps/app/src/client/services/AdminUsersContainer.js

@@ -25,8 +25,8 @@ export default class AdminUsersContainer extends Container {
 
     this.state = {
       users: [],
-      sort: 'id',
-      sortOrder: 'asc',
+      sort: 'createdAt',
+      sortOrder: 'desc',
       isPasswordResetModalShown: false,
       isUserInviteModalShown: false,
       userForPasswordResetModal: null,
@@ -119,8 +119,8 @@ export default class AdminUsersContainer extends Container {
 
   async resetAllChanges() {
     await this.setState({
-      sort: 'id',
-      sortOrder: 'asc',
+      sort: 'createdAt',
+      sortOrder: 'desc',
       searchText: '',
       selectedStatusList: new Set(['all']),
     });

+ 6 - 0
apps/app/src/client/services/renderer/renderer.tsx

@@ -95,6 +95,7 @@ export const generateViewOptions = (
             presentation.sanitizeOption,
             drawio.sanitizeOption,
             mermaidSanitizeOption,
+            plantuml.sanitizeOption,
             callout.sanitizeOption,
             attachment.sanitizeOption,
             lsxGrowiDirective.sanitizeOption,
@@ -132,6 +133,7 @@ export const generateViewOptions = (
     components.refsimg = refsGrowiDirective.RefsImg;
     components.gallery = refsGrowiDirective.Gallery;
     components.drawio = DrawioViewerWithEditButton;
+    components.plantuml = plantuml.PlantUmlViewer;
     components.table = TableWithEditButton;
     components.mermaid = MermaidViewer;
     components.callout = callout.CalloutViewer;
@@ -220,6 +222,7 @@ export const generateSimpleViewOptions = (
             presentation.sanitizeOption,
             drawio.sanitizeOption,
             mermaidSanitizeOption,
+            plantuml.sanitizeOption,
             callout.sanitizeOption,
             attachment.sanitizeOption,
             lsxGrowiDirective.sanitizeOption,
@@ -250,6 +253,7 @@ export const generateSimpleViewOptions = (
     components.refsimg = refsGrowiDirective.RefsImgImmutable;
     components.gallery = refsGrowiDirective.GalleryImmutable;
     components.drawio = drawio.DrawioViewer;
+    components.plantuml = plantuml.PlantUmlViewer;
     components.mermaid = MermaidViewer;
     components.callout = callout.CalloutViewer;
     components.attachment = RichAttachment;
@@ -321,6 +325,7 @@ export const generatePreviewOptions = (
             getCommonSanitizeOption(config),
             drawio.sanitizeOption,
             mermaidSanitizeOption,
+            plantuml.sanitizeOption,
             callout.sanitizeOption,
             attachment.sanitizeOption,
             lsxGrowiDirective.sanitizeOption,
@@ -352,6 +357,7 @@ export const generatePreviewOptions = (
     components.refsimg = refsGrowiDirective.RefsImgImmutable;
     components.gallery = refsGrowiDirective.GalleryImmutable;
     components.drawio = drawio.DrawioViewer;
+    components.plantuml = plantuml.PlantUmlViewer;
     components.mermaid = MermaidViewer;
     components.callout = callout.CalloutViewer;
     components.attachment = RichAttachment;

+ 201 - 0
apps/app/src/client/util/watch-rendering-and-rescroll.spec.tsx

@@ -0,0 +1,201 @@
+import { GROWI_IS_CONTENT_RENDERING_ATTR } from '@growi/core/dist/consts';
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import { watchRenderingAndReScroll } from './watch-rendering-and-rescroll';
+
+describe('watchRenderingAndReScroll', () => {
+  let container: HTMLDivElement;
+  let scrollToTarget: ReturnType<typeof vi.fn>;
+
+  beforeEach(() => {
+    vi.useFakeTimers();
+    container = document.createElement('div');
+    document.body.appendChild(container);
+    scrollToTarget = vi.fn(() => true);
+  });
+
+  afterEach(() => {
+    vi.useRealTimers();
+    document.body.innerHTML = '';
+  });
+
+  it('should not schedule a timer when no rendering elements exist', () => {
+    const cleanup = watchRenderingAndReScroll(container, scrollToTarget);
+
+    vi.advanceTimersByTime(5000);
+    expect(scrollToTarget).not.toHaveBeenCalled();
+
+    cleanup();
+  });
+
+  it('should schedule a scroll after 5s when rendering elements exist', () => {
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    const cleanup = watchRenderingAndReScroll(container, scrollToTarget);
+
+    expect(scrollToTarget).not.toHaveBeenCalled();
+
+    vi.advanceTimersByTime(5000);
+    expect(scrollToTarget).toHaveBeenCalledTimes(1);
+
+    cleanup();
+  });
+
+  it('should not reset timer on intermediate DOM mutations', async () => {
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    const cleanup = watchRenderingAndReScroll(container, scrollToTarget);
+
+    vi.advanceTimersByTime(3000);
+    expect(scrollToTarget).not.toHaveBeenCalled();
+
+    // Trigger a DOM mutation mid-timer
+    const child = document.createElement('span');
+    container.appendChild(child);
+    await vi.advanceTimersByTimeAsync(0);
+
+    // The timer should NOT have been reset — 2 more seconds should fire it
+    vi.advanceTimersByTime(2000);
+    expect(scrollToTarget).toHaveBeenCalledTimes(1);
+
+    cleanup();
+  });
+
+  it('should detect rendering elements added after initial check via observer', async () => {
+    const cleanup = watchRenderingAndReScroll(container, scrollToTarget);
+
+    vi.advanceTimersByTime(3000);
+    expect(scrollToTarget).not.toHaveBeenCalled();
+
+    // Add a rendering element later (within 10s timeout)
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    // Flush microtasks so MutationObserver callback fires
+    await vi.advanceTimersByTimeAsync(0);
+
+    // Timer should be scheduled — fires after 5s
+    await vi.advanceTimersByTimeAsync(5000);
+    expect(scrollToTarget).toHaveBeenCalledTimes(1);
+
+    cleanup();
+  });
+
+  it('should scroll once when multiple rendering elements exist simultaneously', () => {
+    const renderingEl1 = document.createElement('div');
+    renderingEl1.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl1);
+
+    const renderingEl2 = document.createElement('div');
+    renderingEl2.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl2);
+
+    const cleanup = watchRenderingAndReScroll(container, scrollToTarget);
+
+    vi.advanceTimersByTime(5000);
+    expect(scrollToTarget).toHaveBeenCalledTimes(1);
+
+    cleanup();
+  });
+
+  it('should stop watching after 10s timeout', () => {
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    const cleanup = watchRenderingAndReScroll(container, scrollToTarget);
+
+    // First scroll at 5s
+    vi.advanceTimersByTime(5000);
+    expect(scrollToTarget).toHaveBeenCalledTimes(1);
+
+    // At 10s both the scroll timer and the watch timeout fire.
+    vi.advanceTimersByTime(5000);
+    const callsAfter10s = scrollToTarget.mock.calls.length;
+
+    // After 10s, no further scrolls should occur regardless
+    vi.advanceTimersByTime(10000);
+    expect(scrollToTarget).toHaveBeenCalledTimes(callsAfter10s);
+
+    cleanup();
+  });
+
+  it('should clean up timer and observer on cleanup call', () => {
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    const cleanup = watchRenderingAndReScroll(container, scrollToTarget);
+
+    cleanup();
+
+    vi.advanceTimersByTime(10000);
+    expect(scrollToTarget).not.toHaveBeenCalled();
+  });
+
+  it('should prevent timer callbacks from executing after cleanup (stopped flag)', () => {
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    const cleanup = watchRenderingAndReScroll(container, scrollToTarget);
+
+    // Advance partway, then cleanup
+    vi.advanceTimersByTime(3000);
+    cleanup();
+
+    // Timer would have fired at 5s, but cleanup was called
+    vi.advanceTimersByTime(5000);
+    expect(scrollToTarget).not.toHaveBeenCalled();
+  });
+
+  it('should not schedule further re-scrolls after rendering elements complete', async () => {
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    const cleanup = watchRenderingAndReScroll(container, scrollToTarget);
+
+    // First timer fires at 5s — re-scroll executes
+    vi.advanceTimersByTime(5000);
+    expect(scrollToTarget).toHaveBeenCalledTimes(1);
+
+    // Rendering completes — attribute toggled to false
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'false');
+    await vi.advanceTimersByTimeAsync(0);
+
+    // No further re-scrolls should be scheduled
+    vi.advanceTimersByTime(10000);
+    expect(scrollToTarget).toHaveBeenCalledTimes(1);
+
+    cleanup();
+  });
+
+  it('should scroll exactly once when rendering completes before the first timer fires', () => {
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    const cleanup = watchRenderingAndReScroll(container, scrollToTarget);
+
+    // Rendering completes before the first poll timer fires
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'false');
+
+    // Poll timer fires at 5s — detects no rendering elements.
+    // wasRendering is reset in the timer callback BEFORE scrollToTarget so that
+    // the subsequent checkAndSchedule call does not trigger a redundant extra scroll.
+    vi.advanceTimersByTime(5000);
+    expect(scrollToTarget).toHaveBeenCalledTimes(1);
+
+    // No further scrolls after rendering is confirmed done
+    vi.advanceTimersByTime(5000);
+    expect(scrollToTarget).toHaveBeenCalledTimes(1);
+
+    cleanup();
+  });
+});

+ 84 - 0
apps/app/src/client/util/watch-rendering-and-rescroll.ts

@@ -0,0 +1,84 @@
+import {
+  GROWI_IS_CONTENT_RENDERING_ATTR,
+  GROWI_IS_CONTENT_RENDERING_SELECTOR,
+} from '@growi/core/dist/consts';
+
+const RENDERING_POLL_INTERVAL_MS = 5000;
+export const WATCH_TIMEOUT_MS = 10000;
+
+/**
+ * Watch for elements with in-progress rendering status in the container.
+ * Periodically calls scrollToTarget while rendering elements remain.
+ * Returns a cleanup function that stops observation and clears timers.
+ */
+export const watchRenderingAndReScroll = (
+  contentContainer: HTMLElement,
+  scrollToTarget: () => boolean,
+): (() => void) => {
+  let timerId: number | undefined;
+  let stopped = false;
+  let wasRendering = false;
+
+  const cleanup = () => {
+    stopped = true;
+    observer.disconnect();
+    if (timerId != null) {
+      window.clearTimeout(timerId);
+      timerId = undefined;
+    }
+    window.clearTimeout(watchTimeoutId);
+  };
+
+  const checkAndSchedule = () => {
+    if (stopped) return;
+
+    const hasRendering =
+      contentContainer.querySelector(GROWI_IS_CONTENT_RENDERING_SELECTOR) !=
+      null;
+
+    if (!hasRendering) {
+      if (timerId != null) {
+        window.clearTimeout(timerId);
+        timerId = undefined;
+      }
+      // Final re-scroll to compensate for the layout shift from the last completed render
+      if (wasRendering) {
+        wasRendering = false;
+        scrollToTarget();
+      }
+      return;
+    }
+
+    wasRendering = true;
+
+    // If a timer is already ticking, let it fire — don't reset
+    if (timerId != null) return;
+
+    timerId = window.setTimeout(() => {
+      if (stopped) return;
+      timerId = undefined;
+      // Reset before checkAndSchedule so the wasRendering guard does not
+      // trigger an extra re-scroll if rendering is already done by now.
+      wasRendering = false;
+      scrollToTarget();
+      checkAndSchedule();
+    }, RENDERING_POLL_INTERVAL_MS);
+  };
+
+  const observer = new MutationObserver(checkAndSchedule);
+
+  observer.observe(contentContainer, {
+    childList: true,
+    subtree: true,
+    attributes: true,
+    attributeFilter: [GROWI_IS_CONTENT_RENDERING_ATTR],
+  });
+
+  // Initial check
+  checkAndSchedule();
+
+  // Stop watching after timeout regardless of rendering state
+  const watchTimeoutId = window.setTimeout(cleanup, WATCH_TIMEOUT_MS);
+
+  return cleanup;
+};

+ 4 - 45
apps/app/src/components/PageView/PageView.tsx

@@ -1,12 +1,4 @@
-import {
-  type JSX,
-  memo,
-  useCallback,
-  useEffect,
-  useId,
-  useMemo,
-  useRef,
-} from 'react';
+import { type JSX, memo, useCallback, useId, useMemo, useRef } from 'react';
 import dynamic from 'next/dynamic';
 import { isDeepEquals } from '@growi/core/dist/utils/is-deep-equals';
 import { isUsersHomepage } from '@growi/core/dist/utils/page-path-utils';
@@ -29,6 +21,7 @@ import { UserInfo } from '../User/UserInfo';
 import { PageAlerts } from './PageAlerts/PageAlerts';
 import { PageContentFooter } from './PageContentFooter';
 import { PageViewLayout } from './PageViewLayout';
+import { useHashAutoScroll } from './use-hash-auto-scroll';
 
 // biome-ignore-start lint/style/noRestrictedImports: no-problem dynamic import
 const NotCreatablePage = dynamic(
@@ -129,42 +122,8 @@ const PageViewComponent = (props: Props): JSX.Element => {
     rendererConfig.isEnabledMarp,
   );
 
-  // ***************************  Auto Scroll  ***************************
-  useEffect(() => {
-    if (currentPageId == null) {
-      return;
-    }
-
-    // do nothing if hash is empty
-    const { hash } = window.location;
-    if (hash.length === 0) {
-      return;
-    }
-
-    const contentContainer = document.getElementById(contentContainerId);
-    if (contentContainer == null) return;
-
-    const targetId = decodeURIComponent(hash.slice(1));
-    const target = document.getElementById(targetId);
-    if (target != null) {
-      target.scrollIntoView();
-      return;
-    }
-
-    const observer = new MutationObserver(() => {
-      const target = document.getElementById(targetId);
-      if (target != null) {
-        target.scrollIntoView();
-        observer.disconnect();
-      }
-    });
-
-    observer.observe(contentContainer, { childList: true, subtree: true });
-
-    return () => observer.disconnect();
-  }, [currentPageId, contentContainerId]);
-
-  // *******************************  end  *******************************
+  // Auto-scroll to URL hash target, handling lazy-rendered content
+  useHashAutoScroll({ key: currentPageId, contentContainerId });
 
   const specialContents = useMemo(() => {
     if (isIdenticalPathPage) {

+ 329 - 0
apps/app/src/components/PageView/use-hash-auto-scroll.spec.tsx

@@ -0,0 +1,329 @@
+import { GROWI_IS_CONTENT_RENDERING_ATTR } from '@growi/core/dist/consts';
+import { renderHook } from '@testing-library/react';
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import { useHashAutoScroll } from './use-hash-auto-scroll';
+
+describe('useHashAutoScroll', () => {
+  const containerId = 'test-content-container';
+  let container: HTMLDivElement;
+
+  beforeEach(() => {
+    vi.useFakeTimers();
+    container = document.createElement('div');
+    container.id = containerId;
+    document.body.appendChild(container);
+  });
+
+  afterEach(() => {
+    vi.useRealTimers();
+    document.body.innerHTML = '';
+    window.location.hash = '';
+  });
+
+  it('should not scroll when key is null', () => {
+    window.location.hash = '#heading';
+    const target = document.createElement('div');
+    target.id = 'heading';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    renderHook(() =>
+      useHashAutoScroll({ key: null, contentContainerId: containerId }),
+    );
+
+    expect(target.scrollIntoView).not.toHaveBeenCalled();
+  });
+
+  it('should not scroll when key is undefined', () => {
+    window.location.hash = '#heading';
+    const target = document.createElement('div');
+    target.id = 'heading';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    renderHook(() =>
+      useHashAutoScroll({ key: undefined, contentContainerId: containerId }),
+    );
+
+    expect(target.scrollIntoView).not.toHaveBeenCalled();
+  });
+
+  it('should not scroll when hash is empty', () => {
+    window.location.hash = '';
+    const target = document.createElement('div');
+    target.id = 'heading';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    renderHook(() =>
+      useHashAutoScroll({ key: 'page-id', contentContainerId: containerId }),
+    );
+
+    expect(target.scrollIntoView).not.toHaveBeenCalled();
+  });
+
+  it('should not scroll when container is not found', () => {
+    window.location.hash = '#heading';
+    const target = document.createElement('div');
+    target.id = 'heading';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    renderHook(() =>
+      useHashAutoScroll({
+        key: 'page-id',
+        contentContainerId: 'nonexistent-id',
+      }),
+    );
+
+    expect(target.scrollIntoView).not.toHaveBeenCalled();
+  });
+
+  it('should scroll to target when it already exists in DOM', () => {
+    window.location.hash = '#heading';
+    const target = document.createElement('div');
+    target.id = 'heading';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    const { unmount } = renderHook(() =>
+      useHashAutoScroll({ key: 'page-id', contentContainerId: containerId }),
+    );
+
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+
+    unmount();
+  });
+
+  it('should decode encoded hash values before target resolution', () => {
+    // Japanese characters encoded
+    window.location.hash = '#%E6%97%A5%E6%9C%AC%E8%AA%9E';
+    const target = document.createElement('div');
+    target.id = '日本語';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    const { unmount } = renderHook(() =>
+      useHashAutoScroll({ key: 'page-id', contentContainerId: containerId }),
+    );
+
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+
+    unmount();
+  });
+
+  it('should use custom resolveTarget when provided', () => {
+    window.location.hash = '#heading';
+    const target = document.createElement('div');
+    target.scrollIntoView = vi.fn();
+    const resolveTarget = vi.fn(() => target);
+
+    const { unmount } = renderHook(() =>
+      useHashAutoScroll({
+        key: 'page-id',
+        contentContainerId: containerId,
+        resolveTarget,
+      }),
+    );
+
+    expect(resolveTarget).toHaveBeenCalledWith('heading');
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+
+    unmount();
+  });
+
+  it('should use custom scrollTo when provided', () => {
+    window.location.hash = '#heading';
+    const target = document.createElement('div');
+    target.id = 'heading';
+    container.appendChild(target);
+
+    const customScrollTo = vi.fn();
+
+    const { unmount } = renderHook(() =>
+      useHashAutoScroll({
+        key: 'page-id',
+        contentContainerId: containerId,
+        scrollTo: customScrollTo,
+      }),
+    );
+
+    expect(customScrollTo).toHaveBeenCalledWith(target);
+
+    unmount();
+  });
+
+  it('should start rendering watch after scrolling to target', () => {
+    window.location.hash = '#heading';
+
+    const target = document.createElement('div');
+    target.id = 'heading';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    const { unmount } = renderHook(() =>
+      useHashAutoScroll({ key: 'page-id', contentContainerId: containerId }),
+    );
+
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+
+    // Re-scroll after 5s due to rendering watch
+    vi.advanceTimersByTime(5000);
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(2);
+
+    unmount();
+  });
+
+  // Poll interval is 5s, so this test needs more than 5s — extend timeout to 10s.
+  // happy-dom's MutationObserver does not fire reliably with fake timers when a
+  // setTimeout is pending in the same scope. Use real timers for this test only.
+  it('should re-scroll when rendering elements appear after initial scroll (late-mounting async renderers)', async () => {
+    vi.useRealTimers();
+
+    window.location.hash = '#heading';
+
+    const target = document.createElement('div');
+    target.id = 'heading';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    // No rendering elements at scroll time
+    const { unmount } = renderHook(() =>
+      useHashAutoScroll({ key: 'page-id', contentContainerId: containerId }),
+    );
+
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+
+    // Async renderer mounts after the initial scroll (simulates Mermaid/PlantUML loading)
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    // Wait for MO to fire and the 5s poll timer to elapse
+    await new Promise<void>((resolve) => setTimeout(resolve, 5100));
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(2);
+
+    unmount();
+  }, 10000);
+
+  it('should not re-scroll when no rendering elements exist after initial scroll', () => {
+    window.location.hash = '#heading';
+
+    const target = document.createElement('div');
+    target.id = 'heading';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    const { unmount } = renderHook(() =>
+      useHashAutoScroll({ key: 'page-id', contentContainerId: containerId }),
+    );
+
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+
+    // No re-scroll since no rendering elements are present
+    vi.advanceTimersByTime(5000);
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+
+    unmount();
+  });
+
+  it('should wait for target via MutationObserver when not yet in DOM', async () => {
+    // happy-dom's MutationObserver does not fire when a fake-timer setTimeout is
+    // pending in the same effect. Use real timers for this test only.
+    vi.useRealTimers();
+
+    window.location.hash = '#deferred';
+
+    const { unmount } = renderHook(() =>
+      useHashAutoScroll({ key: 'page-id', contentContainerId: containerId }),
+    );
+
+    const target = document.createElement('div');
+    target.id = 'deferred';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    await new Promise<void>((resolve) => setTimeout(resolve, 50));
+
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+
+    unmount();
+  });
+
+  it('should stop target observer after 10s timeout when target never appears', async () => {
+    window.location.hash = '#never-appears';
+
+    const { unmount } = renderHook(() =>
+      useHashAutoScroll({ key: 'page-id', contentContainerId: containerId }),
+    );
+
+    // Advance past the timeout
+    vi.advanceTimersByTime(11000);
+
+    // Target appears after timeout — should NOT trigger scroll
+    const target = document.createElement('div');
+    target.id = 'never-appears';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    await vi.advanceTimersByTimeAsync(0);
+
+    expect(target.scrollIntoView).not.toHaveBeenCalled();
+
+    unmount();
+  });
+
+  it('should clean up all observers and timers on unmount', () => {
+    window.location.hash = '#heading';
+    const target = document.createElement('div');
+    target.id = 'heading';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    const renderingEl = document.createElement('div');
+    renderingEl.setAttribute(GROWI_IS_CONTENT_RENDERING_ATTR, 'true');
+    container.appendChild(renderingEl);
+
+    const { unmount } = renderHook(() =>
+      useHashAutoScroll({ key: 'page-id', contentContainerId: containerId }),
+    );
+
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+
+    unmount();
+
+    // No further scrolls after unmount
+    vi.advanceTimersByTime(20000);
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+  });
+
+  it('should re-run effect when key changes', () => {
+    window.location.hash = '#heading';
+    const target = document.createElement('div');
+    target.id = 'heading';
+    target.scrollIntoView = vi.fn();
+    container.appendChild(target);
+
+    const { rerender, unmount } = renderHook(
+      ({ key }) => useHashAutoScroll({ key, contentContainerId: containerId }),
+      { initialProps: { key: 'page-1' as string | null } },
+    );
+
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(1);
+
+    // Change key — effect re-runs
+    rerender({ key: 'page-2' });
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(2);
+
+    // Set to null — no additional scroll
+    rerender({ key: null });
+    expect(target.scrollIntoView).toHaveBeenCalledTimes(2);
+
+    unmount();
+  });
+});

+ 106 - 0
apps/app/src/components/PageView/use-hash-auto-scroll.ts

@@ -0,0 +1,106 @@
+import { useEffect, useRef } from 'react';
+
+import {
+  WATCH_TIMEOUT_MS,
+  watchRenderingAndReScroll,
+  // biome-ignore lint/style/noRestrictedImports: client-only hook used in client-only component
+} from '~/client/util/watch-rendering-and-rescroll';
+
+/** Configuration for the hash-based auto-scroll hook */
+export interface UseHashAutoScrollOptions {
+  /**
+   * Unique key that triggers re-execution when changed.
+   * When null/undefined, all scroll processing is skipped.
+   */
+  key: string | undefined | null;
+
+  /** DOM id of the content container element to observe */
+  contentContainerId: string;
+
+  /**
+   * Optional function to resolve the scroll target element.
+   * Receives the decoded hash string (without '#').
+   * Defaults to: (hash) => document.getElementById(hash)
+   */
+  resolveTarget?: (decodedHash: string) => HTMLElement | null;
+
+  /**
+   * Optional function to scroll to the target element.
+   * Defaults to: (el) => el.scrollIntoView()
+   */
+  scrollTo?: (target: HTMLElement) => void;
+}
+
+/**
+ * Auto-scroll to the URL hash target when a content view loads.
+ * Handles lazy-rendered content by polling for rendering-status
+ * attributes and re-scrolling after they finish.
+ */
+export const useHashAutoScroll = (options: UseHashAutoScrollOptions): void => {
+  const { key, contentContainerId } = options;
+  const resolveTargetRef = useRef(options.resolveTarget);
+  resolveTargetRef.current = options.resolveTarget;
+  const scrollToRef = useRef(options.scrollTo);
+  scrollToRef.current = options.scrollTo;
+
+  useEffect(() => {
+    if (key == null) return;
+
+    const { hash } = window.location;
+    if (hash.length === 0) return;
+
+    const contentContainer = document.getElementById(contentContainerId);
+    if (contentContainer == null) return;
+
+    const targetId = decodeURIComponent(hash.slice(1));
+
+    const scrollToTarget = (): boolean => {
+      const resolve =
+        resolveTargetRef.current ??
+        ((id: string) => document.getElementById(id));
+      const target = resolve(targetId);
+      if (target == null) return false;
+      const scroll =
+        scrollToRef.current ?? ((el: HTMLElement) => el.scrollIntoView());
+      scroll(target);
+      return true;
+    };
+
+    const startRenderingWatch = (): (() => void) => {
+      // Always start regardless of current rendering elements — async renderers
+      // (Mermaid via dynamic import, PlantUML images) may mount after the initial scroll.
+      return watchRenderingAndReScroll(contentContainer, scrollToTarget);
+    };
+
+    // Target already in DOM — scroll and optionally watch rendering
+    if (scrollToTarget()) {
+      const renderingCleanup = startRenderingWatch();
+      return () => {
+        renderingCleanup?.();
+      };
+    }
+
+    // Target not in DOM yet — wait for it, then optionally watch rendering
+    let renderingCleanup: (() => void) | undefined;
+
+    const observer = new MutationObserver(() => {
+      if (scrollToTarget()) {
+        observer.disconnect();
+        window.clearTimeout(timeoutId);
+        renderingCleanup = startRenderingWatch();
+      }
+    });
+
+    observer.observe(contentContainer, { childList: true, subtree: true });
+    const timeoutId = window.setTimeout(
+      () => observer.disconnect(),
+      WATCH_TIMEOUT_MS,
+    );
+
+    return () => {
+      observer.disconnect();
+      window.clearTimeout(timeoutId);
+      renderingCleanup?.();
+    };
+  }, [key, contentContainerId]);
+};

+ 7 - 3
apps/app/src/features/admin/states/socket-io.ts

@@ -27,12 +27,16 @@ export const useSetupAdminSocket = (): void => {
       .then(({ default: io }) => {
         if (cancelled) return;
         const newSocket = io('/admin', { transports: ['websocket'] });
-        newSocket.on('connect_error', (error) => logger.error('/admin', error));
-        newSocket.on('error', (error) => logger.error('/admin', error));
+        newSocket.on('connect_error', (error) =>
+          logger.error({ err: error }, '/admin'),
+        );
+        newSocket.on('error', (error) =>
+          logger.error({ err: error }, '/admin'),
+        );
         setSocket(newSocket);
       })
       .catch((error) =>
-        logger.error('Failed to initialize admin WebSocket:', error),
+        logger.error({ err: error }, 'Failed to initialize admin WebSocket'),
       );
 
     return () => {

+ 10 - 0
apps/app/src/features/ai-tools/server/routes/apiv3/index.ts

@@ -0,0 +1,10 @@
+import express from 'express';
+
+import { suggestPathHandlersFactory } from '~/features/ai-tools/suggest-path/server/routes/apiv3';
+import type Crowi from '~/server/crowi';
+
+export const factory = (crowi: Crowi): express.Router => {
+  const router = express.Router();
+  router.post('/suggest-path', suggestPathHandlersFactory(crowi));
+  return router;
+};

+ 62 - 0
apps/app/src/features/ai-tools/suggest-path/interfaces/suggest-path-types.ts

@@ -0,0 +1,62 @@
+import type { IUserHasId } from '@growi/core/dist/interfaces';
+
+import type { ObjectIdLike } from '~/server/interfaces/mongoose-utils';
+
+export const SuggestionType = {
+  MEMO: 'memo',
+  SEARCH: 'search',
+  CATEGORY: 'category',
+} as const;
+
+export type SuggestionType =
+  (typeof SuggestionType)[keyof typeof SuggestionType];
+
+export type PathSuggestion = {
+  type: SuggestionType;
+  path: string;
+  label: string;
+  description: string;
+  grant: number;
+  informationType?: InformationType;
+};
+
+export type InformationType = 'flow' | 'stock';
+
+export type ContentAnalysis = {
+  keywords: string[];
+  informationType: InformationType;
+};
+
+export type SearchCandidate = {
+  pagePath: string;
+  snippet: string;
+  score: number;
+};
+
+export type EvaluatedSuggestion = {
+  path: string;
+  label: string;
+  description: string;
+};
+
+export type SuggestPathResponse = {
+  suggestions: PathSuggestion[];
+};
+
+export type SearchResultItem = {
+  _score: number;
+  _source: {
+    path: string;
+  };
+  _highlight?: Record<string, string[]>;
+};
+
+export type SearchService = {
+  searchKeyword(
+    keyword: string,
+    nqName: string | null,
+    user: IUserHasId,
+    userGroups: ObjectIdLike[],
+    opts: Record<string, unknown>,
+  ): Promise<[{ data: SearchResultItem[] }, unknown]>;
+};

+ 909 - 0
apps/app/src/features/ai-tools/suggest-path/server/integration-tests/suggest-path-integration.spec.ts

@@ -0,0 +1,909 @@
+import type { NextFunction, Request, Response } from 'express';
+import express from 'express';
+import request from 'supertest';
+
+import type { ContentAnalysis } from '~/features/ai-tools/suggest-path/interfaces/suggest-path-types';
+import type Crowi from '~/server/crowi';
+import type { ApiV3Response } from '~/server/routes/apiv3/interfaces/apiv3-response';
+
+// Mutable test state — controls mock behavior per test
+const testState = vi.hoisted(() => ({
+  authenticateUser: true,
+  aiEnabled: true,
+  openaiServiceType: 'openai' as string | null,
+  disableUserPages: false,
+  // Phase 2 - content analysis
+  contentAnalysis: null as {
+    keywords: string[];
+    informationType: 'flow' | 'stock';
+  } | null,
+  contentAnalysisError: null as Error | null,
+  // Phase 2 - search candidates
+  searchCandidates: [] as Array<{
+    pagePath: string;
+    snippet: string;
+    score: number;
+  }>,
+  searchCandidatesError: null as Error | null,
+  // Phase 2 - candidate evaluation
+  evaluatedSuggestions: [] as Array<{
+    path: string;
+    label: string;
+    description: string;
+  }>,
+  evaluateCandidatesError: null as Error | null,
+  // Phase 2 - category
+  categorySuggestion: null as {
+    type: string;
+    path: string;
+    label: string;
+    description: string;
+    grant: number;
+  } | null,
+  categorySuggestionError: null as Error | null,
+  // Phase 2 - grant
+  parentGrant: 1,
+}));
+
// Fixture user that the mocked login-required middleware attaches to req.user
// when testState.authenticateUser is true.
const mockUser = {
  _id: 'user123',
  username: 'alice',
  status: 2, // STATUS_ACTIVE
};
+
// Mock access token parser — always passthrough (token auth is not under test)
vi.mock('~/server/middlewares/access-token-parser', () => ({
  accessTokenParser:
    () => (_req: Request, _res: Response, next: NextFunction) =>
      next(),
}));

// Mock login required — conditional authentication based on testState;
// on success the fixture user is attached to req.user.
vi.mock('~/server/middlewares/login-required', () => ({
  default: () => (req: Request, res: Response, next: NextFunction) => {
    if (!testState.authenticateUser) {
      return res.sendStatus(403);
    }
    Object.assign(req, { user: mockUser });
    next();
  },
}));

// Mock config manager — certifyAiService and generateMemoSuggestion read from this
vi.mock('~/server/service/config-manager', () => ({
  configManager: {
    getConfig: (key: string) => {
      switch (key) {
        case 'app:aiEnabled':
          return testState.aiEnabled;
        case 'openai:serviceType':
          return testState.openaiServiceType;
        case 'security:disableUserPages':
          return testState.disableUserPages;
        default:
          return undefined;
      }
    },
  },
}));

// Mock user group relations — needed for user group resolution in handler
vi.mock('~/server/models/user-group-relation', () => ({
  default: {
    findAllUserGroupIdsRelatedToUser: vi.fn().mockResolvedValue([]),
  },
}));

// External user groups resolve to empty as well — suggestions are evaluated
// for a user with no group memberships.
vi.mock(
  '~/features/external-user-group/server/models/external-user-group-relation',
  () => ({
    default: {
      findAllUserGroupIdsRelatedToUser: vi.fn().mockResolvedValue([]),
    },
  }),
);

// Mock analyzeContent — configurable per test via testState;
// with no configured result it returns an empty-keyword stock analysis.
vi.mock('../services/analyze-content', () => ({
  analyzeContent: vi.fn().mockImplementation(() => {
    if (testState.contentAnalysisError != null) {
      return Promise.reject(testState.contentAnalysisError);
    }
    if (testState.contentAnalysis == null) {
      return Promise.resolve({ keywords: [], informationType: 'stock' });
    }
    return Promise.resolve(testState.contentAnalysis);
  }),
}));

// Mock retrieveSearchCandidates — configurable per test via testState
vi.mock('../services/retrieve-search-candidates', () => ({
  retrieveSearchCandidates: vi.fn().mockImplementation(() => {
    if (testState.searchCandidatesError != null) {
      return Promise.reject(testState.searchCandidatesError);
    }
    return Promise.resolve(testState.searchCandidates);
  }),
}));

// Mock evaluateCandidates — configurable per test via testState
vi.mock('../services/evaluate-candidates', () => ({
  evaluateCandidates: vi.fn().mockImplementation(() => {
    if (testState.evaluateCandidatesError != null) {
      return Promise.reject(testState.evaluateCandidatesError);
    }
    return Promise.resolve(testState.evaluatedSuggestions);
  }),
}));

// Mock generateCategorySuggestion — configurable per test via testState
vi.mock('../services/generate-category-suggestion', () => ({
  generateCategorySuggestion: vi.fn().mockImplementation(() => {
    if (testState.categorySuggestionError != null) {
      return Promise.reject(testState.categorySuggestionError);
    }
    return Promise.resolve(testState.categorySuggestion);
  }),
}));

// Mock resolveParentGrant — returns configurable grant value via testState
vi.mock('../services/resolve-parent-grant', () => ({
  resolveParentGrant: vi.fn().mockImplementation(() => {
    return Promise.resolve(testState.parentGrant);
  }),
}));
+
+describe('POST /suggest-path integration', () => {
+  let app: express.Application;
+
  // Fresh express app and default mock state per test keeps cases independent.
  beforeEach(async () => {
    // Reset test state to defaults
    testState.authenticateUser = true;
    testState.aiEnabled = true;
    testState.openaiServiceType = 'openai';
    testState.disableUserPages = false;
    testState.contentAnalysis = null;
    testState.contentAnalysisError = null;
    testState.searchCandidates = [];
    testState.searchCandidatesError = null;
    testState.evaluatedSuggestions = [];
    testState.evaluateCandidatesError = null;
    testState.categorySuggestion = null;
    testState.categorySuggestionError = null;
    testState.parentGrant = 1;

    // Setup express app with ApiV3Response methods (apiv3 / apiv3Err helpers
    // that the route handlers call instead of res.json directly)
    app = express();
    app.use(express.json());
    app.use((_req: Request, res: Response, next: NextFunction) => {
      const apiRes = res as ApiV3Response;
      apiRes.apiv3 = function (obj = {}, status = 200) {
        this.status(status).json(obj);
      };
      apiRes.apiv3Err = function (_err, status = 400) {
        const errors = Array.isArray(_err) ? _err : [_err];
        this.status(status).json({ errors });
      };
      next();
    });

    // Import and mount the handler factory with real middleware chain
    const { suggestPathHandlersFactory } = await import('../routes/apiv3');
    const mockCrowi = {
      searchService: { searchKeyword: vi.fn() },
    } as unknown as Crowi;
    app.post('/suggest-path', suggestPathHandlersFactory(mockCrowi));
  });
+
  // Phase 1: only the memo suggestion exists — no AI pipeline involved.
  describe('Phase 1 — memo-only', () => {
    describe('valid request with authentication', () => {
      it('should return 200 with suggestions array containing one memo suggestion', async () => {
        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Some page content about React hooks' })
          .expect(200);

        expect(response.body.suggestions).toBeDefined();
        expect(Array.isArray(response.body.suggestions)).toBe(true);
        expect(response.body.suggestions).toHaveLength(1);
      });

      it('should return memo suggestion with all required fields and correct values', async () => {
        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Some page content' })
          .expect(200);

        const suggestion = response.body.suggestions[0];
        expect(suggestion).toEqual({
          type: 'memo',
          path: '/user/alice/memo/',
          label: 'Save as memo',
          description: 'Save to your personal memo area',
          grant: 4,
        });
      });

      it('should return path with trailing slash', async () => {
        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Some page content' })
          .expect(200);

        expect(response.body.suggestions[0].path).toMatch(/\/$/);
      });

      it('should return grant value of 4 (GRANT_OWNER)', async () => {
        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Some page content' })
          .expect(200);

        expect(response.body.suggestions[0].grant).toBe(4);
      });
    });

    describe('authentication enforcement', () => {
      it('should return 403 when user is not authenticated', async () => {
        testState.authenticateUser = false;

        await request(app)
          .post('/suggest-path')
          .send({ body: 'Some page content' })
          .expect(403);
      });
    });

    describe('input validation', () => {
      it('should return 400 when body field is missing', async () => {
        await request(app).post('/suggest-path').send({}).expect(400);
      });

      it('should return 400 when body field is empty string', async () => {
        await request(app).post('/suggest-path').send({ body: '' }).expect(400);
      });

      it('should return 400 when body exceeds maximum length', async () => {
        const oversizedBody = 'x'.repeat(100_001);
        await request(app)
          .post('/suggest-path')
          .send({ body: oversizedBody })
          .expect(400);
      });

      it('should accept body at the maximum length boundary', async () => {
        const maxBody = 'x'.repeat(100_000);
        const response = await request(app)
          .post('/suggest-path')
          .send({ body: maxBody });
        // Should not be rejected by validation (may be 200 or other non-400 status)
        expect(response.status).not.toBe(400);
      });
    });

    describe('AI service gating', () => {
      it('should return 403 when AI is not enabled', async () => {
        testState.aiEnabled = false;

        await request(app)
          .post('/suggest-path')
          .send({ body: 'Some page content' })
          .expect(403);
      });

      it('should return 403 when openai service type is not configured', async () => {
        testState.openaiServiceType = null;

        await request(app)
          .post('/suggest-path')
          .send({ body: 'Some page content' })
          .expect(403);
      });
    });
  });
+
+  describe('Phase 2 — revised pipeline verification', () => {
    // Common fixture data shared by the Phase 2 cases below
    const stockAnalysis = {
      keywords: ['React', 'hooks'],
      informationType: 'stock' as const,
    };

    const flowAnalysis = {
      keywords: ['meeting', 'standup'],
      informationType: 'flow' as const,
    };

    const searchCandidates = [
      {
        pagePath: '/tech-notes/React/hooks-guide',
        snippet: 'React hooks overview',
        score: 10,
      },
      {
        pagePath: '/tech-notes/React/state-management',
        snippet: 'State management',
        score: 8,
      },
    ];

    const singleEvaluated = [
      {
        path: '/tech-notes/React/',
        label: 'Save near related pages',
        description:
          'This area contains React documentation. Your stock content fits well here.',
      },
    ];

    const categorySuggestionFixture = {
      type: 'category',
      path: '/tech-notes/',
      label: 'Save under category',
      description: 'Top-level category: tech-notes',
      grant: 1,
    };

    // Helper: set up full pipeline success with optional overrides
    const setupFullPipeline = (overrides?: {
      analysis?: ContentAnalysis;
      candidates?: typeof searchCandidates;
      evaluated?: typeof singleEvaluated;
      category?: typeof categorySuggestionFixture | null;
    }) => {
      testState.contentAnalysis = overrides?.analysis ?? stockAnalysis;
      testState.searchCandidates = overrides?.candidates ?? searchCandidates;
      testState.evaluatedSuggestions = overrides?.evaluated ?? singleEvaluated;
      // Explicit undefined check (not ??) so `category: null` can disable the
      // category suggestion entirely.
      testState.categorySuggestion =
        overrides?.category !== undefined
          ? overrides.category
          : categorySuggestionFixture;
    };
+
    // All pipeline stages succeed: memo + search + category suggestions.
    describe('complete revised flow end-to-end', () => {
      it('should return memo, search, and category suggestions when all succeed', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks and state management' })
          .expect(200);

        expect(response.body.suggestions).toHaveLength(3);
        expect(response.body.suggestions[0].type).toBe('memo');
        expect(response.body.suggestions[1].type).toBe('search');
        expect(response.body.suggestions[2].type).toBe('category');
      });

      it('should return correct memo suggestion alongside Phase 2 suggestions', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        expect(response.body.suggestions[0]).toEqual({
          type: 'memo',
          path: '/user/alice/memo/',
          label: 'Save as memo',
          description: 'Save to your personal memo area',
          grant: 4,
        });
      });

      it('should return search suggestion with AI-evaluated path and description', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        const searchSuggestion = response.body.suggestions[1];
        expect(searchSuggestion.type).toBe('search');
        expect(searchSuggestion.path).toBe('/tech-notes/React/');
        expect(searchSuggestion.label).toBe('Save near related pages');
        expect(searchSuggestion.description).toBe(
          'This area contains React documentation. Your stock content fits well here.',
        );
        expect(searchSuggestion.grant).toBe(1);
        expect(searchSuggestion.informationType).toBe('stock');
      });

      it('should return category suggestion with correct structure', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        expect(response.body.suggestions[2]).toEqual(categorySuggestionFixture);
      });

      it('should return multiple search suggestions for multi-candidate evaluation', async () => {
        const multiEvaluated = [
          {
            path: '/tech-notes/React/',
            label: 'Save near related pages',
            description:
              'React documentation area with existing hooks content.',
          },
          {
            path: '/tech-notes/React/performance/',
            label: 'New section for performance',
            description: 'New sibling alongside existing React pages.',
          },
        ];
        setupFullPipeline({ evaluated: multiEvaluated });

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React performance' })
          .expect(200);

        expect(response.body.suggestions).toHaveLength(4); // memo + 2 search + category
        const searchSuggestions = response.body.suggestions.filter(
          (s: { type: string }) => s.type === 'search',
        );
        expect(searchSuggestions).toHaveLength(2);
        expect(searchSuggestions[0].path).toBe('/tech-notes/React/');
        expect(searchSuggestions[1].path).toBe(
          '/tech-notes/React/performance/',
        );
      });

      it('should omit search suggestions when evaluator finds no suitable candidates', async () => {
        setupFullPipeline({ evaluated: [] });

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        expect(response.body.suggestions).toHaveLength(2); // memo + category
        expect(response.body.suggestions[0].type).toBe('memo');
        expect(response.body.suggestions[1].type).toBe('category');
      });
    });
+
    // informationType is only attached to search-based suggestions.
    describe('informationType verification', () => {
      it('should include informationType in search-based suggestions', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        const searchSuggestion = response.body.suggestions.find(
          (s: { type: string }) => s.type === 'search',
        );
        expect(searchSuggestion.informationType).toBe('stock');
      });

      it('should not include informationType in memo suggestion', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        const memoSuggestion = response.body.suggestions.find(
          (s: { type: string }) => s.type === 'memo',
        );
        expect(memoSuggestion).not.toHaveProperty('informationType');
      });

      it('should not include informationType in category suggestion', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        const categorySuggestion = response.body.suggestions.find(
          (s: { type: string }) => s.type === 'category',
        );
        expect(categorySuggestion).not.toHaveProperty('informationType');
      });

      it('should map flow informationType when content is classified as flow', async () => {
        setupFullPipeline({ analysis: flowAnalysis });

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Meeting notes from standup' })
          .expect(200);

        const searchSuggestion = response.body.suggestions.find(
          (s: { type: string }) => s.type === 'search',
        );
        expect(searchSuggestion.informationType).toBe('flow');
      });
    });
+
    // The evaluator may propose parent-directory, subdirectory, or sibling paths.
    describe('path proposal patterns', () => {
      it('should support parent directory pattern', async () => {
        const parentPattern = [
          {
            path: '/tech-notes/React/',
            label: 'Parent directory',
            description:
              'Save in the parent directory of matching React pages.',
          },
        ];
        setupFullPipeline({ evaluated: parentPattern, category: null });

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'React hooks content' })
          .expect(200);

        const searchSuggestion = response.body.suggestions.find(
          (s: { type: string }) => s.type === 'search',
        );
        expect(searchSuggestion.path).toBe('/tech-notes/React/');
        expect(searchSuggestion.path).toMatch(/\/$/);
      });

      it('should support subdirectory pattern', async () => {
        const subdirPattern = [
          {
            path: '/tech-notes/React/hooks-guide/advanced/',
            label: 'Subdirectory of matching page',
            description: 'Save under the hooks guide as a sub-topic.',
          },
        ];
        setupFullPipeline({ evaluated: subdirPattern, category: null });

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Advanced React hooks patterns' })
          .expect(200);

        const searchSuggestion = response.body.suggestions.find(
          (s: { type: string }) => s.type === 'search',
        );
        expect(searchSuggestion.path).toBe(
          '/tech-notes/React/hooks-guide/advanced/',
        );
      });

      it('should support sibling pattern with new path at correct hierarchy level', async () => {
        // Matching candidate was at /tech-notes/React/hooks-guide (depth 3)
        // Sibling should also be at depth 3: /tech-notes/React/performance/
        const siblingPattern = [
          {
            path: '/tech-notes/React/performance/',
            label: 'New section for performance',
            description:
              'A new sibling section alongside existing React documentation.',
          },
        ];
        setupFullPipeline({ evaluated: siblingPattern, category: null });

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'React performance optimization tips' })
          .expect(200);

        const searchSuggestion = response.body.suggestions.find(
          (s: { type: string }) => s.type === 'search',
        );
        expect(searchSuggestion.path).toBe('/tech-notes/React/performance/');
        // Verify hierarchy level: path has 3 segments (same depth as hooks-guide)
        const segments = searchSuggestion.path.split('/').filter(Boolean);
        expect(segments).toHaveLength(3);
      });

      it('should return all three patterns when evaluator produces them', async () => {
        const allPatterns = [
          {
            path: '/tech-notes/React/',
            label: 'Parent directory',
            description: 'Parent directory of matching pages.',
          },
          {
            path: '/tech-notes/React/hooks-guide/advanced/',
            label: 'Subdirectory',
            description: 'Under the hooks guide.',
          },
          {
            path: '/tech-notes/React/performance/',
            label: 'Sibling section',
            description: 'New sibling alongside existing pages.',
          },
        ];
        setupFullPipeline({ evaluated: allPatterns, category: null });

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React' })
          .expect(200);

        const searchSuggestions = response.body.suggestions.filter(
          (s: { type: string }) => s.type === 'search',
        );
        expect(searchSuggestions).toHaveLength(3);
        expect(searchSuggestions[0].path).toBe('/tech-notes/React/');
        expect(searchSuggestions[1].path).toBe(
          '/tech-notes/React/hooks-guide/advanced/',
        );
        expect(searchSuggestions[2].path).toBe(
          '/tech-notes/React/performance/',
        );
        // All paths end with trailing slash
        for (const s of searchSuggestions) {
          expect(s.path).toMatch(/\/$/);
        }
      });
    });
+
    // Failures in any Phase 2 stage must degrade to fewer suggestions,
    // never to an error response — the memo suggestion is always present.
    describe('graceful degradation', () => {
      it('should return memo-only when content analysis fails', async () => {
        testState.contentAnalysisError = new Error('AI service unavailable');

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        expect(response.body.suggestions).toHaveLength(1);
        expect(response.body.suggestions[0].type).toBe('memo');
      });

      it('should return memo-only when content analysis returns empty keywords', async () => {
        // testState.contentAnalysis is null by default → returns { keywords: [], informationType: 'stock' }

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        expect(response.body.suggestions).toHaveLength(1);
        expect(response.body.suggestions[0].type).toBe('memo');
      });

      it('should omit search suggestions when search returns empty candidates', async () => {
        testState.contentAnalysis = stockAnalysis;
        testState.searchCandidates = [];
        testState.categorySuggestion = categorySuggestionFixture;

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        expect(response.body.suggestions).toHaveLength(2); // memo + category
        expect(response.body.suggestions[0].type).toBe('memo');
        expect(response.body.suggestions[1].type).toBe('category');
      });

      it('should return memo + category when candidate evaluation fails', async () => {
        testState.contentAnalysis = stockAnalysis;
        testState.searchCandidates = searchCandidates;
        testState.evaluateCandidatesError = new Error('AI evaluation failed');
        testState.categorySuggestion = categorySuggestionFixture;

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        expect(response.body.suggestions).toHaveLength(2);
        expect(response.body.suggestions[0].type).toBe('memo');
        expect(response.body.suggestions[1].type).toBe('category');
      });

      it('should return memo + search when category generation fails', async () => {
        testState.contentAnalysis = stockAnalysis;
        testState.searchCandidates = searchCandidates;
        testState.evaluatedSuggestions = singleEvaluated;
        testState.categorySuggestionError = new Error(
          'Category generation failed',
        );

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        expect(response.body.suggestions).toHaveLength(2);
        expect(response.body.suggestions[0].type).toBe('memo');
        expect(response.body.suggestions[1].type).toBe('search');
      });

      it('should return memo-only when all Phase 2 components fail', async () => {
        testState.contentAnalysis = stockAnalysis;
        testState.searchCandidatesError = new Error('Search service down');
        testState.categorySuggestionError = new Error('Category failed');

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        expect(response.body.suggestions).toHaveLength(1);
        expect(response.body.suggestions[0].type).toBe('memo');
      });

      it('should return correct memo structure even when Phase 2 degrades', async () => {
        testState.contentAnalysisError = new Error('AI service unavailable');

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        expect(response.body.suggestions[0]).toEqual({
          type: 'memo',
          path: '/user/alice/memo/',
          label: 'Save as memo',
          description: 'Save to your personal memo area',
          grant: 4,
        });
      });

      it('should skip search pipeline entirely when content analysis fails', async () => {
        testState.contentAnalysisError = new Error('AI service unavailable');

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        // Only memo, no search or category
        expect(response.body.suggestions).toHaveLength(1);
        const types = response.body.suggestions.map(
          (s: { type: string }) => s.type,
        );
        expect(types).not.toContain('search');
        expect(types).not.toContain('category');
      });
    });
+
    // Structural invariants that hold for every suggestion in the response.
    describe('response structure verification', () => {
      it('should have trailing slashes on all suggestion paths', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        for (const suggestion of response.body.suggestions) {
          expect(suggestion.path).toMatch(/\/$/);
        }
      });

      it('should include all required fields in every suggestion', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        const requiredFields = [
          'type',
          'path',
          'label',
          'description',
          'grant',
        ];
        for (const suggestion of response.body.suggestions) {
          for (const field of requiredFields) {
            expect(suggestion).toHaveProperty(field);
          }
        }
      });

      it('should include grant values as numbers for all suggestion types', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        for (const suggestion of response.body.suggestions) {
          expect(typeof suggestion.grant).toBe('number');
        }
      });

      it('should have fixed description for memo type', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        const memo = response.body.suggestions.find(
          (s: { type: string }) => s.type === 'memo',
        );
        expect(memo.description).toBe('Save to your personal memo area');
      });

      it('should have AI-generated description for search type', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        const search = response.body.suggestions.find(
          (s: { type: string }) => s.type === 'search',
        );
        // AI-generated descriptions are non-empty and contextual
        expect(search.description).toBeTruthy();
        expect(search.description.length).toBeGreaterThan(10);
      });

      it('should have mechanical description for category type', async () => {
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        const category = response.body.suggestions.find(
          (s: { type: string }) => s.type === 'category',
        );
        // Mechanical description follows "Top-level category: {name}" format
        expect(category.description).toMatch(/^Top-level category: /);
      });

      it('should have valid PageGrant values for all suggestions', async () => {
        testState.parentGrant = 4;
        setupFullPipeline();

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React hooks' })
          .expect(200);

        const validGrants = [1, 2, 4, 5];
        for (const suggestion of response.body.suggestions) {
          expect(validGrants).toContain(suggestion.grant);
        }
      });

      it('should resolve different grant values per search suggestion path', async () => {
        const multiEvaluated = [
          {
            path: '/public-docs/React/',
            label: 'Public docs',
            description: 'Public documentation area.',
          },
          {
            path: '/private-notes/React/',
            label: 'Private notes',
            description: 'Private notes area.',
          },
        ];
        setupFullPipeline({ evaluated: multiEvaluated, category: null });

        const response = await request(app)
          .post('/suggest-path')
          .send({ body: 'Content about React' })
          .expect(200);

        const searchSuggestions = response.body.suggestions.filter(
          (s: { type: string }) => s.type === 'search',
        );
        // Both use testState.parentGrant (1) — verifies grant resolution is called per path
        expect(searchSuggestions).toHaveLength(2);
        for (const s of searchSuggestions) {
          expect(typeof s.grant).toBe('number');
        }
      });
    });
+  });
+});

+ 182 - 0
apps/app/src/features/ai-tools/suggest-path/server/routes/apiv3/index.spec.ts

@@ -0,0 +1,182 @@
+import type { Request, RequestHandler } from 'express';
+import type { Mock } from 'vitest';
+
+import type Crowi from '~/server/crowi';
+import type { ApiV3Response } from '~/server/routes/apiv3/interfaces/apiv3-response';
+
+const mocks = vi.hoisted(() => {
+  return {
+    generateSuggestionsMock: vi.fn(),
+    loginRequiredFactoryMock: vi.fn(),
+    certifyAiServiceMock: vi.fn(),
+    findAllUserGroupIdsMock: vi.fn(),
+    findAllExternalUserGroupIdsMock: vi.fn(),
+  };
+});
+
+vi.mock('../../services/generate-suggestions', () => ({
+  generateSuggestions: mocks.generateSuggestionsMock,
+}));
+
+vi.mock('~/server/middlewares/login-required', () => ({
+  default: mocks.loginRequiredFactoryMock,
+}));
+
+vi.mock(
+  '~/features/openai/server/routes/middlewares/certify-ai-service',
+  () => ({
+    certifyAiService: mocks.certifyAiServiceMock,
+  }),
+);
+
+vi.mock('~/server/middlewares/access-token-parser', () => ({
+  accessTokenParser: vi.fn(() => vi.fn()),
+}));
+
+vi.mock('~/server/middlewares/apiv3-form-validator', () => ({
+  apiV3FormValidator: vi.fn(),
+}));
+
+vi.mock('~/server/models/user-group-relation', () => ({
+  default: {
+    findAllUserGroupIdsRelatedToUser: mocks.findAllUserGroupIdsMock,
+  },
+}));
+
+vi.mock(
+  '~/features/external-user-group/server/models/external-user-group-relation',
+  () => ({
+    default: {
+      findAllUserGroupIdsRelatedToUser: mocks.findAllExternalUserGroupIdsMock,
+    },
+  }),
+);
+
+describe('suggestPathHandlersFactory', () => {
+  const mockSearchService = { searchKeyword: vi.fn() };
+  const mockCrowi = {
+    searchService: mockSearchService,
+  } as unknown as Crowi;
+
+  beforeEach(() => {
+    vi.resetAllMocks();
+    mocks.loginRequiredFactoryMock.mockReturnValue(vi.fn());
+    mocks.findAllUserGroupIdsMock.mockResolvedValue(['group1']);
+    mocks.findAllExternalUserGroupIdsMock.mockResolvedValue(['extGroup1']);
+  });
+
+  describe('middleware chain', () => {
+    it('should return an array of request handlers', async () => {
+      const { suggestPathHandlersFactory } = await import('.');
+      const handlers = suggestPathHandlersFactory(mockCrowi);
+      expect(Array.isArray(handlers)).toBe(true);
+      expect(handlers.length).toBeGreaterThanOrEqual(5);
+    });
+
+    it('should include certifyAiService in the middleware chain', async () => {
+      const { suggestPathHandlersFactory } = await import('.');
+      const handlers = suggestPathHandlersFactory(mockCrowi);
+      expect(handlers).toContain(mocks.certifyAiServiceMock);
+    });
+  });
+
+  describe('handler', () => {
+    const createMockReqRes = () => {
+      const req = {
+        user: { _id: 'user123', username: 'alice' },
+        body: { body: 'Some page content' },
+      } as unknown as Request;
+
+      const res = {
+        apiv3: vi.fn(),
+        apiv3Err: vi.fn(),
+      } as unknown as ApiV3Response;
+
+      return { req, res };
+    };
+
+    it('should call generateSuggestions with user, body, userGroups, and searchService', async () => {
+      const suggestions = [
+        {
+          type: 'memo',
+          path: '/user/alice/memo/',
+          label: 'Save as memo',
+          description: 'Save to your personal memo area',
+          grant: 4,
+        },
+      ];
+      mocks.generateSuggestionsMock.mockResolvedValue(suggestions);
+
+      const { suggestPathHandlersFactory } = await import('.');
+      const handlers = suggestPathHandlersFactory(mockCrowi);
+      const handler = handlers[handlers.length - 1] as RequestHandler;
+
+      const { req, res } = createMockReqRes();
+      await handler(req, res, vi.fn());
+
+      expect(mocks.generateSuggestionsMock).toHaveBeenCalledWith(
+        { _id: 'user123', username: 'alice' },
+        'Some page content',
+        ['group1', 'extGroup1'],
+        mockSearchService,
+      );
+    });
+
+    it('should return suggestions array via res.apiv3', async () => {
+      const suggestions = [
+        {
+          type: 'memo',
+          path: '/user/alice/memo/',
+          label: 'Save as memo',
+          description: 'Save to your personal memo area',
+          grant: 4,
+        },
+      ];
+      mocks.generateSuggestionsMock.mockResolvedValue(suggestions);
+
+      const { suggestPathHandlersFactory } = await import('.');
+      const handlers = suggestPathHandlersFactory(mockCrowi);
+      const handler = handlers[handlers.length - 1] as RequestHandler;
+
+      const { req, res } = createMockReqRes();
+      await handler(req, res, vi.fn());
+
+      expect(res.apiv3).toHaveBeenCalledWith({ suggestions });
+    });
+
+    it('should return error when generateSuggestions throws', async () => {
+      mocks.generateSuggestionsMock.mockRejectedValue(
+        new Error('Unexpected error'),
+      );
+
+      const { suggestPathHandlersFactory } = await import('.');
+      const handlers = suggestPathHandlersFactory(mockCrowi);
+      const handler = handlers[handlers.length - 1] as RequestHandler;
+
+      const { req, res } = createMockReqRes();
+      await handler(req, res, vi.fn());
+
+      expect(res.apiv3Err).toHaveBeenCalled();
+      // Should not expose internal error details (Req 9.2)
+      const apiv3ErrMock = res.apiv3Err as Mock;
+      const errorCall = apiv3ErrMock.mock.calls[0];
+      expect(errorCall[0].message).not.toContain('Unexpected error');
+    });
+
+    it('should combine internal and external user groups', async () => {
+      mocks.findAllUserGroupIdsMock.mockResolvedValue(['g1', 'g2']);
+      mocks.findAllExternalUserGroupIdsMock.mockResolvedValue(['eg1']);
+      mocks.generateSuggestionsMock.mockResolvedValue([]);
+
+      const { suggestPathHandlersFactory } = await import('.');
+      const handlers = suggestPathHandlersFactory(mockCrowi);
+      const handler = handlers[handlers.length - 1] as RequestHandler;
+
+      const { req, res } = createMockReqRes();
+      await handler(req, res, vi.fn());
+
+      const call = mocks.generateSuggestionsMock.mock.calls[0];
+      expect(call[2]).toEqual(['g1', 'g2', 'eg1']);
+    });
+  });
+});

+ 176 - 0
apps/app/src/features/ai-tools/suggest-path/server/routes/apiv3/index.ts

@@ -0,0 +1,176 @@
+import assert from 'node:assert';
+import type { IUserHasId } from '@growi/core/dist/interfaces';
+import { SCOPE } from '@growi/core/dist/interfaces';
+import { ErrorV3 } from '@growi/core/dist/models';
+import type { Request, RequestHandler } from 'express';
+import { body } from 'express-validator';
+
+import ExternalUserGroupRelation from '~/features/external-user-group/server/models/external-user-group-relation';
+import { certifyAiService } from '~/features/openai/server/routes/middlewares/certify-ai-service';
+import type Crowi from '~/server/crowi';
+import { accessTokenParser } from '~/server/middlewares/access-token-parser';
+import { apiV3FormValidator } from '~/server/middlewares/apiv3-form-validator';
+import loginRequiredFactory from '~/server/middlewares/login-required';
+import UserGroupRelation from '~/server/models/user-group-relation';
+import type { ApiV3Response } from '~/server/routes/apiv3/interfaces/apiv3-response';
+import loggerFactory from '~/utils/logger';
+
+import type { SearchService } from '../../../interfaces/suggest-path-types';
+import { generateSuggestions } from '../../services/generate-suggestions';
+
+const logger = loggerFactory('growi:features:suggest-path:routes');
+
+/**
+ * @swagger
+ *
+ * components:
+ *   schemas:
+ *     PathSuggestion:
+ *       type: object
+ *       required:
+ *         - type
+ *         - path
+ *         - label
+ *         - description
+ *         - grant
+ *       properties:
+ *         type:
+ *           type: string
+ *           enum: [memo, search, category]
+ *           description: The type of suggestion
+ *         path:
+ *           type: string
+ *           description: Suggested page path
+ *           example: "/user/alice/2026/04/01/meeting-notes"
+ *         label:
+ *           type: string
+ *           description: Human-readable label for the suggestion
+ *         description:
+ *           type: string
+ *           description: Explanation of why this path is suggested
+ *         grant:
+ *           type: integer
+ *           description: Page grant (1=public, 2=restricted, 4=owner_only, 5=user_group)
+ *         informationType:
+ *           type: string
+ *           enum: [flow, stock]
+ *           description: Whether the content is flow (time-based) or stock (reference)
+ *     SuggestPathResponse:
+ *       type: object
+ *       properties:
+ *         suggestions:
+ *           type: array
+ *           items:
+ *             $ref: '#/components/schemas/PathSuggestion'
+ */
+
+type ReqBody = {
+  body: string;
+};
+
+type SuggestPathReq = Request<
+  Record<string, string>,
+  ApiV3Response,
+  ReqBody
+> & {
+  user?: IUserHasId;
+};
+
+const MAX_BODY_LENGTH = 100_000;
+
+const validator = [
+  body('body')
+    .isString()
+    .withMessage('body must be a string')
+    .notEmpty()
+    .withMessage('body must not be empty')
+    .isLength({ max: MAX_BODY_LENGTH })
+    .withMessage(`body must not exceed ${MAX_BODY_LENGTH} characters`),
+];
+
+/**
+ * @swagger
+ *
+ * /ai-tools/suggest-path:
+ *   post:
+ *     summary: Suggest page paths based on content
+ *     description: Analyzes the given content and suggests appropriate page paths using keyword extraction, search, and AI evaluation.
+ *     tags: [AI Tools]
+ *     security:
+ *       - bearer: []
+ *       - accessTokenInQuery: []
+ *     requestBody:
+ *       required: true
+ *       content:
+ *         application/json:
+ *           schema:
+ *             type: object
+ *             required:
+ *               - body
+ *             properties:
+ *               body:
+ *                 type: string
+ *                 description: The page content to analyze for path suggestions
+ *                 maxLength: 100000
+ *     responses:
+ *       200:
+ *         description: Path suggestions generated successfully
+ *         content:
+ *           application/json:
+ *             schema:
+ *               $ref: '#/components/schemas/SuggestPathResponse'
+ *       500:
+ *         description: Failed to generate path suggestions
+ */
+export const suggestPathHandlersFactory = (crowi: Crowi): RequestHandler[] => {
+  const loginRequiredStrictly = loginRequiredFactory(crowi);
+
+  return [
+    accessTokenParser([SCOPE.READ.FEATURES.AI_ASSISTANT], {
+      acceptLegacy: true,
+    }),
+    loginRequiredStrictly,
+    certifyAiService,
+    ...validator,
+    apiV3FormValidator,
+    async (req: SuggestPathReq, res: ApiV3Response) => {
+      const { user } = req;
+      assert(
+        user != null,
+        'user is required (ensured by loginRequiredStrictly middleware)',
+      );
+
+      try {
+        const { searchService } = crowi;
+        assert(
+          searchService != null &&
+            typeof (searchService as unknown as Record<string, unknown>)
+              .searchKeyword === 'function',
+          'searchService must have searchKeyword method',
+        );
+        const typedSearchService = searchService as unknown as SearchService;
+
+        const userGroups = [
+          ...(await UserGroupRelation.findAllUserGroupIdsRelatedToUser(user)),
+          ...(await ExternalUserGroupRelation.findAllUserGroupIdsRelatedToUser(
+            user,
+          )),
+        ];
+
+        const suggestions = await generateSuggestions(
+          user,
+          req.body.body,
+          userGroups,
+          typedSearchService,
+        );
+        return res.apiv3({ suggestions });
+      } catch (err) {
+        logger.error(err);
+        return res.apiv3Err(
+          new ErrorV3('Failed to generate path suggestions'),
+          500,
+        );
+      }
+    },
+  ];
+};

+ 390 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/analyze-content.spec.ts

@@ -0,0 +1,390 @@
+import type { ContentAnalysis } from '../../interfaces/suggest-path-types';
+import { analyzeContent } from './analyze-content';
+
+const mocks = vi.hoisted(() => {
+  return {
+    chatCompletionMock: vi.fn(),
+    getClientMock: vi.fn(),
+    configManagerMock: {
+      getConfig: vi.fn(),
+    },
+  };
+});
+
+vi.mock('~/features/openai/server/services/client-delegator', () => ({
+  getClient: mocks.getClientMock,
+  isStreamResponse: (result: unknown) => {
+    return (
+      result != null &&
+      typeof result === 'object' &&
+      Symbol.asyncIterator in (result as Record<symbol, unknown>)
+    );
+  },
+}));
+
+vi.mock('~/server/service/config-manager', () => ({
+  configManager: mocks.configManagerMock,
+}));
+
+describe('analyzeContent', () => {
+  beforeEach(() => {
+    vi.resetAllMocks();
+    mocks.configManagerMock.getConfig.mockImplementation((key: string) => {
+      if (key === 'openai:serviceType') return 'openai';
+      return undefined;
+    });
+    mocks.getClientMock.mockReturnValue({
+      chatCompletion: mocks.chatCompletionMock,
+    });
+  });
+
+  describe('successful keyword extraction with quality verification', () => {
+    it('should return keywords and informationType from AI response', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: ['React', 'hooks', 'useState'],
+                informationType: 'stock',
+              }),
+            },
+          },
+        ],
+      });
+
+      const result = await analyzeContent(
+        'A guide to React hooks and useState',
+      );
+
+      expect(result).toEqual({
+        keywords: ['React', 'hooks', 'useState'],
+        informationType: 'stock',
+      } satisfies ContentAnalysis);
+    });
+
+    it('should extract 1-5 keywords prioritizing proper nouns and technical terms', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: [
+                  'TypeScript',
+                  'generics',
+                  'mapped types',
+                  'conditional types',
+                ],
+                informationType: 'stock',
+              }),
+            },
+          },
+        ],
+      });
+
+      const result = await analyzeContent(
+        'TypeScript generics and advanced type system features',
+      );
+
+      expect(result.keywords.length).toBeGreaterThanOrEqual(1);
+      expect(result.keywords.length).toBeLessThanOrEqual(5);
+    });
+
+    it('should pass content body to chatCompletion as user message', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: ['MongoDB'],
+                informationType: 'stock',
+              }),
+            },
+          },
+        ],
+      });
+
+      await analyzeContent('MongoDB aggregation pipeline');
+
+      expect(mocks.chatCompletionMock).toHaveBeenCalledWith(
+        expect.objectContaining({
+          messages: expect.arrayContaining([
+            expect.objectContaining({
+              role: 'user',
+              content: 'MongoDB aggregation pipeline',
+            }),
+          ]),
+        }),
+      );
+    });
+
+    it('should use a system prompt instructing both keyword extraction and flow/stock classification', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: ['Next.js'],
+                informationType: 'stock',
+              }),
+            },
+          },
+        ],
+      });
+
+      await analyzeContent('Next.js routing');
+
+      expect(mocks.chatCompletionMock).toHaveBeenCalledWith(
+        expect.objectContaining({
+          messages: expect.arrayContaining([
+            expect.objectContaining({
+              role: 'system',
+            }),
+          ]),
+        }),
+      );
+    });
+
+    it('should not use streaming mode', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: ['keyword'],
+                informationType: 'stock',
+              }),
+            },
+          },
+        ],
+      });
+
+      await analyzeContent('test content');
+
+      expect(mocks.chatCompletionMock).toHaveBeenCalledWith(
+        expect.not.objectContaining({
+          stream: true,
+        }),
+      );
+    });
+  });
+
+  describe('correct flow/stock classification for representative content samples', () => {
+    it('should classify meeting notes as flow', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: ['sprint', 'retrospective', 'action items'],
+                informationType: 'flow',
+              }),
+            },
+          },
+        ],
+      });
+
+      const result = await analyzeContent(
+        '2025/05/01 Sprint retrospective meeting notes. Action items discussed.',
+      );
+
+      expect(result.informationType).toBe('flow');
+    });
+
+    it('should classify documentation as stock', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: ['API', 'authentication', 'JWT'],
+                informationType: 'stock',
+              }),
+            },
+          },
+        ],
+      });
+
+      const result = await analyzeContent(
+        'API Authentication Guide: How to use JWT tokens for secure access.',
+      );
+
+      expect(result.informationType).toBe('stock');
+    });
+  });
+
+  describe('edge cases', () => {
+    it('should handle very short content', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: ['hello'],
+                informationType: 'stock',
+              }),
+            },
+          },
+        ],
+      });
+
+      const result = await analyzeContent('hello');
+
+      expect(result.keywords).toEqual(['hello']);
+      expect(result.informationType).toBe('stock');
+    });
+
+    it('should handle content with ambiguous information type', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: ['Docker', 'deployment'],
+                informationType: 'stock',
+              }),
+            },
+          },
+        ],
+      });
+
+      const result = await analyzeContent('Docker deployment notes');
+
+      expect(result.keywords.length).toBeGreaterThanOrEqual(1);
+      expect(['flow', 'stock']).toContain(result.informationType);
+    });
+  });
+
+  describe('failure propagation', () => {
+    it('should throw when chatCompletion rejects', async () => {
+      mocks.chatCompletionMock.mockRejectedValue(new Error('API error'));
+
+      await expect(analyzeContent('test')).rejects.toThrow('API error');
+    });
+
+    it('should throw with descriptive message when AI returns invalid JSON', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [{ message: { content: 'not valid json' } }],
+      });
+
+      await expect(analyzeContent('test')).rejects.toThrow(
+        /Failed to parse LLM response as JSON/,
+      );
+    });
+
+    it('should include truncated response in error message when AI returns invalid JSON', async () => {
+      const longInvalidJson = 'x'.repeat(300);
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [{ message: { content: longInvalidJson } }],
+      });
+
+      await expect(analyzeContent('test')).rejects.toThrow(
+        /Failed to parse LLM response as JSON/,
+      );
+    });
+
+    it('should throw when AI returns JSON without keywords field', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({ informationType: 'stock' }),
+            },
+          },
+        ],
+      });
+
+      await expect(analyzeContent('test')).rejects.toThrow();
+    });
+
+    it('should throw when AI returns JSON without informationType field', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({ keywords: ['test'] }),
+            },
+          },
+        ],
+      });
+
+      await expect(analyzeContent('test')).rejects.toThrow();
+    });
+
+    it('should throw when AI returns invalid informationType value', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: ['test'],
+                informationType: 'invalid',
+              }),
+            },
+          },
+        ],
+      });
+
+      await expect(analyzeContent('test')).rejects.toThrow();
+    });
+
+    it('should throw when keywords is not an array', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: 'not-an-array',
+                informationType: 'stock',
+              }),
+            },
+          },
+        ],
+      });
+
+      await expect(analyzeContent('test')).rejects.toThrow();
+    });
+
+    it('should throw when keywords array is empty', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                keywords: [],
+                informationType: 'stock',
+              }),
+            },
+          },
+        ],
+      });
+
+      await expect(analyzeContent('test')).rejects.toThrow();
+    });
+
+    it('should throw when choices array is empty', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [],
+      });
+
+      await expect(analyzeContent('test')).rejects.toThrow();
+    });
+
+    it('should throw when message content is null', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [{ message: { content: null } }],
+      });
+
+      await expect(analyzeContent('test')).rejects.toThrow();
+    });
+
+    it('should throw on streaming response', async () => {
+      const streamMock = {
+        [Symbol.asyncIterator]: () => ({}),
+      };
+      mocks.chatCompletionMock.mockResolvedValue(streamMock);
+
+      await expect(analyzeContent('test')).rejects.toThrow();
+    });
+  });
+});

+ 51 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/analyze-content.ts

@@ -0,0 +1,51 @@
+import { instructionsForInformationTypes } from '~/features/openai/server/services/assistant/instructions/commons';
+
+import type {
+  ContentAnalysis,
+  InformationType,
+} from '../../interfaces/suggest-path-types';
+import { callLlmForJson } from './call-llm-for-json';
+
+const VALID_INFORMATION_TYPES: readonly InformationType[] = ['flow', 'stock'];
+
+const SYSTEM_PROMPT = [
+  'You are a content analysis assistant. Analyze the following content and return a JSON object with two fields:\n',
+  '1. "keywords": An array of 1 to 5 search keywords extracted from the content. ',
+  'Prioritize proper nouns and technical terms over generic or common words.\n',
+  '2. "informationType": Classify the content as either "flow" or "stock".\n\n',
+  '## Classification Reference\n',
+  instructionsForInformationTypes,
+  '\n\n',
+  'Return only the JSON object, no other text.\n',
+  'Example: {"keywords": ["React", "useState", "hooks"], "informationType": "stock"}',
+].join('');
+
+const isValidContentAnalysis = (parsed: unknown): parsed is ContentAnalysis => {
+  if (parsed == null || typeof parsed !== 'object') {
+    return false;
+  }
+
+  const obj = parsed as Record<string, unknown>;
+
+  if (!Array.isArray(obj.keywords) || obj.keywords.length === 0) {
+    return false;
+  }
+
+  if (
+    typeof obj.informationType !== 'string' ||
+    !VALID_INFORMATION_TYPES.includes(obj.informationType as InformationType)
+  ) {
+    return false;
+  }
+
+  return true;
+};
+
+export const analyzeContent = (body: string): Promise<ContentAnalysis> => {
+  return callLlmForJson(
+    SYSTEM_PROMPT,
+    body,
+    isValidContentAnalysis,
+    'Invalid content analysis response: expected { keywords: string[], informationType: "flow" | "stock" }',
+  );
+};

+ 60 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/call-llm-for-json.ts

@@ -0,0 +1,60 @@
+import type { OpenaiServiceType } from '~/features/openai/interfaces/ai';
+import {
+  getClient,
+  isStreamResponse,
+} from '~/features/openai/server/services/client-delegator';
+import { configManager } from '~/server/service/config-manager';
+
+/**
+ * Shared utility for making LLM calls that return JSON responses.
+ * Handles OpenAI client initialization, JSON parsing, and response validation.
+ * Consumed by `analyzeContent` (1st AI call) and `evaluateCandidates` (2nd AI call).
+ */
+export const callLlmForJson = async <T>(
+  systemPrompt: string,
+  userMessage: string,
+  validate: (parsed: unknown) => parsed is T,
+  validationErrorMessage: string,
+): Promise<T> => {
+  const openaiServiceType = configManager.getConfig(
+    'openai:serviceType',
+  ) as OpenaiServiceType;
+  const client = getClient({ openaiServiceType });
+
+  const completion = await client.chatCompletion({
+    model: 'gpt-4.1-nano',
+    messages: [
+      { role: 'system', content: systemPrompt },
+      { role: 'user', content: userMessage },
+    ],
+  });
+
+  if (isStreamResponse(completion)) {
+    throw new Error('Unexpected streaming response from chatCompletion');
+  }
+
+  const choice = completion.choices[0];
+  if (choice == null) {
+    throw new Error('No choices returned from chatCompletion');
+  }
+
+  const content = choice.message.content;
+  if (content == null) {
+    throw new Error('No content returned from chatCompletion');
+  }
+
+  let parsed: unknown;
+  try {
+    parsed = JSON.parse(content);
+  } catch {
+    throw new Error(
+      `Failed to parse LLM response as JSON: ${content.slice(0, 200)}`,
+    );
+  }
+
+  if (!validate(parsed)) {
+    throw new Error(validationErrorMessage);
+  }
+
+  return parsed;
+};

+ 511 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/evaluate-candidates.spec.ts

@@ -0,0 +1,511 @@
+import type {
+  ContentAnalysis,
+  EvaluatedSuggestion,
+  SearchCandidate,
+} from '../../interfaces/suggest-path-types';
+import { evaluateCandidates } from './evaluate-candidates';
+
+const mocks = vi.hoisted(() => {
+  return {
+    chatCompletionMock: vi.fn(),
+    getClientMock: vi.fn(),
+    configManagerMock: {
+      getConfig: vi.fn(),
+    },
+  };
+});
+
+vi.mock('~/features/openai/server/services/client-delegator', () => ({
+  getClient: mocks.getClientMock,
+  isStreamResponse: (result: unknown) => {
+    return (
+      result != null &&
+      typeof result === 'object' &&
+      Symbol.asyncIterator in (result as Record<symbol, unknown>)
+    );
+  },
+}));
+
+vi.mock('~/server/service/config-manager', () => ({
+  configManager: mocks.configManagerMock,
+}));
+
+const stockAnalysis: ContentAnalysis = {
+  keywords: ['React', 'hooks', 'useState'],
+  informationType: 'stock',
+};
+
+const flowAnalysis: ContentAnalysis = {
+  keywords: ['sprint', 'retrospective'],
+  informationType: 'flow',
+};
+
+const sampleCandidates: SearchCandidate[] = [
+  {
+    pagePath: '/tech/React/hooks',
+    snippet: 'React hooks guide for state management',
+    score: 15,
+  },
+  {
+    pagePath: '/tech/React/state',
+    snippet: 'Managing state in React applications',
+    score: 12,
+  },
+];
+
+function mockAiResponse(suggestions: EvaluatedSuggestion[]) {
+  mocks.chatCompletionMock.mockResolvedValue({
+    choices: [
+      {
+        message: {
+          content: JSON.stringify(suggestions),
+        },
+      },
+    ],
+  });
+}
+
+describe('evaluateCandidates', () => {
+  beforeEach(() => {
+    vi.resetAllMocks();
+    mocks.configManagerMock.getConfig.mockImplementation((key: string) => {
+      if (key === 'openai:serviceType') return 'openai';
+      return undefined;
+    });
+    mocks.getClientMock.mockReturnValue({
+      chatCompletion: mocks.chatCompletionMock,
+    });
+  });
+
+  describe('path pattern selection across all three patterns', () => {
+    it('should return parent directory pattern suggestion', async () => {
+      const parentSuggestion: EvaluatedSuggestion = {
+        path: '/tech/React/',
+        label: 'Save near related pages',
+        description:
+          'This directory contains React documentation including hooks and state management.',
+      };
+      mockAiResponse([parentSuggestion]);
+
+      const result = await evaluateCandidates(
+        'A guide to React hooks',
+        stockAnalysis,
+        sampleCandidates,
+      );
+
+      expect(result).toHaveLength(1);
+      expect(result[0].path).toBe('/tech/React/');
+      expect(result[0].path).toMatch(/\/$/);
+    });
+
+    it('should return subdirectory pattern suggestion', async () => {
+      const subdirSuggestion: EvaluatedSuggestion = {
+        path: '/tech/React/hooks/advanced/',
+        label: 'Save near related pages',
+        description:
+          'Advanced hooks content fits under the existing hooks documentation.',
+      };
+      mockAiResponse([subdirSuggestion]);
+
+      const result = await evaluateCandidates(
+        'Advanced React hooks patterns',
+        stockAnalysis,
+        sampleCandidates,
+      );
+
+      expect(result).toHaveLength(1);
+      expect(result[0].path).toBe('/tech/React/hooks/advanced/');
+      expect(result[0].path).toMatch(/\/$/);
+    });
+
+    it('should return sibling directory pattern suggestion', async () => {
+      const siblingSuggestion: EvaluatedSuggestion = {
+        path: '/tech/React/performance/',
+        label: 'New section for performance topics',
+        description:
+          'A new section alongside existing React documentation for performance content.',
+      };
+      mockAiResponse([siblingSuggestion]);
+
+      const result = await evaluateCandidates(
+        'React performance optimization',
+        stockAnalysis,
+        sampleCandidates,
+      );
+
+      expect(result).toHaveLength(1);
+      expect(result[0].path).toBe('/tech/React/performance/');
+      expect(result[0].path).toMatch(/\/$/);
+    });
+  });
+
+  describe('sibling path generation at correct hierarchy level', () => {
+    it('should generate sibling paths at the same level as the candidate page', async () => {
+      const candidates: SearchCandidate[] = [
+        {
+          pagePath: '/docs/frontend/React/basics',
+          snippet: 'React basics introduction',
+          score: 10,
+        },
+      ];
+      const siblingSuggestion: EvaluatedSuggestion = {
+        path: '/docs/frontend/React/advanced/',
+        label: 'New section for advanced topics',
+        description: 'Sibling section at the same level as the basics page.',
+      };
+      mockAiResponse([siblingSuggestion]);
+
+      const result = await evaluateCandidates(
+        'Advanced React patterns',
+        stockAnalysis,
+        candidates,
+      );
+
+      // Sibling path should be at the same depth as the candidate
+      // (depth is counted as the number of non-empty path segments,
+      // after stripping the suggestion's trailing slash).
+      const candidateDepth = '/docs/frontend/React/basics'
+        .split('/')
+        .filter(Boolean).length;
+      const resultDepth = result[0].path
+        .replace(/\/$/, '')
+        .split('/')
+        .filter(Boolean).length;
+      expect(resultDepth).toBe(candidateDepth);
+    });
+  });
+
+  describe('AI-generated description quality', () => {
+    it('should include non-empty descriptions for each suggestion', async () => {
+      const suggestions: EvaluatedSuggestion[] = [
+        {
+          path: '/tech/React/',
+          label: 'Save near related pages',
+          description:
+            'Contains documentation about React hooks and state management patterns.',
+        },
+        {
+          path: '/tech/React/hooks/custom/',
+          label: 'Save under hooks section',
+          description:
+            'Custom hooks content fits naturally under the existing hooks documentation.',
+        },
+      ];
+      mockAiResponse(suggestions);
+
+      const result = await evaluateCandidates(
+        'Custom React hooks',
+        stockAnalysis,
+        sampleCandidates,
+      );
+
+      // Every returned suggestion must carry a non-empty description
+      // (the validator rejects empty ones; see evaluate-candidates.ts).
+      expect(result).toHaveLength(2);
+      for (const suggestion of result) {
+        expect(suggestion.description).toBeTruthy();
+        expect(suggestion.description.length).toBeGreaterThan(0);
+      }
+    });
+  });
+
+  describe('ranking order', () => {
+    // The LLM's ranking is authoritative: the service must return
+    // suggestions in the exact order received, without re-sorting.
+    it('should preserve AI-determined ranking order in results', async () => {
+      const rankedSuggestions: EvaluatedSuggestion[] = [
+        {
+          path: '/tech/React/hooks/',
+          label: 'Best match',
+          description: 'Closest content-destination fit.',
+        },
+        {
+          path: '/tech/React/',
+          label: 'Good match',
+          description: 'Broader category match.',
+        },
+      ];
+      mockAiResponse(rankedSuggestions);
+
+      const result = await evaluateCandidates(
+        'React hooks guide',
+        stockAnalysis,
+        sampleCandidates,
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[0].path).toBe('/tech/React/hooks/');
+      expect(result[1].path).toBe('/tech/React/');
+    });
+  });
+
+  describe('flow/stock alignment consideration', () => {
+    // The analysis informationType ('flow' or 'stock') must be embedded
+    // in the user prompt so the LLM can use it as a ranking factor.
+    it('should pass informationType to AI for ranking consideration', async () => {
+      const suggestion: EvaluatedSuggestion = {
+        path: '/meetings/2025/',
+        label: 'Save near meeting notes',
+        description: 'Flow content fits well in the meetings area.',
+      };
+      mockAiResponse([suggestion]);
+
+      await evaluateCandidates(
+        'Sprint retrospective notes from today',
+        flowAnalysis,
+        [
+          {
+            pagePath: '/meetings/2025/01',
+            snippet: 'January meeting',
+            score: 10,
+          },
+        ],
+      );
+
+      // Verify the AI receives informationType in the prompt
+      expect(mocks.chatCompletionMock).toHaveBeenCalledWith(
+        expect.objectContaining({
+          messages: expect.arrayContaining([
+            expect.objectContaining({
+              role: 'user',
+              content: expect.stringContaining('flow'),
+            }),
+          ]),
+        }),
+      );
+    });
+
+    it('should pass stock informationType to AI for ranking consideration', async () => {
+      const suggestion: EvaluatedSuggestion = {
+        path: '/tech/React/',
+        label: 'Save near documentation',
+        description: 'Stock content aligns with reference documentation.',
+      };
+      mockAiResponse([suggestion]);
+
+      await evaluateCandidates(
+        'React hooks documentation',
+        stockAnalysis,
+        sampleCandidates,
+      );
+
+      expect(mocks.chatCompletionMock).toHaveBeenCalledWith(
+        expect.objectContaining({
+          messages: expect.arrayContaining([
+            expect.objectContaining({
+              role: 'user',
+              content: expect.stringContaining('stock'),
+            }),
+          ]),
+        }),
+      );
+    });
+  });
+
+  describe('AI invocation details', () => {
+    // These tests pin the shape of the chatCompletion call: what goes
+    // into the prompt and which request options are (not) used.
+    it('should pass content body to AI', async () => {
+      mockAiResponse([]);
+
+      await evaluateCandidates(
+        'My custom React hooks article',
+        stockAnalysis,
+        sampleCandidates,
+      );
+
+      expect(mocks.chatCompletionMock).toHaveBeenCalledWith(
+        expect.objectContaining({
+          messages: expect.arrayContaining([
+            expect.objectContaining({
+              role: 'user',
+              content: expect.stringContaining('My custom React hooks article'),
+            }),
+          ]),
+        }),
+      );
+    });
+
+    it('should pass candidate paths and snippets to AI, not full page bodies', async () => {
+      mockAiResponse([]);
+
+      await evaluateCandidates(
+        'React hooks guide',
+        stockAnalysis,
+        sampleCandidates,
+      );
+
+      // Inspect the raw mock call: the user message carries only the
+      // candidate path and snippet text from sampleCandidates.
+      const call = mocks.chatCompletionMock.mock.calls[0][0];
+      const userMessage = call.messages.find(
+        (m: { role: string }) => m.role === 'user',
+      );
+      expect(userMessage.content).toContain('/tech/React/hooks');
+      expect(userMessage.content).toContain(
+        'React hooks guide for state management',
+      );
+    });
+
+    it('should include a system prompt with evaluation instructions', async () => {
+      mockAiResponse([]);
+
+      await evaluateCandidates('test content', stockAnalysis, sampleCandidates);
+
+      expect(mocks.chatCompletionMock).toHaveBeenCalledWith(
+        expect.objectContaining({
+          messages: expect.arrayContaining([
+            expect.objectContaining({
+              role: 'system',
+            }),
+          ]),
+        }),
+      );
+    });
+
+    it('should not use streaming mode', async () => {
+      mockAiResponse([]);
+
+      await evaluateCandidates('test content', stockAnalysis, sampleCandidates);
+
+      // A streaming response cannot be JSON-parsed as a whole, so the
+      // request must not set `stream: true`.
+      expect(mocks.chatCompletionMock).toHaveBeenCalledWith(
+        expect.not.objectContaining({
+          stream: true,
+        }),
+      );
+    });
+  });
+
+  describe('empty and edge cases', () => {
+    it('should return empty array when AI evaluates no candidates as suitable', async () => {
+      mockAiResponse([]);
+
+      const result = await evaluateCandidates(
+        'Unrelated content',
+        stockAnalysis,
+        sampleCandidates,
+      );
+
+      expect(result).toEqual([]);
+    });
+
+    it('should handle single candidate input', async () => {
+      const suggestion: EvaluatedSuggestion = {
+        path: '/tech/React/',
+        label: 'Save near related pages',
+        description: 'Single candidate evaluation.',
+      };
+      mockAiResponse([suggestion]);
+
+      const result = await evaluateCandidates('React content', stockAnalysis, [
+        sampleCandidates[0],
+      ]);
+
+      expect(result).toHaveLength(1);
+    });
+  });
+
+  describe('failure propagation', () => {
+    // Failures from the LLM layer (rejected calls, malformed or invalid
+    // responses) must surface to the caller — never be swallowed.
+    it('should throw when chatCompletion rejects', async () => {
+      mocks.chatCompletionMock.mockRejectedValue(new Error('API error'));
+
+      await expect(
+        evaluateCandidates('test', stockAnalysis, sampleCandidates),
+      ).rejects.toThrow('API error');
+    });
+
+    it('should throw with descriptive message when AI returns invalid JSON', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [{ message: { content: 'not valid json' } }],
+      });
+
+      await expect(
+        evaluateCandidates('test', stockAnalysis, sampleCandidates),
+      ).rejects.toThrow(/Failed to parse LLM response as JSON/);
+    });
+
+    it('should include truncated response in error message when AI returns invalid JSON', async () => {
+      // 300 chars exceeds the truncation limit of the error formatter.
+      const longInvalidJson = 'x'.repeat(300);
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [{ message: { content: longInvalidJson } }],
+      });
+
+      await expect(
+        evaluateCandidates('test', stockAnalysis, sampleCandidates),
+      ).rejects.toThrow(/Failed to parse LLM response as JSON/);
+    });
+
+    it('should throw when AI returns non-array JSON', async () => {
+      // A single object (valid JSON, wrong shape) must fail validation.
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify({
+                path: '/test/',
+                label: 'test',
+                description: 'test',
+              }),
+            },
+          },
+        ],
+      });
+
+      await expect(
+        evaluateCandidates('test', stockAnalysis, sampleCandidates),
+      ).rejects.toThrow();
+    });
+
+    it('should throw when choices array is empty', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [],
+      });
+
+      await expect(
+        evaluateCandidates('test', stockAnalysis, sampleCandidates),
+      ).rejects.toThrow();
+    });
+
+    it('should throw when message content is null', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [{ message: { content: null } }],
+      });
+
+      await expect(
+        evaluateCandidates('test', stockAnalysis, sampleCandidates),
+      ).rejects.toThrow();
+    });
+
+    it('should throw on streaming response', async () => {
+      // An async-iterable (streaming) result has no `choices` array and
+      // must be rejected rather than iterated.
+      const streamMock = {
+        [Symbol.asyncIterator]: () => ({}),
+      };
+      mocks.chatCompletionMock.mockResolvedValue(streamMock);
+
+      await expect(
+        evaluateCandidates('test', stockAnalysis, sampleCandidates),
+      ).rejects.toThrow();
+    });
+
+    it('should throw when suggestion item is missing required fields', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify([{ path: '/tech/' }]),
+            },
+          },
+        ],
+      });
+
+      await expect(
+        evaluateCandidates('test', stockAnalysis, sampleCandidates),
+      ).rejects.toThrow();
+    });
+
+    it('should throw when suggestion path does not end with trailing slash', async () => {
+      mocks.chatCompletionMock.mockResolvedValue({
+        choices: [
+          {
+            message: {
+              content: JSON.stringify([
+                { path: '/tech/React', label: 'test', description: 'test' },
+              ]),
+            },
+          },
+        ],
+      });
+
+      await expect(
+        evaluateCandidates('test', stockAnalysis, sampleCandidates),
+      ).rejects.toThrow();
+    });
+  });
+});

+ 115 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/evaluate-candidates.ts

@@ -0,0 +1,115 @@
+import { instructionsForInformationTypes } from '~/features/openai/server/services/assistant/instructions/commons';
+
+import type {
+  ContentAnalysis,
+  EvaluatedSuggestion,
+  SearchCandidate,
+} from '../../interfaces/suggest-path-types';
+import { callLlmForJson } from './call-llm-for-json';
+
+// System prompt for the LLM save-location evaluator. Assembled once at
+// module load from string fragments; the flow/stock section is spliced in
+// from the shared OpenAI assistant instructions so both features stay
+// consistent. The prompt mandates a JSON-array-only response whose items
+// match EvaluatedSuggestion (path with trailing slash, label, description).
+const SYSTEM_PROMPT = [
+  'You are a page save location evaluator for a wiki system. ',
+  'Given content to be saved, its analysis (keywords and information type), and a list of search candidate pages, ',
+  'propose optimal directory paths for saving the content.\n\n',
+  '## How to Read Wiki Paths\n',
+  'Treat the wiki path hierarchy as a content classification taxonomy. ',
+  'Each path segment represents a category or topic at a certain level of abstraction.\n',
+  'Example: `/engineering/frontend/react-testing-patterns`\n',
+  '- `engineering` = broad domain\n',
+  '- `frontend` = topic category within that domain\n',
+  '- `react-testing-patterns` = specific article\n\n',
+  'When proposing a save location, determine which level of the taxonomy the content belongs to ',
+  'and what category name best describes it. The proposed path should reflect where the content ',
+  'naturally fits in the existing classification structure.\n\n',
+  '## Path Proposal\n',
+  'For each suitable candidate, propose a directory path for the content. The proposed path may be:\n',
+  '- An existing directory in the candidate path hierarchy\n',
+  '- A new directory at the appropriate level of the taxonomy\n\n',
+  'Examples given candidate `/engineering/frontend/react-testing-patterns`:\n',
+  '- Content about React components → `/engineering/frontend/` (same topic category)\n',
+  '- Content about CSS architecture → `/engineering/frontend/css-architecture/` (sub-topic)\n',
+  '- Content about Express API design → `/engineering/backend/` (different topic at the same category level)\n\n',
+  'Only propose candidates that are genuinely suitable. Skip candidates where the content has no meaningful relationship.\n\n',
+  '## Flow/Stock Information Type\n',
+  instructionsForInformationTypes,
+  '\n\n',
+  'Use flow/stock alignment between the content and candidate locations as a RANKING FACTOR, not a hard filter.\n\n',
+  '## Output Format\n',
+  'Return a JSON array of suggestion objects, ranked by content-destination fit (best first).\n',
+  'Each object must have:\n',
+  '- "path": Directory path with trailing slash (e.g., "/engineering/backend/")\n',
+  '- "label": Short display label for the suggestion\n',
+  '- "description": Explanation of why this location is suitable, considering content relevance and flow/stock alignment\n\n',
+  'Return an empty array `[]` if no candidates are suitable.\n',
+  'Return only the JSON array, no other text.',
+].join('');
+
+/**
+ * Builds the user message sent to the LLM.
+ *
+ * Combines the raw content to save, the content analysis (keywords and
+ * flow/stock information type), and a numbered list of search candidates.
+ * Only each candidate's path, snippet, and score are included — full page
+ * bodies are deliberately omitted to keep the prompt small.
+ */
+function buildUserMessage(
+  body: string,
+  analysis: ContentAnalysis,
+  candidates: SearchCandidate[],
+): string {
+  const candidateList = candidates
+    .map(
+      (c, i) =>
+        `${i + 1}. Path: ${c.pagePath}\n   Snippet: ${c.snippet}\n   Score: ${c.score}`,
+    )
+    .join('\n');
+
+  return [
+    '## Content to Save\n',
+    body,
+    '\n\n## Content Analysis\n',
+    `Keywords: ${analysis.keywords.join(', ')}\n`,
+    `Information Type: ${analysis.informationType}\n`,
+    '\n## Search Candidates\n',
+    candidateList,
+  ].join('');
+}
+
+// Runtime type guard for a single LLM-produced suggestion item.
+// Rejects items whose path lacks the required trailing slash (paths must
+// denote directories) or whose label/description is missing or empty.
+const isValidEvaluatedSuggestion = (
+  item: unknown,
+): item is EvaluatedSuggestion => {
+  if (item == null || typeof item !== 'object') {
+    return false;
+  }
+
+  const obj = item as Record<string, unknown>;
+
+  // path: must be a string ending with '/' (directory-path convention)
+  if (typeof obj.path !== 'string' || !obj.path.endsWith('/')) {
+    return false;
+  }
+
+  // label: non-empty display string
+  if (typeof obj.label !== 'string' || obj.label.length === 0) {
+    return false;
+  }
+
+  // description: non-empty rationale string
+  if (typeof obj.description !== 'string' || obj.description.length === 0) {
+    return false;
+  }
+
+  return true;
+};
+
+// Guard for the whole parsed LLM response: must be an array (possibly
+// empty) whose every element passes isValidEvaluatedSuggestion.
+const isValidEvaluatedSuggestionArray = (
+  parsed: unknown,
+): parsed is EvaluatedSuggestion[] => {
+  if (!Array.isArray(parsed)) {
+    return false;
+  }
+  return parsed.every(isValidEvaluatedSuggestion);
+};
+
+/**
+ * Asks the LLM to evaluate search candidates and propose ranked save-location
+ * directories for the given content.
+ *
+ * @param body - The content to be saved.
+ * @param analysis - Keywords and flow/stock information type of the content.
+ * @param candidates - Full-text-search candidates (path, snippet, score).
+ * @returns Validated suggestions in the LLM's ranking order; may be empty.
+ *          Rejects when the LLM call fails or the response does not parse /
+ *          validate (error handling delegated to callLlmForJson).
+ */
+export const evaluateCandidates = (
+  body: string,
+  analysis: ContentAnalysis,
+  candidates: SearchCandidate[],
+): Promise<EvaluatedSuggestion[]> => {
+  const userMessage = buildUserMessage(body, analysis, candidates);
+  return callLlmForJson(
+    SYSTEM_PROMPT,
+    userMessage,
+    isValidEvaluatedSuggestionArray,
+    'Invalid candidate evaluation response: each item must have path (ending with /), label, and description',
+  );
+};

+ 170 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/generate-category-suggestion.spec.ts

@@ -0,0 +1,170 @@
+import type { SearchCandidate } from '../../interfaces/suggest-path-types';
+import {
+  extractTopLevelSegmentName,
+  generateCategorySuggestion,
+} from './generate-category-suggestion';
+
+// vi.hoisted ensures the mock fn exists before the hoisted vi.mock
+// factory below runs.
+const mocks = vi.hoisted(() => {
+  return {
+    resolveParentGrantMock: vi.fn(),
+  };
+});
+
+vi.mock('./resolve-parent-grant', () => ({
+  resolveParentGrant: mocks.resolveParentGrantMock,
+}));
+
+// Literal copies of PageGrant values for readable assertions.
+const GRANT_PUBLIC = 1;
+const GRANT_OWNER = 4;
+
+// Builds SearchCandidate fixtures from (path, score) pairs; snippets are
+// irrelevant to category extraction so they are left empty.
+function createCandidates(
+  pages: { path: string; score: number }[],
+): SearchCandidate[] {
+  return pages.map((p) => ({
+    pagePath: p.path,
+    snippet: '',
+    score: p.score,
+  }));
+}
+
+describe('extractTopLevelSegmentName', () => {
+  // The helper must return the first path segment regardless of depth,
+  // and null only for the root path.
+  it('should extract segment name from nested path', () => {
+    expect(extractTopLevelSegmentName('/tech-notes/React/hooks')).toBe(
+      'tech-notes',
+    );
+  });
+
+  it('should extract segment name from two-level path', () => {
+    expect(extractTopLevelSegmentName('/tech-notes/React')).toBe('tech-notes');
+  });
+
+  it('should extract segment name from single-level path', () => {
+    expect(extractTopLevelSegmentName('/tech-notes')).toBe('tech-notes');
+  });
+
+  it('should return null for root path', () => {
+    expect(extractTopLevelSegmentName('/')).toBeNull();
+  });
+});
+
+describe('generateCategorySuggestion', () => {
+  beforeEach(() => {
+    vi.resetAllMocks();
+    // Default: the top-level directory resolves to a public grant.
+    mocks.resolveParentGrantMock.mockResolvedValue(GRANT_PUBLIC);
+  });
+
+  describe('when candidates are provided', () => {
+    it('should return a suggestion with type "category"', async () => {
+      const candidates = createCandidates([
+        { path: '/tech-notes/React/hooks', score: 10 },
+      ]);
+
+      const result = await generateCategorySuggestion(candidates);
+
+      expect(result).not.toBeNull();
+      expect(result?.type).toBe('category');
+    });
+
+    it('should extract top-level segment from top candidate path', async () => {
+      // Only the first (highest-scored) candidate drives the category;
+      // '/guides/...' must be ignored here.
+      const candidates = createCandidates([
+        { path: '/tech-notes/React/hooks', score: 10 },
+        { path: '/guides/TypeScript/basics', score: 8 },
+      ]);
+
+      const result = await generateCategorySuggestion(candidates);
+
+      expect(result?.path).toBe('/tech-notes/');
+    });
+
+    it('should return path with trailing slash', async () => {
+      const candidates = createCandidates([
+        { path: '/tech-notes/React/hooks', score: 10 },
+      ]);
+
+      const result = await generateCategorySuggestion(candidates);
+
+      expect(result?.path).toMatch(/\/$/);
+    });
+
+    it('should extract top-level even from deeply nested path', async () => {
+      const candidates = createCandidates([
+        { path: '/guides/a/b/c/d', score: 10 },
+      ]);
+
+      const result = await generateCategorySuggestion(candidates);
+
+      expect(result?.path).toBe('/guides/');
+    });
+
+    it('should generate description from top-level segment name', async () => {
+      const candidates = createCandidates([
+        { path: '/tech-notes/React/hooks', score: 10 },
+      ]);
+
+      const result = await generateCategorySuggestion(candidates);
+
+      expect(result?.description).toBe('Top-level category: tech-notes');
+    });
+
+    it('should have label "Save under category"', async () => {
+      const candidates = createCandidates([
+        { path: '/tech-notes/React/hooks', score: 10 },
+      ]);
+
+      const result = await generateCategorySuggestion(candidates);
+
+      expect(result?.label).toBe('Save under category');
+    });
+
+    it('should resolve grant from top-level directory', async () => {
+      // The grant must come from resolveParentGrant('/<segment>/'),
+      // not be hard-coded.
+      mocks.resolveParentGrantMock.mockResolvedValue(GRANT_PUBLIC);
+      const candidates = createCandidates([
+        { path: '/tech-notes/React/hooks', score: 10 },
+      ]);
+
+      const result = await generateCategorySuggestion(candidates);
+
+      expect(mocks.resolveParentGrantMock).toHaveBeenCalledWith('/tech-notes/');
+      expect(result?.grant).toBe(GRANT_PUBLIC);
+    });
+
+    it('should return GRANT_OWNER when parent page not found', async () => {
+      mocks.resolveParentGrantMock.mockResolvedValue(GRANT_OWNER);
+      const candidates = createCandidates([
+        { path: '/nonexistent/page', score: 10 },
+      ]);
+
+      const result = await generateCategorySuggestion(candidates);
+
+      expect(result?.grant).toBe(GRANT_OWNER);
+    });
+  });
+
+  describe('when top result is a single-segment page', () => {
+    it('should return the page path as category', async () => {
+      const candidates = createCandidates([
+        { path: '/engineering', score: 10 },
+      ]);
+
+      const result = await generateCategorySuggestion(candidates);
+
+      expect(result).not.toBeNull();
+      expect(result?.path).toBe('/engineering/');
+      expect(result?.description).toBe('Top-level category: engineering');
+    });
+  });
+
+  describe('when candidates are empty', () => {
+    it('should return null', async () => {
+      const result = await generateCategorySuggestion([]);
+
+      expect(result).toBeNull();
+    });
+
+    it('should not call resolveParentGrant', async () => {
+      await generateCategorySuggestion([]);
+
+      expect(mocks.resolveParentGrantMock).not.toHaveBeenCalled();
+    });
+  });
+});

+ 37 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/generate-category-suggestion.ts

@@ -0,0 +1,37 @@
+import type {
+  PathSuggestion,
+  SearchCandidate,
+} from '../../interfaces/suggest-path-types';
+import { SuggestionType } from '../../interfaces/suggest-path-types';
+import { resolveParentGrant } from './resolve-parent-grant';
+
+const CATEGORY_LABEL = 'Save under category';
+
+/**
+ * Returns the first path segment of a page path (e.g. '/a/b/c' → 'a'),
+ * or null for the root path '/' (no segments).
+ */
+export function extractTopLevelSegmentName(pagePath: string): string | null {
+  const segments = pagePath.split('/').filter(Boolean);
+  return segments[0] ?? null;
+}
+
+/**
+ * Builds a "save under top-level category" suggestion from the first
+ * (highest-ranked) search candidate.
+ *
+ * @param candidates - Search candidates; only candidates[0] is consulted.
+ * @returns A category-type PathSuggestion whose grant is resolved from the
+ *          top-level directory, or null when there are no candidates or the
+ *          top candidate is the root path.
+ */
+export const generateCategorySuggestion = async (
+  candidates: SearchCandidate[],
+): Promise<PathSuggestion | null> => {
+  if (candidates.length === 0) {
+    return null;
+  }
+
+  const segmentName = extractTopLevelSegmentName(candidates[0].pagePath);
+  if (segmentName == null) {
+    // Top candidate is the root page — no category to derive.
+    return null;
+  }
+
+  const topLevelPath = `/${segmentName}/`;
+  // Inherit permissions from the existing top-level directory (falls back
+  // per resolveParentGrant when the page does not exist).
+  const grant = await resolveParentGrant(topLevelPath);
+
+  return {
+    type: SuggestionType.CATEGORY,
+    path: topLevelPath,
+    label: CATEGORY_LABEL,
+    description: `Top-level category: ${segmentName}`,
+    grant,
+  };
+};

+ 136 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/generate-memo-suggestion.spec.ts

@@ -0,0 +1,136 @@
+import { generateMemoSuggestion } from './generate-memo-suggestion';
+
+// vi.hoisted ensures mock objects exist before the hoisted vi.mock
+// factories below run.
+const mocks = vi.hoisted(() => {
+  return {
+    configManagerMock: {
+      getConfig: vi.fn(),
+    },
+    resolveParentGrantMock: vi.fn(),
+  };
+});
+
+// Stub @growi/core's PageGrant constants so the module under test can be
+// imported without the real package.
+vi.mock('@growi/core', () => ({
+  PageGrant: {
+    GRANT_PUBLIC: 1,
+    GRANT_RESTRICTED: 2,
+    GRANT_OWNER: 4,
+    GRANT_USER_GROUP: 5,
+  },
+}));
+
+vi.mock('@growi/core/dist/utils/page-path-utils', () => ({
+  userHomepagePath: (user: { username: string }) => `/user/${user.username}`,
+}));
+
+vi.mock('~/server/service/config-manager', () => {
+  return { configManager: mocks.configManagerMock };
+});
+
+vi.mock('./resolve-parent-grant', () => ({
+  resolveParentGrant: mocks.resolveParentGrantMock,
+}));
+
+// Literal copies of the mocked PageGrant values for readable assertions.
+const GRANT_PUBLIC = 1;
+const GRANT_OWNER = 4;
+const GRANT_USER_GROUP = 5;
+
+describe('generateMemoSuggestion', () => {
+  beforeEach(() => {
+    vi.resetAllMocks();
+  });
+
+  describe('when user pages are enabled (default)', () => {
+    beforeEach(() => {
+      // security:disableUserPages = false → memo lives under the user's
+      // homepage with a fixed owner-only grant.
+      mocks.configManagerMock.getConfig.mockImplementation((key: string) => {
+        if (key === 'security:disableUserPages') return false;
+        return undefined;
+      });
+    });
+
+    it('should return a suggestion with type "memo"', async () => {
+      const result = await generateMemoSuggestion({ username: 'alice' });
+      expect(result.type).toBe('memo');
+    });
+
+    it('should generate path under user home directory', async () => {
+      const result = await generateMemoSuggestion({ username: 'alice' });
+      expect(result.path).toBe('/user/alice/memo/');
+    });
+
+    it('should set grant to GRANT_OWNER (4)', async () => {
+      const result = await generateMemoSuggestion({ username: 'alice' });
+      expect(result.grant).toBe(GRANT_OWNER);
+    });
+
+    it('should not call resolveParentGrant', async () => {
+      // Grant is fixed by convention in this branch — no DB lookup.
+      await generateMemoSuggestion({ username: 'alice' });
+      expect(mocks.resolveParentGrantMock).not.toHaveBeenCalled();
+    });
+
+    it('should include a fixed description', async () => {
+      const result = await generateMemoSuggestion({ username: 'alice' });
+      expect(result.description).toBe('Save to your personal memo area');
+    });
+
+    it('should include a label', async () => {
+      const result = await generateMemoSuggestion({ username: 'alice' });
+      expect(result.label).toBe('Save as memo');
+    });
+
+    it('should generate path with trailing slash', async () => {
+      const result = await generateMemoSuggestion({ username: 'alice' });
+      expect(result.path).toMatch(/\/$/);
+    });
+  });
+
+  describe('when user pages are disabled', () => {
+    beforeEach(() => {
+      // security:disableUserPages = true → memo falls back to /memo/<name>/
+      // and the grant is resolved from the parent page.
+      mocks.configManagerMock.getConfig.mockImplementation((key: string) => {
+        if (key === 'security:disableUserPages') return true;
+        return undefined;
+      });
+    });
+
+    it('should generate path under alternative namespace', async () => {
+      mocks.resolveParentGrantMock.mockResolvedValue(GRANT_OWNER);
+      const result = await generateMemoSuggestion({ username: 'bob' });
+      expect(result.path).toBe('/memo/bob/');
+    });
+
+    it('should resolve grant from parent page via resolveParentGrant', async () => {
+      mocks.resolveParentGrantMock.mockResolvedValue(GRANT_PUBLIC);
+      const result = await generateMemoSuggestion({ username: 'bob' });
+      expect(result.grant).toBe(GRANT_PUBLIC);
+    });
+
+    it('should call resolveParentGrant with the generated path', async () => {
+      mocks.resolveParentGrantMock.mockResolvedValue(GRANT_OWNER);
+      await generateMemoSuggestion({ username: 'bob' });
+      expect(mocks.resolveParentGrantMock).toHaveBeenCalledWith('/memo/bob/');
+    });
+
+    it('should use GRANT_USER_GROUP when parent has user group grant', async () => {
+      mocks.resolveParentGrantMock.mockResolvedValue(GRANT_USER_GROUP);
+      const result = await generateMemoSuggestion({ username: 'bob' });
+      expect(result.grant).toBe(GRANT_USER_GROUP);
+    });
+
+    it('should return a suggestion with type "memo"', async () => {
+      mocks.resolveParentGrantMock.mockResolvedValue(GRANT_OWNER);
+      const result = await generateMemoSuggestion({ username: 'bob' });
+      expect(result.type).toBe('memo');
+    });
+
+    it('should generate path with trailing slash', async () => {
+      mocks.resolveParentGrantMock.mockResolvedValue(GRANT_OWNER);
+      const result = await generateMemoSuggestion({ username: 'bob' });
+      expect(result.path).toMatch(/\/$/);
+    });
+
+    it('should include same fixed description as enabled case', async () => {
+      mocks.resolveParentGrantMock.mockResolvedValue(GRANT_OWNER);
+      const result = await generateMemoSuggestion({ username: 'bob' });
+      expect(result.description).toBe('Save to your personal memo area');
+    });
+  });
+});

+ 41 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/generate-memo-suggestion.ts

@@ -0,0 +1,41 @@
+import { PageGrant } from '@growi/core';
+import { userHomepagePath } from '@growi/core/dist/utils/page-path-utils';
+
+import { configManager } from '~/server/service/config-manager';
+
+import type { PathSuggestion } from '../../interfaces/suggest-path-types';
+import { SuggestionType } from '../../interfaces/suggest-path-types';
+import { resolveParentGrant } from './resolve-parent-grant';
+
+const MEMO_LABEL = 'Save as memo';
+const MEMO_DESCRIPTION = 'Save to your personal memo area';
+
+/**
+ * Builds the "save as personal memo" suggestion for the given user.
+ *
+ * Two branches depending on the `security:disableUserPages` config:
+ * - user pages enabled: path is `<user homepage>/memo/` with a fixed
+ *   owner-only grant;
+ * - user pages disabled: path is `/memo/<username>/` and the grant is
+ *   resolved from the parent page hierarchy.
+ */
+export const generateMemoSuggestion = async (user: {
+  username: string;
+}): Promise<PathSuggestion> => {
+  const disableUserPages = configManager.getConfig('security:disableUserPages');
+
+  if (disableUserPages) {
+    // When user pages are disabled, memo falls back to /memo/<username>/
+    // which may have inherited grant from an ancestor page — resolve dynamically
+    const path = `/memo/${user.username}/`;
+    const grant = await resolveParentGrant(path);
+    return {
+      type: SuggestionType.MEMO,
+      path,
+      label: MEMO_LABEL,
+      description: MEMO_DESCRIPTION,
+      grant,
+    };
+  }
+
+  // When user pages are enabled, memo is saved under the user's homepage
+  // which is always owner-only by convention — no need to resolve
+  return {
+    type: SuggestionType.MEMO,
+    path: `${userHomepagePath(user)}/memo/`,
+    label: MEMO_LABEL,
+    description: MEMO_DESCRIPTION,
+    grant: PageGrant.GRANT_OWNER,
+  };
+};

+ 383 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/generate-suggestions.spec.ts

@@ -0,0 +1,383 @@
+import type { IUserHasId } from '@growi/core/dist/interfaces';
+
+import type {
+  ContentAnalysis,
+  EvaluatedSuggestion,
+  PathSuggestion,
+  SearchCandidate,
+  SearchService,
+} from '../../interfaces/suggest-path-types';
+
// vi.hoisted guarantees these mock fns exist before the vi.mock factory
// calls below (which Vitest hoists to the top of the module) capture them.
const mocks = vi.hoisted(() => {
  return {
    generateMemoSuggestionMock: vi.fn(),
    analyzeContentMock: vi.fn(),
    retrieveSearchCandidatesMock: vi.fn(),
    evaluateCandidatesMock: vi.fn(),
    generateCategorySuggestionMock: vi.fn(),
    resolveParentGrantMock: vi.fn(),
    loggerErrorMock: vi.fn(),
  };
});

// Replace every collaborator of generate-suggestions with a controllable mock
// so each pipeline stage can be made to succeed or fail independently.
vi.mock('./generate-memo-suggestion', () => ({
  generateMemoSuggestion: mocks.generateMemoSuggestionMock,
}));

vi.mock('./analyze-content', () => ({
  analyzeContent: mocks.analyzeContentMock,
}));

vi.mock('./retrieve-search-candidates', () => ({
  retrieveSearchCandidates: mocks.retrieveSearchCandidatesMock,
}));

vi.mock('./evaluate-candidates', () => ({
  evaluateCandidates: mocks.evaluateCandidatesMock,
}));

vi.mock('./generate-category-suggestion', () => ({
  generateCategorySuggestion: mocks.generateCategorySuggestionMock,
}));

vi.mock('./resolve-parent-grant', () => ({
  resolveParentGrant: mocks.resolveParentGrantMock,
}));

// The logger factory is mocked so tests can observe logger.error calls
// emitted on each graceful-degradation path.
vi.mock('~/utils/logger', () => ({
  default: () => ({
    error: mocks.loggerErrorMock,
  }),
}));
+
// --- Shared fixtures -------------------------------------------------------

// Minimal user shape; only _id and username are read by the code under test.
const mockUser = {
  _id: 'user123',
  username: 'alice',
} as unknown as IUserHasId;

const mockUserGroups = [
  'group1',
  'group2',
] as unknown as import('~/server/interfaces/mongoose-utils').ObjectIdLike[];

// Passed through to retrieveSearchCandidates; never invoked directly here.
const mockSearchService = {
  searchKeyword: vi.fn(),
} as unknown as SearchService;

// Canonical memo suggestion returned by the mocked generateMemoSuggestion.
const memoSuggestion: PathSuggestion = {
  type: 'memo',
  path: '/user/alice/memo/',
  label: 'Save as memo',
  description: 'Save to your personal memo area',
  grant: 4,
};

// Result of the mocked first AI call (keywords + flow/stock classification).
const mockAnalysis: ContentAnalysis = {
  keywords: ['React', 'hooks'],
  informationType: 'stock',
};

// Elasticsearch candidates shared by the evaluate and category stages.
const mockCandidates: SearchCandidate[] = [
  {
    pagePath: '/tech/React/hooks',
    snippet: 'React hooks overview',
    score: 10.5,
  },
  { pagePath: '/tech/React/state', snippet: 'State management', score: 8.2 },
];

// Result of the mocked second AI call (candidate evaluation).
const mockEvaluated: EvaluatedSuggestion[] = [
  {
    path: '/tech/React/',
    label: 'Save near related pages',
    description:
      'This area contains React documentation. Your stock content fits well here.',
  },
  {
    path: '/tech/React/performance/',
    label: 'New section for performance topics',
    description: 'A new sibling section alongside existing React pages.',
  },
];

// Canonical category suggestion returned by the mocked category generator.
const categorySuggestion: PathSuggestion = {
  type: 'category',
  path: '/tech/',
  label: 'Save under category',
  description: 'Top-level category: tech',
  grant: 1,
};
+
describe('generateSuggestions', () => {
  beforeEach(() => {
    vi.resetAllMocks();
    // Memo generation is assumed to succeed in every scenario below.
    mocks.generateMemoSuggestionMock.mockResolvedValue(memoSuggestion);
  });

  // Dynamic import so the module under test is loaded after the hoisted
  // vi.mock factories are in effect.
  const callGenerateSuggestions = async () => {
    const { generateSuggestions } = await import('./generate-suggestions');
    return generateSuggestions(
      mockUser,
      'Some page content',
      mockUserGroups,
      mockSearchService,
    );
  };

  // Every pipeline stage succeeds: memo + 2 search + 1 category.
  describe('successful full pipeline', () => {
    beforeEach(() => {
      mocks.analyzeContentMock.mockResolvedValue(mockAnalysis);
      mocks.retrieveSearchCandidatesMock.mockResolvedValue(mockCandidates);
      mocks.evaluateCandidatesMock.mockResolvedValue(mockEvaluated);
      mocks.generateCategorySuggestionMock.mockResolvedValue(
        categorySuggestion,
      );
      mocks.resolveParentGrantMock.mockResolvedValue(1);
    });

    it('should return memo + search + category suggestions when all succeed', async () => {
      const result = await callGenerateSuggestions();

      expect(result).toHaveLength(4); // memo + 2 search + 1 category
      expect(result[0]).toEqual(memoSuggestion);
      expect(result[1]).toMatchObject({ type: 'search', path: '/tech/React/' });
      expect(result[2]).toMatchObject({
        type: 'search',
        path: '/tech/React/performance/',
      });
      expect(result[3]).toEqual(categorySuggestion);
    });

    it('should always include memo as the first suggestion', async () => {
      const result = await callGenerateSuggestions();

      expect(result[0]).toEqual(memoSuggestion);
    });

    it('should map informationType from content analysis to search-type suggestions', async () => {
      const result = await callGenerateSuggestions();

      const searchSuggestions = result.filter((s) => s.type === 'search');
      for (const s of searchSuggestions) {
        expect(s.informationType).toBe('stock');
      }
    });

    it('should not include informationType on memo or category suggestions', async () => {
      const result = await callGenerateSuggestions();

      expect(result[0].informationType).toBeUndefined(); // memo
      expect(result[3].informationType).toBeUndefined(); // category
    });

    it('should resolve grant for each evaluated suggestion path', async () => {
      // One grant per evaluated path, in evaluation order.
      mocks.resolveParentGrantMock
        .mockResolvedValueOnce(1)
        .mockResolvedValueOnce(4);

      const result = await callGenerateSuggestions();

      expect(mocks.resolveParentGrantMock).toHaveBeenCalledTimes(2);
      expect(mocks.resolveParentGrantMock).toHaveBeenCalledWith('/tech/React/');
      expect(mocks.resolveParentGrantMock).toHaveBeenCalledWith(
        '/tech/React/performance/',
      );
      expect(result[1].grant).toBe(1);
      expect(result[2].grant).toBe(4);
    });

    it('should pass correct arguments to analyzeContent', async () => {
      await callGenerateSuggestions();

      expect(mocks.analyzeContentMock).toHaveBeenCalledWith(
        'Some page content',
      );
    });

    it('should pass keywords, user, userGroups, and searchService to retrieveSearchCandidates', async () => {
      await callGenerateSuggestions();

      expect(mocks.retrieveSearchCandidatesMock).toHaveBeenCalledWith(
        ['React', 'hooks'],
        mockUser,
        mockUserGroups,
        mockSearchService,
      );
    });

    it('should pass body, analysis, and candidates to evaluateCandidates', async () => {
      await callGenerateSuggestions();

      expect(mocks.evaluateCandidatesMock).toHaveBeenCalledWith(
        'Some page content',
        mockAnalysis,
        mockCandidates,
      );
    });

    it('should pass candidates to generateCategorySuggestion', async () => {
      await callGenerateSuggestions();

      expect(mocks.generateCategorySuggestionMock).toHaveBeenCalledWith(
        mockCandidates,
      );
    });
  });

  // Each stage failure degrades the result instead of rejecting the call.
  describe('graceful degradation', () => {
    it('should fall back to memo only when content analysis fails', async () => {
      mocks.analyzeContentMock.mockRejectedValue(
        new Error('AI service unavailable'),
      );

      const result = await callGenerateSuggestions();

      expect(result).toEqual([memoSuggestion]);
      // Analysis failure short-circuits every downstream stage.
      expect(mocks.retrieveSearchCandidatesMock).not.toHaveBeenCalled();
      expect(mocks.evaluateCandidatesMock).not.toHaveBeenCalled();
      expect(mocks.generateCategorySuggestionMock).not.toHaveBeenCalled();
    });

    it('should log error when content analysis fails', async () => {
      mocks.analyzeContentMock.mockRejectedValue(
        new Error('AI service unavailable'),
      );

      await callGenerateSuggestions();

      expect(mocks.loggerErrorMock).toHaveBeenCalled();
    });

    it('should fall back to memo only when search candidate retrieval fails', async () => {
      mocks.analyzeContentMock.mockResolvedValue(mockAnalysis);
      mocks.retrieveSearchCandidatesMock.mockRejectedValue(
        new Error('Search service down'),
      );

      const result = await callGenerateSuggestions();

      expect(result).toEqual([memoSuggestion]);
      expect(mocks.loggerErrorMock).toHaveBeenCalled();
    });

    it('should return memo + category when candidate evaluation fails', async () => {
      mocks.analyzeContentMock.mockResolvedValue(mockAnalysis);
      mocks.retrieveSearchCandidatesMock.mockResolvedValue(mockCandidates);
      mocks.evaluateCandidatesMock.mockRejectedValue(
        new Error('AI evaluation failed'),
      );
      mocks.generateCategorySuggestionMock.mockResolvedValue(
        categorySuggestion,
      );

      const result = await callGenerateSuggestions();

      expect(result).toEqual([memoSuggestion, categorySuggestion]);
      expect(mocks.loggerErrorMock).toHaveBeenCalled();
    });

    it('should return memo + search when category generation fails', async () => {
      mocks.analyzeContentMock.mockResolvedValue(mockAnalysis);
      mocks.retrieveSearchCandidatesMock.mockResolvedValue(mockCandidates);
      mocks.evaluateCandidatesMock.mockResolvedValue(mockEvaluated);
      mocks.resolveParentGrantMock.mockResolvedValue(1);
      mocks.generateCategorySuggestionMock.mockRejectedValue(
        new Error('Category failed'),
      );

      const result = await callGenerateSuggestions();

      expect(result).toHaveLength(3); // memo + 2 search (no category)
      expect(result[0]).toEqual(memoSuggestion);
      expect(result[1]).toMatchObject({ type: 'search' });
      expect(result[2]).toMatchObject({ type: 'search' });
      expect(mocks.loggerErrorMock).toHaveBeenCalled();
    });

    it('should return memo only when both search pipeline and category fail', async () => {
      mocks.analyzeContentMock.mockResolvedValue(mockAnalysis);
      mocks.retrieveSearchCandidatesMock.mockRejectedValue(
        new Error('Search down'),
      );

      const result = await callGenerateSuggestions();

      expect(result).toEqual([memoSuggestion]);
    });

    it('should skip search suggestions when no candidates pass threshold (empty array)', async () => {
      mocks.analyzeContentMock.mockResolvedValue(mockAnalysis);
      mocks.retrieveSearchCandidatesMock.mockResolvedValue([]);
      mocks.generateCategorySuggestionMock.mockResolvedValue(null);

      const result = await callGenerateSuggestions();

      expect(result).toEqual([memoSuggestion]);
      // Zero candidates must skip the evaluate AI call entirely.
      expect(mocks.evaluateCandidatesMock).not.toHaveBeenCalled();
    });

    it('should omit category when generateCategorySuggestion returns null', async () => {
      mocks.analyzeContentMock.mockResolvedValue(mockAnalysis);
      mocks.retrieveSearchCandidatesMock.mockResolvedValue(mockCandidates);
      mocks.evaluateCandidatesMock.mockResolvedValue(mockEvaluated);
      mocks.resolveParentGrantMock.mockResolvedValue(1);
      mocks.generateCategorySuggestionMock.mockResolvedValue(null);

      const result = await callGenerateSuggestions();

      expect(result).toHaveLength(3); // memo + 2 search, no category
      expect(result.every((s) => s.type !== 'category')).toBe(true);
    });
  });

  // informationType from analysis is propagated only to search suggestions.
  describe('informationType mapping', () => {
    it('should map flow informationType to search-type suggestions', async () => {
      const flowAnalysis: ContentAnalysis = {
        keywords: ['meeting', 'minutes'],
        informationType: 'flow',
      };
      mocks.analyzeContentMock.mockResolvedValue(flowAnalysis);
      mocks.retrieveSearchCandidatesMock.mockResolvedValue(mockCandidates);
      mocks.evaluateCandidatesMock.mockResolvedValue([mockEvaluated[0]]);
      mocks.resolveParentGrantMock.mockResolvedValue(1);
      mocks.generateCategorySuggestionMock.mockResolvedValue(null);

      const result = await callGenerateSuggestions();

      const searchSuggestion = result.find((s) => s.type === 'search');
      expect(searchSuggestion?.informationType).toBe('flow');
    });
  });

  // The evaluate pipeline and category generation run via Promise.allSettled,
  // so a failure in one must not suppress the other's result.
  describe('parallel execution', () => {
    it('should run evaluate pipeline and category generation independently', async () => {
      mocks.analyzeContentMock.mockResolvedValue(mockAnalysis);
      mocks.retrieveSearchCandidatesMock.mockResolvedValue(mockCandidates);
      mocks.evaluateCandidatesMock.mockRejectedValue(
        new Error('Evaluate failed'),
      );
      mocks.generateCategorySuggestionMock.mockResolvedValue(
        categorySuggestion,
      );

      const result = await callGenerateSuggestions();

      expect(result).toEqual([memoSuggestion, categorySuggestion]);
    });

    it('should return search suggestions even when category fails', async () => {
      mocks.analyzeContentMock.mockResolvedValue(mockAnalysis);
      mocks.retrieveSearchCandidatesMock.mockResolvedValue(mockCandidates);
      mocks.evaluateCandidatesMock.mockResolvedValue(mockEvaluated);
      mocks.resolveParentGrantMock.mockResolvedValue(1);
      mocks.generateCategorySuggestionMock.mockRejectedValue(
        new Error('Category failed'),
      );

      const result = await callGenerateSuggestions();

      const searchSuggestions = result.filter((s) => s.type === 'search');
      expect(searchSuggestions).toHaveLength(2);
    });
  });
});

+ 102 - 0
apps/app/src/features/ai-tools/suggest-path/server/services/generate-suggestions.ts

@@ -0,0 +1,102 @@
+import type { IUserHasId } from '@growi/core/dist/interfaces';
+
+import type { ObjectIdLike } from '~/server/interfaces/mongoose-utils';
+import loggerFactory from '~/utils/logger';
+
+import type {
+  ContentAnalysis,
+  PathSuggestion,
+  SearchCandidate,
+  SearchService,
+} from '../../interfaces/suggest-path-types';
+import { SuggestionType } from '../../interfaces/suggest-path-types';
+import { analyzeContent } from './analyze-content';
+import { evaluateCandidates } from './evaluate-candidates';
+import { generateCategorySuggestion } from './generate-category-suggestion';
+import { generateMemoSuggestion } from './generate-memo-suggestion';
+import { resolveParentGrant } from './resolve-parent-grant';
+import { retrieveSearchCandidates } from './retrieve-search-candidates';
+
+const logger = loggerFactory(
+  'growi:features:suggest-path:generate-suggestions',
+);
+
+export const generateSuggestions = async (
+  user: IUserHasId,
+  body: string,
+  userGroups: ObjectIdLike[],
+  searchService: SearchService,
+): Promise<PathSuggestion[]> => {
+  const memoSuggestion = await generateMemoSuggestion(user);
+
+  // 1st AI call: Content analysis (keyword extraction + flow/stock classification)
+  let analysis: ContentAnalysis;
+  try {
+    analysis = await analyzeContent(body);
+  } catch (err) {
+    logger.error('Content analysis failed, falling back to memo only:', err);
+    return [memoSuggestion];
+  }
+
+  // Retrieve search candidates (single ES query, shared by evaluate and category)
+  let candidates: SearchCandidate[];
+  try {
+    candidates = await retrieveSearchCandidates(
+      analysis.keywords,
+      user,
+      userGroups,
+      searchService,
+    );
+  } catch (err) {
+    logger.error(
+      'Search candidate retrieval failed, falling back to memo only:',
+      err,
+    );
+    return [memoSuggestion];
+  }
+
+  // Run evaluate pipeline and category generation in parallel
+  const [evaluateResult, categoryResult] = await Promise.allSettled([
+    // Evaluate pipeline: evaluate → grant resolution (skip if no candidates)
+    candidates.length > 0
+      ? (async (): Promise<PathSuggestion[]> => {
+          const evaluated = await evaluateCandidates(
+            body,
+            analysis,
+            candidates,
+          );
+          return Promise.all(
+            evaluated.map(async (s): Promise<PathSuggestion> => {
+              const grant = await resolveParentGrant(s.path);
+              return {
+                type: SuggestionType.SEARCH,
+                path: s.path,
+                label: s.label,
+                description: s.description,
+                grant,
+                informationType: analysis.informationType,
+              };
+            }),
+          );
+        })()
+      : Promise.resolve([]),
+    // Category generation (uses same candidates, no extra ES query)
+    generateCategorySuggestion(candidates),
+  ]);
+
+  const suggestions: PathSuggestion[] = [memoSuggestion];
+
+  if (evaluateResult.status === 'fulfilled') {
+    suggestions.push(...evaluateResult.value);
+  } else {
+    logger.error('Evaluate pipeline failed:', evaluateResult.reason);
+  }
+
+  if (categoryResult.status === 'fulfilled' && categoryResult.value != null) {
+    suggestions.push(categoryResult.value);
+  } else if (categoryResult.status === 'rejected') {
+    logger.error('Category generation failed:', categoryResult.reason);
+  }
+
+  return suggestions;
+};

Некоторые файлы не были показаны из-за большого количества измененных файлов