Sfoglia il codice sorgente

Merge branch 'feat/openai-vector-searching' into feat/155197-generate-vector-store-inside-growi

Shun Miyazawa 1 anno fa
parent
commit
3bbf90e3dc

+ 28 - 16
apps/app/src/server/routes/ogp.ts

@@ -1,6 +1,7 @@
 import * as fs from 'fs';
 import path from 'path';
 
+import { getIdStringForRef, type IUser } from '@growi/core';
 import { DevidedPagePath } from '@growi/core/dist/models';
 // eslint-disable-next-line no-restricted-imports
 import axios from 'axios';
@@ -9,11 +10,15 @@ import type {
 } from 'express';
 import type { ValidationError } from 'express-validator';
 import { param, validationResult } from 'express-validator';
+import type { HydratedDocument } from 'mongoose';
+import mongoose from 'mongoose';
 
 import loggerFactory from '~/utils/logger';
 import { projectRoot } from '~/utils/project-dir-utils';
 
 import { Attachment } from '../models/attachment';
+import type { PageDocument, PageModel } from '../models/page';
+import { configManager } from '../service/config-manager';
 import { convertStreamToBuffer } from '../util/stream';
 
 const logger = loggerFactory('growi:routes:ogp');
@@ -56,21 +61,27 @@ module.exports = function(crowi) {
 
   const renderOgp = async(req: Request, res: Response) => {
 
-    const { configManager } = crowi;
+    const User = mongoose.model<IUser>('User');
     const ogpUri = configManager.getConfig('crowi', 'app:ogpUri');
-    const page = req.body.page;
+    const page: PageDocument = req.body.page; // asserted by ogpValidator
 
-    let user;
-    let pageTitle: string;
-    let bufferedUserImage: Buffer;
+    const title = (new DevidedPagePath(page.path)).latter;
 
-    try {
-      const User = crowi.model('User');
-      user = await User.findById(page.creator._id.toString());
+    let user: IUser | null = null;
+    let userName = '(unknown)';
+    let userImage: Buffer = bufferedDefaultUserImageCache;
 
-      bufferedUserImage = user.imageUrlCached === DEFAULT_USER_IMAGE_URL ? bufferedDefaultUserImageCache : (await getBufferedUserImage(user.imageUrlCached));
-      // todo: consider page title
-      pageTitle = (new DevidedPagePath(page.path)).latter;
+    try {
+      if (page.creator != null) {
+        user = await User.findById(getIdStringForRef(page.creator));
+
+        if (user != null) {
+          userName = user.username;
+          userImage = user.imageUrlCached === DEFAULT_USER_IMAGE_URL
+            ? bufferedDefaultUserImageCache
+            : await getBufferedUserImage(user.imageUrlCached);
+        }
+      }
     }
     catch (err) {
       logger.error(err);
@@ -82,9 +93,9 @@ module.exports = function(crowi) {
       result = await axios.post(
         ogpUri, {
           data: {
-            title: pageTitle,
-            userName: user.username,
-            userImage: bufferedUserImage,
+            title,
+            userName,
+            userImage,
           },
         }, {
           responseType: 'stream',
@@ -118,9 +129,10 @@ module.exports = function(crowi) {
 
     if (errors.isEmpty()) {
 
+      const Page = mongoose.model<HydratedDocument<PageDocument>, PageModel>('Page');
+
       try {
-        const Page = crowi.model('Page');
-        const page = await Page.findByIdAndViewer(req.params.pageId);
+        const page = await Page.findByIdAndViewer(req.params.pageId, null);
 
         if (page == null || page.status !== Page.STATUS_PUBLISHED || (page.grant !== Page.GRANT_PUBLIC && page.grant !== Page.GRANT_RESTRICTED)) {
           return res.status(400).send('the page does not exist');

+ 1 - 1
apps/app/src/server/service/openai/assistant/assistant.ts

@@ -35,7 +35,7 @@ const findAssistantByName = async(assistantName: string): Promise<OpenAI.Beta.As
 
 const getOrCreateAssistant = async(type: AssistantType): Promise<OpenAI.Beta.Assistant> => {
   const appSiteUrl = configManager.getConfig('crowi', 'app:siteUrl');
-  const assistantName = `GROWI ${type} Assistant for ${appSiteUrl} ${configManager.getConfig('crowi', 'openai:assistantNameSuffix')}}`;
+  const assistantName = `GROWI ${type} Assistant for ${appSiteUrl} ${configManager.getConfig('crowi', 'openai:assistantNameSuffix')}`;
 
   const assistantOnRemote = await findAssistantByName(assistantName);
   if (assistantOnRemote != null) {

+ 5 - 0
apps/app/src/server/service/openai/openai.ts

@@ -101,6 +101,11 @@ class OpenaiService implements IOpenaiService {
 
     const vectorStoreFileRelations = Array.from(vectorStoreFileRelationsMap.values());
     const uploadedFileIds = vectorStoreFileRelations.map(data => data.fileIds).flat();
+
+    if (uploadedFileIds.length === 0) {
+      return;
+    }
+
     try {
       // Create vector store file
       const vectorStore = await this.getOrCreateVectorStoreForPublicScope();

+ 6 - 0
packages/markdown-splitter/package.json

@@ -39,5 +39,11 @@
     "react-dom": "^18.2.0"
   },
   "dependencies": {
+    "@types/js-yaml": "^4.0.9",
+    "remark-frontmatter": "^5.0.0",
+    "remark-gfm": "^4.0.0",
+    "remark-parse": "^11.0.0",
+    "remark-stringify": "^11.0.0",
+    "unified": "^11.0.0"
   }
 }

+ 78 - 54
packages/markdown-splitter/src/services/markdown-splitter.ts

@@ -1,3 +1,11 @@
+import yaml from 'js-yaml';
+import remarkFrontmatter from 'remark-frontmatter'; // Frontmatter processing
+import remarkGfm from 'remark-gfm'; // GFM processing
+import remarkParse from 'remark-parse';
+import type { Options as StringifyOptions } from 'remark-stringify';
+import remarkStringify from 'remark-stringify';
+import { unified } from 'unified';
+
 export type Chunk = {
   label: string;
   text: string;
@@ -5,101 +13,117 @@ export type Chunk = {
 
 /**
  * Processes and adds a new chunk to the chunks array if content is not empty.
- * Clears the contentLines array after processing.
- * @param chunks - The array to store chunks.
- * @param contentLines - The array of content lines.
+ * Clears the contentBuffer array after processing.
+ * @param chunks - The array to store processed chunks.
+ * @param contentBuffer - The array of content lines to be processed.
  * @param label - The label for the content chunk.
  */
-function processPendingContent(chunks: Chunk[], contentLines: string[], label: string) {
-  const text = contentLines.join('\n').trimEnd();
+function addContentChunk(chunks: Chunk[], contentBuffer: string[], label: string) {
+  const text = contentBuffer.join('\n\n').trimEnd();
   if (text !== '') {
     chunks.push({ label, text });
   }
-  contentLines.length = 0; // Clear the contentLines array
+  contentBuffer.length = 0; // Clear the contentBuffer array
 }
 
 /**
  * Updates the section numbers based on the heading depth and returns the updated section label.
  * Handles non-consecutive heading levels by initializing missing levels with 1.
  * @param sectionNumbers - The current section numbers.
- * @param depth - The depth of the heading (e.g., # is depth 1).
+ * @param headingDepth - The depth of the heading (e.g., # is depth 1).
  * @returns The updated section label.
  */
-function updateSectionNumbers(sectionNumbers: number[], depth: number): string {
-  if (depth > sectionNumbers.length) {
-    // If depth increases, initialize missing levels with 1
-    while (sectionNumbers.length < depth) {
+function updateSectionNumbers(sectionNumbers: number[], headingDepth: number): string {
+  if (headingDepth > sectionNumbers.length) {
+    // Initialize missing levels with 1
+    while (sectionNumbers.length < headingDepth) {
       sectionNumbers.push(1);
     }
   }
-  else if (depth === sectionNumbers.length) {
-    // Same level, increment the last number
-    sectionNumbers[depth - 1]++;
+  else if (headingDepth === sectionNumbers.length) {
+    // Increment the last number for the same level
+    sectionNumbers[headingDepth - 1]++;
   }
   else {
-    // Depth decreases, remove deeper levels and increment current level
-    sectionNumbers.splice(depth);
-    sectionNumbers[depth - 1]++;
+    // Remove deeper levels and increment the current level
+    sectionNumbers.splice(headingDepth);
+    sectionNumbers[headingDepth - 1]++;
   }
   return sectionNumbers.join('-');
 }
 
 /**
- * Splits Markdown text into labeled chunks, considering content that may start before any headers
- * and handling non-consecutive heading levels. Preserves list indentation and leading spaces while
- * reducing unnecessary line breaks. Ensures that no empty line is added between sections.
- * @param markdown - The input Markdown string.
+ * Splits Markdown text into labeled chunks using remark-parse and remark-stringify,
+ * considering content that may start before any headers and handling non-consecutive heading levels.
+ * @param markdownText - The input Markdown string.
  * @returns An array of labeled chunks.
  */
-export function splitMarkdownIntoChunks(markdown: string): Chunk[] {
+export async function splitMarkdownIntoChunks(markdownText: string): Promise<Chunk[]> {
   const chunks: Chunk[] = [];
   const sectionNumbers: number[] = [];
+  let frontmatter: Record<string, unknown> | null = null; // Variable to store frontmatter
+  const contentBuffer: string[] = [];
+  let currentSectionLabel = '';
 
-  if (typeof markdown !== 'string' || markdown.trim() === '') {
+  if (typeof markdownText !== 'string' || markdownText.trim() === '') {
     return chunks;
   }
 
-  const lines = markdown.split('\n');
-  const contentLines: string[] = [];
-  let currentLabel = '';
-  let previousLineEmpty = false;
+  const parser = unified()
+    .use(remarkParse)
+    .use(remarkFrontmatter, ['yaml'])
+    .use(remarkGfm); // Enable GFM extensions
 
-  for (const line of lines) {
-    const trimmedLine = line.trim();
+  const stringifyOptions: StringifyOptions = {
+    bullet: '-', // Set list bullet to hyphen
+    rule: '-', // Use hyphen for horizontal rules
+  };
 
-    if (trimmedLine.startsWith('#')) {
-      // Process any pending content before starting a new section
-      if (contentLines.length > 0) {
-        const contentLabel = currentLabel !== '' ? `${currentLabel}-content` : '0-content';
-        processPendingContent(chunks, contentLines, contentLabel);
-      }
+  const stringifier = unified()
+    .use(remarkFrontmatter, ['yaml'])
+    .use(remarkGfm)
+    .use(remarkStringify, stringifyOptions);
 
-      // Match heading level and text
-      const headerMatch = trimmedLine.match(/^(#+)\s+(.*)/);
-      if (headerMatch) {
-        const headingDepth = headerMatch[1].length;
-        currentLabel = updateSectionNumbers(sectionNumbers, headingDepth);
-        chunks.push({ label: `${currentLabel}-heading`, text: line });
-      }
+  const parsedTree = parser.parse(markdownText);
+
+  // Iterate over top-level nodes to prevent duplication
+  for (const node of parsedTree.children) {
+    if (node.type === 'yaml') {
+      frontmatter = yaml.load(node.value) as Record<string, unknown>;
     }
-    else if (trimmedLine === '') {
-      // Handle empty lines to avoid multiple consecutive empty lines
-      if (!previousLineEmpty && contentLines.length > 0) {
-        contentLines.push('');
-        previousLineEmpty = true;
+    else if (node.type === 'heading') {
+      // Process pending content before heading
+      if (contentBuffer.length > 0) {
+        const contentLabel = currentSectionLabel !== '' ? `${currentSectionLabel}-content` : '0-content';
+        addContentChunk(chunks, contentBuffer, contentLabel);
       }
+
+      const headingDepth = node.depth;
+      currentSectionLabel = updateSectionNumbers(sectionNumbers, headingDepth);
+
+      const headingMarkdown = stringifier.stringify(node as any);// eslint-disable-line @typescript-eslint/no-explicit-any
+      chunks.push({ label: `${currentSectionLabel}-heading`, text: headingMarkdown.trim() });
     }
     else {
-      // Add non-empty lines to the current content
-      contentLines.push(line);
-      previousLineEmpty = false;
+      // Add non-heading content to the buffer
+      const contentMarkdown = stringifier.stringify(node as any).trim(); // eslint-disable-line @typescript-eslint/no-explicit-any
+      if (contentMarkdown !== '') {
+        contentBuffer.push(contentMarkdown);
+      }
     }
   }
 
-  // Process any remaining content after the last line
-  if (contentLines.length > 0) {
-    const contentLabel = currentLabel !== '' ? `${currentLabel}-content` : '0-content';
-    processPendingContent(chunks, contentLines, contentLabel);
+  // Process any remaining content
+  if (contentBuffer.length > 0) {
+    const contentLabel = currentSectionLabel !== '' ? `${currentSectionLabel}-content` : '0-content';
+    addContentChunk(chunks, contentBuffer, contentLabel);
+  }
+
+  if (frontmatter) {
+    chunks.unshift({
+      label: 'frontmatter',
+      text: JSON.stringify(frontmatter, null, 2),
+    });
   }
 
   return chunks;

+ 61 - 20
packages/markdown-splitter/test/index.spec.ts

@@ -3,14 +3,14 @@ import { splitMarkdownIntoChunks } from '../src/services/markdown-splitter';
 
 describe('splitMarkdownIntoChunks', () => {
 
-  test('handles empty markdown string', () => {
+  test('handles empty markdown string', async() => {
     const markdown = '';
     const expected: Chunk[] = [];
-    const result = splitMarkdownIntoChunks(markdown);
+    const result = await splitMarkdownIntoChunks(markdown); // Await the result
     expect(result).toEqual(expected);
   });
 
-  test('handles markdown with only content and no headers', () => {
+  test('handles markdown with only content and no headers', async() => {
     const markdown = `This is some content without any headers.
 It spans multiple lines.
 
@@ -22,11 +22,11 @@ Another paragraph.
         text: 'This is some content without any headers.\nIt spans multiple lines.\n\nAnother paragraph.',
       },
     ];
-    const result = splitMarkdownIntoChunks(markdown);
+    const result = await splitMarkdownIntoChunks(markdown); // Await the result
     expect(result).toEqual(expected);
   });
 
-  test('handles markdown starting with a header', () => {
+  test('handles markdown starting with a header', async() => {
     const markdown = `
 # Header 1
 Content under header 1.
@@ -45,11 +45,11 @@ Content under header 2.
       { label: '2-heading', text: '# Header 2' },
       { label: '2-content', text: 'Content under header 2.' },
     ];
-    const result = splitMarkdownIntoChunks(markdown);
+    const result = await splitMarkdownIntoChunks(markdown); // Await the result
     expect(result).toEqual(expected);
   });
 
-  test('handles markdown with non-consecutive heading levels', () => {
+  test('handles markdown with non-consecutive heading levels', async() => {
     const markdown = `
 Introduction without a header.
 
@@ -114,11 +114,11 @@ Content of section 2.1.
         text: 'Content of section 2.1.',
       },
     ];
-    const result = splitMarkdownIntoChunks(markdown);
+    const result = await splitMarkdownIntoChunks(markdown); // Await the result
     expect(result).toEqual(expected);
   });
 
-  test('handles markdown with skipped heading levels', () => {
+  test('handles markdown with skipped heading levels', async() => {
     const markdown = `
 # Header 1
 Content under header 1.
@@ -142,11 +142,11 @@ Content under header 2.
       { label: '2-heading', text: '# Header 2' },
       { label: '2-content', text: 'Content under header 2.' },
     ];
-    const result = splitMarkdownIntoChunks(markdown);
+    const result = await splitMarkdownIntoChunks(markdown); // Await the result
     expect(result).toEqual(expected);
   });
 
-  test('handles malformed headings', () => {
+  test('handles malformed headings', async() => {
     const markdown = `
 # Header 1
 Content under header 1.
@@ -160,11 +160,11 @@ Content under header 1.1.1.1.
       { label: '1-1-1-1-heading', text: '#### Header 1.1.1.1' },
       { label: '1-1-1-1-content', text: 'Content under header 1.1.1.1.' },
     ];
-    const result = splitMarkdownIntoChunks(markdown);
+    const result = await splitMarkdownIntoChunks(markdown); // Await the result
     expect(result).toEqual(expected);
   });
 
-  test('handles multiple content blocks before any headers', () => {
+  test('handles multiple content blocks before any headers', async() => {
     const markdown = `
 This is the first paragraph without a header.
 
@@ -181,11 +181,11 @@ Content under header 1.
       { label: '1-heading', text: '# Header 1' },
       { label: '1-content', text: 'Content under header 1.' },
     ];
-    const result = splitMarkdownIntoChunks(markdown);
+    const result = await splitMarkdownIntoChunks(markdown); // Await the result
     expect(result).toEqual(expected);
   });
 
-  test('handles markdown with only headers and no content', () => {
+  test('handles markdown with only headers and no content', async() => {
     const markdown = `
 # Header 1
 
@@ -198,11 +198,11 @@ Content under header 1.
       { label: '1-1-heading', text: '## Header 1.1' },
       { label: '1-1-1-heading', text: '### Header 1.1.1' },
     ];
-    const result = splitMarkdownIntoChunks(markdown);
+    const result = await splitMarkdownIntoChunks(markdown); // Await the result
     expect(result).toEqual(expected);
   });
 
-  test('handles markdown with mixed content and headers', () => {
+  test('handles markdown with mixed content and headers', async() => {
     const markdown = `
 # Header 1
 Content under header 1.
@@ -222,11 +222,11 @@ Content under header 2.
       { label: '2-heading', text: '# Header 2' },
       { label: '2-content', text: 'Content under header 2.' },
     ];
-    const result = splitMarkdownIntoChunks(markdown);
+    const result = await splitMarkdownIntoChunks(markdown); // Await the result
     expect(result).toEqual(expected);
   });
 
-  test('preserves list indentation and reduces unnecessary line breaks', () => {
+  test('preserves list indentation and reduces unnecessary line breaks', async() => {
     const markdown = `
 # Header 1
 Content under header 1.
@@ -245,8 +245,49 @@ Content under header 2.
       { label: '2-heading', text: '# Header 2' },
       { label: '2-content', text: 'Content under header 2.' },
     ];
-    const result = splitMarkdownIntoChunks(markdown);
+    const result = await splitMarkdownIntoChunks(markdown); // Await the result
     expect(result).toEqual(expected);
   });
+  test('code blocks containing # are not treated as headings', async() => {
+    const markdown = `
+# Header 1
+Some introductory content.
+\`\`\`
+# This is a comment with a # symbol
+Some code line
+\`\`\`
+Additional content.
+# Header 2
+Content under header 2.
+    `;
+
+    const expected: Chunk[] = [
+      { label: '1-heading', text: '# Header 1' },
+      { label: '1-content', text: 'Some introductory content.\n\n```\n# This is a comment with a # symbol\nSome code line\n```\n\nAdditional content.' },
+      { label: '2-heading', text: '# Header 2' },
+      { label: '2-content', text: 'Content under header 2.' },
+    ];
 
+    const result = await splitMarkdownIntoChunks(markdown);
+    expect(result).toEqual(expected);
+  });
+  test('frontmatter is processed and labeled correctly', async() => {
+    const markdown = `---
+title: Test Document
+author: John Doe
+---
+
+# Header 1
+Some introductory content.
+    `;
+
+    const expected: Chunk[] = [
+      { label: 'frontmatter', text: JSON.stringify({ title: 'Test Document', author: 'John Doe' }, null, 2) },
+      { label: '1-heading', text: '# Header 1' },
+      { label: '1-content', text: 'Some introductory content.' },
+    ];
+
+    const result = await splitMarkdownIntoChunks(markdown);
+    expect(result).toEqual(expected);
+  });
 });

+ 1 - 1
packages/remark-attachment-refs/package.json

@@ -52,7 +52,7 @@
     "hast-util-select": "^6.0.2",
     "express": "^4.20.0",
     "mongoose": "^6.11.3",
-    "swr": "^2.0.3",
+    "swr": "^2.2.2",
     "universal-bunyan": "^0.9.2",
     "xss": "^1.0.15"
   },

+ 3 - 1
packages/remark-lsx/package.json

@@ -38,9 +38,11 @@
     "@growi/ui": "link:../ui",
     "escape-string-regexp": "^4.0.0",
     "express": "^4.20.0",
+    "express-validator": "^6.14.0",
     "http-errors": "^2.0.0",
     "mongoose": "^6.11.3",
-    "swr": "^2.2.2"
+    "swr": "^2.2.2",
+    "xss": "^1.0.15"
   },
   "devDependencies": {
     "eslint-plugin-regex": "^1.8.0",

+ 39 - 2
packages/remark-lsx/src/server/index.ts

@@ -1,4 +1,8 @@
-import type { Request, Response } from 'express';
+import type { NextFunction, Request, Response } from 'express';
+import { query, validationResult } from 'express-validator';
+import { FilterXSS } from 'xss';
+
+import type { LsxApiOptions } from '../interfaces/api';
 
 import { listPages } from './routes/list-pages';
 
@@ -6,12 +10,45 @@ const loginRequiredFallback = (req: Request, res: Response) => {
   return res.status(403).send('login required');
 };
 
+const filterXSS = new FilterXSS();
+
+const lsxValidator = [
+  query('pagePath').notEmpty().isString(),
+  query('offset').optional().isInt(),
+  query('limit').optional().isInt(),
+  query('options')
+    .optional()
+    .customSanitizer((options) => {
+      try {
+        const jsonData: LsxApiOptions = JSON.parse(options);
+
+        Object.keys(jsonData).forEach((key) => {
+          jsonData[key] = filterXSS.process(jsonData[key]);
+        });
+
+        return jsonData;
+      }
+      catch (err) {
+        throw new Error('Invalid JSON format in options');
+      }
+    }),
+  query('options.*').optional().isString(),
+];
+
+const paramValidator = (req: Request, _: Response, next: NextFunction) => {
+  const errObjArray = validationResult(req);
+  if (errObjArray.isEmpty()) {
+    return next();
+  }
+  return next(new Error('Invalid lsx parameter'));
+};
+
 // eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types, @typescript-eslint/no-explicit-any
 const middleware = (crowi: any, app: any): void => {
   const loginRequired = crowi.require('../middlewares/login-required')(crowi, true, loginRequiredFallback);
   const accessTokenParser = crowi.require('../middlewares/access-token-parser')(crowi);
 
-  app.get('/_api/lsx', accessTokenParser, loginRequired, listPages);
+  app.get('/_api/lsx', accessTokenParser, loginRequired, lsxValidator, paramValidator, listPages);
 };
 
 export default middleware;

+ 8 - 5
packages/remark-lsx/src/server/routes/list-pages/index.spec.ts

@@ -3,12 +3,15 @@ import type { Request, Response } from 'express';
 import createError from 'http-errors';
 import { mock } from 'vitest-mock-extended';
 
-import type { LsxApiResponseData } from '../../../interfaces/api';
+import type { LsxApiResponseData, LsxApiParams } from '../../../interfaces/api';
 
 import type { PageQuery, PageQueryBuilder } from './generate-base-query';
 
 import { listPages } from '.';
 
+interface IListPagesRequest extends Request<undefined, undefined, undefined, LsxApiParams> {
+  user: IUser,
+}
 
 // mocking modules
 const mocks = vi.hoisted(() => {
@@ -30,7 +33,7 @@ describe('listPages', () => {
 
   it("returns 400 HTTP response when the query 'pagePath' is undefined", async() => {
     // setup
-    const reqMock = mock<Request & { user: IUser }>();
+    const reqMock = mock<IListPagesRequest>();
     const resMock = mock<Response>();
     const resStatusMock = mock<Response>();
     resMock.status.calledWith(400).mockReturnValue(resStatusMock);
@@ -46,7 +49,7 @@ describe('listPages', () => {
 
   describe('with num option', () => {
 
-    const reqMock = mock<Request & { user: IUser }>();
+    const reqMock = mock<IListPagesRequest>();
     reqMock.query = { pagePath: '/Sandbox' };
 
     const builderMock = mock<PageQueryBuilder>();
@@ -97,7 +100,7 @@ describe('listPages', () => {
 
     it('returns 500 HTTP response when an unexpected error occured', async() => {
       // setup
-      const reqMock = mock<Request & { user: IUser }>();
+      const reqMock = mock<IListPagesRequest>();
       reqMock.query = { pagePath: '/Sandbox' };
 
       // an Error instance will be thrown by addNumConditionMock
@@ -124,7 +127,7 @@ describe('listPages', () => {
 
     it('returns 400 HTTP response when the value is invalid', async() => {
       // setup
-      const reqMock = mock<Request & { user: IUser }>();
+      const reqMock = mock<IListPagesRequest>();
       reqMock.query = { pagePath: '/Sandbox' };
 
       // an http-errors instance will be thrown by addNumConditionMock

+ 10 - 7
packages/remark-lsx/src/server/routes/list-pages/index.ts

@@ -56,20 +56,23 @@ function addExceptCondition(query, pagePath, optionsFilter): PageQuery {
   return addFilterCondition(query, pagePath, optionsFilter, true);
 }
 
+interface IListPagesRequest extends Request<undefined, undefined, undefined, LsxApiParams> {
+  user: IUser,
+}
+
 
-export const listPages = async(req: Request & { user: IUser }, res: Response): Promise<Response> => {
+export const listPages = async(req: IListPagesRequest, res: Response): Promise<Response> => {
   const user = req.user;
 
-  // TODO: use express-validator
   if (req.query.pagePath == null) {
-    return res.status(400).send("The 'pagePath' query must not be null.");
+    return res.status(400).send("The 'pagePath' query must not be null.");
   }
 
   const params: LsxApiParams = {
-    pagePath: removeTrailingSlash(req.query.pagePath.toString()),
-    offset: req.query?.offset != null ? Number(req.query.offset) : undefined,
-    limit: req.query?.limit != null ? Number(req.query?.limit) : undefined,
-    options: req.query?.options != null ? JSON.parse(req.query.options.toString()) : {},
+    pagePath: removeTrailingSlash(req.query.pagePath),
+    offset: req.query?.offset,
+    limit: req.query?.limit,
+    options: req.query?.options ?? {},
   };
 
   const {

+ 15 - 16
yarn.lock

@@ -2199,6 +2199,13 @@
 
 "@growi/markdown-splitter@link:packages/markdown-splitter":
   version "1.0.0"
+  dependencies:
+    "@types/js-yaml" "^4.0.9"
+    remark-frontmatter "^5.0.0"
+    remark-gfm "^4.0.0"
+    remark-parse "^11.0.0"
+    remark-stringify "^11.0.0"
+    unified "^11.0.0"
 
 "@growi/pluginkit@link:packages/pluginkit":
   version "1.0.1"
@@ -2228,7 +2235,7 @@
     express "^4.20.0"
     hast-util-select "^6.0.2"
     mongoose "^6.11.3"
-    swr "^2.0.3"
+    swr "^2.2.2"
     universal-bunyan "^0.9.2"
     xss "^1.0.15"
 
@@ -4518,6 +4525,11 @@
     expect "^29.0.0"
     pretty-format "^29.0.0"
 
+"@types/js-yaml@^4.0.9":
+  version "4.0.9"
+  resolved "https://registry.yarnpkg.com/@types/js-yaml/-/js-yaml-4.0.9.tgz#cd82382c4f902fed9691a2ed79ec68c5898af4c2"
+  integrity sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg==
+
 "@types/json-schema@*", "@types/json-schema@^7.0.12", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.6", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9":
   version "7.0.15"
   resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841"
@@ -17607,7 +17619,7 @@ swagger2openapi@^7.0.8:
     yaml "^1.10.0"
     yargs "^17.0.1"
 
-swr@^2.0.3, swr@^2.2.2:
+swr@^2.2.2:
   version "2.2.4"
   resolved "https://registry.yarnpkg.com/swr/-/swr-2.2.4.tgz#03ec4c56019902fbdc904d78544bd7a9a6fa3f07"
   integrity sha512-njiZ/4RiIhoOlAaLYDqwz5qH/KZXVilRLvomrx83HjzCWTfa+InyfAjv05PSFxnmLzZkNO9ZfvgoqzAaEI4sGQ==
@@ -18341,19 +18353,6 @@ unicode-emoji-modifier-base@^1.0.0:
   resolved "https://registry.yarnpkg.com/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz#dbbd5b54ba30f287e2a8d5a249da6c0cef369459"
   integrity sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==
 
-unified@^10.1.2:
-  version "10.1.2"
-  resolved "https://registry.yarnpkg.com/unified/-/unified-10.1.2.tgz#b1d64e55dafe1f0b98bb6c719881103ecf6c86df"
-  integrity sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==
-  dependencies:
-    "@types/unist" "^2.0.0"
-    bail "^2.0.0"
-    extend "^3.0.0"
-    is-buffer "^2.0.0"
-    is-plain-obj "^4.0.0"
-    trough "^2.0.0"
-    vfile "^5.0.0"
-
 unified@^11.0.0, unified@^11.0.3, unified@^11.0.4:
   version "11.0.5"
   resolved "https://registry.yarnpkg.com/unified/-/unified-11.0.5.tgz#f66677610a5c0a9ee90cab2b8d4d66037026d9e1"
@@ -18754,7 +18753,7 @@ vfile-message@^4.0.0:
     "@types/unist" "^3.0.0"
     unist-util-stringify-position "^4.0.0"
 
-vfile@^5.0.0, vfile@^5.1.0:
+vfile@^5.1.0:
   version "5.3.7"
   resolved "https://registry.yarnpkg.com/vfile/-/vfile-5.3.7.tgz#de0677e6683e3380fafc46544cfe603118826ab7"
   integrity sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==