Просмотр исходного кода

Merge pull request #9709 from weseek/support/162313-omit-old-knowledge-assistant-code

support: Omit old knowledge assistant code
Yuki Takei 1 год назад
Родитель
Commit
dca0a36537

+ 1 - 7
apps/app/public/static/locales/en_US/admin.json

@@ -1139,12 +1139,6 @@
   "ai_integration": {
     "ai_integration": "AI Integration",
     "disable_mode_explanation": "Currently, AI integration is disabled. To enable it, configure the <code>AI_ENABLED</code> environment variable along with the required additional variables.<br><br>For details, please refer to the <a target='blank' rel='noopener noreferrer' href={{documentationUrl}}en/guide/features/ai-knowledge-assistant.html>documentation</a>.",
-    "ai_search_management": "AI search management",
-    "rebuild_vector_store": "Rebuild Vector Store",
-    "rebuild_vector_store_label": "Rebuild",
-    "rebuild_vector_store_explanation1": "Delete the existing Vector Store and recreate the Vector Store on the public page.",
-    "rebuild_vector_store_explanation2": "This process may take several minutes.",
-    "rebuild_vector_store_requested": "Vector Store rebuild has been requested",
-    "rebuild_vector_store_failed": "Vector Store rebuild failed"
+    "ai_search_management": "AI search management"
   }
 }

+ 1 - 7
apps/app/public/static/locales/fr_FR/admin.json

@@ -1138,12 +1138,6 @@
   "ai_integration": {
     "ai_integration": "Intégration de l'IA",
     "disable_mode_explanation": "Actuellement, l'intégration AI est désactivée. Pour l'activer, configurez la variable d'environnement <code>AI_ENABLED</code> ainsi que les autres variables nécessaires.<br><br>Pour plus de détails, veuillez consulter la <a target='blank' rel='noopener noreferrer' href={{documentationUrl}}en/guide/features/ai-knowledge-assistant.html>documentation</a>.",
-    "ai_search_management": "Gestion de la recherche par l'IA",
-    "rebuild_vector_store": "Reconstruire le magasin Vector",
-    "rebuild_vector_store_label": "Reconstruire",
-    "rebuild_vector_store_explanation1": "Supprimez le Vector Store existant et recréez le Vector Store sur la page publique.",
-    "rebuild_vector_store_explanation2": "Ce processus peut prendre plusieurs minutes.",
-    "rebuild_vector_store_requested": "La reconstruction du magasin Vector a été demandée",
-    "rebuild_vector_store_failed": "Échec de la reconstruction du magasin de vecteurs"
+    "ai_search_management": "Gestion de la recherche par l'IA"
   }
 }

+ 1 - 7
apps/app/public/static/locales/ja_JP/admin.json

@@ -1149,12 +1149,6 @@
   "ai_integration": {
     "ai_integration": "AI 連携",
     "disable_mode_explanation": "現在、AI 連携は無効になっています。有効にする場合は環境変数 <code>AI_ENABLED</code> の他、必要な環境変数を設定してください。<br><br>詳細は<a target='blank' rel='noopener noreferrer' href={{documentationUrl}}ja/guide/features/ai-knowledge-assistant.html>ドキュメント</a>を参照してください。",
-    "ai_search_management": "AI 検索管理",
-    "rebuild_vector_store": "Vector Store のリビルド",
-    "rebuild_vector_store_label": "リビルド",
-    "rebuild_vector_store_explanation1": "既存の Vector Store を削除し、公開ページの Vector Store を再作成します。",
-    "rebuild_vector_store_explanation2": "この作業には数分かかる可能性があります。",
-    "rebuild_vector_store_requested": "Vector Store のリビルドを受け付けました",
-    "rebuild_vector_store_failed": "Vector Store のリビルドに失敗しました"
+    "ai_search_management": "AI 検索管理"
   }
 }

+ 1 - 7
apps/app/public/static/locales/zh_CN/admin.json

@@ -1148,12 +1148,6 @@
   "ai_integration": {
     "ai_integration": "AI 集成",
     "disable_mode_explanation": "目前,AI 集成已被禁用。若要启用,请配置 <code>AI_ENABLED</code> 环境变量以及其他必要的变量。<br><br>详细信息请参考<a target='blank' rel='noopener noreferrer' href={{documentationUrl}}en/guide/features/ai-knowledge-assistant.html>文档</a>。",
-    "ai_search_management": "AI 搜索管理",
-    "rebuild_vector_store": "重建矢量商店",
-    "rebuild_vector_store_label": "重建",
-    "rebuild_vector_store_explanation1": "删除现有的矢量存储,在公共页面上重新创建矢量存储。",
-    "rebuild_vector_store_explanation2": "这个过程可能需要几分钟。",
-    "rebuild_vector_store_requested": "已要求重建矢量存储库",
-    "rebuild_vector_store_failed": "向量存储区重建失败"
+    "ai_search_management": "AI 搜索管理"
   }
 }

+ 18 - 15
apps/app/src/components/Admin/Common/AdminNavigation.tsx

@@ -32,19 +32,20 @@ const MenuLabel = ({ menu }: { menu: string }) => {
     case 'user-groups':              return <><span className="material-symbols-outlined me-1">group</span>{            t('user_group_management.user_group_management') }</>;
     case 'audit-log':                return <><span className="material-symbols-outlined me-1">feed</span>{             t('audit_log_management.audit_log')}</>;
     case 'plugins':                  return <><span className="material-symbols-outlined me-1">extension</span>{        t('plugins.plugins')}</>;
-    case 'ai-integration':           return (
-      <>{/* TODO: unify sizing of growi-custom-icons so that simplify code -- 2024.10.09 Yuki Takei */}
-        <span
-          className="growi-custom-icons d-inline-block me-1"
-          style={{
-            fontSize: '18px', width: '24px', height: '24px', lineHeight: '24px', verticalAlign: 'bottom', paddingLeft: '2px',
-          }}
-        >
-          growi_ai
-        </span>
-        {t('ai_integration.ai_integration')}
-      </>
-    );
+    // Temporarily hiding
+    // case 'ai-integration':           return (
+    //   <>{/* TODO: unify sizing of growi-custom-icons so that simplify code -- 2024.10.09 Yuki Takei */}
+    //     <span
+    //       className="growi-custom-icons d-inline-block me-1"
+    //       style={{
+    //         fontSize: '18px', width: '24px', height: '24px', lineHeight: '24px', verticalAlign: 'bottom', paddingLeft: '2px',
+    //       }}
+    //     >
+    //       growi_ai
+    //     </span>
+    //     {t('ai_integration.ai_integration')}
+    //   </>
+    // );
     case 'search':                   return <><span className="material-symbols-outlined me-1">search</span>{           t('full_text_search_management.full_text_search_management') }</>;
     case 'cloud':                    return <><span className="material-symbols-outlined me-1">share</span>{            t('cloud_setting_management.to_cloud_settings')} </>;
     default:                         return <><span className="material-symbols-outlined me-1">home</span>{             t('wiki_management_homepage') }</>;
@@ -119,7 +120,8 @@ export const AdminNavigation = (): JSX.Element => {
         <MenuLink menu="user-groups" isListGroupItems={isListGroupItems} isActive={isActiveMenu(['/user-groups', 'user-group-detail'])} />
         <MenuLink menu="audit-log" isListGroupItems={isListGroupItems} isActive={isActiveMenu('/audit-log')} />
         <MenuLink menu="plugins" isListGroupItems={isListGroupItems} isActive={isActiveMenu('/plugins')} />
-        <MenuLink menu="ai-integration" isListGroupItems={isListGroupItems} isActive={isActiveMenu('/ai-integration')} />
+        {/* Temporarily hiding */}
+        {/* <MenuLink menu="ai-integration" isListGroupItems={isListGroupItems} isActive={isActiveMenu('/ai-integration')} /> */}
         <MenuLink menu="search" isListGroupItems={isListGroupItems} isActive={isActiveMenu('/search')} />
         {growiCloudUri != null && growiAppIdForGrowiCloud != null
           && (
@@ -173,7 +175,8 @@ export const AdminNavigation = (): JSX.Element => {
             {isActiveMenu('/audit-log')             && <MenuLabel menu="audit-log" />}
             {isActiveMenu('/plugins')               && <MenuLabel menu="plugins" />}
             {isActiveMenu('/data-transfer')         && <MenuLabel menu="data-transfer" />}
-            {isActiveMenu('/ai-integration')                && <MenuLabel menu="ai-integration" />}
+            {/* Temporarily hiding */}
+            {/* {isActiveMenu('/ai-integration')                && <MenuLabel menu="ai-integration" />} */}
             {/* eslint-enable no-multi-spaces */}
           </span>
         </button>

+ 0 - 31
apps/app/src/features/openai/client/components/AiIntegration/AiIntegration.tsx

@@ -1,45 +1,14 @@
-import { useCallback } from 'react';
-
 import { useTranslation } from 'react-i18next';
 
-import { apiv3Post } from '~/client/util/apiv3-client';
-import { toastSuccess, toastError } from '~/client/util/toastr';
-
 
 export const AiIntegration = (): JSX.Element => {
   const { t } = useTranslation('admin');
 
-  const clickRebuildVectorStoreButtonHandler = useCallback(async() => {
-    try {
-      toastSuccess(t('ai_integration.rebuild_vector_store_requested'));
-      await apiv3Post('/openai/rebuild-vector-store');
-    }
-    catch {
-      toastError(t('ai_integration.rebuild_vector_store_failed'));
-    }
-  }, [t]);
-
   return (
     <div data-testid="admin-ai-integration">
       <h2 className="admin-setting-header">{ t('ai_integration.ai_search_management') }</h2>
 
       <div className="row">
-        <label className="col-md-3 col-form-label text-start text-md-end">{ t('ai_integration.rebuild_vector_store_label') }</label>
-        <div className="col-md-8">
-          {/* TODO: https://redmine.weseek.co.jp/issues/153978 */}
-          <button
-            type="submit"
-            className="btn btn-primary"
-            onClick={clickRebuildVectorStoreButtonHandler}
-          >
-            {t('ai_integration.rebuild_vector_store')}
-          </button>
-
-          <p className="form-text text-muted">
-            {t('ai_integration.rebuild_vector_store_explanation1')}<br />
-            {t('ai_integration.rebuild_vector_store_explanation2')}<br />
-          </p>
-        </div>
       </div>
     </div>
   );

+ 0 - 4
apps/app/src/features/openai/server/routes/index.ts

@@ -19,10 +19,6 @@ export const factory = (crowi: Crowi): express.Router => {
   }
   // enabled
   else {
-    import('./rebuild-vector-store').then(({ rebuildVectorStoreHandlersFactory }) => {
-      router.post('/rebuild-vector-store', rebuildVectorStoreHandlersFactory(crowi));
-    });
-
     import('./thread').then(({ createThreadHandlersFactory }) => {
       router.post('/thread', createThreadHandlersFactory(crowi));
     });

+ 0 - 47
apps/app/src/features/openai/server/routes/rebuild-vector-store.ts

@@ -1,47 +0,0 @@
-import { ErrorV3 } from '@growi/core/dist/models';
-import type { Request, RequestHandler } from 'express';
-import type { ValidationChain } from 'express-validator';
-
-import type Crowi from '~/server/crowi';
-import { accessTokenParser } from '~/server/middlewares/access-token-parser';
-import { apiV3FormValidator } from '~/server/middlewares/apiv3-form-validator';
-import type { ApiV3Response } from '~/server/routes/apiv3/interfaces/apiv3-response';
-import loggerFactory from '~/utils/logger';
-
-import { getOpenaiService } from '../services/openai';
-
-import { certifyAiService } from './middlewares/certify-ai-service';
-
-const logger = loggerFactory('growi:routes:apiv3:openai:rebuild-vector-store');
-
-type RebuildVectorStoreFactory = (crowi: Crowi) => RequestHandler[];
-
-export const rebuildVectorStoreHandlersFactory: RebuildVectorStoreFactory = (crowi) => {
-  const loginRequiredStrictly = require('~/server/middlewares/login-required')(crowi);
-  const adminRequired = require('~/server/middlewares/admin-required')(crowi);
-
-  const validator: ValidationChain[] = [
-    //
-  ];
-
-  return [
-    accessTokenParser, loginRequiredStrictly, adminRequired, certifyAiService, validator, apiV3FormValidator,
-    async(req: Request, res: ApiV3Response) => {
-
-      const openaiService = getOpenaiService();
-      if (openaiService == null) {
-        return res.apiv3Err(new ErrorV3('GROWI AI is not enabled'), 501);
-      }
-
-      try {
-        // await openaiService?.rebuildVectorStoreAll();
-        return res.apiv3({});
-
-      }
-      catch (err) {
-        logger.error(err);
-        return res.apiv3Err(new ErrorV3('Vector Store rebuild failed'));
-      }
-    },
-  ];
-};

+ 0 - 573
apps/app/src/features/openai/server/services/markdown-splitter/markdown-splitter.spec.ts

@@ -1,573 +0,0 @@
-import { encodingForModel, type TiktokenModel } from 'js-tiktoken';
-
-import { splitMarkdownIntoFragments, type MarkdownFragment } from './markdown-splitter';
-
-const MODEL: TiktokenModel = 'gpt-4';
-const encoder = encodingForModel(MODEL);
-
-describe('splitMarkdownIntoFragments', () => {
-
-  test('handles empty markdown string', async() => {
-    const markdown = '';
-    const expected: MarkdownFragment[] = [];
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('handles markdown with only content and no headers', async() => {
-    const markdown = `This is some content without any headers.
-It spans multiple lines.
-
-Another paragraph.
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: '0-content-1',
-        type: 'paragraph',
-        text: 'This is some content without any headers.\nIt spans multiple lines.',
-        tokenCount: encoder.encode('This is some content without any headers.\nIt spans multiple lines.').length,
-      },
-      {
-        label: '0-content-2',
-        type: 'paragraph',
-        text: 'Another paragraph.',
-        tokenCount: encoder.encode('Another paragraph.').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('handles markdown starting with a header', async() => {
-    const markdown = `
-# Header 1
-Content under header 1.
-
-## Header 1.1
-Content under header 1.1.
-
-# Header 2
-Content under header 2.
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: '1-heading',
-        type: 'heading',
-        text: '# Header 1',
-        tokenCount: encoder.encode('# Header 1').length,
-      },
-      {
-        label: '1-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.',
-        tokenCount: encoder.encode('Content under header 1.').length,
-      },
-      {
-        label: '1-1-heading',
-        type: 'heading',
-        text: '## Header 1.1',
-        tokenCount: encoder.encode('## Header 1.1').length,
-      },
-      {
-        label: '1-1-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.1.',
-        tokenCount: encoder.encode('Content under header 1.1.').length,
-      },
-      {
-        label: '2-heading',
-        type: 'heading',
-        text: '# Header 2',
-        tokenCount: encoder.encode('# Header 2').length,
-      },
-      {
-        label: '2-content-1',
-        type: 'paragraph',
-        text: 'Content under header 2.',
-        tokenCount: encoder.encode('Content under header 2.').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('handles markdown with non-consecutive heading levels', async() => {
-    const markdown = `
-Introduction without a header.
-
-# Chapter 1
-Content of chapter 1.
-
-### Section 1.1.1
-Content of section 1.1.1.
-
-## Section 1.2
-Content of section 1.2.
-
-# Chapter 2
-Content of chapter 2.
-
-## Section 2.1
-Content of section 2.1.
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: '0-content-1',
-        type: 'paragraph',
-        text: 'Introduction without a header.',
-        tokenCount: encoder.encode('Introduction without a header.').length,
-      },
-      {
-        label: '1-heading',
-        type: 'heading',
-        text: '# Chapter 1',
-        tokenCount: encoder.encode('# Chapter 1').length,
-      },
-      {
-        label: '1-content-1',
-        type: 'paragraph',
-        text: 'Content of chapter 1.',
-        tokenCount: encoder.encode('Content of chapter 1.').length,
-      },
-      {
-        label: '1-1-1-heading',
-        type: 'heading',
-        text: '### Section 1.1.1',
-        tokenCount: encoder.encode('### Section 1.1.1').length,
-      },
-      {
-        label: '1-1-1-content-1',
-        type: 'paragraph',
-        text: 'Content of section 1.1.1.',
-        tokenCount: encoder.encode('Content of section 1.1.1.').length,
-      },
-      {
-        label: '1-2-heading',
-        type: 'heading',
-        text: '## Section 1.2',
-        tokenCount: encoder.encode('## Section 1.2').length,
-      },
-      {
-        label: '1-2-content-1',
-        type: 'paragraph',
-        text: 'Content of section 1.2.',
-        tokenCount: encoder.encode('Content of section 1.2.').length,
-      },
-      {
-        label: '2-heading',
-        type: 'heading',
-        text: '# Chapter 2',
-        tokenCount: encoder.encode('# Chapter 2').length,
-      },
-      {
-        label: '2-content-1',
-        type: 'paragraph',
-        text: 'Content of chapter 2.',
-        tokenCount: encoder.encode('Content of chapter 2.').length,
-      },
-      {
-        label: '2-1-heading',
-        type: 'heading',
-        text: '## Section 2.1',
-        tokenCount: encoder.encode('## Section 2.1').length,
-      },
-      {
-        label: '2-1-content-1',
-        type: 'paragraph',
-        text: 'Content of section 2.1.',
-        tokenCount: encoder.encode('Content of section 2.1.').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('handles markdown with skipped heading levels', async() => {
-    const markdown = `
-# Header 1
-Content under header 1.
-
-#### Header 1.1.1.1
-Content under header 1.1.1.1.
-
-## Header 1.2
-Content under header 1.2.
-
-# Header 2
-Content under header 2.
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: '1-heading',
-        type: 'heading',
-        text: '# Header 1',
-        tokenCount: encoder.encode('# Header 1').length,
-      },
-      {
-        label: '1-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.',
-        tokenCount: encoder.encode('Content under header 1.').length,
-      },
-      {
-        label: '1-1-1-1-heading',
-        type: 'heading',
-        text: '#### Header 1.1.1.1',
-        tokenCount: encoder.encode('#### Header 1.1.1.1').length,
-      },
-      {
-        label: '1-1-1-1-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.1.1.1.',
-        tokenCount: encoder.encode('Content under header 1.1.1.1.').length,
-      },
-      {
-        label: '1-2-heading',
-        type: 'heading',
-        text: '## Header 1.2',
-        tokenCount: encoder.encode('## Header 1.2').length,
-      },
-      {
-        label: '1-2-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.2.',
-        tokenCount: encoder.encode('Content under header 1.2.').length,
-      },
-      {
-        label: '2-heading',
-        type: 'heading',
-        text: '# Header 2',
-        tokenCount: encoder.encode('# Header 2').length,
-      },
-      {
-        label: '2-content-1',
-        type: 'paragraph',
-        text: 'Content under header 2.',
-        tokenCount: encoder.encode('Content under header 2.').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('handles malformed headings', async() => {
-    const markdown = `
-# Header 1
-Content under header 1.
-
-#### Header 1.1.1.1
-Content under header 1.1.1.1.
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: '1-heading',
-        type: 'heading',
-        text: '# Header 1',
-        tokenCount: encoder.encode('# Header 1').length,
-      },
-      {
-        label: '1-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.',
-        tokenCount: encoder.encode('Content under header 1.').length,
-      },
-      {
-        label: '1-1-1-1-heading',
-        type: 'heading',
-        text: '#### Header 1.1.1.1',
-        tokenCount: encoder.encode('#### Header 1.1.1.1').length,
-      },
-      {
-        label: '1-1-1-1-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.1.1.1.',
-        tokenCount: encoder.encode('Content under header 1.1.1.1.').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('handles multiple content blocks before any headers', async() => {
-    const markdown = `
-This is the first paragraph without a header.
-
-This is the second paragraph without a header.
-
-# Header 1
-Content under header 1.
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: '0-content-1',
-        type: 'paragraph',
-        text: 'This is the first paragraph without a header.',
-        tokenCount: encoder.encode('This is the first paragraph without a header.').length,
-      },
-      {
-        label: '0-content-2',
-        type: 'paragraph',
-        text: 'This is the second paragraph without a header.',
-        tokenCount: encoder.encode('This is the second paragraph without a header.').length,
-      },
-      {
-        label: '1-heading',
-        type: 'heading',
-        text: '# Header 1',
-        tokenCount: encoder.encode('# Header 1').length,
-      },
-      {
-        label: '1-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.',
-        tokenCount: encoder.encode('Content under header 1.').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('handles markdown with only headers and no content', async() => {
-    const markdown = `
-# Header 1
-
-## Header 1.1
-
-### Header 1.1.1
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: '1-heading',
-        type: 'heading',
-        text: '# Header 1',
-        tokenCount: encoder.encode('# Header 1').length,
-      },
-      {
-        label: '1-1-heading',
-        type: 'heading',
-        text: '## Header 1.1',
-        tokenCount: encoder.encode('## Header 1.1').length,
-      },
-      {
-        label: '1-1-1-heading',
-        type: 'heading',
-        text: '### Header 1.1.1',
-        tokenCount: encoder.encode('### Header 1.1.1').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('handles markdown with mixed content and headers', async() => {
-    const markdown = `
-# Header 1
-Content under header 1.
-
-## Header 1.1
-Content under header 1.1.
-Another piece of content.
-
-# Header 2
-Content under header 2.
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: '1-heading',
-        type: 'heading',
-        text: '# Header 1',
-        tokenCount: encoder.encode('# Header 1').length,
-      },
-      {
-        label: '1-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.',
-        tokenCount: encoder.encode('Content under header 1.').length,
-      },
-      {
-        label: '1-1-heading',
-        type: 'heading',
-        text: '## Header 1.1',
-        tokenCount: encoder.encode('## Header 1.1').length,
-      },
-      {
-        label: '1-1-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.1.\nAnother piece of content.',
-        tokenCount: encoder.encode('Content under header 1.1.\nAnother piece of content.').length,
-      },
-      {
-        label: '2-heading',
-        type: 'heading',
-        text: '# Header 2',
-        tokenCount: encoder.encode('# Header 2').length,
-      },
-      {
-        label: '2-content-1',
-        type: 'paragraph',
-        text: 'Content under header 2.',
-        tokenCount: encoder.encode('Content under header 2.').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('preserves list indentation and reduces unnecessary line breaks', async() => {
-    const markdown = `
-# Header 1
-Content under header 1.
-
-- Item 1
-  - Subitem 1
-- Item 2
-
-
-# Header 2
-Content under header 2.
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: '1-heading',
-        type: 'heading',
-        text: '# Header 1',
-        tokenCount: encoder.encode('# Header 1').length,
-      },
-      {
-        label: '1-content-1',
-        type: 'paragraph',
-        text: 'Content under header 1.',
-        tokenCount: encoder.encode('Content under header 1.').length,
-      },
-      {
-        label: '1-content-2',
-        type: 'list',
-        text: '- Item 1\n  - Subitem 1\n- Item 2',
-        tokenCount: encoder.encode('- Item 1\n  - Subitem 1\n- Item 2').length,
-      },
-      {
-        label: '2-heading',
-        type: 'heading',
-        text: '# Header 2',
-        tokenCount: encoder.encode('# Header 2').length,
-      },
-      {
-        label: '2-content-1',
-        type: 'paragraph',
-        text: 'Content under header 2.',
-        tokenCount: encoder.encode('Content under header 2.').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('code blocks containing # are not treated as headings', async() => {
-    const markdown = `
-# Header 1
-Some introductory content.
-\`\`\`
-# This is a comment with a # symbol
-Some code line
-\`\`\`
-Additional content.
-# Header 2
-Content under header 2.
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: '1-heading',
-        type: 'heading',
-        text: '# Header 1',
-        tokenCount: encoder.encode('# Header 1').length,
-      },
-      {
-        label: '1-content-1',
-        type: 'paragraph',
-        text: 'Some introductory content.',
-        tokenCount: encoder.encode('Some introductory content.').length,
-      },
-      {
-        label: '1-content-2',
-        type: 'code',
-        text: '```\n# This is a comment with a # symbol\nSome code line\n```',
-        tokenCount: encoder.encode('```\n# This is a comment with a # symbol\nSome code line\n```').length,
-      },
-      {
-        label: '1-content-3',
-        type: 'paragraph',
-        text: 'Additional content.',
-        tokenCount: encoder.encode('Additional content.').length,
-      },
-      {
-        label: '2-heading',
-        type: 'heading',
-        text: '# Header 2',
-        tokenCount: encoder.encode('# Header 2').length,
-      },
-      {
-        label: '2-content-1',
-        type: 'paragraph',
-        text: 'Content under header 2.',
-        tokenCount: encoder.encode('Content under header 2.').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-
-  test('frontmatter is processed and labeled correctly', async() => {
-    const markdown = `---
-title: Test Document
-author: John Doe
----
-
-# Header 1
-Some introductory content.
-    `;
-
-    const expected: MarkdownFragment[] = [
-      {
-        label: 'frontmatter',
-        type: 'yaml',
-        text: JSON.stringify({ title: 'Test Document', author: 'John Doe' }, null, 2),
-        tokenCount: encoder.encode(JSON.stringify({ title: 'Test Document', author: 'John Doe' }, null, 2)).length,
-      },
-      {
-        label: '1-heading',
-        type: 'heading',
-        text: '# Header 1',
-        tokenCount: encoder.encode('# Header 1').length,
-      },
-      {
-        label: '1-content-1',
-        type: 'paragraph',
-        text: 'Some introductory content.',
-        tokenCount: encoder.encode('Some introductory content.').length,
-      },
-    ];
-
-    const result = await splitMarkdownIntoFragments(markdown, MODEL);
-    expect(result).toEqual(expected);
-  });
-});

+ 0 - 133
apps/app/src/features/openai/server/services/markdown-splitter/markdown-splitter.ts

@@ -1,133 +0,0 @@
-import { dynamicImport } from '@cspell/dynamic-import';
-import type { TiktokenModel } from 'js-tiktoken';
-import { encodingForModel } from 'js-tiktoken';
-import yaml from 'js-yaml';
-import type * as RemarkFrontmatter from 'remark-frontmatter';
-import type * as RemarkGfm from 'remark-gfm';
-import type * as RemarkParse from 'remark-parse';
-import type * as RemarkStringify from 'remark-stringify';
-import type * as Unified from 'unified';
-
-
-export type MarkdownFragment = {
-  label: string;
-  type: string;
-  text: string;
-  tokenCount: number;
-};
-
-/**
- * Updates the section numbers based on the heading depth and returns the updated section label.
- * Handles non-consecutive heading levels by initializing missing levels with 1.
- * @param sectionNumbers - The current section numbers.
- * @param headingDepth - The depth of the heading (e.g., # is depth 1).
- * @returns The updated section label.
- */
-function updateSectionNumbers(sectionNumbers: number[], headingDepth: number): string {
-  if (headingDepth > sectionNumbers.length) {
-    // Initialize missing levels with 1
-    while (sectionNumbers.length < headingDepth) {
-      sectionNumbers.push(1);
-    }
-  }
-  else if (headingDepth === sectionNumbers.length) {
-    // Increment the last number for the same level
-    sectionNumbers[headingDepth - 1]++;
-  }
-  else {
-    // Remove deeper levels and increment the current level
-    sectionNumbers.splice(headingDepth);
-    sectionNumbers[headingDepth - 1]++;
-  }
-  return sectionNumbers.join('-');
-}
-
-/**
- * Splits Markdown text into labeled markdownFragments using remark-parse and remark-stringify,
- * processing each content node separately and labeling them as 1-content-1, 1-content-2, etc.
- * @param markdownText - The input Markdown string.
- * @returns An array of labeled markdownFragments.
- */
-export async function splitMarkdownIntoFragments(markdownText: string, model: TiktokenModel): Promise<MarkdownFragment[]> {
-  const markdownFragments: MarkdownFragment[] = [];
-  const sectionNumbers: number[] = [];
-  let currentSectionLabel = '';
-  const contentCounters: Record<string, number> = {};
-
-  if (typeof markdownText !== 'string' || markdownText.trim() === '') {
-    return markdownFragments;
-  }
-
-  const encoder = encodingForModel(model);
-
-  const remarkParse = (await dynamicImport<typeof RemarkParse>('remark-parse', __dirname)).default;
-  const remarkFrontmatter = (await dynamicImport<typeof RemarkFrontmatter>('remark-frontmatter', __dirname)).default;
-  const remarkGfm = (await dynamicImport<typeof RemarkGfm>('remark-gfm', __dirname)).default;
-  const remarkStringify = (await dynamicImport<typeof RemarkStringify>('remark-stringify', __dirname)).default;
-  const unified = (await dynamicImport<typeof Unified>('unified', __dirname)).unified;
-
-  const parser = unified()
-    .use(remarkParse)
-    .use(remarkFrontmatter, ['yaml'])
-    .use(remarkGfm); // Enable GFM extensions
-
-  const stringifyOptions: RemarkStringify.Options = {
-    bullet: '-', // Set list bullet to hyphen
-    rule: '-', // Use hyphen for horizontal rules
-  };
-
-  const stringifier = unified()
-    .use(remarkFrontmatter, ['yaml'])
-    .use(remarkGfm)
-    .use(remarkStringify, stringifyOptions);
-
-  const parsedTree = parser.parse(markdownText);
-
-  // Iterate over top-level nodes to prevent duplication
-  for (const node of parsedTree.children) {
-    if (node.type === 'yaml') {
-      // Frontmatter block found, handle only the first instance
-      const frontmatter = yaml.load(node.value) as Record<string, unknown>;
-      const frontmatterText = JSON.stringify(frontmatter, null, 2);
-      const tokenCount = encoder.encode(frontmatterText).length;
-      markdownFragments.push({
-        label: 'frontmatter',
-        type: 'yaml',
-        text: frontmatterText,
-        tokenCount,
-      });
-    }
-    else if (node.type === 'heading') {
-      const headingDepth = node.depth;
-      currentSectionLabel = updateSectionNumbers(sectionNumbers, headingDepth);
-
-      const headingMarkdown = stringifier.stringify(node as any).trim(); // eslint-disable-line @typescript-eslint/no-explicit-any
-      const tokenCount = encoder.encode(headingMarkdown).length;
-      markdownFragments.push({
-        label: `${currentSectionLabel}-heading`, type: node.type, text: headingMarkdown, tokenCount,
-      });
-    }
-    else {
-      // Process non-heading content individually
-      const contentMarkdown = stringifier.stringify(node as any).trim(); // eslint-disable-line @typescript-eslint/no-explicit-any
-      if (contentMarkdown !== '') {
-        const contentCountKey = currentSectionLabel || '0';
-        if (!contentCounters[contentCountKey]) {
-          contentCounters[contentCountKey] = 1;
-        }
-        else {
-          contentCounters[contentCountKey]++;
-        }
-        const contentLabel = currentSectionLabel !== ''
-          ? `${currentSectionLabel}-content-${contentCounters[contentCountKey]}`
-          : `0-content-${contentCounters[contentCountKey]}`;
-        const tokenCount = encoder.encode(contentMarkdown).length;
-        markdownFragments.push({
-          label: contentLabel, type: node.type, text: contentMarkdown, tokenCount,
-        });
-      }
-    }
-  }
-
-  return markdownFragments;
-}

+ 0 - 134
apps/app/src/features/openai/server/services/markdown-splitter/markdown-token-splitter.spec.ts

@@ -1,134 +0,0 @@
-import type { TiktokenModel } from 'js-tiktoken';
-import { encodingForModel } from 'js-tiktoken';
-
-import { splitMarkdownIntoChunks } from './markdown-token-splitter';
-
-const MODEL: TiktokenModel = 'gpt-4';
-const encoder = encodingForModel(MODEL);
-
-describe('splitMarkdownIntoChunks', () => {
-  const repeatedText = 'This is a repeated sentence for testing purposes. '.repeat(100);
-  const markdown = `---
-title: Test Document
-author: John Doe
----
-
-${repeatedText}
-
-# Header 1
-
-This is the first paragraph under header 1. It contains some text to simulate a longer paragraph for testing.
-This paragraph is extended with more content to ensure proper chunking behavior.${repeatedText}
-
-## Header 1-1
-
-This is the first paragraph under header 1-1. The text is a bit longer to ensure proper chunking. More text follows.
-
-
-### Header 1-1-1
-
-This is the first paragraph under header 1-1-1. The content is nested deeper,
-making sure that the chunking algorithm works properly with multiple levels of headers.
-
-This is another paragraph under header 1-1-1, continuing the content at this deeper level.
-
-#### Header 1-1-1-1
-
-Now we have reached the fourth level of headers. The text here should also be properly chunked and grouped with its parent headers.
-
-This is another paragraph under header 1-1-1-1. It should be grouped with the correct higher-level headers.
-
-# Header 2
-
-Here is some content under header 2. This section should also be sufficiently long to ensure that the token count threshold is reached in the test.
-
-## Header 2-1
-
-${repeatedText}
-
-${repeatedText}
-
-Another sub-header under header 2 with text for testing chunking behavior. This is a fairly lengthy paragraph as well.
-
-We now have a fourth-level sub-header under header 2-1. This ensures that the chunking logic can handle deeply nested content.
-
-### Header 2-1-1
-
-Here is another paragraph under header 2-1-1. This paragraph is part of a more deeply nested section.
-
-# Header 3
-
-Continuing with more headers and content to make sure the markdown document is sufficiently large. This is a new header with more paragraphs under it.
-
-### Header 3-1
-
-This is a sub-header under header 3. The content here continues to grow, ensuring that the markdown is long enough to trigger multiple chunks.
-
-#### Header 3-1-1
-
-Here is a fourth-level sub-header under header 3-1. This paragraph is designed to create a larger markdown file for testing purposes.
-`;
-  test('Each chunk should not exceed the specified token count', async() => {
-    const maxToken = 800;
-    const result = await splitMarkdownIntoChunks(markdown, MODEL, maxToken);
-
-    result.forEach((chunk) => {
-      const tokenCount = encoder.encode(chunk).length;
-      expect(tokenCount).toBeLessThanOrEqual(maxToken * 1.1);
-    });
-  });
-  test('Each chunk should include the relevant top-level header', async() => {
-    const result = await splitMarkdownIntoChunks(markdown, MODEL, 800);
-
-    result.forEach((chunk) => {
-      const containsHeader1 = chunk.includes('# Header 1');
-      const containsHeader2 = chunk.includes('# Header 2');
-      const containsHeader3 = chunk.includes('# Header 3');
-      const doesNotContainHash = !chunk.includes('# ');
-
-      expect(containsHeader1 || containsHeader2 || containsHeader3 || doesNotContainHash).toBe(true);
-    });
-  });
-  test('Should throw an error if a header exceeds half of maxToken size with correct error message', async() => {
-    const maxToken = 800;
-    const markdownWithLongHeader = `
-# Short Header 1
-
-This is the first paragraph under short header 1. It contains some text for testing purposes.
-
-## ${repeatedText}
-
-This is the first paragraph under the long header. It contains text to ensure that the header length check is triggered if the header is too long.
-
-# Short Header 2
-
-Another section with a shorter header, but enough content to ensure proper chunking.
-`;
-
-    try {
-      await splitMarkdownIntoChunks(markdownWithLongHeader, MODEL, maxToken);
-    }
-    catch (error) {
-      if (error instanceof Error) {
-        expect(error.message).toContain('Heading token count is too large');
-      }
-      else {
-        throw new Error('An unknown error occurred');
-      }
-    }
-  });
-
-  test('Should return the entire markdown as a single chunk if token count is less than or equal to maxToken', async() => {
-    const markdownText = `
-    # Header 1
-    This is a short paragraph under header 1. It contains only a few sentences to ensure that the total token count remains under the maxToken limit.
-    `;
-
-    const maxToken = 800;
-
-    const result = await splitMarkdownIntoChunks(markdownText, MODEL, maxToken);
-
-    expect(result).toHaveLength(1);
-    expect(result[0]).toBe(markdownText);
-  });
-});

+ 0 - 188
apps/app/src/features/openai/server/services/markdown-splitter/markdown-token-splitter.ts

@@ -1,188 +0,0 @@
-import { encodingForModel, type TiktokenModel } from 'js-tiktoken';
-
-import { splitMarkdownIntoFragments, type MarkdownFragment } from './markdown-splitter';
-
-type MarkdownFragmentGroups = MarkdownFragment[][] ;
-
-function groupMarkdownFragments(
-    markdownFragments: MarkdownFragment[],
-    maxToken: number,
-): MarkdownFragmentGroups {
-
-  const prefixes = markdownFragments.map(({ label }) => {
-    if (label === 'frontmatter') return 'frontmatter';
-    const match = label.match(/^\d+(?:-\d+)*/)!; // eslint-disable-line @typescript-eslint/no-non-null-assertion
-    return match[0];
-  });
-
-  const uniquePrefixes = [...new Set(prefixes.filter(Boolean))];
-
-  // Group chunks by prefix
-  const fragmentGroupes: MarkdownFragmentGroups = [];
-  let remainingPrefixes = [...uniquePrefixes];
-
-  // Process chunks so that the total token count per level doesn't exceed maxToken
-  while (remainingPrefixes.length > 0) {
-    const prefix = remainingPrefixes[0]; // Get the first prefix
-    const hasNextLevelPrefix = uniquePrefixes.some(p => p !== prefix && p.startsWith(prefix));
-
-    if (!hasNextLevelPrefix) {
-      // If there is no prefix that starts with the current prefix, group the chunks directly
-      let matchingFragments = markdownFragments.filter(fragment => fragment.label.startsWith(prefix));
-
-      // Add parent heading if it exists
-      const parts = prefix.split('-');
-      for (let i = 1; i < parts.length; i++) {
-        const parentPrefix = parts.slice(0, i).join('-');
-        const parentHeading = markdownFragments.find(fragment => fragment.label === `${parentPrefix}-heading`);
-        if (parentHeading) {
-          matchingFragments = [parentHeading, ...matchingFragments]; // Add the heading at the front
-        }
-      }
-
-      fragmentGroupes.push(matchingFragments);
-    }
-    else {
-      // Filter chunks that start with the current prefix
-      let matchingFragments = markdownFragments.filter(fragment => fragment.label.startsWith(prefix));
-
-      // Add parent heading if it exists
-      const parts = prefix.split('-');
-      for (let i = 1; i < parts.length; i++) {
-        const parentPrefix = parts.slice(0, i).join('-');
-        const parentHeading = markdownFragments.find(fragment => fragment.label === `${parentPrefix}-heading`);
-        if (parentHeading) {
-          matchingFragments = [parentHeading, ...matchingFragments];
-        }
-      }
-
-      // Calculate total token count including parent headings
-      const totalTokenCount = matchingFragments.reduce((sum, fragment) => sum + fragment.tokenCount, 0);
-
-      // If the total token count doesn't exceed maxToken, group the chunks
-      if (totalTokenCount <= maxToken) {
-        fragmentGroupes.push(matchingFragments);
-        remainingPrefixes = remainingPrefixes.filter(p => !p.startsWith(`${prefix}-`));
-      }
-      else {
-        // If it exceeds maxToken, strictly filter chunks by the exact numeric prefix
-        const strictMatchingFragments = markdownFragments.filter((fragment) => {
-          const match = fragment.label.match(/^\d+(-\d+)*(?=-)/);
-          return match && match[0] === prefix;
-        });
-
-        // Add parent heading if it exists
-        for (let i = 1; i < parts.length; i++) {
-          const parentPrefix = parts.slice(0, i).join('-');
-          const parentHeading = markdownFragments.find(fragment => fragment.label === `${parentPrefix}-heading`);
-          if (parentHeading) {
-            strictMatchingFragments.unshift(parentHeading); // Add the heading at the front
-          }
-        }
-
-        fragmentGroupes.push(strictMatchingFragments);
-      }
-    }
-    remainingPrefixes.shift();
-  }
-
-  return fragmentGroupes;
-}
-
-// Function to group markdown into chunks based on token count
-export async function splitMarkdownIntoChunks(
-    markdownText: string,
-    model: TiktokenModel,
-    maxToken = 800,
-): Promise<string[]> {
-  const encoder = encodingForModel(model);
-
-  // If the total token count for the entire markdown text is less than or equal to maxToken,
-  // return the entire markdown as a single chunk.
-  if (encoder.encode(markdownText).length <= maxToken) {
-    return [markdownText];
-  }
-
-  // Split markdown text into chunks
-  const markdownFragments = await splitMarkdownIntoFragments(markdownText, model);
-  const chunks: string[] = [];
-
-  // Group the chunks based on token count
-  const fragmentGroupes = groupMarkdownFragments(markdownFragments, maxToken);
-
-  fragmentGroupes.forEach((fragmentGroupe) => {
-    // Calculate the total token count for each group
-    const totalTokenCount = fragmentGroupe.reduce((sum, fragment) => sum + fragment.tokenCount, 0);
-
-    // If the total token count doesn't exceed maxToken, combine the chunks into one
-    if (totalTokenCount <= maxToken) {
-      const chunk = fragmentGroupe.map((fragment, index) => {
-        const nextFragment = fragmentGroupe[index + 1];
-        if (nextFragment) {
-          // If both the current and next chunks are headings, add a single newline
-          if (fragment.type === 'heading' && nextFragment.type === 'heading') {
-            return `${fragment.text}\n`;
-          }
-          // Add two newlines for other cases
-          return `${fragment.text}\n\n`;
-        }
-        return fragment.text; // No newlines for the last chunk
-      }).join('');
-
-      chunks.push(chunk);
-    }
-    else {
-      // If the total token count exceeds maxToken, split content
-      const headingFragments = fragmentGroupe.filter(fragment => fragment.type === 'heading'); // Find all headings
-      const headingText = headingFragments.map(heading => heading.text).join('\n'); // Combine headings with one newline
-
-      for (const fragment of fragmentGroupe) {
-        if (fragment.label.includes('content')) {
-          // Combine heading and paragraph content
-          const combinedTokenCount = headingFragments.reduce((sum, heading) => sum + heading.tokenCount, 0) + fragment.tokenCount;
-          // Check if headingChunks alone exceed maxToken
-          const headingTokenCount = headingFragments.reduce((sum, heading) => sum + heading.tokenCount, 0);
-
-          if (headingTokenCount > maxToken / 2) {
-            throw new Error(
-              `Heading token count is too large. Heading token count: ${headingTokenCount}, allowed maximum: ${Math.ceil(maxToken / 2)}`,
-            );
-          }
-
-          // If the combined token count exceeds maxToken, split the content by character count
-          if (combinedTokenCount > maxToken) {
-            const headingTokenCount = headingFragments.reduce((sum, heading) => sum + heading.tokenCount, 0);
-            const remainingTokenCount = maxToken - headingTokenCount;
-
-            // Calculate the total character count and token count
-            const fragmentCharCount = fragment.text.length;
-            const fragmenTokenCount = fragment.tokenCount;
-
-            // Calculate the character count for splitting
-            const charCountForSplit = Math.floor((remainingTokenCount / fragmenTokenCount) * fragmentCharCount);
-
-            // Split content based on character count
-            const splitContents: string[] = [];
-            for (let i = 0; i < fragment.text.length; i += charCountForSplit) {
-              splitContents.push(fragment.text.slice(i, i + charCountForSplit));
-            }
-
-            // Add each split content to the new group of chunks
-            splitContents.forEach((splitText) => {
-              const chunk = headingText
-                ? `${headingText}\n\n${splitText}`
-                : `${splitText}`;
-              chunks.push(chunk);
-            });
-          }
-          else {
-            const chunk = `${headingText}\n\n${fragment.text}`;
-            chunks.push(chunk);
-          }
-        }
-      }
-    }
-  });
-
-  return chunks;
-}

+ 0 - 72
apps/app/src/features/openai/server/services/openai.ts

@@ -35,7 +35,6 @@ import AiAssistantModel, { type AiAssistantDocument } from '../models/ai-assista
 import { convertMarkdownToHtml } from '../utils/convert-markdown-to-html';
 
 import { getClient } from './client-delegator';
-// import { splitMarkdownIntoChunks } from './markdown-splitter/markdown-token-splitter';
 import { openaiApiErrorHandler } from './openai-api-error-handler';
 import { replaceAnnotationWithPageLink } from './replace-annotation-with-page-link';
 
@@ -45,7 +44,6 @@ const BATCH_SIZE = 100;
 
 const logger = loggerFactory('growi:service:openai');
 
-// const isVectorStoreForPublicScopeExist = false;
 
 type VectorStoreFileRelationsMap = Map<string, VectorStoreFileRelation>
 
@@ -233,38 +231,6 @@ class OpenaiService implements IOpenaiService {
     return messages;
   }
 
-  // TODO: https://redmine.weseek.co.jp/issues/160332
-  // public async getOrCreateVectorStoreForPublicScope(): Promise<VectorStoreDocument> {
-  //   const vectorStoreDocument: VectorStoreDocument | null = await VectorStoreModel.findOne({ scopeType: VectorStoreScopeType.PUBLIC, isDeleted: false });
-
-  //   if (vectorStoreDocument != null && isVectorStoreForPublicScopeExist) {
-  //     return vectorStoreDocument;
-  //   }
-
-  //   if (vectorStoreDocument != null && !isVectorStoreForPublicScopeExist) {
-  //     try {
-  //       // Check if vector store entity exists
-  //       // If the vector store entity does not exist, the vector store document is deleted
-  //       await this.client.retrieveVectorStore(vectorStoreDocument.vectorStoreId);
-  //       isVectorStoreForPublicScopeExist = true;
-  //       return vectorStoreDocument;
-  //     }
-  //     catch (err) {
-  //       await oepnaiApiErrorHandler(err, { notFoundError: vectorStoreDocument.markAsDeleted });
-  //       throw new Error(err);
-  //     }
-  //   }
-
-  //   const newVectorStore = await this.client.createVectorStore(VectorStoreScopeType.PUBLIC);
-  //   const newVectorStoreDocument = await VectorStoreModel.create({
-  //     vectorStoreId: newVectorStore.id,
-  //     scopeType: VectorStoreScopeType.PUBLIC,
-  //   }) as VectorStoreDocument;
-
-  //   isVectorStoreForPublicScopeExist = true;
-
-  //   return newVectorStoreDocument;
-  // }
 
   async getVectorStoreRelation(aiAssistantId: string): Promise<VectorStoreDocument> {
     const aiAssistant = await AiAssistantModel.findById({ _id: aiAssistantId }).populate('vectorStore');
@@ -344,22 +310,6 @@ class OpenaiService implements IOpenaiService {
     }
   }
 
-  // TODO: https://redmine.weseek.co.jp/issues/160332
-  // TODO: https://redmine.weseek.co.jp/issues/156643
-  // private async uploadFileByChunks(pageId: Types.ObjectId, body: string, vectorStoreFileRelationsMap: VectorStoreFileRelationsMap) {
-  //   const chunks = await splitMarkdownIntoChunks(body, 'gpt-4o');
-  //   for await (const [index, chunk] of chunks.entries()) {
-  //     try {
-  //       const file = await toFile(Readable.from(chunk), `${pageId}-chunk-${index}.md`);
-  //       const uploadedFile = await this.client.uploadFile(file);
-  //       prepareVectorStoreFileRelations(pageId, uploadedFile.id, vectorStoreFileRelationsMap);
-  //     }
-  //     catch (err) {
-  //       logger.error(err);
-  //     }
-  //   }
-  // }
-
   private async uploadFile(pageId: Types.ObjectId, pagePath: string, revisionBody: string): Promise<OpenAI.Files.FileObject> {
     const convertedHtml = await convertMarkdownToHtml({ pagePath, revisionBody });
     const file = await toFile(Readable.from(convertedHtml), `${pageId}.html`);
@@ -542,28 +492,6 @@ class OpenaiService implements IOpenaiService {
     }
   }
 
-  // TODO: https://redmine.weseek.co.jp/issues/160332
-  // async rebuildVectorStoreAll() {
-  //   await this.deleteVectorStore(VectorStoreScopeType.PUBLIC);
-
-  //   // Create all public pages VectorStoreFile
-  //   const Page = mongoose.model<HydratedDocument<PageDocument>, PageModel>('Page');
-  //   const pagesStream = Page.find({ grant: PageGrant.GRANT_PUBLIC }).populate('revision').cursor({ batch_size: BATCH_SIZE });
-  //   const batchStrem = createBatchStream(BATCH_SIZE);
-
-  //   const createVectorStoreFile = this.createVectorStoreFile.bind(this);
-  //   const createVectorStoreFileStream = new Transform({
-  //     objectMode: true,
-  //     async transform(chunk: HydratedDocument<PageDocument>[], encoding, callback) {
-  //       await createVectorStoreFile(chunk);
-  //       this.push(chunk);
-  //       callback();
-  //     },
-  //   });
-
-  //   await pipeline(pagesStream, batchStrem, createVectorStoreFileStream);
-  // }
-
   async filterPagesByAccessScope(aiAssistant: AiAssistantDocument, pages: HydratedDocument<PageDocument>[]) {
     const isPublicPage = (page :HydratedDocument<PageDocument>) => page.grant === PageGrant.GRANT_PUBLIC;
 

+ 0 - 5
apps/app/src/features/rate-limiter/config/index.ts

@@ -56,11 +56,6 @@ export const defaultConfig: IApiRateLimitEndpointMap = {
     method: 'GET',
     maxRequests: MAX_REQUESTS_TIER_3,
   },
-  '/_api/v3/openai/rebuild-vector-store': {
-    method: 'POST',
-    maxRequests: 1,
-    usersPerIpProspection: 1,
-  },
 };
 
 const isDev = process.env.NODE_ENV === 'development';