Source listing

Merge pull request #10090 from weseek/imprv/167603-behavior-when-pre-message-and-main-message-arrive

imprv(ai): Behavior when pre message and main message arrive
Yuki Takei — 9 months ago
parent
commit
1691bc95d6

+ 28 - 6
apps/app/src/features/openai/client/components/AiAssistant/AiAssistantSidebar/AiAssistantSidebar.tsx

@@ -241,7 +241,10 @@ const AiAssistantSidebarSubstance: React.FC<AiAssistantSidebarSubstanceProps> =
 
         const chunk = decoder.decode(value);
 
-        const textValues: string[] = [];
+        let isPreMessageGenerated = false;
+        let isMainMessageGenerationStarted = false;
+        const preMessages: string[] = [];
+        const mainMessages: string[] = [];
         const lines = chunk.split('\n\n');
         lines.forEach((line) => {
           const trimmedLine = line.trim();
@@ -250,16 +253,36 @@ const AiAssistantSidebarSubstance: React.FC<AiAssistantSidebarSubstanceProps> =
 
             processMessageForKnowledgeAssistant(data, {
               onPreMessage: (data) => {
-                textValues.push(data.text);
+                // When main message is sent while pre-message is being transmitted
+                if (isMainMessageGenerationStarted) {
+                  preMessages.length = 0;
+                  return;
+                }
+                if (data.finished) {
+                  isPreMessageGenerated = true;
+                  return;
+                }
+                if (data.text == null) {
+                  return;
+                }
+                preMessages.push(data.text);
               },
               onMessage: (data) => {
-                textValues.push(data.content[0].text.value);
+                if (!isMainMessageGenerationStarted) {
+                  isMainMessageGenerationStarted = true;
+                }
+
+                // When main message is sent while pre-message is being transmitted
+                if (!isPreMessageGenerated) {
+                  preMessages.length = 0;
+                }
+                mainMessages.push(data.content[0].text.value);
               },
             });
 
             processMessageForEditorAssistant(data, {
               onMessage: (data) => {
-                textValues.push(data.appendedMessage);
+                mainMessages.push(data.appendedMessage);
               },
               onDetectedDiff: (data) => {
                 logger.debug('sse diff', { data });
@@ -280,13 +303,12 @@ const AiAssistantSidebarSubstance: React.FC<AiAssistantSidebarSubstanceProps> =
           }
         });
 
-
         // append text values to the assistant message
         setGeneratingAnswerMessage((prevMessage) => {
           if (prevMessage == null) return;
           return {
             ...prevMessage,
-            content: prevMessage.content + textValues.join(''),
+            content: prevMessage.content + preMessages.join('') + mainMessages.join(''),
           };
         });
 
 

+ 2 - 1
apps/app/src/features/openai/interfaces/knowledge-assistant/sse-schemas.ts

@@ -12,7 +12,8 @@ export const SseMessageSchema = z.object({
 });
 
 export const SsePreMessageSchema = z.object({
-  text: z.string().describe('The pre-message that should be appended to the chat window'),
+  text: z.string().nullish().describe('The pre-message that should be appended to the chat window'),
+  finished: z.boolean().describe('Indicates if the pre-message generation is finished'),
 });
 
 
 
 

+ 14 - 5
apps/app/src/features/openai/server/routes/message/post-message.ts

@@ -116,19 +116,25 @@ export const postMessageHandlersFactory: PostMessageHandlersFactory = (crowi) =>
         return res.status(500).send(err.message);
       }
 
+      /**
+      * Create SSE (Server-Sent Events) Responses
+      */
       res.writeHead(200, {
         'Content-Type': 'text/event-stream;charset=utf-8',
         'Cache-Control': 'no-cache, no-transform',
       });
 
-      const preMessageDeltaHandler = (delta: ChatCompletionChunk.Choice.Delta) => {
-        const content = { text: delta.content };
+      const preMessageChunkHandler = (chunk: ChatCompletionChunk) => {
+        const chunkChoice = chunk.choices[0];
+
+        const content = {
+          text: chunkChoice.delta.content,
+          finished: chunkChoice.finish_reason != null,
+        };
+
         res.write(`data: ${JSON.stringify(content)}\n\n`);
       };
 
-      // Don't add await since SSE is performed asynchronously with main message
-      openaiService.generateAndProcessPreMessage(req.body.userMessage, preMessageDeltaHandler);
-
       const messageDeltaHandler = async(delta: MessageDelta) => {
         const content = delta.content?.[0];
 
@@ -144,6 +150,9 @@ export const postMessageHandlersFactory: PostMessageHandlersFactory = (crowi) =>
         res.write(`error: ${JSON.stringify({ code, message })}\n\n`);
       };
 
+      // Don't add await since SSE is performed asynchronously with main message
+      openaiService.generateAndProcessPreMessage(req.body.userMessage, preMessageChunkHandler);
+
       stream.on('event', (delta) => {
         if (delta.event === 'thread.run.failed') {
           const errorMessage = delta.data.last_error?.message;

+ 3 - 11
apps/app/src/features/openai/server/services/openai.ts

@@ -16,7 +16,6 @@ import createError from 'http-errors';
 import mongoose, { type HydratedDocument, type Types } from 'mongoose';
 import { type OpenAI, toFile } from 'openai';
 import { type ChatCompletionChunk } from 'openai/resources/chat/completions';
-import { type Stream } from 'openai/streaming';
 
 import ExternalUserGroupRelation from '~/features/external-user-group/server/models/external-user-group-relation';
 import ThreadRelationModel, { type ThreadRelationDocument } from '~/features/openai/server/models/thread-relation';
@@ -74,10 +73,7 @@ const convertPathPatternsToRegExp = (pagePathPatterns: string[]): Array<string |
 };
 
 export interface IOpenaiService {
-  generateAndProcessPreMessage(
-      message: string,
-      deltaProcessor: (delta: ChatCompletionChunk.Choice.Delta) => void,
-  ): Promise<Nullable<Stream<OpenAI.Chat.Completions.ChatCompletionChunk>>>
+  generateAndProcessPreMessage(message: string, chunkProcessor: (chunk: ChatCompletionChunk) => void): Promise<void>
   createThread(userId: string, type: ThreadType, aiAssistantId?: string, initialUserMessage?: string): Promise<ThreadRelationDocument>;
   getThreadsByAiAssistantId(aiAssistantId: string): Promise<ThreadRelationDocument[]>
   deleteThread(threadRelationId: string): Promise<ThreadRelationDocument>;
@@ -114,10 +110,7 @@ class OpenaiService implements IOpenaiService {
     return getClient({ openaiServiceType });
   }
 
-  async generateAndProcessPreMessage(
-      message: string,
-      deltaProcessor: (delta: ChatCompletionChunk.Choice.Delta) => void,
-  ): Promise<Nullable<Stream<OpenAI.Chat.Completions.ChatCompletionChunk>>> {
+  async generateAndProcessPreMessage(message: string, chunkProcessor: (delta: ChatCompletionChunk) => void): Promise<void> {
     const systemMessage = [
       "Generate a message briefly confirming the user's question.",
       'Please generate up to 20 characters',
@@ -143,8 +136,7 @@ class OpenaiService implements IOpenaiService {
     }
 
     for await (const chunk of preMessageCompletion) {
-      const delta = chunk.choices[0].delta;
-      deltaProcessor(delta);
+      chunkProcessor(chunk);
     }
   }