Просмотр исходного кода

refactor S3 client instantiation to use caching for improved performance and resource management

Yuki Takei 6 месяцев назад
Родитель
Commit
5410584e80

+ 1 - 26
.serena/memories/file-uploader-memory-leak-analysis-report.md

@@ -58,32 +58,7 @@ const S3Factory = (): S3Client => {
 
 
 ### 🟡 中リスク:条件によってメモリリークが発生する可能性
 
-#### 3. Stream処理でのエラーハンドリング不足
-**場所**: 行 203-219  
-**問題コード**:
-```typescript
-override async findDeliveryFile(attachment: IAttachmentDocument): Promise<NodeJS.ReadableStream> {
-  try {
-    const body = (await s3.send(new GetObjectCommand(params))).Body;
-    return 'stream' in body
-      ? body.stream() as unknown as NodeJS.ReadableStream
-      : body as unknown as NodeJS.ReadableStream;
-  }
-  catch (err) {
-    logger.error(err);
-    throw new Error(/*...*/);
-  }
-}
-```
-
-**問題点**:
-- 返されたストリームが適切に閉じられない可能性
-- エラー時のリソースクリーンアップが不十分
-- ストリーム参照の長期保持リスク
-
-**影響度**: 中 - ファイルダウンロード頻度に依存
-
-#### 4. マルチパートアップロード処理
+#### 3. マルチパートアップロード処理
 **場所**: 行 248-260  
 **問題コード**:
 ```typescript

+ 24 - 6
apps/app/src/server/service/file-uploader/aws/index.ts

@@ -44,6 +44,8 @@ interface FileMeta {
   size: number;
 }
 
+// Cache holder to avoid repeated instantiation of S3 client
+let cachedS3Client: { configKey: string, client: S3Client } | null = null;
 const isFileExists = async(s3: S3Client, params: HeadObjectCommandInput) => {
   try {
     await s3.send(new HeadObjectCommand(params));
@@ -86,12 +88,21 @@ const getS3Bucket = (): NonBlankString | undefined => {
 };
 
 const S3Factory = (): S3Client => {
+  // Cache key based on configuration values to detect changes
   const accessKeyId = configManager.getConfig('aws:s3AccessKeyId');
   const secretAccessKey = configManager.getConfig('aws:s3SecretAccessKey');
   const s3Region = toNonBlankStringOrUndefined(configManager.getConfig('aws:s3Region')); // Blank strings may remain in the DB, so convert with toNonBlankStringOrUndefined for safety
   const s3CustomEndpoint = toNonBlankStringOrUndefined(configManager.getConfig('aws:s3CustomEndpoint'));
 
-  return new S3Client({
+  const configKey = `${accessKeyId ?? ''}|${secretAccessKey ?? ''}|${s3Region ?? ''}|${s3CustomEndpoint ?? ''}`;
+
+  // Return cached client if configuration hasn't changed
+  if (cachedS3Client != null && cachedS3Client.configKey === configKey) {
+    return cachedS3Client.client;
+  }
+
+  // Create new client instance with connection pooling optimizations
+  const client = new S3Client({
     credentials: accessKeyId != null && secretAccessKey != null
       ? {
         accessKeyId,
@@ -102,6 +113,10 @@ const S3Factory = (): S3Client => {
     endpoint: s3CustomEndpoint,
     forcePathStyle: s3CustomEndpoint != null, // s3ForcePathStyle renamed to forcePathStyle in v3
   });
+
+  // Cache the new client
+  cachedS3Client = { configKey, client };
+  return client;
 };
 
 
 const getFilePathOnStorage = (attachment: IAttachmentDocument) => {
 const getFilePathOnStorage = (attachment: IAttachmentDocument) => {
@@ -226,7 +241,7 @@ class AwsFileUploader extends AbstractFileUploader {
       throw new Error('AWS is not configured.');
     }
 
-    const s3 = S3Factory();
+    const s3 = S3Factory(); // Use singleton client
     const filePath = getFilePathOnStorage(attachment);
 
     const params = {
@@ -241,20 +256,20 @@ class AwsFileUploader extends AbstractFileUploader {
     }
 
     try {
-      const body = (await s3.send(new GetObjectCommand(params))).Body;
+      const response = await s3.send(new GetObjectCommand(params));
+      const body = response.Body;
 
       if (body == null) {
         throw new Error(`S3 returned null for the Attachment (${filePath})`);
       }
 
-      // eslint-disable-next-line no-nested-ternary
       return 'stream' in body
         ? body.stream() as unknown as NodeJS.ReadableStream // get stream from Blob and cast force
         : body as unknown as NodeJS.ReadableStream; // cast force
     }
     catch (err) {
-      logger.error(err);
-      throw new Error(`Coudn't get file from AWS for the Attachment (${attachment._id.toString()})`);
+      logger.error(`Failed to get file from AWS S3 for attachment ${attachment._id.toString()}:`, err);
+      throw new Error(`Couldn't get file from AWS for the Attachment (${attachment._id.toString()})`);
     }
   }
 
 
@@ -303,12 +318,15 @@ class AwsFileUploader extends AbstractFileUploader {
         Key: uploadKey,
         UploadId: uploadId,
       }));
+      logger.debug(`Successfully aborted multipart upload: uploadKey=${uploadKey}, uploadId=${uploadId}`);
     }
     catch (e) {
       // allow duplicate abort requests to ensure abortion
       if (e.response?.status !== 404) {
+        logger.error(`Failed to abort multipart upload: uploadKey=${uploadKey}, uploadId=${uploadId}`, e);
         throw e;
       }
+      logger.debug(`Multipart upload already aborted: uploadKey=${uploadKey}, uploadId=${uploadId}`);
     }
   }
   }