
Store files to S3. Part 4. In Progress.

Thanks to xet7!
Lauri Ojansivu 2 years ago
parent
commit
391607ec79
4 changed files with 910 additions and 496 deletions
  1. models/lib/fileStoreStrategy.js (+223 −17)
  2. models/lib/s3/createOnAfterUpload.js (+0 −187)
  3. package-lock.json (+686 −291)
  4. package.json (+1 −1)

+ 223 - 17
models/lib/fileStoreStrategy.js

@@ -1,8 +1,10 @@
 import fs from 'fs';
 import path from 'path';
 import { createObjectId } from './grid/createObjectId';
-import { httpStreamOutput } from './httpStream.js'
+import { httpStreamOutput } from './httpStream.js';
+//import {} from './s3/Server-side-file-store.js';
 import { ObjectID } from 'bson';
+var Minio = require('minio');
 
 export const STORAGE_NAME_FILESYSTEM = "fs";
 export const STORAGE_NAME_GRIDFS     = "gridfs";
@@ -40,7 +42,7 @@ export default class FileStoreStrategyFactory {
         if (fileObj.meta.source == "import" || fileObj.versions[versionName].meta.gridFsFileId) {
           // uploaded by import, so it's in GridFS (MongoDB)
           storage = STORAGE_NAME_GRIDFS;
-        } else if (fileRef && fileRef.versions && fileRef.versions[version] && fileRef.versions[version].meta && fileRef.versions[version].meta.pipePath) {
+        } else if (fileObj && fileObj.versions && fileObj.versions[version] && fileObj.versions[version].meta && fileObj.versions[version].meta.pipePath) {
           storage = STORAGE_NAME_S3;
         } else {
           // newly uploaded, so it's at the filesystem
@@ -324,6 +326,7 @@ export class FileStoreStrategyFilesystem extends FileStoreStrategy {
 /** Strategy to store attachments at S3 */
 export class FileStoreStrategyS3 extends FileStoreStrategy {
 
+
   /** constructor
    * @param s3Bucket use this S3 Bucket
    * @param fileObj the current file object
@@ -334,28 +337,232 @@ export class FileStoreStrategyS3 extends FileStoreStrategy {
     this.s3Bucket = s3Bucket;
   }
 
-  /** download the file
-   * @param http the current http request
-   * @param cacheControl cacheControl of FilesCollection
-   */
-  interceptDownload(http, cacheControl) {
-    const readStream = this.getReadStream();
-    const downloadFlag = http?.params?.query?.download;
+  /** after successful upload */
+  onAfterUpload() {
+    if (process.env.S3) {
+      Meteor.settings.s3 = JSON.parse(process.env.S3).s3;
+    }
 
-    let ret = false;
-    if (readStream) {
-      ret = true;
-      httpStreamOutput(readStream, this.fileObj.name, http, downloadFlag, cacheControl);
+    const s3Conf = Meteor.settings.s3 || {};
+    const bound  = Meteor.bindEnvironment((callback) => {
+      return callback();
+    });
+
+    /* https://github.com/veliovgroup/Meteor-Files/blob/master/docs/aws-s3-integration.md */
+    /* Check settings existence in `Meteor.settings` */
+    /* This is the best practice for app security */
+
+    /*
+    if (s3Conf && s3Conf.key && s3Conf.secret && s3Conf.bucket && s3Conf.region && s3Conf.sslEnabled) {
+      // Create a new S3 object
+      const s3 = new S3({
+        secretAccessKey: s3Conf.secret,
+        accessKeyId: s3Conf.key,
+        region: s3Conf.region,
+        sslEnabled: s3Conf.sslEnabled, // optional
+        httpOptions: {
+          timeout: 6000,
+          agent: false
+        }
+      });
     }
+    */
+
+    if (s3Conf && s3Conf.key && s3Conf.secret && s3Conf.bucket && s3Conf.endPoint && s3Conf.port && s3Conf.sslEnabled) {
+      // Create a new S3 object
+      var s3Client = new Minio.Client({
+        endPoint: s3Conf.endPoint,
+        port: s3Conf.port,
+        useSSL: s3Conf.sslEnabled,
+        accessKey: s3Conf.key,
+        secretKey: s3Conf.secret
+        //region: s3Conf.region,
+        // sslEnabled: true, // optional
+        //httpOptions: {
+        //  timeout: 6000,
+        //  agent: false
+        //
+      });
 
-    return ret;
+      // Declare the Meteor file collection on the Server
+      const UserFiles = new FilesCollection({
+        debug: false, // Change to `true` for debugging
+        storagePath: storagePath,
+        collectionName: 'userFiles',
+        // Disallow Client to execute remove, use the Meteor.method
+        allowClientCode: false,
+
+        // Start moving files to AWS:S3
+        // after fully received by the Meteor server
+        onAfterUpload(fileRef) {
+        // Run through each version of the uploaded file
+          _.each(fileRef.versions, (vRef, version) => {
+            // We use Random.id() instead of real file's _id
+            // to secure files from reverse engineering on the AWS client
+            const filePath = 'files/' + (Random.id()) + '-' + version + '.' + fileRef.extension;
+
+            // Create the AWS:S3 object.
+            // Feel free to change the storage class; see the documentation:
+            // `STANDARD_IA` is the best deal for low-access files.
+            // Key is the file name we are creating on AWS:S3, so it will be like files/XXXXXXXXXXXXXXXXX-original.XXXX
+            // Body is the file stream we are sending to AWS
+
+            const fileObj = this.fileObj;
+            const versionName = this.versionName;
+            const metadata = { ...fileObj.meta, versionName, fileId: fileObj._id };
+
+            s3Client.putObject({
+              // ServerSideEncryption: 'AES256', // Optional
+              //StorageClass: 'STANDARD',
+              Bucket: s3Conf.bucket,
+              Key: filePath,
+              Body: fs.createReadStream(vRef.path),
+              metadata,
+              ContentType: vRef.type,
+            }, (error) => {
+              bound(() => {
+                if (error) {
+                  console.error(error);
+                } else {
+                  // Update FilesCollection with link to the file at AWS
+                  const upd = { $set: {} };
+                  upd['$set']['versions.' + version + '.meta.pipePath'] = filePath;
+
+                  this.collection.update({
+                    _id: fileRef._id
+                  }, upd, (updError) => {
+                    if (updError) {
+                      console.error(updError);
+                    } else {
+                      // Unlink original files from FS after successful upload to AWS:S3
+                      this.unlink(this.collection.findOne(fileRef._id), version);
+                    }
+                  });
+                }
+              });
+            });
+          });
+        },
+      });
+    }
+  }
+
+  // Intercept access to the file
+  // And redirect request to AWS:S3
+  interceptDownload(http, fileRef, version) {
+    let path;
+
+    if (fileRef && fileRef.versions && fileRef.versions[version] && fileRef.versions[version].meta && fileRef.versions[version].meta.pipePath) {
+      path = fileRef.versions[version].meta.pipePath;
+    }
+
+    if (path) {
+      // If the file was successfully moved to AWS:S3
+      // we will pipe the request to AWS:S3,
+      // so the original link always stays secure
+
+      // To force ?play and ?download parameters
+      // and to keep original file name, content-type,
+      // content-disposition, chunked "streaming" and cache-control
+      // we're using low-level .serve() method
+      const opts = {
+        Bucket: s3Conf.bucket,
+        Key: path
+      };
+
+      if (http.request.headers.range) {
+        const vRef  = fileRef.versions[version];
+        let range   = _.clone(http.request.headers.range);
+        const array = range.split(/bytes=([0-9]*)-([0-9]*)/);
+        const start = parseInt(array[1]);
+        let end     = parseInt(array[2]);
+        if (isNaN(end)) {
+          // Request data from AWS:S3 by small chunks
+          end       = (start + this.chunkSize) - 1;
+          if (end >= vRef.size) {
+            end     = vRef.size - 1;
+          }
+        }
+        opts.Range   = `bytes=${start}-${end}`;
+        http.request.headers.range = `bytes=${start}-${end}`;
+      }
+
+      const fileColl = this;
+      s3Client.getObject(opts, function (error) {
+        if (error) {
+          console.error(error);
+          if (!http.response.finished) {
+            http.response.end();
+          }
+        } else {
+          if (http.request.headers.range && this.httpResponse.headers['content-range']) {
+            // Set the proper range header according to what is returned from AWS:S3
+            http.request.headers.range = this.httpResponse.headers['content-range'].split('/')[0].replace('bytes ', 'bytes=');
+          }
+
+          const dataStream = new stream.PassThrough();
+          fileColl.serve(http, fileRef, fileRef.versions[version], version, dataStream);
+          dataStream.end(this.data.Body);
+        }
+      });
+      return true;
+    }
+    // While the file is not yet uploaded to AWS:S3,
+    // it will be served from the FS
+    return false;
   }
 
+
   /** after file remove */
   onAfterRemove() {
+
+    if (process.env.S3) {
+      Meteor.settings.s3 = JSON.parse(process.env.S3).s3;
+    }
+
+    const s3Conf = Meteor.settings.s3 || {};
+    const bound  = Meteor.bindEnvironment((callback) => {
+      return callback();
+    });
+
+    if (s3Conf && s3Conf.key && s3Conf.secret && s3Conf.bucket && s3Conf.endPoint && s3Conf.port && s3Conf.sslEnabled) {
+      // Create a new S3 object
+      var s3Client = new Minio.Client({
+        endPoint: s3Conf.endPoint,
+        port: s3Conf.port,
+        useSSL: s3Conf.sslEnabled,
+        accessKey: s3Conf.key,
+        secretKey: s3Conf.secret
+      });
+    }
+
     this.unlink();
     super.onAfterRemove();
-  }
+    // Intercept FilesCollection's remove method to remove file from AWS:S3
+    const _origRemove = UserFiles.remove;
+    UserFiles.remove = function (selector, callback) {
+      const cursor = this.collection.find(selector);
+      cursor.forEach((fileRef) => {
+        _.each(fileRef.versions, (vRef) => {
+          if (vRef && vRef.meta && vRef.meta.pipePath) {
+            // Remove the object from AWS:S3 first, then we will call the original FilesCollection remove
+            s3Client.deleteObject({
+              Bucket: s3Conf.bucket,
+              Key: vRef.meta.pipePath,
+            }, (error) => {
+              bound(() => {
+                if (error) {
+                  console.error(error);
+                }
+              });
+            });
+          }
+        });
+      });
+    // Remove original file from database
+    _origRemove.call(this, selector, callback);
+  };
+}
 
   /** returns a read stream
    * @return the read stream
@@ -442,8 +649,7 @@ export class FileStoreStrategyS3 extends FileStoreStrategy {
     const ret = `versions.${this.versionName}.meta.s3FileId`;
     return ret;
   }
-}
-
+};
 
 
 

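One thing to watch in the new fileStoreStrategy.js code above: putObject, getObject and deleteObject are still called with AWS-SDK-style option objects (Bucket/Key/Body), but the minio client created in this commit takes positional arguments and has no deleteObject method. Below is a minimal sketch, not part of this commit, of the equivalent calls under the minio ^7.0.32 API, assuming the s3Client, s3Conf, filePath, vRef, fileColl and http names from the diff, with start/end as computed by the range parsing:

// Sketch only, not part of this commit. Equivalent Minio calls for the
// three AWS-SDK-style invocations used above (minio ^7.0.32 API).

// Upload: bucket, object name and source file path are positional;
// metadata is a plain object, and there is no StorageClass option.
s3Client.fPutObject(s3Conf.bucket, filePath, vRef.path,
  { 'Content-Type': vRef.type }, (error) => {
    if (error) console.error(error);
});

// Ranged download: offset and length replace the AWS Range option;
// the callback receives a readable stream instead of a buffered Body.
s3Client.getPartialObject(s3Conf.bucket, filePath,
  start, (end - start) + 1, (error, dataStream) => {
    if (error) {
      console.error(error);
    } else {
      // hand the stream to FilesCollection's low-level serve()
      fileColl.serve(http, fileRef, fileRef.versions[version], version, dataStream);
    }
});

// Delete: minio names this removeObject, not deleteObject.
s3Client.removeObject(s3Conf.bucket, vRef.meta.pipePath, (error) => {
  if (error) console.error(error);
});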
+ 0 - 187
models/lib/s3/createOnAfterUpload.js

@@ -1,187 +0,0 @@
-import { Meteor } from 'meteor/meteor';
-import { _ } from 'meteor/underscore';
-import { Random } from 'meteor/random';
-import { FilesCollection } from 'meteor/ostrio:files';
-import stream from 'stream';
-
-import S3 from 'aws-sdk/clients/s3'; /* http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html */
-/* See fs-extra and graceful-fs NPM packages */
-/* For better i/o performance */
-import fs from 'fs';
-
-/* Example: S3='{"s3":{"key": "xxx", "secret": "xxx", "bucket": "xxx", "region": "xxx""}}' meteor */
-if (process.env.S3) {
-  Meteor.settings.s3 = JSON.parse(process.env.S3).s3;
-
-const s3Conf = Meteor.settings.s3 || {};
-const bound  = Meteor.bindEnvironment((callback) => {
-  return callback();
-});
-
-/* Check settings existence in `Meteor.settings` */
-/* This is the best practice for app security */
-if (s3Conf && s3Conf.key && s3Conf.secret && s3Conf.bucket && s3Conf.region) {
-  // Create a new S3 object
-  const s3 = new S3({
-    secretAccessKey: s3Conf.secret,
-    accessKeyId: s3Conf.key,
-    region: s3Conf.region,
-    // sslEnabled: true, // optional
-    httpOptions: {
-      timeout: 6000,
-      agent: false
-    }
-  });
-
-  // Declare the Meteor file collection on the Server
-  const UserFiles = new FilesCollection({
-    debug: false, // Change to `true` for debugging
-    storagePath: 'assets/app/uploads/uploadedFiles',
-    collectionName: 'userFiles',
-    // Disallow Client to execute remove, use the Meteor.method
-    allowClientCode: false,
-
-    // Start moving files to AWS:S3
-    // after fully received by the Meteor server
-    onAfterUpload(fileRef) {
-      // Run through each of the uploaded file
-      _.each(fileRef.versions, (vRef, version) => {
-        // We use Random.id() instead of real file's _id
-        // to secure files from reverse engineering on the AWS client
-        const filePath = 'files/' + (Random.id()) + '-' + version + '.' + fileRef.extension;
-
-        // Create the AWS:S3 object.
-        // Feel free to change the storage class from, see the documentation,
-        // `STANDARD_IA` is the best deal for low access files.
-        // Key is the file name we are creating on AWS:S3, so it will be like files/XXXXXXXXXXXXXXXXX-original.XXXX
-        // Body is the file stream we are sending to AWS
-        s3.putObject({
-          // ServerSideEncryption: 'AES256', // Optional
-          StorageClass: 'STANDARD',
-          Bucket: s3Conf.bucket,
-          Key: filePath,
-          Body: fs.createReadStream(vRef.path),
-          ContentType: vRef.type,
-        }, (error) => {
-          bound(() => {
-            if (error) {
-              console.error(error);
-            } else {
-              // Update FilesCollection with link to the file at AWS
-              const upd = { $set: {} };
-              upd['$set']['versions.' + version + '.meta.pipePath'] = filePath;
-
-              this.collection.update({
-                _id: fileRef._id
-              }, upd, (updError) => {
-                if (updError) {
-                  console.error(updError);
-                } else {
-                  // Unlink original files from FS after successful upload to AWS:S3
-                  this.unlink(this.collection.findOne(fileRef._id), version);
-                }
-              });
-            }
-          });
-        });
-      });
-    },
-
-
-    // Intercept access to the file
-    // And redirect request to AWS:S3
-    interceptDownload(http, fileRef, version) {
-      let path;
-
-      if (fileRef && fileRef.versions && fileRef.versions[version] && fileRef.versions[version].meta && fileRef.versions[version].meta.pipePath) {
-        path = fileRef.versions[version].meta.pipePath;
-      }
-
-      if (path) {
-        // If file is successfully moved to AWS:S3
-        // We will pipe request to AWS:S3
-        // So, original link will stay always secure
-
-        // To force ?play and ?download parameters
-        // and to keep original file name, content-type,
-        // content-disposition, chunked "streaming" and cache-control
-        // we're using low-level .serve() method
-        const opts = {
-          Bucket: s3Conf.bucket,
-          Key: path
-        };
-
-        if (http.request.headers.range) {
-          const vRef  = fileRef.versions[version];
-          let range   = _.clone(http.request.headers.range);
-          const array = range.split(/bytes=([0-9]*)-([0-9]*)/);
-          const start = parseInt(array[1]);
-          let end     = parseInt(array[2]);
-          if (isNaN(end)) {
-            // Request data from AWS:S3 by small chunks
-            end       = (start + this.chunkSize) - 1;
-            if (end >= vRef.size) {
-              end     = vRef.size - 1;
-            }
-          }
-          opts.Range   = `bytes=${start}-${end}`;
-          http.request.headers.range = `bytes=${start}-${end}`;
-        }
-
-        const fileColl = this;
-        s3.getObject(opts, function (error) {
-          if (error) {
-            console.error(error);
-            if (!http.response.finished) {
-              http.response.end();
-            }
-          } else {
-            if (http.request.headers.range && this.httpResponse.headers['content-range']) {
-              // Set proper range header in according to what is returned from AWS:S3
-              http.request.headers.range = this.httpResponse.headers['content-range'].split('/')[0].replace('bytes ', 'bytes=');
-            }
-
-            const dataStream = new stream.PassThrough();
-            fileColl.serve(http, fileRef, fileRef.versions[version], version, dataStream);
-            dataStream.end(this.data.Body);
-          }
-        });
-
-        return true;
-      }
-      // While file is not yet uploaded to AWS:S3
-      // It will be served file from FS
-      return false;
-    }
-  });
-
-  // Intercept FilesCollection's remove method to remove file from AWS:S3
-  const _origRemove = UserFiles.remove;
-  UserFiles.remove = function (selector, callback) {
-    const cursor = this.collection.find(selector);
-    cursor.forEach((fileRef) => {
-      _.each(fileRef.versions, (vRef) => {
-        if (vRef && vRef.meta && vRef.meta.pipePath) {
-          // Remove the object from AWS:S3 first, then we will call the original FilesCollection remove
-          s3.deleteObject({
-            Bucket: s3Conf.bucket,
-            Key: vRef.meta.pipePath,
-          }, (error) => {
-            bound(() => {
-              if (error) {
-                console.error(error);
-              }
-            });
-          });
-        }
-      });
-    });
-
-    // Remove original file from database
-    _origRemove.call(this, selector, callback);
-  };
-} else {
-  throw new Meteor.Error(401, 'Missing Meteor file settings');
-}
-
-}

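The checks that replace this file in fileStoreStrategy.js require key, secret, bucket, endPoint, port and sslEnabled instead of the AWS region. A short sketch of what that implies for the S3 environment variable; the values are placeholders, and the endPoint/port pair is an assumption for a typical Minio deployment:

// Sketch, mirroring the new s3Conf checks above; values are placeholders.
// Example start command (endPoint/port/sslEnabled instead of region):
//   S3='{"s3":{"key":"xxx","secret":"xxx","bucket":"wekan","endPoint":"minio.example.com","port":9000,"sslEnabled":true}}' meteor
if (process.env.S3) {
  Meteor.settings.s3 = JSON.parse(process.env.S3).s3;
}
const s3Conf = Meteor.settings.s3 || {};

// All six fields are tested for truthiness, so "sslEnabled": false
// (or a missing port) makes the whole block silently no-op even
// when the credentials themselves are valid.
if (s3Conf.key && s3Conf.secret && s3Conf.bucket &&
    s3Conf.endPoint && s3Conf.port && s3Conf.sslEnabled) {
  // safe to construct the Minio.Client here
}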
+ 686 - 291
package-lock.json

File diff suppressed because it is too large

+ 1 - 1
package.json

@@ -27,7 +27,6 @@
     "@mapbox/node-pre-gyp": "^1.0.8",
     "@wekanteam/markdown-it-mermaid": "^0.6.2",
     "ajv": "^6.12.6",
-    "aws-sdk": "^2.1279.0",
     "babel-runtime": "^6.26.0",
     "bcryptjs": "^2.4.3",
     "bson": "^4.5.2",
@@ -52,6 +51,7 @@
     "markdown-it-mathjax3": "^4.3.1",
     "meteor-accounts-t9n": "^2.6.0",
     "meteor-node-stubs": "^1.1.0",
+    "minio": "^7.0.32",
     "moment": "^2.29.4",
     "mongodb": "^3.7.3",
     "nodemailer": "^6.6.3",

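package.json swaps the aws-sdk dependency for minio. A quick way to confirm at startup that the swapped-in client actually reaches the configured bucket might be bucketExists; this is a sketch, assuming the s3Client and s3Conf names from the diff above:

// Sketch: connectivity check for the new minio dependency.
// bucketExists(bucketName, callback) is part of the minio ^7 API.
s3Client.bucketExists(s3Conf.bucket, (error, exists) => {
  if (error) {
    console.error('Cannot reach Minio endpoint:', error);
  } else if (!exists) {
    console.error('Configured bucket does not exist:', s3Conf.bucket);
  }
});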
Some files were not shown because too many files changed in this diff