// managed_upload.js
  1. var AWS = require('../core');
  2. var byteLength = AWS.util.string.byteLength;
  3. var Buffer = AWS.util.Buffer;
  4. /**
  5. * The managed uploader allows for easy and efficient uploading of buffers,
  6. * blobs, or streams, using a configurable amount of concurrency to perform
  7. * multipart uploads where possible. This abstraction also enables uploading
  8. * streams of unknown size due to the use of multipart uploads.
  9. *
  10. * To construct a managed upload object, see the {constructor} function.
  11. *
  12. * ## Tracking upload progress
  13. *
  14. * The managed upload object can also track progress by attaching an
  15. * 'httpUploadProgress' listener to the upload manager. This event is similar
  16. * to {AWS.Request~httpUploadProgress} but groups all concurrent upload progress
  17. * into a single event. See {AWS.S3.ManagedUpload~httpUploadProgress} for more
  18. * information.
  19. *
  20. * ## Handling Multipart Cleanup
  21. *
  22. * By default, this class will automatically clean up any multipart uploads
  23. * when an individual part upload fails. This behavior can be disabled in order
  24. * to manually handle failures by setting the `leavePartsOnError` configuration
  25. * option to `true` when initializing the upload object.
  26. *
  27. * @!event httpUploadProgress(progress)
  28. * Triggered when the uploader has uploaded more data.
  29. * @note The `total` property may not be set if the stream being uploaded has
  30. * not yet finished chunking. In this case the `total` will be undefined
  31. * until the total stream size is known.
  32. * @note This event will not be emitted in Node.js 0.8.x.
  33. * @param progress [map] An object containing the `loaded` and `total` bytes
  34. * of the request and the `key` of the S3 object. Note that `total` may be undefined until the payload
  35. * size is known.
  36. * @context (see AWS.Request~send)
  37. */
  38. AWS.S3.ManagedUpload = AWS.util.inherit({
  39. /**
  40. * Creates a managed upload object with a set of configuration options.
  41. *
  42. * @note A "Body" parameter is required to be set prior to calling {send}.
  43. * @note In Node.js, sending "Body" as {https://nodejs.org/dist/latest/docs/api/stream.html#stream_object_mode object-mode stream}
  44. * may result in upload hangs. Using buffer stream is preferable.
  45. * @option options params [map] a map of parameters to pass to the upload
  46. * requests. The "Body" parameter is required to be specified either on
  47. * the service or in the params option.
  48. * @note ContentMD5 should not be provided when using the managed upload object.
  49. * Instead, setting "computeChecksums" to true will enable automatic ContentMD5 generation
  50. * by the managed upload object.
  51. * @option options queueSize [Number] (4) the size of the concurrent queue
  52. * manager to upload parts in parallel. Set to 1 for synchronous uploading
  53. * of parts. Note that the uploader will buffer at most queueSize * partSize
  54. * bytes into memory at any given time.
  55. * @option options partSize [Number] (5mb) the size in bytes for each
  56. * individual part to be uploaded. Adjust the part size to ensure the number
  57. * of parts does not exceed {maxTotalParts}. See {minPartSize} for the
  58. * minimum allowed part size.
  59. * @option options leavePartsOnError [Boolean] (false) whether to abort the
  60. * multipart upload if an error occurs. Set to true if you want to handle
  61. * failures manually.
  62. * @option options service [AWS.S3] an optional S3 service object to use for
  63. * requests. This object might have bound parameters used by the uploader.
  64. * @option options tags [Array<map>] The tags to apply to the uploaded object.
  65. * Each tag should have a `Key` and `Value` keys.
  66. * @example Creating a default uploader for a stream object
  67. * var upload = new AWS.S3.ManagedUpload({
  68. * params: {Bucket: 'bucket', Key: 'key', Body: stream}
  69. * });
  70. * @example Creating an uploader with concurrency of 1 and partSize of 10mb
  71. * var upload = new AWS.S3.ManagedUpload({
  72. * partSize: 10 * 1024 * 1024, queueSize: 1,
  73. * params: {Bucket: 'bucket', Key: 'key', Body: stream}
  74. * });
  75. * @example Creating an uploader with tags
  76. * var upload = new AWS.S3.ManagedUpload({
  77. * params: {Bucket: 'bucket', Key: 'key', Body: stream},
  78. * tags: [{Key: 'tag1', Value: 'value1'}, {Key: 'tag2', Value: 'value2'}]
  79. * });
  80. * @see send
  81. */
  82. constructor: function ManagedUpload(options) {
  83. var self = this;
  84. AWS.SequentialExecutor.call(self);
  85. self.body = null;
  86. self.sliceFn = null;
  87. self.callback = null;
  88. self.parts = {};
  89. self.completeInfo = [];
  90. self.fillQueue = function() {
  91. self.callback(new Error('Unsupported body payload ' + typeof self.body));
  92. };
  93. self.configure(options);
  94. },
  95. /**
  96. * @api private
  97. */
  98. configure: function configure(options) {
  99. options = options || {};
  100. this.partSize = this.minPartSize;
  101. if (options.queueSize) this.queueSize = options.queueSize;
  102. if (options.partSize) this.partSize = options.partSize;
  103. if (options.leavePartsOnError) this.leavePartsOnError = true;
  104. if (options.tags) {
  105. if (!Array.isArray(options.tags)) {
  106. throw new Error('Tags must be specified as an array; ' +
  107. typeof options.tags + ' provided.');
  108. }
  109. this.tags = options.tags;
  110. }
  111. if (this.partSize < this.minPartSize) {
  112. throw new Error('partSize must be greater than ' +
  113. this.minPartSize);
  114. }
  115. this.service = options.service;
  116. this.bindServiceObject(options.params);
  117. this.validateBody();
  118. this.adjustTotalBytes();
  119. },
  120. /**
  121. * @api private
  122. */
  123. leavePartsOnError: false,
  124. /**
  125. * @api private
  126. */
  127. queueSize: 4,
  128. /**
  129. * @api private
  130. */
  131. partSize: null,
  132. /**
  133. * @readonly
  134. * @return [Number] the minimum number of bytes for an individual part
  135. * upload.
  136. */
  137. minPartSize: 1024 * 1024 * 5,
  138. /**
  139. * @readonly
  140. * @return [Number] the maximum allowed number of parts in a multipart upload.
  141. */
  142. maxTotalParts: 10000,
  143. /**
  144. * Initiates the managed upload for the payload.
  145. *
  146. * @callback callback function(err, data)
  147. * @param err [Error] an error or null if no error occurred.
  148. * @param data [map] The response data from the successful upload:
  149. * * `Location` (String) the URL of the uploaded object
  150. * * `ETag` (String) the ETag of the uploaded object
  151. * * `Bucket` (String) the bucket to which the object was uploaded
  152. * * `Key` (String) the key to which the object was uploaded
  153. * @example Sending a managed upload object
  154. * var params = {Bucket: 'bucket', Key: 'key', Body: stream};
  155. * var upload = new AWS.S3.ManagedUpload({params: params});
  156. * upload.send(function(err, data) {
  157. * console.log(err, data);
  158. * });
  159. */
  160. send: function(callback) {
  161. var self = this;
  162. self.failed = false;
  163. self.callback = callback || function(err) { if (err) throw err; };
  164. var runFill = true;
  165. if (self.sliceFn) {
  166. self.fillQueue = self.fillBuffer;
  167. } else if (AWS.util.isNode()) {
  168. var Stream = AWS.util.stream.Stream;
  169. if (self.body instanceof Stream) {
  170. runFill = false;
  171. self.fillQueue = self.fillStream;
  172. self.partBuffers = [];
  173. self.body.
  174. on('error', function(err) { self.cleanup(err); }).
  175. on('readable', function() { self.fillQueue(); }).
  176. on('end', function() {
  177. self.isDoneChunking = true;
  178. self.numParts = self.totalPartNumbers;
  179. self.fillQueue.call(self);
  180. if (self.isDoneChunking && self.totalPartNumbers >= 1 && self.doneParts === self.numParts) {
  181. self.finishMultiPart();
  182. }
  183. });
  184. }
  185. }
  186. if (runFill) self.fillQueue.call(self);
  187. },
  188. /**
  189. * @!method promise()
  190. * Returns a 'thenable' promise.
  191. *
  192. * Two callbacks can be provided to the `then` method on the returned promise.
  193. * The first callback will be called if the promise is fulfilled, and the second
  194. * callback will be called if the promise is rejected.
  195. * @callback fulfilledCallback function(data)
  196. * Called if the promise is fulfilled.
  197. * @param data [map] The response data from the successful upload:
  198. * `Location` (String) the URL of the uploaded object
  199. * `ETag` (String) the ETag of the uploaded object
  200. * `Bucket` (String) the bucket to which the object was uploaded
  201. * `Key` (String) the key to which the object was uploaded
  202. * @callback rejectedCallback function(err)
  203. * Called if the promise is rejected.
  204. * @param err [Error] an error or null if no error occurred.
  205. * @return [Promise] A promise that represents the state of the upload request.
  206. * @example Sending an upload request using promises.
  207. * var upload = s3.upload({Bucket: 'bucket', Key: 'key', Body: stream});
  208. * var promise = upload.promise();
  209. * promise.then(function(data) { ... }, function(err) { ... });
  210. */
  211. /**
  212. * Aborts a managed upload, including all concurrent upload requests.
  213. * @note By default, calling this function will cleanup a multipart upload
  214. * if one was created. To leave the multipart upload around after aborting
  215. * a request, configure `leavePartsOnError` to `true` in the {constructor}.
  216. * @note Calling {abort} in the browser environment will not abort any requests
  217. * that are already in flight. If a multipart upload was created, any parts
  218. * not yet uploaded will not be sent, and the multipart upload will be cleaned up.
  219. * @example Aborting an upload
  220. * var params = {
  221. * Bucket: 'bucket', Key: 'key',
  222. * Body: Buffer.alloc(1024 * 1024 * 25) // 25MB payload
  223. * };
  224. * var upload = s3.upload(params);
  225. * upload.send(function (err, data) {
  226. * if (err) console.log("Error:", err.code, err.message);
  227. * else console.log(data);
  228. * });
  229. *
  230. * // abort request in 1 second
  231. * setTimeout(upload.abort.bind(upload), 1000);
  232. */
  233. abort: function() {
  234. var self = this;
  235. //abort putObject request
  236. if (self.isDoneChunking === true && self.totalPartNumbers === 1 && self.singlePart) {
  237. self.singlePart.abort();
  238. } else {
  239. self.cleanup(AWS.util.error(new Error('Request aborted by user'), {
  240. code: 'RequestAbortedError', retryable: false
  241. }));
  242. }
  243. },
  244. /**
  245. * @api private
  246. */
  247. validateBody: function validateBody() {
  248. var self = this;
  249. self.body = self.service.config.params.Body;
  250. if (typeof self.body === 'string') {
  251. self.body = AWS.util.buffer.toBuffer(self.body);
  252. } else if (!self.body) {
  253. throw new Error('params.Body is required');
  254. }
  255. self.sliceFn = AWS.util.arraySliceFn(self.body);
  256. },
  257. /**
  258. * @api private
  259. */
  260. bindServiceObject: function bindServiceObject(params) {
  261. params = params || {};
  262. var self = this;
  263. // bind parameters to new service object
  264. if (!self.service) {
  265. self.service = new AWS.S3({params: params});
  266. } else {
  267. // Create a new S3 client from the supplied client's constructor.
  268. var service = self.service;
  269. var config = AWS.util.copy(service.config);
  270. config.signatureVersion = service.getSignatureVersion();
  271. self.service = new service.constructor.__super__(config);
  272. self.service.config.params =
  273. AWS.util.merge(self.service.config.params || {}, params);
  274. Object.defineProperty(self.service, '_originalConfig', {
  275. get: function() { return service._originalConfig; },
  276. enumerable: false,
  277. configurable: true
  278. });
  279. }
  280. },
  281. /**
  282. * @api private
  283. */
  284. adjustTotalBytes: function adjustTotalBytes() {
  285. var self = this;
  286. try { // try to get totalBytes
  287. self.totalBytes = byteLength(self.body);
  288. } catch (e) { }
  289. // try to adjust partSize if we know payload length
  290. if (self.totalBytes) {
  291. var newPartSize = Math.ceil(self.totalBytes / self.maxTotalParts);
  292. if (newPartSize > self.partSize) self.partSize = newPartSize;
  293. } else {
  294. self.totalBytes = undefined;
  295. }
  296. },
  297. /**
  298. * @api private
  299. */
  300. isDoneChunking: false,
  301. /**
  302. * @api private
  303. */
  304. partPos: 0,
  305. /**
  306. * @api private
  307. */
  308. totalChunkedBytes: 0,
  309. /**
  310. * @api private
  311. */
  312. totalUploadedBytes: 0,
  313. /**
  314. * @api private
  315. */
  316. totalBytes: undefined,
  317. /**
  318. * @api private
  319. */
  320. numParts: 0,
  321. /**
  322. * @api private
  323. */
  324. totalPartNumbers: 0,
  325. /**
  326. * @api private
  327. */
  328. activeParts: 0,
  329. /**
  330. * @api private
  331. */
  332. doneParts: 0,
  333. /**
  334. * @api private
  335. */
  336. parts: null,
  337. /**
  338. * @api private
  339. */
  340. completeInfo: null,
  341. /**
  342. * @api private
  343. */
  344. failed: false,
  345. /**
  346. * @api private
  347. */
  348. multipartReq: null,
  349. /**
  350. * @api private
  351. */
  352. partBuffers: null,
  353. /**
  354. * @api private
  355. */
  356. partBufferLength: 0,
  357. /**
  358. * @api private
  359. */
  360. fillBuffer: function fillBuffer() {
  361. var self = this;
  362. var bodyLen = byteLength(self.body);
  363. if (bodyLen === 0) {
  364. self.isDoneChunking = true;
  365. self.numParts = 1;
  366. self.nextChunk(self.body);
  367. return;
  368. }
  369. while (self.activeParts < self.queueSize && self.partPos < bodyLen) {
  370. var endPos = Math.min(self.partPos + self.partSize, bodyLen);
  371. var buf = self.sliceFn.call(self.body, self.partPos, endPos);
  372. self.partPos += self.partSize;
  373. if (byteLength(buf) < self.partSize || self.partPos === bodyLen) {
  374. self.isDoneChunking = true;
  375. self.numParts = self.totalPartNumbers + 1;
  376. }
  377. self.nextChunk(buf);
  378. }
  379. },
  380. /**
  381. * @api private
  382. */
  383. fillStream: function fillStream() {
  384. var self = this;
  385. if (self.activeParts >= self.queueSize) return;
  386. var buf = self.body.read(self.partSize - self.partBufferLength) ||
  387. self.body.read();
  388. if (buf) {
  389. self.partBuffers.push(buf);
  390. self.partBufferLength += buf.length;
  391. self.totalChunkedBytes += buf.length;
  392. }
  393. if (self.partBufferLength >= self.partSize) {
  394. // if we have single buffer we avoid copyfull concat
  395. var pbuf = self.partBuffers.length === 1 ?
  396. self.partBuffers[0] : Buffer.concat(self.partBuffers);
  397. self.partBuffers = [];
  398. self.partBufferLength = 0;
  399. // if we have more than partSize, push the rest back on the queue
  400. if (pbuf.length > self.partSize) {
  401. var rest = pbuf.slice(self.partSize);
  402. self.partBuffers.push(rest);
  403. self.partBufferLength += rest.length;
  404. pbuf = pbuf.slice(0, self.partSize);
  405. }
  406. self.nextChunk(pbuf);
  407. }
  408. if (self.isDoneChunking && !self.isDoneSending) {
  409. // if we have single buffer we avoid copyfull concat
  410. pbuf = self.partBuffers.length === 1 ?
  411. self.partBuffers[0] : Buffer.concat(self.partBuffers);
  412. self.partBuffers = [];
  413. self.partBufferLength = 0;
  414. self.totalBytes = self.totalChunkedBytes;
  415. self.isDoneSending = true;
  416. if (self.numParts === 0 || pbuf.length > 0) {
  417. self.numParts++;
  418. self.nextChunk(pbuf);
  419. }
  420. }
  421. self.body.read(0);
  422. },
  423. /**
  424. * @api private
  425. */
  426. nextChunk: function nextChunk(chunk) {
  427. var self = this;
  428. if (self.failed) return null;
  429. var partNumber = ++self.totalPartNumbers;
  430. if (self.isDoneChunking && partNumber === 1) {
  431. var params = {Body: chunk};
  432. if (this.tags) {
  433. params.Tagging = this.getTaggingHeader();
  434. }
  435. var req = self.service.putObject(params);
  436. req._managedUpload = self;
  437. req.on('httpUploadProgress', self.progress).send(self.finishSinglePart);
  438. self.singlePart = req; //save the single part request
  439. return null;
  440. } else if (self.service.config.params.ContentMD5) {
  441. var err = AWS.util.error(new Error('The Content-MD5 you specified is invalid for multi-part uploads.'), {
  442. code: 'InvalidDigest', retryable: false
  443. });
  444. self.cleanup(err);
  445. return null;
  446. }
  447. if (self.completeInfo[partNumber] && self.completeInfo[partNumber].ETag !== null) {
  448. return null; // Already uploaded this part.
  449. }
  450. self.activeParts++;
  451. if (!self.service.config.params.UploadId) {
  452. if (!self.multipartReq) { // create multipart
  453. self.multipartReq = self.service.createMultipartUpload();
  454. self.multipartReq.on('success', function(resp) {
  455. self.service.config.params.UploadId = resp.data.UploadId;
  456. self.multipartReq = null;
  457. });
  458. self.queueChunks(chunk, partNumber);
  459. self.multipartReq.on('error', function(err) {
  460. self.cleanup(err);
  461. });
  462. self.multipartReq.send();
  463. } else {
  464. self.queueChunks(chunk, partNumber);
  465. }
  466. } else { // multipart is created, just send
  467. self.uploadPart(chunk, partNumber);
  468. }
  469. },
  470. /**
  471. * @api private
  472. */
  473. getTaggingHeader: function getTaggingHeader() {
  474. var kvPairStrings = [];
  475. for (var i = 0; i < this.tags.length; i++) {
  476. kvPairStrings.push(AWS.util.uriEscape(this.tags[i].Key) + '=' +
  477. AWS.util.uriEscape(this.tags[i].Value));
  478. }
  479. return kvPairStrings.join('&');
  480. },
  481. /**
  482. * @api private
  483. */
  484. uploadPart: function uploadPart(chunk, partNumber) {
  485. var self = this;
  486. var partParams = {
  487. Body: chunk,
  488. ContentLength: AWS.util.string.byteLength(chunk),
  489. PartNumber: partNumber
  490. };
  491. var partInfo = {ETag: null, PartNumber: partNumber};
  492. self.completeInfo[partNumber] = partInfo;
  493. var req = self.service.uploadPart(partParams);
  494. self.parts[partNumber] = req;
  495. req._lastUploadedBytes = 0;
  496. req._managedUpload = self;
  497. req.on('httpUploadProgress', self.progress);
  498. req.send(function(err, data) {
  499. delete self.parts[partParams.PartNumber];
  500. self.activeParts--;
  501. if (!err && (!data || !data.ETag)) {
  502. var message = 'No access to ETag property on response.';
  503. if (AWS.util.isBrowser()) {
  504. message += ' Check CORS configuration to expose ETag header.';
  505. }
  506. err = AWS.util.error(new Error(message), {
  507. code: 'ETagMissing', retryable: false
  508. });
  509. }
  510. if (err) return self.cleanup(err);
  511. //prevent sending part being returned twice (https://github.com/aws/aws-sdk-js/issues/2304)
  512. if (self.completeInfo[partNumber] && self.completeInfo[partNumber].ETag !== null) return null;
  513. partInfo.ETag = data.ETag;
  514. self.doneParts++;
  515. if (self.isDoneChunking && self.doneParts === self.totalPartNumbers) {
  516. self.finishMultiPart();
  517. } else {
  518. self.fillQueue.call(self);
  519. }
  520. });
  521. },
  522. /**
  523. * @api private
  524. */
  525. queueChunks: function queueChunks(chunk, partNumber) {
  526. var self = this;
  527. self.multipartReq.on('success', function() {
  528. self.uploadPart(chunk, partNumber);
  529. });
  530. },
  531. /**
  532. * @api private
  533. */
  534. cleanup: function cleanup(err) {
  535. var self = this;
  536. if (self.failed) return;
  537. // clean up stream
  538. if (typeof self.body.removeAllListeners === 'function' &&
  539. typeof self.body.resume === 'function') {
  540. self.body.removeAllListeners('readable');
  541. self.body.removeAllListeners('end');
  542. self.body.resume();
  543. }
  544. // cleanup multipartReq listeners
  545. if (self.multipartReq) {
  546. self.multipartReq.removeAllListeners('success');
  547. self.multipartReq.removeAllListeners('error');
  548. self.multipartReq.removeAllListeners('complete');
  549. delete self.multipartReq;
  550. }
  551. if (self.service.config.params.UploadId && !self.leavePartsOnError) {
  552. self.service.abortMultipartUpload().send();
  553. } else if (self.leavePartsOnError) {
  554. self.isDoneChunking = false;
  555. }
  556. AWS.util.each(self.parts, function(partNumber, part) {
  557. part.removeAllListeners('complete');
  558. part.abort();
  559. });
  560. self.activeParts = 0;
  561. self.partPos = 0;
  562. self.numParts = 0;
  563. self.totalPartNumbers = 0;
  564. self.parts = {};
  565. self.failed = true;
  566. self.callback(err);
  567. },
  568. /**
  569. * @api private
  570. */
  571. finishMultiPart: function finishMultiPart() {
  572. var self = this;
  573. var completeParams = { MultipartUpload: { Parts: self.completeInfo.slice(1) } };
  574. self.service.completeMultipartUpload(completeParams, function(err, data) {
  575. if (err) {
  576. return self.cleanup(err);
  577. }
  578. if (data && typeof data.Location === 'string') {
  579. data.Location = data.Location.replace(/%2F/g, '/');
  580. }
  581. if (Array.isArray(self.tags)) {
  582. for (var i = 0; i < self.tags.length; i++) {
  583. self.tags[i].Value = String(self.tags[i].Value);
  584. }
  585. self.service.putObjectTagging(
  586. {Tagging: {TagSet: self.tags}},
  587. function(e, d) {
  588. if (e) {
  589. self.callback(e);
  590. } else {
  591. self.callback(e, data);
  592. }
  593. }
  594. );
  595. } else {
  596. self.callback(err, data);
  597. }
  598. });
  599. },
  600. /**
  601. * @api private
  602. */
  603. finishSinglePart: function finishSinglePart(err, data) {
  604. var upload = this.request._managedUpload;
  605. var httpReq = this.request.httpRequest;
  606. var endpoint = httpReq.endpoint;
  607. if (err) return upload.callback(err);
  608. data.Location =
  609. [endpoint.protocol, '//', endpoint.host, httpReq.path].join('');
  610. data.key = this.request.params.Key; // will stay undocumented
  611. data.Key = this.request.params.Key;
  612. data.Bucket = this.request.params.Bucket;
  613. upload.callback(err, data);
  614. },
  615. /**
  616. * @api private
  617. */
  618. progress: function progress(info) {
  619. var upload = this._managedUpload;
  620. if (this.operation === 'putObject') {
  621. info.part = 1;
  622. info.key = this.params.Key;
  623. } else {
  624. upload.totalUploadedBytes += info.loaded - this._lastUploadedBytes;
  625. this._lastUploadedBytes = info.loaded;
  626. info = {
  627. loaded: upload.totalUploadedBytes,
  628. total: upload.totalBytes,
  629. part: this.params.PartNumber,
  630. key: this.params.Key
  631. };
  632. }
  633. upload.emit('httpUploadProgress', [info]);
  634. }
  635. });
  636. AWS.util.mixin(AWS.S3.ManagedUpload, AWS.SequentialExecutor);
  637. /**
  638. * @api private
  639. */
  640. AWS.S3.ManagedUpload.addPromisesToClass = function addPromisesToClass(PromiseDependency) {
  641. this.prototype.promise = AWS.util.promisifyMethod('send', PromiseDependency);
  642. };
  643. /**
  644. * @api private
  645. */
  646. AWS.S3.ManagedUpload.deletePromisesFromClass = function deletePromisesFromClass() {
  647. delete this.prototype.promise;
  648. };
  649. AWS.util.addPromises(AWS.S3.ManagedUpload);
  650. /**
  651. * @api private
  652. */
  653. module.exports = AWS.S3.ManagedUpload;