Reputation: 157
I have an S3 bucket that users upload very large files to (1-10 GB). I then process the files and copy them out of the upload folder. This works fine for small files, but for files larger than 5 GB I get the following error:
The specified copy source is larger than the maximum allowable size for a copy source: 5368709120 (AWS::S3::Errors::InvalidRequest)
I originally wanted to copy processed files to a separate bucket, but I'd be okay with them remaining in the same bucket under another directory. I just need to copy them out of the upload folder so users don't disturb them (I want to archive these, eventually moving them to Glacier as a very slow background process).
Thanks in advance!
Upvotes: 2
Views: 2096
Reputation: 2225
I wrote this function in a Node.js project to copy files over 5 GB between buckets, and it should work for your use case as well (originally adapted from this gist).
function copyS3MP(from_bucket, from_key, to_bucket, to_key) {
    const AWS = require('aws-sdk');
    AWS.config.update({region: 'us-west-2'});
    let s3 = new AWS.S3();

    let head, uploadId, numParts, fileSize;
    let startTime = new Date();
    let partNum = 0;
    let partSize = 1024 * 1024 * 10; // 10 MB chunks; the last part may be smaller
    let maxUploadTries = 3;

    let multiPartParams = {
        Bucket: to_bucket,
        Key: to_key,
        ContentType: getContentType(to_key) // helper from the original project (not shown) that derives the MIME type from the key
    };

    let multipartMap = {
        Parts: []
    };
    function getHead() {
        return new Promise(async (resolve, reject) => {
            try {
                const h = await s3.headObject({
                    Bucket: from_bucket,
                    Key: from_key
                }).promise();
                resolve(h);
            } catch (e) {
                reject(e);
            }
        });
    }
    function createMultipartUpload() {
        return new Promise(async (resolve, reject) => {
            try {
                s3.createMultipartUpload(multiPartParams, function(mpErr, multipart) {
                    if (mpErr) {
                        console.error(mpErr);
                        return reject(mpErr);
                    }
                    console.log('Got upload ID', multipart.UploadId);
                    return resolve(multipart.UploadId);
                });
            } catch (e) {
                reject(e);
            }
        });
    }
    function copyPart(start, partNum) {
        let tryNum = 1;

        function copyLogic(copyParams) {
            return new Promise((resolve, reject) => {
                s3.uploadPartCopy(copyParams, function(multiErr, mData) {
                    if (multiErr) {
                        console.log('Upload part error:', multiErr);
                        return reject(multiErr);
                    } else {
                        // uploadPartCopy nests the part's ETag inside CopyPartResult
                        const partETag = (mData.CopyPartResult && mData.CopyPartResult.ETag) || mData.ETag;
                        multipartMap.Parts[this.request.params.PartNumber - 1] = {
                            ETag: partETag,
                            PartNumber: Number(this.request.params.PartNumber)
                        };
                        console.log('Completed part', this.request.params.PartNumber);
                        console.log('mData', mData);
                        return resolve();
                    }
                }).on('httpUploadProgress', function(progress) {
                    console.log(Math.round(progress.loaded / progress.total * 100) + '% done');
                });
            });
        }

        return new Promise(async (resolve, reject) => {
            let end = Math.min(start + partSize, fileSize);
            try {
                let partParams = {
                    Bucket: to_bucket,
                    Key: to_key,
                    PartNumber: String(partNum),
                    UploadId: uploadId,
                    CopySource: `${from_bucket}/${from_key}`,
                    CopySourceRange: `bytes=${start}-${end - 1}`
                };
                while (tryNum <= maxUploadTries) {
                    try {
                        await copyLogic(partParams);
                        return resolve();
                    } catch (e) {
                        tryNum++;
                        if (tryNum <= maxUploadTries) {
                            console.log('Retrying copy of part: #', partParams.PartNumber);
                            // brief pause before retrying (the original project used its own sleep() helper)
                            await new Promise(r => setTimeout(r, 1000));
                        } else {
                            console.log('Failed uploading part: #', partParams.PartNumber);
                            return reject(e);
                        }
                    }
                }
                resolve();
            } catch (e) {
                return reject(e);
            }
        });
    }
    function completeMultipartUpload() {
        return new Promise((resolve, reject) => {
            let doneParams = {
                Bucket: to_bucket,
                Key: to_key,
                MultipartUpload: multipartMap,
                UploadId: uploadId
            };
            s3.completeMultipartUpload(doneParams, function(err, data) {
                if (err) {
                    return reject(err);
                }
                var delta = (new Date() - startTime) / 1000;
                console.log('Completed upload in', delta, 'seconds');
                console.log('Final upload data:', data);
                return resolve();
            });
        });
    }
    return new Promise(async (resolve, reject) => {
        try {
            head = await getHead();
            fileSize = head.ContentLength;
        } catch (e) {
            return reject(e);
        }

        numParts = Math.ceil(fileSize / partSize);

        console.log('Creating multipart upload for:', to_key);
        try {
            uploadId = await createMultipartUpload();
        } catch (e) {
            return reject(e);
        }

        // Copy each byte range of the source object as one part of the destination object
        for (let start = 0; start < fileSize; start += partSize) {
            partNum++;
            console.log("Part Num: " + partNum);
            try {
                await copyPart(start, partNum);
            } catch (e) {
                console.error(e);
                return reject(e);
            }
        }

        try {
            await completeMultipartUpload();
        } catch (e) {
            return reject(e);
        }

        resolve();
    });
}
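If it helps, a call might look like this (the bucket and key names here are just placeholders):

// Example invocation (placeholder bucket/key names)
copyS3MP('upload-bucket', 'uploads/big-video.mp4', 'archive-bucket', 'archive/big-video.mp4')
    .then(() => console.log('Copy complete'))
    .catch(err => console.error('Copy failed:', err));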
Upvotes: 2
Reputation: 187
Now all you need to do is use a CLI command like this:
aws s3 cp s3://<source> s3://<destination>
It lets you perform operations on files larger than 5 GB because the CLI switches to a multipart copy under the hood. Another method is described here: https://aws.amazon.com/ru/premiumsupport/knowledge-center/s3-multipart-upload-cli/
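For example, to move a processed upload into an archive prefix within the same bucket (bucket name and keys are placeholders):

aws s3 mv s3://my-bucket/uploads/huge-upload.bin s3://my-bucket/archive/huge-upload.bin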
Upvotes: 0
Reputation: 8928
You can do this by using a multipart copy. Specifics depend on the language and API you are using.
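For example, with the Node.js SDK (v2) a minimal sketch of that flow might look roughly like this; the bucket names, keys, and part size are placeholders, and production code should add retries and abort the multipart upload on failure:

// Minimal multipart-copy sketch with aws-sdk v2 (placeholder names, no retry logic)
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

async function multipartCopy(srcBucket, srcKey, dstBucket, dstKey) {
    // Find out how big the source object is
    const { ContentLength } = await s3.headObject({ Bucket: srcBucket, Key: srcKey }).promise();

    // Start a multipart upload on the destination key
    const { UploadId } = await s3.createMultipartUpload({ Bucket: dstBucket, Key: dstKey }).promise();

    const partSize = 100 * 1024 * 1024; // 100 MB per part; every part except the last must be at least 5 MB
    const parts = [];

    // Copy the source object server-side, one byte range per part
    for (let start = 0, partNumber = 1; start < ContentLength; start += partSize, partNumber++) {
        const end = Math.min(start + partSize, ContentLength) - 1;
        const data = await s3.uploadPartCopy({
            Bucket: dstBucket,
            Key: dstKey,
            UploadId,
            PartNumber: partNumber,
            CopySource: `${srcBucket}/${srcKey}`, // keys with special characters may need URL-encoding
            CopySourceRange: `bytes=${start}-${end}`
        }).promise();
        parts.push({ ETag: data.CopyPartResult.ETag, PartNumber: partNumber });
    }

    // Stitch the parts together into the final object
    return s3.completeMultipartUpload({
        Bucket: dstBucket,
        Key: dstKey,
        UploadId,
        MultipartUpload: { Parts: parts }
    }).promise();
}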
Upvotes: 5