Reputation: 8988
I need some help with correctly structuring the code for processing some text files using S3 buckets and a Lambda function.
I want to use a Lambda function, triggered by the creation of a new object in an S3 bucket, to read the file, extract some data, and write the result to a file that gets placed in another S3 bucket.
So far I have the function working fine copying the file from one S3 bucket to another, but I can't quite figure out how to add a function that processes the file and writes the result to the final S3 destination.
The files are simple text files and I need to extract data from each line in the file.
Below is the Node.js code I am currently using, with an additional function added to process the file - see the comments marked ?? where I am looking for help.
// dependencies
var async = require('async');
var AWS = require('aws-sdk');
var util = require('util');

// get reference to S3 client
var s3 = new AWS.S3();

exports.handler = function(event, context) {
    // Read options from the event.
    console.log("Reading options from event:\n", util.inspect(event, {depth: 5}));
    var srcBucket = event.Records[0].s3.bucket.name;
    // Object key may have spaces or unicode non-ASCII characters.
    var srcKey =
        decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
    var dstBucket = "inputBucket";
    var dstKey = srcKey + ".txt";

    // Sanity check: validate that source and destination are different buckets.
    if (srcBucket == dstBucket) {
        console.error("Destination bucket must not match source bucket.");
        return;
    }

    // Infer the file type.
    var typeMatch = srcKey.match(/\.([^.]*)$/);
    if (!typeMatch) {
        console.error('unable to infer file type for key ' + srcKey);
        return;
    }
    var fileType = typeMatch[1];
    if (fileType != "txt") {
        console.log('skipping non-text file ' + srcKey);
        return;
    }

    // Download the file from S3, transform, and upload to a different S3 bucket.
    async.waterfall([
        function download(next) {
            // Download the file from S3 into a buffer.
            s3.getObject({
                Bucket: srcBucket,
                Key: srcKey
            },
            next);
        },
        function transform(response, next) {
            // Read the file we have just downloaded
            // ? response.Body ?
            var rl = require('readline').createInterface({
                input: require('fs').createReadStream('file.in')
            });
            // Process each line here, writing the result to an output buffer?
            rl.on('line', function (line) {
                console.log('Line from file:', line);
                // Do something with the line...
                // Create some output string 'outputline'
                // Write 'outputline' to an output buffer 'outbuff'
                // ??
            });
            // Now pass the output buffer to the next function
            // so it can be uploaded to another S3 bucket
            // ??
            next;
        },
        function upload(response, next) {
            // Stream the file to a different S3 bucket.
            s3.putObject({
                Bucket: dstBucket,
                Key: dstKey,
                Body: response.Body,
                ContentType: response.ContentType
            },
            next);
        }
    ], function (err) {
        if (err) {
            console.error(
                'Unable to process ' + srcBucket + '/' + srcKey +
                ' and upload to ' + dstBucket + '/' + dstKey +
                ' due to an error: ' + err
            );
        } else {
            console.log(
                'Successfully processed ' + srcBucket + '/' + srcKey +
                ' and uploaded to ' + dstBucket + '/' + dstKey
            );
        }
        context.done();
    });
};
Upvotes: 4
Views: 7832
Reputation: 1304
Inside the callback of s3.getObject:
s3.getObject(params, function (err, data) { ... })
If your file is plain text, you can extract its contents as a string:
data.Body.toString("utf-8")
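
Putting that together, the transform step in the question could look something like the sketch below. This is a minimal sketch, not a definitive implementation: it assumes the object is newline-delimited UTF-8 text small enough to hold in memory, and processLine is a hypothetical placeholder for whatever per-line extraction you need.

function transform(response, next) {
    // response.Body is a Buffer; decode it to a string and split into lines.
    var lines = response.Body.toString("utf-8").split("\n");
    // Transform each line; processLine stands in for your own logic.
    var outLines = lines.map(function (line) {
        return processLine(line);
    });
    // Pass the transformed text and a content type on to the upload step.
    next(null, outLines.join("\n"), "text/plain");
}

The upload step's signature would then change to match what transform passes along, e.g. function upload(body, contentType, next) calling s3.putObject with Body: body and ContentType: contentType. Once the whole body is in memory as a string, the readline interface from the question is no longer needed.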
Upvotes: 1