Reputation: 245
I am hitting the following error: Error [ERR_HTTP_HEADERS_SENT]: Cannot set headers after they are sent to the client
The following server.js
contains a route which pulls some data from MySQL. If the data size exceeds a certain limit (currently 1 byte for testing purposes), then instead of returning the data directly, it needs to upload it to S3 and create a signed URL. To do this, it calls my s3DataMitigation.js
file. Once it has the signed URL, it should redirect to the signed URL on S3 with a 303 SEE OTHER header (currently done with res.writeHead), and it does in fact redirect. However, in my console log I am still seeing a build fail because of this error.
On a side note, I may have included too much code. Feel free to edit it down if so.
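From what I can tell, ERR_HTTP_HEADERS_SENT is thrown whenever something writes to a response that has already been sent. A minimal repro of the error class (hypothetical route, not my actual code):
const express = require("express");
const app = express();

// Responding twice to the same request: the first send() writes the
// headers, so the second one throws ERR_HTTP_HEADERS_SENT.
app.get("/double-send", (req, res) => {
  res.send("first response");
  res.send("second response"); // Error [ERR_HTTP_HEADERS_SENT]
});

app.listen(3000);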
const express = require("express");
const cors = require("cors");
const mysql = require("mysql");
//Setup paths to database connection pools
const nawfprojectsDB = require("../lib/naWfProjectsDb.js");
const queries = require("./queries.js");
//Setup a timestamp for logging
const timestamp = new Date().toString();
// create the server and setup routes
const app = express();
// S3 Data Mitigation is needed when a data set exceeds 5 MB in size.
// This is a restriction of Lambda itself (they say 6 MB, but we want to ensure we don't ever hit the limit)
const s3DataMitigation = require("../lib/s3DataMitigation.js");
//Here we config CORS and enable it for all routes
var whitelist = [
  "localhost",
  "url1", //In my code I use actual URLs here
  "url2",
  "url3",
];
var corsOptions = {
  origin: whitelist,
  credentials: true,
};
// Enable CORS for all routes
app.use(cors(corsOptions));
// Size limit in bytes (currently 1 byte for testing)
const dataSizeLimit = 1;
//
// Setup routes
//
app.get("/", (req, res) => res.send("Nothing avail at root"));
//brabbit data table for workgroup data
app.get("/wg_data", (req, res, callback) => {
  const dataSet = "wg_data";
  nawfprojectsDB.query(queries.wg_data, (err, result) => {
    if (err) {
      console.log(err);
      return; // bail out so we don't try to stringify an undefined result
    }
    //Stringify our results for S3 to understand
    const data = JSON.stringify(result);
    if (Buffer.byteLength(data, "utf-8") > dataSizeLimit) {
      console.log(timestamp, "Running s3DataMitigation...");
      s3DataMitigation({ dataSet, data, res, callback });
    } else {
      res.send(result);
    }
    console.log(
      timestamp,
      "Returned " + result.length + " rows from " + dataSet
    );
  });
  // const user = req.query.user;
  // usageLog({ dataSet, user });
});
And here is s3DataMitigation.js
const aws = require("aws-sdk");
const { v4: uuidv4 } = require("uuid");
const s3DataMitigation = ({ dataSet, data, res, callback }) => {
  //Setup a timestamp for logging
  const timestamp = new Date().toString();
  aws.config = {
    accessKeyId: "accessKey",
    secretAccessKey: "secretKey",
    region: "us-east-1",
  };
  // Setup S3
  const s3 = new aws.S3();
  // Build the file name using UUID to make the file name unique
  const fileName = dataSet + uuidv4() + ".json";
  const bucket = "data-mitigation";
  // Setup S3 parameters for upload
  const s3UploadParams = {
    Bucket: bucket,
    Key: fileName,
    Body: data,
    ContentType: "application/json",
  };
  // Using aws-sdk we programmatically create the file in our S3
  s3.putObject(s3UploadParams)
    .promise()
    .then((data) => {
      console.log(timestamp, "complete:PUT Object", data);
      // We want to wait until we can confirm the file exists in S3 before proceeding, thus we continue within this block
      var signedUrlParams = {
        Bucket: bucket,
        Key: fileName,
        Expires: 60 * 5,
        ResponseContentType: "application/json",
      };
      s3.getSignedUrl("getObject", signedUrlParams, function (err, url) {
        if (err) {
          console.log(err);
          return; // don't redirect to an undefined URL
        }
        console.log(url);
        res.writeHead(302, {
          Location: url,
          //add other headers here...
        });
        res.end();
      });
      callback(null, data);
    })
    .catch((err) => {
      console.log(timestamp, "failure:PUT Object", err);
      callback(err);
    });
};
module.exports = s3DataMitigation;
Upvotes: 1
Views: 553
Reputation: 1483
In s3DataMitigation there is a statement where you're calling callback; try my code below. I have included some explanatory comments.
const aws = require("aws-sdk");
const { v4: uuidv4 } = require("uuid");
const s3DataMitigation = ({ dataSet, data, res, callback }) => {
  //Setup a timestamp for logging
  const timestamp = new Date().toString();
  aws.config = {
    accessKeyId: "accessKey",
    secretAccessKey: "secretKey",
    region: "us-east-1",
  };
  // Setup S3
  const s3 = new aws.S3();
  // Build the file name using UUID to make the file name unique
  const fileName = dataSet + uuidv4() + ".json";
  const bucket = "data-mitigation";
  // Setup S3 parameters for upload
  const s3UploadParams = {
    Bucket: bucket,
    Key: fileName,
    Body: data,
    ContentType: "application/json",
  };
  // Using aws-sdk we programmatically create the file in our S3
  s3.putObject(s3UploadParams)
    .promise()
    .then((data) => {
      console.log(timestamp, "complete:PUT Object", data);
      // We want to wait until we can confirm the file exists in S3 before proceeding, thus we continue within this block
      var signedUrlParams = {
        Bucket: bucket,
        Key: fileName,
        Expires: 60 * 5,
        ResponseContentType: "application/json",
      };
      s3.getSignedUrl("getObject", signedUrlParams, function (err, url) {
        if (err) {
          console.log(err);
          return; // don't redirect to an undefined URL
        }
        console.log(url);
        res.writeHead(302, {
          Location: url,
          //add other headers here...
        });
        res.end();
      });
      // callback(null, data); // this line is causing all the trouble
      // what it does is bubble the request down to the default handlers (error handlers, etc.)
      // but at this stage the request has already been resolved by the redirect above,
      // hence the error: it tries to send a response for a request that has already been resolved
    })
    .catch((err) => {
      console.log(timestamp, "failure:PUT Object", err);
      callback(err);
    });
};
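As an extra safeguard (optional once the callback call is removed), you could also check res.headersSent before writing the redirect, so a second write can never throw. A sketch of just that callback:
s3.getSignedUrl("getObject", signedUrlParams, function (err, url) {
  if (err) {
    console.log(err);
    return;
  }
  // res.headersSent becomes true once the response has been written,
  // so this guard skips the redirect if something already responded.
  if (!res.headersSent) {
    res.writeHead(302, { Location: url });
    res.end();
  }
});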
Hope it helps!
Upvotes: 1