I have to transfer a file from an API endpoint to two different S3 buckets. The original upload is made using:
curl -X PUT -F "data=@sample" "http://localhost:3000/upload/1/1"
The endpoint where the file is uploaded:
const PassThrough = require('stream').PassThrough;

async function uploadFile (req, res) {
  try {
    const firstS3Stream = new PassThrough();
    const secondS3Stream = new PassThrough();
    req.pipe(firstS3Stream);
    req.pipe(secondS3Stream);

    await Promise.all([
      uploadToFirstS3(firstS3Stream),
      uploadToSecondS3(secondS3Stream),
    ]);
    return res.end();
  } catch (err) {
    console.log(err);
    return res.status(500).send({ error: 'Unexpected error during file upload' });
  }
}
As you can see, I use two PassThrough streams to duplicate the request stream into two readable streams, as suggested in this SO thread.
This piece of code remains unchanged; what is interesting here are the uploadToFirstS3 and uploadToSecondS3 functions. In this minimal example both do exactly the same thing with a different configuration, so I will expand on only one of them here.
What Works Well:
const aws = require('aws-sdk');

const s3 = new aws.S3({
  accessKeyId: S3_API_KEY,
  secretAccessKey: S3_API_SECRET,
  region: S3_REGION,
  signatureVersion: 'v4',
});

const uploadToFirstS3 = (stream) => new Promise((resolve, reject) => {
  const uploadParams = {
    Bucket: S3_BUCKET_NAME,
    Key: 'some-key',
    Body: stream,
  };
  s3.upload(uploadParams, (err) => {
    if (err) return reject(err);
    resolve(true);
  });
});
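uploadToSecondS3 is essentially the same function pointed at a second client and bucket; a condensed sketch (the SECOND_* names are placeholders for the second configuration, not my real values):

const secondS3 = new aws.S3({
  accessKeyId: SECOND_S3_API_KEY,         // placeholder names; the real values
  secretAccessKey: SECOND_S3_API_SECRET,  // come from the second configuration
  region: SECOND_S3_REGION,
  signatureVersion: 'v4',
});

const uploadToSecondS3 = (stream) => new Promise((resolve, reject) => {
  const uploadParams = {
    Bucket: SECOND_S3_BUCKET_NAME,
    Key: 'some-key',
    Body: stream,
  };
  secondS3.upload(uploadParams, (err) => {
    if (err) return reject(err);
    resolve(true);
  });
});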
This setup (based on the aws-sdk package) works fine. My issue is that I want it to run with the @aws-sdk/client-s3 package instead, in order to reduce the size of the project.
What doesn't work:
I first tried to use S3Client.send(PutObjectCommand):
const { S3Client, PutObjectCommand } = require('@aws-sdk/client-s3');

const s3 = new S3Client({
  credentials: {
    accessKeyId: S3_API_KEY,
    secretAccessKey: S3_API_SECRET,
  },
  region: S3_REGION,
  signatureVersion: 'v4',
});

const uploadToFirstS3 = (stream) => new Promise((resolve, reject) => {
  const uploadParams = {
    Bucket: S3_BUCKET_NAME,
    Key: 'some-key',
    Body: stream,
  };
  s3.send(new PutObjectCommand(uploadParams), (err) => {
    if (err) return reject(err);
    resolve(true);
  });
});
Then I tried S3.putObject(PutObjectCommandInput):
const { S3 } = require('@aws-sdk/client-s3');

const s3 = new S3({
  credentials: {
    accessKeyId: S3_API_KEY,
    secretAccessKey: S3_API_SECRET,
  },
  region: S3_REGION,
  signatureVersion: 'v4',
});

const uploadToFirstS3 = (stream) => new Promise((resolve, reject) => {
  const uploadParams = {
    Bucket: S3_BUCKET_NAME,
    Key: 'some-key',
    Body: stream,
  };
  s3.putObject(uploadParams, (err) => {
    if (err) return reject(err);
    resolve(true);
  });
});
The last two examples both give me a 501 - Not Implemented error mentioning the Transfer-Encoding header. I checked req.headers and there is no Transfer-Encoding in it, so I guess the SDK adds it to the request it sends to S3?
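For reference, here is a sketch of how the headers the v3 client builds could be inspected through its middleware stack (a debugging aid only, not part of my upload code; headers added later by Node's http layer, such as a chunked Transfer-Encoding, might not show up at this step):

// Debugging sketch: log the headers the SDK has built right before the
// request is signed and sent. The middleware name is arbitrary.
s3.middlewareStack.add(
  (next) => async (args) => {
    console.log(args.request.headers); // outgoing request headers at this step
    return next(args);
  },
  { step: 'finalizeRequest', name: 'logOutgoingHeaders' }
);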
Since the first example (based on aws-sdk) works fine, I'm sure the error is not due to an empty body in the request as suggested in this SO thread.
Still, I thought maybe the stream wasn't readable yet when the upload was triggered, so I wrapped the calls to uploadToFirstS3 and uploadToSecondS3 in a callback triggered by the req.on('readable', callback) event, but nothing changed.
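Roughly what that wrapping looked like (a condensed sketch that slots into the handler above in place of the plain await Promise.all; I use once instead of on here so the uploads are only started a single time):

// Defer the uploads until the request stream reports it has data to read.
await new Promise((resolve, reject) => {
  req.once('readable', () => {
    Promise.all([
      uploadToFirstS3(firstS3Stream),
      uploadToSecondS3(secondS3Stream),
    ]).then(resolve, reject);
  });
});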
I would like to process the file in memory, without ever storing it on disk. Is there a way to achieve this using the @aws-sdk/client-s3 package?