gpt4 book ai didi

javascript - AWS SDK 文件通过 Node/Express 使用流 PassThrough 上传到 S3 - 文件总是损坏

转载 作者:行者123 更新时间:2023-12-03 12:12:33 25 4
gpt4 key购买 nike

这很简单。使用此代码,任何上传的图像文件都已损坏且无法打开。 PDF 看起来不错,但我注意到它正在将值注入(inject)基于文本的文件中。这是 s3 中正确的文件大小,而不是像出了问题那样为零。我不确定这是否是 Express、SDK 或两者兼而有之的问题?是 postman 吗?我在今年 3 月的一个工作项目中构建了类似的东西,它完美无缺。我不再有权访问该代码进行比较。
没有错误,没有任何问题的迹象。

const aws = require("aws-sdk");
const stream = require("stream");
const express = require("express");
const router = express.Router();

// AWS credentials and bucket settings.
// NOTE(review): credentials are hard-coded for illustration only — real code
// should load them from the environment or an AWS credentials provider.
const AWS_ACCESS_KEY_ID = "XXXXXXXXXXXXXXXXXXXX";
const AWS_SECRET_ACCESS_KEY = "superSecretAccessKey";
const BUCKET_NAME = "my-bucket";
const BUCKET_REGION = "us-east-1";

// S3 client shared by the upload helpers below.
const s3 = new aws.S3({
region: BUCKET_REGION,
accessKeyId: AWS_ACCESS_KEY_ID,
secretAccessKey: AWS_SECRET_ACCESS_KEY
});

// Start a streaming S3 upload keyed by `key`. Returns the writable side
// (streamPass) to pipe data into, and a promise (streamPromise) that settles
// when the managed upload finishes.
const uploadStream = key => {
  const passThrough = new stream.PassThrough();
  const uploadPromise = s3
    .upload({ Bucket: BUCKET_NAME, Key: key, Body: passThrough }, (err, data) => {
      if (err) {
        console.error("ERROR: uploadStream:", err);
        return;
      }
      console.log("INFO: uploadStream:", data);
    })
    .promise();
  return { streamPass: passThrough, streamPromise: uploadPromise };
};

// POST /upload — streams the raw request body straight to S3.
// NOTE(review): this is the buggy version discussed in the article. When the
// client sends multipart/form-data (as Postman does here), `req` is the raw
// HTTP body stream, so the multipart boundary lines and part headers are
// written into the S3 object verbatim — exactly the corruption shown in the
// bucket sample below. The body must be parsed (e.g. with Busboy/Multer)
// before uploading; see the accepted answer further down.
router.post("/upload", async (req, res) => {
try {
// Object key comes from the query string: /upload?file_name=<key>
let key = req.query.file_name;
let { streamPass, streamPromise } = uploadStream(key);
// Pipes the UNPARSED request (multipart framing included) into S3.
req.pipe(streamPass);
await streamPromise;
res.status(200).send({ result: "Success!" });
} catch (e) {
res.status(500).send({ result: "Fail!" });
}
});

module.exports = router;
这是我的 package.json:
{
"name": "expresss3streampass",
"version": "0.0.0",
"private": true,
"scripts": {
"start": "node ./bin/www"
},
"dependencies": {
"aws-sdk": "^2.812.0",
"cookie-parser": "~1.4.4",
"debug": "~2.6.9",
"express": "~4.16.1",
"morgan": "~1.9.1"
}
}
更新 :
经过进一步测试,我注意到 Postman 正在更改纯文本文件。例如,这个源文件:
{
"question_id": null,
"position_type_id": 1,
"question_category_id": 1,
"position_level_id": 1,
"question": "Do you test your code before calling it \"done\"?",
"answer": "Candidate should respond that they at least happy path test every feature and bug fix they write.",
"active": 1
}
...它落入桶中后看起来像这样:
----------------------------472518836063077482836177
Content-Disposition: form-data; name="file"; filename="question.json"
Content-Type: application/json

{
"question_id": null,
"position_type_id": 1,
"question_category_id": 1,
"position_level_id": 1,
"question": "Do you test your code before calling it \"done\"?",
"answer": "Candidate should respond that they at least happy path test every feature and bug fix they write.",
"active": 1
}
----------------------------472518836063077482836177--
我不得不认为这是问题所在。 postman 是这个等式中唯一改变的东西,从这段代码第一次为我工作起。我的请求 header 如下所示:
enter image description here
我是最初添加“application/x-www-form-urlencoded” header 的人。如果我现在使用它,我最终会在存储桶中得到一个 0 字节的文件。

最佳答案

Multer是要走的路。
它提供了几种不同的模式,但据我所知,您必须编写自定义的 storage handler 才能访问底层流,否则它会把所有数据缓冲在内存中,并且只在全部完成后才触发回调。
如果您在路由处理程序中查看 req.file,会发现 Multer 通常会提供一个 buffer 字段,但现在它不再存在——因为我没有在回调中传回任何数据——所以我有理由相信文件确实是按预期流式传输的。
下面是一个可行的解决方案。
注:parse.single('image')被传递到路由处理程序中。这是指我使用的多部分字段名称。

const aws = require('aws-sdk');
const stream = require('stream');
const express = require('express');
const router = express.Router();
const multer = require('multer')

// AWS credentials and bucket settings.
// NOTE(review): hard-coded here for the example only — prefer environment
// variables or an AWS credentials provider in production code.
const AWS_ACCESS_KEY_ID = "XXXXXXXXXXXXXXXXXXXX";
const AWS_SECRET_ACCESS_KEY = "superSecretAccessKey";
const BUCKET_NAME = "my-bucket";
const BUCKET_REGION = "us-east-1";

// S3 client used by uploadStream below.
const s3 = new aws.S3({
region: BUCKET_REGION,
accessKeyId: AWS_ACCESS_KEY_ID,
secretAccessKey: AWS_SECRET_ACCESS_KEY
});

// Begin a streaming upload to S3 for the given object key. The caller writes
// into `streamPass` and awaits `streamPromise` for completion.
const uploadStream = key => {
  const body = new stream.PassThrough();
  const params = { Bucket: BUCKET_NAME, Key: key, Body: body };
  const done = s3.upload(params, (err, data) => {
    err
      ? console.error('ERROR: uploadStream:', err)
      : console.log('INFO: uploadStream:', data);
  }).promise();
  return { streamPass: body, streamPromise: done };
};

// Minimal custom Multer storage engine that pipes the incoming file stream
// straight into a streaming S3 upload instead of buffering it in memory.
// Multer's storage-engine contract requires both _handleFile and _removeFile.
class CustomStorage {
  // Called by Multer for each incoming file part. Must invoke cb exactly
  // once, with (err) on failure or (null, fileInfo) on success.
  _handleFile(req, file, cb) {
    const key = req.query.file_name;
    const { streamPass, streamPromise } = uploadStream(key);
    file.stream.pipe(streamPass);
    // BUG FIX: the original never invoked cb on failure, so a failed S3
    // upload left the request hanging forever. Propagate errors to Multer.
    streamPromise
      .then(() => cb(null, {}))
      .catch(err => cb(err));
  }

  // Required by the Multer storage-engine contract; nothing to clean up
  // locally because the data was streamed to S3 rather than written to disk.
  _removeFile(req, file, cb) {
    cb(null);
  }
}

// Wire the custom storage engine into Multer.
const storage = new CustomStorage();
const parse = multer({storage});

// 'image' must match the multipart field name the client uses.
// By the time the handler runs, CustomStorage has already streamed the file
// to S3 (Multer executes as middleware before the route body).
router.post('/upload', parse.single('image'), async (req, res) => {
try {
res.status(200).send({ result: 'Success!' });
} catch (e) {
console.log(e)
res.status(500).send({ result: 'Fail!' });
}
});

module.exports = router;
更新:更好的解决方案
我上面提供的基于 Multer 的解决方案有点 hacky,所以我查看了它的底层实现(how it worked)。这个解决方案仅使用 Busboy 来解析并流式传输文件。Multer 实际上只是一个在其之上附加了一些磁盘 I/O 便利功能的包装器。
const aws = require('aws-sdk');
const express = require('express');
const Busboy = require('busboy');
const router = express.Router();

// AWS credentials and bucket settings.
// NOTE(review): hard-coded for the example — use environment variables or an
// AWS credentials provider in real deployments.
const AWS_ACCESS_KEY_ID = "XXXXXXXXXXXXXXXXXXXX";
const AWS_SECRET_ACCESS_KEY = "superSecretAccessKey";
const BUCKET_NAME = "my-bucket";
const BUCKET_REGION = "us-east-1";

// S3 client used by multipart() below.
const s3 = new aws.S3({
region: BUCKET_REGION,
accessKeyId: AWS_ACCESS_KEY_ID,
secretAccessKey: AWS_SECRET_ACCESS_KEY
});

// Parse a multipart/form-data request with Busboy and stream each file part
// directly to S3 without buffering it in memory. Resolves when the first
// file part finishes uploading; rejects on parser or upload errors.
function multipart(request){
  return new Promise((resolve, reject) => {
    // BUG FIX: dropped the `async` promise executor (anti-pattern — nothing
    // was awaited, and a thrown error would not reject the promise).
    const headers = request.headers;
    const busboy = new Busboy({ headers });
    // you may need to add cleanup logic using 'busboy.on' events
    busboy.on('error', err => reject(err));
    busboy.on('file', function (fieldName, fileStream, fileName, encoding, mimeType) {
      const params = {
        Bucket: BUCKET_NAME,
        Key: fileName,
        Body: fileStream
      };
      // BUG FIX: the original only handled success — a failed upload left
      // the promise pending forever. Reject on upload errors as well.
      s3.upload(params).promise().then(() => resolve(), reject);
    });
    request.pipe(busboy);
  });
}

// POST /upload — hand the raw request to the Busboy-based multipart parser,
// which streams the file to S3, then report success or failure to the client.
router.post('/upload', (req, res) => {
  multipart(req)
    .then(() => res.status(200).send({ result: 'Success!' }))
    .catch((err) => {
      console.log(err);
      res.status(500).send({ result: 'Fail!' });
    });
});

module.exports = router;

关于javascript - AWS SDK 文件通过 Node/Express 使用流 PassThrough 上传到 S3 - 文件总是损坏,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/65362731/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com