MalformedXML:您提供的XML格式不正确或未针对我们发布的架构进行验证

时间:2017-08-17 05:30:26

标签: javascript node.js amazon-web-services amazon-s3

使用AWS S3时,我遇到了这个奇怪的问题。我正在开发应用程序,通过它我可以将图像存储到AWS桶。使用Multer作为中间件和S3FS库连接并上传到AWS。

但是当我尝试上传内容时会弹出以下错误。

"MalformedXML:您提供的 XML 格式不正确,或未通过我们发布的架构(published schema)验证"

Index.js

// Module setup: Express app, Multer upload middleware, and the s3fs client.
var express = require('express');
// NOTE(review): express() creates a full application, not a router.
// Since this module is exported and presumably mounted from app.js,
// express.Router() is the conventional choice — confirm how app.js uses it.
var router = express();
var multer = require('multer');
var fs = require('fs');
var S3FS = require('s3fs');
// Multer first stores each incoming file on local disk under ./uploads;
// the S3 upload happens afterwards from that temp file.
var upload = multer({
  dest: 'uploads'
})
// SECURITY: credentials are hard-coded here. Move them to environment
// variables or an AWS credentials profile before committing this file.
var S3fsImpl = new S3FS('bucket-name', {
  region: 'us-east-1',
  accessKeyId: 'XXXXXXXXXXXX',
  secretAccessKey: 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
});

/* GET home page — renders the "profile" view. */
router.get('/', function (req, res, next) {
  var viewModel = {
    title: 'Express'
  };
  res.render('profile', viewModel);
});

// POST /testupload — receives one multipart file (field "file"), streams it
// to S3, deletes the local temp copy, then redirects to /profile.
//
// BUG FIX: the original passed file.name as the S3 key, but a Multer file
// object has no `name` property (the logged object shows `originalname`,
// `filename`, `path`, ... only). An undefined key is what produced the
// MalformedXML error from S3. Use file.originalname instead.
router.post('/testupload', upload.single('file'), function (req, res) {
  var file = req.file;
  console.log(file);

  var path = req.file.path;
  var stream = fs.createReadStream(path);
  console.log(stream);

  S3fsImpl.writeFile(file.originalname, stream).then(function () {
    // Upload succeeded: remove the local temp file (best-effort cleanup).
    fs.unlink(file.path, function (err) {
      if (err) {
        console.log(err);
      }
    });
    res.redirect('/profile');
  }).catch(function (err) {
    // FIX: the original had no rejection handler, so a failed upload left
    // the request hanging and the rejection unhandled.
    console.log(err);
    res.status(500).send('Upload to S3 failed');
  });
});

module.exports = router;

编辑:输出

{ fieldname: 'file',
  originalname: '441_1.docx',
  encoding: '7bit',
  mimetype: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
  destination: 'uploads',
  filename: '662dcbe544804e4f50dfef1f52b40d22',
  path: 'uploads\\662dcbe544804e4f50dfef1f52b40d22',
  size: 13938 }
ReadStream {
  _readableState:
   ReadableState {
     objectMode: false,
     highWaterMark: 65536,
     buffer: BufferList { head: null, tail: null, length: 0 },
     length: 0,
     pipes: null,
     pipesCount: 0,
     flowing: null,
     ended: false,
     endEmitted: false,
     reading: false,
     sync: true,
     needReadable: false,
     emittedReadable: false,
     readableListening: false,
     resumeScheduled: false,
     defaultEncoding: 'utf8',
     ranOut: false,
     awaitDrain: 0,
     readingMore: false,
     decoder: null,
     encoding: null },
  readable: true,
  domain: null,
  _events: { end: [Function] },
  _eventsCount: 1,
  _maxListeners: undefined,
  path: 'uploads\\662dcbe544804e4f50dfef1f52b40d22',
  fd: null,
  flags: 'r',
  mode: 438,
  start: undefined,
  end: undefined,
  autoClose: true,
  pos: undefined,
  bytesRead: 0 }

package.json

{
  "name": "aws-s3-images",
  "version": "1.0.0",
  "private": true,
  "scripts": {
    "start": "node ./bin/www"
  },
  "dependencies": {
    "body-parser": "~1.17.1",
    "connect-multiparty": "^2.0.0",
    "cookie-parser": "~1.4.3",
    "debug": "~2.6.3",
    "express": "~4.15.2",
    "hbs": "~4.0.1",
    "morgan": "~1.8.1",
    "multer": "^1.3.0",
    "s3fs": "^2.5.0",
    "serve-favicon": "~2.4.2"
  },
  "description": "AWS S3 uploading images",
  "main": "app.js",
  "devDependencies": {},
  "keywords": [
    "javascript"
  ],
  "author": "reeversedev",
  "license": "MIT"
}

5 个答案:

答案 0 :(得分:2)

S3将每个DeleteObjectsRequest的文件删除限制为1000。因此,在获取所有KeyVersions列表之后,我检查了密钥是否大于1000,然后将该列表划分为子列表,然后将其传递给具有以下子列表的DeleteObjectsRequest,如下所示:

// S3's DeleteObjects API accepts at most 1000 keys per request, so the key
// list is split into chunks of 1000 and one request is issued per chunk.
if (keys.size() > 1000) {
            int count = 0;
            // ListUtils.partition (Apache Commons Collections) yields
            // consecutive sublists of at most 1000 entries each.
            List<List> partition = ListUtils.partition(keys, 1000);
            for (List list : partition) {
                count = count + list.size();
                DeleteObjectsRequest request = new DeleteObjectsRequest(
                        fileSystemConfiguration.getTrackingS3BucketName()).withKeys(list);
                amazonS3Client.deleteObjects(request);
                logger.info("Deleted the completed directory files " + list.size() + " from folder "
                        + eventSpecificS3bucket);
            }
            logger.info("Deleted the total directory files " + count + " from folder " + eventSpecificS3bucket);
        } else {
            // 1000 keys or fewer: a single delete request suffices.
            DeleteObjectsRequest request = new DeleteObjectsRequest(
                    fileSystemConfiguration.getTrackingS3BucketName()).withKeys(keys);
            amazonS3Client.deleteObjects(request);
            logger.info("Deleted the completed directory files from folder " + eventSpecificS3bucket);
        }

答案 1 :(得分:1)

使用AmplifyJS库时出现此问题。遵循AWS主页中有关Multipart upload overview的文档:

每当上传一个分段(part)时,Amazon S3 都会在响应中返回 ETag 标头。对于每个上传的分段,您必须记录其分段编号和 ETag 值。在后续完成分段上传(complete multipart upload)的请求中,您需要包含这些值。

但是S3默认配置不执行此操作。只需转到“权限”选项卡-> 将<ExposeHeader>ETag</ExposeHeader>添加到CORS配置中。 https://github.com/aws-amplify/amplify-js/issues/61

答案 2 :(得分:0)

此代码应该适合您。你需要记住: 1)使用唯一的桶名称 2)在你的文件对象上使用 'originalname' 而不是 'name' <- 'name' 这个属性并不存在

// POST /testupload — uploads the first received file to S3 under its
// original client-side filename, then redirects to /index.
app.post('/testupload', function (req, res) {

    var file = req.files[0];

    console.log(file.path);
    console.log('FIRST TEST: ' + JSON.stringify(file));

    var stream = fs.createReadStream(file.path);

    // Use `originalname`: the uploaded file object has no `name` property,
    // and an undefined S3 key is what triggers the MalformedXML error.
    S3fsImpl.writeFile(file.originalname, stream).then(
      function ()
      {
        console.log('File has been sent - OK');
        // FIX: redirect only after the upload finishes — the original
        // redirected immediately, before the promise settled.
        res.redirect('/index');
      },
      function (reason)
      {
        // FIX: `throw reason` inside a rejection handler only produces an
        // unhandled rejection; report the failure to the client instead.
        console.log(reason);
        res.status(500).send('Upload to S3 failed');
      }
    );

});

答案 3 :(得分:0)

据我所知,只需交叉检查“桶名”即可。

final PutObjectRequest putObjectRequest = new PutObjectRequest(**bucketName**, accessKeyId, is ,meta); 

答案 4 :(得分:-2)

你可以尝试这段代码:

// s3fs client for the target bucket.
// SECURITY: do not hard-code credentials; load them from the environment
// or an AWS credentials profile.
var S3fsImpl = new S3FS('bucket-name', {
  region: 'us-east-1',
  accessKeyId: 'XXXXXXXXXXXX',
  secretAccessKey: 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
});

// FIX: a Multer/multipart file object exposes `originalname`, not `name`.
var fsImplStyles = S3fsImpl.getPath(file.originalname);

// Build the public object URL.
// FIX: for us-east-1 the endpoint is "s3.amazonaws.com" — the dashed
// "s3-<region>" host form was never valid for us-east-1. For other regions
// use "https://s3-<region>.amazonaws.com/" (or "s3.<region>.amazonaws.com").
var url = 'https://s3.amazonaws.com/' + fsImplStyles;

如果此代码适合您,请发送反馈。