sendfile() failed (32: Broken pipe) while sending request to upstream, request: "POST

Time: 2018-09-27 15:27:43

Tags: javascript nginx meteor amazon-ec2 passenger

I'm running into a problem uploading files in production with Meteor behind Nginx + Passenger. I'm using Meteor-Files for the uploads. Everything works fine in development, but in production the upload fails and I get this error in the browser console:

POST http://my-url/ net::ERR_INCOMPLETE_CHUNKED_ENCODING 200 (OK)

In my Passenger log file I found an error saying the application session connection is not being kept alive because the application did not allow it. Here is the log:

    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/HttpChunkedBodyParser.h:183 ]: [Client 63] ChunkedBodyParser: parsing new chunk
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/HttpChunkedBodyParser.h:123 ]: [Client 63] ChunkedBodyParser: chunk size determined: 982 bytes
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/HttpChunkedBodyParser.h:162 ]: [Client 63] ChunkedBodyParser: parsing 982 of 982 bytes of remaining chunk data; 0 now remaining
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/FileBufferedChannel.h:1416 ]: [FBC 0x7f71e801b670] Feeding 982 bytes
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/FileBufferedChannel.h:486 ]: [FBC 0x7f71e801b670] pushBuffer() completed: nbuffers = 1, bytesBuffered = 982
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/FileBufferedChannel.h:554 ]: [FBC 0x7f71e801b670] Reader: reading next
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/FileBufferedChannel.h:586 ]: [FBC 0x7f71e801b670] Reader: found buffer, 982 bytes
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/FileBufferedChannel.h:493 ]: [FBC 0x7f71e801b670] popBuffer() completed: nbuffers = 0, bytesBuffered = 0
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/FileBufferedChannel.h:594 ]: [FBC 0x7f71e801b670] Reader: feeding buffer, 982 bytes
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/FileBufferedChannel.h:554 ]: [FBC 0x7f71e801b670] Reader: reading next
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/FileBufferedChannel.h:561 ]: [FBC 0x7f71e801b670] Reader: no more buffers. Transitioning to RS_INACTIVE
    [ D3 2018-09-27 16:53:44.2194 2500/Ta Ser/FileBufferedChannel.h:539 ]: [FBC 0x7f71e801b670] Calling dataFlushedCallback
    [ D3 2018-09-27 16:53:44.2194 2500/Ta age/Cor/Con/ForwardResponse.cpp:64 ]: [Client 2-63] Event: onAppSourceData
    [ D3 2018-09-27 16:53:44.2194 2500/Ta age/Cor/Con/ForwardResponse.cpp:206 ]: [Client 2-63] Processing 7 bytes of application data: "\r\n0\r\n\r\n"
    [ D3 2018-09-27 16:53:44.2195 2500/Ta Ser/HttpChunkedBodyParser.h:248 ]: [Client 63] ChunkedBodyParser: done parsing a chunk
    [ D3 2018-09-27 16:53:44.2195 2500/Ta Ser/HttpChunkedBodyParser.h:183 ]: [Client 63] ChunkedBodyParser: parsing new chunk
    [ D3 2018-09-27 16:53:44.2195 2500/Ta Ser/HttpChunkedBodyParser.h:123 ]: [Client 63] ChunkedBodyParser: chunk size determined: 0 bytes
    [ D3 2018-09-27 16:53:44.2195 2500/Ta Ser/HttpChunkedBodyParser.h:162 ]: [Client 63] ChunkedBodyParser: parsing 0 of 0 bytes of remaining chunk data; 0 now remaining
    [ D3 2018-09-27 16:53:44.2195 2500/Ta Ser/HttpChunkedBodyParser.h:164 ]: [Client 63] ChunkedBodyParser: end chunk detected
    [ D3 2018-09-27 16:53:44.2195 2500/Ta Ser/HttpChunkedBodyParser.h:267 ]: [Client 63] ChunkedBodyParser: end chunk reached
    [ D2 2018-09-27 16:53:44.2195 2500/Ta age/Cor/Con/ForwardResponse.cpp:224 ]: [Client 2-63] End of application response body reached
    [ D2 2018-09-27 16:53:44.2195 2500/Ta age/Cor/Con/ForwardResponse.cpp:1077 ]: [Client 2-63] Not keep-aliving application session connection because application did not allow it
    [ D3 2018-09-27 16:53:44.2195 2500/Ta age/Cor/App/Socket.h:201 ]: Socket unix:/tmp/passenger.toEIX2t/apps.s/node.1si9u5: connection not checked back into connection pool. There are now 2 connections in total
    [ D2 2018-09-27 16:53:44.2195 2500/Ta age/Cor/App/Gro/SessionManagement.cpp:150 ]: Session closed for process (pid=2519, group=/var/www/hmn/bundle (production))

  • Meteor 1.6.1
  • Meteor-Files 1.9.11
  • Ubuntu 16.04
  • nginx 1.14.0
  • Passenger 5.3.5
  • AWS

Here is my Nginx configuration file:

    user www-data;
    worker_processes  1;

    events {
        worker_connections  1024;
    }


    http {
    include /etc/nginx/mime.types;
    default_type  application/octet-stream;

    sendfile        on;
    # tcp_nopush     on;
    # tcp_nodelay on;
    server_tokens off;

    keepalive_timeout 65;
    types_hash_max_size 2048;

    server_names_hash_bucket_size 64;

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    gzip  on;
    gzip_disable "MSIE [1-6]\.";

    gzip_proxied any;
    gzip_http_version 1.0;
    gzip_min_length 500;
    gzip_types    text/plain text/xml text/css
                  text/comma-separated-values
                  text/javascript
                  application/x-javascript
                  application/atom+xml;

    include /etc/nginx/passenger.conf;
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
    }

    # sites-enabled/* file
    server {
        listen 80 default_server;
        listen [::]:80 default_server;
        server_name _;
        return 301 https://$host$request_uri;
    }
    server{
        listen 443 default_server ssl;
        listen [::]:443 default_server ssl;
        ssl on;
        ssl_certificate    /etc/ssl/mydomain.pem;
        ssl_certificate_key    /etc/ssl/mydomain.key;
        ssl_dhparam /etc/ssl/dhparam.pem;
        server_name xxx.xxx.xxx.xxx;
        passenger_enabled on;
        passenger_sticky_sessions on;
        root /var/www/my_app/bundle/public;
        passenger_app_type node;
        passenger_startup_file main.js;
        passenger_env_var MONGO_URL mongodb://some_shard_urls;
        passenger_env_var ROOT_URL https://xxx.xxx.xxx.xxx;
        passenger_env_var MONGO_OPLOG_URL: mongodb://some_shard_urls;
        keepalive_timeout  1000;
        ssl_session_timeout 1d;
        ssl_session_cache shared:SSL:50m;
        ssl_session_tickets off;
        ssl_prefer_server_ciphers on;
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GC$
        ssl_stapling on;
        ssl_stapling_verify on;

        add_header Strict-Transport-Security "max-age=31536000;";
        access_log  /var/log/my_app/access.log;
        error_log   /var/log/my_app/error.log;

        proxy_send_timeout 60s;

        location / {
                proxy_set_header Connection "";
                proxy_http_version 1.1;
                proxy_redirect     off;
                client_max_body_size 100M;
                if ($uri != '/') {
                    expires 30d;
                }
                break;
        }
}

I'm hosting the files on AWS S3, and Meteor-Files has built-in S3 integration. Here is my sample code:

 const ProductAssets = new FilesCollection({
    debug: false,
    collectionName: 'collection_name',
    allowClientCode: false,
    storagePath: './products',
    permissions: 0777,
    // debug: true,
    chunkSize: 'dynamic',
    parentDirPermissions: 0777,
    onBeforeUpload: function(file) {
        if (/png|jpe?g/i.test(file.extension)) {
            return true;
        }
    },
    onAfterUpload(fileRef) {
        Meteor.call('upload.to.s3', fileRef)
    },
});
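
For completeness, the POST that fails is triggered from the client through the Meteor-Files insert API. This is only a minimal sketch of that call (the input wiring and callback names here are illustrative, not my exact code):

    import { FilesCollection } from 'meteor/ostrio:files';

    // Client-side handle to the same collection (same collectionName as on the server)
    const ProductAssets = new FilesCollection({ collectionName: 'collection_name' });

    // Called with a File object from an <input type="file"> change event
    function uploadFile(file) {
        const upload = ProductAssets.insert({
            file: file,
            chunkSize: 'dynamic'   // matches the server-side setting
        }, false);                 // false = do not start the upload immediately

        upload.on('end', (error, fileObj) => {
            if (error) {
                console.error('Upload failed:', error);
            } else {
                console.log('Uploaded:', fileObj.name);
            }
        });

        upload.start();
    }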

// upload to s3 method
'upload.to.s3': function(fileRef) {
    _.each(fileRef.versions, async (vRef, version) => {
        const filePath = 'products/' + fileRef._id + '/' + version + '-' + fileRef._id + '.' + fileRef.extension;
        await s3.putObject({
            // ServerSideEncryption: 'AES256',
            StorageClass: 'STANDARD',
            Bucket: bucket,
            Key: filePath,
            Body: fs.createReadStream(vRef.path),
            ContentType: vRef.type
        }, (error, data) => {
            bound(() => {
                if (error) {
                    console.error(error);
                } else {
                    // Update FilesCollection with link to the file at AWS
                    const upd = { $set: {} };
                    upd['$set']['versions.' + version + '.meta.pipePath'] = filePath;

                    ProductAssets.collection.update({
                        _id: fileRef._id
                    }, upd, (updError) => {
                        if (updError) {
                            console.error(updError);
                        } else {
                            // Unlink original files from FS after successful upload to AWS:S3
                            ProductAssets.unlink(ProductAssets.collection.findOne(fileRef._id), version);
                        }
                    });
                }
            })
        });
    })
},
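
The 'upload.to.s3' method above is registered with Meteor.methods and relies on a few server-side helpers that are not shown here: s3, bucket, fs, bound, and _. A minimal sketch of that setup, roughly following the Meteor-Files AWS S3 integration example (credentials, region, and bucket name are placeholders):

    import { Meteor } from 'meteor/meteor';
    import { _ } from 'meteor/underscore';   // provides _.each
    import fs from 'fs';
    import S3 from 'aws-sdk/clients/s3';

    // Placeholder credentials; the real values come from the server environment
    const s3 = new S3({
        secretAccessKey: 'xxx',
        accessKeyId: 'xxx',
        region: 'us-east-1',
        sslEnabled: true
    });
    const bucket = 'my-bucket';

    // Re-enter the Meteor environment from inside the aws-sdk callback
    const bound = Meteor.bindEnvironment((callback) => callback());

    Meteor.methods({
        'upload.to.s3': function (fileRef) {
            // ...body as shown above...
        },
    });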

As a side note, in production the uploaded files are not saved to the database or to S3. In development the files are saved and uploaded to S3 correctly.

Does anyone know how to fix this and what is going wrong? Thanks!

0 Answers:

No answers yet.