使用Mongoose Schema导入CSV

时间:2018-05-15 05:50:38

标签: javascript node.js mongodb csv mongoose

目前我需要将一个大型CSV文件推送到mongo DB中,并且值的顺序需要确定数据库条目的键:

CSV文件示例:

9,1557,358,286,Mutantville,4368,2358026,,M,0,0,0,1,0
9,1557,359,147,Wroogny,4853,2356061,,D,0,0,0,1,0

将其解析为数组的代码:

// Read rank.txt through the fast-csv parser and print each parsed row.
const fs = require("fs");
const csv = require("fast-csv");

const parser = fs.createReadStream("rank.txt").pipe(csv());

parser.on("data", (row) => {
  console.log(row);
});

parser.on("end", () => {
  console.log("Read Finished");
});

代码输出:

[ '9',
  '1557',
  '358',
  '286',
  'Mutantville',
  '4368',
  '2358026',
  '',
  'M',
  '0',
  '0',
  '0',
  '1',
  '0' ]
[ '9',
  '1557',
  '359',
  '147',
  'Wroogny',
  '4853',
  '2356061',
  '',
  'D',
  '0',
  '0',
  '0',
  '1',
  '0' ]

如何将数组插入我的mongoose模式以进入mongo db?

架构:

const mongoose = require("mongoose");

// One entry per CSV column, in the same order the columns appear in the file.
const rankFields = {
  serverid: Number,
  resetid: Number,
  rank: Number,
  number: Number,
  name: String,
  land: Number,
  networth: Number,
  tag: String,
  gov: String,
  gdi: Number,
  protection: Number,
  vacation: Number,
  alive: Number,
  deleted: Number
};

const rankSchema = new mongoose.Schema(rankFields);

// Export the "Rank" model for use by the import script.
module.exports = mongoose.model("Rank", rankSchema);

数组的顺序需要匹配模式的顺序,例如在数组中第一个数字9需要始终保存,因为它们键入“serverid”,依此类推。我正在使用Node.JS

2 个答案:

答案 0 :(得分:1)

您可以使用fast-csv来做到这一点:从架构定义中获取headers(列名),这样解析器就会把每一行返回为"对象"而不是数组。你的架构实际上与CSV有一些不匹配,所以我在下面用注释标记了更正:

const fs = require('mz/fs');
const csv = require('fast-csv');

const { Schema } = mongoose = require('mongoose');

const uri = 'mongodb://localhost/test';

mongoose.Promise = global.Promise;
mongoose.set('debug', true);

// Schema fields are declared in the same order as the CSV columns;
// the header list passed to fast-csv is derived from this below.
const rankSchema = new Schema({
  serverid: Number,
  resetid: Number,
  rank: Number,
  name: String,
  land: String,         // <-- You have this as Number but it's a string
  networth: Number,
  tag: String,
  stuff: String,        // the empty field in the csv
  gov: String,
  gdi: Number,
  protection: Number,
  vacation: Number,
  alive: Number,
  deleted: Number
});

const Rank = mongoose.model('Rank', rankSchema);

const log = data => console.log(JSON.stringify(data, undefined, 2));

(async function() {

  try {
    const conn = await mongoose.connect(uri);

    // Empty every registered collection so the import starts fresh.
    await Promise.all(Object.entries(conn.models).map(([k,m]) => m.remove()));

    // Schema paths (minus Mongoose's internal keys) become the CSV headers.
    let headers = Object.keys(Rank.schema.paths)
      .filter(k => ['_id','__v'].indexOf(k) === -1);

    console.log(headers);

    await new Promise((resolve,reject) => {

      let buffer = [],
          counter = 0;

      let stream = fs.createReadStream('input.csv')
        .pipe(csv({ headers }))
        .on("error", reject)
        .on("data", async doc => {
          // Pause so the buffer cannot be clobbered while awaiting a write.
          stream.pause();
          buffer.push(doc);
          counter++;
          log(doc);
          try {
            // Bulk-write once the batch threshold is crossed.
            if ( counter > 10000 ) {
              await Rank.insertMany(buffer);
              buffer = [];
              counter = 0;
            }
          } catch(e) {
            stream.destroy(e);
          }

          stream.resume();

        })
        .on("end", async () => {
          try {
            // Flush any documents still in the buffer.
            if ( counter > 0 ) {
              await Rank.insertMany(buffer);
              buffer = [];
              counter = 0;
            }
            // BUG FIX: resolve unconditionally. The original called resolve()
            // only inside the `counter > 0` branch, so a row count that was an
            // exact multiple of the batch size left this Promise pending and
            // the process hanging forever.
            resolve();
          } catch(e) {
            stream.destroy(e);
          }
        });

    });


  } catch(e) {
    console.error(e)
  } finally {
    process.exit()
  }


})()

只要架构实际上符合提供的CSV,那么它就没问题。这些是我可以看到的更正,但如果您需要以不同方式对齐实际字段名称,则需要进行调整。但是基本上有一个Number位置有一个String,基本上是一个额外的字段,我假设它是CSV中的空白字段。

一般事情是从模式中获取字段名称数组,并在制作csv解析器实例时将其传递给选项:

// Derive the CSV header names from the schema, skipping Mongoose's
// internal '_id' and '__v' paths.
let headers = Object.keys(Rank.schema.paths)
  .filter(k => ['_id','__v'].indexOf(k) === -1);

// Passing the headers makes fast-csv emit keyed objects instead of arrays.
let stream = fs.createReadStream('input.csv')
  .pipe(csv({ headers }))

一旦这样做,解析器返回的就是一个"对象"而不是数组:

{
  "serverid": "9",
  "resetid": "1557",
  "rank": "358",
  "name": "286",
  "land": "Mutantville",
  "networth": "4368",
  "tag": "2358026",
  "stuff": "",
  "gov": "M",
  "gdi": "0",
  "protection": "0",
  "vacation": "0",
  "alive": "1",
  "deleted": "0"
}

不必担心"类型"问题,因为Mongoose会根据架构自动转换这些值。

其余的逻辑都在data事件的处理程序中。为了获得最大效率,我们使用insertMany()每积累10,000行才写入一次数据库。数据实际如何到达服务器并被处理取决于MongoDB版本,但就单个集合导入的平均字段数而言,10,000应该是内存使用与合理网络请求大小之间相当合理的"权衡"。如有必要,可以把这个数字调小。

重要的部分是把这些回调标记为async函数,并在继续之前await每次insertMany()的结果。此外,我们还需要对每个条目先pause()流、处理完再resume(),否则buffer中待插入的文档可能在实际发送之前就被覆盖。pause()和resume()的作用是对管道施加"背压";否则条目会不断"涌出"并持续触发data事件。

自然地,对10,000个条目的控制要求我们在每次迭代和流完成时检查两者,以便清空缓冲区并将任何剩余的文档发送到服务器。

这确实是你应该做的,因为你当然不希望在data事件的"每一次"迭代都向服务器发起一个异步请求,而且不等待每个请求完成。对于"非常小的文件"不做这种控制也许察觉不到问题,但在任何真实世界的负载下,大量"在途"(尚未完成)的异步调用肯定会耗尽调用堆栈。

仅供参考——package.json中的mz是可选的:它只是我习惯使用的、把标准Node"内置"库现代化并启用Promise的一个库。这段代码当然可以换成内置的fs模块,完全可以互换。

{
  "description": "",
  "main": "index.js",
  "dependencies": {
    "fast-csv": "^2.4.1",
    "mongoose": "^5.1.1",
    "mz": "^2.7.0"
  },
  "keywords": [],
  "author": "",
  "license": "ISC"
}

实际上,对于Node v8.9.x及更高版本,我们甚至可以通过stream-to-iterator模块实现AsyncIterator来简化这一过程。它仍处于Iterator<Promise<T>>模式,但它应该一直运行到Node v10.x变得稳定LTS:

const fs = require('mz/fs');
const csv = require('fast-csv');
const streamToIterator = require('stream-to-iterator');

const { Schema } = mongoose = require('mongoose');

const uri = 'mongodb://localhost/test';

mongoose.Promise = global.Promise;
mongoose.set('debug', true);

// Schema mirrors the CSV column order.
const rankSchema = new Schema({
  serverid: Number,
  resetid: Number,
  rank: Number,
  name: String,
  land: String,
  networth: Number,
  tag: String,
  stuff: String,        // the empty field
  gov: String,
  gdi: Number,
  protection: Number,
  vacation: Number,
  alive: Number,
  deleted: Number
});

const Rank = mongoose.model('Rank', rankSchema);

const log = data => console.log(JSON.stringify(data, undefined, 2));

(async function() {

  try {
    const conn = await mongoose.connect(uri);

    // Empty every registered collection before importing.
    await Promise.all(Object.entries(conn.models).map(([k,m]) => m.remove()));

    // Schema paths (minus Mongoose internals) double as the CSV headers.
    const fieldNames = Object.keys(Rank.schema.paths)
      .filter(path => path !== '_id' && path !== '__v');

    const stream = fs.createReadStream('input.csv')
      .pipe(csv({ headers: fieldNames }));

    // Wrap the stream so rows can be consumed with a plain for loop.
    const iterator = await streamToIterator(stream).init();

    let batch = [];
    let pending = 0;

    for (const rowPromise of iterator) {
      const row = await rowPromise;
      batch.push(row);
      pending++;

      // Bulk-write once the batch threshold is crossed.
      if (pending > 10000) {
        await Rank.insertMany(batch);
        batch = [];
        pending = 0;
      }
    }

    // Flush whatever remains after the stream is exhausted.
    if (pending > 0) {
      await Rank.insertMany(batch);
      batch = [];
      pending = 0;
    }

  } catch(e) {
    console.error(e)
  } finally {
    process.exit()
  }

})()

基本上,所有流的"事件"处理以及暂停和恢复逻辑,都被一个简单的for循环取代了:

// Wrap the stream once; each iteration then yields a Promise of the next row.
const iterator = await streamToIterator(stream).init();

for ( let docPromise of iterator ) {
  let doc = await docPromise;
  // ... The things in the loop
}

轻松多了!当for..await..of在后续Node版本中变得更稳定时,这段代码还能进一步简化。但在上面指定的版本及更高版本上,它运行正常。

答案 1 :(得分:0)

正如@Neil Lunn所说,CSV文件中需要有标题行。

使用 csvtojson 模块的示例。

const csv = require('csvtojson');

// Collect the rows emitted per CSV line (header row required), then insert
// them all in a single Model.create() call.
// NOTE(review): `res`, `Model` and `filePath` come from the surrounding
// Express route handler this snippet was lifted from.
const csvArray = [];
csv()
  .fromFile(filePath) // FIX: `file-path` is not a valid identifier (parses as subtraction)
  .on('json', (jsonObj) => {
    // Keep only the columns we care about.
    csvArray.push({ name: jsonObj.name, id: jsonObj.id });
  })
  .on('done', (error) => {
    if (error) {
      return res.status(500).json({ error });
    }
    Model.create(csvArray)
      .then((result) => {
        return res.status(200).json({ result });
      })
      .catch((err) => {
        // FIX: respond with the caught `err`; the original `{ error }` sent
        // the falsy `done`-handler parameter instead of the insert failure.
        return res.status(500).json({ error: err });
      });
  }); // FIX: removed the stray extra `});` that made the snippet a syntax error