var config = {
    marketingDB: 'localhost:27017/marketing',
    feedbackDB: 'localhost:27017/feedback',
    teleporterDB: 'localhost:27017/teleporter',
    batchSz: 1000
};

var DB = {
    marketingDB: connect(config.marketingDB),
    feedbackDB: connect(config.feedbackDB),
    teleporterDB: connect(config.teleporterDB)
};
function Datasegment() {
    var db = DB.teleporterDB;
    var datasegmentCollectionObjects = {};

    function init() {
        var collectionNames = db.getCollectionNames();
        for (var i in collectionNames) {
            var datasegmentName = collectionNames[i];
            datasegmentCollectionObjects[datasegmentName] = db[datasegmentName];
        }
    }

    this.getCollection = function(dataSegmentName) {
        return datasegmentCollectionObjects[dataSegmentName];
    };

    init();
}
function Template() {
    var db = DB.marketingDB;

    this.findAllTemplateDataSnapshotMap = function() {
        var results = {};
        db.template.find().forEach(function(rec) {
            if (rec.data_snapshot_id) {
                results[rec._id] = rec.data_snapshot_id;
            }
        });
        return results;
    };
}
function Runbook() {
    var db = DB.feedbackDB;
    var datasegment = new Datasegment();
    var template = new Template();

    this.merge = function() {
        var templateIdDatasegmentNameMap = template.findAllTemplateDataSnapshotMap();
        printjson(templateIdDatasegmentNameMap);
        var jobDataSegmentMapping = getJobIdDataSegmentMapping(templateIdDatasegmentNameMap);
        printjson(jobDataSegmentMapping);
        mergeAllTeleporterDataToRunbook(jobDataSegmentMapping);
    };

    function getRunbookCollection(jobId) {
        return db[jobId];
    }

    function getRunbookCollectionWrite(jobId) {
        return writeDB[jobId];
    }

    function getRequestRowIndexInDataSegment(channel, requestId) {
        var lastPos = requestId.lastIndexOf("_");
        var idx = 0;
        if (channel == 'push') {
            var usualReqId = requestId.substring(lastPos + 1, requestId.length - 1);
            var lastToLastPos = usualReqId.lastIndexOf("_");
            idx = usualReqId.substring(lastToLastPos + 1, requestId.length - 1);
        } else {
            idx = requestId.substring(lastPos + 1, requestId.length - 1);
        }
        return idx;
    }

    function getRequestRowInDataSegment(datasegmentCollection, index) {
        return datasegmentCollection.find().skip(index).limit(1).next();
    }

    function mergeTeleporterDataToRunbook(jobId, runbookCollection, datasegmentCollection) {
        if (!runbookCollection || !datasegmentCollection) {
            return; // fall back to do nothing
        }
        try {
            var sz = runbookCollection.find().count();
            var offset = 0;
            while (offset <= sz) {
                var batchData = [];
                runbookCollection.find().skip(offset).limit(config.batchSz).forEach(function(ref) {
                    if (ref.user_id) {
                        // this is already populated. No need
                        throw 100;
                    }
                    var index = getRequestRowIndexInDataSegment(ref.channel, ref.request_id);
                    var datasegmentRow = getRequestRowInDataSegment(datasegmentCollection, index);
                    var info = {};
                    if (datasegmentRow.user_id) {
                        info['user_id'] = datasegmentRow.user_id;
                    }
                    if (ref.channel === 'sms') {
                        if (datasegmentRow.phone) {
                            info['phone'] = datasegmentRow.phone;
                        }
                    }
                    if (ref.channel === 'email') {
                        if (datasegmentRow.email) {
                            info['email'] = datasegmentRow.email;
                        }
                    }
                    //runbookCollection.update({request_id : ref.request_id}, { $set : info });
                    batchData.push({request_id: ref.request_id, info: info});
                });
                for (var i in batchData) {
                    var ref = batchData[i];
                    // this is the line with the error
                    runbookCollection.update({request_id : ref.request_id}, { $set : ref.info });
                }
                offset = offset + batchData.length;
                batchData = [];
            }
        } catch (err) {
            if (err !== 100) throw err;
        }
        printjson("Done with " + jobId);
    }

    function mergeAllTeleporterDataToRunbook(jobDataSegmentMapping) {
        //printjson(jobDataSegmentMapping);
        for (var jobId in jobDataSegmentMapping) {
            var dataSegmentName = jobDataSegmentMapping[jobId];
            var runbookCollection = getRunbookCollection(jobId);
            var datasegmentCollection = datasegment.getCollection(dataSegmentName);
            mergeTeleporterDataToRunbook(jobId, runbookCollection, datasegmentCollection);
        }
    }

    function getJobIdDataSegmentMapping(templateIdDatasegmentMap) {
        var jobDataSegmentMap = {};
        for (var i in templateIdDatasegmentMap) {
            db.dispatcher_jobs.find({templateId : i}).forEach(function(rec) {
                jobDataSegmentMap[rec.jobId] = templateIdDatasegmentMap[i];
            });
        }
        return jobDataSegmentMap;
    }
}
new Runbook().merge();
I am getting an error on the update call. In this setup, mongo is configured as a replica set. There is another database the script has to work against that is a shard. Also, in that environment I will have different IPs for the 3 DBs.
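For reference, the only thing that changes in that environment is the config block at the top of the script: each entry points at a different host. A minimal sketch with hypothetical IPs, using the same connect('host:port/dbname') form the script already uses:

// Sketch only: hypothetical hosts for the replicated/sharded environment.
var config = {
    marketingDB: '10.0.1.10:27017/marketing',    // hypothetical IP
    feedbackDB: '10.0.1.20:27017/feedback',      // hypothetical IP
    teleporterDB: '10.0.1.30:27017/teleporter',  // hypothetical IP
    batchSz: 1000
};
var DB = {
    marketingDB: connect(config.marketingDB),
    feedbackDB: connect(config.feedbackDB),
    teleporterDB: connect(config.teleporterDB)
};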
The script is run with mongo script_name and fails with:
2015-11-23T20:39:12.674+0530 E QUERY TypeError: object is not a function
at Mongo.getDB (src/mongo/shell/mongo.js:41:12)
at Mongo.hasWriteCommands (src/mongo/shell/mongo.js:204:29)
at Mongo.writeMode (src/mongo/shell/mongo.js:244:15)
at DBCollection.update (src/mongo/shell/collection.js:443:26)
at mergeTeleporterDataToRunbook (copy_user_info.mongo:154:31)
at mergeAllTeleporterDataToRunbook (copy_user_info.mongo:173:7)
at Runbook.merge (copy_user_info.mongo:66:5)
at copy_user_info.mongo:190:15
at copy_user_info.mongo:160
failed to load: copy_user_info.mongo
Answer 0 (score: 0)
This is more of a workaround than a solution. I went through Mongo's shell JS code but could not find anything significant. Through trial and error, though, I realized the problem lies in connecting to different databases from the same shell.
Here is everything I tried: a single update works fine as long as no other database connection is open (find calls work regardless). The moment you add another database connection, updates start failing.
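A minimal way to reproduce what I am describing (a sketch with hypothetical database and collection names, using the same connect() helper as the script):

// Sketch: hypothetical names. With only 'a' connected the update works;
// open 'b' as well and the update fails with the stack trace shown above.
var a = connect('localhost:27017/feedback');
var b = connect('localhost:27017/marketing');  // comment this out -> update succeeds

a.some_collection.find().forEach(printjson);                  // reads work either way
a.some_collection.update({_id: 1}, {$set: {user_id: 'u1'}});  // fails while 'b' is open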
In my case, I got around this by splitting the script into its read and write parts and stitching them together with a Python script (ugly), but everyone has deadlines to meet.
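Roughly, the split looked like this (a sketch only; collection names and the 'updates' hand-off are hypothetical, and the stitching that the Python script does is only indicated by comments):

// --- read part (read_phase.mongo) ---------------------------------------
// Connects to all three databases and computes the request_id -> info
// pairs exactly as merge() does, but prints them instead of updating:
//   printjson({job_id: jobId, request_id: ref.request_id, info: info});
// The output is captured to a file by the stitching script.

// --- write part (write_phase.mongo) -------------------------------------
// Connects ONLY to the feedback DB, so every update goes through a single
// connection. 'updates' is built by the stitching script from the read
// part's output and prepended to this file.
var feedback = connect('localhost:27017/feedback');
var updates = [/* filled in by the stitching script */];
for (var i = 0; i < updates.length; i++) {
    var u = updates[i];
    feedback[u.job_id].update({request_id: u.request_id}, {$set: u.info});
}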