# Read the exported Flurry report and fan each session event out into one
# Elasticsearch document per "l" entry, then bulk-index the whole batch.
jsondata = f.read()
dataList = json.loads(jsondata)
projectName = dataList['query']['project']
reportStartTime = dataList['query']['startTime']
reportEndTime = dataList['query']['endTime']
allEventList = dataList['sessionEvents']

count = 0          # running document id for the bulk actions
esbulkData = []    # accumulated bulk actions for helpers.bulk

for event in allEventList:
    ls = event["l"]
    del event["l"]
    for l in ls:
        # Copy the event so each queued action owns an independent dict.
        # Reusing the same mutable `event` would make every previously
        # queued document point at the last assigned "l" value.
        eventCopy = dict(event)
        eventCopy["l"] = [l]
        flurryData = {
            "project": projectName,
            "startTime": reportStartTime,
            "endTime": reportEndTime,
            "sessionEvents": [eventCopy],
        }
        esData = {
            "_index": "fl_ios_prod_feb_bulk",
            "_type": "flurryRawSchema",
            "_id": count,
            "_source": flurryData,
        }
        esbulkData.append(esData)
        # BUG FIX: `count` was never incremented, so every action shared
        # _id 0 and Elasticsearch kept overwriting the same document --
        # the reason doc_count never grew past the expected ceiling.
        count += 1

es = Elasticsearch([ES_URL])
res = helpers.bulk(es, esbulkData)
if res:
    print("Passed")
else:
    print("Failed")
在上面的代码中，一切看起来都正常，但在 Sense 中检查索引时 doc_count 始终不超过 500，似乎有些文档被覆盖或删除了。
请帮帮忙，我已经为此熬了好几个晚上了。
答案 0（得分：0）
好吧，原来是我忘了递增 count——这就是问题所在：所有文档都使用了相同的 _id，导致互相覆盖。