I have a rake task that looks something like this:
require 'open-uri'
require 'csv'
require 'zlib'

namespace :tm do
  task reload: :environment do
    # Download and decompress the gzipped CSV feed, then parse it
    gzipped = open('csv link')
    csv_text = Zlib::GzipReader.new(gzipped).read
    csv = CSV.parse(csv_text, headers: true)
    csv.each do |row|
      next if row[4] == 'logo url' # skip rows still carrying the header labels

      # Pull the raw fields out of the row
      tmdate     = Date.parse(row[10]).strftime('%Y-%m-%d')
      viatmdate  = Date.parse(row[10]).strftime('%d/%m/%Y')
      swtmdate   = row[10]
      tmlocation = row[6].split('at ')[1]
      place      = row[11].split('|')[1]
      place1     = row[11].split('|')[2]
      place2     = row[11].split('|')[3]
      location   = place + ', ' + place1 + ', ' + place2
      tmtime     = row[9]
      text       = row[7].gsub('text', '')
      # Normalise the title: map 'é' to plain 'e', otherwise strip diacritics
      if text.include? 'é'
        eventname = text.gsub('é', 'e')
      else
        eventname = text.gsub(/[ªÀÈÌÒÙàèìòùÁÉÍÓÚáéíóúÂÊÎÔÛâêîôûÃÑÕãñõÄËÏÖÜŸäëïöüÿ]/, '')
      end
      tmname = eventname # the search name uses the same normalisation

      tmnamesplit = text.split(' -')[0] if text.include? ' -'
      tmnamenn    = tmname.gsub(/[^0-9]/i, '') if tmname[/[^0-9]/].present?
      # Look up matching link records, falling back to progressively looser matches
      text2urldb = text2.where('eventtitle ILIKE ? AND eventdoortime = ?', "%#{tmname.gsub(/[\-\:\ ]/, '%')}%", tmdate.to_s).first
      text3urldb = text3.where('product_name ILIKE ? AND delivery_time = ? AND valid_from = ?', "%#{tmname}%", tmtime.to_s, tmdate.to_s).first
      text1urldb = text1.where('product_name ILIKE ? AND specifications = ? AND promotional_text = ?', "%#{tmname}%", viatmdate.to_s, "%#{place}%").first

      if tmnamesplit.present?
        if text1urldb.blank?
          text1urldb = text1.where('product_name ILIKE ? AND specifications = ?', "%#{tmnamesplit}%", viatmdate.to_s).first
        end
        if text3urldb.blank?
          text3urldb = text3.where('product_name ILIKE ? AND delivery_time = ? AND valid_from = ?', "%#{tmnamesplit}%", tmtime.to_s, tmdate.to_s).first
        end
      end

      if text1urldb.blank?
        text1urldb = text1.where('product_name ILIKE ? AND specifications = ? AND promotional_text = ?', "%#{tmname}%", viatmdate.to_s, "%#{location}%").first
        if text1urldb.blank?
          text1urldb = text1.where('product_name ILIKE ? AND specifications = ?', "%#{tmname}%", viatmdate.to_s).first
        end
        if text1urldb.blank?
          text1urldb = text1.where('product_name ILIKE ? AND specifications = ? AND promotional_text = ?', "%#{tmname}%", viatmdate.to_s, "%#{tmlocation}%").first
        end
      end
      if text1urldb.present?
        vurl = text1urldb.merchant_deep_link
        # Pull the sixth alphabetic word (2+ letters) out of the deep link;
        # it becomes the event_type below
        word1 = vurl[/(?:.*?[a-z][a-z]+){5}.*?([a-z][a-z]+)/i, 1]
      end
      gmiurl = text3urldb.merchant_deep_link if text3urldb.present?
      gigurl = text2urldb.eventurl if text2urldb.present?

      # Ask the events API for a result count; retry with the digits-only
      # name and then the split name when the lookup comes back empty
      api  = HTTParty.get(URI.encode('text url' + tmname + '&when_from=' + swtmdate)).parsed_response
      api2 = api&.dig('Paging', 'TotalResultCount')
      if api2.blank?
        newapi = HTTParty.get(URI.encode('texturl' + tmnamenn.to_s + '&when_from=' + swtmdate)).parsed_response
        api2 = newapi&.dig('Paging', 'TotalResultCount')
        if api2.blank?
          apisplit = HTTParty.get(URI.encode('texturl' + tmnamesplit.to_s + '&when_from=' + swtmdate)).parsed_response
          api2 = apisplit&.dig('Paging', 'TotalResultCount')
        end
      end
      swurl = api['Events'].first['SwURL'] if api2.to_i > 0 && api['Events'].present?
      # Create the event, or update it if one already exists
      event = Event.find_by(time: row[9], date: row[10], eventname: eventname, eventvenuename: location)
      if event.present?
        event.update(event_type: word1, text: row[8], eventimage: row[4], textlink: swurl, text1link: vurl, text3url: gmiurl, text2url: gigurl)
      else
        Event.create(time: row[9], date: row[10], event_type: word1, text: row[8], eventimage: row[4], eventname: eventname, eventvenuename: location, textlink: swurl, text1link: vurl, text3url: gmiurl, text2url: gigurl)
      end
    end
  end
end
This has started taking a long time to run (around 2-3 hours), so I'm wondering: would splitting it up speed it up a bit?

What if I pulled text1, text2 and text3 out of here and populated them in the database first, then ran another rake task once everything is fully populated? Hopefully that would speed things up? So if I had one rake task called tm:reload, and then another task afterwards in a different file called ba:reload, how would I go about that? Roughly what I have in mind is sketched below.

I've already gone through and added indexes on several columns, but that hasn't improved it.
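A minimal sketch of the split I mean, with the task bodies elided and the lib/tasks file names made up:

# lib/tasks/tm.rake -- first pass: import the raw CSV rows only,
# without the text1/text2/text3 lookups
namespace :tm do
  task reload: :environment do
    # parse the CSV and create/update Event rows here
  end
end

# lib/tasks/ba.rake -- second pass: fill in the link columns
namespace :ba do
  task reload: :environment do
    Event.find_each do |event|
      # look up text1/text2/text3 for this event and update it
    end
  end
end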
Answer (score 0):
I would do the following:
Assumptions: the file contains no duplicate records, and the records can be processed in any order.
I don't think the database is the bottleneck. It's much more likely to be the HTTP requests you're making. Before you fiddle with adding indexes, I would do some benchmarking to see where the time is actually spent; a rough sketch follows.
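For example, with Ruby's built-in Benchmark module you could time the feed download against a single API lookup (the placeholder URLs and the tmname/swtmdate variables are taken from your question; I haven't run this):

require 'benchmark'

# Time the feed download and decompression once...
csv_time = Benchmark.measure do
  Zlib::GzipReader.new(open('csv link')).read
end

# ...and one representative API call from inside the loop
http_time = Benchmark.measure do
  HTTParty.get(URI.encode('text url' + tmname + '&when_from=' + swtmdate))
end

puts format('CSV download: %.2fs, one API call: %.2fs', csv_time.real, http_time.real)

Multiply the per-call time by the number of rows (times up to three API calls per row) and it should be obvious where the 2-3 hours go.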
Update: something like this (I haven't run the code):
...
csv.each do |row|
  ImportEventJob.perform_later row
end
ActiveJob is the Rails (background) job API. You can configure a backend; I like Sidekiq, but others will work too. For details, see the Active Job guide in the Rails documentation.
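Assuming Sidekiq as the backend, the wiring is roughly this (my sketch, not part of the original answer):

# Gemfile
gem 'sidekiq'

# config/application.rb
config.active_job.queue_adapter = :sidekiq

One caveat: job arguments have to be serializable, and a CSV::Row is not a plain type, so you may need to pass row.to_h from the loop above. Workers then run in a separate process, e.g. bundle exec sidekiq. The job itself could look like this: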
class ImportEventJob < ActiveJob::Base
  def perform(row)
    event_csv = EventCsv.new(row)
    event = Event.find_by(time: event_csv.time, date: event_csv.date, ...)
    if event
      ...
    else
      ...
    end
  end
end
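Since each job makes several HTTP calls, transient network failures become likely once you fan the work out. Assuming Rails 5.1+ (retry_on is my addition, not something the original code does), the job could retry those with backoff:

class ImportEventJob < ActiveJob::Base
  # Retry transient HTTP timeouts with exponential backoff
  retry_on Net::OpenTimeout, Net::ReadTimeout, wait: :exponentially_longer, attempts: 5

  def perform(row)
    # ... as above ...
  end
end

The row parsing itself can live in a small wrapper: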
class EventCsv
  RE_WITH_DIACRITICS = /[ªÀÈÌÒÙàèìòùÁÉÍÓÚáéíóúÂÊÎÔÛâêîôûÃÑÕãñõÄËÏÖÜŸäëïöüÿ]/

  def initialize(row)
    @row = row
  end

  def time
    @row[9]
  end

  ...

  def name
    text = @row[7].gsub('text', '')
    if text.include? 'é'
      text.gsub('é', 'e')
    else
      text.gsub(RE_WITH_DIACRITICS, '')
    end
  end
end
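Since EventCsv only indexes into the row, you can exercise it in isolation with a plain Array, which also makes the parsing logic easy to unit test. A quick check (the field values are made up):

row = Array.new(12)
row[7] = 'textCafé Concert' # the 'text' prefix gets stripped by #name
row[9] = '19:30'

event_csv = EventCsv.new(row)
event_csv.time # => "19:30"
event_csv.name # => "Cafe Concert"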