这是导致问题的脚本函数:
# Fetches an Atom feed at +url+, classifies each entry, and appends the
# accepted ones to +feedbacks+ as rows ready for bulk insertion.
#
# feedbacks       - Array the accepted rows are pushed onto (mutated in place).
# source          - Feedback::BLOG or Feedback::TWITTER; selects which
#                   ignore-filter (URL vs. author) is applied.
# project_id      - id stored with each row; also used to build the
#                   de-duplication key (project_id.to_s + link).
# url             - feed URL to fetch (via open-uri's Kernel#open).
# use_spam_filter - when truthy, entries flagged by @spam_filter are skipped.
#
# Errors are logged and swallowed so one bad feed does not abort the run.
def crawl(feedbacks, source, project_id, url, use_spam_filter)
  @logger.info url
  # NOTE(review): Kernel#open on a URL is deprecated (removed in Ruby 3);
  # prefer URI.open once the runtime is upgraded.
  xml = open(url)
  doc = Nokogiri::HTML(xml, nil, 'UTF-8')
  doc.xpath("//entry").each do |entry|
    title = entry.at("./title").content
    content = entry.at("./content").content
    # Feeds occasionally deliver bytes that are not valid UTF-8.
    # force_encoding only relabels the string; scrub drops the invalid
    # bytes, otherwise gsub below raises "invalid byte sequence in UTF-8".
    content = content.force_encoding('UTF-8').scrub('')
    content = content.gsub(/[^0-9a-z ]/i, '')
    language = @language_detector.detect(content)
    if language != 'en'
      puts "#{language}: #{title}"
      next
    end
    if use_spam_filter && @spam_filter.is_spam?(content)
      puts "spam: #{title}"
      next
    end
    puts "encoding: #{content.encoding.name}"
    polarity, description = @sentiment_classifier.process(content)
    published = Time.zone.parse(entry.at("./published").content)
    link = entry.at("./link[@rel='alternate']")["href"]
    # The image link is optional; look it up nil-safely instead of the
    # old `... rescue nil`, which silently hid every kind of error.
    image_link = entry.at("./link[@rel='image']")
    author_image = image_link && image_link["href"]
    author_name = entry.at("./author/name").content
    author_url = entry.at("./author/uri").content
    if source == Feedback::BLOG && @url_filter.should_ignore(link)
      puts "urlfilter: #{title}"
      next
    elsif source == Feedback::TWITTER && @author_filter.should_ignore(author_name)
      puts "authorfilter: #{title}"
      next
    end
    feedbacks << [project_id, published, title, description, link, polarity, author_image, author_name, author_url, source, project_id.to_s + link]
  end
# Rescue StandardError, not Exception: rescuing Exception also traps
# SignalException/SystemExit and makes the process unkillable.
rescue StandardError => e
  puts e
  puts e.backtrace.join("\n")
  @logger.info e.message
  @logger.info e.backtrace.join("\n")
end
每当抓取工具解析以下网址时，我都会收到 “invalid byte sequence in UTF-8”（UTF-8 中的无效字节序列）错误：
和
http://search.twitter.com/search.atom?q=Goodyear&rpp=100&phrase=goodyear+tires
content.encoding.name 始终显示为 UTF-8，但我无法弄清楚为什么仍然会收到该错误。
答案 0（得分：0）：
要解决此问题，必须重新安装最新版本的 Nokogiri。