I'm using the Python.org 64-bit build of Python 2.7 on 64-bit Windows Vista. I have the following code, which scrapes a named table from a single web page:
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.utils.markup import remove_tags
from scrapy.cmdline import execute
import csv

filepath = "C:\\Python27\\Football Data\\test" + ".txt"

# truncate/create the output file; the with statement closes it
# automatically, so no explicit f.close() is needed
with open(filepath, "w") as f:
    f.write("")
class MySpider(Spider):
    name = "goal2"
    allowed_domains = ["whoscored.com"]
    start_urls = ["http://www.whoscored.com/Players/3859/Fixtures/Wayne-Rooney"]

    def parse(self, response):
        sel = Selector(response)
        titles = sel.xpath("normalize-space(//title)")
        print 'titles:', titles.extract()[0]

        rows = sel.xpath('//table[@id="player-fixture"]//tbody//tr')
        for row in rows:
            print 'date:', "".join(row.css('.date::text').extract()).strip()
            print 'result:', "".join(row.css('.result a::text').extract()).strip()
            print 'team_home:', "".join(row.css('.team.home a::text').extract()).strip()
            print 'team_away:', "".join(row.css('.team.away a::text').extract()).strip()
            print 'info:', "".join(row.css('.info::text').extract()).strip(), "".join(row.css('.info::attr(title)').extract()).strip()
            print 'rating:', "".join(row.css('.rating::text').extract()).strip()
            print 'incidents:', ", ".join(row.css('.incidents-icon::attr(title)').extract()).strip()
            print '-' * 40

            date = "".join(row.css('.date::text').extract()).strip() + ','
            result = "".join(row.css('.result a::text').extract()).strip() + ','
            team_home = "".join(row.css('.team.home a::text').extract()).strip() + ','
            team_away = "".join(row.css('.team.away a::text').extract()).strip() + ','
            info = "".join(row.css('.info::text').extract()).strip() + ','
            rating = "".join(row.css('.rating::text').extract()).strip() + ','
            incident = " ".join(row.css('.incidents-icon::attr(title)').extract()).strip() + ','
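(Side note: the comma-suffixed fields built at the end of parse() are assigned but never written anywhere. A minimal sketch of appending them to the file opened at the top, assuming that was the intent, would add something like this inside the for loop:)

    # append one comma-separated line per fixture row to the output file
    with open(filepath, "a") as f:
        f.write(date + result + team_home + team_away + info + rating + incident + "\n")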
I also have some code that crawls multiple pages of the same site and scrapes the text content of the articles:
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from scrapy.item import Item
from scrapy.spider import BaseSpider
from scrapy import log
from scrapy.cmdline import execute
from scrapy.utils.markup import remove_tags
import time

class ExampleSpider(CrawlSpider):
    name = "goal3"
    allowed_domains = ["whoscored.com"]
    start_urls = ["http://www.whoscored.com/Articles"]
    download_delay = 1

    rules = [Rule(SgmlLinkExtractor(allow=('/Articles',)), follow=True, callback='parse_item')]

    def parse_item(self, response):
        paragraphs = response.selector.xpath("//p").extract()
        text = "".join(remove_tags(paragraph).encode('utf-8') for paragraph in paragraphs)
        print text

execute(['scrapy', 'crawl', 'goal3'])
What I would really like to do is grab the data from any table encountered on any page. The code at the top only works if the table on the page being scraped is called "player-fixture", which will not be the case on every page.

Before I start combing through the site's HTML to work out which sections of the site give their tables which names: is there a way for Scrapy to grab the data from any table it encounters?

Thanks
Answer 0 (score: 0)
If you expect the id of the table to have different possible values, you can use the or operator in your XPath to capture all the possible scenarios, e.g.:

'//table[@id="player-fixture" or @id="other-value"]//tbody//tr'

If there are too many possible values, you can try anchoring on something more static instead, such as a surrounding div.
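For instance, a hypothetical sketch (the div class below is invented for illustration and would need checking against the actual whoscored.com markup; sel is the Selector from the question's parse() method). The second half shows a generic fallback that pulls rows from every table on a page, whatever its id:

    # anchor on a (hypothetical) more static ancestor instead of the table id
    rows = sel.xpath('//div[@class="player-fixtures"]//table//tbody//tr')

    # or walk every table encountered, regardless of its id
    for table in sel.xpath('//table'):
        for row in table.xpath('.//tr'):
            cells = row.xpath('.//td//text()').extract()
            print ', '.join(cell.strip() for cell in cells if cell.strip())

The generic version trades precision for coverage: it will pick up navigation and layout tables too, so some filtering (e.g. skipping rows with no cells) is usually still needed.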