当我使用 extract()[0] 或 extract() 时,此代码可以运行——它输出我解析到的第一个链接。我无法理解为什么会这样,因为我用同样的代码爬取其他网站时完全没有问题,
唯独在这个网站上只抓取到第一个链接。如果我改成 extract()[1],
它就会给我第二个链接,依此类推。为什么 for 循环没有自动遍历所有链接?
import scrapy
from scrapy.spider import BaseSpider
from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from urlparse import urljoin
class CompItem(scrapy.Item):
    # Container for one scraped review from mouthshut.com.
    title = scrapy.Field()  # review headline text
    link = scrapy.Field()   # review URL (made absolute in parse() via urljoin)
    data = scrapy.Field()   # full review body paragraphs (list of strings, filled in anchor_page)
    name = scrapy.Field()   # reviewer display name
    date = scrapy.Field()   # review date/time text
class criticspider(BaseSpider):
    """Scrape MakeMyTrip reviews from mouthshut.com.

    parse() extracts name/title/date/link for every review on the listing
    page and follows each review link; anchor_page() then attaches the full
    review text and yields the completed item.
    """
    name = "mmt_mouth"
    allowed_domains = ["mouthshut.com"]
    start_urls = ["http://www.mouthshut.com/websites/makemytripcom-reviews-925031929"]

    def parse(self, response):
        # BUG FIX: '//div[@id="allreviews"]' matched only the single wrapper
        # <div>, so the loop ran exactly once and only the first review was
        # scraped. Select the individual <li> review entries instead so the
        # loop visits every review on the page.
        sites = response.xpath('//div[@id="allreviews"]/ul/li')
        for site in sites:
            item = CompItem()
            # NOTE(review): extract()[0] raises IndexError if a field is
            # missing from a review entry — confirm the markup is uniform.
            item['name'] = site.xpath('.//li[@class="profile"]/div/a/span/text()').extract()[0]
            item['title'] = site.xpath('.//div[@class="reviewtitle fl"]/strong/a/text()').extract()[0]
            item['date'] = site.xpath('.//div[@class="reviewrate"]//span[@class="datetime"]/span/span/span/text()').extract()[0]
            item['link'] = site.xpath('.//div[@class="reviewtitle fl"]/strong/a/@href').extract()[0]
            if item['link']:
                # Resolve relative review links against the page URL.
                if 'http://' not in item['link']:
                    item['link'] = urljoin(response.url, item['link'])
                # Carry the partially-filled item to the detail-page callback.
                yield scrapy.Request(item['link'],
                                     meta={'item': item},
                                     callback=self.anchor_page)

    def anchor_page(self, response):
        # Attach the full review text to the item built in parse().
        old_item = response.request.meta['item']
        old_item['data'] = response.xpath('.//div[@itemprop="description"]/p/text()').extract()
        yield old_item
答案 0(得分:3):
因为你的 XPath 只选中了一个包裹所有评论的 div,所以 for 循环只会执行一次。请把语句
sites = response.xpath('//div[@id="allreviews"]')
到
sites = response.xpath('//div[@id="allreviews"]/ul/li')
然后你的for循环可以循环遍历列表元素。