I am using Scrapy to collect some emails from Craigslist, but when I run it, the .csv file it produces contains blank rows. I can extract the title, tag, and link without problems; only the email is an issue. Here is the code:
# -*- coding: utf-8 -*-
import re
import scrapy
from scrapy.http import Request


# item class included here
class DmozItem(scrapy.Item):
    # define the fields for your item here like:
    link = scrapy.Field()
    attr = scrapy.Field()
    title = scrapy.Field()
    tag = scrapy.Field()


class DmozSpider(scrapy.Spider):
    name = "dmoz"
    allowed_domains = ["craigslist.org"]
    start_urls = [
        "http://raleigh.craigslist.org/bab/5038434567.html"
    ]
    BASE_URL = 'http://raleigh.craigslist.org/'

    def parse(self, response):
        links = response.xpath('//a[@class="hdrlnk"]/@href').extract()
        for link in links:
            absolute_url = self.BASE_URL + link
            yield scrapy.Request(absolute_url, callback=self.parse_attr)

    def parse_attr(self, response):
        match = re.search(r"(\w+)\.html", response.url)
        if match:
            item_id = match.group(1)
            url = self.BASE_URL + "reply/nos/vgm/" + item_id
            item = DmozItem()
            item["link"] = response.url
            item["title"] = "".join(response.xpath("//span[@class='postingtitletext']//text()").extract())
            item["tag"] = "".join(response.xpath("//p[@class='attrgroup']/span/b/text()").extract()[0])
            return scrapy.Request(url, meta={'item': item}, callback=self.parse_contact)

    def parse_contact(self, response):
        item = response.meta['item']
        item["attr"] = "".join(response.xpath("//div[@class='anonemail']//text()").extract())
        return item
Answer 0 (score: 1)
First of all, you meant to point start_urls at the listing page: http://raleigh.craigslist.org/search/bab.

Also, as I understand it, the additional request made to get the email should go to reply/ral/bab/, not reply/nos/vgm/.
Additionally, if a posting has no attrgroup, you will get an error on the following line:
item["tag"] = "".join(response.xpath("//p[@class='attrgroup']/span/b/text()").extract()[0])
Replace it with:
item["tag"] = "".join(response.xpath("//p[@class='attrgroup']/span/b/text()").extract())
The complete code that works for me:
# -*- coding: utf-8 -*-
import re
import scrapy


class DmozItem(scrapy.Item):
    # define the fields for your item here like:
    link = scrapy.Field()
    attr = scrapy.Field()
    title = scrapy.Field()
    tag = scrapy.Field()


class DmozSpider(scrapy.Spider):
    name = "dmoz"
    allowed_domains = ["raleigh.craigslist.org"]
    start_urls = [
        "http://raleigh.craigslist.org/search/bab"
    ]
    BASE_URL = 'http://raleigh.craigslist.org/'

    def parse(self, response):
        links = response.xpath('//a[@class="hdrlnk"]/@href').extract()
        for link in links:
            absolute_url = self.BASE_URL + link
            yield scrapy.Request(absolute_url, callback=self.parse_attr)

    def parse_attr(self, response):
        match = re.search(r"(\w+)\.html", response.url)
        if match:
            item_id = match.group(1)
            url = self.BASE_URL + "reply/ral/bab/" + item_id
            item = DmozItem()
            item["link"] = response.url
            item["title"] = "".join(response.xpath("//span[@class='postingtitletext']//text()").extract())
            item["tag"] = "".join(response.xpath("//p[@class='attrgroup']/span/b/text()").extract())
            return scrapy.Request(url, meta={'item': item}, callback=self.parse_contact)

    def parse_contact(self, response):
        item = response.meta['item']
        item["attr"] = "".join(response.xpath("//div[@class='anonemail']//text()").extract())
        return item
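As a usage note, the crawl can also be launched from a standalone script using Scrapy's built-in CSV feed exporter. This is only a sketch assuming a Scrapy 1.x-style setup; the usual alternative is simply running scrapy crawl dmoz -o emails.csv from the project directory (emails.csv is an arbitrary file name):

from scrapy.crawler import CrawlerProcess

# FEED_URI / FEED_FORMAT direct the built-in CSV feed exporter
process = CrawlerProcess(settings={
    "FEED_URI": "emails.csv",
    "FEED_FORMAT": "csv",
})
process.crawl(DmozSpider)
process.start()  # blocks until the crawl finishes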