import scrapy
import csv
from series.items import SeriesItem
class EpisodeScraperSpider(scrapy.Spider):
    """Scrape episode metadata (title, IMDb id, airdate) from IMDb season pages.

    Run for a single show/season via spider arguments
    (``scrapy crawl episode_scraper -a id=tt0944947 -a series=1``) or, with no
    arguments, for every ``id``/``series`` row found in ``series_episode.csv``.
    """

    name = "episode_scraper"
    allowed_domains = ["imdb.com"]
    start_urls = []

    def __init__(self, id=None, series=None, *args, **kwargs):
        # ``id`` shadows the builtin, but the name is kept: scrapy maps the
        # command-line ``-a id=...`` argument onto it by name.
        super(EpisodeScraperSpider, self).__init__(*args, **kwargs)
        url_fmt = 'http://www.imdb.com/title/{!s}/episodes?season={!s}'
        if id is not None:
            self.start_urls = [url_fmt.format(id, series)]
        else:
            # Build a fresh per-instance list instead of appending to the
            # class-level ``start_urls`` (shared, mutable class state).
            # NOTE(review): this relative path is why the spider finds no CSV
            # under scrapyd — the daemon's working directory is not the
            # project directory; use an absolute path (e.g. /tmp/...) or
            # resolve the path relative to this file. TODO confirm deployment.
            urls = []
            with open('series_episode.csv') as f:
                for row in csv.DictReader(f):
                    urls.append(url_fmt.format(row["id"], row["series"]))
            self.start_urls = urls

    def parse(self, response):
        """Yield one :class:`SeriesItem` per episode on the season page.

        Bug fix: the original body declared ``global title`` *after* ``title``
        had already been assigned locally — a SyntaxError in Python 3 ("name
        'title' is used prior to global declaration") — and a module global
        was never intended. The declaration is removed; ``title`` is a plain
        local shared by every yielded item.
        """
        episodes = response.xpath('//div[contains(@class, "list_item")]')
        # Series title from the page header; reused for every episode item.
        title = response.xpath('//h3/a/text()').extract()[0]
        for episode in episodes:
            item = SeriesItem()
            item['series_episode'] = episode.xpath('div/a/div[contains(@data-const,"tt")]/div/text()').extract()
            # NOTE: ``extract()`` returns a list, so the formatted title reads
            # like "Show: ['Episode']" — preserved as-is to avoid changing the
            # item shape consumed downstream.
            item['title'] = '{!s}: {!s}'.format(title, episode.xpath('div[@class="info"]/strong/a/text()').extract())
            item['imdb_id'] = episode.xpath('div[@class="image"]/a/div/@data-const').extract()
            item['airdate'] = [x.strip() for x in episode.xpath('div/div[@class="airdate"]/text()').extract()]
            yield item
当我在 scrapyd 中运行这个爬虫时,没有得到任何结果(scrapy 本身直接运行是正常的)。我认为问题出在这一行:
with open('series_episode.csv') as f:
我不知道在哪里放置我的csv文件。 请帮帮我!!
谢谢!
答案 0(得分:0):
一个选项是将其保存在/tmp
with open('/tmp/series_episode.csv') as f: