Scrapy Traceback 302, list index error

Time: 2018-06-09 18:14:26

Tags: python web-scraping callback scrapy python-requests

I am trying to scrape articles for a specific tag, for example "machine-learning", in Python 2.7. I have the following code:

import scrapy
import codecs
import json
from datetime import datetime
from datetime import timedelta
import os

def writeTofile(fileName,text):
    with codecs.open(fileName,'w','utf-8') as outfile:
        outfile.write(text)

class MediumPost(scrapy.Spider):
    name='medium_scrapper'
    handle_httpstatus_list = [401,400]    
    autothrottle_enabled=True


    def start_requests(self):        
        start_urls = ['https://medium.com/tag/'+self.tagSlug.strip("'")+'/archive/']
        print(start_urls)        
        #Header and cookie information can be got from the Network Tab in Developer Tools
        cookie = {'mhj': 'd4c630604c57a104af8bc98218fb3430145',
                                        'nj': '1',
                                        'ko': '1:J0mnan1t5jlHypyliL8GAY1WNfDvtqZBgmBDr+7STp2QSwyWUz6',
                                        'pi': '233',
                                        'mt': '-874'}
        header = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
        startDate=datetime.strptime(self.start_date,"%Y%m%d")
        endDate=datetime.strptime(self.end_date,"%Y%m%d")
        delta=endDate-startDate
        print(delta)
        for i in range(delta.days + 1):
            d=datetime.strftime(startDate+timedelta(days=i),'%Y/%m/%d')
            for url in start_urls:
                print(url+d)
                yield scrapy.Request(url+d, method="GET",headers=header,cookies=cookie,callback=self.parse,meta={'reqDate':d})

    def parse(self,response):
        response_data=response.text
        response_split=response_data.split("while(1);</x>")
        response_data=response_split[1]
        date_post=response.meta['reqDate']
        date_post=date_post.replace("/","")
        directory=datetime.now().strftime("%Y%m%d")
        if not os.path.exists(directory):
            os.makedirs(directory)
        writeTofile(directory+"//"+self.tagSlug.replace("-","").strip("'")+"Tag"+date_post+".json",response_data)

One message says:

[scrapy.core.engine] DEBUG: Crawled (200) <GET https://medium.com/tag/machine-learning/archive/2015/07/13> (referer: None)

NotImplementedError: MediumPost.parse callback is not defined

However, I repeatedly get an error like the following:

current.result = callback(current.result, *args, **kw)
File "/home/mkol/anaconda2/lib/python2.7/site-packages/scrapy/spiders/__init__.py", line 90, in parse
    raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))
NotImplementedError: MediumPost.parse callback is not defined

When I try to move def parse above def start_requests, I get an indentation error.

Since I am a beginner, I cannot figure out where the error is.

1 Answer:

Answer 0 (score: 0)

I think the "MediumPost.parse callback is not defined" problem crept in during editing: it looks as if the Python interpreter simply does not see the parse function, and my guess is that you mixed four-space indentation with tabs. I use PyCharm, which may be why my workflow does not hit the same problem. After a few changes it works for me.

I added tag_slug, start_date and end_date as class attributes (your spider expected them as self.tagSlug, self.start_date and self.end_date). I edited the code following the PEP 8 recommendations, and it reads much better now. I removed the print calls; during debugging it is better to use breakpoints. I also moved the variable names to Python style: as far as I remember, PEP 8 recommends sticking to a single naming style (Python style or Java style).
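To see why mixed indentation can make a method disappear, here is a minimal illustrative sketch (the names here are made up; this is not your spider). In Python 2.7 a tab expands to the next multiple of eight columns, so a tab-indented def parse lands at the same level as the body of start_requests and becomes a nested function instead of a method; Python 3 would reject such a file outright with a TabError:

# Python 2.7 sketch: a tab-indented "def parse" nests inside
# start_requests instead of becoming a method of the class.
source = (
    "class Spider(object):\n"
    "    def start_requests(self):\n"
    "        pass\n"
    "\tdef parse(self, response):\n"  # "\t" expands to column 8
    "\t\tpass\n"
)
namespace = {}
exec(source, namespace)
print(hasattr(namespace['Spider'], 'parse'))  # prints False

Here is the reworked spider: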

import scrapy
import codecs
from datetime import datetime
from datetime import timedelta
import os

def write_to_file(file_name, text):
    # Save the response body as UTF-8 text.
    with codecs.open(file_name, 'w', 'utf-8') as outfile:
        outfile.write(text)

class MediumPost(scrapy.Spider):
    name = 'medium_scrapper'
    handle_httpstatus_list = [401, 400]
    autothrottle_enabled = True
    tag_slug = 'machine-learning'
    start_date = '20170110'
    end_date = '20181130'

    def start_requests(self):
        start_urls = ['https://medium.com/tag/' + self.tag_slug.strip("'") + '/archive/']

        # Header and cookie information can be obtained from the Network tab in Developer Tools
        cookie = {'mhj': 'd4c630604c57a104af8bc98218fb3430145',
                  'nj': '1',
                  'ko': '1:J0mnan1t5jlHypyliL8GAY1WNfDvtqZBgmBDr+7STp2QSwyWUz6',
                  'pi': '233',
                  'mt': '-874'}

        header = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}

        start = datetime.strptime(self.start_date, "%Y%m%d")
        end = datetime.strptime(self.end_date, "%Y%m%d")
        delta = end - start

        # Request the archive page for every day in the date range.
        for i in range(delta.days + 1):
            d = datetime.strftime(start + timedelta(days=i), '%Y/%m/%d')

            for url in start_urls:
                yield scrapy.Request(url + d, headers=header, cookies=cookie, meta={'req_date': d})

    def parse(self, response):
        # Splitting on Medium's anti-hijacking marker yields a single
        # element when the marker is absent, so index 0 is safe here
        # (index 1 raised the IndexError in the original code).
        response_data = response.text
        response_split = response_data.split("while(1);</x>")
        response_data = response_split[0]
        date_post = response.meta['req_date']
        date_post = date_post.replace("/", "")
        directory = datetime.now().strftime("%Y%m%d")

        if not os.path.exists(directory):
            os.makedirs(directory)

        write_to_file(os.path.join(directory, self.tag_slug.replace("-", "").strip("'") + "Tag" + date_post + ".json"), response_data)
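For completeness, this is how I run it (the file name medium_post.py is just an example). Since Scrapy passes -a command-line arguments through as instance attributes, the hard-coded class attributes can still be overridden:

scrapy runspider medium_post.py
scrapy runspider medium_post.py -a tag_slug=data-science -a start_date=20180101 -a end_date=20180131

One more remark: as far as I know, autothrottle_enabled as a plain class attribute is not read by Scrapy. AutoThrottle is switched on through the AUTOTHROTTLE_ENABLED setting, for example with custom_settings = {'AUTOTHROTTLE_ENABLED': True} on the spider, or -s AUTOTHROTTLE_ENABLED=1 on the command line.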