How to generate a parsed item from one link and the other parsed items from other links within the same list of items

Asked: 2019-04-02 15:42:39

Tags: python web-scraping scrapy

The problem is that I have been iterating over a list of places to scrape latitude, longitude and elevation. The trouble is that when I get the scraped data back, I have no way of relating it to my current dataframe, because the names I iterated over may have been modified or skipped along the way.

I did manage to capture the name I was searching for, but since it is parsed outside of the link that the rest of the items come from, it does not work.

import scrapy
import pandas as pd
from ..items import latlonglocItem


df = pd.read_csv('wine_df_final.csv')
df = df[pd.notnull(df.real_place)]
real_place = list(set(df.real_place))


class latlonglocSpider(scrapy.Spider):


    name = 'latlonglocs'
    start_urls = []


    for place in real_place:
        baseurl =  place.replace(',', '').replace(' ', '+')
        cleaned_href = f'http://www.google.com/search?q={baseurl}+coordinates+latitude+longitude+distancesto'
        start_urls.append(cleaned_href)



    def parse(self, response):

        items = latlonglocItem()

        items['base_name'] = response.xpath('string(/html/head/title)').get().split(' coordinates')[0]
        for href in response.xpath('//*[@id="ires"]/ol/div/h3/a/@href').getall():
            if href.startswith('/url?q=https://www.distancesto'):
                yield response.follow(href, self.parse_distancesto)
            else:
                pass
        yield items

    def parse_distancesto(self, response):
        items = latlonglocItem()

        try:
            items['appellation'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[2]/p/strong)').get()
            items['latitude'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[1]/td)').get()
            items['longitude'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[2]/td)').get()
            items['elevation'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[10]/td)').get()
            yield items
        except Exception:
            pass
#output
 appellation      base_name       elevation    latitude    longitude
                  Chalone, USA
 Santa Cruz, USA                  56.81        35           9.23 

What happens is that I parse the name I was searching for, then the spider follows a link and parses the rest of the information. On my dataframe, though, the searched name ends up completely disconnected from the rest of the items, and even then it is hard to find matches. I would like to pass the information on to the other function so that it yields all the items together.
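For reference, the usual Scrapy way to carry a value from one callback into the next is to attach it to the request itself via request.meta (or cb_kwargs on Scrapy 1.7+) rather than storing it on the spider. Below is a minimal sketch along those lines; the spider name is hypothetical, start_urls would be built from real_place exactly as in the question, and the selectors and latlonglocItem fields are reused from it.

import scrapy
from ..items import latlonglocItem


class latlonglocMetaSpider(scrapy.Spider):
    # Hypothetical name; everything else mirrors the question's spider.
    name = 'latlonglocs_meta'
    start_urls = []  # built from real_place exactly as in the question

    def parse(self, response):
        # Grab the searched name once per results page.
        base_name = response.xpath('string(/html/head/title)').get().split(' coordinates')[0]
        for href in response.xpath('//*[@id="ires"]/ol/div/h3/a/@href').getall():
            if href.startswith('/url?q=https://www.distancesto'):
                # Attach base_name to the request so the follow-up callback
                # receives it together with the matching response.
                yield response.follow(href, self.parse_distancesto,
                                      meta={'base_name': base_name})

    def parse_distancesto(self, response):
        items = latlonglocItem()
        # Read back the value that travelled with this particular request.
        items['base_name'] = response.meta.get('base_name', '')
        items['appellation'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[2]/p/strong)').get()
        items['latitude'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[1]/td)').get()
        items['longitude'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[2]/td)').get()
        items['elevation'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[10]/td)').get()
        yield items

Because the value rides on each individual request rather than on the spider object, it stays paired with the right page even when requests run concurrently.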

2 Answers:

Answer 0 (score: 0)

This may work. I will add comments on what I am doing, and on some of your code, so you get a feel for what I am up to.

import scrapy
import pandas as pd
from ..items import latlonglocItem


df = pd.read_csv('wine_df_final.csv')
df = df[pd.notnull(df.real_place)]
real_place = list(set(df.real_place))


class latlonglocSpider(scrapy.Spider): # latlonglocSpider is a child class of scrapy.Spider

    name = 'latlonglocs'
    start_urls = []

    for place in real_place:
        baseurl =  place.replace(',', '').replace(' ', '+')
        cleaned_href = f'http://www.google.com/search?q={baseurl}+coordinates+latitude+longitude+distancesto'
        start_urls.append(cleaned_href)

    def __init__(self): # Constructor for our class
        # Since we defined our own constructor we need to call the parent's constructor
        scrapy.Spider.__init__(self)
        self.base_name = None # Here is the base_name we can now use class wide

    def parse(self, response):

        items = latlonglocItem()

        items['base_name'] = response.xpath('string(/html/head/title)').get().split(' coordinates')[0]
        self.base_name = items['base_name'] # Lets store the base_name in the class
        for href in response.xpath('//*[@id="ires"]/ol/div/h3/a/@href').getall():
            if href.startswith('/url?q=https://www.distancesto'):
                yield response.follow(href, self.parse_distancesto)
            else:
                pass
        yield items

    def parse_distancesto(self, response):
        items = latlonglocItem()

        try:
            # If for some reason self.base_name is never assigned in
            # parse() then we want to use an empty string instead.

            # The following syntax means: use self.base_name unless it is None or empty,
            # in which case just use an empty string.
            items['base_name'] = self.base_name or ""

            items['appellation'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[2]/p/strong)').get()
            items['latitude'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[1]/td)').get()
            items['longitude'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[2]/td)').get()
            items['elevation'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[10]/td)').get()
            yield items
        except Exception:
            pass

Answer 1 (score: 0)

import scrapy
import pandas as pd
from ..items import latlonglocItem


df = pd.read_csv('wine_df_final.csv')
df = df[pd.notnull(df.real_place)]
real_place = list(set(df.real_place))


class latlonglocSpider(scrapy.Spider): # latlonglocSpider is a child class of scrapy.Spider

    name = 'latlonglocs'
    start_urls = []

    for place in real_place:
        baseurl =  place.replace(',', '').replace(' ', '+')
        cleaned_href = f'http://www.google.com/search?q={baseurl}+coordinates+latitude+longitude+distancesto'
        start_urls.append(cleaned_href)

    def __init__(self): # Constructor for our class
        # Since we defined our own constructor we need to call the parent's constructor
        scrapy.Spider.__init__(self)
        self.base_name = None # Here is the base_name we can now use class wide

    def parse(self, response):

        for href in response.xpath('//*[@id="ires"]/ol/div/h3/a/@href').getall():

            if href.startswith('/url?q=https://www.distancesto'):
                self.base_name = response.xpath('string(/html/head/title)').get().split(' coordinates')[0]

                yield response.follow(href, self.parse_distancesto)
            else:
                pass

    def parse_distancesto(self, response):
        items = latlonglocItem()

        try:
            # If for some reason self.base_name is never assigned in
            # parse() then we want to use an empty string instead.

            # The following syntax means: use self.base_name unless it is None or empty,
            # in which case just use an empty string.
            items['base_name'] = self.base_name or ""
            items['appellation'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[2]/p/strong)').get()
            items['latitude'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[1]/td)').get()
            items['longitude'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[2]/td)').get()
            items['elevation'] = response.xpath('string(/html/body/div[3]/div/div[2]/div[3]/div[3]/table/tbody/tr[10]/td)').get()
            yield items
        except Exception:
            pass

Thanks to Error - Syntactical Remorse. Concurrent requests must be set to 1 for it to work, and base_name had to be placed inside the loop.
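For completeness, the concurrency limit mentioned above can be set per spider via custom_settings; a minimal sketch (the same value could instead go into the project's settings.py):

class latlonglocSpider(scrapy.Spider):
    name = 'latlonglocs'
    # Process one request at a time so the base_name stored on the spider
    # in parse() is still the right one when parse_distancesto() runs.
    custom_settings = {'CONCURRENT_REQUESTS': 1}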