当xpath在Chrome控制台中运行时,Scrapy shell无法抓取信息

时间:2017-09-08 23:19:16

标签: xpath scrapy web-crawler

我正在做一个收集大学教授联系信息的项目(因此并无恶意)。教授页面是动态加载的,我通过 Chrome 开发者工具的 Network 面板找到了实际请求。但是,同样的 XPath 在浏览器中可以工作,在 Scrapy shell 中却不起作用;我甚至尝试添加了请求头(headers)。Scrapy shell 的结果:

Chrome console result

import scrapy
from universities.items import UniversitiesItem


class UniversityOfHouston(scrapy.Spider):
    """Spider that collects faculty/staff contact information from the
    University of Houston online directory.

    The directory page loads its results via AJAX, so the spider queries
    the ``proxy.php`` endpoint directly instead of scraping the HTML page
    the browser renders.
    """

    name = 'University_of_Houston'
    allowed_domains = ['uh.edu']
    start_urls = ['http://www.uh.edu/directory/']

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original override never called the Spider base
        # initializer, which Scrapy relies on to wire up name/start_urls
        # and crawler state.
        super().__init__(*args, **kwargs)
        self.lastName = ''

    def parse(self, response):
        """Build the directory search query for one last name and hand the
        result page to :meth:`parse_staff`."""
        self.lastName = 'An'
        query = "http://www.uh.edu/directory/proxy.php?q=" + self.lastName + \
                "&submit=Search&limit=250&loc=HR730&pos=faculty%7Cstaff&faculty=faculty&staff=staff&student=student"

        yield scrapy.Request(query, callback=self.parse_staff)

    def parse_staff(self, response):
        """Follow each search-result link (``//dt/a``) to its detail page."""
        for href in response.xpath('//dt/a/@href').extract():
            yield scrapy.Request('http://www.uh.edu/directory/' + href,
                                 callback=self.parse_item)

    def parse_item(self, response):
        """Extract one person's contact details into a UniversitiesItem."""
        item = UniversitiesItem()

        item['full_name'] = response.xpath('//h2[@class="single_title"]/text()').extract_first()
        item['university'] = 'University of Houston'
        item['discipline'] = response.xpath('//td/a[@class="org"]/text()').extract_first()
        # BUG FIX: the original assigned the raw SelectorList here; every
        # other field extracts the text, so do the same.
        item['title'] = response.xpath('//tr/td[@class="title"]/text()').extract_first()
        # The original sliced off the first 7 characters unconditionally
        # (presumably a fixed prefix such as 'mailto:' — TODO confirm
        # against a live page); guard against a missing element so slicing
        # None does not raise TypeError.
        email = response.xpath('//td/a[@title="email address"]/text()').extract_first()
        item['email'] = email[7:] if email else None
        item['phone'] = response.xpath('//td[@class="tel"]/a/text()').extract_first()

        yield item

测试版本:

import scrapy
from universities.items import UniversitiesItem


class UniversityOfHouston(scrapy.Spider):
    """Test version of the UH directory spider, restricted to a couple of
    hard-coded last names."""

    #name = 'University_of_Houston'
    name = 'uh2'
    allowed_domains = ['uh.edu']
    start_urls = ['http://www.uh.edu/directory/']

    def __init__(self, *args, **kwargs):
        # BUG FIX: call the Spider base initializer the original skipped.
        super().__init__(*args, **kwargs)
        self.last_name = ''

    def parse(self, response):
        """Issue one directory search request per sample last name."""
        # NOTE(review): kw.txt is read but unused in this test version; the
        # loop below iterates a hard-coded sample instead.
        with open('kw.txt') as file_object:
            last_names = file_object.readlines()

        for ln in ['Lee', 'Zhao']:
            last_name = ln.strip()
            self.last_name = last_name  # kept for backward compatibility
            print('-----------------------------------------------------')
            print("scraping last name: ", last_name)
            query = "http://www.uh.edu/directory/proxy.php?q=" + last_name + \
                    "&submit=Search&limit=250&loc=HR730&pos=faculty%7Cstaff&faculty=faculty&staff=staff&student=student"

            # BUG FIX: Scrapy handles requests asynchronously, so by the time
            # parse_item runs, a shared self.last_name holds only the *last*
            # name of the loop.  Carry the name with each request instead.
            yield scrapy.Request(query, callback=self.parse_staff,
                                 meta={'last_name': last_name})

    def parse_staff(self, response):
        """Follow each search result via the AJAX endpoint (proxy.php)."""
        for result in response.xpath('//dt/a/@href').extract():
            # index.php is the page the browser shows; proxy.php is the AJAX
            # endpoint that actually serves the data.
            query_proxy = 'http://www.uh.edu/directory/' + result.replace("index.php", "proxy.php")
            yield scrapy.Request(query_proxy, callback=self.parse_item,
                                 meta={'last_name': response.meta.get('last_name', self.last_name)})

    def parse_item(self, response):
        """Emit a UniversitiesItem when the page's name matches the searched
        last name (carried in request meta)."""
        full_name = response.xpath('//h2[@class="single_title"]/text()').extract_first()
        if not full_name:
            return
        last_name = response.meta.get('last_name', self.last_name)
        if last_name not in full_name.split():
            return

        item = UniversitiesItem()
        item['fullname'] = full_name
        item['university'] = 'University of Houston'
        # extract_first() returns None on a miss rather than raising, so the
        # original try/except ValueError was dead code and is removed.
        item['department'] = response.xpath('//td/a[@class="org"]/text()').extract_first()
        item['title'] = response.xpath('//tr/td[@class="title"]/text()').extract_first()
        item['email'] = response.xpath('//td/a[@title="email address"]/text()').extract_first()
        item['phone'] = response.xpath('//td[@class="tel"]/a/text()').extract_first()

        yield item

1 个答案:

答案 0(得分:1)

问题在于该网页是通过 AJAX 调用获取数据的,因此当你只抓取主页面时,这些数据还不存在于响应中。

AJAX Call

将您的 parse_staff 函数更改为以下函数:

def parse_staff(self, response):
    """Follow each search-result link via the SSL proxy endpoint.

    index.php is the page the browser displays; proxy.php is the AJAX
    endpoint that actually returns the person's data, so each result link
    is rewritten before being followed.  Yields one request per result,
    each handled by ``self.parse_item``.
    """
    for result in response.xpath('//dt/a/@href').extract():
        # BUG FIX: the original also built an unused plain-http 'query'
        # variable; that dead code is removed.
        query_proxy = "https://ssl.uh.edu/directory/" + result.replace("index.php", "proxy.php")
        yield response.follow(query_proxy, callback=self.parse_item)