我一直在尝试从公司注册簿中获取一些信息。这是可行的,但我想对搜索条目给出的每个结果重复这一过程。我一直在尝试使用 LinkExtractor,但没能让它正常工作。
搜索结果网页为: https://www.companiesintheuk.co.uk/Company/Find?q=a
从搜索项中抓取单个结果是可行的(如果我单击一个结果项),但是如何对每个结果项重复此操作?
这是我的代码:
import scrapy
import re
from scrapy.linkextractors import LinkExtractor
class QuotesSpider(scrapy.Spider):
    """Scrape company name/address fields from companiesintheuk.co.uk.

    Yields one dict per result block found on the start page.
    """
    name = 'CYRecursive'
    start_urls = [
        'https://www.companiesintheuk.co.uk/ltd/a-2']

    def _clean_text(self, selector, query):
        # Collapse whitespace runs to single spaces. Raw string for the
        # regex (the original '\s+' is an invalid escape sequence), and
        # tolerate a missing node (the original ''.join(None) raised
        # TypeError when the CSS query matched nothing).
        text = selector.css(query).get()
        if text is None:
            return None
        return re.sub(r'\s+', ' ', text)

    def parse(self, response):
        """Yield one item per ``div.col-md-6`` result block.

        Bug fixed: the original nested ``for i in response.css('div.col-md-6')``
        inside ``for i in response.css('div.col-md-9')``; the inner loop
        shadowed the outer variable and re-scanned the whole response, so
        every item was yielded once per outer match. A single loop emits
        each item exactly once.
        """
        for block in response.css('div.col-md-6'):
            yield {
                'company_name': self._clean_text(block, '#content2 > strong:nth-child(2) > strong:nth-child(1) > div:nth-child(1)::text'),
                'address': self._clean_text(block, "#content2 > strong:nth-child(2) > address:nth-child(2) > div:nth-child(1) > span:nth-child(1)::text"),
                'location': self._clean_text(block, "#content2 > strong:nth-child(2) > address:nth-child(2) > div:nth-child(1) > span:nth-child(3)::text"),
                'postal_code': self._clean_text(block, "#content2 > strong:nth-child(2) > address:nth-child(2) > div:nth-child(1) > a:nth-child(5) > span:nth-child(1)::text"),
            }
答案 0(得分:1):
import scrapy
import re
from scrapy.linkextractors import LinkExtractor
class QuotesSpider(scrapy.Spider):
    """Follow every search result from the Find page, paginate, and
    scrape the company detail fields from each result's own page."""
    name = 'CYRecursive'
    start_urls = [
        'https://www.companiesintheuk.co.uk/Company/Find?q=a']

    def parse(self, response):
        """Request each result's detail page, then follow pagination."""
        for company_url in response.xpath('//div[@class="search_result_title"]/a/@href').extract():
            yield scrapy.Request(
                url=response.urljoin(company_url),
                callback=self.parse_details,
            )
        # Follow the "next page" link, if any, back into this callback.
        next_page_url = response.xpath('//li/a[@class="pageNavNextLabel"]/@href').extract_first()
        if next_page_url:
            yield scrapy.Request(
                url=response.urljoin(next_page_url),
                callback=self.parse,
            )

    def _clean_text(self, selector, query):
        # Collapse whitespace runs to single spaces. Raw string for the
        # regex (the original '\s+' is an invalid escape sequence), and
        # tolerate a missing node (the original ''.join(None) raised
        # TypeError when the CSS query matched nothing).
        text = selector.css(query).get()
        if text is None:
            return None
        return re.sub(r'\s+', ' ', text)

    def parse_details(self, response):
        """Yield one item per ``div.col-md-6`` block on a detail page.

        Bug fixed: the original nested ``for i in response.css('div.col-md-6')``
        inside ``for i in response.css('div.col-md-9')``; the inner loop
        shadowed the outer variable and re-scanned the whole response, so
        every item was yielded once per outer match. A single loop emits
        each item exactly once.
        """
        for block in response.css('div.col-md-6'):
            yield {
                'company_name': self._clean_text(block, '#content2 > strong:nth-child(2) > strong:nth-child(1) > div:nth-child(1)::text'),
                'address': self._clean_text(block, "#content2 > strong:nth-child(2) > address:nth-child(2) > div:nth-child(1) > span:nth-child(1)::text"),
                'location': self._clean_text(block, "#content2 > strong:nth-child(2) > address:nth-child(2) > div:nth-child(1) > span:nth-child(3)::text"),
                'postal_code': self._clean_text(block, "#content2 > strong:nth-child(2) > address:nth-child(2) > div:nth-child(1) > a:nth-child(5) > span:nth-child(1)::text"),
            }
当然,您可以使用 `start_requests` 自动 `yield` 从 `a` 到 `z` 的所有搜索。
您的CSS表达式错误:
# Replacement body for parse_details: the detail page marks company data
# with schema.org itemprop attributes, so these XPath selectors are far
# more robust than the brittle nth-child CSS chains above.
# extract_first() returns None when a node is missing instead of raising.
yield {
'company_name': response.xpath('//div[@itemprop="name"]/text()').extract_first(),
'address': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
'location': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
'postal_code': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
}