So I'm trying to scrape a blog with roughly 2000 pages of posts, but Scrapy seems to finish early without ever reaching the last page.
import scrapy


class postsSpider(scrapy.Spider):
    name = "posts"

    # URL's to scrape.
    start_urls = [
        "Removed for privacy"
    ]

    def parse(self, response):
        # Calls postDetails Parser.
        post_urls = response.css(
            "div.article-inner > header > h2 > a::attr(href)").extract()
        for url in post_urls:
            post_urls = response.urljoin(url)
            yield scrapy.Request(url=post_urls, callback=self.parsePostDetails)

        # Handles next pages.
        next_page = response.css(
            "#main > nav > div > a.next.page-numbers::attr(href)").extract_first()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)

    # Parses each post for its title, postBody, and postDate.
    def parsePostDetails(self, response):
        postString = response.css(
            "div.article-inner > div.entry-content > p::text").extract()
        postTitle = response.css(
            "div.article-inner > header > h1::text").extract()
        postAuth = response.css(
            """div.article-inner > header > div.entry-meta.aftertitle-meta > span.author.vcard
            > a > em::text""").extract()
        postTime = response.css(
            "div.article-inner > header > div.entry-meta.aftertitle-meta > span.onDate.date > time.published::text").extract()

        yield {
            'postTitle': str(postTitle),
            'postBody': str(postString),
            'postAuthor': str(postAuth),
            'postDate': str(postTime)
        }
2019-01-08 14:04:53 [scrapy.core.engine] INFO: Closing spider (finished)
2019-01-08 14:04:53 [scrapy.extensions.feedexport] INFO: Stored json feed (600 items) in: posts5.json
2019-01-08 14:04:53 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 242774,
'downloader/request_count': 641,
'downloader/request_method_count/GET': 641,
'downloader/response_bytes': 58016154,
'downloader/response_count': 641,
'downloader/response_status_count/200': 641,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2019, 1, 8, 19, 4, 53, 583047),
'item_scraped_count': 600,
'log_count/DEBUG': 1242,
'log_count/INFO': 10,
'request_depth_max': 40,
'response_received_count': 641,
'scheduler/dequeued': 640,
'scheduler/dequeued/memory': 640,
'scheduler/enqueued': 640,
'scheduler/enqueued/memory': 640,
'start_time': datetime.datetime(2019, 1, 8, 19, 2, 51, 782255)}
2019-01-08 14:04:53 [scrapy.core.engine] INFO: Spider closed (finished)
I'm not really sure what's causing this. It seems to stop at a random point. Any idea why this is happening? I assume it has something to do with the pagination, but I can't tell what's going on.
Answer 0 (score: 0):
Most likely your spider simply fails to find the next page link:
# Handles next pages.
next_page = response.css(
    "#main > nav > div > a.next.page-numbers::attr(href)").extract_first()
if next_page is not None:
    yield response.follow(next_page, callback=self.parse)
Try adding:
else:
    from scrapy.shell import inspect_response
    inspect_response(response, self)
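For context, a minimal sketch of how that else branch would sit inside your existing parse method (the selector is copied verbatim from the question; nothing else changes):

    def parse(self, response):
        # ... post links handled as before ...

        # Handles next pages.
        next_page = response.css(
            "#main > nav > div > a.next.page-numbers::attr(href)").extract_first()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)
        else:
            # Pagination broke here: drop into an interactive shell on this exact
            # response so the selector can be checked against the real HTML.
            from scrapy.shell import inspect_response
            inspect_response(response, self)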
This will open Scrapy's debugging shell, where you can interact with the response object live:
# open webpage in browser
>>> view(response)
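From that shell you can also test the selectors directly against the page where the crawl stopped, for example (extract_first is the same call the spider itself uses):

# check whether the strict selector from the spider matches anything
>>> response.css("#main > nav > div > a.next.page-numbers::attr(href)").extract_first()
# compare against a looser version that only keys off the link classes
>>> response.css("a.next.page-numbers::attr(href)").extract_first()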
It seems your next-page CSS selector may be a bit over-engineered here, and any HTML inconsistency in the intermediate elements could break it. Try a.next.page-numbers::attr(href) instead.
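A minimal sketch of the pagination block with that shorter selector, assuming the theme only applies the next/page-numbers classes to the next-page link:

        # Handles next pages with a looser selector that survives wrapper changes.
        next_page = response.css("a.next.page-numbers::attr(href)").extract_first()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)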