Main file:
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from bloggerx.items import BloggerxItem
from scrapy.spider import BaseSpider

class BloggerxSpider(BaseSpider):
    name = 'bloggerx'
    allowed_domains = ['abcr.com']
    start_urls = ['http://www.abcr.com/profile/07372831905432746031']

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        item = BloggerxItem()
        item['gender'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Gender")]/following-sibling::node()/text()').extract()
        item['blogger_since'] = hxs.select('/html/body/div[2]/div/div[2]/div/p[2]/text()').re('\d+')
        item['profile_views'] = hxs.select('/html/body/div[2]/div/div[2]/div/p[3]/text()').re('\d+')
        item['industry'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Industry")]/following-sibling::node()/span/a/text()').extract()
        item['occupation'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Occupation")]/following-sibling::node()/span/a/text()').extract()
        item['locality'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="locality"]/a/text()').extract()
        item['region'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="region"]/a/text()').extract()
        item['country'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="country-name"]/a/text()').extract()
        item['introduction'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Introduction")]/following-sibling::node()/text()').extract()
        item['interests'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Interests")]/following-sibling::node()/span/a/text()').extract()
        item['email1'] = hxs.select('//html/body/div[2]/div/div[2]/div/ul/li/script/text()').re('[\w.]+@[\w.]+[com]')
        item['email2'] = hxs.select('/html/body/div[2]/div/div[2]/div/ul/li[3]/div/text()').extract()
        item['website'] = hxs.select('//html/body/div[2]/div/div[2]/div/ul/li[2]/a/@href').extract()
        item['films'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Films")]/following-sibling::node()/span/a/text()').extract()
        item['music'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Music")]/following-sibling::node()/span/a/text()').extract()
        item['books'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Books")]/following-sibling::node()/span/a/text()').extract()
        item['blogs_follow'] = hxs.select('//html/body/div[2]/div/div[3]/ul[2]/li/a/text()').extract()
        item['blogs_follow_link'] = hxs.select('//html/body/div[2]/div/div[3]/ul[2]/li/a/@href').extract()
        item['author_blogs'] = hxs.select('//html/body/div[2]/div/div[3]/ul/li/span/a/text()').extract()
        item['author_blogs_link'] = hxs.select('//html/body/div[2]/div/div[3]/ul/li/span/a/@href').extract()
        return item
Items file:
from scrapy.item import Item, Field

class BloggerxItem(Item):
    # define the fields for your item here like:
    # name = Field()
    gender = Field()
    blogger_since = Field()
    profile_views = Field()
    industry = Field()
    occupation = Field()
    locality = Field()
    introduction = Field()
    interests = Field()
    email1 = Field()
    website = Field()
    films = Field()
    music = Field()
    books = Field()
    region = Field()
    country = Field()
    email2 = Field()
    blogs_follow = Field()
    blogs_follow_link = Field()
    author_blogs = Field()
    author_blogs_link = Field()
Output when running scrapy crawl bloggerx -o items.json -t json:
2013-03-07 16:39:24+0530 [scrapy] INFO: Scrapy 0.16.4 started (bot: bloggerx)
2013-03-07 16:39:24+0530 [scrapy] DEBUG: Enabled extensions: FeedExporter, LogStats, TelnetConsole, CloseSpider, WebService, CoreStats, SpiderState
2013-03-07 16:39:25+0530 [scrapy] DEBUG: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, RedirectMiddleware, CookiesMiddleware, HttpCompressionMiddleware, ChunkedTransferMiddleware, DownloaderStats
2013-03-07 16:39:25+0530 [scrapy] DEBUG: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2013-03-07 16:39:25+0530 [scrapy] DEBUG: Enabled item pipelines:
2013-03-07 16:39:25+0530 [bloggerx] INFO: Spider opened
2013-03-07 16:39:25+0530 [bloggerx] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2013-03-07 16:39:25+0530 [scrapy] DEBUG: Telnet console listening on 0.0.0.0:6028
2013-03-07 16:39:25+0530 [scrapy] DEBUG: Web service listening on 0.0.0.0:6085
2013-03-07 16:39:27+0530 [bloggerx] DEBUG: Crawled (200) <GET http://www.abcr.com/profile/07372831905432746031> (referer: None)
2013-03-07 16:39:27+0530 [bloggerx] INFO: Closing spider (finished)
2013-03-07 16:39:27+0530 [bloggerx] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 249,
'downloader/request_count': 1,
'downloader/request_method_count/GET': 1,
'downloader/response_bytes': 13459,
'downloader/response_count': 1,
'downloader/response_status_count/200': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2013, 3, 7, 11, 9, 27, 320389),
'log_count/DEBUG': 7,
'log_count/INFO': 4,
'response_received_count': 1,
'scheduler/dequeued': 1,
'scheduler/dequeued/memory': 1,
'scheduler/enqueued': 1,
'scheduler/enqueued/memory': 1,
'start_time': datetime.datetime(2013, 3, 7, 11, 9, 25, 967450)}
2013-03-07 16:39:27+0530 [bloggerx] INFO: Spider closed (finished)
The generated output file is empty, and the individual hxs.select statements work fine when tried in the scrapy shell. Am I doing something stupid here?
Answer 0 (score: 1)
You are importing CrawlSpider at the top but declaring the spider with BaseSpider. Wrong:
from scrapy.contrib.spiders import CrawlSpider, Rule
class BloggerxSpider(BaseSpider):
Corrected:
from scrapy.contrib.spiders import CrawlSpider, Rule
class BloggerxSpider(CrawlSpider):
Or:
from scrapy.spider import BaseSpider
class BloggerxSpider(BaseSpider):
Answer 1 (score: 0)
Instead of def parse_blogger, you need def parse.

def parse is the default callback the framework uses for parsing; if you want to name it something different, you have to route your responses to that new callback yourself.

To use your own parse method, you pass it as the callback. Here is an example for when you create your own requests:
request = Request("http://something", callback=self.parse_blogger)
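To flesh that out, here is a minimal sketch of a spider whose default parse hands work off to a custom callback (the spider name, URLs, and the parse_blogger method are illustrative assumptions, not the asker's code):

from scrapy.http import Request
from scrapy.spider import BaseSpider

class ExampleSpider(BaseSpider):
    name = 'example'  # illustrative name
    allowed_domains = ['blogger.com']
    start_urls = ['http://www.blogger.com/']

    def parse(self, response):
        # parse() is the default callback, so responses for start_urls
        # arrive here; hand other pages to a callback of your choosing.
        yield Request('http://www.blogger.com/profile/07372831905432746031',
                      callback=self.parse_blogger)

    def parse_blogger(self, response):
        # Runs only for requests that named it as their callback.
        pass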
Answer 2 (score: 0)
If you don't explicitly define rules and don't care about following links, use BaseSpider instead, but name the callback parse.
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from bloggerx.items import BloggerxItem

class BloggerxSpider(BaseSpider):
    ...
Note that for CrawlSpiders, the documentation explicitly states that you should not name your callback parse, because that overrides CrawlSpider's own parse method and the spider will no longer crawl correctly.
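For the CrawlSpider route, a minimal sketch could look like this (the rule regex, spider name, and parse_profile callback are illustrative assumptions, not the asker's code):

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

class BloggerxCrawlSpider(CrawlSpider):
    name = 'bloggerx_crawl'  # illustrative name
    allowed_domains = ['blogger.com']
    start_urls = ['http://www.blogger.com/profile/07372831905432746031']

    # CrawlSpider uses its own parse() to drive the rules, so the
    # callback must have any name other than 'parse'.
    rules = (
        Rule(SgmlLinkExtractor(allow=(r'/profile/\d+',)), callback='parse_profile'),
    )

    def parse_profile(self, response):
        # Extract fields here, as in the question's parse() method.
        pass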
Answer 3 (score: 0)
Your log output seems strange to me, because there is no page for your start_urls entry: the server responds with a 404, which Scrapy ignores by default, so no item is returned. Your spider also doesn't declare BaseSpider, meaning this code wouldn't even compile, so there appear to be some copy/paste problems here.
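As an aside, if you want those 404 responses delivered to your callback rather than silently dropped by HttpErrorMiddleware, Scrapy lets a spider whitelist status codes via the handle_httpstatus_list attribute (a sketch; the attribute is standard Scrapy, the spider body is illustrative):

from scrapy.spider import BaseSpider

class Abc404Spider(BaseSpider):
    name = 'abc404'  # illustrative name
    start_urls = ['http://www.abcr.com/profile/07372831905432746031']
    # Allow 404 responses through HttpErrorMiddleware to parse().
    handle_httpstatus_list = [404]

    def parse(self, response):
        if response.status == 404:
            self.log('Got 404 for %s' % response.url)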
EDIT ------------------
I changed the domain to blogger.com and now it returns an item:
2013-03-08 09:02:28-0600 [scrapy] INFO: Scrapy 0.17.0 started (bot: oneoff)
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Overridden settings: {'NEWSPIDER_MODULE': 'oneoff.spiders', 'SPIDER_MODULES': ['oneoff.spiders'], 'USER_AGENT': 'Chromium OneOff 24.0.1312.56 Ubuntu 12.04 (24.0.1312.56-0ubuntu0.12.04.1)', 'BOT_NAME': 'oneoff'}
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Enabled extensions: LogStats, TelnetConsole, CloseSpider, WebService, CoreStats, SpiderState
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, MetaRefreshMiddleware, HttpCompressionMiddleware, RedirectMiddleware, CookiesMiddleware, ChunkedTransferMiddleware, DownloaderStats
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Enabled item pipelines:
2013-03-08 09:02:28-0600 [bloggerx] INFO: Spider opened
2013-03-08 09:02:28-0600 [bloggerx] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Telnet console listening on 0.0.0.0:6024
2013-03-08 09:02:28-0600 [scrapy] DEBUG: Web service listening on 0.0.0.0:6081
2013-03-08 09:02:28-0600 [bloggerx] DEBUG: Crawled (200) <GET http://www.blogger.com/profile/07372831905432746031> (referer: None)
2013-03-08 09:02:28-0600 [bloggerx] DEBUG: Scraped from <200 http://www.blogger.com/profile/07372831905432746031>
{'author_blogs': [u'Inserire comunicati stampa per il turismo',
u'Inserire Comunicati stampa e Article Marketing',
u'Video Quacos'],
'author_blogs_link': [u'http://comunicati-stampa-per-il-turismo.blogspot.com/',
u'http://comunicati-stampa-vendita-online.blogspot.com/',
u'http://quacos.blogspot.com/'],
'blogger_since': [u'2008'],
'blogs_follow': [u'Abandonware Time',
u'AltroSeo.com',
u'ANSIMA notizie',
u'Cinnamon Girl',
u'enigmamigarun',
u'Fake Books - Libri di una riga.',
u'FM - COSMETICA E NON SOLO ',
u'GS BARBARIANS',
u'Il Disinformatico',
u"Linus' blog",
u'Montefeltro Nuoto Master',
u'Nella Tana del Coniglio',
u'PHP and tips'],
'blogs_follow_link': [u'http://squakenet.blogspot.com/',
u'http://www.altroseo.com/',
u'http://ansima.blogspot.com/',
u'http://cinnamongirl82.blogspot.com/',
u'http://enigmaamigarun.blogspot.com/',
u'http://fake-books.blogspot.com/',
u'http://valeriacosmeticafm.blogspot.com/',
u'http://gsbarbarians.blogspot.com/',
u'http://attivissimo.blogspot.com/',
u'http://torvalds-family.blogspot.com/',
u'http://montefeltronuotomaster.blogspot.com/',
u'http://anonimoconiglio.blogspot.com/',
u'http://phpntips.blogspot.com/'],
'books': [],
'country': [],
'email1': [u'bloggiovanni.cappellini@gmail.com'],
'email2': [u'cappogio@hotmail.com'],
'films': [],
'gender': [],
'industry': [],
'interests': [],
'introduction': [],
'locality': [],
'music': [],
'occupation': [],
'profile_views': [u'553'],
'region': [],
'website': [u'http://www.quacos.com']}
2013-03-08 09:02:28-0600 [bloggerx] INFO: Closing spider (finished)
2013-03-08 09:02:28-0600 [bloggerx] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 288,
'downloader/request_count': 1,
'downloader/request_method_count/GET': 1,
'downloader/response_bytes': 13615,
'downloader/response_count': 1,
'downloader/response_status_count/200': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2013, 3, 8, 15, 2, 28, 948533),
'item_scraped_count': 1,
'log_count/DEBUG': 9,
'log_count/INFO': 4,
'response_received_count': 1,
'scheduler/dequeued': 1,
'scheduler/dequeued/memory': 1,
'scheduler/enqueued': 1,
'scheduler/enqueued/memory': 1,
'start_time': datetime.datetime(2013, 3, 8, 15, 2, 28, 379242)}
2013-03-08 09:02:28-0600 [bloggerx] INFO: Spider closed (finished)
The spider:
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from bloggerx.items import BloggerxItem

class BloggerxSpider(BaseSpider):
    name = 'bloggerx'
    allowed_domains = ['blogger.com']
    start_urls = ['http://www.blogger.com/profile/07372831905432746031']

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        item = BloggerxItem()
        item['gender'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Gender")]/following-sibling::node()/text()').extract()
        item['blogger_since'] = hxs.select('/html/body/div[2]/div/div[2]/div/p[2]/text()').re('\d+')
        item['profile_views'] = hxs.select('/html/body/div[2]/div/div[2]/div/p[3]/text()').re('\d+')
        item['industry'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Industry")]/following-sibling::node()/span/a/text()').extract()
        item['occupation'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Occupation")]/following-sibling::node()/span/a/text()').extract()
        item['locality'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="locality"]/a/text()').extract()
        item['region'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="region"]/a/text()').extract()
        item['country'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Location")]/following-sibling::node()/span[@class="country-name"]/a/text()').extract()
        item['introduction'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Introduction")]/following-sibling::node()/text()').extract()
        item['interests'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Interests")]/following-sibling::node()/span/a/text()').extract()
        item['email1'] = hxs.select('//html/body/div[2]/div/div[2]/div/ul/li/script/text()').re('[\w.]+@[\w.]+[com]')
        item['email2'] = hxs.select('/html/body/div[2]/div/div[2]/div/ul/li[3]/div/text()').extract()
        item['website'] = hxs.select('//html/body/div[2]/div/div[2]/div/ul/li[2]/a/@href').extract()
        item['films'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Films")]/following-sibling::node()/span/a/text()').extract()
        item['music'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Music")]/following-sibling::node()/span/a/text()').extract()
        item['books'] = hxs.select('//html/body/div[2]/div/div[3]/table/tr/th[contains(text(),"Favourite Books")]/following-sibling::node()/span/a/text()').extract()
        item['blogs_follow'] = hxs.select('//html/body/div[2]/div/div[3]/ul[2]/li/a/text()').extract()
        item['blogs_follow_link'] = hxs.select('//html/body/div[2]/div/div[3]/ul[2]/li/a/@href').extract()
        item['author_blogs'] = hxs.select('//html/body/div[2]/div/div[3]/ul/li/span/a/text()').extract()
        item['author_blogs_link'] = hxs.select('//html/body/div[2]/div/div[3]/ul/li/span/a/@href').extract()
        return item