I wrote a spider that scrapes data from a news website:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from items import CravlingItem
import re


class CountrySpider(CrawlSpider):
    name = 'Post_and_Parcel_Human_Resource'
    allowed_domains = ['postandparcel.info']
    start_urls = ['http://postandparcel.info/category/news/human-resources/']

    # Follow only the three headline links on the category page and hand
    # each article page to parse_item.
    rules = (
        Rule(LinkExtractor(allow='',
                           restrict_xpaths=(
                               '//*[@id="page"]/div[4]/div[1]/div[1]/div[1]/h1/a',
                               '//*[@id="page"]/div[4]/div[1]/div[1]/div[2]/h1/a',
                               '//*[@id="page"]/div[4]/div[1]/div[1]/div[3]/h1/a'
                           )),
             callback='parse_item',
             follow=False),
    )
    def parse_item(self, response):
        i = CravlingItem()
        i['title'] = " ".join(response.xpath('//div[@class="cd_left_big"]/div/h1/text()')
                              .extract()).strip() or " "
        i['headline'] = self.clear_html(
            " ".join(response.xpath('//div[@class="cd_left_big"]/div//div/div[1]/p')
                     .extract()).strip()) or " "
        i['text'] = self.clear_html(
            " ".join(response.xpath('//div[@class="cd_left_big"]/div//div/p')
                     .extract()).strip()) or " "
        i['url'] = response.url
        # Image paths are site-relative, so prefix them with the full domain.
        i['image'] = (" ".join(response.xpath('//*[@id="middle_column_container"]/div[2]/div/img/@src')
                               .extract()).strip()).replace('wp-content/', 'http://postandparcel.info/wp-content/') or " "
        i['author'] = " "
        return i

    @staticmethod
    def clear_html(html):
        # Strip <style> blocks, any remaining tags, and control whitespace.
        text = re.sub(r'(?s)<(style).*?</\1>|<[^>]*?>|\n|\t|\r', '', html)
        return text
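For a quick sanity check of what clear_html removes, here is an illustrative snippet (not part of the spider; the sample HTML is made up):

    sample = '<style>p{color:red}</style><p>Breaking:\n\tnew CEO</p>'
    print(CountrySpider.clear_html(sample))
    # -> 'Breaking:new CEO'

Note that newlines and tabs are deleted rather than replaced with spaces, so words separated only by control whitespace end up glued together.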
I also wrote some code in a pipeline to refine the extracted text. Here is the pipeline:
from scrapy.conf import settings
from scrapy import log
import pymongo
import json
import codecs
import re


class RefineDataPipeline(object):

    def process_item(self, item, spider):
        # The edits below are applied to the output of every scrapy crawler.
        item['text'] = str(item['text'].encode("utf-8"))
        replacements = {
            "U.S.": " US ", " M ": "Million", "same as the title": "",
            " MMH Editorial ": "", " UPS ": "United Parcel Service",
            " UK ": " United Kingdom ", " Penn ": " Pennsylvania ",
            " CIPS ": " Chartered Institute of Procurement and Supply ",
            " t ": " tonnes ", " Uti ": " UTI ",
            "EMEA": " Europe, Middle East and Africa ",
            " APEC ": " Asia-Pacific Economic Cooperation ",
            " m ": " million ", " Q4 ": " 4th quarter ",
            "LLC": "", "Ltd": "", "Inc": "",
            "Published text": " Original text "
        }
        # Collect parenthesised fragments and decide how to rewrite each one.
        allparen = re.findall(r'\(.+?\)', item['text'])
        for item in allparen:
            if item[1].isupper() and item[2].isupper():
                replacements[str(item)] = ''
            elif item[1].islower() or item[2].islower():
                replacements[str(item)] = item[1:len(item) - 1]
            else:
                try:
                    val = int(item[1:len(item) - 1])
                    replacements[str(item)] = str(val)
                except ValueError:
                    pass

        def multireplace(s, replacements):
            # Longest keys first, so short keys can't match inside longer ones.
            substrs = sorted(replacements, key=len, reverse=True)
            regexp = re.compile('|'.join(map(re.escape, substrs)))
            return regexp.sub(lambda match: replacements[match.group(0)], s)

        item['text'] = multireplace(item['text'], replacements)
        item['text'] = re.sub(r'\s+', ' ', item['text']).strip()
        return item
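As a side note on the multireplace helper: sorting the keys longest-first matters because the regex alternation tries alternatives in the order given. A small standalone check (with a made-up dictionary, not the one above) illustrates this:

    import re

    def multireplace(s, replacements):
        substrs = sorted(replacements, key=len, reverse=True)
        regexp = re.compile('|'.join(map(re.escape, substrs)))
        return regexp.sub(lambda match: replacements[match.group(0)], s)

    print(multireplace("the category of cat", {"cat": "dog", "category": "class"}))
    # -> 'the class of dog'
    # Without the sort, "cat" could match first and yield 'the dogegory of dog'.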
But there is a big problem that prevents the spider from successfully scraping the data:
Traceback (most recent call last):
  File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 588, in _runCallbacks
    current.result = callback(current.result, *args, **kw)
  File "/home/hathout/Desktop/updataed portcalls/thomas/thomas/pipelines.py", line 41, in process_item
    item['text'] = multireplace(item['text'], replacements)
TypeError: string indices must be integers, not str
I really don't know how to get past the "TypeError: string indices must be integers, not str" error.
Answer 0 (score: 3)
Short answer: the variable item is a string.
Long answer: In this section

    allparen = re.findall(r'\(.+?\)', item['text'])
    for item in allparen:
        ...

you are looping over allparen, which should be a list of strings (or an empty list), and you use the same variable name, item, as the loop variable. So inside the loop item is a string, not a dict/Item object, which is why item['text'] raises the TypeError. Use a different name for the loop variable, e.g.:
    for paren in allparen:
        if paren[1].isupper() and paren[2].isupper():
            ...
Basically, your original item variable gets overwritten because you reuse the same variable name inside the loop.
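Putting that fix into the pipeline, a minimal sketch of the corrected loop could look like this (the replacements dictionary is abbreviated here, and only the loop variable is renamed; everything else stays as in the original process_item):

    def process_item(self, item, spider):
        item['text'] = str(item['text'].encode("utf-8"))
        replacements = {"U.S.": " US ", " M ": "Million"}  # ...abbreviated
        allparen = re.findall(r'\(.+?\)', item['text'])
        for paren in allparen:  # renamed: `item` keeps pointing at the Item
            if paren[1].isupper() and paren[2].isupper():
                replacements[paren] = ''
            elif paren[1].islower() or paren[2].islower():
                replacements[paren] = paren[1:-1]
            else:
                try:
                    replacements[paren] = str(int(paren[1:-1]))
                except ValueError:
                    pass
        # ...then build and apply multireplace exactly as before.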