How do I remove the unicode string wrapper "[u' string]" when I write the CSV file?
**This is my spider:**
```python
import pdb
import FileManager

from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from centerfireguns.items import CenterfiregunsItem
from urlparse import urljoin
from scrapy.http import Request

new_filemanager = FileManager.File_Manager()

class FiregunsSpider(CrawlSpider):
    name = 'centerfireguns'
    allowed_domains = ['centerfireguns.com']
    start_urls = ['http://www.centerfireguns.com/firearms.html']

    rules = (
        Rule(SgmlLinkExtractor(allow=(), restrict_xpaths=('//a[contains(@class, "i-next")][1]')), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        hxs = HtmlXPathSelector(response)
        urls = hxs.select('//a[contains(@class,"product-image")]/@href').extract()
        for url in urls:
            new_url = urljoin("http://www.centerfireguns.com/", url)
            item = CenterfiregunsItem()
            item['ad_url'] = new_url
            request = Request(new_url, callback=self.parse_detail)
            request.meta['item'] = item
            yield request

    def parse_detail(self, response):
        hxs = HtmlXPathSelector(response)
        item = response.meta['item']
        # <div class="product-name"><h1 itemprop="name">Adcor Defense BEAR 223 16 OPT RDY</h1>
        item['title'] = hxs.select('//div[contains(@class, "product-name")]//h1/text()').extract()
        # <div class="product-shop"><span class="regular-price" id="product-price-21339"> <span class="price" itemprop="price">$1,389.00</span> </span>
        item['price'] = hxs.select('//div[contains(@class, "product-shop")]//span[contains(@itemprop,"price")][1]/text()').extract()
        # <div class="sku"><span>Model #: </span>2013040</div>
        item['model'] = hxs.select('//div[contains(@class, "sku")]/text()').extract()
        # <img id="image" itemprop="image" src="http://www.centerfireguns.com/media/catalog/product/cache/1/image/292x320/9df78eab33525d08d6e5fb8d27136e95/a/d/adcor-defense-2013040-tactical-rifles.jpg">
        item['img_url'] = hxs.select('//img[contains(@id, "image")]/@src').extract()
        # <table class="data-table" id="product-attribute-specs-table">
        item['specification'] = hxs.select('//table[contains(@id, "product-attribute-specs-table")]/text()').extract()
        # <div id="product_tabs_description_tabbed_contents"><h6>Full Description</h6><ol><h2>Details</h2><div class="std">
        item['description'] = hxs.select('//div[contains(@id, "product_tabs_description_tabbed_contents")]//div[contains(@class, "std")]/text()').extract()
        # new_filemanager.writeFile("/home/user1/Public/www/GajenderData/SCRIPTS/pythonprog/ganesh/centerfireguns_detail.csv", str(title) + "\n")
        yield item
```
**This is my pipelines.py:**
```python
# -*- coding: utf-8 -*-
import csv

class CenterfiregunsPipeline(object):
    def __init__(self):
        self.myCSV = csv.writer(open('/home/user1/Public/www/GajenderData/SCRIPTS/pythonprog/ganesh/centerfireguns_detail.csv', 'wb'))
        self.myCSV.writerow(['ad_url', 'title', 'model', 'price', 'img_url', 'specification', 'description'])

    def process_item(self, item, spider):
        self.myCSV.writerow([item['ad_url'].encode('utf-8'), item['title'].encode('utf-8'), item['model'].encode('utf-8'), item['price'].encode('utf-8'), item['img_url'].encode('utf-8'), item['specification'].encode('utf-8'), item['description'].encode('utf-8')])
        return item
```
When I use .encode('utf-8') I get the error below:
```
Traceback (most recent call last):
  File "/usr/lib/python2.7/dist-packages/scrapy/middleware.py", line 54, in _process_chain
    return process_chain(self.methods[methodname], obj, *args)
  File "/usr/lib/python2.7/dist-packages/scrapy/utils/defer.py", line 65, in process_chain
    d.callback(input)
  File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 362, in callback
    self._startRunCallbacks(result)
  File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 458, in _startRunCallbacks
    self._runCallbacks()
--- <exception caught here> ---
  File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 545, in _runCallbacks
    current.result = callback(current.result, *args, **kw)
  File "/home/user1/Public/www/GajenderData/SCRIPTS/pythonprog/ganesh/centerfireguns/centerfireguns/pipelines.py", line 14, in process_item
    self.myCSV.writerow([item['ad_url'].encode('utf-8'),item['title'].encode('utf-8'),item['model'].encode('utf-8'),item['price'].encode('utf-8'),item['img_url'].encode('utf-8'),item['specification'].encode('utf-8'),item['description'].encode('utf-8')])
```
**exceptions.AttributeError: 'list' object has no attribute 'encode'**
I am a beginner in Python.
**Answer 0 (score: 0)**
The following snippet is part of a function from a pet project of mine. It declares a list containing the substrings you want removed and then uses the replace method to strip them out of every line of the target file... I use it on plain text files, so you would have to adapt it to the CSV writer and reader (see the CSV-based sketch after the snippet), but the idea is there...
name = "file.csv"
infile = name
outfile = name + "_clean.csv"
delete_list = ["['", "']"]
fin = open(infile)
fout = open(outfile, "w+")
for line in fin:
for word in delete_list:
line = line.replace(word, "")
fout.write(line)
fin.close()
fout.close()
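As a rough illustration of that adaptation, here is a minimal sketch using the csv module; the file names and the contents of delete_list are assumptions on my part, not from the original project. It reads the already-written CSV with csv.reader, strips the markers from every cell, and writes a cleaned copy with csv.writer:

```python
import csv

# Assumed paths; point these at your own files.
infile = 'centerfireguns_detail.csv'
outfile = 'centerfireguns_detail_clean.csv'

# Substrings left behind when a list is written with str(); extend as needed.
delete_list = ["[u'", "['", "']"]

with open(infile, 'rb') as fin, open(outfile, 'wb') as fout:
    reader = csv.reader(fin)
    writer = csv.writer(fout)
    for row in reader:
        cleaned = []
        for cell in row:
            for marker in delete_list:
                cell = cell.replace(marker, '')
            cleaned.append(cell)
        writer.writerow(cleaned)
```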
Alternatively, and perhaps more pythonic-hackish, you can strip and then join the extracted items before assigning them to the item fields... example:
```python
# ...
tit = hxs.select('//div[contains(@class, "product-name")]//h1/text()').extract()
tit = [x.strip() for x in tit]
tit = ''.join(tit)

prc = hxs.select('//div[contains(@class, "product-shop")]//span[contains(@itemprop,"price")][1]/text()').extract()
prc = [x.strip() for x in prc]
prc = ''.join(prc)

item = response.meta['item']
item['title'] = tit
item['price'] = prc
# ...
```
This way you could even avoid the pipeline entirely (if encoding is the only reason you have it)... otherwise, if it serves the purpose you need, you can drop the encode calls from the pipeline; a sketch of that variant is below... may I ask why you need the pipeline?
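For completeness, here is a minimal sketch of what that pipeline variant could look like if you keep it. This is an assumption on my part, not code from the question: the _as_text helper and the FIELDS list are made up for illustration, and the output path is shortened. It simply joins each list returned by extract() into one stripped string before encoding and writing it:

```python
# -*- coding: utf-8 -*-
import csv

FIELDS = ['ad_url', 'title', 'model', 'price', 'img_url', 'specification', 'description']

class CenterfiregunsPipeline(object):
    def __init__(self):
        # Assumed output path; use your own full path as in the question.
        self.myCSV = csv.writer(open('centerfireguns_detail.csv', 'wb'))
        self.myCSV.writerow(FIELDS)

    def _as_text(self, value):
        # extract() returns a list of unicode strings; join it into one string.
        if isinstance(value, (list, tuple)):
            value = ''.join(value)
        return value.strip().encode('utf-8')

    def process_item(self, item, spider):
        self.myCSV.writerow([self._as_text(item.get(field, '')) for field in FIELDS])
        return item
```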