Too many values to unpack when writing scrapy pipeline data to the database

Time: 2015-09-04 06:09:01

Tags: python scrapy pymssql

Good afternoon. I am trying to fetch data from the web and store it in SQL Server. Using the pymssql library, the connection is established, but when the items are processed I get the error "too many values to unpack", so I have also attached the MyItem class. I cannot spot an obvious mistake. Here is the code in pipelines.py:

# -*- coding: utf-8 -*-
import pymssql
from scrapy import signals
import json
import codecs

class MyPipeline(object):
    def __init__(self):
        self.conn = pymssql.connect(host=r".\\MyPC", user='sa', password='XXXX', database='Webmining')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        try:
            self.cursor.executemany("INSERT INTO RecruitInformation(recruitNumber,name,detailLink,publishTime,catalog,worklocation) VALUES (%d,%s,%s,%t,%s,%s)", (item['recruitNumber'], item['name'], item['detailLink'], item['publishTime'], item['catalog'], item['worklocation']))
            self.conn.commit()
        except pymssql.InterfaceError, e:
            print("pymssql.InterfaceError")
        except pymssql.DataError, e:
            print("pymssql.DataError")
        except pymssql.OperationalError, e:
            print("pymssql.OperationalError")
        except pymssql.IntegrityError, e:
            print("pymssql.IntegrityError")
        except pymssql.InternalError, e:
            print("pymssql.InternalError")
        except pymssql.ProgrammingError, e:
            print("pymssql.ProgrammingError")
        except pymssql.NotSupportedError, e:
            print("pymssql.NotSupportedError")
            return item

    def spider_closed(self, spider):
        self.conn.close()
The code in item.py is as follows:
import scrapy
from scrapy.item import Item, Field

class MyItem(Item):
    name = Field()
    catalog = Field()
    workLocation = Field()
    recruitNumber = Field()
    detailLink = Field()
    publishTime = Field()

class MySpider(CrawlSpider):
    name = "xxxx"
    allowed_domains = ["xxxx.com"]
    start_urls = ["http://xx.xxxx.com/position.php"]
    rules = [Rule(sle(allow=("/position.php\?&start=\d{,4}#a")), follow=True, callback='parse_item')]

    def parse_item(self, response):
        items = []
        sel = Selector(response)
        base_url = get_base_url(response)
        sites_even = sel.css('table.tablelist tr.even')
        for site in sites_even:
            item = MyItem()
            item['name'] = site.css('.l.square a').xpath('text()').extract()
            relative_url = site.css('.l.square a').xpath('@href').extract()[0]
            item['detailLink'] = urljoin_rfc(base_url, relative_url)
            item['catalog'] = site.css('tr > td:nth-child(2)::text').extract()
            item['workLocation'] = site.css('tr > td:nth-child(4)::text').extract()
            item['recruitNumber'] = site.css('tr > td:nth-child(3)::text').extract()
            item['publishTime'] = site.css('tr > td:nth-child(5)::text').extract()
            items.append(item)
        sites_odd = sel.css('table.tablelist tr.odd')
        for site in sites_odd:
            item = MyItem()
            item['name'] = site.css('.l.square a').xpath('text()').extract()
            relative_url = site.css('.l.square a').xpath('@href').extract()[0]
            item['detailLink'] = urljoin_rfc(base_url, relative_url)
            item['catalog'] = site.css('tr > td:nth-child(2)::text').extract()
            item['workLocation'] = site.css('tr > td:nth-child(4)::text').extract()
            item['recruitNumber'] = site.css('tr > td:nth-child(3)::text').extract()
            item['publishTime'] = site.css('tr > td:nth-child(5)::text').extract()
            items.append(item)
        return items

    def _process_request(self, request):
        info('process ' + str(request))
        return request

1 Answer:

Answer 0 (score: 0):

Try using self.cursor.execute in place of self.cursor.executemany in your code.
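executemany() expects a sequence of parameter tuples, one tuple per row to insert; when it is handed a single flat tuple of six values, pymssql iterates over it and tries to unpack each individual field as a parameter set, which is what raises "too many values to unpack". Below is a minimal sketch of a corrected process_item under these assumptions: the first() helper is illustrative (extract() returns a list even for a single match, so each field is reduced to its first element here), and the %t placeholder from the original query is replaced, since pymssql only understands %s and %d:

    def process_item(self, item, spider):
        # extract() returns lists, so take the first match from each field
        # (assumption: every selector matched at least one node).
        def first(field):
            value = item[field]
            return value[0] if isinstance(value, list) else value

        try:
            # execute() takes a single parameter tuple (one row);
            # executemany() would expect a list of such tuples.
            # int() because the %d placeholder needs a numeric value.
            self.cursor.execute(
                "INSERT INTO RecruitInformation"
                "(recruitNumber, name, detailLink, publishTime, catalog, worklocation)"
                " VALUES (%d, %s, %s, %s, %s, %s)",
                (int(first('recruitNumber')), first('name'), first('detailLink'),
                 first('publishTime'), first('catalog'), first('workLocation')))
            self.conn.commit()
        except pymssql.Error, e:
            print("pymssql error: %s" % e)
        return item  # outside the except blocks, so every item keeps flowing

Note also that the pipeline in the question reads item['worklocation'] while MyItem declares the field as workLocation; Scrapy items raise a KeyError for undeclared fields, so the casing has to match.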