UnicodeWarning:某些字符无法解码,并被替换为REPLACEMENT CHARACTER

时间:2016-11-30 07:46:50

标签: python beautifulsoup

我在代码中使用python + bs4 + pyside,请查看以下代码部分:

代码如下:
#coding:gb2312
import urllib2
import sys
import urllib
import urlparse
import random
import time
from datetime import datetime, timedelta
import socket
from bs4 import BeautifulSoup
import lxml.html
from PySide.QtGui import *
from PySide.QtCore import *
from PySide.QtWebKit import *

def download(self, url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers or {})
    opener = self.opener or urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except Exception as e:
        print 'Download error:', str(e)
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors
                return self._get(url, headers, proxy, num_retries-1, data)
        else:
            code = None
    return {'html': html, 'code': code}
def crawling_hdf(openfile):
filename = open(openfile,'r')
namelist = filename.readlines()
app = QApplication(sys.argv)
for name in namelist:         
    url = "http://so.haodf.com/index/search?type=doctor&kw="+ urllib.quote(name)
    #get doctor's home page
    D = Downloader(delay=DEFAULT_DELAY, user_agent=DEFAULT_AGENT, proxies=None, num_retries=DEFAULT_RETRIES, cache=None)
    html = D(url)
    soup = BeautifulSoup(html)
    tr = soup.find(attrs={'class':'docInfo'})
    td = tr.find(attrs={'class':'docName font_16'}).get('href')
    print td
    #get doctor's detail information page
    loadPage_bs4(td)

filename.close()

if __name__ == '__main__':
    # Script entry point.  NOTE(review): the original paste lost the
    # indentation of this call, making it a syntax error; restored here.
    crawling_hdf("name_list.txt")

运行程序后,会显示一条警告消息:

警告(来自 warnings 模块):文件 "C:\Python27\lib\site-packages\bs4\dammit.py",第 231 行 —— UnicodeWarning:某些字符无法解码,并被替换为 REPLACEMENT CHARACTER。

我使用 print str(html) ,发现所有中文语言都是乱码。

我尝试使用在本网站上搜索的“解码或编码”和“gzip”解决方案,但在我的情况下它不起作用。

非常感谢你的帮助!

2 个答案:

答案 0 :(得分:0)

看起来该页面是以 gbk 编码的。问题在于(据我所知)utf-8 与 gbk 之间没有直接的转换。

我之前看过这个workaround,请尝试:

html.encode('latin-1').decode('gbk').encode('utf-8')

答案 1 :(得分:0)

GBK是Python中codecs中的built-in encodings之一。

这意味着,只要您拥有一串原始字节,就可以使用方法decode和相应的编解码器名称(或其别名)将其转换为本机Unicode字符串。

以下代码可行(改编自 https://stackoverflow.com/q/36530578/2564301):如果返回的文本不包含"垃圾"或"未知"字符,就说明解码正确,而且得到的内容确实与源页面一致(通过将其保存为新文件并比较中文字符的值来验证)。

from urllib import request

def scratch(url, encode='gbk'):
    """Download *url* and return its body decoded to a unicode string.

    Parameters
    ----------
    url : str
        The page to fetch.
    encode : str
        Codec used to decode the response body.  Defaults to 'gbk'
        because haodf.com serves GBK.  BUG FIX: the original accepted
        this parameter (default 'utf-8') but ignored it and always
        decoded with "gbk"; it is now honored, with the default set to
        'gbk' so existing call sites behave exactly as before.

    Returns
    -------
    str
        The decoded page, which is also printed.
    """
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    headers = {'User-Agent': user_agent}
    req = request.Request(url, headers=headers)
    result = request.urlopen(req)
    try:
        page = result.read()
    finally:
        # Close the connection even if read() raises.
        result.close()
    u_page = page.decode(encoding=encode)
    print(u_page)
    return u_page

# Demo: fetch the haodf search page and echo the decoded text.
page = scratch('http://so.haodf.com/index/search')
print (page)