我有一组python脚本(https://github.com/hvdwolf/wikiscripts),它解析wikidumps使其成为gpx / osm / csv / sql / sqlite转储,用作Nav应用程序中的POI文件。我只解析有坐标的文章。为此,我使用包含sql insert语句的externallinks转储。包含“geohack.php”子字符串的sql语句确实包含坐标。我在sqlite数据库中导入它们以用作文章转储的参考。 它们都是utf-8转储并解析所有“西方类型”文件工作正常,但阿拉伯语,波斯语,俄语,日语,希腊语,中文和其他语言不起作用。显然我做错了。
我获得的标题字符串是:
%D9%85%D8%A7%D9%81%D8%B8%D8%A9_%D8%A7%D9%84%D8%A8%D8%AF%D8%A7%D8%A6%D8% B9 %D8%A3%D9%88%D8%B1%D9%8A%D9%88%D9%8A%D9%84%D8%A7 Battle_of_Nicopolis
青岛
所以一些普通人物都可以。剩下的就是胡言乱语(对我来说)。 我已经做了一些测试,我只是读取转储并写入utf-8编码的文本文件(换行=>换行)然后它工作正常,但在字符串处理函数和“重新”的某处。函数它改变了我的unicode文本。
编辑:我的python脚本以:# - - 编码:utf-8 - -
开头
我的代码(相关部分,包括python2和python3语句,以及一些显示我已经尝试过的评论):
with gzip.open(externallinks_file, 'r') as single_externallinksfile:
#reader = codecs.getreader("utf-8")
#single_externallinksfile = reader(single_externallinksfile)
#with codecs.getreader('utf-8')gzip.open(externallinks_file, 'r') as single_externallinksfile:
linecounter = 0
totlinecounter = 0
filelinecounter = 0
# We need to read line by line as we have massive files, sometimes multiple GBs
for line in single_externallinksfile:
if sys.version_info<(3,0,0):
line = unicode(line, 'utf-8')
else:
line = line.decode("utf-8")
if "INSERT INTO" in line:
insert_statements = line.split("),(")
for statement in insert_statements:
#statement = statement.decode("utf-8")
filelinecounter += 1
#if ("geohack.php?" in statement) and (("pagename" in statement) or ("src=" in statement)):
# src can also be in the line, but is different and we leave it out for now
if ("geohack.php?" in statement) and ("pagename" in statement) and ("params" in statement):
language = ""
region = ""
poitype = ""
content = re.findall(r'.*?pagename=(.*?)\'\,\'',statement,flags=re.IGNORECASE)
if len(content) > 0: # We even need this check due to corrupted lines
splitcontent = content[0].split("&")
title = splitcontent[0]
#title = title.decode('utf8')
for subcontent in splitcontent:
if "language=" in subcontent:
language = subcontent.replace("language=","")
#print('taal is: ' + language)
if "params=" in subcontent:
params_string = subcontent.replace("params=","").split("_")
latitude,longitude,poitype,region = get_coordinates_type_region(params_string)
if ( str(latitude) != "" and str(longitude) != "" and (str(latitude) != "0") or (str(longitude) != "0")):
if GENERATE_SQL == "YES":
sql_file.write('insert into ' + file_prefix + '_externallinks values ("' + title + '","' + str(latitude) + '","' + str(longitude) + '","' + language + '","' + poitype + '","' + region + '");\n')
if CREATE_SQLITE == "YES":
sqlcommand = 'insert into ' + file_prefix + '_externallinks values ("' + title + '","' + str(latitude) + '","' + str(longitude) + '","' + language + '","' + poitype + '","' + region +'");'
#print(sqlcommand)
cursor.execute(sqlcommand)
linecounter += 1
if linecounter == 10000:
if CREATE_SQLITE == "YES":
# Do a databse commit every 10000 rows
wikidb.commit()
totlinecounter += linecounter
linecounter = 0
print('\nProcessed ' + str(totlinecounter) + ' lines out of ' + str(filelinecounter) + ' sql line statements. Elapsed time: ' + str(datetime.datetime.now().replace(microsecond=0) - start_time))
答案 0 :(得分:1)
看起来标题是percent-encoded。
if(!e){ e = window.event; }
产量
try:
# Python 3
from urllib.parse import unquote
except ImportError:
# Python 2
from urllib import unquote
percent_encoded = '''
%D9%85%D8%A7%D9%81%D8%B8%D8%A9_%D8%A7%D9%84%D8%A8%D8%AF%D8%A7%D8%A6%D8%B9
%D8%A3%D9%88%D8%B1%D9%8A%D9%88%D9%8A%D9%84%D8%A7
Battle_of_Nicopolis
Qingdao
'''
print(unquote(percent_encoded))