Pickle ValueError: binary mode doesn't take an encoding argument

Asked: 2017-06-15 13:55:14

Tags: python encoding binary pickle

I want to create a very basic Q&A chatbot. Given a list of questions & answers that I use as my dataset, I want to train it so that it returns the relevant answer to a hard-coded question (a different one each time). First I tokenize and clean the text, then I use cosine similarity, but it gives me an error, which is (I guess) a pickle problem.

UPDATED

import csv
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from collections import Counter
import pickle
import os.path
import re, math

tokenizer = RegexpTokenizer(r'\w+')
# note: this rebinds the name 'stopwords' from the imported module to a plain list
stopwords = stopwords.words('english')

extra_stopwords = stopwords + ['I', 'can']
WORD = re.compile(r'\w+')


def get_clean_data():
    clean_data_set = {
        'questions': {},
        'answers': {},
    }

    counter = 0

    # close the csv file automatically too, instead of leaving it open
    with open('data.csv', 'r', encoding='utf-8') as csv_file:
        for r in csv.reader(csv_file):
            # the file is opened with encoding='utf-8', so the fields are already
            # plain strings; str(r[0].encode('utf-8')) would give "b'...'" strings
            question = r[0]
            answer = r[1]

            _, tags_question = get_tags(question)
            _, tags_answer = get_tags(answer)

            clean_data_set['answers'][answer] = tags_answer + tags_question
            clean_data_set['questions'][question] = text_to_vector(question)

            counter += 1

            # hardcode the number :)
            print(counter, ' out of 746')

    with open('dump.dict', 'wb') as my_dump_file:
        pickle.dump(clean_data_set, my_dump_file)


def get_tags(text, use_set=True):
    tokens = tokenizer.tokenize(text)
    # remove stop words from tokens and make the rest lower case
    filtered_words = [word.lower() for word in tokens if word not in extra_stopwords]

    # return non-duplicate values by default
    if use_set:
        filtered_words = list(set(filtered_words))

    return Counter(filtered_words), filtered_words

# simple cosine similarity measure between two Counter word vectors,
# e.g. get_cosine(Counter({'why': 1, 'errors': 1}), Counter({'errors': 1})) ≈ 0.707
def get_cosine(vec1, vec2):
    intersection = set(vec1.keys()) & set(vec2.keys())
    numerator = sum(vec1[x] * vec2[x] for x in intersection)

    sum1 = sum(vec1[x] ** 2 for x in vec1.keys())
    sum2 = sum(vec2[x] ** 2 for x in vec2.keys())
    denominator = math.sqrt(sum1) * math.sqrt(sum2)

    if not denominator:
        return 0.0
    return float(numerator) / denominator


def text_to_vector(text):
    words = WORD.findall(text)
    return Counter(words)


# question is the incoming text; question_set is one of the stored
# question vectors from the pickled data set
def get_cosine_value(question, question_set):
    question_vector = text_to_vector(question)
    cosine = get_cosine(question_vector, question_set)
    return cosine


def answer_question(question, top=5):
    with open('dump.dict', 'rb') as my_dump_file:
        data_set = pickle.load(my_dump_file)


    _, question_tags = get_tags(question)

    ranking_dict = {}
    similar_questions_rank = {}
    for entry in data_set['answers']:
        tags = data_set['answers'][entry]
        # rank is the intersection between the list of tags from the question
        # and the list of tags associated to answers
        rank = len(set(question_tags).intersection(tags))
        ranking_dict[entry] = rank

    for entry in data_set['questions']:
        cosine_similarity = get_cosine_value(question, data_set['questions'][entry])
        similar_questions_rank[entry] = cosine_similarity

    sorted_similarity_dict = sorted(similar_questions_rank.items(), key=lambda x: x[1], reverse=True)
    sorted_ranking_dict = sorted(ranking_dict.items(), key=lambda x: x[1], reverse=True)

    # print the top entries ranked by tag overlap
    for item in sorted_ranking_dict[:top]:
        print('Rank: ', item[1])
        print('Answer: ', item[0])
        print('\n\n')

    # print the top entries ranked by cosine similarity
    for item in sorted_similarity_dict[:top]:
        print('Rank: ', item[1])
        print('Question: ', item[0])

# run this once first, so that dump.dict exists before answer_question tries to load it
# get_clean_data()


question = 'why all these errors?'

answer_question(question)

Here is the updated error message:

Traceback (most recent call last):
  File "C:\Users\joasa\Desktop\si\main.py", line 133, in <module>
    answer_question(question)
  File "C:\Users\joasa\Desktop\si\main.py", line 94, in answer_question
    data_set = pickle.load(my_dump_file)
EOFError: Ran out of input
[Finished in 1.4s]

Can someone please help? I have no idea what to do here. Thanks in advance.

1 Answer:

Answer 0 (score: 1)

I think it comes from this line in your get_clean_data function:

pickle.dump(clean_data_set, open('dump.dict', 'w'))
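
(As an aside, the ValueError in the question title is what you get if you instead combine binary mode with an encoding argument; presumably an earlier attempt looked something like this:)

# raises "ValueError: binary mode doesn't take an encoding argument"
pickle.dump(clean_data_set, open('dump.dict', 'wb', encoding='utf-8'))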

Look at the line quoted above: you open the file for writing, but you never close it, so the buffered bytes may never be flushed to disk; when you later try to read the file, pickle runs out of input before it finds a complete object. To stop this from happening, use a context manager block:

with open('dump.dict', 'wb') as my_dump_file:
    pickle.dump(clean_data_set, my_dump_file)

That way, the file is guaranteed to be closed (and its buffer flushed) no matter how you exit the with block.
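
Under the hood, with is roughly equivalent to a hand-written try/finally; a minimal sketch of the same guarantee:

# roughly what the with statement does for you
my_dump_file = open('dump.dict', 'wb')
try:
    pickle.dump(clean_data_set, my_dump_file)
finally:
    # runs even if pickle.dump raises, so the file is always closed and flushed
    my_dump_file.close()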

You should do the same when loading the pickle dump in answer_question:

with open('dump.dict', 'rb') as my_dump_file:
    data_set = pickle.load(my_dump_file)
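
For completeness, here is a minimal, self-contained round trip (the file name demo.dict is just a placeholder) showing that the with-based version reads back cleanly, while dumping to a file you never close can leave the data stuck in the write buffer and trigger exactly the EOFError: Ran out of input you are seeing:

import pickle

data = {'questions': {}, 'answers': {}}

# buggy: pickle.dump(data, open('demo.dict', 'wb')) never closes the file,
# so the bytes may never reach disk and a later load can fail with
# "EOFError: Ran out of input"

# fixed: the with block closes (and flushes) the file on exit
with open('demo.dict', 'wb') as dump_file:
    pickle.dump(data, dump_file)

with open('demo.dict', 'rb') as dump_file:
    restored = pickle.load(dump_file)

print(restored == data)  # True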