Python, Django, pickle resources: [Errno 2] No such file or directory

Time: 2017-03-21 15:28:03

Tags: python django python-2.7 nltk pickle

I am currently trying to set up a poetry generator on my virtual machine, which runs Ubuntu. When I run the server, enter some text, and click generate, I get the following error:

Exception Type:     IOError
Exception Value:    

[Errno 2] No such file or directory: 'poetry_generator/resources/bigram.pickle'

Exception Location:     /home/lee/Downloads/PoEmo-master/poetry_generator/architecture/experts/generating_experts/collocation_expert.py in train, line 31

There is no /resources folder inside poetry_generator, and no bigram.pickle file either. The traceback points to line 31 of the following script as the problem:

import nltk
import os
import pickle
from pattern import en

from poetry_generator.structures.word import Word
from poetry_generator.architecture.experts.generating_experts.word_generating_expert import WordGeneratingExpert
from poetry_generator.settings import resources_dir


class CollocationExpert(WordGeneratingExpert):
    """Generating most common contexts for words for words"""

    def __init__(self, blackboard):
        super(
            CollocationExpert,
            self).__init__(
            blackboard,
            "Collocation Expert")
        self.word_tag_pairs = []

    def train(self):
        bigram_pickle_file = os.path.join(resources_dir, 'bigram.pickle')
        try:
            with open(bigram_pickle_file,'rb') as f:
                self.word_tag_pairs = pickle.load(f)

        except IOError:
            tagged_words = nltk.corpus.brown.tagged_words(tagset='universal')
            self.word_tag_pairs = list(nltk.bigrams(tagged_words))
            with open(bigram_pickle_file,'w') as f:
                pickle.dump(self.word_tag_pairs,f)


    '''Finding verbs for noun '''

    def _find_verbs(self, word):
        word_bigrams = [(a[0], b[0]) for a, b in self.word_tag_pairs
                                               if a[0] == word.name and a[1] == 'NOUN' and b[1] == 'VERB'
                                               and en.conjugate(b[0], "inf") not in ('be', 'have')]
        return self.__get_best_collocations(word, word_bigrams)

    '''Finding adjectives for noun'''

    def _find_epithets(self, word):
        word_bigrams = [(b[0], a[0]) for (a, b) in self.word_tag_pairs
                        if b[0] == word.name and b[1] == 'NOUN' and a[1] == 'ADJ']
        epithets = self.__get_best_collocations(word, word_bigrams)
        return epithets

    '''Finding nouns described by adjective'''

    def _find_comparisons(self, adjective):
        word_bigrams = [(a[0], b[0]) for (a, b) in self.word_tag_pairs
                        if a[0] == adjective.name and b[1] == 'NOUN' and a[1] == 'ADJ']
        comparisons = self.__get_best_collocations(adjective, word_bigrams)
        return comparisons

    '''Adding epithets for noun to pool'''

    def _add_epithets(self, word):
        epithets = set([Word(w, "JJ") for w in self._find_epithets(word)])
        if word not in self.blackboard.pool.epithets:
            self.blackboard.pool.epithets[word] = []
        self.blackboard.pool.epithets[word] += list(epithets)
        return epithets

    def _add_verbs(self, word):
        verbs = set([Word(w, "V") for w in self._find_verbs(word)])
        self.blackboard.pool.verbs[word] = list(verbs)
        return verbs

    '''Adding nouns for adjectives to pool'''

    def _add_comparisons(self, adj):

        comparisons = set([Word(w, "N") for w in self._find_comparisons(adj)])
        self.blackboard.pool.comparisons[adj] = comparisons
        return comparisons

    def __get_best_collocations(self, word, word_bigrams, n=20):
        words = nltk.ConditionalFreqDist(word_bigrams)[word.name]
        best_bigrams = sorted(words.items(), key=lambda (k, v): v, reverse=False)[:n]

        return dict(best_bigrams).keys()

    def generate_words(self):
        super(CollocationExpert, self).generate_words()
        counter = 0
        for w in self.blackboard.pool.nouns:
            eps = self._add_epithets(w)
            vs = self._add_verbs(w)
            le = len(eps)
            lv = len(vs)
            counter += le + lv
        for adj in self.blackboard.pool.adjectives:
            comps = self._add_comparisons(adj)
            counter += len(comps)
        return counter

Any ideas? I'm new to Python, so I'm not sure where I'm going wrong. Thanks!

1 Answer:

Answer 0 (score: 0)

The error is raised on the third line of the train method, but you catch that one. In the except block, however, you attempt exactly the same thing: changing the mode you open the file with does not make the error go away. If the file or its directory does not exist, you need to create it first.

See this question for how to create a directory recursively.

To create the file, this may help.
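For illustration, here is a minimal sketch of how CollocationExpert.train could create the missing directory before writing the cache. It assumes resources_dir from poetry_generator.settings is meant to point at poetry_generator/resources, and that import errno has been added next to the existing imports; this is only one possible shape for the fix, not the project's official code:

    def train(self):
        bigram_pickle_file = os.path.join(resources_dir, 'bigram.pickle')
        try:
            # Load the cached bigrams if they have already been written.
            with open(bigram_pickle_file, 'rb') as f:
                self.word_tag_pairs = pickle.load(f)
        except IOError:
            # Cache is missing: rebuild the bigrams from the Brown corpus.
            tagged_words = nltk.corpus.brown.tagged_words(tagset='universal')
            self.word_tag_pairs = list(nltk.bigrams(tagged_words))
            # Create the resources directory first, otherwise this open()
            # fails with the same IOError that was just caught.
            try:
                os.makedirs(resources_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
            # Write the pickle in binary mode so it round-trips cleanly.
            with open(bigram_pickle_file, 'wb') as f:
                pickle.dump(self.word_tag_pairs, f)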