I am trying to write the prediction part of a script for this tutorial: https://mxnet.incubator.apache.org/tutorials/nlp/cnn.html
import mxnet as mx
from collections import Counter
import os
import re
import threading
import sys
import itertools
import numpy as np
from collections import namedtuple
SENTENCES_DIR = 'C:/code/mxnet/sentences'
CURRENT_DIR = 'C:/code/mxnet'
def clean_str(string):
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
def load_data_sentences(filename):
    sentences_file = open(filename, "r")
    # Tokenize
    x_text = [line.decode('Latin1').strip() for line in sentences_file.readlines()]
    x_text = [clean_str(sent).split(" ") for sent in x_text]
    return x_text
def pad_sentences(sentences, padding_word=""):
    sequence_length = max(len(x) for x in sentences)
    padded_sentences = []
    for i in range(len(sentences)):
        sentence = sentences[i]
        num_padding = sequence_length - len(sentence)
        new_sentence = sentence + [padding_word] * num_padding
        padded_sentences.append(new_sentence)
    return padded_sentences
def build_vocab(sentences):
    word_counts = Counter(itertools.chain(*sentences))
    vocabulary_inv = [x[0] for x in word_counts.most_common()]
    vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
    return vocabulary, vocabulary_inv
def build_input_data(sentences, vocabulary):
    x = np.array([
        [vocabulary[word] for word in sentence]
        for sentence in sentences])
    return x
def predict(mod, sen):
    mod.forward(Batch(data=[mx.nd.array(sen)]))
    prob = mod.get_outputs()[0].asnumpy()
    prob = np.squeeze(prob)
    a = np.argsort(prob)[::-1]
    for i in a[0:5]:
        print('probability=%f' % (prob[i]))
sentences = load_data_sentences(os.path.join(SENTENCES_DIR, 'test-pos-1.txt'))
sentences_padded = pad_sentences(sentences)
vocabulary, vocabulary_inv = build_vocab(sentences_padded)
x = build_input_data(sentences_padded, vocabulary)

Batch = namedtuple('Batch', ['data'])
sym, arg_params, aux_params = mx.model.load_checkpoint(os.path.join(CURRENT_DIR, 'cnn'), 19)
mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)
# The model expects batches of 50 sentences, 56 tokens each
mod.bind(for_training=False, data_shapes=[('data', (50, 56))], label_shapes=mod._label_shapes)
mod.set_params(arg_params, aux_params, allow_missing=True)
predict(mod, x)
But I get this error:
infer_shape error. Arguments: data: (50, 26L)
Traceback (most recent call last):
  File "C:\code\mxnet\test2.py", line 152, in <module>
    predict(mod, x)
  File "C:\code\mxnet\test2.py", line 123, in predict
    mod.forward(Batch(data=[mx.nd.array(sen)]))
  ...
MXNetError: Error in operator reshape0: [16:20:21] c:\projects\mxnet-distro-win\mxnet-build\src\operator\tensor\./matrix_op-inl.h:187: Check failed: oshape.Size() == dshape.Size() (840000 vs. 390000) Target shape size is different to source. Target: [50,1,56,300] Source: [50,26,300]
The source is a text file containing 50 sentence strings.
Unfortunately, I haven't found any help on the Internet. Please take a look. OS: Windows 10, Python 2.7. Thanks.
Answer (score: 1):
I believe the error you're getting is because your input sentences are padded to a different length than the model expects. pad_sentences pads every sentence to the length of the longest sentence in the batch passed in, so if you use a different dataset you will almost certainly end up with a padding length different from the model's (56). In your case the padded length is 26 (visible in the error message as Source: [50, 26, 300]): the reshape fails because the target shape [50, 1, 56, 300] holds 50 × 1 × 56 × 300 = 840,000 elements, while the source [50, 26, 300] holds only 50 × 26 × 300 = 390,000.
I was able to get your code to run successfully by modifying pad_sentences as follows and calling it with sequence_length = 56 to match the model.
def pad_sentences(sentences, sequence_length, padding_word=""):
    padded_sentences = []
    for i in range(len(sentences)):
        sentence = sentences[i]
        num_padding = sequence_length - len(sentence)
        new_sentence = sentence + [padding_word] * num_padding
        padded_sentences.append(new_sentence)
    return padded_sentences
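The call site then passes the model's expected length explicitly, e.g. sentences_padded = pad_sentences(sentences, 56), where 56 matches the (50, 56) shape the module is bound with. One caveat: if a sentence is longer than 56 tokens, num_padding goes negative, [padding_word] * num_padding silently adds nothing, and the shape mismatch comes back. A minimal sketch of a guard against that (pad_or_truncate is a hypothetical helper of mine, not part of the original answer):

def pad_or_truncate(sentences, sequence_length=56, padding_word=""):
    # Sketch, not from the original answer: clip long sentences first,
    # then pad the rest up to the model's expected length.
    fixed = []
    for sentence in sentences:
        sentence = sentence[:sequence_length]           # truncate long sentences
        num_padding = sequence_length - len(sentence)   # >= 0 after truncation
        fixed.append(sentence + [padding_word] * num_padding)
    return fixed

sentences_padded = pad_or_truncate(sentences, sequence_length=56)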
N.B. Once it runs successfully, you will hit an error in predict, because prob[i] is not a float:
def predict(mod, sen):
    mod.forward(Batch(data=[mx.nd.array(sen)]))
    prob = mod.get_outputs()[0].asnumpy()
    prob = np.squeeze(prob)
    a = np.argsort(prob)[::-1]
    for i in a[0:5]:
        print('probability=%f' % (prob[i]))  # << prob is a numpy.ndarray, not a float
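One possible fix (my sketch, not part of the original answer; it assumes the output holds one row of class probabilities per input sentence) is to take the top class per row, so the value handed to %f is a scalar:

def predict(mod, sen):
    # Sketch under the assumption that the output has shape
    # (num_sentences, num_classes), one probability row per sentence.
    mod.forward(Batch(data=[mx.nd.array(sen)]))
    prob = mod.get_outputs()[0].asnumpy()
    for row in prob[:5]:                # report the first five sentences
        best = np.argmax(row)           # index of the most likely class
        print('class=%d probability=%f' % (best, row[best]))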
Vishaal