I am building a chatbot with PyTorch. I developed it on a Linux system, where it runs fine. However, when I run the same model on Windows 10, I get this error:
Traceback (most recent call last):
    return self.wsgi_app(environ, start_response)
    return super(_SocketIOMiddleware, self).__call__(environ, start_response)
    return self.wsgi_app(environ, start_response)
    response = self.handle_exception(e)
    return cors_after_request(app.make_response(f(*args, **kwargs)))
    reraise(exc_type, exc_value, tb)
    raise value
    response = self.full_dispatch_request()
    rv = self.handle_user_exception(e)
    return cors_after_request(app.make_response(f(*args, **kwargs)))
    reraise(exc_type, exc_value, tb)
    raise value
    rv = self.dispatch_request()
    return self.view_functions[rule.endpoint](**req.view_args)
    load(candidateSkillset.get(name))
  File ..., in load
    for (words, labels_net) in train_loader:
    return _MultiProcessingDataLoaderIter(self)
    w.start()
    self._popen = self._Popen(self)
    return _default_context.get_context().Process._Popen(process_obj)
    return Popen(process_obj)
    reduction.dump(process_obj, to_child)
    ForkingPickler(file, protocol).dump(obj)
AttributeError: Can't pickle local object 'load.<locals>.ChatDataset'

Traceback (most recent call last):
    exitcode = _main(fd, parent_sentinel)
    self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
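My understanding is that on Windows, DataLoader workers are started with multiprocessing's "spawn" method instead of the "fork" used on Linux, so the dataset object has to be pickled and sent to each worker process, and Python cannot pickle a class that is defined inside a function. This standalone sketch (not part of my project) reproduces the same AttributeError:

import pickle

def load():
    class ChatDataset:  # local class, just like the one in my code below
        pass
    return ChatDataset()

try:
    pickle.dumps(load())
except AttributeError as e:
    print(e)  # Can't pickle local object 'load.<locals>.ChatDataset'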
I am calling the function with the dataset I want it to train on... Here is my code:
import json
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
# tokenize, stem, bag_of_words and NeuralNet come from my own helper modules,
# and dataFileNames / modelNames are dicts defined elsewhere (omitted here).

def load(inp):
    global words, labels, docs_x, docs_y, questionsP1, questionsP2, questionsP3, questionsP4, model, questionTag, all_words, tags, xy, questions_list
    all_words = []
    tags = []
    xy = []
    questions_list = []
    words = []
    labels = []
    docs_x = []
    docs_y = []
    questionsP1 = []
    questionsP2 = []
    questionsP3 = []
    questionsP4 = []
    questionTag = {}

    print(dataFileNames.get(inp))
    with open(dataFileNames.get(inp)) as file:
        data = json.load(file)
    for intent in data["intents"]:
        for proficiency in intent["proficiency"]:
            for questions in proficiency["questions"]:
                questions_list.append(questions["question"])
                for responses in questions["responses"]:
                    wrds = tokenize(responses)
                    all_words.extend(wrds)
                    xy.append((wrds, questions["question"]))
                if questions["tag"] in tags:
                    print(questions["tag"])  # warn about a duplicate tag
                if questions["tag"] not in tags:
                    tags.append(questions["tag"])
                if proficiency["level"] == "P1":
                    questionsP1.append(questions["question"])
                    questionTag[questions["question"]] = questions["tag"]
                if proficiency["level"] == "P2":
                    questionsP2.append(questions["question"])
                    questionTag[questions["question"]] = questions["tag"]
                if proficiency["level"] == "P3":
                    questionsP3.append(questions["question"])
                    questionTag[questions["question"]] = questions["tag"]
                if proficiency["level"] == "P4":
                    questionsP4.append(questions["question"])
                    questionTag[questions["question"]] = questions["tag"]
    # PyTorch implementation
    ignore_words = ['?', '!', '.', ',']
    all_words = [stem(x) for x in all_words if x not in ignore_words]
    all_words = sorted(set(all_words))
    tags = sorted(set(tags))

    X_train = []
    y_train = []
    for tokenized_response, question in xy:
        bag = bag_of_words(tokenized_response, all_words)
        X_train.append(bag)
        label = questions_list.index(question)
        y_train.append(label)

    X_train = np.array(X_train)
    y_train = np.array(y_train)
    # NOTE: the dataset class is defined *inside* load(), which is the local
    # object the pickling error on Windows complains about.
    class ChatDataset(Dataset):
        def __init__(self):
            self.n_samples = len(X_train)
            self.x_data = X_train
            self.y_data = y_train

        def __getitem__(self, index):
            return self.x_data[index], self.y_data[index]

        def __len__(self):
            return self.n_samples
    # Hyperparameters
    batch_size = 8
    hidden_size = 8
    output_size = len(tags)
    input_size = len(X_train[0])
    learning_rate = 0.001
    num_epochs = 1000

    dataset = ChatDataset()
    train_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=2)

    device = 'cpu'
    # print(device)

    global model_main
    model_main = NeuralNet(input_size, hidden_size, output_size).to(device)
    try:
        print("Inside Try")
        data = torch.load(modelNames.get(inp))
        input_size = data["input_size"]
        hidden_size = data["hidden_size"]
        output_size = data["output_size"]
        all_words = data["all_words"]
        questions_list = data["questions_list"]
        model_state = data["model_state"]

        model_main = NeuralNet(input_size, hidden_size, output_size).to(device)
        model_main.load_state_dict(model_state)
        model_main.eval()
    except:
        # Loss and optimizer
        print("Inside Except")
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model_main.parameters(), lr=learning_rate)
        for epoch in range(num_epochs):
            for (words, labels_net) in train_loader:
                words = words.to(device)
                labels_net = labels_net.to(device)

                # Forward pass
                outputs = model_main(words)
                loss = criterion(outputs, labels_net)
                # print(loss)

                # Backward pass and optimizer step
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            if (epoch + 1) % 100 == 0:
                print(f'epoch {epoch + 1}/{num_epochs}, loss={loss.item():.4f}')

        print(f'final loss, loss={loss.item():.4f}')
        ############### Accuracy Calculation ##############
        correct = 0
        total = 0
        # print(device)
        with torch.no_grad():
            for words, labels_net in train_loader:
                outputs = model_main(words)
                _, predicted = torch.max(outputs.data, 1)
                total += labels_net.size(0)
                correct += (predicted == labels_net).sum().item()
        print('Accuracy : %d %%' % (100 * correct / total))
        # Saving the model
        data = {
            "model_state": model_main.state_dict(),
            "input_size": input_size,
            "output_size": output_size,
            "hidden_size": hidden_size,
            "all_words": all_words,
            "questions_list": questions_list
        }
        FILE = modelNames.get(inp)
        torch.save(data, FILE)
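For reference, this is the kind of restructuring I believe the "spawn" start method requires (a sketch, untested on my full project): define the dataset class at module level, outside load(), so it can be pickled; setting num_workers=0 should also avoid spawning worker processes altogether.

from torch.utils.data import Dataset, DataLoader

# Module-level dataset class (sketch): top-level classes can be pickled,
# so DataLoader worker processes can be spawned on Windows.
class ChatDataset(Dataset):
    def __init__(self, X_train, y_train):
        self.n_samples = len(X_train)
        self.x_data = X_train
        self.y_data = y_train

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.n_samples

# Inside load():
#     dataset = ChatDataset(X_train, y_train)
#     train_loader = DataLoader(dataset=dataset, batch_size=batch_size,
#                               shuffle=True, num_workers=2)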