I am currently learning how to code neural networks in numpy/Python. I used the code from this tutorial and tried to adapt it into an importable module. However, when I try to use my own dataset, it raises the error ValueError: shapes (1,3) and (1,1) not aligned: 3 (dim 1) != 1 (dim 0).
I have already tried reshaping all the matrices from (x,) to (x,1), but with no success. From what I read, transposing the arrays is also supposed to fix this, but I tried that too, also without success.
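For what it's worth, a minimal check (not part of my code) suggests why transposing alone changes nothing: transposing a 1-D numpy array is a no-op, while reshape gives an explicit row or column vector.
import numpy as np
v = np.zeros(3)                     # 1-D array, shape (3,)
print(v.T.shape)                    # (3,)  -> transposing a 1-D array changes nothing
print(np.reshape(v, (3, 1)).shape)  # (3, 1) -> explicit column vector
print(np.reshape(v, (1, 3)).shape)  # (1, 3) -> explicit row vector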
Here is the module (called hidden_net):
import numpy as np

class network:
    def __init__(self,layer_num,learning_rate=0.7,seed=None,logistic_coefficent=0.9):
        self.logistic_coefficent=logistic_coefficent
        self.learning_rate=learning_rate
        self.w0 = np.random.random((layer_num[0],layer_num[1]))
        self.w1 = np.random.random((layer_num[1],layer_num[2]))
        np.random.seed(seed)

    def sigmoid(self,x,reverse=False):
        if(reverse==True):
            return x*(1-x)
        return 1/(1+np.exp(-x*self.logistic_coefficent))

    def train(self,inps,outs):
        inps=np.array(inps)
        layer0 = inps
        layer1 = self.sigmoid(np.dot(layer0,self.w0))
        layer2 = self.sigmoid(np.dot(layer1,self.w1))
        layer2_error = outs - layer2
        layer2_delta = layer2_error*self.sigmoid(layer2,reverse=True)#*self.learning_rate
        layer1_error = layer2_delta.dot(self.w1.T)
        layer1_delta = layer1_error * self.sigmoid(layer1,reverse=True)#*self.learning_rate
        layer1= np.reshape(layer1, (layer1.shape[0], 1))
        layer2= np.reshape(layer2, (layer2.shape[0], 1))
        layer1_delta= np.reshape(layer1_delta, (layer1_delta.shape[0], 1)) #Other attempts to reshape to avoid this error
        layer2_delta= np.reshape(layer2_delta, (layer2_delta.shape[0], 1))
        self.w1 += layer1.T.dot(layer2_delta)
        self.w0 += layer0.T.dot(layer1_delta)
Here is the program that imports the module:
import hidden_net

op=open('Mall_Customers_Mod.txt','r')
full=op.read()
op.close()
full_lines=full.split('\n')

training_lines=[]
for i in range(174):
    training_lines.append(full_lines[0])
    del full_lines[0]

training_inputs=[]
training_outputs=[]
for j in training_lines:
    training_inputs.append([float(j.split(',')[0]),float(j.split(',')[1])])
    training_outputs.append(float(j.split(',')[2]))

testing_lines=full_lines
testing_inputs=[]
testing_outputs=[]
for l in testing_lines:
    testing_inputs.append([float(l.split(',')[0]),float(l.split(',')[1])])
    testing_outputs.append(float(l.split(',')[2]))

nn=hidden_net.network([2,3,1],seed=10)
for i in range(1000):
    for cur in range(len(training_inputs)):
        nn.train(training_inputs[cur],training_outputs[cur])
Here is part of my dataset (Mall_Customers_Mod.txt):
-1,19,15
-1,21,15
1,20,16
1,23,16
1,31,17
1,22,17
1,35,18
1,23,18
-1,64,19
1,30,19
-1,67,19
1,35,19
1,58,20
1,24,20
-1,37,20
-1,22,20
1,35,21
-1,20,21
-1,52,23
The error is on line 30:
self.w1 += layer1.T.dot(layer2_delta)
ValueError: shapes (1,3) and (1,1) not aligned: 3 (dim 1) != 1 (dim 0)
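The mismatch can be reproduced in isolation. Judging from the reshapes above, the (1,3) operand appears to be layer1.T and the (1,1) operand layer2_delta (shapes taken from the traceback; the names below are just placeholders):
import numpy as np
a = np.ones((1, 3))  # layer1 reshaped to (3,1), then transposed -> (1,3)
b = np.ones((1, 1))  # layer2_delta reshaped to (1,1)
a.dot(b)             # ValueError: shapes (1,3) and (1,1) not aligned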
Also, sorry: I know I was supposed to avoid pasting whole files, but here it seems unavoidable.
Answer 0 (score: 1)
The lines below are wrong; layer0 is the input layer and does not contain any neurons.
self.w1 += layer1.T.dot(layer2_delta)
self.w0 += layer0.T.dot(layer1_delta)
They should be:
self.w1 += layer2.T.dot(layer2_delta)
self.w0 += layer1.T.dot(layer1_delta)
All the reshape operations should also be removed. Updated train function:
def train(self,inps,outs):
    inps=np.array(inps)
    layer0 = inps
    layer1 = self.sigmoid(np.dot(layer0,self.w0))
    layer2 = self.sigmoid(np.dot(layer1,self.w1))
    layer2_error = outs - layer2
    layer2_delta = layer2_error*self.sigmoid(layer2,reverse=True)#*self.learning_rate
    layer1_error = layer2_delta.dot(self.w1.T)
    layer1_delta = layer1_error * self.sigmoid(layer1,reverse=True)#*self.learning_rate
    self.w1 += layer2.T.dot(layer2_delta)
    self.w0 += layer1.T.dot(layer1_delta)
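A quick shape check (my own illustration, assuming layer_num=[2,3,1] as in the question) of why the updated assignments no longer raise the error: with 1-D activations the dot products reduce to scalars, which broadcast over the weight matrices.
import numpy as np
layer1 = np.ones(3)        # hidden activations, shape (3,)
layer2 = np.ones(1)        # output activation, shape (1,)
layer1_delta = np.ones(3)  # shape (3,)
layer2_delta = np.ones(1)  # shape (1,)
w0 = np.ones((2, 3))
w1 = np.ones((3, 1))
w1 += layer2.T.dot(layer2_delta)  # dot of two 1-D arrays is a scalar, broadcasts over w1
w0 += layer1.T.dot(layer1_delta)  # same here, so no shape error is raised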