I just wrote my first neural network class in Python. As far as I can tell everything should work, but there is some bug in it that I can't seem to find (it's probably staring me right in the face). I first tried it on 10,000 examples of the MNIST data, then again when trying to replicate the sign function, and again when trying to replicate a XOR gate. Every time, regardless of the number of epochs, it always produces output from all of the output neurons (however many there may be) that are all roughly the same value, yet the cost function appears to be decreasing. I am using batch gradient descent, all of it done with vectors (no loop over each training example).
#Neural Network Class
import numpy as np

class NeuralNetwork:

    #methods
    def __init__(self, layer_shape):
        #Useful Network Info
        self.__layer_shape = layer_shape
        self.__layers = len(layer_shape)

        #Initialize Random Weights
        self.__weights = []
        self.__weight_sizes = []
        for i in range(len(layer_shape)-1):
            current_weight_size = (layer_shape[i+1], layer_shape[i]+1)
            self.__weight_sizes.append(current_weight_size)
            self.__weights.append(np.random.normal(loc=0.1, scale=0.1, size=current_weight_size))

    def sigmoid(self, z):
        return (1/(1+np.exp(-z)))

    def sig_prime(self, z):
        return np.multiply(self.sigmoid(z), (1-self.sigmoid(z)))

    def Feedforward(self, input, Train=False):
        self.__input_cases = np.shape(input)[0]

        #Empty list to hold the output of every layer.
        output_list = []
        #Appends the output of the 1st input layer.
        output_list.append(input)

        for i in range(self.__layers-1):
            if i == 0:
                output = self.sigmoid(np.dot(np.concatenate((np.ones((self.__input_cases, 1)), input), 1), self.__weights[0].T))
                output_list.append(output)
            else:
                output = self.sigmoid(np.dot(np.concatenate((np.ones((self.__input_cases, 1)), output), 1), self.__weights[i].T))
                output_list.append(output)

        #Returns the final output if not training.
        if Train == False:
            return output_list[-1]
        #Returns the entire output_list if needed for training
        else:
            return output_list

    def CostFunction(self, input, target, error_func=1):
        """Gives the cost of using a particular weight matrix
        based off of the input and targeted output"""

        #Run the network to get output using current theta matrices.
        output = self.Feedforward(input)

        #####Allows user to choose Cost Functions.#####
        #
        #Log Based Error Function
        #
        if error_func == 0:
            error = np.multiply(-target, np.log(output)) - np.multiply((1-target), np.log(1-output))
            total_error = np.sum(np.sum(error))
        #
        #Squared Error Cost Function
        #
        elif error_func == 1:
            error = (target - output)**2
            total_error = 0.5 * np.sum(np.sum(error))

        return total_error

    def Weight_Grad(self, input, target, output_list):
        #
        #Finds the Error Deltas for Each Layer
        #
        deltas = []
        for i in range(self.__layers - 1):
            #Finds Error Delta for the last layer
            if i == 0:
                error = (target - output_list[-1])
                error_delta = -1*np.multiply(error, np.multiply(output_list[-1], (1-output_list[-1])))
                deltas.append(error_delta)
            #Finds Error Delta for the hidden layers
            else:
                #Weight matrices have bias values removed
                error_delta = np.multiply(np.dot(deltas[-1], self.__weights[-i][:, 1:]), output_list[-i-1]*(1-output_list[-i-1]))
                deltas.append(error_delta)

        #
        #Finds the Deltas for each Weight Matrix
        #
        Weight_Delta_List = []
        deltas.reverse()
        for i in range(len(self.__weights)):
            current_weight_delta = (1/self.__input_cases) * np.dot(deltas[i].T, np.concatenate((np.ones((self.__input_cases, 1)), output_list[i]), 1))
            Weight_Delta_List.append(current_weight_delta)
            #print("Weight", i, "Delta:", "\n", current_weight_delta)
            #print()

        #
        #Combines all Weight Deltas into a single row vector
        #
        Weight_Delta_Vector = np.array([[]])
        for i in Weight_Delta_List:
            Weight_Delta_Vector = np.concatenate((Weight_Delta_Vector, np.reshape(i, (1, -1))), 1)

        return Weight_Delta_List

    def Train(self, input_data, target):
        #
        #Gradient Checking:
        #
        #First Get Gradients from first iteration of Back Propagation
        output_list = self.Feedforward(input_data, Train=True)
        self.__input_cases = np.shape(input_data)[0]

        Weight_Delta_List = self.Weight_Grad(input_data, target, output_list)

        #Creates List of Gradient Approx arrays set to zero.
        grad_approx_list = []
        for i in self.__weight_sizes:
            current_grad_approx = np.zeros(i)
            grad_approx_list.append(current_grad_approx)

        #Compute Approx. Gradient for every Weight Change
        for W in range(len(self.__weights)):
            for index, value in np.ndenumerate(self.__weights[W]):
                orig_value = self.__weights[W][index]  #Saves the Original Value
                print("Orig Value:", orig_value)

                #Sets weight to weight +/- epsilon
                self.__weights[W][index] = orig_value+.00001
                cost_plusE = self.CostFunction(input_data, target)

                self.__weights[W][index] = orig_value-.00001
                cost_minusE = self.CostFunction(input_data, target)

                #Solves for grad approx:
                grad_approx = (cost_plusE-cost_minusE)/(2*.00001)
                grad_approx_list[W][index] = grad_approx

                #Sets Weight Value back to its original value
                self.__weights[W][index] = orig_value

        #
        #Print Gradients from Back Prop. and Grad Approx. side-by-side:
        #
        print("Back Prop. Grad", "\t", "Grad. Approx")
        print("-"*15, "\t", "-"*15)
        for W in range(len(self.__weights)):
            for index, value in np.ndenumerate(self.__weights[W]):
                print(self.__weights[W][index], "\t"*3, grad_approx_list[W][index])

        print("\n"*3)
        input_ = input("Press Enter to continue:")

        #
        #Perform Weight Updates for X number of Iterations
        #
        for i in range(10000):
            #Run the network
            output_list = self.Feedforward(input_data, Train=True)
            self.__input_cases = np.shape(input_data)[0]
            Weight_Delta_List = self.Weight_Grad(input_data, target, output_list)

            for w in range(len(self.__weights)):
                #print(self.__weights[w])
                #print(Weight_Delta_List[w])
                self.__weights[w] = self.__weights[w] - (.01*Weight_Delta_List[w])

        print("Done")
I have even implemented gradient checking, and the values differ; I thought I would try replacing the back-propagation updates with the approximate gradient-checking values, but that produced the same results, leading me to doubt even my gradient-checking code.
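For context, the numerical check used in Train above is the standard central-difference estimate: perturb one weight by a small epsilon in each direction, re-evaluate the cost, and compare the resulting slope to the analytic gradient. A minimal standalone sketch of that idea (the names and the toy cost here are illustrative, not taken from the class above):

    # Central-difference gradient check for a single scalar parameter.
    # cost_fn is any function returning the scalar cost; w is the current weight value.
    def numeric_grad(cost_fn, w, eps=1e-5):
        return (cost_fn(w + eps) - cost_fn(w - eps)) / (2 * eps)

    # Toy example: J(w) = 0.5 * (w*x - t)**2 has analytic gradient (w*x - t) * x
    x, t = 2.0, 1.0
    analytic = (3.0 * x - t) * x
    numeric = numeric_grad(lambda w: 0.5 * (w * x - t) ** 2, 3.0)
    print(numeric, analytic)   # the two values should agree to several decimals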
Here are some of the values produced when training on the XOR gate:
Back Prop. Grad: 0.0756102610697  0.261814503398  0.0292734023876
Grad. Approx:    0.05302210631166 0.0416095559674 0.0246847342122
Cost: before training: 0.508019225507; after training: 0.50007095103 (after 10,000 epochs)
Output for 4 different examples (after training):
[0.49317733]
[0.49294556]
[0.50489004]
[0.50465824]
So my question is: is there any obvious problem with my back propagation or my gradient checking? Are there any common issues when an ANN shows these symptoms (outputs all roughly the same / cost decreasing)?
Answer (score: 1)
I'm not terribly good at reading Python code, but your gradient list for XOR contains 3 elements, corresponding to 3 weights. I assume these are the two inputs and one bias of a single neuron. If that's true, such a network cannot learn XOR (the minimal NN that can learn XOR needs two hidden neurons and one output unit). Now, looking at the Feedforward function: if np.dot computes what its name says (i.e. the dot product of two vectors) and sigmoid is a scalar, then this will always correspond to the output of one neuron, and I don't see how you could add more neurons to a layer with this code.
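To illustrate why two hidden units suffice, here is the classic hand-crafted construction (a standard textbook example, not taken from the code above): one hidden unit computes OR, the other AND, and the output unit fires when OR is true but AND is not.

    # Hand-crafted 2-2-1 XOR network with step activations
    # (illustrative only; the question's network uses sigmoids and learned weights).
    def step(z):
        return 1 if z > 0 else 0

    def xor_net(x1, x2):
        h_or  = step(x1 + x2 - 0.5)       # hidden unit 1: OR
        h_and = step(x1 + x2 - 1.5)       # hidden unit 2: AND
        return step(h_or - h_and - 0.5)   # output: OR and not AND == XOR

    for a in (0, 1):
        for b in (0, 1):
            print(a, b, xor_net(a, b))    # prints the XOR truth table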
The following suggestions can be used to debug any newly implemented NN:
1) Don't start with MNIST or XOR. A perfectly correct implementation may still fail to learn XOR because it can easily get stuck in a local minimum, and you could spend a lot of time hunting for a bug that doesn't exist. A good starting point is the AND function, which can be learned by a single neuron.
2) Check the forward computation pass by computing the results manually on a few examples. This is easy to do with a small number of weights. Then try training the network with a numerical gradient. If that fails, then either your numerical gradient is wrong (check it by hand) or the training procedure is wrong. (It may fail if you set the learning rate too large, but otherwise training must converge, since the error surface is convex.) See the sketch after this list for a minimal version of this check.
3) Once you can train it with the numerical gradient, debug your analytical gradients (check the gradient per neuron, then the gradient for individual weights). Again, these can be computed manually and compared with what you observe.
4) After completing step 3, if everything works, add one hidden layer and repeat steps 2 and 3 with the AND function.
5) Once everything works with AND, you can move on to the XOR function and other, more complicated tasks.
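As referenced in step 2, here is a minimal sketch of steps 1–3 for a single sigmoid neuron learning AND, trained with the central-difference numerical gradient and compared against the analytic gradient. All names, the learning rate, and the epoch count are illustrative assumptions, not part of the answer above.

    import numpy as np

    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)  # AND inputs
    t = np.array([0., 0., 0., 1.])                               # AND targets
    w = np.zeros(3)                                              # [bias, w1, w2]
    Xb = np.concatenate((np.ones((4, 1)), X), axis=1)            # prepend bias column

    def cost(w):
        out = 1 / (1 + np.exp(-Xb.dot(w)))                       # sigmoid outputs
        return 0.5 * np.sum((t - out) ** 2)                      # squared error

    eps, lr = 1e-5, 1.0
    for epoch in range(5000):
        # numerical gradient: perturb each weight separately (step 2)
        grad_num = np.zeros_like(w)
        for j in range(len(w)):
            wp, wm = w.copy(), w.copy()
            wp[j] += eps
            wm[j] -= eps
            grad_num[j] = (cost(wp) - cost(wm)) / (2 * eps)
        # analytic gradient of the same cost (step 3) -- should closely match grad_num
        out = 1 / (1 + np.exp(-Xb.dot(w)))
        grad_ana = Xb.T.dot(-(t - out) * out * (1 - out))
        if epoch == 0:
            print("numeric:", np.round(grad_num, 6), "analytic:", np.round(grad_ana, 6))
        # train on the numerical gradient only
        w -= lr * grad_num

    print(np.round(1 / (1 + np.exp(-Xb.dot(w))), 2))  # outputs should approach [0, 0, 0, 1]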
This process may look time-consuming, but it almost always leads to a working NN in the end.