I'm new to Python, and this is (unfortunately) the first relatively long piece of code I've written in it. I've been following a tutorial on writing a neural network, but I've run into an error message I can't seem to resolve. I searched Stack Overflow for "IndexError: list index out of range" and understand that it can mean trying to access the nth element of a list that only has n-1 elements. However, I can't work out which list is wrong or how to fix it. Any help would be greatly appreciated, and any way of running the script that would give me more information about which list is out of range would be very useful. Below are the error message and the code...
I keep getting this error message:
(2, 2, 1)
[array([[ 0.09438987, 0.08228006, -0.00851927],
[-0.09384243, -0.07417094, 0.1341281 ]]), array([[-0.20913607, 0.02783653, -0.07682221]])]
Traceback (most recent call last):
File "BackProp.py", line 126, in <module>
err = bpn.TrainEpoch(lvInput, lvTarget)
File "BackProp.py", line 93, in TrainEpoch
weightDelta = np.sum(layerOutput[None,:,:].transpose(2, 0, 1) * delta[delta_index][None,:,:].transpose(2, 1, 0), axis = 0)
IndexError: list index out of range
...and the code...
import numpy as np

class BackPropagationNetwork:
    """A back-propagation network"""

    #
    # Class Members
    #
    layerCount = 0
    shape = None
    weights = []

    #
    # Class Methods
    #
    def __init__(self, layerSize):
        """Initialize the network"""

        # Layer info
        self.layerCount = len(layerSize) - 1
        self.shape = layerSize

        # Input/Output data from last Run
        self._layerInput = []
        self._layerOutput = []

        # Create the weight arrays
        for (l1, l2) in zip(layerSize[:-1], layerSize[1:]):
            self.weights.append(np.random.normal(scale=0.1, size = (l2, l1+1)))

    #
    # Run Method
    #
    def Run(self, input):
        """Run the network based on the input data"""

        lnCases = input.shape[0]

        # Clear out the previous intermediate value lists
        self._layerInput = []
        self._layerOutput = []

        # Run it
        for index in range(self.layerCount):
            # Determine layer input
            if index == 0:
                layerInput = self.weights[0].dot(np.vstack([input.T, np.ones([1, lnCases])]))
            else:
                layerInput = self.weights[index].dot(np.vstack([self._layerOutput[-1], np.ones([1, lnCases])]))

            self._layerInput.append(layerInput)
            self._layerOutput.append(self.sgm(layerInput))

        return self._layerOutput[-1].T

    #
    # TrainEpoch method
    #
    def TrainEpoch(self, input, target, trainingRate = 0.2):
        """This method trains the network for one epoch"""

        delta = []
        lnCases = input.shape[0]

        # First run the network
        self.Run(input)

        # Calculate our deltas
        for index in reversed(range(self.layerCount)):
            if index == self.layerCount - 1:
                # Compare to the target values
                output_delta = self._layerOutput[index] - target.T
                error = np.sum(output_delta**2)
                delta.append(output_delta * self.sgm(self._layerInput[index], True))
            else:
                # compare to the following layer's delta
                delta_pullback = self.weights[index + 1].T.dot(delta[-1])
                delta.append(delta_pullback[:-1, :] * self.sgm(self._layerInput[index], True))

            # Compute weight deltas
            for index in range(self.layerCount):
                delta_index = self.layerCount - 1 - index

                if index == 0:
                    layerOutput = np.vstack([input.T, np.ones([1, lnCases])])
                else:
                    layerOutput = np.vstack([self._layerOutput[index - 1], np.ones([1, self._layerOutput[index - 1].shape[1]])])

                weightDelta = np.sum(layerOutput[None,:,:].transpose(2, 0, 1) * delta[delta_index][None,:,:].transpose(2, 1, 0), axis = 0)
                self.weights[index] -= trainingRate * weightDelta

        return error

    # Transfer Functions
    def sgm(self, x, Derivative=False):
        if not Derivative:
            return 1/(1+np.exp(-x))
        else:
            out = self.sgm(x)
            return out*(1-out)

#
# If run as a script, create a test object
#
if __name__ == "__main__":
    bpn = BackPropagationNetwork((2,2,1))
    print(bpn.shape)
    print(bpn.weights)

    lvInput = np.array([[0, 0], [1, 1], [0,1], [1,0]])
    lvTarget = np.array([[0.05], [0.05], [0.95], [0.95]])

    lnMax = 100000
    lnErr = 1e-5
    for i in range(lnMax-1):
        err = bpn.TrainEpoch(lvInput, lvTarget)
        if i % 2500 == 0:
            print("Iteration {0}\tError: {1:0.6f}".format(i, err))
        if err <= lnErr:
            print("Minimum error reached at iteration {0}".format(i))
            break

    # Display output
    lvOutput = bpn.Run(lvInput)
    print("Input: {0}\nOutput: {1}".format(lvInput, lvOutput))
$
Answer 0 (score: 0)
It looks like your second for loop in TrainEpoch is indented too much, and that is what is causing the IndexError. In other words, this line:
for index in range(self.layerCount):
should be at the same indentation level as the other for loop:
for index in reversed(range(self.layerCount)):
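For reference, here is a sketch of how the tail of TrainEpoch would read with that loop dedented (same names and expressions as in the question; only the indentation changes), so that delta already holds one entry per layer before delta[delta_index] is read:

# Calculate our deltas
for index in reversed(range(self.layerCount)):
    if index == self.layerCount - 1:
        output_delta = self._layerOutput[index] - target.T
        error = np.sum(output_delta**2)
        delta.append(output_delta * self.sgm(self._layerInput[index], True))
    else:
        delta_pullback = self.weights[index + 1].T.dot(delta[-1])
        delta.append(delta_pullback[:-1, :] * self.sgm(self._layerInput[index], True))

# Compute weight deltas (now at the same level as the loop above)
for index in range(self.layerCount):
    delta_index = self.layerCount - 1 - index

    if index == 0:
        layerOutput = np.vstack([input.T, np.ones([1, lnCases])])
    else:
        layerOutput = np.vstack([self._layerOutput[index - 1], np.ones([1, self._layerOutput[index - 1].shape[1]])])

    weightDelta = np.sum(layerOutput[None,:,:].transpose(2, 0, 1) * delta[delta_index][None,:,:].transpose(2, 1, 0), axis = 0)
    self.weights[index] -= trainingRate * weightDelta

return error

With layerCount == 2, delta then has two entries by the time the weight-update loop asks for delta[1], so the IndexError no longer occurs.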