I'm new to deep learning and am trying to build a program that recognizes handwritten digits. For this I downloaded the MNIST dataset, but I noticed that the training set seems to be missing some labels, so I can't train my model. Can anyone help me and tell me how to fix this?
Alternatively, could anyone share the training-labels zip file with me so that I can train my model?
Size of training images = 60000
Size of training labels = 59992
Error:
number of rows in x (60000) does not match length of y (59992)
Apply node that caused the error: CrossentropySoftmaxArgmax1HotWithBias(Dot22.0, b, targets)
Toposort index: 33
Inputs types: [TensorType(float64, matrix), TensorType(float64, vector), TensorType(int32, vector)]
Inputs shapes: [(60000, 10), (10,), (59992,)]
Inputs strides: [(80, 8), (8,), (4,)]
Inputs values: ['not shown', 'not shown', 'not shown']
Outputs clients: [[Sum{acc_dtype=float64}(CrossentropySoftmaxArgmax1HotWithBias.0)], [CrossentropySoftmax1HotWithBiasDx(Elemwise{Inv}[(0, 0)].0, CrossentropySoftmaxArgmax1HotWithBias.1, targets)], []]
HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debug print and storage map footprint of this apply node.
Dataset link - http://yann.lecun.com/exdb/mnist/
My code for loading the data:
import os
import numpy as np

def load_dataset():
    def download(filename,source="http://yann.lecun.com/exdb/mnist/"):
        print("Downloading",filename)
        import urllib.request
        urllib.request.urlretrieve(source+filename,filename)

    def load_mnist_image(filename):
        if not os.path.exists(filename):
            download(filename)
        import gzip
        with gzip.open(filename,'rb') as f:
            data=np.frombuffer(f.read(),np.uint8,offset=16)
        data=data.reshape(-1,1,28,28)
        return data/np.float32(256)

    def load_mnist_label(filename):
        if not os.path.exists(filename):
            download(filename)
        import gzip
        with gzip.open(filename,'rb') as f:
            data=np.frombuffer(f.read(),np.uint8,offset=16)
        return data

    x_train=load_mnist_image('train-images-idx3-ubyte.gz')
    y_train=load_mnist_label('train-labels-idx1-ubyte.gz')
    x_test=load_mnist_image('t10k-images-idx3-ubyte.gz')
    y_test=load_mnist_label('t10k-labels-idx1-ubyte.gz')
    return x_train,y_train,x_test,y_test
Length check:
x_train,y_train,x_test,y_test=load_dataset()
print('X - '+str(len(x_train)))
print('Y - '+str(len(y_train)))
Output:
X - 60000
Y - 59992
Here is my full code:
import matplotlib.pyplot as plt
%matplotlib inline
for i in range(1,x_train.size):
    plt.show(plt.imshow(x_train[i][0]))
print(y_train)
import theano
import lasagne as lse
import theano.tensor as T
def build_nn(input_var=None):
    l_input=lse.layers.InputLayer(shape=(None,1,28,28),input_var=input_var)
    ldrop=lse.layers.DropoutLayer(l_input,p=0.2)
    l_hid1=lse.layers.DenseLayer(ldrop,num_units=800,
                                 nonlinearity=lse.nonlinearities.rectify,
                                 W=lse.init.GlorotUniform())
    l_hid1_drop=lse.layers.DropoutLayer(l_hid1,p=0.5)
    l_hid2=lse.layers.DenseLayer(l_hid1_drop,num_units=800,
                                 nonlinearity=lse.nonlinearities.rectify,
                                 W=lse.init.GlorotUniform())
    l_hid2_drop=lse.layers.DropoutLayer(l_hid2,p=0.5)
    l_output=lse.layers.DenseLayer(l_hid2_drop,num_units=10,nonlinearity=lse.nonlinearities.softmax)
    return l_output
input_var=T.tensor4('inputs')
target_var=T.ivector('targets')
network=build_nn(input_var)
prediction=lse.layers.get_output(network)
loss=lse.objectives.categorical_crossentropy(prediction,target_var)
loss=loss.mean()
params=lse.layers.get_all_params(network,trainable=True)
update=lse.updates.nesterov_momentum(loss,params,learning_rate=1,momentum=0.9)
tain_fn=theano.function([input_var,target_var],loss,updates=update)
num_training_step=1000
for steps in range(num_training_step):
    train_err=tain_fn(x_train,y_train)
    print('Step '+str(steps))
Full error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~\Anaconda3\lib\site-packages\theano\compile\function_module.py in __call__(self, *args, **kwargs)
902 outputs =\
--> 903 self.fn() if output_subset is None else\
904 self.fn(output_subset=output_subset)
ValueError: number of rows in x (60000) does not match length of y (59992)
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-38-2827076f729d> in <module>
2
3 for steps in range(num_training_step):
----> 4 train_err=tain_fn(x_train,y_train)
5 print('Step '+str(steps))
~\Anaconda3\lib\site-packages\theano\compile\function_module.py in __call__(self, *args, **kwargs)
915 node=self.fn.nodes[self.fn.position_of_error],
916 thunk=thunk,
--> 917 storage_map=getattr(self.fn, 'storage_map', None))
918 else:
919 # old-style linkers raise their own exceptions
~\Anaconda3\lib\site-packages\theano\gof\link.py in raise_with_op(node, thunk, exc_info, storage_map)
323 # extra long error message in that case.
324 pass
--> 325 reraise(exc_type, exc_value, exc_trace)
326
327
~\Anaconda3\lib\site-packages\six.py in reraise(tp, value, tb)
690 value = tp()
691 if value.__traceback__ is not tb:
--> 692 raise value.with_traceback(tb)
693 raise value
694 finally:
~\Anaconda3\lib\site-packages\theano\compile\function_module.py in __call__(self, *args, **kwargs)
901 try:
902 outputs =\
--> 903 self.fn() if output_subset is None else\
904 self.fn(output_subset=output_subset)
905 except Exception:
ValueError: number of rows in x (60000) does not match length of y (59992)
Apply node that caused the error: CrossentropySoftmaxArgmax1HotWithBias(Dot22.0, b, targets)
Toposort index: 33
Inputs types: [TensorType(float64, matrix), TensorType(float64, vector), TensorType(int32, vector)]
Inputs shapes: [(60000, 10), (10,), (59992,)]
Inputs strides: [(80, 8), (8,), (4,)]
Inputs values: ['not shown', 'not shown', 'not shown']
Outputs clients: [[Sum{acc_dtype=float64}(CrossentropySoftmaxArgmax1HotWithBias.0)], [CrossentropySoftmax1HotWithBiasDx(Elemwise{Inv}[(0, 0)].0, CrossentropySoftmaxArgmax1HotWithBias.1, targets)], []]
HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
Answer 0 (score: 1)
I can see that you have downloaded and loaded the data manually, which is not recommended. I think you should consider a more reliable way of downloading and loading the data. Following the official documentation here, you can do what you need with this simple approach:
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
Note: if the dataset file is not already present locally (under '~/.keras/datasets/'), it will be downloaded to that location.
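For completeness, a minimal usage sketch, assuming you want the arrays in the same (N, 1, 28, 28) float32 / int32 layout that the Theano/Lasagne code above expects (the reshape, the division by 256 and the int32 cast below are that assumption, not something mnist.load_data() does for you):

import numpy as np
from keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Images and labels now agree in length: 60000 for training, 10000 for test.
print(len(x_train), len(y_train))  # 60000 60000
print(len(x_test), len(y_test))    # 10000 10000

# Assumed adaptation to the layout used by the network above:
# (N, 1, 28, 28) float32 images scaled like data/np.float32(256),
# and int32 labels for the T.ivector('targets') variable.
x_train = x_train.reshape(-1, 1, 28, 28).astype(np.float32) / 256.0
x_test = x_test.reshape(-1, 1, 28, 28).astype(np.float32) / 256.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)

With the image and label counts matching, the "number of rows in x (60000) does not match length of y (59992)" error should no longer occur.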