I'm trying to train a ConvLSTM model with tensorflow.keras, but training crashes with a floating point exception (core dumped).
System information
To reproduce the crash, I use MNIST and format it into the same structure as my dataset.
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import ConvLSTM2D, BatchNormalization, Dense, Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import utils, regularizers
from tensorflow.keras.callbacks import TensorBoard, LearningRateScheduler
from tensorflow.keras import backend as K
from tensorflow.keras.datasets import mnist
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
K.set_session(session) # set this TensorFlow session as the default session for Keras
window_len = 50
# load data: MNIST here, formatted like the capg dba data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# add a time axis, then repeat each image window_len times to get
# sequences of shape (samples, window_len, 28, 28, 1)
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28, 1)
x_train = x_train.repeat(window_len, axis=1)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28, 1)
x_test = x_test.repeat(window_len, axis=1)
y_train = utils.to_categorical(y_train, 10)
y_test = utils.to_categorical(y_test, 10)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
model = Sequential()
model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), input_shape=(window_len, 28, 28, 1), padding='same',
                     activation='relu', activity_regularizer=regularizers.l1(l=0.01), name='convlstm_1'))
model.add(Flatten())
# model.add(BatchNormalization(momentum=0.9, name='bn_1'))
model.add(Dense(10, activation='softmax'))
sgd = SGD(clipnorm=1, clipvalue=0.5, momentum=0.9)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
def learning_rate_tuner(epoch):
    lr = 0.01
    if 20 <= epoch < 40:
        lr = 0.001
    elif epoch >= 40:
        lr = 0.0001
    return lr
lr_scheduler = LearningRateScheduler(learning_rate_tuner)
history = model.fit(x_train, y_train, batch_size=32, epochs=200, validation_data=(x_test, y_test),
                    callbacks=[lr_scheduler])
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
Since there is no Python traceback, I ran the script under gdb and printed the backtrace with bt when the SIGFPE hit:
Thread 47 "python" received signal SIGFPE, Arithmetic exception.
[Switching to Thread 0x7fe613fff700 (LWP 2125)]
0x00007fe7668a190b in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
(gdb) bt
#0 0x00007fe7668a190b in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#1 0x00007fe7668a1dc2 in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#2 0x00007fe766a38f7e in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#3 0x00007fe7668910ab in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#4 0x00007fe766891512 in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#5 0x00007fe7669e35b6 in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#6 0x00007fe7667a33eb in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#7 0x00007fe7667a3668 in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#8 0x00007fe7667a36ae in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#9 0x00007fe766912c80 in cuLaunchKernel () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#10 0x00007fe79e565dc2 in cudart::cudaApiLaunchKernelCommon(void const*, dim3, dim3, void**, unsigned long, CUstream_st*, bool) ()
from /usr/local/lib/python2.7/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so
#11 0x00007fe79e565fb7 in cudart::cudaApiLaunchKernel(void const*, dim3, dim3, void**, unsigned long, CUstream_st*) ()
from /usr/local/lib/python2.7/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so
#12 0x00007fe79e59a41b in cudaLaunchKernel () from /usr/local/lib/python2.7/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so
#13 0x00007fe79dacd013 in tensorflow::Status tensorflow::CudaLaunchKernel<int, float const*, tensorflow::functor::Dimension<3>, float*, int, float const*, tensorflow::functor::Dimension<3>, float*>(void (*)(int, float const*, tensorflow::functor::Dimension<3>, float*), dim3, dim3, unsigned long, CUstream_st*, int, float const*, tensorflow::functor::Dimension<3>, float*) ()
from /usr/local/lib/python2.7/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so
#14 0x00007fe79dada19f in tensorflow::functor::ReverseTransformFilter<Eigen::GpuDevice, float, 4>::operator()(Eigen::GpuDevice const&, Eigen::TensorMap<Eigen::Tensor<float const, 4, 1, long>, 16, Eigen::MakePointer>, Eigen::TensorMap<Eigen::Tensor<float, 4, 1, long>, 16, Eigen::MakePointer>) ()
from /usr/local/lib/python2.7/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so
#15 0x00007fe79d60e2da in tensorflow::LaunchConv2DBackpropFilterOp<Eigen::GpuDevice, float>::operator()(tensorflow::OpKernelContext*, bool, bool, tensorflow::Tensor const&, tensorflow::Tensor const&, int, int, int, int, tensorflow::Padding const&, std::vector<long long, std::allocator<long long> > const&, tensorflow::Tensor*, tensorflow::TensorFormat) () from /usr/local/lib/python2.7/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so
#16 0x00007fe79d60f38c in tensorflow::Conv2DSlowBackpropFilterOp<Eigen::GpuDevice, float>::Compute(tensorflow::OpKernelContext*) ()
from /usr/local/lib/python2.7/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so
#17 0x00007fe79594a96b in tensorflow::BaseGPUDevice::ComputeHelper(tensorflow::OpKernel*, tensorflow::OpKernelContext*) ()
from /usr/local/lib/python2.7/dist-packages/tensorflow/python/../libtensorflow_framework.so
#18 0x00007fe79594b732 in tensorflow::BaseGPUDevice::Compute(tensorflow::OpKernel*, tensorflow::OpKernelContext*) ()
from /usr/local/lib/python2.7/dist-packages/tensorflow/python/../libtensorflow_framework.so
#19 0x00007fe7959a39b1 in tensorflow::(anonymous namespace)::ExecutorState::Process(tensorflow::(anonymous namespace)::ExecutorState::TaggedNode, long long) () from /usr/local/lib/python2.7/dist-packages/tensorflow/python/../libtensorflow_framework.so
#20 0x00007fe7959a3bfa in std::_Function_handler<void (), tensorflow::(anonymous namespace)::ExecutorState::ScheduleReady(absl::InlinedVector<tensorflow::(anonymous namespace)::ExecutorState::TaggedNode, 8ul, std::allocator<tensorflow::(anonymous namespace)::ExecutorState::TaggedNode> > const&, tensorflow::(anonymous namespace)::ExecutorState::TaggedNodeReadyQueue*)::{lambda()#1}>::_M_invoke(std::_Any_data const&) ()
from /usr/local/lib/python2.7/dist-packages/tensorflow/python/../libtensorflow_framework.so
#21 0x00007fe795a2fc46 in Eigen::ThreadPoolTempl<tensorflow::thread::EigenEnvironment>::WorkerLoop(int) ()
from /usr/local/lib/python2.7/dist-packages/tensorflow/python/../libtensorflow_framework.so
#22 0x00007fe795a2eb04 in std::_Function_handler<void (), tensorflow::thread::EigenEnvironment::CreateThread(std::function<void ()>)::{lambda()#1}>::_M_invoke(std::_Any_data const&) () from /usr/local/lib/python2.7/dist-packages/tensorflow/python/../libtensorflow_framework.so
#23 0x00007fe7d2b6dc80 in ?? () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6
#24 0x00007fe7e55446ba in start_thread (arg=0x7fe613fff700) at pthread_create.c:333
#25 0x00007fe7e527a41d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109
At first I thought this was an exploding gradient problem, so I added clipnorm and clipvalue, but it still crashed. Then I tried a shorter sequence, e.g. 20; it trained for longer but still crashed after a few epochs. I also tried a different TensorFlow version, 1.13.1, in nvidia-docker, and ran into the same problem. Is this a problem with my model setup, or a bug in my code?
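For reference, a guard like the following would stop training at the first NaN loss, using the built-in tf.keras TerminateOnNaN callback. Since my crash is a SIGFPE inside libcuda rather than a NaN loss, I doubt it would ever trigger here, but it at least gives a way to rule the gradient theory in or out:
from tensorflow.keras.callbacks import TerminateOnNaN

# abort model.fit at the first NaN/Inf training loss, the usual
# signature of a genuine exploding-gradient failure
history = model.fit(x_train, y_train, batch_size=32, epochs=200,
                    validation_data=(x_test, y_test),
                    callbacks=[lr_scheduler, TerminateOnNaN()])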
Thanks
Answer 0 (score: 0)
Solved it: the problem was the NVIDIA driver. I reinstalled driver 410 and the problem went away. I'm now running the TensorFlow 1.13.1 GPU build with CUDA 10.0 and cuDNN 7.4.
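After swapping the driver, a quick check like this confirms that TensorFlow sees the GPU again (plain TF 1.x API; the version printed is just my setup):
import tensorflow as tf
from tensorflow.python.client import device_lib

print(tf.__version__)                   # 1.13.1 in my case
print(tf.test.is_gpu_available())       # True once the driver is healthy
print(device_lib.list_local_devices())  # the GPU should appear in this list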