Debugging Python RuntimeWarning: invalid value encountered in reduce

Date: 2019-02-01 16:11:40

Tags: python-3.x keras conv-neural-network

I am training a convolutional neural network, and occasionally during training I get a runtime warning before the epoch terminates. The error is

     5/83 [>.............................] - ETA: 1:52 - loss: 0.1680
C:\---\Anaconda3\envs\simulator\lib\site-packages\numpy\core\_methods.py:75: RuntimeWarning: invalid value encountered in reduce
  ret = umr_sum(arr, axis, dtype, out, keepdims)

83/83 [============================>.] - ETA: 2s - loss: 0.1643

I am not sure how to debug this, since I don't really know where reduce is being called from.
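One thing that might help locate the call site is promoting the warning to an error, so it fails with a full traceback (a minimal sketch using NumPy's standard error-state handling, run before training starts):

import warnings
import numpy as np

# Raise FloatingPointError instead of warning when an invalid floating-point
# value (e.g. nan from 0/0 or inf - inf) is produced inside a NumPy operation.
np.seterr(invalid='raise')

# Alternatively, escalate RuntimeWarnings issued through the warnings module.
warnings.filterwarnings('error', category=RuntimeWarning)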

Here is my source, with the image augmentation steps removed because it would not let me post the full source:

import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import keras
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Convolution2D, MaxPooling2D, Dropout, Flatten, Dense
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from imgaug import augmenters as iaa
import cv2
import pandas as pd
import ntpath
import random
import tensorboard
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import time
from keras.layers.normalization import BatchNormalization
import sys
import getopt

np.random.seed(0)

img_verbose = True

datadir = 'D:\\BehavioralCloning\\GitHub\\TrainingData'
columns = ['center', 'left', 'right', 'steering', 'steering_orig', 'throttle', 'reverse', 'speed']
data = pd.read_csv(os.path.join(datadir, 'trainingData.csv'), names = columns)
pd.set_option('display.max_colwidth', -1)
data.head()

b_size = 1000

def path_leaf(path):
  head, tail = ntpath.split(path)
  return tail

data['center'] = data['center'].apply(path_leaf)
data['left']  = data['left'].apply(path_leaf)
data['right'] = data['right'].apply(path_leaf)
data.head()

num_bins = 25
samples_per_bin = 12000
hist, bins = np.histogram(data['steering'], num_bins)
center = (bins[:-1] + bins[1:]) * 0.5

if img_verbose is True:
    plt.bar(center, hist, width = 0.05)
    plt.plot((np.min(data['steering']), np.max(data['steering'])), (samples_per_bin, samples_per_bin))
    plt.show()
print('total data:', len(data) * 3)
remove_list = []
for j in range(num_bins):
  list_ = []
  for i in range(len(data['steering'])):
    if data['steering'][i] >= bins[j] and data['steering'][i] <= bins[j+1]:
      list_.append(i)
  list_ = shuffle(list_)
  list_ = list_[samples_per_bin:]
  remove_list.extend(list_)

print('removed:', len(remove_list)*3)
data.drop(data.index[remove_list], inplace=True)
print('remaining:', len(data) *3)

hist, _ = np.histogram(data['steering'], (num_bins))

if img_verbose is True:
    plt.bar(center, hist, width=0.05)
    plt.plot((np.min(data['steering']), np.max(data['steering'])), (samples_per_bin, samples_per_bin))
    plt.show()

print(data.iloc[1])

def load_img_steering(datadir, df):
  image_path = []
  steering = []

  for i in range(len(df)):
    indexed_data = df.iloc[i]
    center, left, right = indexed_data[0], indexed_data[1], indexed_data[2]
    image_path.append(os.path.join(datadir, center.strip() + '.png'))
    steering.append(float(indexed_data[3]))
    # left image append
    image_path.append(os.path.join(datadir,left.strip() + '.png'))
    steering.append(float(indexed_data[3])+0.15)
    # right image append
    image_path.append(os.path.join(datadir,right.strip() + '.png'))
    steering.append(float(indexed_data[3])-0.15)
  image_paths = np.asarray(image_path)
  steerings = np.asarray(steering)
  return image_paths, steerings

image_paths, steerings = load_img_steering(datadir + '/images', data)
X_train, X_valid, y_train, y_valid = train_test_split(image_paths, steerings, test_size=0.2, random_state=6)

print('Training Samples: {}\nValid Samples: {}'.format(len(X_train), len(X_valid)))

if img_verbose is True:
    fig, axes = plt.subplots(1, 2, figsize=(12, 4))
    axes[0].hist(y_train, bins=num_bins, width=0.05, color='blue')
    axes[0].set_title('Training set')
    axes[1].hist(y_valid, bins=num_bins, width=0.05, color='red')
    axes[1].set_title('Validation set')
    plt.show()


def normalize(image):
    return image / np.mean(image)
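
# NOTE: if np.mean(image) is ever 0 (e.g. an all-black frame), the division
# above produces NaN/inf, and later reductions over such values can emit
# "RuntimeWarning: invalid value encountered in reduce".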

if img_verbose is True:
    image = image_paths[100]
    original_image = mpimg.imread(image)
    normal_image = normalize(original_image)

    fig, axs = plt.subplots(1, 2, figsize=(15, 10))
    fig.tight_layout()

    axs[0].imshow(original_image)
    axs[0].set_title('Original Image')

    axs[1].imshow(normal_image)
    axs[1].set_title('Normal Image')
    plt.show()      

def random_augment(image, steering_angle):
    image = mpimg.imread(image)
    image = normalize(image)
    if np.random.rand() < 0.5:
      image = pan(image)
    if np.random.rand() < 0.5:
      image = zoom(image)
    if np.random.rand() < 0.5:
      image = img_random_brightness(image)
    if np.random.rand() < 0.5:
      image, steering_angle = img_random_flip(image, steering_angle)
    if np.random.rand() < 0.5:
      image = add_shadow(image)
    if np.random.rand() < 0.5:
      image = add_bright_spot(image)
    return image, steering_angle

if img_verbose is True:
    ncol = 2
    nrow = 10

    fig, axs = plt.subplots(nrow, ncol, figsize=(15, 50))
    fig.tight_layout()

    for i in range(10):
      randnum = random.randint(0, len(image_paths) - 1)
      random_image = image_paths[randnum]
      random_steering = steerings[randnum]

      original_image = mpimg.imread(random_image)
      augmented_image, steering = random_augment(random_image, random_steering)

      axs[i][0].imshow(original_image)
      axs[i][0].set_title("Original Image")

      axs[i][1].imshow(augmented_image)
      axs[i][1].set_title("Augmented Image")
    plt.show()


def img_preprocess(img):
    img = normalize(img)
    img = img[60:135,:,:]
    img = cv2.cvtColor(np.float32(img), cv2.COLOR_RGB2YUV)
    img = cv2.GaussianBlur(img,  (3, 3), 0)
    img = cv2.resize(img, (200, 66))
    return img

if img_verbose is True:
    image = image_paths[100]
    original_image = mpimg.imread(image)
    preprocessed_image = img_preprocess(original_image)

    fig, axs = plt.subplots(1, 2, figsize=(15, 10))
    fig.tight_layout()
    axs[0].imshow(original_image)
    axs[0].set_title('Original Image')
    axs[1].imshow(preprocessed_image)
    axs[1].set_title('Preprocessed Image')
    plt.show()

def batch_generator(image_paths, steering_ang, batch_size, istraining):

  while True:
    batch_img = []
    batch_steering = []

    for i in range(batch_size):
      random_index = random.randint(0, len(image_paths) - 1)

      if istraining:
        im, steering = random_augment(image_paths[random_index], steering_ang[random_index])

      else:
        im = mpimg.imread(image_paths[random_index])
        steering = steering_ang[random_index]
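      # NOTE: when istraining is true, `im` has already been divided by its
      # mean inside random_augment, so img_preprocess normalizes it again here.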
      im = img_preprocess(im)
      batch_img.append(im)
      batch_steering.append(steering)
    yield (np.asarray(batch_img), np.asarray(batch_steering))

x_train_gen, y_train_gen = next(batch_generator(X_train, y_train, 1, 1))
x_valid_gen, y_valid_gen = next(batch_generator(X_valid, y_valid, 1, 0))
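
# Sanity check (debugging aid, not part of the original pipeline): make sure
# the generated batches contain only finite values.
if not np.all(np.isfinite(x_train_gen)):
    print('non-finite values in training batch')
if not np.all(np.isfinite(x_valid_gen)):
    print('non-finite values in validation batch')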

if img_verbose is True:
    fig, axs = plt.subplots(1, 2, figsize=(15, 10))
    fig.tight_layout()

    axs[0].imshow(x_train_gen[0])
    axs[0].set_title('Training Image')

    axs[1].imshow(x_valid_gen[0])
    axs[1].set_title('Validation Image')
    plt.show()

def nvidia_model():
    model = Sequential()
    model.add(Convolution2D(24, (5, 5), strides=(2, 2), input_shape=(66, 200, 3), activation='elu'))
    model.add(BatchNormalization())

    model.add(Convolution2D(36, (5, 5), strides=(2, 2), activation='elu'))
    model.add(BatchNormalization())

    model.add(Convolution2D(48, (5, 5), strides=(2, 2), activation='elu'))
    model.add(BatchNormalization())
    model.add(Convolution2D(64, (3, 3), activation='elu'))
    model.add(BatchNormalization())

    model.add(Convolution2D(64, (3, 3), activation='elu'))
    model.add(BatchNormalization())
    #   model.add(Dropout(0.5))


    model.add(Flatten())

    model.add(Dense(1164, activation = 'elu'))
    model.add(BatchNormalization())
    #   model.add(Dropout(0.5))

    model.add(Dense(200, activation = 'elu'))
    model.add(BatchNormalization())
    #   model.add(Dropout(0.5))

    model.add(Dense(50, activation = 'elu'))
    model.add(BatchNormalization())
    #   model.add(Dropout(0.5))

    model.add(Dense(10, activation = 'elu'))
    model.add(BatchNormalization())
    #   model.add(Dropout(0.5))

    model.add(Dense(1))

    optimizer = Adam(lr=0.0001)
    model.compile(loss='mse', optimizer=optimizer)
    return model


callbacks = [EarlyStopping(monitor='val_loss', patience=4, min_delta=0.005),
             ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True),
             ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, verbose=1, mode='auto', min_delta=0.01, cooldown=0, min_lr=1e-10)]
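
# Debugging aid (an assumption on my part, not in the original run):
# keras.callbacks.TerminateOnNaN stops training as soon as the loss becomes
# NaN, which makes the first bad batch easier to isolate.
# callbacks.append(keras.callbacks.TerminateOnNaN())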
model = nvidia_model()
print(model.summary())

history = model.fit_generator(batch_generator(X_train, y_train, b_size, 1),
                              steps_per_epoch=len(X_train) // b_size,
                              epochs=5000,
                              callbacks=callbacks,
                              validation_data=batch_generator(X_valid, y_valid, b_size, 0),
                              validation_steps=len(X_valid) // b_size,
                              verbose=1,
                              shuffle=True)

plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.xlabel('Epochs')
plt.ylabel('loss')

plt.legend(['loss', 'validation loss'])

plt.show()
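
I also wondered whether NaNs could already be present in the CSV, since a NaN steering value would flow straight through the generator into the loss. A quick sanity check along these lines (a minimal sketch against the `data` frame loaded above, assuming 'steering' parses as float):

# Count missing values per column; a nonzero 'steering' count here would mean
# NaNs reach y_train directly.
print(data.isnull().sum())
print('non-finite steering values:', np.count_nonzero(~np.isfinite(data['steering'])))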

0 Answers:

There are no answers