Tensorflow 非常高的损失值

时间:2021-07-26 07:02:57

标签: python tensorflow machine-learning keras deep-learning

import tensorflow as tf
import matplotlib.pyplot as plt
import os
import numpy as np
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, DenseFeatures
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
import tensorflow.keras.optimizers as op
from tensorflow.keras.models import load_model

# Data pipelines: augmentation is applied to the training split only;
# validation and test images are merely rescaled to [0, 1].
train = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='nearest')
validation = ImageDataGenerator(rescale=1. / 255)
test = ImageDataGenerator(rescale=1. / 255)

# BUG FIX: target_size was missing on the training flow, so images were
# yielded at the default 256x256 while the model expects 224x224 inputs.
# BUG FIX: subset="training" is only valid together with a validation_split
# on the ImageDataGenerator; validation comes from its own directory here,
# so the subset argument was removed.
# NOTE(review): batch_size raised from 1 to 32 — single-sample batches give
# extremely noisy gradient estimates, a common cause of exploding loss.
train_dataset = train.flow_from_directory('raw-img/training',
                                          target_size=(224, 224),
                                          batch_size=32,
                                          class_mode='categorical')

validation_dataset = validation.flow_from_directory('raw-img/validation', target_size=(224, 224), batch_size=32,
                                                    class_mode='categorical')
testing_dataset = test.flow_from_directory('raw-img/testing', target_size=(224, 224), batch_size=32,
                                           class_mode='categorical')


# CNN classifier for 224x224 RGB images over 10 classes.
# Architecture: a stem conv block (the only one with BatchNormalization),
# six double-conv blocks of increasing width, then two dense layers and a
# softmax head. All activations are LeakyReLU(alpha=0.3) and every block
# ends with a light Dropout(0.01).

_LEAK = 0.3   # negative slope shared by every LeakyReLU
_DROP = 0.01  # dropout rate shared by every Dropout


def _double_conv_block(filters):
    """Two same-padded 3x3 convs of `filters` channels, each followed by
    LeakyReLU; a dropout between them, MaxPool + dropout at the end."""
    return [
        tf.keras.layers.Conv2D(filters, (3, 3), padding='same'),
        tf.keras.layers.LeakyReLU(alpha=_LEAK),
        tf.keras.layers.Dropout(_DROP),
        tf.keras.layers.Conv2D(filters, (3, 3), padding='same'),
        tf.keras.layers.LeakyReLU(alpha=_LEAK),
        tf.keras.layers.MaxPool2D(2, 2, padding='same'),
        tf.keras.layers.Dropout(_DROP),
    ]


# Stem block: defines the input shape and carries the sole BatchNormalization.
_layers = [
    tf.keras.layers.Conv2D(32, (3, 3), input_shape=(224, 224, 3), padding='same'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.LeakyReLU(alpha=_LEAK),
    tf.keras.layers.MaxPool2D(2, 2, padding='same'),
    tf.keras.layers.Dropout(_DROP),
]

# Feature extractor: widths double roughly every other block.
for _width in (32, 64, 128, 128, 256, 512):
    _layers.extend(_double_conv_block(_width))

# Classifier head: 10-way softmax matching class_mode='categorical'.
_layers += [
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1024),
    tf.keras.layers.LeakyReLU(alpha=_LEAK),
    tf.keras.layers.Dropout(_DROP),
    tf.keras.layers.Dense(512),
    tf.keras.layers.LeakyReLU(alpha=_LEAK),
    tf.keras.layers.Dropout(_DROP),
    tf.keras.layers.Dense(10, activation='softmax'),
]

model = tf.keras.models.Sequential(_layers)
# print the summary of the model architecture
model.summary()

# Shrink the learning rate by 20% after 3 epochs without val_loss
# improvement; stop (and restore the best weights) after 15 stale epochs.
rlronp = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=.8, patience=3, verbose=1, mode='min')
es = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=15, verbose=1,
                                      restore_best_weights=True)
model.compile(loss='categorical_crossentropy', optimizer=op.Adam(learning_rate=0.001), metrics=['accuracy'])
# BUG FIX: batch_size must not be passed to fit() when the input is a
# generator — the generator already yields batches and TF2 raises a
# ValueError for this combination. Batch size is controlled where the
# flow_from_directory generators are created.
model_fit = model.fit(train_dataset, epochs=100, validation_data=validation_dataset,
                      callbacks=[rlronp, es])

5865/26179 [======>.........] - 预计到达时间:12:23 - 损失:34992108.0000 - 准确度: 0.1589

您好,我有带 Adam 优化器的训练模型,但损失值非常高,我尝试将优化器更改为 SGD,但预测结果不正确。

如何更改代码以获得更好的结果和更低的损失值。

损失值 34992108.0000

0 个答案:

没有答案
相关问题