model.trainable = False和layer.trainable = False为每一层提供非常不同的性能

时间:2019-06-07 13:20:14

标签: tensorflow keras deep-learning tf.keras

for layer in base_model_seq.layers: layer.trainable=False 和 base_model_seq.trainable=False 给出了截然不同的结果。据我了解,两种都是实现相同行为的方法

数据管道:这两种情况都完全相同。提供代码以供参考

import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from random_eraser import get_random_eraser

# --- Configuration -------------------------------------------------------
img_height = 224
img_width = 224
IMAGE_DIR = "./data/images"
train_batch_size = 32
val_batch_size = 32
test_batch_size = 32
seed = 43

# --- Labels --------------------------------------------------------------
# Read the CSVs once (the original read them twice, verbatim).
# `category` must be str for flow_from_dataframe's categorical class_mode.
traincsv = pd.read_csv('./data/train.csv')
testcsv = pd.read_csv('./data/test_ApKoW4T.csv')
traincsv['category'] = traincsv['category'].astype(str)

# Stratified 90/10 train/validation split keeps class proportions intact.
df_train, df_val = train_test_split(
    traincsv, stratify=traincsv.category, random_state=42,
    test_size=0.1, shuffle=True)

# Training-time augmentation; random erasing is applied per-pixel.
# NOTE(review): `get_random_eraser` is a project-local helper — its exact
# erasing behavior is assumed from its name; verify against its source.
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=10, width_shift_range=0.3, height_shift_range=0.3,
    rescale=1. / 255, shear_range=0.0, zoom_range=0.3,
    horizontal_flip=True, fill_mode='nearest',
    preprocessing_function=get_random_eraser(pixel_level=True, s_h=0.4))

# Validation images are only rescaled — never augmented.
val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_dataframe(
    df_train, directory=IMAGE_DIR, x_col='image', y_col='category',
    class_mode="categorical", target_size=(img_height, img_width),
    batch_size=train_batch_size, shuffle=True)

validation_generator = val_datagen.flow_from_dataframe(
    df_val, directory=IMAGE_DIR, x_col='image', y_col='category',
    class_mode="categorical", target_size=(img_height, img_width),
    batch_size=val_batch_size, shuffle=True)

# Derive step counts from the configured batch sizes instead of a
# hard-coded 32, so changing the batch size cannot desynchronize them.
steps_per_epoch = train_generator.n // train_batch_size
validation_steps = validation_generator.n // val_batch_size

现在在###### ...之后的部分是我在运行两个实验中唯一改变的部分,但结果却截然不同

import tensorflow as tf

from __future__ import absolute_import, division, print_function

import os

import tensorflow as tf
from tensorflow import keras
print("TensorFlow version is ", tf.__version__)

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from tensorflow.keras.applications import MobileNet, InceptionResNetV2
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
from tensorflow.python.keras.applications import densenet_rootexposed
import tensorflow as tf

IMAGE_DIR = "./data/images"
img_height =224
img_width = 224
IMG_SHAPE = (img_height,img_width, 3)
def step_decay(epoch, lr):
    print(lr)
    drop = 0.96
    return lr*drop
lrate = tf.keras.callbacks.LearningRateScheduler(step_decay)
callbacks = [lrate]
base_model_seq = keras.applications.densenet.DenseNet201(input_shape=IMG_SHAPE,
                                              include_top=False, 
                                                weights='imagenet')
#######################################This is the only part i change while training the models
base_model_seq.trainable=False
# for layer in base_model_seq.layers:
#     layer.trainable=False
#######################################
model_seq = tf.keras.Sequential([
  base_model_seq,
  keras.layers.Flatten(),
  keras.layers.Dropout(0.5),
  keras.layers.Dense(2048, activation='relu'),
  keras.layers.Dropout(0.5),
  keras.layers.BatchNormalization(),
  keras.layers.Dense(5, activation='softmax')
])


for layer in base_model_seq.layers:
    layer._name = layer._name + str("_1729")
print(len(model_seq.trainable_variables))


model_seq.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001), 
              loss='categorical_crossentropy', 
              metrics=['accuracy'])

print('************************')
print(model_seq.summary())
epochs = 100

当我通过用base_model_seq.trainable=False冻结模型来运行模型时,模型以0.8 val精度开始,并且在前20个时期内上升到0.91,但是当我通过冻结模型运行时 for layer in base_model_seq.layers: layer.trainable=False 验证准确性从0.72开始,在20个时期内达到0.82,然后停滞在那里。我已经重复了足够多次的实验,以确保不能将其归因于偶然性初始化。请指出我描述的方法不一致的原因

0 个答案:

没有答案
相关问题