Keras optimizer is not supported when eager execution is enabled

Asked: 2020-06-04 17:23:23

Tags: tensorflow keras generative-adversarial-network

I am trying to generate MNIST dataset images. Here is my code:

fns.py:

import math
import numpy as np

def combine_images(generated_images):
    total,width,height = generated_images.shape[:-1]
    cols = int(math.sqrt(total))
    rows = math.ceil(float(total)/cols)
    combined_image = np.zeros((height*rows, width*cols),
                              dtype=generated_images.dtype)

    for index, image in enumerate(generated_images):
        i = int(index/cols)
        j = index % cols
        combined_image[width*i:width*(i+1), height*j:height*(j+1)] = image[:, :, 0]
    return combined_image

def show_progress(epoch, batch, g_loss, d_loss, g_acc, d_acc):
    msg = "epoch: {}, batch: {}, g_loss: {}, d_loss: {}, g_accuracy: {}, d_accuracy: {}"
    print(msg.format(epoch, batch, g_loss, d_loss, g_acc, d_acc))

main.py:

from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Activation, Reshape
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.layers import UpSampling2D, Conv2D
from tensorflow.python.keras.layers import ELU
from tensorflow.python.keras.layers import Flatten, Dropout
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.datasets import mnist

import os
from PIL import Image
from fns import *

def generator(input_dimension=100, units=1024, activation_function='relu'):
    model = Sequential()
    model.add(Dense(input_dim=input_dimension, units=units))
    model.add(BatchNormalization())
    model.add(Activation(activation_function))

    model.add(Dense(128*7*7))
    model.add(BatchNormalization())
    model.add(Activation(activation_function))

    model.add(Reshape((7,7,128), input_shape=(128*7*7,)))
    model.add(UpSampling2D((2,2)))
    model.add(Conv2D(64, (5,5), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation(activation_function))
    model.add(UpSampling2D((2,2)))

    model.add(Conv2D(1, (5,5), padding='same'))
    model.add(Activation('tanh'))

    print(model.summary())
    return model

def discriminator(input_shape=(28,28,1), nb_filter=64):
    model = Sequential()
    model.add(Conv2D(nb_filter, (5,5), strides=(2,2), padding='same', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(ELU())

    model.add(Conv2D(2*nb_filter, (5,5), strides=(2,2)))
    model.add(BatchNormalization())
    model.add(ELU())

    model.add(Flatten())
    model.add(Dense(4*nb_filter))
    model.add(BatchNormalization())
    model.add(ELU())
    model.add(Dropout(0.5))

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    print(model.summary())
    return model


batch_size = 32
num_epoch = 50
learning_rate = 0.0002

image_path = 'images/'
if not os.path.exists(image_path):
    os.mkdir(image_path)

def train():
    (x_train, y_train), (_, _) = mnist.load_data()
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5
    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)

    g = generator()
    d = discriminator()

    optimize = Adam(lr=learning_rate, beta_1=0.5)
    d.trainable = True
    d.compile(
        loss='binary_crossentropy',
        metrics=['accuracy'],
        optimizer=optimize)

    d.trainable = False
    dcgan = Sequential([g, d])
    dcgan.compile(
        loss='binary_crossentropy',
        metrics=['accuracy'],
        optimizer=optimize)

    num_batches = x_train.shape[0] // batch_size    # floor division: number of full batches
    gen_img = np.array([np.random.uniform(-1, 1, 100) for _ in range(49)])
    y_d_true = [1] * batch_size
    y_d_gen = [0] * batch_size
    y_g = [1] * batch_size

    for epoch in range(num_epoch):
        for i in range(num_batches):
            x_d_batch = x_train[i*batch_size:(i+1)*batch_size]
            x_g = np.array([np.random.normal(0, 0.5, 100) for _ in range(batch_size)])
            x_d_gen = g.predict(x_g)

            d_loss = d.train_on_batch(x_d_batch, y_d_true)
            d_loss = d.train_on_batch(x_d_gen, y_d_gen)

            g_loss = dcgan.train_on_batch(x_g, y_g)
            show_progress(epoch, i, g_loss[0], d_loss[0], g_loss[1], d_loss[1])

        image = combine_images(g.predict(gen_img))
        image = image * 127.5 + 127.5
        Image.fromarray(image.astype(np.uint8)).save(image_path + "%03d.png" % (epoch))

if __name__ == '__main__':
    train()

When I run this script, I get this error:

Traceback (most recent call last):
  File "e:/Programming/Tensorflow/tensorflow-ile-goruntu-isleme/gans/main.py", line 113, in <module>
    train()
  File "e:/Programming/Tensorflow/tensorflow-ile-goruntu-isleme/gans/main.py", line 81, in train
    optimizer=optimize)
  File "D:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 325, in compile
    self._validate_compile(optimizer, metrics, **kwargs)
  File "D:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1560, in _validate_compile
    '`tf.compat.v1.keras` Optimizer (', optimizer, ') is '
ValueError: ('`tf.compat.v1.keras` Optimizer (', <tensorflow.python.keras.optimizers.Adam object at 0x00000272008C7B48>, ') is not supported when eager execution is enabled. Use a `tf.keras` Optimizer instead, or disable eager execution.')

I have searched many pages but could not find a satisfactory solution.

1 Answer:

Answer 0: (score: 1)

In TensorFlow 2.x, eager execution is enabled by default.
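
You can verify whether eager execution is active with tf.executing_eagerly(); a minimal check:

import tensorflow as tf

# True by default in TensorFlow 2.x, unless eager execution has been disabled
print(tf.executing_eagerly())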

I was able to reproduce your error in tensorflow version 2.2.0 with the program below. The error occurs when the optimizer is imported with from tensorflow.python.keras.optimizers import Adam -

Code to reproduce the error -

%tensorflow_version 2.x
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.optimizers import Adam
#from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K

import os
import numpy as np
import matplotlib.pyplot as plt

(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()

train_images = train_images[:500]
train_labels = train_labels[:500]

test_images = test_images[:50]
test_labels = test_labels[:50]

model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(10)
])

lr = 0.01
adam = Adam(lr)

# Define the Gradient Function
epoch_gradient = []
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Define the Required Callback Function
class GradientCalcCallback(tf.keras.callbacks.Callback):
  def on_epoch_end(self, epoch, logs={}):
    with tf.GradientTape() as tape:
       logits = model(train_images, training=True)
       loss = loss_fn(train_labels, logits)    
    grad = tape.gradient(loss, model.trainable_weights)
    model.optimizer.apply_gradients(zip(grad, model.trainable_variables))
    epoch_gradient.append(grad)

gradcalc = GradientCalcCallback()

# Define the Required Callback Function
class printlearningrate(tf.keras.callbacks.Callback):
    def on_epoch_begin(self, epoch, logs={}):
        optimizer = self.model.optimizer
        lr = K.eval(optimizer.lr)
        Epoch_count = epoch + 1
        print('\n', "Epoch:", Epoch_count, ', LR: {:.2f}'.format(lr))

printlr = printlearningrate() 

def scheduler(epoch):
  optimizer = model.optimizer
  return K.eval(optimizer.lr + 0.01)

updatelr = tf.keras.callbacks.LearningRateScheduler(scheduler)

model.compile(optimizer=adam, 
          loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
          metrics=['accuracy'])

epochs = 10 

history = model.fit(train_images, train_labels, epochs=epochs, batch_size=len(train_images), 
                    validation_data=(test_images, test_labels),
                    callbacks = [printlr,updatelr,gradcalc])

# (7) Convert to a 2 dimensional array of (epoch, gradients) type
gradient = np.asarray(epoch_gradient)
print("Total number of epochs run:", epochs)
print("Gradient Array has the shape:",gradient.shape)

Output -

2.2.0
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-37-0f0fef768c1c> in <module>()
     70 model.compile(optimizer=adam, 
     71           loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
---> 72           metrics=['accuracy'])
     73 
     74 epochs = 10

1 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py in _validate_compile(self, optimizer, metrics, **kwargs)
   1558         for opt in nest.flatten(optimizer)):
   1559       raise ValueError(
-> 1560           '`tf.compat.v1.keras` Optimizer (', optimizer, ') is '
   1561           'not supported when eager execution is enabled. Use a '
   1562           '`tf.keras` Optimizer instead, or disable eager '

ValueError: ('`tf.compat.v1.keras` Optimizer (', <tensorflow.python.keras.optimizers.Adam object at 0x7fce341a15c0>, ') is not supported when eager execution is enabled. Use a `tf.keras` Optimizer instead, or disable eager execution.')

Solution -

Change

from tensorflow.python.keras.optimizers import Adam

to

from tensorflow.keras.optimizers import Adam

Note: please also import the other libraries from tensorflow.keras rather than tensorflow.python.keras.
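
Applied to the main.py from the question, the import block would become the following (a sketch of just the imports; the rest of the script stays as it is):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Reshape
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.layers import ELU
from tensorflow.keras.layers import Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import mnist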

Fixed code -

%tensorflow_version 2.x
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K

import os
import numpy as np
import matplotlib.pyplot as plt

(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()

train_images = train_images[:500]
train_labels = train_labels[:500]

test_images = test_images[:50]
test_labels = test_labels[:50]

model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(10)
])

lr = 0.01
adam = Adam(lr)

# Define the Gradient Function
epoch_gradient = []
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Define the Required Callback Function
class GradientCalcCallback(tf.keras.callbacks.Callback):
  def on_epoch_end(self, epoch, logs={}):
    with tf.GradientTape() as tape:
       logits = model(train_images, training=True)
       loss = loss_fn(train_labels, logits)    
    grad = tape.gradient(loss, model.trainable_weights)
    model.optimizer.apply_gradients(zip(grad, model.trainable_variables))
    epoch_gradient.append(grad)

gradcalc = GradientCalcCallback()

# Define the Required Callback Function
class printlearningrate(tf.keras.callbacks.Callback):
    def on_epoch_begin(self, epoch, logs={}):
        optimizer = self.model.optimizer
        lr = K.eval(optimizer.lr)
        Epoch_count = epoch + 1
        print('\n', "Epoch:", Epoch_count, ', LR: {:.2f}'.format(lr))

printlr = printlearningrate() 

def scheduler(epoch):
  optimizer = model.optimizer
  return K.eval(optimizer.lr + 0.01)

updatelr = tf.keras.callbacks.LearningRateScheduler(scheduler)

model.compile(optimizer=adam, 
          loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
          metrics=['accuracy'])

epochs = 10 

history = model.fit(train_images, train_labels, epochs=epochs, batch_size=len(train_images), 
                    validation_data=(test_images, test_labels),
                    callbacks = [printlr,updatelr,gradcalc])

# (7) Convert to a 2 dimensional array of (epoch, gradients) type
gradient = np.asarray(epoch_gradient)
print("Total number of epochs run:", epochs)
print("Gradient Array has the shape:",gradient.shape)

Output -

2.2.0

 Epoch: 1 , LR: 0.01
Epoch 1/10
1/1 [==============================] - 0s 471ms/step - loss: 71.8890 - accuracy: 0.0740 - val_loss: 3694.5439 - val_accuracy: 0.0800 - lr: 0.0200

 Epoch: 2 , LR: 0.02
Epoch 2/10
1/1 [==============================] - 0s 330ms/step - loss: 113.0054 - accuracy: 0.1060 - val_loss: 172.5451 - val_accuracy: 0.0600 - lr: 0.0300

 Epoch: 3 , LR: 0.03
Epoch 3/10
1/1 [==============================] - 0s 331ms/step - loss: 3.3038 - accuracy: 0.0960 - val_loss: 280.0600 - val_accuracy: 0.1800 - lr: 0.0400

 Epoch: 4 , LR: 0.04
Epoch 4/10
1/1 [==============================] - 0s 339ms/step - loss: 3.2624 - accuracy: 0.0940 - val_loss: 2.3644 - val_accuracy: 0.1800 - lr: 0.0500

 Epoch: 5 , LR: 0.05
Epoch 5/10
1/1 [==============================] - 0s 335ms/step - loss: 2.3810 - accuracy: 0.1120 - val_loss: 2.3599 - val_accuracy: 0.1600 - lr: 0.0600

 Epoch: 6 , LR: 0.06
Epoch 6/10
1/1 [==============================] - 0s 339ms/step - loss: 2.3205 - accuracy: 0.1120 - val_loss: 2.3333 - val_accuracy: 0.0600 - lr: 0.0700

 Epoch: 7 , LR: 0.07
Epoch 7/10
1/1 [==============================] - 0s 337ms/step - loss: 2.3178 - accuracy: 0.1300 - val_loss: 2.3435 - val_accuracy: 0.0600 - lr: 0.0800

 Epoch: 8 , LR: 0.08
Epoch 8/10
1/1 [==============================] - 0s 338ms/step - loss: 2.3028 - accuracy: 0.1300 - val_loss: 2.3059 - val_accuracy: 0.0600 - lr: 0.0900

 Epoch: 9 , LR: 0.09
Epoch 9/10
1/1 [==============================] - 0s 336ms/step - loss: 2.2990 - accuracy: 0.1300 - val_loss: 2.3093 - val_accuracy: 0.1000 - lr: 0.1000

 Epoch: 10 , LR: 0.10
Epoch 10/10
1/1 [==============================] - 0s 339ms/step - loss: 2.3033 - accuracy: 0.1020 - val_loss: 2.3161 - val_accuracy: 0.1000 - lr: 0.1100
Total number of epochs run: 10
Gradient Array has the shape: (10, 10)
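
Alternatively, the error message itself offers a second option: disabling eager execution. A minimal sketch of that approach (switching to the tf.keras optimizer, as above, is the cleaner fix):

import tensorflow as tf

# Reverts TF 2.x to graph-mode behavior; generally not recommended
# unless you depend on legacy tf.compat.v1 code paths
tf.compat.v1.disable_eager_execution()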

Hope this answers your question. Happy learning.