'tuple' object has no attribute 'layer'

Date: 2019-09-27 16:07:20

Tags: python tensorflow keras version

I'm having a lot of trouble trying to start training my model (a DCGAN). It throws the error: "'tuple' object has no attribute 'layer'". I read that this can be caused by having TensorFlow 1.14.0 together with Keras 2.2 or later. I tried to fix it by downgrading Keras to 2.1.5, but it gives me the same error. Please help! Thanks.

from __future__ import absolute_import, division, print_function, unicode_literals

from google.colab import drive
drive.mount('/mntDrive')

import os, sys      # os is for managing directories (working with files); sys is for working with system variables

# numpy
import numpy as np

# data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
from sklearn.model_selection import train_test_split

# Charts
import matplotlib.pyplot as plt
from matplotlib.pyplot import imread
# Progress bar
from tqdm import tqdm

# Image IO
from PIL import Image
import skimage.io
import skimage.transform
from skimage.transform import resize

# Deep learning
import tensorflow as tf

from tensorflow.keras import layers
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Dropout, Input
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras import initializers

# To make sure that we can reproduce the experiment and get the same results
np.random.seed(21)


# The dimension of our random noise vector.
random_dim = 100
from skimage.color import rgb2gray
import scipy.ndimage
import scipy.misc
import re
images = []
for root, dirnames, filenames in os.walk("/mntDrive/My Drive/Colab Notebooks/cubism"):
    for filename in filenames:
        if re.search(r"\.(jpg|jpeg|png)$", filename):
            filepath = os.path.join(root, filename)
            image = plt.imread(filepath)
            image = (image.astype(np.float32) - 127.5)/127.5
            image_resized = resize(image, (112, 112))
            images.append(image_resized)
images = np.array(images)


print('Original image shape: {}'.format(images.shape))
im_gray = rgb2gray(images)
print('New image shape: {}'.format(im_gray.shape))
images_resized = im_gray.reshape(320,12544)

def get_optimizer():
  optimizer=tf.keras.optimizers.Adam(0.001)
  return optimizer


def make_generator_model(optimizer):
    generator = tf.keras.Sequential()
    generator.add(layers.Dense(7*7*256, use_bias=False, input_shape=(random_dim,)))
    generator.add(layers.BatchNormalization())
    generator.add(layers.LeakyReLU())

    generator.add(layers.Reshape((7, 7, 256)))
    assert generator.output_shape == (None, 7, 7, 256) # Note: None is the batch size

    generator.add(layers.Conv2DTranspose(128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert generator.output_shape == (None, 14, 14, 128)
    generator.add(layers.BatchNormalization())
    generator.add(layers.LeakyReLU())

    generator.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert generator.output_shape == (None, 28, 28, 64)
    generator.add(layers.BatchNormalization())
    generator.add(layers.LeakyReLU())

    generator.add(layers.Conv2DTranspose(32, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert generator.output_shape == (None, 56, 56, 32)
    generator.add(layers.BatchNormalization())
    generator.add(layers.LeakyReLU())

    generator.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert generator.output_shape == (None, 112, 112, 1)
    generator.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001))

    return generator    

def make_discriminator_model(optimizer):
    discriminator = tf.keras.Sequential()
    discriminator.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[112, 112, 1]))
    discriminator.add(layers.LeakyReLU())
    discriminator.add(layers.Dropout(0.3))

    discriminator.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    discriminator.add(layers.LeakyReLU())
    discriminator.add(layers.Dropout(0.3))

    discriminator.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding='same'))   
    discriminator.add(layers.LeakyReLU())
    discriminator.add(layers.Dropout(0.3))

    discriminator.add(layers.Flatten())
    discriminator.add(layers.Dense(1, activation='sigmoid' ))
    discriminator.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001))    

    return discriminator

def get_gan_network(discriminator, random_dim, generator, optimizer):
    # We initially set trainable to False since we only want to train either the
    # generator or discriminator at a time
    discriminator.trainable = False
    # gan input (noise) will be 100-dimensional vectors
    gan_input = Input(shape=(random_dim,))
    # the output of the generator (an image)
    x = generator(gan_input)
    # get the output of the discriminator (probability if the image is real or not)
    gan_output = discriminator(x)
    gan = Model(inputs=gan_input, outputs=gan_output)
    gan.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001))
    return gan

def plot_generated_images(epoch, generator, examples=64, dim=(10, 10), figsize=(100, 100)):
    noise = np.random.normal(0, 1, size=[examples, random_dim]) #mean, std deviation, size
    generated_images = generator.predict(noise)
    generated_images = generated_images.reshape(examples, 112, 112)

    plt.figure(figsize=figsize)
    for i in range(generated_images.shape[0]):
        plt.subplot(dim[0], dim[1], i+1)
        plt.imshow(generated_images[i], interpolation='nearest', cmap='gray_r')
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('dcgan_generated_Originals_2_epoch_%d.png' % epoch)


# Save a wall of generated images every 50 epochs (see the e % 50 check in train below)
def train(epochs=15000, batch_size=80):
    # Training data: images_resized is defined at module level
    # Split the training data into batches of size 80
    batch_count = images_resized.shape[0] // batch_size 

    # Build our GAN network
    optimizer=get_optimizer()
    generator = make_generator_model(optimizer)
    discriminator = make_discriminator_model(optimizer)
    gan = get_gan_network(discriminator, random_dim, generator, optimizer)

    for e in range(1, epochs+1):
        print ('-'*15, 'Epoch %d' % e, '-'*15)
        for _ in tqdm(range(batch_count)):
            # Get a random set of input noise and images
            noise = np.random.normal(0, 1, size=[batch_size, random_dim])
            image_batch = images_resized[np.random.randint(0, images_resized.shape[0], size=batch_size)]

            # Generate a batch of fake images
            generated_images = generator.predict(noise)
            X = np.concatenate([image_batch, generated_images])  # real batch stacked on generated batch

            # Labels for generated and real data
            y_dis = np.zeros(2*batch_size)
            y_dis[:batch_size] = 0.9    # One-sided label smoothing: the first half of y_dis is 0.9 (real) and the second half stays 0 (fake)


            # Train discriminator
            discriminator.trainable = True
            discriminator.train_on_batch(X, y_dis)
            # Train the discriminator on X (a batch of real images stacked on
            # a batch of generated ones) against y_dis (0.9 for the real half,
            # 0 for the generated half): the generator keeps being pushed to
            # improve as long as its images are rejected.
            # train_on_batch(x, y) runs one gradient update on a single batch:
            # x is the training data, y the target data (lists if the model
            # has several inputs/outputs).

            # Train generator
            noise = np.random.normal(0, 1, size=[batch_size, random_dim])
            y_gen = np.ones(batch_size)
            discriminator.trainable = False
            gan.train_on_batch(noise, y_gen)            # Input: noise (random pixels) ----> y_gen (all ones)

        if e == 1 or e % 50 == 0:
            plot_generated_images(e, generator)


if __name__ == '__main__':
   train(30000, 80)

Here is the output:

Drive already mounted at /mntDrive; to attempt to forcibly remount, call drive.mount("/mntDrive", force_remount=True).
Original image shape: (320, 112, 112, 3)
New image shape: (320, 112, 112)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_impl.py:180: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:68: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.

WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:507: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.

    ---------------------------------------------------------------------------
    AttributeError                            Traceback (most recent call last)
    <ipython-input-18-6e3c9ece87ff> in <module>()
        196 
        197 if __name__ == '__main__':
    --> 198    train(30000, 80)

    11 frames
    <ipython-input-18-6e3c9ece87ff> in train(epochs, batch_size)
        161     generator = make_generator_model(optimizer)
        162     discriminator = make_discriminator_model(optimizer)
    --> 163     gan = get_gan_network(discriminator, random_dim, generator, optimizer)
        164 
        165     for e in range(1, epochs+1):

    <ipython-input-18-6e3c9ece87ff> in get_gan_network(discriminator, random_dim, generator, optimizer)
        128     gan_input = Input(shape=(random_dim,))
        129     # the output of the generator (an image)
    --> 130     x = generator(gan_input)
        131     # get the output of the discriminator (probability if the image is real or not)
        132     gan_output = discriminator(x)

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
        632                     outputs = base_layer_utils.mark_as_return(outputs, acd)
        633                 else:
    --> 634                   outputs = call_fn(inputs, *args, **kwargs)
        635 
        636             except TypeError as e:

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/sequential.py in call(self, inputs, training, mask)
        245       if not self.built:
        246         self._init_graph_network(self.inputs, self.outputs, name=self.name)
    --> 247       return super(Sequential, self).call(inputs, training=training, mask=mask)
        248 
        249     outputs = inputs  # handle the corner case where self.layers is empty

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py in call(self, inputs, training, mask)
        749                                 ' implement a `call` method.')
        750 
    --> 751     return self._run_internal_graph(inputs, training=training, mask=mask)
        752 
        753   def compute_output_shape(self, input_shape):

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py in _run_internal_graph(self, inputs, training, mask)
        891 
        892           # Compute outputs.
    --> 893           output_tensors = layer(computed_tensors, **kwargs)
        894 
        895           # Update tensor_dict.

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
        661               kwargs.pop('training')
        662             inputs, outputs = self._set_connectivity_metadata_(
    --> 663                 inputs, outputs, args, kwargs)
        664           self._handle_activity_regularization(inputs, outputs)
        665           self._set_mask_metadata(inputs, outputs, previous_mask)

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in _set_connectivity_metadata_(self, inputs, outputs, args, kwargs)
       1706     kwargs.pop('mask', None)  # `mask` should not be serialized.
       1707     self._add_inbound_node(
    -> 1708         input_tensors=inputs, output_tensors=outputs, arguments=kwargs)
       1709     return inputs, outputs
       1710 

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in _add_inbound_node(self, input_tensors, output_tensors, arguments)
       1793     """
       1794     inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,
    -> 1795                                         input_tensors)
       1796     node_indices = nest.map_structure(lambda t: t._keras_history.node_index,
       1797                                       input_tensors)

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
        513 
        514   return pack_sequence_as(
    --> 515       structure[0], [func(*x) for x in entries],
        516       expand_composites=expand_composites)
        517 

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
        513 
        514   return pack_sequence_as(
    --> 515       structure[0], [func(*x) for x in entries],
        516       expand_composites=expand_composites)
        517 

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in <lambda>(t)
       1792             `call` method of the layer at the call that created the node.
       1793     """
    -> 1794     inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,
       1795                                         input_tensors)
       1796     node_indices = nest.map_structure(lambda t: t._keras_history.node_index,
    AttributeError: 'tuple' object has no attribute 'layer'

3 Answers:

Answer 0 (score: 2)

I'm just a beginner in Python and deep learning, but I want to share this because it helped me fix a similar error. In your import section, try:

import tensorflow.keras

and keep the "tensorflow." prefix in front of everything else you import from Keras. Later, whenever you use anything from Keras, write it out in full. For example:

model.add(tensorflow.keras.layers.Conv2D())

Then you won't need "import tensorflow as tf".

The "tuple" probably refers to your input_shape. What I described above solved my problem.

I can't explain why this happens.
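
To make that concrete, here is a minimal sketch of a model written entirely through the tensorflow.keras path (the layer arguments are illustrative, not taken from the question):

import tensorflow.keras

# Every class comes from the same tensorflow.keras namespace, so the
# tensors and layers all belong to one Keras implementation.
model = tensorflow.keras.Sequential()
model.add(tensorflow.keras.layers.Conv2D(64, (5, 5), strides=(2, 2),
                                         padding='same',
                                         input_shape=(112, 112, 1)))
model.add(tensorflow.keras.layers.LeakyReLU())
model.add(tensorflow.keras.layers.Flatten())
model.add(tensorflow.keras.layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=tensorflow.keras.optimizers.Adam(0.001))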

Answer 1 (score: 0)

You imported the layers from tensorflow.keras, while the other functions come from keras. Either import the layers from keras too, or import the other functions from tensorflow.keras, as sketched below.
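
Applied to the question's code, the imports might be unified like this (a sketch; note that standalone Keras keeps LeakyReLU under keras.layers.advanced_activations, while tensorflow.keras exposes it directly under tensorflow.keras.layers):

# All imports routed through tensorflow.keras instead of standalone keras
from tensorflow.keras import layers, initializers
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import (Dense, Conv2D, Flatten, MaxPool2D,
                                     Dropout, Input, LeakyReLU)
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam

With one namespace throughout, the Input tensor created in get_gan_network comes from the same package as the tf.keras Sequential models, so generator(gan_input) no longer crosses the two Keras implementations.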

Answer 2 (score: 0)

I ran into this problem when using the GPU/TPU runtime on Colab and installing TensorFlow v1.14 with pip. The documentation advises against pip-installing TensorFlow on GPU/TPU runtimes and recommends using %tensorflow_version 1.x instead. Switching to that solved my problem.

Documentation: https://colab.research.google.com/notebooks/tensorflow_version.ipynb
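
In practice that means running this at the top of the notebook, before any TensorFlow import (a sketch of the usage shown in the linked notebook):

%tensorflow_version 1.x
import tensorflow as tf
print(tf.__version__)   # should report Colab's pre-installed 1.x build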