InvalidArgumentError — how can I resolve this error?

Asked: 2019-01-28 00:46:57

Tags: python tensorflow keras keras-layer

I have attached my code below. It trains to completion without any errors, but when I try to display the output of the layer after the Lambda layer, it produces this error:


    InvalidArgumentError: You must feed a value for placeholder tensor 'input_78'
    with dtype float and shape [?,28,28,1]
      [[{{node input_78}} = Placeholder[dtype=DT_FLOAT, shape=[?,28,28,1],
        _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]
      [[{{node lambda_35/add/_2359}} = _Recv[client_terminated=false,
        recv_device="/job:localhost/replica:0/task:0/device:CPU:0",
        send_device="/job:localhost/replica:0/task:0/device:GPU:0",
        send_device_incarnation=1, tensor_name="edge_50_lambda_35/add",
        tensor_type=DT_FLOAT,
        _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]

I had implemented this code before adding w_expand as an input to the fit function, and it displayed the outputs without any error, but with this code it produces the error above. I don't understand why it shows this error. Please help me; I really need a solution.
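From the traceback, the placeholder 'input_78' (one of the two [?,28,28,1] inputs) is being evaluated without a value. Since the Lambda layer adds wtm to the encoder output, the tensor lambda_35/add depends on both inputs, so it seems any backend function that fetches a tensor after the Lambda must be fed both of them. A minimal sketch of that pattern, using the names from the code below:

    # sketch only: every tensor in the function's input list needs a value
    fetch = K.function([K.learning_phase()] + watermark_extraction.inputs,
                       [watermark_extraction.layers[5].output])
    out = fetch([0, x_test[0:1], wt_expand[0:1]])  # phase, image, wtm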

    from keras.layers import Input, Concatenate, GaussianNoise,Dropout
    from keras.layers import Conv2D
    from keras.models import Model
    from keras.datasets import mnist
    from keras.callbacks import TensorBoard
    from keras import backend as K
    from keras import layers
    import matplotlib.pyplot as plt
    import tensorflow as tf
    import keras as Kr
    import numpy as np
    import pylab as pl
    import matplotlib.cm as cm

    #-----------------building w train---------------------------------------------
    w_main = np.random.randint(2,size=(1,4,4,1))
    w_main=w_main.astype(np.float32)
    w_expand=np.zeros((1,28,28,1),dtype='float32')
    w_expand[:,0:4,0:4]=w_main
    w_expand=w_expand.reshape(1,28,28,1)  # reshape returns a new array; without assignment this line was a no-op
    w_expand=np.repeat(w_expand,49999,0)
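    # w_expand now has shape (49999, 28, 28, 1): the 4x4 watermark zero-padded
    # to 28x28, repeated once per training image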

    #-----------------building w test---------------------------------------------
    w_test = np.random.randint(2,size=(1,4,4,1))
    w_test=w_test.astype(np.float32)
    wt_expand=np.zeros((1,28,28,1),dtype='float32')
    wt_expand[:,0:4,0:4]=w_test
    wt_expand=wt_expand.reshape(1,28,28,1)  # as above, assign the result so the reshape takes effect
    wt_expand=np.repeat(wt_expand,9999,0)
    #-----------------------encoder------------------------------------------------
    #------------------------------------------------------------------------------
    wtm=Input((28,28,1))
    image = Input((28, 28, 1))
    conv1 = Conv2D(16, (3, 3), activation='relu', padding='same', name='convl1e')(image)
    conv2 = Conv2D(8, (3, 3), activation='relu', padding='same', name='convl2e')(conv1)
    conv3 = Conv2D(8, (3, 3), activation='relu', padding='same', name='convl3e')(conv2)
    DrO=Dropout(0.25)(conv3)
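    # note: DrO (the Dropout output) is defined but unused -- `encoded` below is
    # built from conv3, so the dropout currently has no effect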
    encoded =  Conv2D(1, (3, 3), activation='relu', padding='same',name='reconstructed_I')(conv3)


    #-----------------------adding w---------------------------------------
    #add_const = Kr.layers.Lambda(lambda x: x + Kr.backend.constant(w_expand))
    add_const = Kr.layers.Lambda(lambda x: x + wtm)
    encoded_merged = add_const(encoded)
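    # the Lambda closes over the second Input (wtm), so encoded_merged -- and
    # every tensor downstream of it -- depends on both `image` and `wtm`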

    encoder=Model(inputs=image, outputs=encoded_merged)
    encoder.summary()

    #-----------------------decoder------------------------------------------------
    #------------------------------------------------------------------------------

    #encoded_merged = Input((28, 28, 2))
    deconv1 = Conv2D(8, (3, 3), activation='relu', padding='same', name='convl1d')(encoded_merged)
    deconv2 = Conv2D(8, (3, 3), activation='relu', padding='same', name='convl2d')(deconv1)
    deconv3 = Conv2D(16, (3, 3), activation='relu',padding='same', name='convl3d')(deconv2)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='decoder_output')(deconv3) 

    #decoder=Model(inputs=encoded_merged, outputs=decoded)
    #decoder.summary()
    model=Model(inputs=image,outputs=decoded)
    #----------------------w extraction------------------------------------
    convw1 = Conv2D(8, (3,3), activation='relu', padding='same', name='conl1w')(decoded)
    convw2 = Conv2D(4, (3, 3), activation='relu', padding='same', name='convl2w')(convw1)
    convw3 = Conv2D(2, (3, 3), activation='relu', padding='same', name='conl3w')(convw2)
    pred_w = Conv2D(1, (3, 3), activation='relu', padding='same', name='reconstructed_W')(convw3)  
    # reconsider activation (is W positive?)
    # should be filter=1 to match W
    watermark_extraction=Model(inputs=[image,wtm],outputs=[decoded,pred_w])
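    # full model: [image, wtm] -> [decoded, pred_w]; evaluating any of its
    # tensors requires feeding both inputs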


    #----------------------training the model--------------------------------------
    #------------------------------------------------------------------------------
    #----------------------Data preparation----------------------------------------

    (x_train, _), (x_test, _) = mnist.load_data()
    x_validation=x_train[1:10000,:,:]
    x_train=x_train[10001:60000,:,:]
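    # these slices yield 49999 training and 9999 validation images, matching the
    # repeat counts of w_expand and wt_expand (indices 0 and 10000 are dropped)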
    #
    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.
    x_validation = x_validation.astype('float32') / 255.
    x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))  # adapt this if using `channels_first` image data format
    x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))  # adapt this if using `channels_first` image data format
    x_validation = np.reshape(x_validation, (len(x_validation), 28, 28, 1))

    #---------------------compile and train the model------------------------------
    # is accuracy sensible metric for this model?
    watermark_extraction.compile(optimizer='adadelta', loss={'decoder_output':'mse','reconstructed_W':'mse'}, metrics=['mae'])
    watermark_extraction.fit([x_train,w_expand], [x_train,w_expand],
              epochs=4,
              batch_size=128, 
              validation_data=([x_validation,wt_expand], [x_validation,wt_expand]),
              callbacks=[TensorBoard(log_dir='C:/tmp/autoencoder', histogram_freq=0, write_graph=False)])
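    # training runs without errors, presumably because fit() is given values for
    # both inputs; the failure only appears later, when the visualization
    # function is called with a single input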
    model.summary()
    #model.fit([images, w], [images, w], batch_size=64, epochs=5)

    #--------------------visuallize the output layers------------------------------
    inputs = [K.learning_phase()] + watermark_extraction.inputs
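    # watermark_extraction.inputs == [image, wtm], so `inputs` now holds three
    # tensors: learning_phase, image and wtm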

    _convout1_f = K.function(inputs, [watermark_extraction.layers[5].output])
    def convout1_f(X):
        # The [0] is to disable the training phase flag
        return _convout1_f([0] + [X])
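    # Hedged fix sketch (assumption: the unfed placeholder is one of the two
    # model inputs). Since `inputs` holds three tensors, the call must supply
    # three values; feeding a watermark sample alongside the image, and using
    # this in place of convout1_f(X) below, should avoid the placeholder error:
    def convout1_f_fixed(X, W=wt_expand[0:1]):
        # [0] disables the training phase; X feeds `image`, W feeds `wtm`
        return _convout1_f([0, X, W])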

    # utility functions 
    i = 4600

    # Visualize the first layer of convolutions on an input image
    X = x_test[i:i+1]    
    # Visualize weights
    W = model.layers[1].get_weights()[0][:,:,0,:]
    w1=np.transpose(W,(2,0,1))  # move the filter axis first; reshape(16,3,3) would scramble the kernels
    W = np.squeeze(w1)
    print("W shape : ", W.shape)

    for i in range(0,16):
        plt.subplot(4,4,i+1)
        plt.imshow(w1[i,:,:], interpolation='nearest',cmap='gray')
    plt.show()
    W = model.layers[2].get_weights()[0][:,:,0,:]
    w2=np.transpose(W,(2,0,1))  # move the filter axis first, as above
    W = np.squeeze(w2)
    print("W shape : ", W.shape)

    for i in range(0,8):
        plt.subplot(4,4,i+1)
        plt.imshow(w2[i,:,:], interpolation='nearest',cmap='gray')
    plt.show()


    # Visualize convolution result (after activation)
    C1 = convout1_f(X)
    C1 = np.squeeze(C1)
    print("C1 shape : ", C1.shape)
    for i in range(0,C1.shape[2]):
        plt.subplot(4,4,i+1)
        plt.imshow(C1[:,:,i], interpolation='nearest',cmap='gray')
    plt.show()

0 Answers:

There are no answers yet.