I have an autoencoder, and since I want to know what each layer does to its input, I would like to display the output of every layer as an image. My simple code is below. I have used
from keras import backend as K
inputs = [K.learning_phase()] + autoencoder.inputs
_convout1_f = K.function(inputs, [autoencoder.layers[12].output])
def convout1_f(X):
    # The [0] is to disable the training phase flag
    return _convout1_f([0] + [X])
C1 = convout1_f(X)  # X: a batch of input images, e.g. x_test[:1] with shape (1, 28, 28, 1)
C1 = np.squeeze(C1)
to access the output of each layer. For example, the output of layer 12 has shape (28, 28, 16), which means I have 16 filters in that conv layer. How can I now display the output of this layer as 16 separate images?
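What I have in mind is roughly the following (only a minimal sketch, assuming C1 is the squeezed (28, 28, 16) activation from the snippet above; the 4x4 grid is my own choice), but I am not sure this is the right approach:

import numpy as np
import matplotlib.pyplot as plt

# C1 is assumed to have shape (28, 28, 16): one 28x28 feature map per filter
fig, axes = plt.subplots(4, 4, figsize=(8, 8))
for i, ax in enumerate(axes.ravel()):
    ax.imshow(C1[:, :, i], cmap='gray')       # i-th feature map as a grayscale image
    ax.set_title('filter %d' % i, fontsize=8)
    ax.axis('off')
plt.tight_layout()
plt.show()

My full code is below.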
from __future__ import absolute_import
from __future__ import print_function
import theano
import warnings
import os
import pylab as pl
import matplotlib.cm as cm
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.layers import Input
from keras.layers import Conv2D
from keras.models import Model
from keras.callbacks import TensorBoard
from keras import backend as K
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, UpSampling2D
from keras.utils import np_utils
print(theano.config.device)
warnings.filterwarnings("ignore", category=DeprecationWarning)
np.set_printoptions(precision=5, suppress=True)
nb_classes = 10
# the data, shuffled and split between train and test sets
#(X_train, y_train), (X_test, y_test) = mnist.load_data()
#
#X_train = X_train.reshape(X_train.shape[0],1,28, 28)
#X_test = X_test.reshape(X_test.shape[0], 1,28, 28)
#X_train = X_train.astype("float32")
#X_test = X_test.astype("float32")
#X_train /= 255
#X_test /= 255
#print('X_train shape:', X_train.shape)
#print(X_train.shape[0], 'train samples')
#print(X_test.shape[0], 'test samples')
#####################################################################
input_img = Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format
conv1 = Conv2D(16, (3, 3), activation='relu', padding='same', name='conv1' )(input_img)
mxp1 = MaxPooling2D((2, 2), padding='same')(conv1)
conv2 = Conv2D(8, (3, 3), activation='relu', padding='same', name='conv2')(mxp1)
mxp2 = MaxPooling2D((2, 2), padding='same')(conv2)
conv3 = Conv2D(8, (3, 3), activation='relu', padding='same', name='conv3')(mxp2)
encoded = MaxPooling2D((2, 2), padding='same')(conv3)
# at this point the representation is (4, 4, 8) i.e. 128-dimensional
conv4 = Conv2D(8, (3, 3), activation='relu', padding='same', name='conv4')(encoded)
us1 = UpSampling2D((2, 2))(conv4)
conv5 = Conv2D(8, (3, 3), activation='relu', padding='same', name='conv5')(us1)
us2 = UpSampling2D((2, 2))(conv5)
conv6 = Conv2D(16, (3, 3), activation='relu', name='conv6')(us2)
up3 = UpSampling2D((2, 2))(conv6)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(up3)
conv1_model = Model(input_img, conv1)
encoder = Model(input_img, encoded)
#decoder = Model(input_img, encoded)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
autoencoder.fit(x_train, x_train,
                epochs=5,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test, x_test),
                callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])
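Since the script already builds conv1_model = Model(input_img, conv1), I suppose the same idea could be extended to any named layer after training. A sketch of what I mean (the layer name 'conv3' and the 2x4 layout are my assumptions, not a tested solution):

# Map the input image to the output of one named layer of the trained autoencoder.
layer_name = 'conv3'   # assumption: visualise this layer; any named layer should work
layer_model = Model(inputs=autoencoder.input,
                    outputs=autoencoder.get_layer(layer_name).output)

# Run a single test image through it; the result has shape (1, H, W, n_filters).
feature_maps = np.squeeze(layer_model.predict(x_test[:1]))   # -> (H, W, n_filters)

n_filters = feature_maps.shape[-1]
fig, axes = plt.subplots(2, n_filters // 2, figsize=(12, 4))
for i, ax in enumerate(axes.ravel()):
    ax.imshow(feature_maps[:, :, i], cmap='gray')
    ax.set_title('%s / %d' % (layer_name, i), fontsize=8)
    ax.axis('off')
plt.show()

Is building such an intermediate Model per layer the recommended way to get these images, or is the K.function approach above preferable?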