I have an autoencoder in Keras, and I want to do some processing on the encoder's output before passing it to the next layer. The output of the encoder part is a (1, 28, 28, 1) tensor. I want to treat it as 7x7 blocks and add a constant to the middle pixel of each block, but I do not know how to apply that kind of operation to a layer. Is it possible? Should I use Lambda? With Lambda I can already do simple things, such as adding the whole output of a layer to another tensor of the same size, but I do not know how to do this block-wise version. Could you tell me how? Maybe the question is simple, but I am a beginner and I really need help. In the code below I simply add w to the output of the encoder, where both have the same size; what I would actually like is to select part of the encoder output and add to it the pixels of a smaller w_expand (see the sketch after the code listing for the kind of block-centre addition I mean).
from keras.layers import Input, Concatenate, GaussianNoise, Dropout, BatchNormalization
from keras.layers import Conv2D
from keras.models import Model
from keras.datasets import mnist
from keras.callbacks import TensorBoard
from keras import backend as K
from keras import layers
import matplotlib.pyplot as plt
import tensorflow as tf
import keras as Kr
from keras.optimizers import SGD,RMSprop,Adam
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import numpy as np
import pylab as pl
import matplotlib.cm as cm
import keract
from matplotlib import pyplot
from keras import optimizers
from keras import regularizers
from keras.layers import Lambda
#-----------------building w train---------------------------------------------
w_expand=np.zeros((49999,28,28),dtype='float32')
wv_expand=np.zeros((9999,28,28),dtype='float32')
wt_random=np.random.randint(2, size=(49999,4,4))
wt_random=wt_random.astype(np.float32)
wv_random=np.random.randint(2, size=(9999,4,4))
wv_random=wv_random.astype(np.float32)
# place each w value at the centre pixel of its 7x7 block (rows/cols 3, 10, 17, 24);
# the commented loop below does the same thing
w_expand[:, 3::7, 3::7] = wt_random
#for i in range(4):
#    for j in range(4):
#        w_expand[:, i*7+3, j*7+3] = wt_random[:, i, j]
# alternative (commented out): expand each pixel to a full 7x7 block
#w_expand = wt_random.repeat(7, axis=2).repeat(7, axis=1)
# NOTE: the next two lines instead embed w into the top-left 4x4 corner (and overwrite
# the centre value at pixel (3,3)); keep whichever placement is actually wanted
w_expand[:, :4, :4] = wt_random
wv_expand[:, :4, :4] = wv_random
x,y,z=w_expand.shape
w_expand=w_expand.reshape((x,y,z,1))
x,y,z=wv_expand.shape
wv_expand=wv_expand.reshape((x,y,z,1))
#-----------------building w test---------------------------------------------
w_test = np.random.randint(2,size=(1,4,4))
w_test=w_test.astype(np.float32)
wt_expand=np.zeros((1,28,28),dtype='float32')
wt_expand[:,0:4,0:4]=w_test
wt_expand=wt_expand.reshape((1,28,28,1))
#-----------------------encoder------------------------------------------------
#------------------------------------------------------------------------------
wtm=Input((28,28,1))
image = Input((28, 28, 1))
conv1 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl1e')(image)
conv2 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl2e')(conv1)
conv3 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl3e')(conv2)
#conv3 = Conv2D(8, (3, 3), activation='relu', padding='same', name='convl3e', kernel_initializer='Orthogonal',bias_initializer='glorot_uniform')(conv2)
BN=BatchNormalization()(conv3)
#DrO1=Dropout(0.25,name='Dro1')(BN)
encoded = Conv2D(1, (5, 5), activation='relu', padding='same',name='encoded_I',dilation_rate=(2,2))(BN)
#-----------------------adding w---------------------------------------
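# elementwise add of two same-size (28,28,1) tensors -- this is the "same size" addition
# described in the question; adding a smaller 4x4 w only at the block centres would need
# masking/upsampling (see the sketch after the model summary)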
add_const = Kr.layers.Lambda(lambda x: x[0] + x[1])
encoded_merged = add_const([encoded,wtm])
#encoder=Model(inputs=[image,wtm], outputs= encoded_merged ,name='encoder')
#encoder.summary()
#-----------------------decoder------------------------------------------------
#------------------------------------------------------------------------------
#deconv_input=Input((28,28,1),name='inputTodeconv')
#encoded_merged = Input((28, 28, 2))
deconv1 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl1d')(encoded_merged)
deconv2 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl2d')(deconv1)
deconv3 = Conv2D(64, (5, 5), activation='relu',padding='same', name='convl3d')(deconv2)
deconv4 = Conv2D(64, (5, 5), activation='relu',padding='same', name='convl4d')(deconv3)
BNd=BatchNormalization()(deconv4)
#DrO2=Dropout(0.25,name='DrO2')(BNd)
decoded = Conv2D(1, (5, 5), activation='sigmoid', padding='same', name='decoder_output')(BNd)
#model=Model(inputs=image,outputs=decoded)
model=Model(inputs=[image,wtm],outputs=decoded)
decoded_noise = GaussianNoise(0.5)(decoded)
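# GaussianNoise is a regularisation layer, so the noise is only applied during training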
#----------------------w extraction------------------------------------
convw1 = Conv2D(64, (3,3), activation='relu', padding='same', name='conl1w')(decoded_noise)
convw2 = Conv2D(64, (3, 3), activation='relu', padding='same', name='convl2w')(convw1)
convw3 = Conv2D(64, (3, 3), activation='relu', padding='same', name='conl3w')(convw2)
convw4 = Conv2D(64, (3, 3), activation='relu', padding='same', name='conl4w')(convw3)
convw5 = Conv2D(64, (3, 3), activation='relu', padding='same', name='conl5w')(convw4)
convw6 = Conv2D(64, (3, 3), activation='relu', padding='same', name='conl6w')(convw5)
#BNed=BatchNormalization()(convw6)
#DrO3=Dropout(0.25, name='DrO3')(BNed)
pred_w = Conv2D(1, (1, 1), activation='sigmoid', padding='same', name='reconstructed_W')(convw6)
# filters=1 so the output matches W's single channel; sigmoid suits a binary (0/1) W,
# but reconsider the activation if W can take values outside [0, 1]
watermark_extraction=Model(inputs=[image,wtm],outputs=[decoded,pred_w])
watermark_extraction.summary()
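
For reference, here is a minimal sketch (not the code above) of how the block-centre operation described in the question could be expressed inside the graph: a fixed NumPy mask marks the centre pixel of every 7x7 block, variant (a) adds a constant there via a Lambda layer, and variant (b) adds the values of a small 4x4 input there by first repeating them with UpSampling2D. The layer names, the 0.5 constant, and sketch_model are illustrative assumptions.

import numpy as np
from keras.layers import Input, Conv2D, Lambda, UpSampling2D, Add
from keras.models import Model
from keras import backend as K

# 0/1 mask with a 1 only at the centre pixel of each 7x7 block (rows/cols 3, 10, 17, 24)
mask_np = np.zeros((1, 28, 28, 1), dtype='float32')
mask_np[:, 3::7, 3::7, :] = 1.0
centre_mask = K.constant(mask_np)                    # broadcasts over the batch dimension

image = Input((28, 28, 1))
w_small = Input((4, 4, 1))                           # one value per 7x7 block
feat = Conv2D(1, (5, 5), activation='relu', padding='same')(image)

# (a) add a fixed constant (here an assumed 0.5) at the block centres only
feat_const = Lambda(lambda x: x + 0.5 * centre_mask)(feat)

# (b) add the 4x4 values at the block centres only:
#     repeat each value over its 7x7 block, then keep only the centre pixel before adding
w_big = UpSampling2D(size=(7, 7))(w_small)           # (4, 4, 1) -> (28, 28, 1)
w_centres = Lambda(lambda x: x * centre_mask)(w_big)
feat_w = Add()([feat, w_centres])

sketch_model = Model(inputs=[image, w_small], outputs=[feat_const, feat_w])
sketch_model.summary()

Variant (b) works because UpSampling2D repeats each value across its 7x7 block, so after multiplying by the mask only the centre pixel of every block still carries the w value when it reaches the Add layer.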