I have an autoencoder with two outputs, and I want to compute the BER (bit error rate) between its input and output, but I have some questions.

1. Is there a Python function for this calculation? In MATLAB, for example, we use biterr to compare bit errors and psnr to check how imperceptible the changes to an image are. Is there equivalent functionality in Python?

2. Can these metrics be used as loss functions? For example, I currently use mse as the loss function. Is it possible to use psnr instead, and how can I do that?

I have attached the code below.
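To make question 1 concrete: by BER I mean the fraction of mismatched bits after hard-thresholding the network output. A minimal NumPy sketch of that idea (my own helper, assuming a binary watermark and a 0.5 threshold; MATLAB's biterr returns the raw error count, while this returns the rate):

import numpy as np

def bit_error_rate(w_true, w_pred, threshold=0.5):
    # hard-threshold the (sigmoid) outputs back to bits, then count mismatches
    bits_true = (np.asarray(w_true) >= threshold).astype(np.uint8)
    bits_pred = (np.asarray(w_pred) >= threshold).astype(np.uint8)
    return np.count_nonzero(bits_true != bits_pred) / bits_true.size

For PSNR itself, tf.image.psnr(a, b, max_val) is the closest TensorFlow counterpart of MATLAB's psnr that I know of, and scikit-image also ships a PSNR function.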
#-----------------------including related inputs-------------------------------
from keras.layers import Input, Concatenate, GaussianNoise, Dropout, BatchNormalization, Lambda
from keras.layers import Conv2D
from keras.models import Model
from keras.datasets import mnist
from keras.callbacks import TensorBoard, EarlyStopping, ReduceLROnPlateau
from keras import backend as K
from keras import layers
import matplotlib.pyplot as plt
import tensorflow as tf
import keras as Kr
import numpy as np
import pylab as pl
import matplotlib.cm as cm
import keract
#-----------------building w train---------------------------------------------
# random 4x4 binary watermarks, zero-padded to 28x28 so they match the image
# shape; sized to the 50000/10000 train/validation split made further below
w_expand=np.zeros((50000,28,28),dtype='float32')
wv_expand=np.zeros((10000,28,28),dtype='float32')
wt_random=np.random.randint(2, size=(50000,4,4))
wt_random=wt_random.astype(np.float32)
wv_random=np.random.randint(2, size=(10000,4,4))
wv_random=wv_random.astype(np.float32)
w_expand[:,:4,:4]=wt_random
wv_expand[:,:4,:4]=wv_random
x,y,z=w_expand.shape
w_expand=w_expand.reshape((x,y,z,1))
x,y,z=wv_expand.shape
wv_expand=wv_expand.reshape((x,y,z,1))
#-----------------building w test---------------------------------------------
w_test = np.random.randint(2,size=(1,4,4))
w_test=w_test.astype(np.float32)
wt_expand=np.zeros((1,28,28),dtype='float32')
wt_expand[:,0:4,0:4]=w_test
wt_expand=wt_expand.reshape((1,28,28,1))
#wt_expand=np.repeat(wt_expand,10000,0)
#-----------------------encoder------------------------------------------------
#------------------------------------------------------------------------------
wtm=Input((28,28,1))
image = Input((28, 28, 1))
conv1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='convl1e')(image)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', name='convl2e')(conv1)
conv3 = Conv2D(64, (3, 3), activation='relu', padding='same', name='convl3e')(conv2)
#conv3 = Conv2D(8, (3, 3), activation='relu', padding='same', name='convl3e', kernel_initializer='Orthogonal',bias_initializer='glorot_uniform')(conv2)
BN=BatchNormalization()(conv3)
DrO1=Dropout(0.25,name='Dro1')(BN)
encoded = Conv2D(1, (3, 3), activation='relu', padding='same',name='encoded_I')(DrO1)
#-----------------------adding watermark---------------------------------------
add_wtm = Lambda(lambda t: t[0] + t[1])   # element-wise addition embeds the watermark into the encoded image
encoded_merged = add_wtm([encoded, wtm])
#-----------------------decoder------------------------------------------------
#------------------------------------------------------------------------------
deconv1 = Conv2D(64, (3, 3), activation='elu', padding='same', name='convl1d')(encoded_merged)
deconv2 = Conv2D(64, (3, 3), activation='elu', padding='same', name='convl2d')(deconv1)
deconv3 = Conv2D(64, (3, 3), activation='elu',padding='same', name='convl3d')(deconv2)
BNd=BatchNormalization()(deconv3)
DrO2=Dropout(0.25, name='Dro2')(BNd)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='decoder_output')(DrO2)
model=Model(inputs=[image,wtm],outputs=decoded)
#----------------------w extraction------------------------------------
convw1 = Conv2D(64, (3,3), activation='elu', padding='same', name='conl1w')(decoded)
convw2 = Conv2D(64, (3, 3), activation='elu', padding='same', name='convl2w')(convw1)
convw3 = Conv2D(64, (3, 3), activation='elu', padding='same', name='conl3w')(convw2)
BNed=BatchNormalization()(convw3)
DrO3=Dropout(0.25, name='DrO3')(BNed)
# a single filter with sigmoid activation so the output matches the binary watermark W
pred_w = Conv2D(1, (1, 1), activation='sigmoid', padding='same', name='reconstructed_W')(DrO3)
watermark_extraction=Model(inputs=[image,wtm],outputs=[decoded,pred_w])
watermark_extraction.summary()
#----------------------training the model--------------------------------------
#------------------------------------------------------------------------------
#----------------------Data preparation----------------------------------------
(x_train, _), (x_test, _) = mnist.load_data()
x_validation=x_train[:10000,:,:]
x_train=x_train[10000:,:,:]
#
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_validation = x_validation.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_validation = np.reshape(x_validation, (len(x_validation), 28, 28, 1))
#---------------------compile and train the model------------------------------
# accuracy is not a meaningful metric for this model; mae tracks reconstruction quality instead
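# --- question 2: using PSNR instead of 'mse' (a sketch, not the original setup) ---
# since higher PSNR is better, the loss returns negative PSNR so that
# minimizing the loss maximizes PSNR; assumes pixel values scaled to [0, 1],
# for which PSNR = -10*log10(MSE)
def psnr_loss(y_true, y_pred):
    # mean squared error per image (reduce over height, width, channels)
    mse = K.mean(K.square(y_pred - y_true), axis=[1, 2, 3])
    # 10*log10(mse) equals -PSNR; K.epsilon() avoids log(0)
    return 10.0 * K.log(mse + K.epsilon()) / np.log(10.0)
# to try it, pass loss={'decoder_output': psnr_loss, 'reconstructed_W': 'binary_crossentropy'} below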
watermark_extraction.compile(optimizer='SGD', loss={'decoder_output':'mse','reconstructed_W':'binary_crossentropy'}, metrics=['mae'])
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200)
rlrp = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_delta=1E-7, verbose=1)
history=watermark_extraction.fit([x_train,w_expand], [x_train,w_expand],
epochs=1000,
batch_size=32,
validation_data=([x_validation,wv_expand], [x_validation,wv_expand]),
callbacks=[TensorBoard(log_dir='E:/tmp/AutewithW200', histogram_freq=0, write_graph=False),rlrp,es])
model.summary()
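#----------------------BER / PSNR after training (question 1)------------------
# a quick check using the helper sketched above: embed w_test, run the model,
# and compare the recovered watermark bits with the embedded ones
decoded_imgs, w_rec = watermark_extraction.predict([x_test[:1], wt_expand])
print('watermark BER:', bit_error_rate(wt_expand, w_rec))
# PSNR between input and reconstructed image (pixels are scaled to [0, 1]);
# tf.image.psnr(a, b, max_val=1.0) computes the same quantity as a tensor
mse_img = np.mean((x_test[:1] - decoded_imgs) ** 2)
print('image PSNR (dB):', 10.0 * np.log10(1.0 / mse_img))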