So I have just started experimenting with TensorFlow, but I'm finding the concepts hard to grasp. I'm currently focusing on the MNIST dataset, though with only 8000 training samples and 2000 test samples. The small code snippet I have at the moment is:
from keras.layers import Input, Dense, initializers
from keras.models import Model
from Dataset import Dataset
import matplotlib.pyplot as plt
from keras import optimizers, losses
import tensorflow as tf
import keras.backend as K
#global variables
d = Dataset()
num_features = d.X_train.shape[1]
low_dim = 32
def autoencoder():
    w = initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
    input = Input(shape=(num_features,))
    encoded = Dense(low_dim, activation='relu', kernel_initializer=w)(input)
    decoded = Dense(num_features, activation='sigmoid', kernel_initializer=w)(encoded)
    autoencoder = Model(input, decoded)
    adam = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
    autoencoder.compile(optimizer=adam, loss='binary_crossentropy')
    autoencoder.fit(d.X_train, d.X_train,
                    epochs=50,
                    batch_size=64,
                    shuffle=True,
                    )
    encoded_imgs = autoencoder.predict(d.X_test)
    decoded_imgs = autoencoder.predict(encoded_imgs)
    #sess = tf.InteractiveSession()
    #error = losses.mean_absolute_error(decoded_imgs[0], d.X_train[0])
    #print(error.eval())
    #print(decoded_imgs.shape)
    #sess.close()
    n = 20  # how many digits we will display
    plt.figure(figsize=(20, 4))
    for i in range(n):
        # display original
        #sess = tf.InteractiveSession()
        error = losses.mean_absolute_error(decoded_imgs[n], d.X_test[n])
        #print(error.eval())
        #print(decoded_imgs.shape)
        #sess.close()
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(d.X_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # display reconstruction
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        #print(error)
    plt.show()
    return error
What I want to do is store the errors as a list that I can later print or plot in a graph, but how do I do that efficiently with TensorFlow / Keras? Thanks in advance.
Answer 0 (score: 1)
You can use the CSVLogger callback to store the errors in a CSV file. Here is a code snippet for this task.
from keras.callbacks import CSVLogger

# define callbacks
callbacks = [CSVLogger(path_csv_logger, separator=';', append=True)]

# pass callbacks to model.fit() or model.fit_generator()
model.fit_generator(
    train_batch, train_steps, epochs=10, callbacks=callbacks,
    validation_data=validation_batch, validation_steps=val_steps)
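After training you can read the log back and plot the per-epoch loss. A minimal sketch, assuming pandas is installed and path_csv_logger points to the file written above:

import pandas as pd
import matplotlib.pyplot as plt

# CSVLogger writes one row per epoch with columns such as 'epoch' and 'loss'
# (plus 'val_loss' when validation data is passed), using the separator chosen above.
history_df = pd.read_csv(path_csv_logger, sep=';')

plt.plot(history_df['epoch'], history_df['loss'], label='training loss')
if 'val_loss' in history_df.columns:
    plt.plot(history_df['epoch'], history_df['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()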
EDIT: To store the errors in a list, you can use something like this:
# source https://keras.io/callbacks/
import keras

class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.losses = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))
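A sketch of how you might use it, reusing the autoencoder model, data object d, and plt from the question's code:

history = LossHistory()
autoencoder.fit(d.X_train, d.X_train,
                epochs=50,
                batch_size=64,
                shuffle=True,
                callbacks=[history])

# history.losses is now a plain Python list with one loss value per batch,
# so it can be printed or plotted directly.
plt.plot(history.losses)
plt.xlabel('batch')
plt.ylabel('loss')
plt.show()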