I'm running into a problem with TensorBoard, specifically in a Hyperas model when histogram_freq is non-zero.
I only added the TensorBoard callback to the Hyperas MNIST example. With histogram_freq = 0 everything works fine, but with any other value I get this error:
InvalidArgumentError (see above for traceback): Shape [-1,784] has negative dimensions
[[Node: dense_1_input = Placeholder[dtype=DT_FLOAT, shape=[?,784], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
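For reference, the callback is the only change I made to the stock Hyperas MNIST example. A minimal sketch of the two configurations I compared (same arguments as in the full script below, only histogram_freq differs):

import keras.callbacks as callbacks

# Works fine:
tb_ok = callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0,
                              write_graph=True, write_images=True)

# Fails with the InvalidArgumentError shown above:
tb_fail = callbacks.TensorBoard(log_dir='./Graph', histogram_freq=1,
                                write_graph=True, write_images=True)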
I'm using:
Windows 7
tensorflow 1.3.0
tensorflow-tensorboard 0.1.6
hyperas 0.4
hyperopt 0.1
python 3.5.3
I have also tried a different machine (Windows 10) and tensorflow version 1.2.1. Does anyone know how to fix this?
Example.py:
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from keras.datasets import mnist
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
from keras.utils import np_utils
import keras.callbacks as callbacks
from hyperas import optim
from hyperas.distributions import choice, uniform, conditional
def data():
    """
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    nb_classes = 10
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    return x_train, y_train, x_test, y_test
def model(x_train, y_train, x_test, y_test):
    """
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(100))
        # We can also choose between complete sets of layers
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    # The TensorBoard callback added to the stock example;
    # histogram_freq=1 is what triggers the error above.
    tbCallBack = callbacks.TensorBoard(log_dir='./Graph', histogram_freq=1,
                                       write_graph=True, write_images=True)

    model.fit(x_train, y_train,
              batch_size={{choice([64, 128])}},
              epochs=3,
              verbose=2,
              validation_data=(x_test, y_test),
              callbacks=[tbCallBack])
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=2,
                                          trials=Trials())
    X_train, Y_train, X_test, Y_test = data()
    print("Evaluation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
    print("Best performing model chosen hyper-parameters:")
    print(best_run)