I built a neural network with two hidden layers.
import tensorflow as tf
from collections import namedtuple

def multilayer_perceptron():
    tf.reset_default_graph()
    inputs = tf.placeholder(tf.float32, shape=[None, train_x.shape[1]])
    y = tf.placeholder(tf.float32, shape=[None, 1])
    weights = {
        'h1': tf.Variable(tf.random_normal([train_x.shape[1], n_hidden_1])),
        'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
        'out': tf.Variable(tf.random_normal([n_hidden_2, 1]))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'b2': tf.Variable(tf.random_normal([n_hidden_2])),
        'out': tf.Variable(tf.random_normal([1]))
    }
    # First hidden layer with ReLU activation
    layer_1 = tf.add(tf.matmul(inputs, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Second hidden layer with ReLU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation (raw logits)
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    learning_rate = tf.placeholder(tf.float32)
    is_training = tf.Variable(True, dtype=tf.bool)
    # Sigmoid cross-entropy loss computed on the logits
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=out_layer)
    cost = tf.reduce_mean(cross_entropy)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    # Binary accuracy: round the sigmoid output and compare with the labels
    predicted = tf.nn.sigmoid(out_layer)
    correct_pred = tf.equal(tf.round(predicted), y)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # Export the nodes as a namedtuple so the session code can reach them
    export_nodes = ['inputs', 'y', 'learning_rate', 'is_training', 'out_layer',
                    'cost', 'optimizer', 'predicted', 'accuracy']
    Graph = namedtuple('Graph', export_nodes)
    local_dict = locals()
    graph = Graph(*[local_dict[each] for each in export_nodes])
    return graph

pred1 = multilayer_perceptron()
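The function relies on n_hidden_1, n_hidden_2, and train_x already being defined in the surrounding scope. As a quick sanity check of the returned handle, one can feed a few random rows through the untrained network and inspect the shape of the logits (a minimal sketch; the batch of 4 random rows is only an illustration, not my real data):

import numpy as np

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    dummy_x = np.random.rand(4, train_x.shape[1]).astype(np.float32)
    logits = sess.run(pred1.out_layer, feed_dict={pred1.inputs: dummy_x})
    print(logits.shape)  # expected: (4, 1)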
Next, I created a function that yields the input and output values batch by batch:
def get_batch(data_x, data_y, batch_size=32):
    # Number of full batches; any remainder rows are dropped
    batch_n = len(data_x) // batch_size
    for i in range(batch_n):
        batch_x = data_x[i * batch_size:(i + 1) * batch_size]
        batch_y = data_y[i * batch_size:(i + 1) * batch_size]
        yield batch_x, batch_y
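Note that because of the floor division, rows beyond the last full batch are silently discarded. A toy illustration with random data (the shapes here are made up for the example):

import numpy as np

toy_x = np.random.rand(100, 5)
toy_y = np.random.rand(100, 1)
batches = list(get_batch(toy_x, toy_y, batch_size=32))
print(len(batches))         # 3 full batches; the last 4 rows are dropped
print(batches[0][0].shape)  # (32, 5)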
epochs = 25
train_collect = 20
train_print = train_collect * 2
learning_rate_value = 0.001
batch_size = 400
x_collect = []
train_loss_collect = []
train_acc_collect = []
valid_loss_collect = []
valid_acc_collect = []
saver = tf.train.Saver()
Finally, I launched the session that prints the loss and the accuracy of the trained model, and I saved the result in a .ckpt file:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    iteration = 0
    for e in range(epochs):
        for batch_x, batch_y in get_batch(train_x, train_y, batch_size):
            iteration += 1
            feed = {pred1.inputs: batch_x,
                    pred1.y: batch_y,
                    pred1.learning_rate: learning_rate_value,
                    pred1.is_training: True
                    }
            train_loss, _, train_acc = sess.run([pred1.cost, pred1.optimizer, pred1.accuracy],
                                                feed_dict=feed)
            if iteration % train_collect == 0:
                x_collect.append(e)
                train_loss_collect.append(train_loss)
                train_acc_collect.append(train_acc)
                if iteration % train_print == 0:
                    print("Epoch: {}/{}".format(e + 1, epochs),
                          "Train Loss: {:.4f}".format(train_loss),
                          "Train Acc: {:.4f}".format(train_acc))
                # Evaluate on the validation set at the same collection points
                feed = {pred1.inputs: valid_x,
                        pred1.y: valid_y,
                        pred1.is_training: False
                        }
                val_loss, val_acc = sess.run([pred1.cost, pred1.accuracy], feed_dict=feed)
                valid_loss_collect.append(val_loss)
                valid_acc_collect.append(val_acc)
                if iteration % train_print == 0:
                    print("Epoch: {}/{}".format(e + 1, epochs),
                          "Validation Loss: {:.4f}".format(val_loss),
                          "Validation Acc: {:.4f}".format(val_acc))
    saver.save(sess, "./insurance2.ckpt")
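The lists collected above are intended for plotting the learning curves after training; a minimal matplotlib sketch (assuming matplotlib is installed):

import matplotlib.pyplot as plt

# Train and validation metrics are collected at the same iterations,
# so they share the x_collect axis
plt.plot(x_collect, train_loss_collect, 'r--', label='train loss')
plt.plot(x_collect, valid_loss_collect, 'b--', label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()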
When I launch the session on the test data, the code gives me an error:
model = multilayer_perceptron()
restorer = tf.train.Saver()
with tf.Session() as sess:
    restorer.restore(sess, "./insurance2.ckpt")
    feed = {
        pred1.inputs: test_data,
        pred1.is_training: False
    }
    test_predict = sess.run(pred1.predicted, feed_dict=feed)
The two errors are:
ValueError: Tensor Tensor("Placeholder:0", shape=(?, 125), dtype=float32) is not an element of this graph.
TypeError: Cannot interpret feed_dict key as Tensor: Tensor Tensor("Placeholder:0", shape=(?, 125), dtype=float32) is not an element of this graph.
In my graph I do export the inputs (inputs is in export_nodes), so I don't understand why the tensor is not an element of the graph.