所以我对TensorFlow非常陌生,所以我的问题可能有点愚蠢或显而易见。所以我使用函数在TensorFlow中编写了一个简单的小代码。所以基本上我正在读取1000个参数并将其存储在numpy数组中,并将其传递给具有2个隐藏层的NN。这是我的代码:
import tensorflow as tf
import numpy as np
import random
pc = open("../data/pcNum.txt", 'r')
npc = open("../data/npcNum.txt", 'r')
lines1 = pc.readlines()
lines2 = npc.readlines()
size = 200
learning_rate = 0.01
epochs = 200
trainDataset = np.array([])
labels = np.array([])
trainList = []
def arrayfy(sequence):
seq = np.array([])
for i in range(0, len(sequence)):
seq = np.append(seq, int(sequence[i]))
return seq
for i in range(0, size):
sequence = lines1[i].strip()
trainList.append((sequence, 1))
sequence = lines2[i].strip()
trainList.append((sequence, 0))
random.shuffle(trainList)
for i in trainList:
seq = arrayfy(i[0])
trainDataset = np.append(trainDataset, seq)
if(i[1] == 0):
label = np.array([0, 1])
else:
label = np.array([1, 0])
labels = np.append(labels, label)
trainDataset = trainDataset.reshape((2 * size, 1000))
trainDataset = trainDataset.T
labels = np.transpose(labels.reshape((-1, 2)))
dataset = np.asarray(trainDataset, np.float32)
labels = np.asarray(labels, np.float32)
dataset = tf.convert_to_tensor(dataset, tf.float32)
#labels = tf.convert_to_tensor(labels, tf.float32)
# Begining of TensorFlow code
l1_nodes = 100
l2_nodes = 100
out_nodes = 2
weights_l1 = tf.get_variable('weights_l1', dtype = tf.float32, initializer = tf.random_normal((1000, l1_nodes), mean = 0.0, stddev = 1.0))
weights_l2 = tf.get_variable('weights_l2', dtype = tf.float32, initializer = tf.random_normal((l1_nodes, l2_nodes), mean = 0.0, stddev = 1.0))
weights_out = tf.get_variable('weights_out', dtype = tf.float32, initializer = tf.random_normal((l2_nodes, 2), mean = 0.0, stddev = 1.0))
bias_l1 = tf.get_variable('bias_l1', dtype = tf.float32, initializer = tf.constant(0.0))
bias_l2 = tf.get_variable('bias_l2', dtype = tf.float32, initializer = tf.constant(0.0))
bias_out = tf.get_variable('bias_out', dtype = tf.float32, initializer = tf.constant(0.0))
"""a1 = tf.placeholder(dtype = tf.float32, name = 'a1')
a2 = tf.placeholder(dtype = tf.float32, name = 'a2')
z_out = tf.placeholder(dtype = tf.float32, name = 'z_out')
hypothesis = tf.placeholder(dtype = tf.float32, name = 'hypothesis')"""
def forwardPropagation(dataset, weights_l1, bias_l1, weights_l2, bias_l2, weights_out, bias_out):
a1 = tf.sigmoid(tf.tensordot(tf.transpose(weights_l1), dataset, axes = 1) + bias_l1)
a2 = tf.sigmoid(tf.tensordot(tf.transpose(weights_l2), a1, axes = 1) + bias_l2)
z_out = tf.tensordot(tf.transpose(weights_out), a2, axes = 1) + bias_out
return z_out
entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits = z_out, labels = labels, name = 'cross_entropy')
loss = tf.reduce_mean(entropy, name = 'loss')
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
hypothesis = tf.nn.softmax(z_out)
correct_preds = tf.equal(tf.argmax(hypothesis, 0), tf.argmax(labels, 0))
accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
writer = tf.summary.FileWriter('./graphs/logreg', tf.get_default_graph())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(0, epochs):
z_out = sess.run(forwardPropagation(dataset, weights_l1, bias_l1, weights_l2, bias_l2, weights_out, bias_out))
_, l = sess.run([optimizer, loss]) #, feed_dict = {z_out:z_out, labels:labels})
sess.run(hypothesis)
sess.run(correct_preds)
acc = sess.run(accuracy)
print("Epoch :", i+1, ", loss : ", l, ", accuracy :", acc)
writer.close()
它给出错误为:
entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits = z_out, labels = labels, name = 'cross_entropy')
NameError: name 'z_out' is not defined
那我该怎么做才能使其正常运行并保持功能呢?
当我删除该功能时,虽然我不确定它是否正在执行我期望的功能,但它显然可以运行:
import tensorflow as tf
import numpy as np
import random
pc = open("../data/pcNum.txt", 'r')
npc = open("../data/npcNum.txt", 'r')
lines1 = pc.readlines()
lines2 = npc.readlines()
size = 200
learning_rate = 0.01
epochs = 200
trainDataset = np.array([])
labels = np.array([])
trainList = []
def arrayfy(sequence):
seq = np.array([])
for i in range(0, len(sequence)):
seq = np.append(seq, int(sequence[i]))
return seq
for i in range(0, size):
sequence = lines1[i].strip()
trainList.append((sequence, 1))
sequence = lines2[i].strip()
trainList.append((sequence, 0))
random.shuffle(trainList)
for i in trainList:
seq = arrayfy(i[0])
trainDataset = np.append(trainDataset, seq)
if(i[1] == 0):
label = np.array([0, 1])
else:
label = np.array([1, 0])
labels = np.append(labels, label)
trainDataset = trainDataset.reshape((2 * size, 1000))
trainDataset = trainDataset.T
labels = np.transpose(labels.reshape((-1, 2)))
dataset = np.asarray(trainDataset, np.float32)
labels = np.asarray(labels, np.float32)
dataset = tf.convert_to_tensor(dataset, tf.float32)
#labels = tf.convert_to_tensor(labels, tf.float32)
l1_nodes = 100
l2_nodes = 100
out_nodes = 2
weights_l1 = tf.get_variable('weights_l1', dtype = tf.float32, initializer = tf.random_normal((1000, l1_nodes), mean = 0.0, stddev = 1.0))
weights_l2 = tf.get_variable('weights_l2', dtype = tf.float32, initializer = tf.random_normal((l1_nodes, l2_nodes), mean = 0.0, stddev = 1.0))
weights_out = tf.get_variable('weights_out', dtype = tf.float32, initializer = tf.random_normal((l2_nodes, 2), mean = 0.0, stddev = 1.0))
bias_l1 = tf.get_variable('bias_l1', dtype = tf.float32, initializer = tf.constant(0.0))
bias_l2 = tf.get_variable('bias_l2', dtype = tf.float32, initializer = tf.constant(0.0))
bias_out = tf.get_variable('bias_out', dtype = tf.float32, initializer = tf.constant(0.0))
"""a1 = tf.placeholder(dtype = tf.float32, name = 'a1')
a2 = tf.placeholder(dtype = tf.float32, name = 'a2')
z_out = tf.placeholder(dtype = tf.float32, name = 'z_out')
hypothesis = tf.placeholder(dtype = tf.float32, name = 'hypothesis')"""
#def forwardPropagation(dataset, weights_l1, bias_l1, weights_l2, bias_l2, weights_out, bias_out):
a1 = tf.sigmoid(tf.tensordot(tf.transpose(weights_l1), dataset, axes = 1) + bias_l1)
a2 = tf.sigmoid(tf.tensordot(tf.transpose(weights_l2), a1, axes = 1) + bias_l2)
z_out = tf.tensordot(tf.transpose(weights_out), a2, axes = 1) + bias_out
#return z_out
entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits = z_out, labels = labels, name = 'cross_entropy')
loss = tf.reduce_mean(entropy, name = 'loss')
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
hypothesis = tf.nn.softmax(z_out)
correct_preds = tf.equal(tf.argmax(hypothesis, 0), tf.argmax(labels, 0))
accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
writer = tf.summary.FileWriter('./graphs/logreg', tf.get_default_graph())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(0, epochs):
#z_out = sess.run(forwardPropagation(dataset, weights_l1, bias_l1, weights_l2, bias_l2, weights_out, bias_out))
_, l = sess.run([optimizer, loss]) #, feed_dict = {z_out:z_out, labels:labels})
sess.run(hypothesis)
sess.run(correct_preds)
acc = sess.run(accuracy)
print("Epoch :", i+1, ", loss : ", l, ", accuracy :", acc)
writer.close()
非常感谢您的帮助和提示。
答案 0 :(得分:1)
在初始化变量之前,尝试使用z_out
作为参数来调用您的熵计算。在第一个代码中,您需要在下一行中为z_out
分配一个值,但是您已经注释掉了代码。
z_out = tf.placeholder(dtype = tf.float32, name = 'z_out')
接下来,您将使用函数forwardPropagation
返回z_out
的值,但是直到代码的最后,才调用forwardPropagation
函数。然后在下一行中,您尝试使用尚未初始化的entropy
变量来计算z_out
,这就是为什么会出现错误的原因。
您的第二组代码有效,因为您正在执行z_out
计算,然后计算entropy
。要使第一组代码正常工作,必须在计算forwardPropagation
之前调用z_out
来定义entropy
。