get_reduce_mean is always 0

Time: 2019-03-30 16:57:25

Tags: python tensorflow artificial-intelligence stock

I am trying to train an AI program to predict stock prices. Every time I run it, my cost is 0 and my test accuracy is 100%. I can't figure out what I am doing wrong.

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# dates and highPrice are assumed to be loaded earlier (omitted from the question)
placeholder1 = tf.placeholder(tf.float32, shape=[None, 3])


#trainers
dates_train = np.array(dates[0:8000]).astype(np.float32)
highPrice_train = np.array(highPrice[0:8000]).astype(np.float32)
print(dates_train[0][0])

#testers
dates_test = np.array(dates[8000:9564]).astype(np.float32)
highPrice_test = np.array(highPrice[8000:9564]).astype(np.float32)

def get_training_batch(n):
    n = min(n,7999)
    idx = np.random.choice(7999,n)
    return dates_train[idx],highPrice_train[idx]

n_hidden_1 = 100
n_hidden_2 = 100

weights = {
    'h1' : tf.Variable(tf.random_normal([3, n_hidden_1])),
    'h2' : tf.Variable(tf.random_normal([n_hidden_1,n_hidden_2])),
    'out' : tf.Variable(tf.random_normal([n_hidden_2,1]))
}

biases = {
    'b1' : tf.Variable(tf.random_normal([n_hidden_1])),
    'b2' : tf.Variable(tf.random_normal([n_hidden_2])),
    'out' : tf.Variable(tf.random_normal([1]))
}

layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(placeholder1, weights['h1']), biases['b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))

y = tf.matmul(layer_2,weights['out']) + biases['out']

placeholder2 = tf.placeholder(tf.float32,shape=[None,1])
print("Mean")
print(sum(highPrice)/len(highPrice))

mean = tf.reduce_mean(highPrice)
print(mean)

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y, labels=placeholder2))
print("Printing cross_entropy")
print(cross_entropy)

rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(rate).minimize(cross_entropy)
print(optimizer)

prediction = tf.nn.softmax(y)
print(prediction)

##Training
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(placeholder2,1))
accuracy = 100 * tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy)

epochs = 1000
batch_size = 10

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

cost = []
accu = []
test_accu = []
for ep in range(epochs):
    x_feed,y_feed = get_training_batch(batch_size)
    y_feed = np.reshape(y_feed,[10,1])
    _,cos,predictions,acc = sess.run([optimizer, cross_entropy, prediction, accuracy], feed_dict={placeholder1:x_feed, placeholder2:y_feed})

    highPrice_test = np.reshape(highPrice_test,[1564,1])
    test_acc = accuracy.eval(feed_dict={placeholder1:dates_test, placeholder2:highPrice_test})

    cost.append(cos)
    accu.append(acc)
    test_accu.append(test_acc)

    if(ep % (epochs // 10) == 0):
        print('[%d]: Cos: %.4f, Acc: %.1f%%, Test Acc: %.1f%%' % (ep,cos,acc,test_acc))

plt.plot(cost)
plt.title('cost')
plt.show()

plt.plot(accu)
plt.title('Train Accuracy')
plt.show()

plt.plot(test_accu)
plt.title('Test Accuracy')
plt.show()

index = 36
p = sess.run(prediction, feed_dict = {placeholder1:dates_train[index:index +1]})[0]

[0]: Cos: 0.0000, Acc: 100.0%, Test Acc: 100.0%
[100]: Cos: 0.0000, Acc: 100.0%, Test Acc: 100.0%

That is the output I get on every run. I would expect a non-zero cost, and the accuracy should not be 100%.

1 Answer:

Answer (score: 1)

It seems the problem is that softmax_cross_entropy_with_logits_v2 requires more than one output class: Cost function always returning zero for a binary classification in tensorflow. If I change highPrice to be 2-dimensional, it works.
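
A minimal sketch of why that happens, using illustrative numbers and the same TF 1.x API as the question: with a single output column, softmax normalizes over one value, so every probability is exactly 1.0 and log-softmax is 0, which makes the cross-entropy 0 regardless of the labels; likewise argmax over that single column is 0 for both prediction and label, so accuracy is always 100%.

import tensorflow as tf

# Illustrative values only: one logit and one raw-price "label" per example.
logits = tf.constant([[2.3], [-0.7], [5.1]])      # shape [batch, 1]
labels = tf.constant([[101.2], [99.8], [103.4]])  # shape [batch, 1]

probs = tf.nn.softmax(logits)                     # always [[1.], [1.], [1.]]
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels))

# argmax over a single column is 0 for both tensors, so they always "match".
acc = 100 * tf.reduce_mean(tf.cast(
    tf.equal(tf.argmax(probs, 1), tf.argmax(labels, 1)), tf.float32))

with tf.Session() as s:
    print(s.run([probs, loss, acc]))  # probabilities 1.0, loss 0.0, accuracy 100.0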

Note that, if I understand your problem correctly, you are trying to predict the exact stock price. A better approach might be to predict only whether the price goes up or down, so you could create classification labels such as (up, unchanged, down); a rough sketch of that follows, and the full corrected code comes after it.
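
As an illustration of that suggestion only (not part of the corrected code below): one-hot (up, unchanged, down) labels could be derived from the raw price series roughly as shown here. The make_move_labels helper and the zero threshold are hypothetical choices, not something from the original question.

import numpy as np

def make_move_labels(prices, threshold=0.0):
    # Hypothetical helper: one-hot (up, unchanged, down) labels from consecutive prices.
    diffs = np.diff(np.asarray(prices, dtype=np.float32))
    classes = np.where(diffs > threshold, 0,            # 0 = up
                       np.where(diffs < -threshold, 2,  # 2 = down
                                1))                     # 1 = unchanged
    labels = np.zeros((len(classes), 3), dtype=np.float32)
    labels[np.arange(len(classes)), classes] = 1.0
    return labels

# e.g. highPrice_labels = make_move_labels(highPrice)  # shape [len(highPrice) - 1, 3]
# with labels like these, y_dimensions would be 3 instead of 2

The full working version of your code, with highPrice made 2-dimensional, follows: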

import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt

y_dimensions = 2

placeholder1 = tf.placeholder(tf.float32, shape=[None, 3])

dates = np.array([pd.date_range('2012-10-01', periods=10000, freq='10min'),
                  pd.date_range('2012-10-01', periods=10000, freq='20min'),
                  pd.date_range('2012-10-01', periods=10000,
                                freq='30min')]).T

highPrice = np.random.random((10000, y_dimensions)) * 100

# training set
dates_train = np.array(dates[0:8000]).astype(np.float32)
highPrice_train = np.array(highPrice[0:8000]).astype(np.float32)
print("dates train", dates_train[0])

# testing set
dates_test = np.array(dates[8000:9564]).astype(np.float32)
highPrice_test = np.array(highPrice[8000:9564]).astype(np.float32)

def get_training_batch(n):
    n = min(n, 7999)
    idx = np.random.choice(7999, n)  # create size n sample from range 7999
    #print("len batch:", len(idx))
    return dates_train[idx], highPrice_train[idx]

n_hidden_1 = 100
n_hidden_2 = 100

weights = {
    'h1': tf.Variable(tf.random_normal([3, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, y_dimensions]))
}

biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([1]))
}

layer_1 = tf.nn.sigmoid(
    tf.add(tf.matmul(placeholder1, weights['h1']), biases['b1']))
layer_2 = tf.nn.sigmoid(
    tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))

y = tf.matmul(layer_2, weights['out']) + biases['out']



placeholder2 = tf.placeholder(tf.float32, shape=[None, y_dimensions])

print("Mean:", sum(highPrice) / len(highPrice))

mean = tf.reduce_mean(highPrice)
print("TF mean:", mean)

# labels are high prices, logits are model output
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=y,
                                               labels=placeholder2))
print("cross_entropy:", cross_entropy)

rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(rate).minimize(cross_entropy)
print("optimizer:", optimizer)

prediction = tf.nn.softmax(y)
print("Prediction:", prediction)


##Training
correct_prediction = tf.equal(tf.argmax(prediction, 1),
                              tf.argmax(placeholder2, 1))
accuracy = 100 * tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("accuracy:", accuracy)

epochs = 300
batch_size = 10


sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())


cost = []
accu = []
test_accu = []
for ep in range(epochs):

    x_feed, y_feed = get_training_batch(batch_size)
    y_feed = np.reshape(y_feed, [batch_size, y_dimensions])
    _, cos, predictions, acc = sess.run(
        [optimizer, cross_entropy, prediction, accuracy],
        feed_dict={placeholder1: x_feed, placeholder2: y_feed})

    highPrice_test = np.reshape(highPrice_test, [1564, y_dimensions])
    test_acc = accuracy.eval(
        feed_dict={placeholder1: dates_test, placeholder2: highPrice_test})

    # create history
    cost.append(cos)
    accu.append(acc)
    test_accu.append(test_acc)

    # every 10 epochs
    if ep % (epochs // 10) == 0:
        print('[%d]: Cos: %.4f, Acc: %.1f%%, Test Acc: %.1f%%' % (
        ep, cos, acc, test_acc))

plt.plot(cost)
plt.title('cost')
plt.show()

plt.plot(accu)
plt.title('Train Accuracy')
plt.show()

plt.plot(test_accu)
plt.title('Test Accuracy')
plt.show()

index = 78
p = sess.run(prediction,
             feed_dict={placeholder1: dates_train[index:index + 1]})[0]

print("final x input for prediction:", dates_train[index:index + 1])
print("final y prediction:", p)

Output:

[0]: Cos: 232.5091, Acc: 50.0%, Test Acc: 50.4%
[30]: Cos: 1119.8948, Acc: 70.0%, Test Acc: 49.6%
[60]: Cos: 554.2071, Acc: 50.0%, Test Acc: 50.4%
[90]: Cos: 668.4500, Acc: 60.0%, Test Acc: 50.4%
[120]: Cos: 1485.1707, Acc: 20.0%, Test Acc: 50.4%
[150]: Cos: 2667.8867, Acc: 50.0%, Test Acc: 50.4%
[180]: Cos: 806.8883, Acc: 50.0%, Test Acc: 50.4%
[210]: Cos: 105.7802, Acc: 50.0%, Test Acc: 49.6%
[240]: Cos: 2002.2031, Acc: 50.0%, Test Acc: 50.4%
[270]: Cos: 3357.0098, Acc: 20.0%, Test Acc: 50.4%