tf.sigmoid - predictions equal to 0

Date: 2018-07-21 16:54:20

Tags: python tensorflow neural-network sigmoid

I have been trying to apply tf.nn.sigmoid to the output layer X21 in the code below, but every prediction I get comes out equal to 0. Could this be related to the absolute magnitude of the response values? Any suggestions on what might be going wrong?

Note: when I remove tf.sigmoid from the two places it appears (the definition of X21 and the computation of the final predictions), the code works fine.
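
For reference, here is a minimal standalone sketch (separate from the script below) showing that tf.sigmoid squashes every input into the open interval (0, 1) and saturates to 0 or 1 for large-magnitude inputs, which is why I suspect the absolute scale of the responses matters:

import tensorflow as tf

# tf.sigmoid maps any real input into (0, 1); for inputs of large
# magnitude it saturates at 0 or 1, so it cannot reproduce targets
# (e.g. raw prices) that lie far outside that range.
x = tf.constant([-2000.0, -5.0, 0.0, 5.0, 2000.0], dtype=tf.float64)
with tf.Session() as sess:
    print(sess.run(tf.sigmoid(x)))
# Prints values close to [0.0, 0.0067, 0.5, 0.9933, 1.0]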

import time
start_time = time.time()

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# Parameters
learning_rate = 0.0000000002
training_epochs = 100
display_step = 5

#Data_ABX3 = np.random.random((193, 8)).astype('f')

#train_X = Data_ABX3[0:192, 0:6]
#train_Y = Data_ABX3[0:192, [7]]

train_X = np.genfromtxt(r'H:\Prince\Trainings\Python\Data\ex3x.dat')
train_Y = np.genfromtxt(r'H:\Prince\Trainings\Python\Data\ex3y.dat', names="price")

train_X.shape=[47,2]
train_Y.shape=[47,1]

first=train_X[:,0]
first.shape=[47,1]

second=train_X[:,1]
second.shape=[47,1]

train_Y=train_Y.astype(float)


Data = np.column_stack((train_X,train_Y))

from mpl_toolkits.mplot3d import Axes3D

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(Data[:,0],Data[:,1],Data[:,2], 'ks')
plt.show()

# Candidate engineered features (currently unused)
minus = train_X[:,0] - train_X[:,1]
plus = train_X[:,0] + train_X[:,1]
mult = train_X[:,0] * train_X[:,1]
divide = train_X[:,0] / train_X[:,1]
plus_sq = train_X[:,0] + np.square(train_X[:,1])
sq = np.sqrt(train_X[:,0])
squ = np.square(train_X[:,1])

#train_X=np.column_stack((train_X,plus))

# Training Data
n_samples = train_Y.shape[0]

# Set model weights
b11 = tf.cast(tf.Variable(np.random.randn(), name="b11"), tf.float64)
b12 = tf.cast(tf.Variable(np.random.randn(), name="b12"), tf.float64)
b21 = tf.cast(tf.Variable(np.random.randn(), name="b21"), tf.float64)
W01_1 = tf.cast(tf.Variable(np.random.randn(1, 1), name="W01_1"), tf.float64)
W02_1 = tf.cast(tf.Variable(np.random.randn(1, 1), name="W02_1"), tf.float64)
W01_2 = tf.cast(tf.Variable(np.random.randn(1, 1), name="W01_2"), tf.float64)
W02_2 = tf.cast(tf.Variable(np.random.randn(1, 1), name="W02_2"), tf.float64)
W11_1 = tf.cast(tf.Variable(np.random.randn(1, 1), name="W11_1"), tf.float64)
W12_1 = tf.cast(tf.Variable(np.random.randn(1, 1), name="W12_1"), tf.float64)


# Placeholders for tensors that will always be fed.

Y = tf.placeholder('float64', shape = [47, 1])
X01 = tf.placeholder('float64', shape = [47, 1])
X02 = tf.placeholder('float64', shape = [47, 1])

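# Network: two linear hidden units (X11, X12) feeding a single sigmoid output unit (X21)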
X11 = tf.add(tf.add(tf.matmul(X01,W01_1),tf.matmul(X02,W02_1)),b11)
X12 = tf.add(tf.add(tf.matmul(X01,W01_2),tf.matmul(X02,W02_2)),b12)
X21 = tf.sigmoid(tf.add(tf.add(tf.matmul(X11,W11_1),tf.matmul(X12,W12_1)),b21))

# Mean squared error cost, minimized with plain gradient descent
cost = tf.reduce_sum(tf.pow(X21 - Y, 2)) / (2 * n_samples)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    # Fit all training data
    for epoch in range(training_epochs):
        #for (x, y) in zip(train_X, train_Y):
        sess.run(optimizer, feed_dict={X01: first,X02: second, Y: train_Y})
        # Display logs every display_step epochs
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X01: first,X02: second, Y: train_Y})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c))

    print("Optimization Finished!")

    training_cost = sess.run(cost, feed_dict={X01: first,X02: second, Y: train_Y})
    print("Training cost=", training_cost)
    # Recompute the sigmoid output layer to obtain the final predictions
    line = sess.run(tf.sigmoid(tf.add(tf.add(tf.matmul(X11, W11_1), tf.matmul(X12, W12_1)), b21)),
                    feed_dict={X01: first, X02: second, Y: train_Y})

# Merge the responses and the predictions into one array
FinalPred = np.column_stack((train_Y, line))

# Sort rows by predicted value in ascending order
FinalPred = sorted(FinalPred, key=lambda x: x[1], reverse=False)
FinalPred = np.asarray(FinalPred)

# Graphic display
plt.plot(FinalPred)
plt.show()

print("--- %s seconds ---" % (time.time() - start_time))

0 Answers:

No answers yet.