我是Tensorflow的新手,并构建了一个具有2个隐藏层的感知器。 我的数据集有8000个训练样例,代码如下:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
data=pd.read_csv("Churn_Modelling.csv")
X=data.iloc[:,3:13].values
Y=data.iloc[:,13].values
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
le1=LabelEncoder()
X[:,1]=le1.fit_transform(X[:,1])
le2=LabelEncoder()
X[:,2]=le2.fit_transform(X[:,2])
ohe1=OneHotEncoder(categorical_features=[1])
X=ohe1.fit_transform(X).toarray()
X=X[:,1:]
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.2)
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)
y_train=np.array([[y_train[i]] for i in range(8000)])
y_test=np.array([[y_test[i]] for i in range(2000)])
import tensorflow as tf
hidden1=6
hidden2=6
batch_size=10
x=tf.placeholder(shape=[None,11],dtype=tf.float32)
y=tf.placeholder(shape=[None,1],dtype=tf.float32)
def neural_network(data):
l1={"weights":tf.Variable(tf.random_normal([11,hidden1])),
"biases":tf.Variable(tf.random_normal([hidden1]))}
l2={"weights":tf.Variable(tf.random_normal([hidden1,hidden2])),
"biases":tf.Variable(tf.random_normal([hidden2]))}
output={"weights":tf.Variable(tf.random_normal([hidden2,1])),
"biases":tf.Variable(tf.random_normal([1]))}
hl1=tf.add(tf.matmul(data,l1["weights"]),l1["biases"])
hl1=tf.nn.relu(hl1)
hl2=tf.add(tf.matmul(hl1,l2["weights"]),l2["biases"])
hl2=tf.nn.relu(hl2)
output1=tf.add(tf.matmul(hl2,output["weights"]),output["biases"])
return output1
tcost=[]
def train_neural_network(data):
prediction=neural_network(data)
cost=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=prediction,labels=y))
#cost=-tf.reduce_mean(y*tf.log(prediction)+(1-y)*tf.log(1-prediction))
optimizer=tf.train.AdamOptimizer(0.01).minimize(cost)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
epochs=100
for epoch in range(epochs):
avg_cost=0
for i in range(batch_size):
start=i
end=start+batch_size
a=X_train[start:end,:]
b=y_train[start:end,:]
_,c=sess.run([optimizer,cost],feed_dict={x:a,y:b})
avg_cost+=c
i=i+batch_size
print("Cost",avg_cost)
correct= tf.equal(tf.round(prediction), y)
# correct=tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(correct,"float"))
## my_acc = tf.reduce_mean(tf.cast(tf.equal(y, prediction), tf.float32))
## print(sess.run(my_acc, feed_dict={x:X_test,y:y_test}))
print("Accuracy",accuracy.eval({x:X_train,y:y_train}))
tcost.append(avg_cost)
plt.scatter(range(100),tcost,color="blue")
plt.show()
train_neural_network(x)
无论我做什么,我的网络准确率都不会超过训练集的10%。 即使成本函数随着每个时代逐渐减少,我也尝试过修改学习率以及层数和隐藏单位而无济于事。
最后我得到了什么: -
费用0.0032630344212520868
准确度0.015
我执行的准确度函数错了吗?问题是什么?
答案 0 :(得分:0)
sigmoid_cross_entropy_with_logits
this function only is useful for training because of the optimizations with sigmoid and the cross entropy, for predictions you need to use the sigmoid
function in your outputs. You are mixing the logits
with the predictions
, those are different things.
def neural_network(data):
l1={"weights":tf.Variable(tf.random_normal([11,hidden1])),
"biases":tf.Variable(tf.random_normal([hidden1]))}
l2={"weights":tf.Variable(tf.random_normal([hidden1,hidden2])),
"biases":tf.Variable(tf.random_normal([hidden2]))}
output={"weights":tf.Variable(tf.random_normal([hidden2,1])),
"biases":tf.Variable(tf.random_normal([1]))}
hl1=tf.add(tf.matmul(data,l1["weights"]),l1["biases"])
hl1=tf.nn.relu(hl1)
hl2=tf.add(tf.matmul(hl1,l2["weights"]),l2["biases"])
hl2=tf.nn.relu(hl2)
logits=tf.add(tf.matmul(hl2,output["weights"]),output["biases"])
predictions = tf.sigmoid(logits)
return logtis, predictions
...
logits,prediction=neural_network(data)
cost=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,labels=y))