I am trying to implement a neural network for the Kaggle heart disease dataset:
https://www.kaggle.com/ronitf/heart-disease-uci
Even when I train the network on the entire dataset, it does not seem to learn at all; its accuracy stays at around 50%, even though by overfitting the network should at least be able to reach a much higher in-sample accuracy. Can anyone see what is wrong with this code? (I have tried many learning rates, batch sizes and epoch counts, but nothing seems to help.)
#need to switch from the binary output to the singular value output
import pandas
import numpy as np
import tensorflow as tf
import random
from sklearn.preprocessing import normalize
#pulling in data from excel
sheet = pandas.read_excel("./heart.xlsx")
sheet2 = sheet.values
#getting my data arrays
X_data, y_data = np.split(sheet2, [13], axis=1)
y_data = np.reshape(y_data, [-1]) #need to learn more about why this works
X_data = normalize(X_data, axis=0, norm='max')
#random shuffle
ind_list = [i for i in range(len(X_data))]
random.shuffle(ind_list)
X_data = X_data[ind_list]
y_data = y_data[ind_list]
#initialising values
n_inputs = 13  # number of feature columns (matches the np.split at index 13 above)
epochs = 1000
learning_rate = 0.001
batch_size = 50
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int64, shape=(None), name="y")
#creating the structure of the neural network
with tf.name_scope("dnn"):
    input = tf.layers.dense(X, 13, name="hidden1", activation=tf.nn.relu)
    input = tf.layers.dense(input, 7, name="hidden2", activation=tf.nn.relu)
    logits = tf.layers.dense(input, 1, name="outputs")
with tf.name_scope("loss"):
    entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(y, tf.float32), logits=logits)
    loss = tf.reduce_mean(entropy, name="loss")
with tf.name_scope("train"):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    training_operation = optimizer.minimize(loss)
with tf.name_scope("accuracy"):
    predicted = tf.nn.sigmoid(logits)
    correct_pred = tf.equal(tf.round(predicted), tf.cast(y, tf.float32))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
#defining functions
def shuffle_batch(X, y, batch_size):
    rnd_idx = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for batch_idx in np.array_split(rnd_idx, n_batches):
        X_batch, y_batch = X[batch_idx], y[batch_idx]
        yield X_batch, y_batch
#initialising the graph and running the optimisation
init = tf.global_variables_initializer()
save = tf.train.Saver()
with tf.Session() as sess:
    init.run()
    for epoch in range(epochs):
        for X_batch, y_batch in shuffle_batch(X_data, y_data, batch_size):
            sess.run(training_operation, feed_dict={X: X_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_data, y: y_data})
        print("Epoch", epoch, "training acuuracy", acc_train)
Output
2019-09-15 07:52:04.662488: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
Epoch 0 training acuuracy 0.5089697
Epoch 1 training acuuracy 0.5089697
Epoch 2 training acuuracy 0.5089697
Epoch 3 training acuuracy 0.50838155
Epoch 4 training acuuracy 0.5089697
Epoch 5 training acuuracy 0.5092638
Epoch 6 training acuuracy 0.5101461
Epoch 7 training acuuracy 0.5101461
Epoch 8 training acuuracy 0.5101461
Epoch 9 training acuuracy 0.51073426
Epoch 10 training acuuracy 0.51102835
Epoch 11 training acuuracy 0.51102835
Epoch 12 training acuuracy 0.51073426
Epoch 13 training acuuracy 0.51132244
Epoch 14 training acuuracy 0.5122047
Epoch 15 training acuuracy 0.5124988
Epoch 16 training acuuracy 0.5127929
Epoch 17 training acuuracy 0.513087
Epoch 18 training acuuracy 0.51338106
Epoch 19 training acuuracy 0.51338106
Epoch 20 training acuuracy 0.51367515
Epoch 21 training acuuracy 0.51396924
Epoch 22 training acuuracy 0.5151456
Epoch 23 training acuuracy 0.51426333
Epoch 24 training acuuracy 0.5154397
Epoch 25 training acuuracy 0.5151456
Epoch 26 training acuuracy 0.51632196
Epoch 27 training acuuracy 0.5172042
Epoch 28 training acuuracy 0.51808643
Epoch 29 training acuuracy 0.5183805
Epoch 30 training acuuracy 0.5192628
Epoch 31 training acuuracy 0.5189687
Epoch 32 training acuuracy 0.51985097
Epoch 33 training acuuracy 0.52043915
Epoch 34 training acuuracy 0.52043915
Epoch 35 training acuuracy 0.52043915
Epoch 36 training acuuracy 0.5213214
Epoch 37 training acuuracy 0.5219096
Epoch 38 training acuuracy 0.5222037
Epoch 39 training acuuracy 0.5222037
Epoch 40 training acuuracy 0.5224978
Epoch 41 training acuuracy 0.5224978
Epoch 42 training acuuracy 0.5224978
Epoch 43 training acuuracy 0.5222037
Epoch 44 training acuuracy 0.5219096
Epoch 45 training acuuracy 0.5222037
Epoch 46 training acuuracy 0.5222037
Epoch 47 training acuuracy 0.5219096
Epoch 48 training acuuracy 0.5222037
Epoch 49 training acuuracy 0.52279186
Epoch 50 training acuuracy 0.52308595
Epoch 51 training acuuracy 0.52338004
Epoch 52 training acuuracy 0.52308595
Epoch 53 training acuuracy 0.52308595
Epoch 54 training acuuracy 0.52308595
Epoch 55 training acuuracy 0.52367413
Epoch 56 training acuuracy 0.5245564
Epoch 57 training acuuracy 0.52602684
Epoch 58 training acuuracy 0.52543867
Epoch 59 training acuuracy 0.52573276
Epoch 60 training acuuracy 0.526615
Epoch 61 training acuuracy 0.52573276
Epoch 62 training acuuracy 0.5269091
Epoch 63 training acuuracy 0.5269091
Epoch 64 training acuuracy 0.52602684
Epoch 65 training acuuracy 0.526615
Epoch 66 training acuuracy 0.5277914
Epoch 67 training acuuracy 0.5272032
Epoch 68 training acuuracy 0.526615
Epoch 69 training acuuracy 0.526615
Epoch 70 training acuuracy 0.526615
Epoch 71 training acuuracy 0.5269091
Epoch 72 training acuuracy 0.526615
Epoch 73 training acuuracy 0.526615
Epoch 74 training acuuracy 0.5277914
Epoch 75 training acuuracy 0.52867365
Epoch 76 training acuuracy 0.5277914
Epoch 77 training acuuracy 0.5277914
Epoch 78 training acuuracy 0.52837956
Epoch 79 training acuuracy 0.52837956
Epoch 80 training acuuracy 0.52837956
Epoch 81 training acuuracy 0.5295559
Epoch 82 training acuuracy 0.52985
Epoch 83 training acuuracy 0.5304382
Epoch 84 training acuuracy 0.5301441
Epoch 85 training acuuracy 0.5301441
Epoch 86 training acuuracy 0.5307323
Epoch 87 training acuuracy 0.5304382
Epoch 88 training acuuracy 0.5304382
Epoch 89 training acuuracy 0.53132045
Epoch 90 training acuuracy 0.5322027
Epoch 91 training acuuracy 0.5322027
Epoch 92 training acuuracy 0.5322027
Epoch 93 training acuuracy 0.533085
Epoch 94 training acuuracy 0.5348495
Epoch 95 training acuuracy 0.53455544
Epoch 96 training acuuracy 0.5333791
Epoch 97 training acuuracy 0.5333791
Epoch 98 training acuuracy 0.53426135
Epoch 99 training acuuracy 0.53455544
Epoch 100 training acuuracy 0.53455544
Epoch 101 training acuuracy 0.5354377
Epoch 102 training acuuracy 0.5351436
Epoch 103 training acuuracy 0.53455544
Epoch 104 training acuuracy 0.5348495
Epoch 105 training acuuracy 0.53396726
Epoch 106 training acuuracy 0.53455544
Epoch 107 training acuuracy 0.53426135
Epoch 108 training acuuracy 0.5351436
Epoch 109 training acuuracy 0.5354377
Epoch 110 training acuuracy 0.5354377
Epoch 111 training acuuracy 0.53690815
Epoch 112 training acuuracy 0.53720224
Epoch 113 training acuuracy 0.5377904
Epoch 114 training acuuracy 0.5380845
Epoch 115 training acuuracy 0.5380845
Epoch 116 training acuuracy 0.5377904
Epoch 117 training acuuracy 0.5377904
Epoch 118 training acuuracy 0.53720224
Epoch 119 training acuuracy 0.5377904
Epoch 120 training acuuracy 0.5377904
Epoch 121 training acuuracy 0.5377904
Epoch 122 training acuuracy 0.5383786
Epoch 123 training acuuracy 0.5383786
Epoch 124 training acuuracy 0.5383786
Epoch 125 training acuuracy 0.5386727
Epoch 126 training acuuracy 0.5383786
Epoch 127 training acuuracy 0.5383786
Epoch 128 training acuuracy 0.5383786
Epoch 129 training acuuracy 0.5383786
Epoch 130 training acuuracy 0.5389668
Epoch 131 training acuuracy 0.5386727
Epoch 132 training acuuracy 0.5389668
Epoch 133 training acuuracy 0.53926086
Epoch 134 training acuuracy 0.53926086
Epoch 135 training acuuracy 0.53926086
Epoch 136 training acuuracy 0.53955495
Epoch 137 training acuuracy 0.53955495
Epoch 138 training acuuracy 0.53984904
Epoch 139 training acuuracy 0.53984904
Epoch 140 training acuuracy 0.53984904
Epoch 141 training acuuracy 0.54014313
Epoch 142 training acuuracy 0.5404372
Epoch 143 training acuuracy 0.54014313
Epoch 144 training acuuracy 0.53955495
Epoch 145 training acuuracy 0.5404372
Epoch 146 training acuuracy 0.54014313
Epoch 147 training acuuracy 0.54014313
Epoch 148 training acuuracy 0.53984904
Epoch 149 training acuuracy 0.53955495
Epoch 150 training acuuracy 0.54014313
Epoch 151 training acuuracy 0.5404372
Epoch 152 training acuuracy 0.5407313
Epoch 153 training acuuracy 0.5410254
Epoch 154 training acuuracy 0.5410254
Epoch 155 training acuuracy 0.5413195
Epoch 156 training acuuracy 0.5413195
Epoch 157 training acuuracy 0.5410254
Epoch 158 training acuuracy 0.5410254
Epoch 159 training acuuracy 0.5410254
Epoch 160 training acuuracy 0.5413195
Epoch 161 training acuuracy 0.5413195
Epoch 162 training acuuracy 0.5413195
Epoch 163 training acuuracy 0.5413195
Epoch 164 training acuuracy 0.5410254
Epoch 165 training acuuracy 0.5413195
Epoch 166 training acuuracy 0.54190767
Epoch 167 training acuuracy 0.54220176
Epoch 168 training acuuracy 0.54190767
Epoch 169 training acuuracy 0.54220176
Epoch 170 training acuuracy 0.54220176
Epoch 171 training acuuracy 0.54220176
Epoch 172 training acuuracy 0.54220176
Epoch 173 training acuuracy 0.54220176
Epoch 174 training acuuracy 0.54220176
Epoch 175 training acuuracy 0.54220176
Epoch 176 training acuuracy 0.54190767
Epoch 177 training acuuracy 0.5416136
Epoch 178 training acuuracy 0.5416136
Epoch 179 training acuuracy 0.54190767
Epoch 180 training acuuracy 0.54220176
Epoch 181 training acuuracy 0.54220176
Epoch 182 training acuuracy 0.54220176
Epoch 183 training acuuracy 0.54220176
Epoch 184 training acuuracy 0.54220176
Epoch 185 training acuuracy 0.54220176
Epoch 186 training acuuracy 0.54220176
Epoch 187 training acuuracy 0.54220176
Epoch 188 training acuuracy 0.54220176
Epoch 189 training acuuracy 0.54220176
Epoch 190 training acuuracy 0.54220176
Epoch 191 training acuuracy 0.54220176
Epoch 192 training acuuracy 0.54220176
Epoch 193 training acuuracy 0.54220176
Epoch 194 training acuuracy 0.54278994
Epoch 195 training acuuracy 0.543084
Epoch 196 training acuuracy 0.543084
Epoch 197 training acuuracy 0.543084
Epoch 198 training acuuracy 0.543084
Epoch 199 training acuuracy 0.543084
Epoch 200 training acuuracy 0.54278994
Epoch 201 training acuuracy 0.54249585
Epoch 202 training acuuracy 0.54220176
Epoch 203 training acuuracy 0.54249585
Epoch 204 training acuuracy 0.54220176
Epoch 205 training acuuracy 0.54220176
Epoch 206 training acuuracy 0.54249585
Epoch 207 training acuuracy 0.54249585
Epoch 208 training acuuracy 0.54249585
Epoch 209 training acuuracy 0.54249585
Epoch 210 training acuuracy 0.54249585
Epoch 211 training acuuracy 0.54249585
Epoch 212 training acuuracy 0.543084
Epoch 213 training acuuracy 0.543084
Epoch 214 training acuuracy 0.54278994
Epoch 215 training acuuracy 0.54249585
Epoch 216 training acuuracy 0.54278994
Epoch 217 training acuuracy 0.543084
Epoch 218 training acuuracy 0.54278994
Epoch 219 training acuuracy 0.54278994
Epoch 220 training acuuracy 0.54278994
Epoch 221 training acuuracy 0.54278994
Epoch 222 training acuuracy 0.54278994
Epoch 223 training acuuracy 0.54249585
Epoch 224 training acuuracy 0.54249585
Epoch 225 training acuuracy 0.54249585
Epoch 226 training acuuracy 0.54278994
Epoch 227 training acuuracy 0.54278994
Epoch 228 training acuuracy 0.54278994
Epoch 229 training acuuracy 0.54278994
Epoch 230 training acuuracy 0.54278994
Epoch 231 training acuuracy 0.54278994
Epoch 232 training acuuracy 0.54249585
Epoch 233 training acuuracy 0.54249585
Epoch 234 training acuuracy 0.54278994
Epoch 235 training acuuracy 0.543084
Epoch 236 training acuuracy 0.543084
Epoch 237 training acuuracy 0.5433781
Epoch 238 training acuuracy 0.543084
Epoch 239 training acuuracy 0.5433781
Epoch 240 training acuuracy 0.5433781
Epoch 241 training acuuracy 0.5436722
Epoch 242 training acuuracy 0.5433781
Epoch 243 training acuuracy 0.543084
Epoch 244 training acuuracy 0.543084
Epoch 245 training acuuracy 0.543084
Epoch 246 training acuuracy 0.543084
Epoch 247 training acuuracy 0.543084
Epoch 248 training acuuracy 0.543084
Answer 0 (score: 0)
I am only posting the parts of the code that changed.
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
#creating the structure of the neural network
with tf.name_scope("dnn"):
    input = tf.layers.dense(X, 13, name="hidden1", activation=tf.nn.relu)
    input = tf.layers.dense(input, 7, name="hidden2", activation=tf.nn.relu)
    logits = tf.layers.dense(input, 2, name="outputs", activation=tf.nn.softmax)
with tf.name_scope("loss"):
    entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(entropy, name="loss")
with tf.name_scope("train"):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    training_operation = optimizer.minimize(loss)
with tf.name_scope("accuracy"):
    predicted = tf.argmax(logits, axis=-1, output_type=tf.int32)
    correct_pred = tf.equal(predicted, y)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
You should now get an accuracy of 0.953.
I changed the sigmoid loss to a softmax loss, since the two classes are mutually exclusive output values.
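One detail worth noting about the snippet above: tf.nn.sparse_softmax_cross_entropy_with_logits is documented to expect unscaled logits (it applies the softmax internally), so a slightly cleaner variant leaves the activation off the output layer and applies tf.nn.softmax only where probabilities are actually needed. A minimal sketch of just that part, with everything else unchanged:

logits = tf.layers.dense(input, 2, name="outputs")  # raw scores, no softmax activation here
with tf.name_scope("loss"):
    # the loss op applies softmax itself, so it should see the raw logits
    entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(entropy, name="loss")
probabilities = tf.nn.softmax(logits)  # only needed if you want class probabilities for inspection
predicted = tf.argmax(logits, axis=-1, output_type=tf.int32)  # argmax is the same on logits or probabilities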
Answer 1 (score: 0)
The code below will learn with a single output. The sigmoid loss does not impose mutual exclusivity, but if you want to keep the single-output structure you can write the binary cross-entropy loss directly, as shown below. I do not know whether this is why your code fails to learn, since I would expect it to learn something even with the sigmoid loss; perhaps the cast operator is the culprit.
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.float32, shape=(None), name="y")
#creating the structure of the neural network
with tf.name_scope("dnn"):
    input = tf.layers.dense(X, 13, name="hidden1", activation=tf.nn.relu)
    input = tf.layers.dense(input, 7, name="hidden2", activation=tf.nn.relu)
    logits = tf.layers.dense(input, 1, name="outputs", activation=tf.nn.sigmoid)
with tf.name_scope("loss"):
    entropy = -tf.expand_dims(y, -1) * tf.log(logits + 1e-10) - (1 - tf.expand_dims(y, -1)) * tf.log(1 - logits + 1e-10)
    loss = tf.reduce_mean(entropy, name="loss")
with tf.name_scope("train"):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    training_operation = optimizer.minimize(loss)
with tf.name_scope("accuracy"):
    predicted = tf.round(logits)
    correct_pred = tf.equal(predicted, tf.expand_dims(y, -1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
epochs = 1000
init = tf.global_variables_initializer()
with tf.Session() as sess:
    init.run()
    for epoch in range(epochs):
        for X_batch, y_batch in shuffle_batch(X_data, y_data, batch_size):
            sess.run(training_operation, feed_dict={X: X_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_data, y: y_data})
        print("Epoch", epoch, "training acuuracy", acc_train)