I am keen to use TensorFlow, but while learning to train a network I found that the weights do not update when I run the optimization. I do not understand why TensorFlow is not updating the network's weights. This is the code I use for my task:
import tensorflow as tf
import numpy as np
def importDataset(path, nsample):
    # -------------------------------------------------- IMPORT DATASET --------------------------------------------------
    filename_queue = tf.train.string_input_producer([path], shuffle=True)
    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)
    # Default values, in case of empty columns. Also specifies the type of the
    # decoded result.
    record_defaults = [[1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0]]
    col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13, col14 = tf.decode_csv(value, record_defaults=record_defaults)
    features = tf.stack([col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13, col14])
    with tf.Session() as sess:
        # Start populating the filename queue.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        training_data = np.matrix([])
        training_y = np.matrix([])
        for i in range(nsample):
            # Retrieve a single instance:
            example, label = sess.run([features, col1])
            # Build a vector of three zeros representing the three possible classes.
            temp = np.zeros(3)
            # Put a one at the position corresponding to the example's class,
            # e.g. position 2 holds a one if the example belongs to class 2.
            temp[int(label) - 1] = 1.0
            if i == 0:
                training_data = np.vstack([example])
                training_y = np.vstack([temp])
            else:
                training_data = np.vstack([training_data, example])
                training_y = np.vstack([training_y, temp])
        coord.request_stop()
        coord.join(threads)
    #print(len(training_data[:,1]))
    #print(len(training_y[:,1]))
    #print(training_y)
    return training_data, training_y
def splitDataset(nsample, testPerc, path):
    example, example_y = importDataset(path, nsample)
    # Convert training_data and training_y into lists.
    example_list = example.tolist()
    example_y_list = example_y.tolist()
    training_data = list()
    training_y = list()
    # Number of samples drawn (uniformly at random) for the training set.
    percent = int((nsample * testPerc) / 100)
    # Begin uniform extraction from the data.
    for i in range(percent):
        index = np.random.randint(0, len(example_list))
        training_data.append(example_list[index])
        training_y.append(example_y_list[index])
        # Remove by index so the remaining features and labels stay aligned.
        example_list.pop(index)
        example_y_list.pop(index)
    training_data = np.matrix(training_data)
    training_y = np.matrix(training_y)
    test_data = np.matrix(example_list)
    test_y = np.matrix(example_y_list)
    #print(len(training_data[:,1]))
    #print(len(training_y[:,1]))
    #print(len(test_data[:,1]))
    #print(len(test_y[:,1]))
    return training_data, training_y, test_data, test_y
# --------------------------------------------- BEGIN MODEL DEFINITION ---------------------------------------------
x = tf.placeholder(tf.float32, [None, 13])
y = tf.placeholder(tf.float32, [None, 3])
hidden_Layer1 = {'weights': tf.Variable(tf.truncated_normal([13, 3], stddev=0.001)), 'biases': tf.Variable(tf.truncated_normal([3], stddev=0.001))}
hidden_Layer2 = {'weights': tf.Variable(tf.truncated_normal([3, 3], stddev=0.001)), 'biases': tf.Variable(tf.truncated_normal([3], stddev=0.001))}
hidden_Layer3 = {'weights': tf.Variable(tf.truncated_normal([3, 3], stddev=0.001)), 'biases': tf.Variable(tf.truncated_normal([3], stddev=0.001))}
output_Layer = {'weights': tf.Variable(tf.truncated_normal([3, 3], stddev=0.001)), 'biases': tf.Variable(tf.truncated_normal([3], stddev=0.001))}
# Hidden layer #1
output_Layer1 = tf.add(tf.matmul(x, hidden_Layer1['weights']), hidden_Layer1['biases'])
output_Layer1 = tf.nn.sigmoid(output_Layer1)
# Hidden layer #2
output_Layer2 = tf.add(tf.matmul(output_Layer1, hidden_Layer2['weights']), hidden_Layer2['biases'])
output_Layer2 = tf.nn.sigmoid(output_Layer2)
# Hidden layer #3
output_Layer3 = tf.add(tf.matmul(output_Layer2, hidden_Layer3['weights']), hidden_Layer3['biases'])
output_Layer3 = tf.nn.sigmoid(output_Layer3)
# Output layer
output_Layer_Output = tf.nn.sigmoid(tf.add(tf.matmul(output_Layer3, output_Layer['weights']), output_Layer['biases']))
# --------------------------------------------- END MODEL DEFINITION ---------------------------------------------
# --------------------------------------------- MODEL TRAINING ---------------------------------------------
error = tf.nn.l2_loss(output_Layer_Output - y, name="squared_error_cost")
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(error)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

training_data, training_label, test_data, test_label = splitDataset(178, 70, "datasetvino.csv")
for _ in range(100):
    print(sess.run([error, train_step, hidden_Layer1['weights']], feed_dict={x: training_data, y: training_label}))
correct_class = tf.equal(tf.argmax(output_Layer_Output,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_class, tf.float32))
#print(sess.run([accuracy], feed_dict={x:test_data,y:test_label}))
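A quick way to check whether the variables are actually being updated (a minimal sketch, reusing the session, hidden_Layer1, and training data defined above) is to snapshot the weights before and after a single training step:

    w_before = sess.run(hidden_Layer1['weights'])
    sess.run(train_step, feed_dict={x: training_data, y: training_label})
    w_after = sess.run(hidden_Layer1['weights'])
    # If the optimizer is applying updates, the two snapshots should differ.
    print("weights changed:", not np.allclose(w_before, w_after))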
Answer 0 (score: 0)
Sorry, I had forgotten to include the train_step in this part of the code:
print(sess.run(error,feed_dict={x:training_data, y:training_label}))
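With train_step added back into the same sess.run call, the corrected loop (a minimal sketch using the placeholders and data defined in the question) looks like this:

    for _ in range(100):
        # Running train_step together with error applies one gradient-descent update per pass.
        err, _ = sess.run([error, train_step], feed_dict={x: training_data, y: training_label})
        print(err)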
Now I am trying to tune the hyperparameters to get a good classification. Thank you for your answers.