这是我用 TensorFlow 实现的一个简单神经网络的代码:
import tensorflow as tf
import numpy as np
class Model:
    """Simple two-layer feed-forward network (TensorFlow 1.x graph mode).

    Architecture: x -> relu(x @ w1) -> x @ w2 (raw logits, no bias terms).
    """

    def __init__(self, input_neuron=2, hidden_neuron=10, output_neuron=2):
        self.input_neuron = input_neuron
        self.hidden_neuron = hidden_neuron
        self.output_neuron = output_neuron
        self.x = tf.placeholder(tf.float32, [None, self.input_neuron])
        self.y = tf.placeholder(tf.float32, [None, self.output_neuron])
        self.model = self.graph()
        self.sess = tf.InteractiveSession()
        # BUG FIX: do NOT run global_variables_initializer() here.
        # tf.train.AdamOptimizer() in train() creates extra variables
        # (beta1_power, beta2_power, per-weight slots) AFTER __init__ runs,
        # which is exactly what caused the FailedPreconditionError.
        # Initialization is deferred to train(), after the optimizer exists.

    @staticmethod
    def one_hot_encode(y, num_classes=2):
        """Convert a column of integer class labels into one-hot rows.

        Args:
            y: sequence of shape (n, 1) holding integer class indices.
            num_classes: width of the encoding; default 2 keeps backward
                compatibility with the original hard-coded value.

        Returns:
            np.ndarray of shape (n, num_classes) of 0.0/1.0.
        """
        y_ = np.zeros((len(y), num_classes))
        for i in range(len(y)):
            y_[i, y[i][0]] = 1
        return y_

    def graph(self):
        """Build the forward graph and return the output logits tensor."""
        w1 = tf.Variable(tf.random_normal([self.input_neuron, self.hidden_neuron]))
        l1 = tf.nn.relu(tf.matmul(self.x, w1))
        w2 = tf.Variable(tf.random_normal([self.hidden_neuron, self.output_neuron]))
        l2 = tf.matmul(l1, w2)
        return l2

    def train(self, xTrain, yTrain):
        """Run 100 epochs of Adam on softmax cross-entropy loss."""
        yTrain = self.one_hot_encode(yTrain, self.output_neuron)
        loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=self.model, labels=self.y))
        train = tf.train.AdamOptimizer(0.1).minimize(loss)
        # Initialize AFTER the optimizer has added its slot variables so
        # every variable in the graph gets initialized in one shot.
        self.sess.run(tf.global_variables_initializer())
        for epoch in range(100):
            self.sess.run(train, feed_dict={self.x: xTrain, self.y: yTrain})

    def predict(self, xTest):
        """Return the predicted class index for each input row.

        BUG FIX: the original fed `{x: xTest}` (undefined global name) and
        called tf.argmax without an axis, which reduces over the batch
        dimension; axis=1 takes the argmax over the class logits per row.
        """
        prediction = tf.argmax(self.model, axis=1)
        return self.sess.run(prediction, feed_dict={self.x: xTest})
当我使用以下方式运行时:
# Train the network on the XOR truth table.
model = Model()
xTrain = np.array([[a, b] for a in (0, 1) for b in (0, 1)])
yTrain = np.array([[a ^ b] for a in (0, 1) for b in (0, 1)])
model.train(xTrain, yTrain)
我收到此错误:
FailedPreconditionError(参见上面的回溯):尝试使用未初始化的值beta1_power_18
我做错了什么?
答案 0(得分:1):
您在 Model 类的 __init__ 中执行了 self.sess.run(tf.global_variables_initializer()),
但 tf.train.AdamOptimizer() 是在之后调用 train() 方法时才创建的,
它还会额外创建一些需要初始化的变量(例如 beta1_power)。
把 self.sess.run(tf.global_variables_initializer()) 移到
train = tf.train.AdamOptimizer(0.1).minimize(loss) 之后,代码就能正常运行。
完整代码(已测试):
import tensorflow as tf
import numpy as np
class Model:
    """Simple two-layer feed-forward network (TensorFlow 1.x graph mode).

    Variable initialization happens inside train(), AFTER the Adam optimizer
    has created its slot variables (beta1_power, ...), so all variables are
    initialized together — this is the fix for the FailedPreconditionError.
    """

    def __init__(self, input_neuron=2, hidden_neuron=10, output_neuron=2):
        self.input_neuron = input_neuron
        self.hidden_neuron = hidden_neuron
        self.output_neuron = output_neuron
        self.x = tf.placeholder(tf.float32, [None, self.input_neuron])
        self.y = tf.placeholder(tf.float32, [None, self.output_neuron])
        self.model = self.graph()
        self.sess = tf.InteractiveSession()

    @staticmethod
    def one_hot_encode(y, num_classes=2):
        """Convert a column of integer class labels into one-hot rows.

        Args:
            y: sequence of shape (n, 1) holding integer class indices.
            num_classes: width of the encoding; default 2 keeps backward
                compatibility with the original hard-coded value.

        Returns:
            np.ndarray of shape (n, num_classes) of 0.0/1.0.
        """
        y_ = np.zeros((len(y), num_classes))
        for i in range(len(y)):
            y_[i, y[i][0]] = 1
        return y_

    def graph(self):
        """Build the forward graph and return the output logits tensor."""
        w1 = tf.Variable(tf.random_normal([self.input_neuron, self.hidden_neuron]))
        l1 = tf.nn.relu(tf.matmul(self.x, w1))
        w2 = tf.Variable(tf.random_normal([self.hidden_neuron, self.output_neuron]))
        l2 = tf.matmul(l1, w2)
        return l2

    def train(self, xTrain, yTrain):
        """Run 100 epochs of Adam on softmax cross-entropy loss."""
        yTrain = self.one_hot_encode(yTrain)
        loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=self.model, labels=self.y))
        train = tf.train.AdamOptimizer(0.1).minimize(loss)
        # Key point of this answer: initialize after minimize() so the
        # optimizer's freshly-created variables are covered too.
        self.sess.run(tf.global_variables_initializer())
        for epoch in range(100):
            self.sess.run(train, feed_dict={self.x: xTrain, self.y: yTrain})
        print("Training done!")

    def predict(self, xTest):
        """Return the predicted class index for each input row.

        BUG FIX: the original fed `{x: xTest}` (undefined global name) and
        called tf.argmax without an axis; axis=1 takes the argmax over the
        class logits of each row rather than over the batch dimension.
        """
        prediction = tf.argmax(self.model, axis=1)
        return self.sess.run(prediction, feed_dict={self.x: xTest})
# Fit the model on the four XOR examples.
model = Model()
xTrain = np.array([[x1, x2] for x1 in (0, 1) for x2 in (0, 1)])
yTrain = np.array([[x1 ^ x2] for x1 in (0, 1) for x2 in (0, 1)])
model.train(xTrain, yTrain)
根据您的评论:如果您不希望每次调用 train() 方法时都重新初始化整个网络,
则应在 __init__() 中初始化网络,并在 train() 中借助
tf.report_uninitialized_variables() 只初始化尚未初始化的变量。
我参考 Salvador Dali 的 this answer 编写了 initialize_uninitialized() 方法。完整代码(已测试):
import tensorflow as tf
import numpy as np
class Model:
    """Simple two-layer feed-forward network (TensorFlow 1.x graph mode).

    The network variables are initialized once in __init__; train() then
    initializes ONLY the variables the optimizer adds later, via
    initialize_uninitialized(), so repeated train() calls do not reset
    the learned weights.
    """

    def __init__(self, input_neuron=2, hidden_neuron=10, output_neuron=2):
        self.input_neuron = input_neuron
        self.hidden_neuron = hidden_neuron
        self.output_neuron = output_neuron
        self.x = tf.placeholder(tf.float32, [None, self.input_neuron])
        self.y = tf.placeholder(tf.float32, [None, self.output_neuron])
        self.model = self.graph()
        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())

    @staticmethod
    def one_hot_encode(y, num_classes=2):
        """Convert a column of integer class labels into one-hot rows.

        Args:
            y: sequence of shape (n, 1) holding integer class indices.
            num_classes: width of the encoding; default 2 keeps backward
                compatibility with the original hard-coded value.

        Returns:
            np.ndarray of shape (n, num_classes) of 0.0/1.0.
        """
        y_ = np.zeros((len(y), num_classes))
        for i in range(len(y)):
            y_[i, y[i][0]] = 1
        return y_

    def graph(self):
        """Build the forward graph and return the output logits tensor."""
        w1 = tf.Variable(tf.random_normal([self.input_neuron, self.hidden_neuron]))
        l1 = tf.nn.relu(tf.matmul(self.x, w1))
        w2 = tf.Variable(tf.random_normal([self.hidden_neuron, self.output_neuron]))
        l2 = tf.matmul(l1, w2)
        return l2

    def initialize_uninitialized(self):
        """Initialize only the variables the session has not initialized yet.

        BUG FIX: tf.report_uninitialized_variables() returns *byte* strings
        under Python 3, while v.name.split(':')[0] is a str, so the original
        membership test never matched and nothing was initialized. Decode
        the reported names before comparing.
        """
        reported = self.sess.run(tf.report_uninitialized_variables())
        uninit_names = {
            name.decode() if isinstance(name, bytes) else name
            for name in reported
        }
        uninitialized_variables = [v for v in tf.global_variables()
                                   if v.name.split(':')[0] in uninit_names]
        self.sess.run(tf.variables_initializer(uninitialized_variables))

    def train(self, xTrain, yTrain):
        """Run 100 epochs of Adam; only optimizer-added variables are
        (re)initialized, so the network weights persist across calls."""
        yTrain = self.one_hot_encode(yTrain)
        loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=self.model, labels=self.y))
        train = tf.train.AdamOptimizer(0.1).minimize(loss)
        self.initialize_uninitialized()
        for epoch in range(100):
            self.sess.run(train, feed_dict={self.x: xTrain, self.y: yTrain})
        print("Training done!")

    def predict(self, xTest):
        """Return the predicted class index for each input row.

        BUG FIX: the original fed `{x: xTest}` (undefined global name) and
        called tf.argmax without an axis; axis=1 takes the argmax over the
        class logits of each row rather than over the batch dimension.
        """
        prediction = tf.argmax(self.model, axis=1)
        return self.sess.run(prediction, feed_dict={self.x: xTest})
# XOR truth table: (input1, input2, label) per row.
truth_table = ((0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0))
model = Model()
xTrain = np.array([row[:2] for row in truth_table])
yTrain = np.array([row[2:] for row in truth_table])
model.train(xTrain, yTrain)
以上即为完整代码(已测试)。
AlarmManager alarmManager;
static PendingIntent pendingIntent;
private String TAG = "netwatcher";
private final int alarmId = 0;