tensorflow ValueError:没有为任何变量提供梯度,请检查计算图以了解不支持梯度的操作

时间:2019-01-19 08:37:50

标签: python tensorflow

我在tensorflow上出错。 代码是这样的:

from sklearn import datasets
import random
import tensorflow as tf
wine=datasets.load_wine()
def generate_batch(batch_size,wine):
    """Draw a random batch (with replacement) from the sklearn wine dataset.

    Returns a pair (batch_x, batch_y): feature rows and float class labels.
    """
    # One randint call per example, in order, so random state advances
    # exactly as before.
    indices = [random.randint(0, 177) for _ in range(batch_size)]
    batch_x = [wine.data[i] for i in indices]
    batch_y = [float(wine.target[i]) for i in indices]
    return batch_x, batch_y
def inference(x):
    """Two-layer MLP: 13 features -> 7 hidden (ReLU) -> 3 class scores.

    NOTE(review): the final tf.arg_max + tf.cast reduces the [batch, 3]
    logits to a single non-differentiable class index per example. Using
    this output as the "logits" of a softmax cross-entropy loss is what
    triggers the reported "No gradients provided for any variable" error.
    """
    with tf.variable_scope('layer1'):
        weight1 = tf.get_variable('weight', [13, 7], initializer=tf.truncated_normal_initializer(stddev=0.1))
        bias1 = tf.get_variable('bias', [7], initializer=tf.constant_initializer(0.1))
        layer1 = tf.nn.relu(tf.matmul(x, weight1) + bias1)
    # These variables live in the enclosing (root) scope, so their names
    # ('weight', 'bias') do not clash with the 'layer1/' scoped ones above.
    weight2 =tf.get_variable('weight', [7, 3], initializer=tf.truncated_normal_initializer(stddev=0.1))
    bias2 = tf.get_variable('bias', [3], initializer=tf.constant_initializer(0.1))
    logit = tf.matmul(layer1, weight2) + bias2
    # Collapses each logits row to one float class index; gradients cannot
    # flow back through arg_max.
    logit = tf.cast(tf.arg_max(logit, 1), tf.float32)
    return logit
# Graph inputs: 13 features per example. y_ holds scalar class labels,
# not one-hot rows — this mismatches softmax_cross_entropy_with_logits,
# which expects labels shaped like the logits (e.g. [batch, num_classes]).
x=tf.placeholder(tf.float32,[None,13])
y_=tf.placeholder(tf.float32,[None])
# y is the arg_max output of inference(), so no gradient reaches the
# variables; minimize() below raises the "No gradients provided for any
# variable" ValueError quoted in this question.
y=inference(x)
cross_entropy=tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y)
cross_entropy_mean=tf.reduce_mean(cross_entropy)
train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy_mean)
# NOTE(review): accuracy is built here but never evaluated in the loop.
correct_prediction = tf.equal(y ,y_)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(2000):
        data,target=generate_batch(20,wine)
        sess.run(train_step,feed_dict={x:data,y_:target})
错误是:

  

ValueError: 没有为任何变量提供梯度(No gradients provided for any variable),请检查计算图中变量 ['layer1/weight', 'layer1/bias', 'weight', 'bias'] 与损失 Tensor("Mean:0", shape=(), dtype=float32) 之间不支持梯度的操作。

我的tensorflow版本是1.2.1,并且我正在使用python3.6。

1 个答案:

答案 0 :(得分:0)

您遇到了这个问题,因为您传递给tf.nn.softmax_cross_entropy_with_logits的参数不是它想要的。来自doc of tf.nn.softmax_cross_entropy_with_logits

  

一个常见的用例是 logits 的形状为 [batch_size, num_classes],但也支持更高的维度,此时可以使用 dim 参数指定类别所在的维度。

因此,在将目标输入神经网络之前,应先对其进行一键编码。以下是可运行的代码:

from sklearn import datasets
import random
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import OneHotEncoder

# Load the wine dataset once at module level and split out features/labels.
wine=datasets.load_wine()
wine_data = wine.data
# One-hot encode the integer class labels (three classes) into rows of
# length 3, the shape tf.nn.softmax_cross_entropy_with_logits expects.
onehotencoder = OneHotEncoder()
# [..., np.newaxis] turns the 1-D target vector into a column so the
# encoder treats it as a single categorical feature.
wine_target = onehotencoder.fit_transform(wine.target[...,np.newaxis]).toarray()

def generate_batch(batch_size,wine):
    """Sample a random training batch from the module-level wine arrays.

    Args:
        batch_size: number of examples to draw (with replacement).
        wine: unused; kept for backward compatibility with existing callers
            (the data actually comes from the module-level wine_data /
            wine_target arrays prepared above).

    Returns:
        (batch_x, batch_y): lists of feature rows and one-hot label rows.
    """
    batch_x=[]
    batch_y=[]
    # Derive the valid index range from the data instead of hard-coding 177,
    # so the function keeps working if the dataset size ever changes.
    last_index = len(wine_data) - 1
    for _ in range(batch_size):
        index=random.randint(0,last_index)
        batch_y.append(wine_target[index])
        batch_x.append(wine_data[index])
    return batch_x,batch_y

def inference(x):
    """Two-layer MLP producing raw class logits: 13 -> 7 (ReLU) -> 3.

    Unlike the question's version, the raw [batch, 3] logits are returned
    directly (no arg_max), so gradients can flow to all four variables.
    """
    with tf.variable_scope('layer1'):
        # Hidden layer: 13 input features -> 7 ReLU units.
        weight1 = tf.get_variable('weight', [13, 7], initializer=tf.truncated_normal_initializer(stddev=0.1))
        bias1 = tf.get_variable('bias', [7], initializer=tf.constant_initializer(0.1))
        layer1 = tf.nn.relu(tf.matmul(x, weight1) + bias1)
    # Output-layer variables live in the root scope; their names do not
    # clash with the 'layer1/' scoped variables above.
    weight2 =tf.get_variable('weight', [7, 3], initializer=tf.truncated_normal_initializer(stddev=0.1))
    bias2 = tf.get_variable('bias', [3], initializer=tf.constant_initializer(0.1))
    logit = tf.matmul(layer1, weight2) + bias2
    return logit

# Graph inputs: 13 features per example; labels are one-hot rows of length 3.
x=tf.placeholder(tf.float32,[None,13])
y_=tf.placeholder(tf.float32,[None, 3])
# y is now raw [batch, 3] logits, so the loss below is differentiable.
y=inference(x)

# Softmax cross-entropy over the raw logits, averaged over the batch.
cross_entropy=tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y)
cross_entropy_mean=tf.reduce_mean(cross_entropy)

train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy_mean)
# Fix: compare predicted class indices, not raw logits against one-hot rows.
# tf.equal(y, y_) element-wise compares float logits with 0/1 label entries
# and is essentially never true; argmax on both sides gives per-example hits.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(2000):
        data,target=generate_batch(20,wine)
        sess.run(train_step,feed_dict={x:data,y_:target})