No gradients provided for any variable. How do I fix this error?

Asked: 2018-06-03 21:56:25

Tags: python numpy tensorflow deep-learning

I'm running into trouble with TensorFlow. Here is part of my code. The error says:


No gradients provided for any variable, check your graph for ops that do not support gradients, between variables.

Please tell me how to fix this.

import tensorflow as tf
import numpy as np

k = 10        # input (message) length
n = 20        # codeword length
m = 5         # number of deleted bits
N = 3         # number of samples per batch
lr = 1e-3     # learning rate
he1_dim = 32  # encoder hidden layer sizes
he2_dim = 64
he3_dim = n
hd1_dim = 32  # decoder hidden layer sizes
hd2_dim = 64
hd3_dim = k

def Sample(m, n):
    """Draw an (m, n) array of uniform random numbers in [0, 1)."""
    return np.random.rand(m, n)

def Quantization(sample_x, m, n):
    """Round each entry to 0.0 or 1.0 with a 0.5 threshold (NumPy)."""
    x = np.zeros([m, n])
    for i in range(m):
        for j in range(n):
            x[i][j] = 0.0 if sample_x[i][j] < 0.5 else 1.0
    return x

def Binary(x):
    """Hard 0/1 threshold at 0.5 (TensorFlow)."""
    cond = tf.less(x, tf.fill(tf.shape(x), 0.5))
    out = tf.where(cond, tf.zeros(tf.shape(x)), tf.ones(tf.shape(x)))
    return out

def Delete(x, p, m, n):
    """For each of the p rows of x, delete m randomly chosen positions
    (NumPy randomness, fixed at graph-construction time)."""
    a = np.arange(n)
    v = tf.zeros([1, n - m])

    for i in range(p):
        mask = np.repeat(True, n)
        b = np.random.choice(a, m, replace=False)
        for ir in b:
            mask[ir] = False

        xtemp = x[i]
        row = tf.boolean_mask(xtemp, mask)  # keep the n-m surviving bits
        row = tf.cast([row], tf.float32)    # shape [1, n-m]
        v = tf.concat([v, row], 0)

    # strip the initial all-zeros row; this must happen after the loop,
    # not inside it, or every row except the last is discarded
    v = v[1:]
    return v

def xavier_init(size):
    in_dim = size[0]
    xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
    return tf.random_normal(shape=size, stddev=xavier_stddev)

X = tf.placeholder(tf.float32, shape=[None, k])
V = tf.placeholder(tf.float32, shape=[None, n-m])  # note: rebound below by the output of Delete(), so this placeholder is never fed

E_W1 = tf.Variable(xavier_init([k, he1_dim]))
E_b1 = tf.Variable(tf.zeros(shape=[he1_dim]))
E_W2 = tf.Variable(xavier_init([he1_dim, he2_dim]))
E_b2 = tf.Variable(tf.zeros(shape=[he2_dim]))
E_W3 = tf.Variable(xavier_init([he2_dim, he3_dim]))
E_b3 = tf.Variable(tf.zeros(shape=[he3_dim]))

D_W1 = tf.Variable(xavier_init([n-m, hd1_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[hd1_dim]))
D_W2 = tf.Variable(xavier_init([hd1_dim, hd2_dim]))
D_b2 = tf.Variable(tf.zeros(shape=[hd2_dim]))
D_W3 = tf.Variable(xavier_init([hd2_dim, hd3_dim]))
D_b3 = tf.Variable(tf.zeros(shape=[hd3_dim]))  
  
theta_En = [E_W1, E_W2, E_W3, E_b1, E_b2, E_b3]
theta_De = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]

def Encoder(X):
    En_h1 = tf.nn.sigmoid(tf.matmul(X, E_W1) + E_b1)
    En_h2 = tf.nn.sigmoid(tf.matmul(En_h1, E_W2) + E_b2)
    u = tf.nn.sigmoid(tf.matmul(En_h2, E_W3) + E_b3)
    return u

def Decoder(V):
    De_h1 = tf.nn.sigmoid(tf.matmul(V, D_W1) + D_b1)
    De_h2 = tf.nn.sigmoid(tf.matmul(De_h1, D_W2) + D_b2)
    De_logit = tf.matmul(De_h2, D_W3) + D_b3
    y = tf.nn.sigmoid(De_logit)
    return y, De_logit


# generate codeword u with redundancy
u = Encoder(X)
U = Binary(u)

# delete m random bits
V = Delete(U, N, m, n)

# generate output y
y, De_logit = Decoder(V)
Y = Binary(y)

# define loss function
diff = tf.subtract(X, Y)
diff = tf.abs(diff)
ham_dist = tf.reduce_sum(diff, axis=1)

loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.zeros_like(ham_dist), logits=ham_dist))

# optimizer
En_solver = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, var_list=theta_En)
De_solver = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, var_list=theta_De)

# initialization
sess = tf.Session()
sess.run(tf.global_variables_initializer())


################
sample_x = Sample(N, k)
x = Quantization(sample_x, N, k)

y_output = sess.run(Y, feed_dict={X: x})
loss_value = sess.run(loss, feed_dict={X: x})  # use a new name so the graph tensor loss is not clobbered
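
For reference, one way to locate the break is to ask TensorFlow directly which variables receive a gradient. A minimal diagnostic sketch, assuming it is placed right after the loss definition above, before the optimizers are created; tf.gradients returns None for every variable that has no gradient path to the loss:

# diagnostic sketch: list which variables have no gradient path to loss
grads = tf.gradients(loss, theta_En + theta_De)
for var, g in zip(theta_En + theta_De, grads):
    print(var.name, '-> no gradient' if g is None else '-> ok')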

I can't figure out which part of this code runs outside the graph.
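
For comparison, the thresholding pattern used in Binary() can be reproduced in isolation. A minimal standalone sketch (the toy variable w is hypothetical, standing in for the encoder output u): both branches of tf.where are constants, so the output depends on the input only through the boolean condition, which carries no gradient.

import tensorflow as tf

w = tf.Variable([0.3, 0.7])  # hypothetical toy variable

# same pattern as Binary(): constant branches, condition-only dependence on w
hard = tf.where(tf.less(w, 0.5), tf.zeros(tf.shape(w)), tf.ones(tf.shape(w)))
toy_loss = tf.reduce_sum(hard)

print(tf.gradients(toy_loss, [w]))  # prints [None] -- the same "no gradients" failure mode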

0 Answers:

There are no answers yet.