TensorFlow TypeError:求梯度时 Fetch 参数为 None,无效类型 &lt;class 'NoneType'&gt;

时间:2018-03-14 07:50:09

标签: python tensorflow backpropagation tensorflow-gradient

这是一个使用CNN模型的文本分类任务,我想可视化哪个单词对特定分类的贡献最大,下面是我的代码:

# Restore a trained text-CNN from a checkpoint and back-propagate from the
# conv/pool layer toward the input (unpooling + deconvolution) to visualize
# which words contribute most to a classification.
with tf.Session() as sess:
    # Load the graph definition and the trained weights.
    saver = tf.train.import_meta_graph('/home/rakesh/WORK/CNN_Lookout/runs/1519022246/checkpoints/model-200.meta')
    saver.restore(sess, tf.train.latest_checkpoint('/home/rakesh/WORK/CNN_Lookout/runs/1519022246/checkpoints/./'))

    graph = tf.get_default_graph()
    # NOTE(review): `feed_dict` used below is never defined in this snippet;
    # it must be built ({input_x: ..., dropout_keep_prob: 1.0, ...}) before
    # any sess.run call.
    input_x = graph.get_tensor_by_name("input_x:0")  # integer word-id indices
    input_y = graph.get_tensor_by_name("input_y:0")
    dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
    embedding_W = graph.get_tensor_by_name("embedding/W:0")

    # Re-create the embedding lookup: this is the float path from the
    # integer indices into the differentiable part of the graph.
    embedded_chars = tf.nn.embedding_lookup(embedding_W, input_x)
    embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)

    # Tensors of the three parallel conv-maxpool branches.
    conv_maxpool_5_W = graph.get_tensor_by_name("conv-maxpool-5/W:0")
    conv_maxpool_5_b = graph.get_tensor_by_name("conv-maxpool-5/b:0")
    # Fix: the original fetched an Operation here (get_operation_by_name)
    # while the sibling branches fetch the output tensor; use the ":0"
    # tensor for consistency.
    conv_maxpool_5_conv = graph.get_tensor_by_name("conv-maxpool-5/conv:0")
    conv_maxpool_5_relu = graph.get_tensor_by_name("conv-maxpool-5/relu:0")
    conv_maxpool_5_pool = graph.get_tensor_by_name("conv-maxpool-5/pool:0")

    conv_maxpool_7_W = graph.get_tensor_by_name("conv-maxpool-7/W:0")
    conv_maxpool_7_b = graph.get_tensor_by_name("conv-maxpool-7/b:0")
    conv_maxpool_7_conv = graph.get_tensor_by_name("conv-maxpool-7/conv:0")
    conv_maxpool_7_relu = graph.get_tensor_by_name("conv-maxpool-7/relu:0")
    conv_maxpool_7_pool = graph.get_tensor_by_name("conv-maxpool-7/pool:0")

    conv_maxpool_9_W = graph.get_tensor_by_name("conv-maxpool-9/W:0")
    conv_maxpool_9_b = graph.get_tensor_by_name("conv-maxpool-9/b:0")
    conv_maxpool_9_conv = graph.get_tensor_by_name("conv-maxpool-9/conv:0")
    conv_maxpool_9_relu = graph.get_tensor_by_name("conv-maxpool-9/relu:0")
    conv_maxpool_9_pool = graph.get_tensor_by_name("conv-maxpool-9/pool:0")

    # Combine all the pooled features (128 filters per branch, 3 branches).
    num_filters_total = 128 * 3
    pooled_outputs = [conv_maxpool_5_pool, conv_maxpool_7_pool, conv_maxpool_9_pool]
    print(conv_maxpool_5_pool.shape)
    h_pool = tf.concat(pooled_outputs, 3)
    h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])

    # NOTE(review): "W:0" looks inconsistent with "output/b:0" — confirm
    # whether the output-layer weights are actually named "output/W:0".
    final_w = graph.get_tensor_by_name("W:0")
    final_b = graph.get_tensor_by_name("output/b:0")

    # Unpooling: the gradient of max-pool w.r.t. its input is a 0/1 mask
    # selecting the argmax positions, so multiplying it by the relu output
    # routes the pooled activations back to where they came from.
    Ps = tf.gradients(conv_maxpool_5_pool, conv_maxpool_5_relu)[0]
    unpooled = tf.multiply(Ps, conv_maxpool_5_relu)
    print(sess.run(unpooled, feed_dict))
    print(sess.run(unpooled, feed_dict).shape)

    # Deconv: transpose-convolve the unpooled activations back to the
    # shape of the expanded embedding.
    batch_size = tf.shape(input_x)[0]
    ds = [batch_size]
    ds.append(embedded_chars_expanded.get_shape()[1])
    ds.append(embedded_chars_expanded.get_shape()[2])
    ds.append(embedded_chars_expanded.get_shape()[3])
    deconv_shape = tf.stack(ds)
    deconv = tf.nn.conv2d_transpose(
        unpooled,
        conv_maxpool_5_W,
        deconv_shape,
        strides=[1, 1, 1, 1],
        padding='VALID',
        name="Deconv")
    print(sess.run(deconv, feed_dict))
    print(sess.run(deconv, feed_dict).shape)
    print(sess.run(embedded_chars_expanded, feed_dict).shape)

    # Reshape back to the original word2vec embedding layout
    # (batch, sequence_length, embedding_dim).
    embedded_chars_expanded_shape = sess.run(embedded_chars_expanded, feed_dict).shape
    embedded_chars_back = tf.reshape(
        deconv,
        [embedded_chars_expanded_shape[0],
         embedded_chars_expanded_shape[1],
         embedded_chars_expanded_shape[2]])
    print(sess.run(embedded_chars_back, feed_dict))
    print(sess.run(embedded_chars_back, feed_dict).shape)
    print(sess.run(embedded_chars, feed_dict))
    print(sess.run(embedded_chars, feed_dict).shape)

    # Original input (word ids).
    print(sess.run(input_x, feed_dict))
    print(sess.run(input_x, feed_dict).shape)

    # --- Fix for "TypeError: Fetch argument None has invalid type" ---
    # tf.gradients(embedded_chars_back, input_x) returns None because
    # input_x holds INTEGER word indices: tf.nn.embedding_lookup is not
    # differentiable with respect to its indices, so there is no
    # differentiable path ending at input_x and tf.gradients yields None,
    # which then fails as a sess.run fetch.  Differentiate with respect to
    # the float embedding output instead, and reduce over the embedding
    # dimension to obtain one relevance score per word position.
    grad_wrt_embedded = tf.gradients(embedded_chars_back, embedded_chars)[0]
    word_relevance = tf.reduce_sum(tf.abs(grad_wrt_embedded * embedded_chars), axis=-1)
    print(sess.run(word_relevance, feed_dict))
    print(sess.run(word_relevance, feed_dict).shape)

最后一步,我希望利用嵌入层的梯度还原出原始输入,但结果变成了 None 类型。前向传播中我使用 Google word2vec 模型做词嵌入。

input_back = (tf.gradients(embedded_chars_back, input_x))[0]  #becoming none

print(sess.run(input_back , feed_dict)) 

最后一行产生如下错误:

Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<string>", line 155, in <module>
File "/home/rakesh/anaconda3/envs/tensorflow/lib/python3.5/site-
packages/tensorflow/python/client/session.py", line 889, in run
run_metadata_ptr)
File "/home/rakesh/anaconda3/envs/tensorflow/lib/python3.5/site-
packages/tensorflow/python/client/session.py", line 1105, in _run
self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
File "/home/rakesh/anaconda3/envs/tensorflow/lib/python3.5/site-
packages/tensorflow/python/client/session.py", line 414, in __init__
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
File "/home/rakesh/anaconda3/envs/tensorflow/lib/python3.5/site-
packages/tensorflow/python/client/session.py", line 231, in for_fetch
(fetch, type(fetch)))
TypeError: Fetch argument None has invalid type <class 'NoneType'>

我想直观地显示最后一个cnn层直到输入,所以我想知道在输入之前反向传播多少,哪个词贡献最多,

1 个答案:

答案 0 :(得分:0)

我没有看到明确的问题,但我假设您想知道为什么梯度返回 None。当 x 和 y 之间不存在可微分的操作序列时(或者您显式传入了 stop_gradients),tf.gradients(以及各种类似方法)会返回 None。

如果从代码中看不出(至少我看不出)计算图在哪里断开了,您可以在 TensorBoard 中查看计算图,或者对 x 和 y 之间的操作序列做二分查找,以缩小梯度变为 None 的确切位置。