TensorFlow tf.py_func issue when passing two values

Time: 2018-12-21 04:31:57

Tags: tensorflow

I am using tf.py_func in the model below, passing two arguments to the py_func wrapper. I need the gradient function _Accm1_grad_op for the graph to compile, but even though it builds successfully, it fails at run time with the following error:

You can copy and paste the code and run it; you will see the same error.

InvalidArgumentError (see above for traceback): pyfunc_25 returns 2 values, but expects to see 1 values.
     [[Node: conv1/AddGrad = PyFunc[Tin=[DT_FLOAT, DT_FLOAT], Tout=[DT_FLOAT], _gradient_op_type="PyFuncGrad4682895", token="pyfunc_25", _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_Placeholder_1_0_1, conv1/Conv2D)]]
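For reference, the way I read the tf.py_func documentation, Tout must list one dtype per value that the wrapped Python function returns. A minimal standalone sketch of that contract (just the pattern, not my model):

import tensorflow as tf

def two_outputs(a, b):
    # two returned arrays -> two dtypes in Tout below
    return a, b

p = tf.placeholder(tf.float32, [2])
q = tf.placeholder(tf.float32, [2])
out_a, out_b = tf.py_func(two_outputs, [p, q], [tf.float32, tf.float32])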

I would really appreciate your help. Can you tell me what I am doing wrong? Please consider it my Christmas present.

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

tf.reset_default_graph()
graph = tf.Graph()

def accum(accp, a):
    # forward pass: pass both inputs straight through
    return (accp, a)

def _Accm1_grad_op(op, grad):
    x = tf.to_float(op.inputs[0])  # 192x2
    y = tf.to_float(op.inputs[1])  # 12x16
    return grad * x, grad * y

def pyaccum(x, y, name=None):
    with tf.name_scope(name, "AddGrad", [x, y]) as name:
        acc = py_func(accum,
                      [x, y],
                      [tf.float32],
                      name=name,
                      grad=_Accm1_grad_op)  # <-- here's the call to the gradient
        return acc

def py_func(func, inp, Tout, stateful=True, name=None, grad=None, graph=graph):
    # register the custom gradient under a unique random name
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 10**8))
    print("accum_identifier = %s" % rnd_name)
    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    with graph.gradient_override_map({"PyFunc": rnd_name}):
        y = tf.py_func(func, inp, Tout, stateful=stateful, name=name)
        return y
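
# Note on the recipe above (my understanding of the widely used
# gradient_override_map workaround): tf.py_func has no gradient registered
# by default, so the wrapper registers `grad` under a fresh random name and
# tells the graph to use that registration for any PyFunc node created
# inside the `with` block. The random suffix only prevents registering the
# same gradient name twice across calls.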

def IR_net(in1, in2):
    with tf.variable_scope("conv1"):
        # initializer1 is not defined in this snippet
        W = tf.get_variable("weights", [2, 2, 1, 1], initializer=initializer1(1))
        conv1 = tf.nn.conv2d(in1, W, strides=[1, 1, 1, 1], padding="SAME")   # 256
        out1 = pyaccum(in2, conv1)
        return out1

batch_size = 1

with graph.as_default():

    x_in_1 = tf.placeholder(tf.float32, shape=(batch_size, 4, 16, 1))
    x_in_2 = tf.placeholder(tf.float32, shape=(64, 2))

    net = IR_net(x_in_1, x_in_2)
    nx = tf.ones((1, 8, 8, 1), dtype=tf.float32)

    loss = tf.reduce_mean(tf.abs(net[0] - nx))
    optimizer = tf.train.AdamOptimizer(learning_rate=.1, beta1=.9, beta2=.99)
    train_step = optimizer.minimize(loss)
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())


step = 0
with tf.Session(graph=graph) as sess:
    sess.run(init)
    feed_dict = {}
    losses = []
    while step < 10000:
        a = np.random.randn(1, 4, 16, 1)
        feed_dict[x_in_1] = np.asarray(a, dtype=np.float32)
        accmap = np.asarray(np.random.randint(7, size=(64, 2)), dtype=np.float32)
        feed_dict[x_in_2] = accmap

        step += 1
        uv = sess.run(net, feed_dict=feed_dict)
        _, l = sess.run([train_step, loss], feed_dict=feed_dict)
        losses.append(l)
        print(l)

plt.figure()
plt.plot(losses)
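
For context, my wrapper follows the well-known gradient_override_map recipe for attaching a gradient to tf.py_func. A minimal self-contained sketch of that recipe with a single input and output (my simplification for illustration, not the model above):

import numpy as np
import tensorflow as tf

def _my_square_grad(op, grad):
    x = op.inputs[0]
    return grad * 2.0 * x  # d(x^2)/dx

def my_square(x, name=None):
    with tf.name_scope(name, "MySquare", [x]) as name:
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 10**8))
        tf.RegisterGradient(rnd_name)(_my_square_grad)
        g = tf.get_default_graph()
        with g.gradient_override_map({"PyFunc": rnd_name}):
            # np.square returns one array, so Tout lists exactly one dtype
            return tf.py_func(np.square, [x], [tf.float32], stateful=True, name=name)[0]

with tf.Graph().as_default():
    x = tf.constant([2.0, 3.0])
    y = my_square(x)
    dy_dx = tf.gradients(y, x)[0]
    with tf.Session() as sess:
        print(sess.run([y, dy_dx]))  # expect [4., 9.] and [4., 6.]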

0 Answers:

No answers