How do I call a Keras function from a TensorFlow graph for multi-task learning?

Asked: 2019-01-29 07:53:03

Tags: python tensorflow keras

I have TensorFlow code for segmentation, which I have modified so that a bag-of-words (BoW) model acts as a feature extractor and the extracted features are then used for a classification problem. The idea is to pass the output of an intermediate CNN layer to the BoW function as its input, get the result back, and have the remaining CNN layers process it further.
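Conceptually, this is the pattern I am aiming for. The sketch below is only an illustration: extract_bow_features is a stand-in for my real BoW extractor and the shapes are made up.

    import numpy as np
    import tensorflow as tf

    def extract_bow_features(feature_map):
        # Inside tf.py_func the argument arrives as a plain numpy array, so
        # ordinary numpy/OpenCV/sklearn code can run here. This stand-in just
        # averages the channel dimension instead of building a real BoW histogram.
        return np.mean(feature_map, axis=-1, keepdims=True).astype(np.float32)

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 64])   # intermediate CNN output
    bow = tf.py_func(extract_bow_features, [x], tf.float32)    # result flows back into the graph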

I tried the following code:

def CNN_B(im_size,out_size,L,batch,ind,batch_size=1,layers = 5, wd=0.001, numfilt=None, E_blur=2,stack_from=2):

   if numfilt is None:
     numfilt = np.ones(layers,dtype=np.int32)*32
   #Input and output
   x = tf.placeholder(tf.float32, shape=[im_size, im_size, 3, batch_size])
   x_image = tf.reshape(x, [-1, im_size, im_size, 3])
   y_ = tf.placeholder(tf.float32, shape=[L,2])
   bov=BOV()
   W_conv = []
   b_conv = []
   h_conv = []
   h_pool = []
   resized_out = []
   W_conv.append(weight_variable([7, 7, 3, numfilt[0]], wd=wd))
   b_conv.append(bias_variable([numfilt[0]]))
   h_conv.append(tf.nn.relu(conv2d(x_image, W_conv[-1],padding='SAME') + b_conv[-1]))
   h_pool.append(batch_norm(max_pool_2x2(h_conv[-1])))

   for layer in range(1,layers):
       if layer == 1:
           W_conv.append(weight_variable([5, 5, numfilt[layer-1], numfilt[layer]],wd=wd))
       else:
           W_conv.append(weight_variable([3, 3, numfilt[layer-1], numfilt[layer]], wd=wd))
       b_conv.append(bias_variable([numfilt[layer]]))
       h_conv.append(tf.nn.relu(conv2d(h_pool[-1], W_conv[-1],padding='SAME') + b_conv[-1]))
       h_pool.append(batch_norm(max_pool_2x2(h_conv[-1])))
       if layer >= stack_from:
           resized_out.append(tf.image.resize_images(h_conv[-1], [out_size, out_size]))

   h_concat = tf.concat(resized_out,3)

   # MLP for dimension reduction
   W_convd = weight_variable([1, 1, int(h_concat.shape[3]), 256], wd=wd)
   b_convd = bias_variable([256])
   h_convd = batch_norm(tf.nn.relu(conv2d(h_concat, W_convd) + b_convd))

   # MLP for dimension reduction
   W_convf = weight_variable([1, 1, 256, 64],wd=wd)
   b_convf = bias_variable([64])
   h_convf = batch_norm(tf.nn.relu(conv2d(h_convd, W_convf) + b_convf))

   #Predict energy
   W_fcE = weight_variable([1, 1, 64, 1],wd=wd)
   b_fcE = bias_variable([1])
   h_fcE = conv2d(h_convf, W_fcE) + b_fcE
   G_filt = gaussian_filter((9,9), E_blur)
   predE = tf.reshape(conv2d(h_fcE,G_filt), [out_size, out_size, 1, -1])

   # Predict alpha
   W_fcA = weight_variable([1, 1, 64, 1],wd=wd)
   b_fcA = bias_variable([1])
   h_fcA = conv2d(h_convf, W_fcA) + b_fcA
   h_fcA = tf.reduce_mean(h_fcA) + h_fcA * 0
   # predA = tf.nn.softplus(tf.reshape(h_fcA,[im_size,im_size,1,-1]))
   predA = tf.reshape(h_fcA, [out_size, out_size, 1, -1])
   # Predict beta
   W_fcB = weight_variable([1, 1, 64, 1],wd=wd)
   b_fcB = bias_variable([1])
   h_fcB = conv2d(h_convf, W_fcB) + b_fcB
   #h_fcB = tf.log(1+tf.exp(h_fcB))
   predB = tf.reshape(h_fcB, [out_size, out_size, 1, -1])
   # Predict kappa
   W_fcK = weight_variable([1, 1, 64, 1],wd=wd)
   b_fcK = bias_variable([1])
   h_fcK = conv2d(h_convf, W_fcK) + b_fcK
   #h_fcK = tf.log(1+tf.exp(h_fcK))
   predK = tf.reshape(h_fcK, [out_size, out_size, 1, -1])

   #Inject the gradients
   grad_predE = tf.placeholder(tf.float32, shape=[out_size, out_size, 1, batch_size])
   grad_predA = tf.placeholder(tf.float32, shape=[out_size, out_size, 1, batch_size])
   grad_predB = tf.placeholder(tf.float32, shape=[out_size, out_size, 1, batch_size])
   grad_predK = tf.placeholder(tf.float32, shape=[out_size, out_size, 1, batch_size])
   l2loss = tf.add_n(tf.get_collection('losses'), name='l2_loss')
   grad_l2loss = tf.placeholder(tf.float32, shape=[])
   tvars = tf.trainable_variables()
   grads = tf.gradients([predE,predA,predB,predK,l2loss], tvars, grad_ys = [grad_predE,grad_predA,grad_predB,grad_predK,grad_l2loss])

   ##classification model
   x_1 = tf.placeholder(tf.float32, shape=[100, None])
   y_1 = tf.placeholder(tf.float32, shape=[None, 3])
   vocab = tf.placeholder(tf.float32, shape=[100, 1])
   x1 = batch[ind]
   feature_im=tf.reshape(h_conv[5], [h_conv[5].shape[0]*h_conv[5].shape[1]*h_conv[5].shape[2], h_conv[5].shape[3]]) 
   vocab = tf.py_func(bov.trainModel(feature_im), [feature_im], tf.float32)

...................
...................
...................
   return tvars,grads,predE, predA, predB, predK, l2loss, grad_predE, grad_predA, grad_predB, grad_predK, grad_l2loss, x,y_,x_1,y_1, vocab, train_step, loss, ind, accuracy, feature_im 

CNN_B is called as follows:

with tf.device('/cpu:0'):
    tvars, grads, predE, predA, predB, predK, l2loss, grad_predE, grad_predA, grad_predB, grad_predK, \
    grad_l2loss, x, y_, x_1, y_1, train_step, loss, ind, accuracy = CNN_B(im_size, out_size, L, batch_x, ind, \
                                               batch_size=1, layers=len(numfilt), wd=0.001, numfilt=numfilt)

At each epoch, the session is run to compute the following values:

 [mapE, mapA, mapB, mapK, l2,_,c, accuracy_val] = sess.run([predE, predA, predB, predK, l2loss, train_step, loss, accuracy], feed_dict={x: batch, x_1: x1, y_1: y1})

With this approach I get the following error:

    TypeError: Tensor objects are only iterable when eager execution is enabled. To iterate over this tensor use tf.map_fn.
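My current guess is that the problem is how I invoke tf.py_func: I call bov.trainModel(feature_im) immediately, handing it a graph Tensor, instead of passing the callable itself, which is what I believe py_func expects:

    # what I believe the call should look like (my assumption, untested):
    vocab = tf.py_func(bov.trainModel, [feature_im], tf.float32)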

I also tried converting the tensor feature_im to a numpy array with feature_im.eval(session=sess), but then I get the following error:

    ValueError: Cannot evaluate tensor using `eval()`: No default session is registered. Use `with sess.as_default()` or pass an explicit session to `eval(session=sess)`
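For reference, this is how I understand the explicit-session pattern the error message refers to (a sketch; feature_im depends on the x placeholder, so it would still need a feed_dict):

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # equivalent to feature_im.eval(session=sess, feed_dict=...) inside this block
        feats = sess.run(feature_im, feed_dict={x: batch})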

The code for bov.trainModel:

def trainModel(self, feat):

    kp, des = feat
    print("kp shape=", np.shape(kp))
    print(np.shape(des))
    descriptor_list.append(des)

    # perform clustering
    bov_descriptor_stack = bov_helper.formatND(descriptor_list)
    bov_helper.cluster()
    bagfeatures=bov_helper.developVocabulary(n_images = trainImageCount, descriptor_list=descriptor_list)
    print(np.shape(bagfeatures))

    # show vocabulary trained
    # bov_helper.plotHist()

    bagfeatures = bov_helper.standardize()
    # bov_helper.train(train_labels)
    return bagfeatures
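For what it's worth, I suspect the TypeError comes from the unpacking kp, des = feat: because trainModel is called directly, feat is still a symbolic Tensor when that line runs, and iterating over a Tensor in graph mode raises exactly that error. A tiny example of what I mean (an assumption on my part):

    import tensorflow as tf

    t = tf.placeholder(tf.float32, shape=[2, 3])
    kp, des = t   # raises: Tensor objects are only iterable when eager execution is enabled...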

0 Answers

No answers yet.