如何在mxnet中控制内核

时间:2017-10-24 09:53:39

标签: tensorflow kernel convolution mxnet

例如:在tensorflow中我们可以按照以下方式执行,我们如何在mxnet中控制内核,(权重=权重*掩码),非常感谢。

# NOTE(review): this is an excerpt cut from inside the full conv2d definition
# later in this file; the indentation is inconsistent because the snippet
# starts mid-function. Kept verbatim — see the complete function for context.
if mask_type is not None:
      # Rows strictly above the center row are fully unmasked (set to 1).
      mask[:center_h, :, :, :] = 1
      if mask_type == 'A':
        # Type 'A': on the center row, keep only pixels left of the center
        # (the center pixel itself stays masked out).
        mask[center_h, :center_w, :, :] = 1

      if mask_type == 'B':
        # Type 'B': like 'A' but the center pixel is also kept.
        mask[center_h, :center_w+1, :, :] = 1

    else:
      # No mask type: all-ones mask, i.e. an ordinary unmasked convolution.
      mask[:, :, :, :] = 1

    weights_shape = [kernel_h, kernel_w, in_channel, num_outputs]
    weights = tf.get_variable("weights", weights_shape,
      tf.float32, tf.truncated_normal_initializer(stddev=0.1))
    # The operation the question asks about: element-wise weights * mask.
    weights = weights * mask
    biases = tf.get_variable("biases", [num_outputs],
          tf.float32, tf.constant_initializer(0.0))

    outputs = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1], padding="SAME")
    outputs = tf.nn.bias_add(outputs, biases)
# ######### This is the complete TensorFlow code
def conv2d(inputs, num_outputs, kernel_shape, strides=(1, 1), mask_type=None, scope="conv2d"):
  """Masked 2-D convolution (appears to be PixelCNN-style masking).

  Args:
    inputs: 4-D input tensor; last dimension is the channel count
      (presumably NHWC layout — the conv2d call uses NHWC strides).
    num_outputs: number of output channels.
    kernel_shape: (kernel_h, kernel_w); both must be odd.
    strides: (stride_h, stride_w) convolution strides.
    mask_type: None (no masking), 'A' (center pixel masked out) or
      'B' (center pixel kept).
    scope: TensorFlow variable scope name.

  Returns:
    The convolved, bias-added output tensor (SAME padding).

  Raises:
    ValueError: if kernel height or width is even.
  """
  with tf.variable_scope(scope):
    kernel_h, kernel_w = kernel_shape
    stride_h, stride_w = strides
    # Only the channel count is needed; batch/height/width were unused.
    in_channel = inputs.get_shape().as_list()[-1]

    # Raise instead of assert: asserts are stripped under `python -O`,
    # and an even kernel would silently mis-center the mask.
    if kernel_h % 2 != 1 or kernel_w % 2 != 1:
      raise ValueError("kernel height and width must be odd number")

    center_h = kernel_h // 2
    center_w = kernel_w // 2

    # Binary mask over the kernel: 1 = weight kept, 0 = weight zeroed.
    mask = np.zeros((kernel_h, kernel_w, in_channel, num_outputs), dtype=np.float32)
    if mask_type is not None:
      # Rows strictly above the center row are fully visible.
      mask[:center_h, :, :, :] = 1
      if mask_type == 'A':
        # Type 'A': on the center row, only pixels left of the center.
        mask[center_h, :center_w, :, :] = 1

      if mask_type == 'B':
        # Type 'B': the center pixel itself is also visible.
        mask[center_h, :center_w+1, :, :] = 1

    else:
      # No masking requested: all-ones mask, ordinary convolution.
      mask[:, :, :, :] = 1

    weights_shape = [kernel_h, kernel_w, in_channel, num_outputs]
    weights = tf.get_variable("weights", weights_shape,
      tf.float32, tf.truncated_normal_initializer(stddev=0.1))
    # Zero out the masked weight entries before convolving.
    weights = weights * mask
    biases = tf.get_variable("biases", [num_outputs],
          tf.float32, tf.constant_initializer(0.0))

    outputs = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1], padding="SAME")
    outputs = tf.nn.bias_add(outputs, biases)

    return outputs

1 个答案:

答案 0 :(得分:1)

在MXNet中,您可以通过首先创建权重变量然后将该变量用于卷积运算符来使用您自己的变量作为权重。例如:

weight = mx.sym.Variable('weights', init=mx.initializer.Xavier())
conv1 = mx.sym.Convolution(data=data, weight=weight, kernel=(5,5), num_filter=20)

如果您想像在代码中那样对重量应用蒙版:

mask = ...  # create the mask you want (e.g. with numpy, as in the TF code)
weight = weight * mask