Does TensorFlow not follow Python's LEGB rules?

Date: 2018-09-27 11:47:15

Tags: python tensorflow

I was going through a TensorFlow implementation of the Inception network and came across the following implementation of the inception module. The first branch of the module is conv_11, which is stacked together with conv_33 and conv_55 to form the module's output. Before conv_33 and conv_55, the module's input first passes through a 1x1 convolution (named conv_33_reduce and conv_55_reduce) to reduce the depth (a bottleneck operation).
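
To make the stacking concrete, here is a small NumPy sketch of the final concat step (the channel counts are made-up numbers for illustration, not taken from this implementation):

    import numpy as np

    batch, h, w = 1, 28, 28
    conv_11 = np.zeros((batch, h, w, 64))   # 1x1 branch
    conv_33 = np.zeros((batch, h, w, 128))  # 3x3 branch (after its 1x1 reduce)
    conv_55 = np.zeros((batch, h, w, 32))   # 5x5 branch (after its 1x1 reduce)
    pool    = np.zeros((batch, h, w, 32))   # pooling branch projection

    # The branches share spatial dimensions and differ only in depth,
    # so they stack along axis 3, like the tf.concat(..., 3) below.
    output = np.concatenate([conv_11, conv_33, conv_55, pool], axis=3)
    print(output.shape)  # (1, 28, 28, 256)

The actual inception_layer implementation is as follows: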

@add_arg_scope
def inception_layer(conv_11_size, conv_33_reduce_size, conv_33_size,
                    conv_55_reduce_size, conv_55_size, pool_size,
                    layer_dict, inputs=None,
                    bn=False, wd=0, init_w=None,
                    pretrained_dict=None, trainable=True, is_training=True,
                    name='inception'):

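    # With no explicit inputs, fall back on the running output that the
    # previous layer stored under layer_dict['cur_input'].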
    if inputs is None:
        inputs = layer_dict['cur_input']
    layer_dict['cur_input'] = inputs

    arg_scope = tf.contrib.framework.arg_scope
    with arg_scope([L.conv], layer_dict=layer_dict, pretrained_dict=pretrained_dict,
                   bn=bn, nl=tf.nn.relu, init_w=init_w, trainable=trainable,
                   is_training=is_training, wd=wd, add_summary=False):

        conv_11 = L.conv(filter_size=1, out_dim=conv_11_size,
                         inputs=inputs, name='{}_1x1'.format(name))

        L.conv(filter_size=1, out_dim=conv_33_reduce_size,
               inputs=inputs, name='{}_3x3_reduce'.format(name))

The input to conv_33 should be the `layer_dict` output of the L.conv call above, which is the bottleneck operation that reduces the tensor depth; however, the output of that L.conv call is never assigned to a variable.

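        # No explicit inputs here: L.conv falls back on layer_dict['cur_input'],
        # which the 3x3_reduce call above has just updated.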
        conv_33 = L.conv(filter_size=3, out_dim=conv_33_size,
                         name='{}_3x3'.format(name))

        L.conv(filter_size=1, out_dim=conv_55_reduce_size,
               inputs=inputs, name='{}_5x5_reduce'.format(name))

Similarly, the output of the L.conv above is never captured for the conv_55 operation.

        conv_55 = L.conv(filter_size=5, out_dim=conv_55_size,
                         name='{}_5x5'.format(name))

        L.max_pool(layer_dict=layer_dict, inputs=inputs, stride=1,
                   filter_size=3, padding='SAME', name='{}_pool'.format(name))
        convpool = L.conv(filter_size=1, out_dim=pool_size,
                          name='{}_pool_proj'.format(name))

        output = tf.concat([conv_11, conv_33, conv_55, convpool], 3,
                           name='{}_concat'.format(name))
        layer_dict['cur_input'] = output
        layer_dict[name] = output
    return output

Below is the code for L.conv, which defines the convolution operation. L.conv stores its output in a dictionary named `layer_dict`, which is supposed to feed the next L.conv as its input. However, in the `inception_layer` method above, the output of L.conv is not captured for the conv_33 and conv_55 operations. My question is: since layer_dict is a local object, how do conv_33 and conv_55 access the updated layer_dict? (I try to illustrate my reading of this with a small sketch after the code.)

@add_arg_scope
def conv(filter_size,
         out_dim,
         layer_dict,
         inputs=None,
         pretrained_dict=None,
         stride=1,
         dilations=[1, 1, 1, 1],
         bn=False,
         nl=tf.identity,
         init_w=None,
         init_b=tf.zeros_initializer(),
         use_bias=True,
         padding='SAME',
         pad_type='ZERO',
         trainable=True,
         is_training=None,
         wd=0,
         name='conv',
         add_summary=False):
    if inputs is None:
        inputs = layer_dict['cur_input']
    stride = get_shape4D(stride)
    in_dim = inputs.get_shape().as_list()[-1]
    filter_shape = get_shape2D(filter_size) + [in_dim, out_dim]

    if padding == 'SAME' and pad_type == 'REFLECT':
        pad_size_1 = int((filter_shape[0] - 1) / 2)
        pad_size_2 = int((filter_shape[1] - 1) / 2)
        inputs = tf.pad(
            inputs,
            [[0, 0], [pad_size_1, pad_size_1], [pad_size_2, pad_size_2], [0, 0]],
            "REFLECT")
        padding = 'VALID'

    with tf.variable_scope(name):
        if wd > 0:
            regularizer = tf.contrib.layers.l2_regularizer(scale=wd)
        else:
            regularizer = None

        if pretrained_dict is not None and name in pretrained_dict:
            try:
                load_w = pretrained_dict[name][0]
            except KeyError:
                load_w = pretrained_dict[name]['weights']
            print('Load {} weights!'.format(name))

            load_w = np.reshape(load_w, filter_shape)
            init_w = tf.constant_initializer(load_w)

        weights = tf.get_variable('weights',
                                  filter_shape,
                                  initializer=init_w,
                                  trainable=trainable,
                                  regularizer=regularizer)
        if add_summary:
            tf.summary.histogram(
                'weights/{}'.format(name), weights, collections=['train'])

        outputs = tf.nn.conv2d(inputs,
                               filter=weights,
                               strides=stride,
                               padding=padding,
                               use_cudnn_on_gpu=True,
                               data_format="NHWC",
                               dilations=dilations,
                               name='conv2d')

        if use_bias:
            if pretrained_dict is not None and name in pretrained_dict:
                try:
                    load_b = pretrained_dict[name][1]
                except KeyError:
                    load_b = pretrained_dict[name]['biases']
                print('Load {} biases!'.format(name))

                load_b = np.reshape(load_b, [out_dim])
                init_b = tf.constant_initializer(load_b)

            biases = tf.get_variable('biases',
                                     [out_dim],
                                     initializer=init_b,
                                     trainable=trainable)
            outputs += biases

        if bn is True:
            outputs = batch_norm(outputs, train=is_training, name='bn')

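        # Record this layer's output in layer_dict so that a later call
        # with inputs=None can pick it up from 'cur_input'.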
        layer_dict['cur_input'] = nl(outputs)
        layer_dict[name] = layer_dict['cur_input']
        return layer_dict['cur_input']
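
Here is a minimal sketch of what I think is going on, in plain Python with no TensorFlow (`fake_conv` and `arg_scope_like` are made-up names standing in for L.conv and tf.contrib.framework.arg_scope):

    import functools

    def arg_scope_like(func, **fixed_kwargs):
        # Stand-in for arg_scope: pre-binds default keyword arguments,
        # including the shared layer_dict object.
        return functools.partial(func, **fixed_kwargs)

    def fake_conv(name, layer_dict, inputs=None):
        # Mirrors L.conv's fallback: with no explicit inputs, read the
        # running output from the shared dict.
        if inputs is None:
            inputs = layer_dict['cur_input']
        output = '{}({})'.format(name, inputs)
        # Mutates the SAME dict object the caller holds a reference to.
        layer_dict['cur_input'] = output
        layer_dict[name] = output
        return output

    layer_dict = {'cur_input': 'image'}
    conv = arg_scope_like(fake_conv, layer_dict=layer_dict)

    conv('conv_33_reduce', inputs='image')  # return value discarded, as in inception_layer
    conv_33 = conv('conv_33')               # picks up the update through layer_dict
    print(conv_33)                          # prints: conv_33(conv_33_reduce(image))

If this reading is right, the updates are visible not because of any LEGB lookup but because inception_layer and conv share references to one mutable dict object. Is that the actual mechanism here?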

0 Answers:

There are no answers yet.