Python Conv Neural Network Inference Function Problem

Posted: 2018-05-31 01:30:07

Tags: python conv-neural-network

I have never programmed in Python before, and I have to write an inference function for a class. Most of the code was already written for the students; we only need to implement the inference function. I found some examples online, but we have to use different layers than the online examples. I'm confused about the reshaping, and I'm not really sure about any of the code.

Here is what I have:

    import tensorflow as tf

    def _variable_with_weight_decay(name, shape, wd):
      with tf.device('/cpu:0'):
        var = tf.get_variable(name, shape,
                              initializer=tf.contrib.layers.xavier_initializer())
      if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
      return var

    def inference(X, phase=False, dropout_rate=0.8, n_classes=10, weight_decay=1e-4):
      # logits should be of dimension (batch_size, n_classes)
      # X is a tensor with dimension (None, 32, 32, 3)

      # conv layer 1
      batchSize = tf.shape(X)[0]  # is this batch_size?

      with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], wd=weight_decay)
        conv = tf.nn.conv2d(X, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [64], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

      # batch normalization 1
      epsilon = 1e-3
      batch_mean1, batch_var1 = tf.nn.moments(pool1, [0])
      scale1 = tf.Variable(tf.ones([64]))
      beta1 = tf.Variable(tf.zeros([64]))
      batch1 = tf.nn.batch_normalization(pool1, batch_mean1, batch_var1, scale1, beta1, epsilon)

      # conv layer 2
      with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], wd=weight_decay)
        conv = tf.nn.conv2d(batch1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [64], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

      # batch normalization 2
      batch_mean2, batch_var2 = tf.nn.moments(pool2, [0])
      keep_prob = tf.placeholder(tf.float32)
      scale2 = tf.Variable(tf.ones([64]))
      beta2 = tf.Variable(tf.zeros([64]))
      batch2 = tf.nn.batch_normalization(pool2, batch_mean2, batch_var2, scale2, beta2, epsilon)

      # dropout 2
      drop2 = tf.nn.dropout(batch2, keep_prob)

      # local (fully connected) layer 1
      with tf.variable_scope('local1') as scope:
        reshape = tf.reshape(drop2, [batchSize, -1])
        #dim = reshape.get_shape()[1].value
        dim = reshape[-1].value
        weights = _variable_with_weight_decay('weights', shape=[dim, 384], wd=0.004)
        biases = tf.get_variable('biases', [384], initializer=tf.constant_initializer(0.1))
        local1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        #_activation_summary(local1)

      # local layer 2
      with tf.variable_scope('local2') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192], wd=0.004)
        biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))
        local2 = tf.nn.relu(tf.matmul(local1, weights) + biases, name=scope.name)

      # softmax / logits layer
      with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, n_classes], wd=None)
        biases = tf.get_variable('biases', [n_classes], initializer=tf.constant_initializer(0.1))
        logits = tf.add(tf.matmul(local2, weights), biases, name=scope.name)

      return logits
I'm supposed to have 4 convolutional layers, but this is already too confusing for me. Right now it breaks: it keeps saying the rank is 1 and 0, or something like that. How do I reshape it to (batch_size, n_samples)? I also know the indentation was off when I pasted it in here; that's not how my actual code looks.
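From the examples I found online (I'm not sure this is what we're supposed to do), the batch size shows up in two different ways, and the flattening usually leaves the batch dimension as -1 instead of passing the dynamic batch size in. A small standalone snippet of what I mean (the placeholder and shapes here are just for illustration, not from the assignment):

    import tensorflow as tf

    X = tf.placeholder(tf.float32, [None, 32, 32, 3])  # hypothetical input, batch dim unknown
    dynamic_batch = tf.shape(X)[0]             # a scalar tensor, only known when the graph runs
    static_batch = X.get_shape().as_list()[0]  # None at graph-construction time

    # flatten to (batch_size, n_features): -1 lets TF infer the batch size at run time,
    # while 32*32*3 is a plain Python int, so later layers see a fully defined feature dim
    flat = tf.reshape(X, [-1, 32 * 32 * 3])
    print(flat.get_shape())                    # prints (?, 3072)

Is that the right idea for the reshape, or am I missing something?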

Thanks

EDIT --------------------------------------------------------------

OK, so I fixed some of it, but the line that's giving me a problem now is

    dim = reshape.get_shape()[1].value

My batch size is 32 until the very end, where it becomes 16. If there are 336 samples total, there will be 10 full batches of 32 and 1 batch of 16. The line above gives the error: "Shape of a new variable must be fully defined, but instead was (?, 256)."
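From what I can tell, dim comes out as None because the second dimension of the reshape can't be inferred when the batch is unknown. I found examples that instead compute the flattened size from the other (known) dimensions and leave the batch as -1, roughly like this for my local1 block (this reuses batch4 and the helper from the code below, so it's just a sketch; I don't know if it's what the assignment wants):

    import numpy as np

    with tf.variable_scope('local1') as scope:
      # height, width and channels of batch4 are statically known
      # (2 x 2 x 256 = 1024 for 32x32 inputs after four stride-2 pools),
      # so the flattened feature count is a plain Python int
      dim = int(np.prod(batch4.get_shape().as_list()[1:]))
      reshape = tf.reshape(batch4, [-1, dim])  # batch stays dynamic, feature dim fully defined
      weights = _variable_with_weight_decay('weights', shape=[dim, 256], wd=0.004)
      biases = tf.get_variable('biases', [256], initializer=tf.constant_initializer(0.1))
      local1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

Would that be the right way around the "must be fully defined" error?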

Here is all of my new code. I went ahead and tried to do the 4 convolutional layers.

    import tensorflow as tf

    def _variable_with_weight_decay(name, shape, wd):
      with tf.device('/cpu:0'):
        var = tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer())
      if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
      return var  

    def inference(X, phase=False, dropout_rate=0.8, n_classes=10, weight_decay=1e-4):
      # logits should be of dimension (batch_size, n_classes)

      # conv layer 1
      #batchSize = tf.shape(X)[0]
      batchSize = X.get_shape().as_list()[0]
      if batchSize is None:
        batchSize = 32

      with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], wd=weight_decay)
        conv = tf.nn.conv2d(X, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [64], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        #_activation_summary(conv1)

      # pool 1
      pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

      # batch normalization 1
      epsilon = 1e-3
      batch_mean1, batch_var1 = tf.nn.moments(pool1, [0])
      scale1 = tf.Variable(tf.ones([64]))
      beta1 = tf.Variable(tf.zeros([64]))
      batch1 = tf.nn.batch_normalization(pool1, batch_mean1, batch_var1, scale1, beta1, epsilon)

      # dropout 1
      #keep_prob = tf.placeholder(tf.float32, [batchSize, 32, 32, 3])
      #drop1 = tf.nn.dropout(batch1, keep_prob)

      # conv layer 2
      with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 128], wd=weight_decay)
        conv = tf.nn.conv2d(batch1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [128], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        #_activation_summary(conv2)

      # pool 2
      pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

      # batch normalization 2
      batch_mean2, batch_var2 = tf.nn.moments(pool2, [0])
      scale2 = tf.Variable(tf.ones([128]))
      beta2 = tf.Variable(tf.zeros([128]))
      batch2 = tf.nn.batch_normalization(pool2, batch_mean2, batch_var2, scale2, beta2, epsilon)

      # dropout 2
      #drop2 = tf.nn.dropout(batch2, keep_prob)

      # conv layer 3
      with tf.variable_scope('conv3') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 128, 256], wd=weight_decay)
        conv = tf.nn.conv2d(batch2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [256], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name=scope.name)
        #_activation_summary(conv3)

      # pool 3
      pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')

      # batch normalization 3
      batch_mean3, batch_var3 = tf.nn.moments(pool3, [0])
      scale3 = tf.Variable(tf.ones([256]))
      beta3 = tf.Variable(tf.zeros([256]))
      batch3 = tf.nn.batch_normalization(pool3, batch_mean3, batch_var3, scale3, beta3, epsilon)

      # dropout 3
      #drop3 = tf.nn.dropout(batch3, keep_prob)

      # conv layer 4
      with tf.variable_scope('conv4') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 256, 256], wd=weight_decay)
        conv = tf.nn.conv2d(batch3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [256], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(pre_activation, name=scope.name)
        #_activation_summary(conv4)

      # pool 4
      pool4 = tf.nn.max_pool(conv4, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')

      # batch normalization 4
      batch_mean4, batch_var4 = tf.nn.moments(pool4, [0])
      scale4 = tf.Variable(tf.ones([256]))
      beta4 = tf.Variable(tf.zeros([256]))
      batch4 = tf.nn.batch_normalization(pool4, batch_mean4, batch_var4, scale4, beta4, epsilon)

      # dropout 4
      #drop4 = tf.nn.dropout(batch4, keep_prob)

      # local (fully connected) layer 1
      with tf.variable_scope('local1') as scope:
        reshape = tf.reshape(batch4, [batchSize, -1])
        dim = reshape.get_shape()[1].value
        #dim = batchSize*32
        weights = _variable_with_weight_decay('weights', shape=[dim, 256], wd=0.004)
        biases = tf.get_variable('biases', [256], initializer=tf.constant_initializer(0.1))
        local1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        #_activation_summary(local1)

      # local layer 2
      with tf.variable_scope('local2') as scope:
        weights = _variable_with_weight_decay('weights', shape=[256, batchSize], wd=0.004)
        biases = tf.get_variable('biases', [batchSize], initializer=tf.constant_initializer(0.1))
        local2 = tf.nn.relu(tf.matmul(local1, weights) + biases, name=scope.name)
        #_activation_summary(local2)

      # softmax / logits layer
      with tf.variable_scope('softmax_linear') as scope:
        #reshape = tf.reshape(batch4, [batchSize, n_classes])
        weights = _variable_with_weight_decay('weights', [batchSize, n_classes], wd=None)
        biases = tf.get_variable('biases', [n_classes], initializer=tf.constant_initializer(0.1))
        logits = tf.add(tf.matmul(local2, weights), biases, name=scope.name)
        #_activation_summary(logits)

      return logits

0 Answers:

No answers yet