Why does TensorFlow give me completely different results during training when I haven't changed the file at all?

Asked: 2019-03-03 19:50:40

Tags: python tensorflow conv-neural-network

I am building a CNN with a spatial transformer for the CIFAR-10 dataset, and for some reason it gets very good results the first time I run it. Then, when I run it again, it refuses to train and the training accuracy hovers around 10%. However, when I move the file into another folder, it gets good results again... I am confused.

I call tf.reset_default_graph() before defining any TF variables. What could be causing this?
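
For context, tf.reset_default_graph() only clears the ops and variables defined earlier in the same process; it does not pin the random values that tf.truncated_normal / tf.random_normal draw for the initializers, so two runs can still start from different weights. A minimal sketch of how seeds could be fixed in TF 1.x (purely illustrative, not part of the original post):

import numpy as np
import tensorflow as tf

tf.reset_default_graph()   # drop ops/variables left over from earlier runs in this process
np.random.seed(0)          # pin NumPy-side randomness (e.g. any shuffling done in Python)
tf.set_random_seed(0)      # pin the graph-level seed used by TF initializers

# variables created after this point get reproducible initial values
w = tf.Variable(tf.truncated_normal(shape=[5, 5, 3, 100], mean=0.0, stddev=0.08))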

Loading the data:

# imports used by the snippets below
import pickle
import tarfile
from os.path import isfile, isdir
from urllib.request import urlretrieve

import numpy as np
import tensorflow as tf
from tqdm import tqdm

# assumed: 'transformer' comes from the standard TensorFlow spatial transformer module
from spatial_transformer import transformer

cifar10_dataset_folder_path = 'cifar-10-batches-py'

class DownloadProgress(tqdm):
    last_block = 0
    def hook(self, block_num=1, block_size=1, total_size=None):
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num


if not isfile('cifar-10-python.tar.gz'):
    with DownloadProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
        urlretrieve(
            'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
            'cifar-10-python.tar.gz',
            pbar.hook)

if not isdir(cifar10_dataset_folder_path):
    with tarfile.open('cifar-10-python.tar.gz') as tar:
        tar.extractall()



def load_label_names():
    return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']


def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
    with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
        # note the encoding type is 'latin1'
        batch = pickle.load(file, encoding='latin1')

    features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    labels = batch['labels']

    return features, labels
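
As a quick sanity check (illustrative, not in the original post), a single CIFAR-10 training batch loads as 10,000 RGB images of 32x32 pixels:

features, labels = load_cfar10_batch(cifar10_dataset_folder_path, 1)
print(features.shape)   # (10000, 32, 32, 3)
print(len(labels))      # 10000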

Processing the data:

def normalize(x):
    """
        argument
            - x: input image data as a numpy array of shape (N, 32, 32, 3)
        return
            - normalized x 
    """
    min_val = np.min(x)
    max_val = np.max(x)
    x = (x-min_val) / (max_val-min_val)
    return x


#One-hot encode

def one_hot_encode(x):
    """
        argument
            - x: a list of labels
        return
            - one hot encoding matrix (number of labels, number of class)
    """
    encoded = np.zeros((len(x), 10))

    for idx, val in enumerate(x):
        encoded[idx][val] = 1

    return encoded
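
For example (not part of the original post), encoding the labels [0, 3] yields a 2 x 10 matrix with a one in column 0 of the first row and column 3 of the second:

print(one_hot_encode([0, 3]))
# [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
#  [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]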


def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
    features = normalize(features)
    labels = one_hot_encode(labels)

    pickle.dump((features, labels), open(filename, 'wb'))

def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
    n_batches = 5
    valid_features = []
    valid_labels = []

    for batch_i in range(1, n_batches + 1):
        features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)

        # index at which to split off the last 10% of the batch as validation data
        index_of_validation = int(len(features) * 0.1)

        # preprocess the 90% of the whole dataset of the batch
        # - normalize the features
        # - one_hot_encode the labels
        # - save in a new file named, "preprocess_batch_" + batch_number
        # - each file for each batch
        _preprocess_and_save(normalize, one_hot_encode,
                             features[:-index_of_validation], labels[:-index_of_validation], 
                             'preprocess_batch_' + str(batch_i) + '.p')

        # unlike the training data, the validation data is accumulated across all batches
        # - take 10% of the whole dataset of the batch
        # - add them into a list of
        #   - valid_features
        #   - valid_labels
        valid_features.extend(features[-index_of_validation:])
        valid_labels.extend(labels[-index_of_validation:])

    # preprocess all of the stacked validation data
    _preprocess_and_save(normalize, one_hot_encode,
                         np.array(valid_features), np.array(valid_labels),
                         'preprocess_validation.p')

    # load the test dataset
    with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
        batch = pickle.load(file, encoding='latin1')

    # preprocess the testing data
    test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    test_labels = batch['labels']

    # Preprocess and save all the test data
    _preprocess_and_save(normalize, one_hot_encode,
                         np.array(test_features), np.array(test_labels),
                         'preprocess_testing.p')


preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)


valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
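
The training loop further down calls load_preprocess_training_batch, which is not shown in the post. A plausible sketch, assuming it simply streams mini-batches out of the preprocess_batch_*.p files written above:

def batch_features_labels(features, labels, batch_size):
    # yield successive mini-batches of (features, labels)
    for start in range(0, len(features), batch_size):
        end = min(start + batch_size, len(features))
        yield features[start:end], labels[start:end]


def load_preprocess_training_batch(batch_id, batch_size):
    # load one preprocessed training batch from disk and split it into mini-batches
    filename = 'preprocess_batch_' + str(batch_id) + '.p'
    features, labels = pickle.load(open(filename, mode='rb'))
    return batch_features_labels(features, labels, batch_size)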

Placeholders:

# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()

# Inputs
x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3), name='input_x')
y =  tf.placeholder(tf.float32, shape=(None, 10), name='output_y')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
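
As a quick shape check (illustrative only, not part of the original post), the input placeholder can be probed with a dummy batch before any variables exist:

import numpy as np

with tf.Session() as sess:
    dummy_images = np.zeros((4, 32, 32, 3), dtype=np.float32)
    print(sess.run(tf.shape(x), feed_dict={x: dummy_images}))   # [ 4 32 32  3]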

Here is the architecture:

def conv_net(x, keep_prob):

    # Identity transformation
    initial = np.array([[1., 0, 0], [0, 1., 0]])
    initial = initial.astype('float32')
    initial = initial.flatten()

    #x_reshaped = tf.reshape(x, [-1, 32*32*3])

    st1_filter = tf.Variable(tf.truncated_normal(shape=[5, 5, 3, 100], mean=0, stddev=0.08))
    st2_filter = tf.Variable(tf.truncated_normal(shape=[5, 5, 100, 200], mean=0, stddev=0.08))
    b_st1 = tf.Variable(tf.random_normal(shape=[100], mean=0.0, stddev=0.01))
    b_st2 = tf.Variable(tf.random_normal(shape=[200], mean=0.0, stddev=0.01))

    # preprocess pool
    st0_pool = tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

    # layer 1
    st1 = tf.nn.conv2d(st0_pool, st1_filter, strides=[1,1,1,1], padding='SAME')
    st1 = tf.nn.bias_add(st1, b_st1)
    st1 = tf.nn.relu(st1)
    st1_bn = tf.layers.batch_normalization(st1)

    st1_pool = tf.nn.max_pool(st1_bn, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

    # layer 2
    st2 = tf.nn.conv2d(st1_pool, st2_filter, strides=[1,1,1,1], padding='SAME')
    st2 = tf.nn.bias_add(st2, b_st2)
    st2 = tf.nn.relu(st2)
    st2_bn = tf.layers.batch_normalization(st2)

    st2_pool = tf.nn.max_pool(st2_bn, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

    # flatten
    st1_pool_final = tf.nn.max_pool(st1_pool, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME')
    st2_pool_final = tf.nn.max_pool(st2_pool, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

    flat_features = tf.concat([tf.contrib.layers.flatten(st1_pool_final), tf.contrib.layers.flatten(st2_pool_final)], 1)

    # fully connected stn layer
    st1FC_filter = tf.Variable(tf.truncated_normal(shape=[1200,100], mean=0, stddev=0.08))
    st2FC_filter = tf.Variable(tf.truncated_normal(shape=[100,6], mean=0, stddev=0.08))
    b_st1FC = tf.Variable(tf.random_normal(shape=[100], mean=0.0, stddev=0.01))
    b_st2FC = tf.Variable(tf.random_normal(shape=[6], mean=0.0, stddev=0.01))

    # Create variables for fully connected layer for the localisation network
    W_fc_loc1 = tf.Variable(tf.truncated_normal(shape=[1200,100], mean=0, stddev=0.08))
    b_fc_loc1 = tf.Variable(tf.random_normal(shape=[100], mean=0.0, stddev=0.01))

    # layer 3
    st_fc1 = tf.layers.batch_normalization(flat_features)
    st_fc1 = tf.add(tf.matmul(st_fc1, W_fc_loc1), b_fc_loc1)
    st_fc1 = tf.nn.relu(st_fc1)
    st_fc1 = tf.nn.dropout(st_fc1, keep_prob)

    # layer 4
    W_fc2 = tf.Variable(tf.zeros([100, 6]), name='sp_weight_fc2')
    b_fc2 = tf.Variable(initial_value=initial, name='sp_biases_fc2')

    sp_fc2 = tf.layers.batch_normalization(st_fc1)
    sp_fc2 = tf.add(tf.matmul(sp_fc2, W_fc2), b_fc2)

    out_size = (32, 32, 3)
    h_trans = transformer(x, sp_fc2, out_size)

    conv1_filter = tf.Variable(tf.truncated_normal(shape=[3, 3, 3, 64], mean=0, stddev=0.08))
    conv2_filter = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 128], mean=0, stddev=0.08))
    conv3_filter = tf.Variable(tf.truncated_normal(shape=[5, 5, 128, 256], mean=0, stddev=0.08))
    conv4_filter = tf.Variable(tf.truncated_normal(shape=[5, 5, 256, 512], mean=0, stddev=0.08))

    # 1, 2
    conv1 = tf.nn.conv2d(h_trans, conv1_filter, strides=[1,1,1,1], padding='SAME')
    conv1 = tf.nn.relu(conv1)
    conv1_pool = tf.nn.max_pool(conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    conv1_bn = tf.layers.batch_normalization(conv1_pool)

    # 3, 4
    conv2 = tf.nn.conv2d(conv1_bn, conv2_filter, strides=[1,1,1,1], padding='SAME')
    conv2 = tf.nn.relu(conv2)
    conv2_pool = tf.nn.max_pool(conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    conv2_bn = tf.layers.batch_normalization(conv2_pool)

    # 5, 6
    conv3 = tf.nn.conv2d(conv2_bn, conv3_filter, strides=[1,1,1,1], padding='SAME')
    conv3 = tf.nn.relu(conv3)
    conv3_pool = tf.nn.max_pool(conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    conv3_bn = tf.layers.batch_normalization(conv3_pool)

    # 7, 8
    conv4 = tf.nn.conv2d(conv3_bn, conv4_filter, strides=[1,1,1,1], padding='SAME')
    conv4 = tf.nn.relu(conv4)
    conv4_pool = tf.nn.max_pool(conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    conv4_bn = tf.layers.batch_normalization(conv4_pool)

    # 9
    flat = tf.contrib.layers.flatten(conv4_bn)

    # 10
    full1 = tf.contrib.layers.fully_connected(inputs=flat, num_outputs=128, activation_fn=tf.nn.relu)
    full1 = tf.nn.dropout(full1, keep_prob)
    full1 = tf.layers.batch_normalization(full1)

    # 11
    full2 = tf.contrib.layers.fully_connected(inputs=full1, num_outputs=256, activation_fn=tf.nn.relu)
    full2 = tf.nn.dropout(full2, keep_prob)
    full2 = tf.layers.batch_normalization(full2)

    # 12
    full3 = tf.contrib.layers.fully_connected(inputs=full2, num_outputs=512, activation_fn=tf.nn.relu)
    full3 = tf.nn.dropout(full3, keep_prob)
    full3 = tf.layers.batch_normalization(full3)

    # 13
    full4 = tf.contrib.layers.fully_connected(inputs=full3, num_outputs=1024, activation_fn=tf.nn.relu)
    full4 = tf.nn.dropout(full4, keep_prob)
    full4 = tf.layers.batch_normalization(full4)

    # 14
    out = tf.contrib.layers.fully_connected(inputs=full3, num_outputs=10, activation_fn=None)
    return out, h_trans

Hyperparameters, cost function, and optimizer:

#Hyperparameters
epochs = 10
batch_size = 128
keep_probability = 0.7
learning_rate = 0.001

#Cost Function & optimizer
#### INIT
#with tf.name_scope('logits_and_stn_output'):
logits, h_trans = conv_net(x, keep_prob)

model = tf.identity(logits, name='logits') # name the logits tensor so that it can be loaded from disk after training

# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
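
One aside that is not from the original post: tf.layers.batch_normalization defaults to training=False and registers its moving-average updates in the UPDATE_OPS collection, which only run if the train op depends on them. The usual TF 1.x idiom looks roughly like this, sketched with a hypothetical is_training placeholder:

is_training = tf.placeholder(tf.bool, name='is_training')   # hypothetical extra placeholder

# inside conv_net, each batch-norm call would then take the flag, e.g.
#   st1_bn = tf.layers.batch_normalization(st1, training=is_training)

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)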

Training loop:

print('Training...')
with tf.Session() as sess:
    # Initializing the variables
    sess.run(tf.global_variables_initializer())

    # Training cycle
    for epoch in range(epochs):
        # Loop over all batches
        n_batches = 5
        for batch_i in range(1, n_batches + 1):
            for batch_features, batch_labels in load_preprocess_training_batch(batch_i, batch_size):
                train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)



            print('Epoch {:>2}, CIFAR-10 Batch {}:  '.format(epoch + 1, batch_i), end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)

    # Save Model
    saver = tf.train.Saver()
    save_path = saver.save(sess, save_model_path)
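
The loop above also relies on train_neural_network, print_stats, and save_model_path, none of which appear in the post. A plausible sketch of what they look like, assuming the conventional structure of this kind of script (the path name is hypothetical):

save_model_path = './image_classification'   # hypothetical save location, not given in the post

def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
    # run one optimization step on a single mini-batch
    session.run(optimizer, feed_dict={x: feature_batch,
                                      y: label_batch,
                                      keep_prob: keep_probability})

def print_stats(session, feature_batch, label_batch, cost, accuracy):
    # report the loss on the current training batch and the accuracy on the validation set
    loss = session.run(cost, feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})
    valid_acc = session.run(accuracy, feed_dict={x: valid_features, y: valid_labels, keep_prob: 1.0})
    print('Loss: {:>10.4f} Validation Accuracy: {:.6f}'.format(loss, valid_acc))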

0 Answers:

There are no answers yet.