Network bug - Inception v1 not training

Time: 2019-08-26 14:44:37

Tags: tensorflow conv-neural-network

I'm trying to use the Inception model (GoogLeNet) from https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v1.py, which Google implemented with the TensorFlow Slim API, to classify images from the CIFAR-10 dataset (https://www.cs.toronto.edu/~kriz/cifar.html). The problem is that the network's cost stays almost constant and I can't find the bug. I'm very new to TensorFlow and Slim, so I'd appreciate any help.

I'm using these packages:

import tensorflow as tf
import numpy as np
from tensorflow.python.framework import ops
import matplotlib.pyplot as plt
import os
import pickle
import cv2
from sklearn import model_selection as ms
from nets import inception_v1,inception_utils
import math
%matplotlib inline
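
The `nets` import comes from the tensorflow/models repository rather than pip, so the `research/slim` directory has to be on the Python path first. A minimal sketch, assuming the repository is cloned next to the notebook (the path is my guess):

import sys
sys.path.append('./models/research/slim')  # hypothetical location of the cloned repo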

Then I made these two helper functions:

def one_hot_matrix(labels, C):

    # convert a vector of labels into a one-hot matrix with the C classes along axis 0
    C = tf.constant(C,name='C')
    one_hot_matrix = tf.one_hot(labels,C,axis=0)
    sess = tf.Session()
    one_hot = sess.run(one_hot_matrix)
    sess.close()

    return one_hot


def make_mini_batches(X, Y, mini_batch_size):

    m = X.shape[0]                  
    mini_batches = []

    # number of mini batches of size mini_batch_size in the dataset
    num_complete_minibatches = math.floor(m/mini_batch_size) 

    for k in range(0, num_complete_minibatches):
        mini_batch_X = X[k*mini_batch_size : (k+1)*mini_batch_size,...]
        mini_batch_Y = Y[k*mini_batch_size : (k+1)*mini_batch_size,:]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = X[num_complete_minibatches*mini_batch_size:,...]
        mini_batch_Y = Y[num_complete_minibatches*mini_batch_size:,:]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches
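
For reference, a quick sanity check of the two helpers on toy inputs; the values in the comments are what the code above should produce:

print(one_hot_matrix(np.array([0, 2, 1]), 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]   <- classes land on axis 0, hence the .T when building y below

batches = make_mini_batches(np.zeros((10, 32, 32, 3)), np.zeros((10, 10)), 4)
print(len(batches))  # 3: two full mini-batches of 4 plus one leftover batch of 2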

First, I read the dataset:

# function to read the batches
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
    with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
        # note the encoding type is 'latin1'
        batch = pickle.load(file, encoding='latin1')

    features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    labels = batch['labels']
    datadict = {'data':features,'labels':labels}

    return datadict

# combine batches into one dataset (batch size: 10000)
full_data = load_cfar10_batch('./cifar_10',1)['data']
full_labels = []
for i in range(5):
    full_labels.extend(load_cfar10_batch('./cifar_10',i+1)['labels'])
    if i > 0:
        full_data = np.concatenate((full_data,load_cfar10_batch('./cifar_10',i+1)['data']),axis = 0)

# dataset sizes
full_data.shape, len(full_labels) 
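
If all five batches load correctly, that last line should show the full CIFAR-10 training set:

((50000, 32, 32, 3), 50000)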

Then comes some preprocessing and the training/validation split:

# data preprocessing (using only 1/10 of the dataset for speed)

X = full_data[0:5000]           
y = one_hot_matrix(full_labels[0:5000], 10).T       

# split into training-validation sets
x_train, x_val, y_train, y_val = ms.train_test_split(X, y, test_size=0.2, random_state=1)

x_train = x_train.astype('float32')
x_val = x_val.astype('float32')

x_train = x_train / 255.0
x_val = x_val / 255.0

print('x_train shape:',x_train.shape)
print('y_train shape:',y_train.shape)
print('x_val shape:',x_val.shape)
print('y_val shape:',y_val.shape)
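
With 5000 samples and a 0.2 validation split, these prints should come out as:

x_train shape: (4000, 32, 32, 3)
y_train shape: (4000, 10)
x_val shape: (1000, 32, 32, 3)
y_val shape: (1000, 10)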

Then I initialize the variables and build the graph:

tf.set_random_seed(1)
seed = 3
(m, n_H, n_W, n_C) = x_train.shape             
n_y = y_train.shape[1]
costs = []
print_cost = True
learning_rate = 0.001
num_epochs = 100
minibatch_size = 256
num_minibatches = int(m / minibatch_size)
minibatches = make_mini_batches(x_train, y_train, minibatch_size)
ops.reset_default_graph() 

inputs = tf.placeholder(tf.float32,shape=[None, n_H, n_W, n_C],name = 'inputs')
labels = tf.placeholder(tf.int8,shape=[None, n_y],name = 'labels')

# Forward propagation (Inception)
Z = inception_v1.inception_v1(inputs,num_classes = n_y,dropout_keep_prob=1,global_pool=True)[1]['Logits']
# Cost function 
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = Z, labels = labels))
# ADAM optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) 
# Initialize variables
init = tf.global_variables_initializer()
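
One thing I am unsure about: the slim implementation of inception_v1 also takes an is_training argument, which seems to default to True and controls batch norm and dropout, so it should probably be turned off for the accuracy evaluation below. A sketch of what I mean, with a placeholder I named myself:

is_training = tf.placeholder(tf.bool, name='is_training')  # hypothetical flag, fed per sess.run call
Z = inception_v1.inception_v1(inputs, num_classes=n_y, is_training=is_training,
                              dropout_keep_prob=1, global_pool=True)[1]['Logits']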

Then the training loop:

with tf.Session() as sess:

    sess.run(init)

    for epoch in range(num_epochs):

        # learning rate decay (note: this only rebinds the Python variable;
        # the optimizer node above was already built with the original value)
        if epoch % 8 == 0:
            learning_rate *= math.pow(0.95,epoch/8)

        minibatch_cost = 0.

        for minibatch in minibatches:

            (minibatch_X, minibatch_Y) = minibatch
            _ , temp_cost = sess.run([optimizer, cost], feed_dict={inputs: minibatch_X, labels: minibatch_Y})
            minibatch_cost += temp_cost / num_minibatches

        # Print the cost every epoch
        if print_cost == True and epoch % 5 == 0:
            print ("Cost after epoch %i: %f" % (epoch, minibatch_cost),", Learning rate: %f" %(learning_rate))
        if print_cost == True and epoch % 1 == 0:
            costs.append(minibatch_cost)

    # Plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('Cost')
    plt.xlabel('Iterations (per tens)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    # Calculate the correct predictions
    predict_op = tf.argmax(Z, 1)
    correct_prediction = tf.equal(predict_op, tf.argmax(labels, 1))

    # Calculate accuracy on the validation set
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    #print(accuracy)
    train_accuracy = accuracy.eval({inputs: x_train, labels: y_train})
    val_accuracy = accuracy.eval({inputs: x_val, labels: y_val})
    print("Train Accuracy:", train_accuracy)
    print("Validation Accuracy:", val_accuracy)

The printed cost stays almost the same from one epoch to the next, so my network is not training.

1 Answer:

Answer 0 (score: 0)

I managed to find the solution. I had to set the argument scope before calling the network:

slim = tf.contrib.slim  # slim itself was never imported above; the arg_scope helper lives there

with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
    Z = inception_v1.inception_v1(inputs,num_classes = n_y,dropout_keep_prob=1,global_pool=True)[1]['Logits']

After that, everything worked fine.
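
For what it's worth, my understanding is that inception_v1_arg_scope installs the layer defaults the slim implementation was written against, roughly along these lines (a sketch of the idea, with illustrative values, not the actual source):

with slim.arg_scope([slim.conv2d],
                    weights_initializer=slim.variance_scaling_initializer(),
                    weights_regularizer=slim.l2_regularizer(0.00004),
                    normalizer_fn=slim.batch_norm):
    pass  # conv layers created inside this scope pick up these defaults

Created outside such a scope, the conv layers fall back on slim's generic defaults, which was apparently enough to keep the cost from moving.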