My TensorFlow convolutional neural network won't train

Time: 2019-10-10 14:20:14

Tags: python tensorflow

I am trying to implement a class-based convolutional neural network in TensorFlow for the facial expression recognition data on Kaggle. However, for some reason the network does not train: I get the same cost and error rate at every iteration.

I tried using one-hot vectors for the labels and changed the hyperparameters, but neither had any effect on the results (a rough sketch of the one-hot variant is shown below, followed by the full code).
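For reference, here is a rough sketch of what the one-hot variant looked like (it reuses the K, Yish, and tfT names from the full code below; only the cost line changes):

# One-hot sketch: turn the integer labels into K-dimensional one-hot
# vectors and use the dense cross-entropy op instead of the sparse one.
tfT_onehot = tf.one_hot(tfT, depth=K)
cost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tfT_onehot, logits=Yish))

The full code is below: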

import pandas as pd 
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.utils import shuffle


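# Load the fer2013 CSV, parse each space-separated pixel string into an array,
# split the rows into train (Usage == 'Training') and test (everything else),
# reshape into (N, 48, 48, 1) images, and scale pixel values to [0, 1].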
def get_data():
    df = pd.read_csv('../large_files/fer2013/fer2013.csv')
    Y = df.emotion.to_numpy()
    XX = df.pixels
    X = []
    for i in range(len(XX)):
        X.append(XX[i].split())
    X = np.array(X).astype(np.float32)
    Z = df.Usage
    train = (Z == 'Training').to_list()
    test = [not i for i in train]
    Xtrain = X[train].astype(np.float32)
    Xtrain = Xtrain.reshape((Xtrain.shape[0], int(np.sqrt(Xtrain.shape[1])), int(np.sqrt(Xtrain.shape[1])), 1))
    Xtest = X[test].astype(np.float32)
    Xtest = Xtest.reshape((Xtest.shape[0], int(np.sqrt(Xtest.shape[1])), int(np.sqrt(Xtest.shape[1])), 1))
    Ytrain = Y[train].astype(np.int32)
    Ytest = Y[test].astype(np.int32)
    return Xtrain / 255, Xtest / 255, Ytrain, Ytest

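# conv -> bias -> max-pool -> ReLU; applying ReLU after max pooling is
# equivalent to the usual pool-after-ReLU order, since ReLU is monotonic.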
def convpool(X, W, b, poolsz):
    conv_out = tf.nn.conv2d(X, W, strides=[1, 1, 1, 1], padding='SAME')
    conv_out = tf.nn.bias_add(conv_out, b)
    pool_out = tf.nn.max_pool(conv_out, ksize=[1, poolsz, poolsz, 1], strides=[1, poolsz, poolsz, 1], padding='SAME')
    return tf.nn.relu(pool_out)

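# He-style scale factor (fan-in = product of all filter dims except the output
# channels) applied to Uniform[0, 1) samples, so every initial weight is positive.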
def init_filter(shape):
    w = np.random.rand(*shape) * np.sqrt(2 / np.prod(shape[:-1]))
    return w.astype(np.float32)

def error_rate(Y,T):
    return np.mean(Y != T)

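# Dense layer: Xavier-style Gaussian init, zero bias; activation=None makes
# the layer output raw logits.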
class FullyConnectedLayer:
    def __init__(self, M1, M2, activation=tf.nn.relu):
        W = np.random.randn(M1, M2) / np.sqrt(M1 + M2)
        self.W = tf.Variable(W.astype(np.float32))
        b = np.zeros(M2)
        self.b = tf.Variable(b.astype(np.float32))
        self.activation = activation
    def forward(self, X):
        if self.activation is None:
            return tf.matmul(X, self.W) + self.b
        else:
            return self.activation(tf.matmul(X, self.W) + self.b)


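# Conv + max-pool block built from init_filter weights and a given bias vector.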
class ConvolutionLayer:
    def __init__(self, filter_shape, b, poolsz=2):
        W = init_filter(filter_shape)
        self.W = tf.Variable(W)
        self.b = tf.Variable(b.astype(np.float32))
        self.poolsize = poolsz
    def forward(self, X):
        return convpool(X, self.W, self.b, self.poolsize)


class CNN:
    def __init__(self, filter_shapes, dense_layer_sizes):
        self.filter_shapes = filter_shapes #List of shapes
        self.dense_layer_sizes = dense_layer_sizes # List of hidden units for dense layers

    def fit(self, trainset, testset, learning_rate=0.001, momentum=0.9, decay=0.99, batch_sz=200, poolsize=2):
        learning_rate = np.float32(learning_rate)
        momentum = np.float32(momentum)
        decay = np.float32(decay)

        Xtrain = trainset[0]
        Ytrain = trainset[1]
        Xtest = testset[0]
        Ytest = testset[1]
        K = len(set(Ytrain))

        # Crop the train and test sets so their lengths are divisible by the batch size
        Ntrain = len(Ytrain)
        Ntrain = Ntrain // batch_sz * batch_sz      
        Xtrain = Xtrain[:Ntrain,]
        Ytrain = Ytrain[:Ntrain]

        Ntest = len(Ytest)
        Ntest = Ntest//batch_sz * batch_sz
        Xtest = Xtest[:Ntest,]
        Ytest = Ytest[:Ntest]

        X_shape = Xtrain.shape
        width = X_shape[1]
        height = X_shape[2]

        # Create Convolution Layers and Store Them
        self.convolutionlayers = []
        for shape in self.filter_shapes:
            b = np.zeros(shape[-1], dtype = np.float32)
            conv = ConvolutionLayer(shape, b, poolsz = poolsize)
            self.convolutionlayers.append(conv)

        # Width and height are halved by each max-pooling layer, so the input
        # size of the first fully connected layer is computed as follows
        final_filter_shape = self.filter_shapes[-1]
        num_convs = len(self.convolutionlayers)
        M1 = int((width/(2**num_convs)) * (height/(2**num_convs)) * final_filter_shape[-1])  

        # Create Fully Connected Layers and Store Them
        self.vanillalayers = []
        for M2 in self.dense_layer_sizes:
            layer = FullyConnectedLayer(M1, M2)
            self.vanillalayers.append(layer)
            M1 = M2
        final_layer = FullyConnectedLayer(M1, K, activation = None) 

        self.vanillalayers.append(final_layer)
        self.AllLayers = self.convolutionlayers + self.vanillalayers

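        # Fixed-shape placeholders: the batch dimension is hard-coded to
        # batch_sz, so every fed batch must have exactly that many rows.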
        tfX = tf.placeholder(dtype=tf.float32, shape= (batch_sz, width, height, 1))
        tfT = tf.placeholder(dtype=tf.int32, shape = (batch_sz,))
        Yish = self.forward(tfX)

        cost = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits = Yish, labels=tfT))
        train_op = tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=decay, momentum=momentum).minimize(cost)
        predict_op = self.predict(tfX)

        max_epoch = 10
        print_period = 20
        num_batches = Ntrain // batch_sz
        TestCosts = []

        init = tf.global_variables_initializer()

        with tf.Session() as sess:
            sess.run(init)
            for i in range(max_epoch):
                Xtrain, Ytrain = shuffle(Xtrain, Ytrain)
                for j in range(num_batches):
                    Xbatch = Xtrain[j * batch_sz: (j + 1)*batch_sz,]
                    Ybatch = Ytrain[j * batch_sz: (j + 1)*batch_sz,]

                    sess.run(train_op, feed_dict = {tfX : Xbatch, tfT : Ybatch})
                    if j % print_period == 0:
                        test_cost = 0
                        prediction = np.zeros(Ntest)
                        for k in range(Ntest // batch_sz):
                            Xtestbatch = Xtest[k*batch_sz:(k*batch_sz + batch_sz),]
                            Ytestbatch = Ytest[k*batch_sz:(k*batch_sz + batch_sz),]                            
                            test_cost += sess.run(cost, feed_dict={tfX: Xtestbatch, tfT: Ytestbatch})
                            prediction[k*batch_sz:(k*batch_sz + batch_sz)] = sess.run(
                                predict_op, feed_dict={tfX: Xtestbatch})
                        err = error_rate(prediction, Ytest)
                        print("Cost / err at iteration i=%d, j=%d: %.3f / %.3f" % (i, j, test_cost, err))
                        TestCosts.append(test_cost)

        plt.plot(TestCosts)
        plt.show()

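    # Run the conv blocks first, then flatten before the dense layers
    # (the reshape is a no-op for later dense layers, which are already 2-D).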
    def forward(self, X):
        Z = X
        count = 0 
        for layer in self.AllLayers:
            # If next layer is fully connected layer, reshape Z
            if count >= len(self.convolutionlayers):
                Z_shape = Z.get_shape().as_list()
                Z = tf.reshape(Z, [Z_shape[0], np.prod(Z_shape[1:])])
            Z = layer.forward(Z)
            count += 1
        return Z

    def predict(self, X):
        out = self.forward(X)
        return tf.math.argmax(out, axis = 1)

def main():     
    Xtrain, Xtest, Ytrain, Ytest = get_data()
    trainset = [Xtrain, Ytrain]
    testset = [Xtest, Ytest]
    filtershapes = [(5,5,1,10), (5,5,10,20), (5,5,20,40)]
    fullylayers = [500,500]

    cnn = CNN(filtershapes, fullylayers)
    cnn.fit(trainset, testset)

if __name__ == '__main__':
    main()


0 Answers:

No answers