Test accuracy is None

Time: 2018-01-19 18:36:04

Tags: python list tensorflow machine-learning neural-network

I built an MLP, integrated a function I call the profile coefficient function, and trained the network with it. At test time, however, I need to drop some of the weights (10% to 100%, in increments of 10%) to study how this pruning affects classification accuracy.

I expect to get ten test accuracies, one for each percentage configuration.

The function that drops weights at test time (multilayer_perceptron_drop) is under the heading "Drop Weights with profile coefficients". I get no errors, but it returns Test Accuracy = None.
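For context, each profile is a per-neuron scaling vector: linearly decaying coefficients, masked so that only the first lp fraction of a layer's neurons stays active. Here is a minimal standalone NumPy sketch of that idea (the function name linear_profile_np is hypothetical, for illustration only; the actual TensorFlow code follows below):

import numpy as np

def linear_profile_np(lp, n):
    # Linearly decaying coefficients: (n-1)/n, (n-2)/n, ..., 1/n, 0
    base = np.array([1.0 - (i + 1) / n for i in range(n)], dtype=np.float32)
    # Binary mask: keep the first round(lp * n) neurons, zero the rest
    keep = int(np.round(lp * n))
    mask = np.concatenate([np.ones(keep), np.zeros(n - keep)]).astype(np.float32)
    return base * mask

print(linear_profile_np(0.5, 10))  # [0.9 0.8 0.7 0.6 0.5 0.  0.  0.  0.  0. ]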

# Multilayer Perceptron.

from __future__ import print_function
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

import time
import numpy as np
import tensorflow as tf
import math
import matplotlib.pyplot as plt
from tensorflow.python.framework import ops

# Parameters
learning_rate = 0.07
training_epochs = 10
batch_size = 64
display_step = 1

# Network Parameters
n_1 = 100               # 1st layer number of neurons
n_2 = 100               # 2nd layer number of neurons
n_input = 784           # MNIST data input (img shape: 28*28)
n_classes = 10          # MNIST total classes (0-9 digits)

tic = time.time()
# tf Graph input
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, n_input])
Y = tf.placeholder(tf.float32, [None, n_classes])

# Store layers weight & bias
def initialize_param(n_input, n_1, n_2, n_class):
    tf.set_random_seed(1)
    W1 = tf.get_variable("W1", shape = [n_input, n_1], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
    b1 = tf.get_variable("b1", shape = [n_1], initializer = tf.zeros_initializer())
    W2 = tf.get_variable("W2", shape = [n_1, n_2], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
    b2 = tf.get_variable("b2", shape = [n_2], initializer = tf.zeros_initializer())
    W3 = tf.get_variable("W3", shape = [n_2, n_class], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
    b3 = tf.get_variable("b3", shape = [n_class], initializer = tf.zeros_initializer())

    parameters = {"W1": W1, 
                  "b1": b1, 
                  "W2": W2,  
                  "b2": b2, 
                  "W3": W3,
                  "b3": b3}
    return parameters
parameters = initialize_param(784, 100, 100, 10)

# Create profile function
def linear_func(n):
    return [np.float32(1.0 - 1.0 * i/n) for i in range(1, n + 1)]
L = linear_func(100)

# Create model with profile coefficient
def multilayer_perceptron(x): 
    Z1 = tf.add(tf.matmul(x, parameters['W1']), parameters['b1'])  # First fully connected layer with complete input channels
    A1 = tf.nn.relu(Z1)
    P1 = tf.multiply(L, A1)
    Z2 = tf.add(tf.matmul(P1, parameters['W2']), parameters['b2'])  # Second fully connected layer with half input channels (0.5 neurons)
    A2 = tf.nn.relu(Z2)
    P2 = tf.multiply(L, A2)
    out_layer = tf.add(tf.matmul(P2, parameters['W3']), parameters['b3'])       # Output fully connected layer with quarter input channels (0.25 neurons for each class)

    return out_layer

# Construct model
logits = multilayer_perceptron(X)

# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(loss_op)

# Initializing the variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    # Training Loop
    cost = []
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)

        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)

            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, loss_op], feed_dict = {X: batch_x, Y: batch_y})

            # Compute average loss
            avg_cost += c / total_batch
            cost.append(avg_cost)
            if i % 5000 == 0:
                pred = tf.nn.softmax(logits)  # Apply softmax to logits
                correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                train_accuracy = accuracy.eval({X: mnist.train.images, Y: mnist.train.labels})


        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%03d' % (epoch + 1), "cost = {:.9f}".format(avg_cost))

    # Create profile function
    def linear_func(n):
        return [np.float32(1.0 - 1.0 * i/n) for i in range(1, n + 1)]
    L = linear_func(100)

    def linear_profile(lp, n_1):
        p_L = tf.constant(L, shape = [1, 100])
        L_11 = tf.constant(1.0, shape = [1, int(np.round((lp) * n_1))])
        L_12 = tf.zeros(shape = [1, int(np.round((1 - lp) * n_1))])
        L1 = tf.concat((L_11, L_12), axis = 1)
        p_L1 = tf.multiply(L1, p_L)
        return p_L1
    pc = np.linspace(0.1, 1.0, 10) 
    profile_1 = []
    for i in pc:
        p_L1 = linear_profile(i, 100)
        profile = tf.stack(p_L1, axis = 0) 
        profile_1.append(profile)
        profile_2 = tf.convert_to_tensor(profile_1, dtype=tf.float32)

    # Drop Weights with profile coefficients
    def multilayer_perceptron_drop(x):
        logist_1 = []
        for j in range(len(profile_1)):
            Z_1 = tf.add(tf.matmul(x, parameters['W1']), parameters['b1'])  
            A_1 = tf.nn.relu(Z_1)
            P_1 = tf.multiply(profile_2[j], A_1)
            Z_2 = tf.add(tf.matmul(A_1, parameters['W2']), parameters['b2'])  
            A_2 = tf.nn.relu(Z_2)
            P_2 = tf.multiply(profile_2[j], A_2)
            out_layer = tf.add(tf.matmul(P_2, parameters['W3']), parameters['b3'])
            logist_1.append(out_layer) 
            return logist_1 
    logist_1 = multilayer_perceptron_drop(X)


    #Retrieved model
    test_accuracy_2 = []
    for k in range(len(logist_1)):
        pred_1 = tf.nn.softmax(logist_1[k])
        correct_prediction_1 = tf.equal(tf.argmax(pred_1, 1), tf.argmax(Y, 1))
        accuracy_1 = tf.reduce_mean(tf.cast(correct_prediction_1, "float"))
        test_accuracy_1 = accuracy_1.eval({X: mnist.test.images, Y: mnist.test.labels})*100
        test_accuracy_2 = test_accuracy_2.append(test_accuracy_1)
        #test_accuracy_1 = sess.run(accuracy_1, feed_dict = {X: mnist.test.images, Y: mnist.test.labels})
    sess.close()

    for l in range(len(pc)):
        print("Percentage_Profile:", '%03d' % (l + 1), "Test Accuracy = {}".format(test_accuracy_2))

    #print('Test Accuracy: {}'.format(test_accuracy_2))
    toc = time.time()
    print('Training Time is :' + str((toc - tic)/60) + 's') 

Output:

Extracting /tmp/data/train-images-idx3-ubyte.gz
Extracting /tmp/data/train-labels-idx1-ubyte.gz
Extracting /tmp/data/t10k-images-idx3-ubyte.gz
Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
Epoch: 001 cost = 0.463683842
Epoch: 003 cost = 0.156443127
Epoch: 005 cost = 0.108447251
Epoch: 007 cost = 0.083334308
Epoch: 009 cost = 0.064379380
Percentage_Profile: 001 Test Accuracy = None
Percentage_Profile: 002 Test Accuracy = None
Percentage_Profile: 003 Test Accuracy = None
Percentage_Profile: 004 Test Accuracy = None
Percentage_Profile: 005 Test Accuracy = None
Percentage_Profile: 006 Test Accuracy = None
Percentage_Profile: 007 Test Accuracy = None
Percentage_Profile: 008 Test Accuracy = None
Percentage_Profile: 009 Test Accuracy = None
Percentage_Profile: 010 Test Accuracy = None
Training Time is :1.06416635116s

1 Answer:

Answer 0 (score: 2):

Change this line:

# WRONG! The result of `append` is `None`, not the list
test_accuracy_2 = test_accuracy_2.append(test_accuracy_1)

to this:

# OK. Just collect the values in the list
test_accuracy_2.append(test_accuracy_1)
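In other words, list.append mutates the list in place and returns None, so the assignment rebinds test_accuracy_2 to None after the first iteration; that is exactly why every row prints None. A quick demonstration of the semantics, followed by the corrected collection loop (a sketch reusing the question's variable names):

# append returns None; the list itself grows in place
acc = []
result = acc.append(97.3)
print(result)   # None
print(acc)      # [97.3]

# Corrected loop: collect each accuracy without reassigning the list
test_accuracy_2 = []
for k in range(len(logist_1)):
    pred_1 = tf.nn.softmax(logist_1[k])
    correct_prediction_1 = tf.equal(tf.argmax(pred_1, 1), tf.argmax(Y, 1))
    accuracy_1 = tf.reduce_mean(tf.cast(correct_prediction_1, "float"))
    test_accuracy_2.append(accuracy_1.eval({X: mnist.test.images, Y: mnist.test.labels}) * 100)

Note that multilayer_perceptron_drop as posted also returns from inside its for loop, so logist_1 only ever holds the first profile's logits; dedenting the return logist_1 statement one level is needed before all ten accuracies can be collected.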