How do I retrieve the trained weights/biases of a model and assign them to specific layers?

Time: 2018-06-24 21:51:59

Tags: python tensorflow neural-network

I am currently working with TensorFlow and have implemented a basic multilayer perceptron with 3 hidden layers, following sentdex's tutorial on YouTube.

I would like to know how to retrieve the trained weights/biases of each layer, and whether it is also possible to assign such retrieved matrices back to specific layers. This may not serve much practical purpose; my goal is simply to experiment, observe the effect, and become more familiar with TensorFlow.
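To make the retrieval half concrete, this is the kind of thing I have in mind (a minimal sketch, not my actual code; w is a hypothetical stand-in for one layer's weights):

import tensorflow as tf

w = tf.Variable(tf.random_normal([784, 500]))  # hypothetical layer weights

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Fetching a Variable with sess.run returns its current value
    # as a plain numpy array.
    w_values = sess.run(w)
    print(w_values.shape)  # (784, 500)

    # tf.trainable_variables() lists every trainable Variable in the
    # graph, so the same fetch works without explicit references.
    all_values = sess.run(tf.trainable_variables())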

I have looked at some suggestions that use tf.trainable_variables(), but I still couldn't get it to work. The only approach I can think of is to initialize the network and then assign predefined variables at the end, which seems messy even if it works.
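And this is the assignment half I am unsure about (again only a sketch; tf.assign and Variable.load are what I found in the docs, and w / new_values are hypothetical):

import numpy as np
import tensorflow as tf

w = tf.Variable(tf.random_normal([784, 500]))  # hypothetical layer weights

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # tf.assign builds an op that overwrites the variable when run;
    # the new value must match the variable's shape and dtype.
    new_values = np.zeros((784, 500), dtype=np.float32)
    sess.run(tf.assign(w, new_values))

    # Variable.load writes the value directly within the session,
    # without adding an op to the graph.
    w.load(new_values, sess)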

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST with one-hot labels, as in the sentdex tutorial.
mnist = input_data.read_data_sets('/tmp/data/', one_hot=True)


class expFFNN():

    def __init__(self):
        self.nNodesHidL1 = 500
        self.nNodesHidL2 = 500
        self.nNodesHidL3 = 500

        self.nClass = 10
        self.batch_size = 100

        self.x = tf.placeholder('float', [None, 784])
        self.y = tf.placeholder('float')

        self.h1Weights = tf.Variable(tf.random_normal([784, self.nNodesHidL1]))
        self.h2Weights = tf.Variable(tf.random_normal([self.nNodesHidL1, self.nNodesHidL2]))
        self.h3Weights = tf.Variable(tf.random_normal([self.nNodesHidL2, self.nNodesHidL3]))
        self.outWeights = tf.Variable(tf.random_normal([self.nNodesHidL3, self.nClass]))

        self.h1Biases = tf.Variable(tf.random_normal([self.nNodesHidL1]))
        self.h2Biases = tf.Variable(tf.random_normal([self.nNodesHidL2]))
        self.h3Biases = tf.Variable(tf.random_normal([self.nNodesHidL3]))
        self.outBiases = tf.Variable(tf.random_normal([self.nClass]))

        # Holders for the trained values pulled out after training.
        self.finH1W = []
        self.finH2W = []
        self.finH3W = []

        self.finH1B = []
        self.finH2B = []
        self.finH3B = []  # was mistakenly finH2B twice; fixed

    def neural_network_model(self, data):
        h1Layer = {'weights': self.h1Weights, 'biases': self.h1Biases}
        h2Layer = {'weights': self.h2Weights, 'biases': self.h2Biases}
        h3Layer = {'weights': self.h3Weights, 'biases': self.h3Biases}
        outputLayer = {'weights': self.outWeights, 'biases': self.outBiases}

        layer1 = tf.add(tf.matmul(data, h1Layer['weights']), h1Layer['biases'])
        layer1 = tf.nn.relu(layer1)

        layer2 = tf.add(tf.matmul(layer1, h2Layer['weights']), h2Layer['biases'])
        layer2 = tf.nn.relu(layer2)

        layer3 = tf.add(tf.matmul(layer2, h3Layer['weights']), h3Layer['biases'])
        layer3 = tf.nn.relu(layer3)

        output = tf.add(tf.matmul(layer3, outputLayer['weights']), outputLayer['biases'])

        return output

    def trainNeuralNetwork(self, x):
        y = self.y
        prediction = self.neural_network_model(x)  # was neural_network_model(self, x)
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=self.y))
        optimizer = tf.train.AdamOptimizer().minimize(cost)

        hmEpochs = 3

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            for epoch in range(hmEpochs):
                epochLoss = 0

                for _ in range(int(mnist.train.num_examples / self.batch_size)):
                    epoch_x, epoch_y = mnist.train.next_batch(self.batch_size)
                    _, c = sess.run([optimizer, cost], feed_dict={self.x: epoch_x, self.y: epoch_y})
                    epochLoss += c
                print('Epoch', epoch, 'completed out of', hmEpochs, 'loss:', epochLoss)

            correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

            # sess.run on a Variable returns its current (trained) value as a
            # numpy array; assigning the Variable object itself would only
            # copy a graph handle, not the learned numbers.
            self.finH1W = sess.run(self.h1Weights)
            self.finH2W = sess.run(self.h2Weights)
            self.finH3W = sess.run(self.h3Weights)

            self.finH1B = sess.run(self.h1Biases)
            self.finH2B = sess.run(self.h2Biases)
            self.finH3B = sess.run(self.h3Biases)

            print('Accuracy', accuracy.eval({self.x: mnist.test.images, self.y: mnist.test.labels}))
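For completeness, this is roughly how I drive it (hypothetical usage, assuming the sess.run-based copies above so that the fin* attributes end up as numpy arrays):

model = expFFNN()
model.trainNeuralNetwork(model.x)

# After training, the fin* attributes hold plain numpy arrays.
print(model.finH1W.shape)  # (784, 500)
print(model.finH1B.shape)  # (500,)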

0 Answers:

No answers yet.