Internal tensorflow variables differ from the external ones

Date: 2017-10-04 06:30:56

Tags: python-3.x tensorflow

I am trying to build an ANN model with TensorFlow. At the moment I can run the program as one long block of text, but I want to turn my code into something easier to work with, so I converted it into a class. This is what I did (basically copying the entire block of code into a class):

import os
import numpy as np
import tensorflow as tf

class NNmodel:

    def __init__(self, 
                 layers, inpShape, outShape, 
                 features,
                 learning_rate=0.1, nSteps = 100,
                 saveFolder='models'):
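        # Build the entire graph up front: the input/output placeholders,
        # per-layer weights and biases, the forward pass, the mean-squared
        # cost, and the Adam optimiser that minimises it.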

        self.layers = layers
        self.features = features
        self.learning_rate = learning_rate
        self.saveFolder = saveFolder
        self.nSteps = nSteps

        self.d    = tf.placeholder(shape = inpShape, dtype = tf.float32, name='d') # input layer
        self.dOut = tf.placeholder(shape = outShape, dtype = tf.float32, name='dOut') # output layer

        self.weights = []
        self.biases  = []
        self.compute = [self.d]

        layerSizes = [self.features] + [l['size'] for l in self.layers]

        # One weight matrix and one bias per consecutive pair of layer sizes.
        for i, (v1, v2) in enumerate(zip(layerSizes, layerSizes[1:])):
            self.weights.append( 
                tf.Variable(np.random.randn(v1, v2)*0.1, dtype = tf.float32, name='W{}'.format(i)))

            self.biases.append(
                tf.Variable(np.zeros((1,1)), dtype = tf.float32, name='b{}'.format(i)) )  # a single scalar bias per layer, broadcast over all units

            self.compute.append( tf.matmul( 
                self.compute[-1], self.weights[i]) + self.biases[i] )

            # note: 'linear' matches none of the cases below, so no activation is appended
            if self.layers[i]['activation'] == 'tanh':
                self.compute.append( tf.tanh( self.compute[-1] ) )

            if self.layers[i]['activation'] == 'relu':
                self.compute.append( tf.nn.relu( self.compute[-1] ) )

            if self.layers[i]['activation'] == 'sigmoid':
                self.compute.append( tf.sigmoid ( self.compute[-1] ) )

        self.result = self.compute[-1]
        self.delta  = self.dOut - self.result
        self.cost   = tf.reduce_mean(self.delta**2)

        self.optimizer = tf.train.AdamOptimizer(
            learning_rate = self.learning_rate).minimize(self.cost)

    def findVal(self, func, inpDict, restorePt=None):
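        # Evaluate func in a fresh session: initialise all variables,
        # optionally restore the latest checkpoint from restorePt, and
        # run func with the supplied feed dict.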

        saver = tf.train.Saver()
        sess  = tf.Session()

        init = tf.global_variables_initializer()
        sess.run(init)

        if restorePt is not None:
            try:
                saver.restore(sess,  tf.train.latest_checkpoint(restorePt) )
                print('Session restored')
            except Exception as e:
                print('Unable to restore the session ...')
                return None
        else:
            print('Warning, no restore point selected ...')

        result = sess.run(func, feed_dict = inpDict)
        sess.close()
        return result

    def optTF(self, inpDict, printSteps=50, modelFile=None):
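        # Train for self.nSteps Adam steps, recording the cost after each
        # step; optionally save a checkpoint under self.saveFolder.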

        cost = []
        saver = tf.train.Saver()

        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)

        print('x'*100)

        for i in range(self.nSteps):

            # First run the optimizer ...
            sess.run(self.optimizer, feed_dict = inpDict)

            # Save all the data you want to save
            c = sess.run( self.cost, feed_dict = inpDict)
            cost.append(c)

            if (i%printSteps) == 0:
                print('{:5d}'.format(i))

        result = sess.run(self.result, feed_dict = inpDict)

        if modelFile is not None:
            path = saver.save(sess, os.path.join( 
                self.saveFolder, modelFile))
            print('Model saved in: {}'.format(path))
        else:
            print('Warning! model not saved')
        sess.close()

        return cost, result

When I use this model, there seems to be a problem:

N        = 500
features = 2
nSteps   = 1000

X = [ (np.random.random(N))*np.random.randint(1000, 2000) for i in range(features)]
X = np.array([np.random.random(N), np.random.random(N)])  # note: this overwrites the X built above
data = [X.T, X[0].reshape(-1, 1)]  # inputs of shape (N, features), targets of shape (N, 1)

layers = [
    {'name':'6', 'size': 10, 'activation':'tanh'},
    {'name':'7', 'size': 1, 'activation':'linear'},
]
m1 = NNmodel(layers, inpShape=np.shape(data[0]), outShape = np.shape(data[1]), 
             features=features,
             learning_rate=0.1, nSteps = 100,
             saveFolder='models1')

d    = tf.placeholder(shape = np.shape(data[0]), dtype = tf.float32, name='d_4')
dOut = tf.placeholder(shape = np.shape(data[1]), dtype = tf.float32, name='dOut')

m1.findVal(m1.result, {d: data[0], dOut:data[1]})

Now it appears that the placeholders d and dOut that I supply externally do not match the placeholders self.d and self.dOut that already exist inside the model. How can I solve this?

1 Answer:

Answer 0 (score: 1)

Why not use the placeholders declared in the model?

m1.findVal(m1.result, {m1.d: data[0], m1.dOut: data[1]})
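The d and dOut created outside the class are brand-new nodes in the default graph: they are not the inputs from which m1.result was built, so feeding them leaves m1.d and m1.dOut without values, and running m1.result fails because the graph's own placeholders were never fed. Below is a minimal sketch of the corrected usage, assuming the same data arrays as above (the checkpoint name test.ckpt is only illustrative):

# Feed the placeholders the model's graph was actually built from;
# they live on the instance, so no external placeholders are needed.
result = m1.findVal(m1.result, {m1.d: data[0], m1.dOut: data[1]})

# Training works the same way: the feed-dict keys must be the graph's own inputs.
cost, result = m1.optTF({m1.d: data[0], m1.dOut: data[1]}, modelFile='test.ckpt')

Exposing the placeholders as attributes is what makes the class design work: callers never have to recreate them, and every feed dict is guaranteed to target the graph the model actually built.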