Restoring a TensorFlow model defined in a class and running a specific layer

Date: 2018-11-25 16:42:32

Tags: python tensorflow

My TensorFlow model is defined in a class as follows:

class EENetwork:
    def __init__(self, state_size, learning_rate, output_units, name='EENetwork'):
        self.learning_rate = learning_rate
        self.state_size = state_size    # State size = height * width * cameras
        self.output_units = output_units

        with tf.variable_scope(name):
            self.inputs_ = tf.placeholder(tf.float32, [None, *state_size], name='inputs')
            self.outputs_ = tf.placeholder(tf.float32, [None, self.output_units], name='outputs_')  # To hold the training Y

            # Placeholders aren't added to the saved graph by default
            tf.add_to_collection('inputs', self.inputs_)
            tf.add_to_collection('outputs_', self.outputs_)

            # Conv1 = samples * h * w * 24
            # Assume h, w = 64, so samples * 64 * 64 * 24
            self.conv1 = tf.layers.conv2d(inputs=self.inputs_, filters=24, kernel_size=[5, 5],
                                          padding='same', activation=tf.nn.relu, name='conv1')

            # Maxpool1 = samples * h/2 * w/2 * 24 = samples * 32 * 32 * 24
            self.maxpool1 = tf.layers.max_pooling2d(inputs=self.conv1, pool_size=[2, 2], strides=2, name='mp1')

            # Conv2 = samples * 32 * 32 * 36
            self.conv2 = tf.layers.conv2d(inputs=self.maxpool1, filters=36, kernel_size=[5, 5],
                                          padding='same', activation=tf.nn.relu, name='conv2')

            # Maxpool2 = samples * 16 * 16 * 36
            self.maxpool2 = tf.layers.max_pooling2d(inputs=self.conv2, pool_size=[2, 2], strides=2, name='mp2')

            # Conv3 = samples * 16 * 16 * 48
            self.conv3 = tf.layers.conv2d(inputs=self.maxpool2, filters=48, kernel_size=[3, 3],
                                          padding='same', activation=tf.nn.relu, name='conv3')

            # Maxpool3 = samples * 8 * 8 * 48
            self.maxpool3 = tf.layers.max_pooling2d(inputs=self.conv3, pool_size=[2, 2], strides=2, name='mp3')

            # Conv4 = samples * 8 * 8 * 64
            self.conv4 = tf.layers.conv2d(inputs=self.maxpool3, filters=64, kernel_size=[3, 3],
                                          padding='same', activation=tf.nn.relu, name='conv4')

            # Maxpool4 = samples * 4 * 4 * 64
            self.maxpool4 = tf.layers.max_pooling2d(inputs=self.conv4, pool_size=[2, 2], strides=2, name='mp4')

            # Flatten = samples * 1024
            self.flatten = tf.contrib.layers.flatten(self.maxpool4)

            # FC1, Output units = 1164
            self.fc1 = tf.layers.dense(inputs=self.flatten, units=1164, activation=tf.nn.elu,
                                       kernel_initializer=tf.contrib.layers.xavier_initializer(), name='fc1')

            # FC2, Output units = 100
            self.fc2 = tf.layers.dense(inputs=self.fc1, units=100, activation=tf.nn.elu,
                                       kernel_initializer=tf.contrib.layers.xavier_initializer(), name='fc2')

            # FC3, Output units = 50
            self.fc3 = tf.layers.dense(inputs=self.fc2, units=50, activation=tf.nn.elu,
                                       kernel_initializer=tf.contrib.layers.xavier_initializer(), name='fc3')

            # FC4, Output units = 10
            self.fc4 = tf.layers.dense(inputs=self.fc3, units=10, activation=tf.nn.elu,
                                       kernel_initializer=tf.contrib.layers.xavier_initializer(), name='fc4')

            # Output
            self.output = tf.layers.dense(inputs=self.flatten, units=self.output_units, name='DL')

            # Cost
            # self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output, labels=self.outputs_))
            self.loss = tf.losses.mean_squared_error(labels=self.outputs_, predictions=self.output)

            # Optimizer
            self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

I create an instance of this class, then train it and save it. Now, after restoring the saved model in another file, I want to get the output produced by self.output.
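The training and saving step is not central to the question, but for context it looks roughly like this (a minimal sketch: the training loop, the X_train/y_train arrays and the hyperparameter variables are simplified assumptions, and only the checkpoint path format matches the code further below):

import tensorflow as tf
import cnn_model

tf.reset_default_graph()
drivingNet = cnn_model.EENetwork(state_size=state_size,
                                 learning_rate=learning_rate, output_units=classes)

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        # X_train: samples * h * w * cameras, y_train: samples * output_units (assumed)
        _, loss = sess.run([drivingNet.optimizer, drivingNet.loss],
                           feed_dict={drivingNet.inputs_: X_train,
                                      drivingNet.outputs_: y_train})
    saver.save(sess, "./models/drivingModel_{LR}_{EP}_{MN}.ckpt".format(
        LR=learning_rate, EP=epochs, MN=mod_number))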

Here is the code I wrote for the second file:

with tf.Session() as sess:
    saver = tf.train.import_meta_graph("./models/drivingModel_{LR}_{EP}_{MN}.ckpt.meta".format(LR=learning_rate, EP=epochs, MN=mod_number))
    saver.restore(sess, "./models/drivingModel_{LR}_{EP}_{MN}.ckpt".format(LR=learning_rate, EP=epochs, MN=mod_number))

    frontImage = cv2.imread('Front_5.png')
    leftImage = cv2.imread('Left_5.png')
    rightImage = cv2.imread('Right_5.png')
    front = cv2.resize(cv2.cvtColor(frontImage, cv2.COLOR_RGB2GRAY), (h, w))
    left = cv2.resize(cv2.cvtColor(leftImage, cv2.COLOR_RGB2GRAY), (h, w))
    right = cv2.resize(cv2.cvtColor(rightImage, cv2.COLOR_RGB2GRAY), (h, w))

    images = np.stack((front, left, right))  # num_cameras * h * w
    images = np.moveaxis(images, [0, 1, 2], [2, 0, 1])
    X = []
    X.append(images)
    X = np.array(X)  

    print(tf.get_default_graph().get_all_collection_keys())
    all_variables = tf.get_collection('variables')

    print("Variables of the tensor:")
    for var in all_variables:
        print(var)

    graph = sess.graph
    print("Operations of the tensor: ")
    operations = tf.get_collection('train_op')
    for op in graph.get_operations():
        print(op)

I want to pass X via the feed_dict argument to self.inputs_ and fetch the output produced by self.output (the dense layer named 'DL').

I don't know which of these variables and operations I can use to do this.

I can get the input placeholder with:

    inputs_ = tf.get_collection('inputs')[0]

But how do I feed X to it and use it to get the values produced by the self.output layer?
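In other words, the kind of thing I am trying to write looks roughly like this (a sketch: the tensor name 'EENetwork/DL/BiasAdd:0' is my guess based on the variable scope and layer name, and would need to be checked against the printed graph operations):

# (inside the `with tf.Session() as sess:` block of the second file above)
graph = tf.get_default_graph()

# The placeholder was added to a collection, so it can be pulled out directly.
inputs_ = tf.get_collection('inputs')[0]

# Assumed name: a tf.layers.dense layer named 'DL' inside the 'EENetwork' scope
# usually exposes its result as the output of its 'BiasAdd' op.
output = graph.get_tensor_by_name('EENetwork/DL/BiasAdd:0')

decision = sess.run(output, feed_dict={inputs_: X})
print(decision)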

Edit: I was able to work around the problem by creating a new instance of the class and then running the following code:

tf.reset_default_graph()

drivingNet = cnn_model.EENetwork(state_size=state_size,
                                 learning_rate=learning_rate, output_units=classes)

with tf.Session() as sess:
    saver = tf.train.Saver(tf.global_variables())
    saver = tf.train.import_meta_graph("./models/drivingModel_{LR}_{EP}_{MN}.ckpt.meta".format(LR=learning_rate, EP=epochs, MN=mod_number))
    saver.restore(sess, "./models/drivingModel_{LR}_{EP}_{MN}.ckpt".format(LR=learning_rate, EP=epochs, MN=mod_number))

    frontImage = cv2.imread('Front_5.png')
    leftImage = cv2.imread('Left_5.png')
    rightImage = cv2.imread('Right_5.png')
    front = cv2.resize(cv2.cvtColor(frontImage, cv2.COLOR_RGB2GRAY), (h, w))
    left = cv2.resize(cv2.cvtColor(leftImage, cv2.COLOR_RGB2GRAY), (h, w))
    right = cv2.resize(cv2.cvtColor(rightImage, cv2.COLOR_RGB2GRAY), (h, w))

    images = np.stack((front, left, right))  # num_cameras * h * w
    # Move the axis to make it h * w * num_cameras
    images = np.moveaxis(images, [0, 1, 2], [2, 0, 1])
    # images = np.concatenate((leftImage, frontImage), axis=1)
    # images = np.concatenate((images, rightImage), axis=1)

    # Input needs to be passed as samples * h * w * num_cameras
    X = []
    X.append(images)
    X = np.array(X)  # This makes it 1 * h * w * cameras
    decision = sess.run(drivingNet.output, feed_dict={drivingNet.inputs_: X})
    temp = decision.tolist()
    flat_list = [item for sublist in temp for item in sublist]
    print(flat_list)

However, in this case I still don't understand what saver = tf.train.Saver(tf.global_variables()) is doing. Why is this line needed? If I leave it out, I get an "Attempting to use uninitialized value" error. Shouldn't importing the meta graph and calling saver.restore initialize all the values to the saved ones?
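For comparison, a pattern I have seen suggested elsewhere is to skip import_meta_graph entirely when the graph is rebuilt from the class, and restore the checkpoint directly into the freshly created variables (a sketch, assuming the default scope name 'EENetwork' matches the one used when the model was saved):

tf.reset_default_graph()
drivingNet = cnn_model.EENetwork(state_size=state_size,
                                 learning_rate=learning_rate, output_units=classes)

# This saver maps the checkpoint values onto the variables built by the class,
# so no separate initialization step is needed before running the network.
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "./models/drivingModel_{LR}_{EP}_{MN}.ckpt".format(
        LR=learning_rate, EP=epochs, MN=mod_number))
    decision = sess.run(drivingNet.output, feed_dict={drivingNet.inputs_: X})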

0 Answers:

There are no answers yet.