How to predict using a transfer learning model?

Asked: 2018-07-11 00:49:32

Tags: python tensorflow deep-learning conv-neural-network transfer-learning

I am using Inception-v3 for transfer learning on a machine-learning problem. The Inception-v3 layers are used to generate bottleneck files, and I then add 3 fully connected layers that are trained on those bottlenecks. Training works fine, but I run into problems when predicting on an input image.

Here is the code that builds the model:

def create_inception_graph(num_batches_per_epoch, FLAGS):
    modelFilePath = os.path.join(FLAGS.imagenet_inception_model_dir, INCEPTION_MODEL_GRAPH_DEF_FILE)
    inceptionV3 = InceptionV3(modelFilePath)
    inceptionV3.add_final_training_ops(FLAGS.num_classes, FLAGS.final_tensor_name, FLAGS.optimizer_name,
                                   num_batches_per_epoch, FLAGS)
    inceptionV3.add_evaluation_step()
    return inceptionV3
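
For context, this is roughly how I drive the builder before training (a sketch; FLAGS comes from my argument parser and num_batches_per_epoch is derived from the dataset size):

inceptionV3 = create_inception_graph(num_batches_per_epoch, FLAGS)
with tf.Session(graph=inceptionV3.inceptionGraph) as sess:
    sess.run(tf.global_variables_initializer())
    # ... the training loop feeds the placeholders defined below ...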



def _add_fully_connected_layer(self, input_to_layer, input_size, output_size, layer_name, keep_rate,
                               is_training, FLAGS):
    # is_training and FLAGS are accepted to match the call sites below; they
    # are not used in this simplified version of the layer.
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            initial_value_weights = tf.truncated_normal([input_size, output_size], stddev=0.001)
            layer_weights = tf.Variable(initial_value_weights, name='final_weights')
        with tf.name_scope('biases'):
            layer_biases = tf.Variable(tf.zeros([output_size]), name='final_biases')
        with tf.name_scope('Wx_plus_b'):
            logits_bn = tf.matmul(input_to_layer, layer_weights) + layer_biases
            logits_bn = tf.nn.relu(logits_bn)
            logits_bn = tf.nn.dropout(logits_bn, keep_rate)
    return logits_bn
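
The keep_rate placeholder is what lets the same graph serve both training and inference: dropout is active when keep_rate < 1.0 and becomes a no-op at 1.0. A minimal standalone illustration of the pattern (hypothetical values):

import tensorflow as tf

# Same pattern as above: the dropout strength is controlled per-run through a
# fed placeholder instead of being baked into the graph.
x = tf.placeholder(tf.float32, [None, 4], name='x')
keep_rate = tf.placeholder(tf.float32, name='keep_rate')
y = tf.nn.dropout(tf.nn.relu(x), keep_rate)

with tf.Session() as sess:
    batch = [[1.0, 2.0, 3.0, 4.0]]
    print(sess.run(y, feed_dict={x: batch, keep_rate: 0.5}))  # training-style
    print(sess.run(y, feed_dict={x: batch, keep_rate: 1.0}))  # inference-style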

This is the function that adds the final 3 fully connected layers to be trained on top of Inception-v3:

def add_final_training_ops(self, class_count, final_tensor_name, optimizer_name, num_batches_per_epoch, FLAGS):
    with self.inceptionGraph.as_default():
        with tf.name_scope('input'):
            self.bottleneckInput = tf.placeholder_with_default(self.bottleneckTensor,
                                                               shape=[None, BOTTLENECK_TENSOR_SIZE],
                                                               name='BottleneckInputPlaceholder')
            self.groundTruthInput = tf.placeholder(tf.float32, [None, class_count], name='GroundTruthInput')
            self.keep_rate = tf.placeholder(tf.float32, name='dropout_keep_rate')
            self.is_training_ph = tf.placeholder(tf.bool, name='is_training_ph')

        layer_name = 'final_minus_2_training_ops'
        logits_final_minus_2 = self._add_fully_connected_layer(self.bottleneckInput, BOTTLENECK_TENSOR_SIZE,
                                                               FINAL_MINUS_2_LAYER_SIZE, layer_name, self.keep_rate,
                                                               self.is_training_ph, FLAGS)

        layer_name = 'final_minus_1_training_ops'
        logits_final_minus_1 = self._add_fully_connected_layer(logits_final_minus_2, FINAL_MINUS_2_LAYER_SIZE,
                                                               FINAL_MINUS_1_LAYER_SIZE, layer_name, self.keep_rate,
                                                               self.is_training_ph, FLAGS)

        layer_name = 'final_training_ops'
        with tf.name_scope(layer_name):
            with tf.name_scope('weights'):
                initial_value = tf.truncated_normal([FINAL_MINUS_1_LAYER_SIZE, class_count], stddev=0.001)
                layer_weights = tf.Variable(initial_value, name='final_weights')
                # log weight statistics to TensorBoard
                self.variable_summaries(layer_weights)
            with tf.name_scope('biases'):
                layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
                # log bias statistics to TensorBoard
                self.variable_summaries(layer_biases)
            with tf.name_scope('Wx_plus_b'):
                logits = tf.matmul(logits_final_minus_1, layer_weights) + layer_biases
                # histogram of the pre-activation logits for TensorBoard
                tf.summary.histogram('pre_activations', logits)

        self.finalTensor = tf.nn.softmax(logits, name=final_tensor_name)
        # histogram of the softmax activations for TensorBoard
        tf.summary.histogram('activations', self.finalTensor)
        with tf.name_scope('cross_entropy'):
            self.cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.groundTruthInput,
                                                                         logits=logits)
            with tf.name_scope('total'):
                self.cross_entropy_mean = tf.reduce_mean(self.cross_entropy)
        # scalar summary of the mean cross-entropy loss
        tf.summary.scalar('cross_entropy', self.cross_entropy_mean)

        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.create_learning_rate(FLAGS, self.global_step, num_batches_per_epoch)

        with tf.name_scope('train'):
            if optimizer_name == "sgd":
                optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
            elif optimizer_name == "adam":
                optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            elif optimizer_name == "rmsprop":
                optimizer = tf.train.RMSPropOptimizer(self.learning_rate, FLAGS.rmsprop_decay,
                                                      momentum=FLAGS.rmsprop_momentum,
                                                      epsilon=FLAGS.rmsprop_epsilon)
            else:
                raise ValueError('Incorrect Optimizer Type...')
            self.trainStep = optimizer.minimize(self.cross_entropy_mean, global_step=self.global_step)
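
For completeness, this is roughly how the placeholders are fed during a training step (simplified; `bottlenecks` holds a batch of cached 2048-dim bottleneck values, `ground_truth` the matching one-hot labels, and 0.5 is just an example keep rate):

# One simplified training step against the ops defined above.
_, loss = sess.run(
    [inceptionV3.trainStep, inceptionV3.cross_entropy_mean],
    feed_dict={inceptionV3.bottleneckInput: bottlenecks,
               inceptionV3.groundTruthInput: ground_truth,
               inceptionV3.keep_rate: 0.5,        # example rate; dropout active
               inceptionV3.is_training_ph: True})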

Now, after training I save the model, and to predict on a new image I wrote the code below, but I am getting errors about which tensors to use for the input and the prediction.

tf.reset_default_graph()
imported_meta = tf.train.import_meta_graph("model/malignant-detection-model-.meta")
with tf.Session() as sess:
    imported_meta.restore(sess, tf.train.latest_checkpoint('model/'))
    print('Model restored successfully...')

    logits = tf.get_default_graph().get_tensor_by_name("final_result:0")

    x = tf.get_default_graph().get_tensor_by_name("input/BottleneckInputPlaceholder:0")

    # 'bottleneck' must already be a [batch, 2048] array of bottleneck values;
    # feeding the raw (299, 299, 3) image here is what fails
    y = sess.run([logits], feed_dict={x: bottleneck})
    print(y)

But this code gives me errors: one about the tensor/operation name, and one about feeding a (299, 299, 3) image into a placeholder of shape (?, 2048).
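
To track down the name error, one thing I can do is list every operation in the restored graph and check what the input and output tensors are actually called (the final name depends on final_tensor_name and on the name scopes used above):

# Print all operation names in the restored graph; a tensor name is the
# operation name plus an output index, e.g. 'final_result:0'.
for op in tf.get_default_graph().get_operations():
    print(op.name)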

My current understanding of the prediction path is sketched below, but I would really appreciate any help with this.
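
As far as I understand, a raw (299, 299, 3) image first has to be pushed through the frozen Inception graph to obtain its (1, 2048) bottleneck, and only that bottleneck can be fed to BottleneckInputPlaceholder; the dropout placeholder also has to be fed (with 1.0) at prediction time. Below is a minimal sketch of what I think this should look like, continuing inside the `with tf.Session() as sess:` block above; 'test.jpg' is a placeholder filename, and the JPEG-input and bottleneck tensor names ('DecodeJpeg/contents:0', 'pool_3/_reshape:0') are the ones from the standard Inception-v3 GraphDef, which may differ in my restored graph:

import numpy as np

graph = tf.get_default_graph()
# Assumed standard Inception-v3 tensor names; verify against the listing above.
jpeg_data = graph.get_tensor_by_name('DecodeJpeg/contents:0')
bottleneck_tensor = graph.get_tensor_by_name('pool_3/_reshape:0')

# Step 1: raw JPEG bytes -> (1, 2048) bottleneck values.
image_data = tf.gfile.FastGFile('test.jpg', 'rb').read()
bottleneck = sess.run(bottleneck_tensor, feed_dict={jpeg_data: image_data})
bottleneck = np.reshape(bottleneck, [1, -1])

# Step 2: bottleneck -> class probabilities; dropout disabled via keep_rate=1.0.
predictions = sess.run(
    logits,
    feed_dict={x: bottleneck,
               graph.get_tensor_by_name('input/dropout_keep_rate:0'): 1.0,
               graph.get_tensor_by_name('input/is_training_ph:0'): False})
print(predictions)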

0 Answers:

No answers yet.