Getting prediction output from estimator.predict() in TensorFlow 1.6

Asked: 2018-03-14 21:25:08

Tags: python tensorflow

I have this code, which is basically the TensorFlow MNIST example, and I want to make predictions on the test data:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Imports
import numpy as np
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)


# Our application logic will be added here
def cnn_model_fn(features, labels, mode):
"""Model function for CNN."""
# Input Layer
input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])

# Convolutional Layer #1
conv1 = tf.layers.conv2d(
    inputs=input_layer,
    filters=32,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu)

# Pooling Layer #1
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

# Convolutional Layer #2 and Pooling Layer #2
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=64,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

# Dense Layer
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
dropout = tf.layers.dropout(
    inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)

# Logits Layer
logits = tf.layers.dense(inputs=dropout, units=10)

predictions = {
    # Generate predictions (for PREDICT and EVAL mode)
    "classes": tf.argmax(input=logits, axis=1),
    # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
    # `logging_hook`.
    "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}

if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

# Calculate Loss (for both TRAIN and EVAL modes)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(
        loss=loss,
        global_step=tf.train.get_global_step())

    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, 
train_op=train_op)

# Add evaluation metrics (for EVAL mode)
if mode == tf.estimator.ModeKeys.EVAL:
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
    predictions = {
        'class_ids': predicted_classes[:, tf.newaxis],
        'probabilities': tf.nn.softmax(logits),
        'logits': logits,
    }
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)


def main(unused_argv):
    # Load training and eval data
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images[:54000]  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)[:54000]
    eval_data = train_data[:500]  # Returns np.array
    eval_labels = train_labels[:500]  # np.asarray(mnist.test.labels, dtype=np.int32)
    test_data = train_data[1000:]
    test_label = train_labels[1000:]
    # eval_data = mnist.test.images  # Returns np.array
    # eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="./tmp/mnist_convnet_model")
    # Set up logging for predictions
    tensors_train_to_log = {"probabilities": "softmax_tensor"}
    # tensors_eval_to_log = {"accuracy": "classes"}
    logging_train_hook = tf.train.LoggingTensorHook(
        tensors=tensors_train_to_log, every_n_iter=6000)
    # logging_eval_hook = tf.train.LoggingTensorHook(
    #     tensors=tensors_eval_to_log, every_n_iter=1000)
    # Train the model
    print("Training Data length:", np.shape(train_data))
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=10,
        num_epochs=1,
        shuffle=True)

    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=True)
    #     input_fn=train_input_fn,
    #     steps=20000,
    #     hooks=[logging_hook])
    # Evaluate the model and print results

    # eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    # print(eval_results)
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=6500)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
    tf.estimator.train_and_evaluate(estimator=mnist_classifier,
                                    train_spec=train_spec, eval_spec=eval_spec)

    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_data[0]},
        y=test_label,
        num_epochs=1,
        shuffle=True)
    # mnist_classifier.train(
    test_spec = tf.estimator.EvalSpec(input_fn=test_input_fn)
    predictions = mnist_classifier.predict(test_spec)

    print(predictions["logits"][0])  # I got an error when I tried to print this
    # print(predictions["logits"])

if __name__ == "__main__":
    tf.app.run()

The code works and gives me a trained model, but when I try to print the predictions I cannot find a way to do it. So, has anyone run this example and printed the predicted data, rather than just the evaluation accuracy?

4 Answers:

Answer 0 (score: 1)

Try this:

training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
training_predictions = np.array([item['predictions'][0] for item in training_predictions])
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
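
The `linear_regressor` and `predict_*_input_fn` names above come from a different example; adapted to the question's `mnist_classifier` and `test_input_fn`, the same pattern would look roughly like the sketch below. Note that `predict()` expects an `input_fn`, not an `EvalSpec`, and that in PREDICT mode the question's `cnn_model_fn` returns the keys "classes" and "probabilities" (there is no "logits" key).

raw_predictions = mnist_classifier.predict(input_fn=test_input_fn)
# predict() returns a generator of dicts, one per example, keyed by the
# `predictions` dict that cnn_model_fn returns for PREDICT mode.
predicted_classes = np.array([item["classes"] for item in raw_predictions])
print(predicted_classes[:10])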

Answer 1 (score: 0)

It is a generator object; to print it, you should use

print(list(predictions)[0])
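
Note that `list(predictions)` materializes the whole generator at once. A minimal sketch of iterating over it directly instead, assuming `test_input_fn` is built over the test data and passed via `input_fn`:

predictions = mnist_classifier.predict(input_fn=test_input_fn)
for pred in predictions:
    # Each element is a dict with the keys defined in cnn_model_fn for
    # PREDICT mode: "classes" and "probabilities".
    print("class:", pred["classes"], "probabilities:", pred["probabilities"])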

Answer 2 (score: 0)

The following should print all the predictions:

predictions_list = list(predictions)
for i in range(300):
    print(predictions_list[i])

Answer 3 (score: 0)

This should work:

 outputs = [list(next(predictions).values())[0] for i in range(300)]
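
Since `next()` raises `StopIteration` once the generator is exhausted, a slightly safer variant of the same idea (a sketch, not part of the original answer) is to slice the generator instead of calling `next()` a fixed number of times:

import itertools

predictions = mnist_classifier.predict(input_fn=test_input_fn)
# Take at most 300 prediction dicts without risking StopIteration.
outputs = [list(pred.values())[0] for pred in itertools.islice(predictions, 300)]
print(outputs[:10])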