I want to see the probability for each test image, so I modified this code (cifar10_eval.py):
def eval_once(saver, summary_writer, logits, labels, top_k_op, summary_op):
  ...............
  while step < num_iter and not coord.should_stop():
    result1, result2 = sess.run([logits, labels])
    print('Step:', step, 'result', result1, 'Label:', result2)
  ...............
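(As a side note: the values printed this way are the raw, unnormalized logits returned by cifar10.inference, not probabilities. If per-image probabilities are the goal, a minimal sketch, assuming the same logits tensor and the TF 1.x-era API this script already uses, would be to add a softmax op and fetch that instead:

# Sketch only (not the poster's code): turn the unnormalized logits from
# cifar10.inference(images) into per-class probabilities in the eval graph.
probabilities = tf.nn.softmax(logits)   # shape [batch_size, 10], rows sum to 1

# ...then, inside the eval loop, fetch the probabilities instead of the logits:
probs_val, labels_val = sess.run([probabilities, labels])
print('Step:', step, 'probs:', probs_val, 'Label:', labels_val)
)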
I ran the script like this:
# python cifar10_eval.py --batch_size=1 --run_once=True
The screen output is as follows:
Step: 0 result [[ 0.01539493 -0.00109618 -0.00364288 -0.00898853 -0.00086198 0.00587899 0.00981337 -0.00785329 -0.00282823 -0.00171288]] Label: [4]
Step: 1 result [[ 0.01539471 -0.00109601 -0.00364273 -0.00898863 -0.00086192 0.005879 0.00981339 -0.00785322 -0.00282811 -0.00171296]] Label: [7]
Step: 2 result [[ 0.01539475 -0.00109617 -0.00364274 -0.00898876 -0.00086183 0.00587886 0.00981328 -0.00785333 -0.00282814 -0.00171295]] Label: [8]
Step: 3 result [[ 0.01539472 -0.00109597 -0.00364275 -0.0089886 -0.00086183 0.00587902 0.00981344 -0.00785326 -0.00282817 -0.00171299]] Label: [4]
Step: 4 result [[ 0.01539488 -0.00109631 -0.00364294 -0.00898863 -0.00086199 0.00587896 0.00981327 -0.00785329 -0.00282809 -0.00171307]] Label: [0]
Step: 5 result [[ 0.01539478 -0.00109607 -0.00364292 -0.00898858 -0.00086194 0.00587904 0.00981335 -0.0078533 -0.00282818 -0.00171321]] Label: [4]
Step: 6 result [[ 0.01539493 -0.00109627 -0.00364277 -0.00898873 -0.0008618 0.00587892 0.00981339 -0.00785325 -0.00282807 -0.00171289]] Label: [9]
Step: 7 result [[ 0.01539504 -0.00109619 -0.0036429 -0.00898865 -0.00086194 0.00587894 0.0098133 -0.00785331 -0.00282818 -0.00171294]] Label: [4]
Step: 8 result [[ 0.01539493 -0.00109627 -0.00364286 -0.00898867 -0.00086183 0.00587899 0.00981332 -0.00785329 -0.00282825 -0.00171283]] Label: [8]
Step: 9 result [[ 0.01539495 -0.00109617 -0.00364286 -0.00898852 -0.00086186 0.0058789 0.00981337 -0.00785326 -0.00282827 -0.00171287]] Label: [9]
The label values look fine, but the logits output appears to be the same for every image! Why? Can anyone tell me the reason?
Here is the new cifar10_eval.py source code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import numpy as np
import tensorflow as tf
#from tensorflow.models.image.cifar10 import cifar10
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', '/tmp/cifar10_eval',
                           """Directory where to write event logs.""")
tf.app.flags.DEFINE_string('eval_data', 'test',
                           """Either 'test' or 'train_eval'.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/cifar10_train',
                           """Directory where to read model checkpoints.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
                            """How often to run the eval.""")
tf.app.flags.DEFINE_integer('num_examples', 10000,
                            """Number of examples to run.""")
tf.app.flags.DEFINE_boolean('run_once', True,
                            """Whether to run eval only once.""")
def eval_once(saver, summary_writer, logits, labels, top_k_op, summary_op):
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    else:
      print('No checkpoint file found')
      return

    # Start the queue runners.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))

      #num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
      #total_sample_count = num_iter * FLAGS.batch_size
      num_iter = FLAGS.num_examples
      total_sample_count = FLAGS.num_examples
      print(num_iter, FLAGS.batch_size, total_sample_count)
      true_count = 0  # Counts the number of correct predictions.
      step = 0
      time.sleep(1)
      while step < num_iter and not coord.should_stop():
        result1, result2 = sess.run([logits, labels])
        #label = sess.run(labels)
        print('Step:', step, 'result', result1, 'Label:', result2)
        step += 1

      precision = true_count / step
      print('Summary -- Step:', step, 'Accuracy:', true_count * 100.0 / step * 1.0)
      print('%s: total:%d true:%d precision @ 1 = %.3f' %
            (datetime.now(), total_sample_count, true_count, precision))
    except Exception as e:  # pylint: disable=broad-except
      coord.request_stop(e)

    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
def evaluate():
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model. logits is softmax
    logits = cifar10.inference(images)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      eval_once(saver, summary_writer, logits, labels, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
def main(argv=None):  # pylint: disable=unused-argument
  cifar10.maybe_download_and_extract()
  if tf.gfile.Exists(FLAGS.eval_dir):
    tf.gfile.DeleteRecursively(FLAGS.eval_dir)
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  print('Evaluate Start')
  evaluate()


if __name__ == '__main__':
  tf.app.run()
Answer (score: 1)
I had only trained for 1k steps (accuracy around 10%). But after I trained for 100k steps (accuracy around 86%), the results were very good.
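Relatedly, the precision printed by the modified eval_once is always 0 because true_count is never incremented. A minimal sketch of restoring the counting from the original tutorial's loop (it fetches top_k_op and sums the per-example booleans with numpy), reusing the names already in the script, could look like:

# Sketch only: also fetch top_k_op so correct predictions are counted.
while step < num_iter and not coord.should_stop():
  predictions, result1, result2 = sess.run([top_k_op, logits, labels])
  true_count += np.sum(predictions)   # in_top_k returns one bool per example
  print('Step:', step, 'result', result1, 'Label:', result2)
  step += 1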