I am trying to use the Convolutional Network tutorial https://www.tensorflow.org/tutorials/layers as a basis for my own dataset; right now I am working with this one: https://ai.stanford.edu/~jkrause/cars/car_dataset.html. I created the following reader.py file to feed the data, and it seems to work correctly:
import tensorflow as tf
from os import listdir
from os.path import isfile, join, dirname, realpath


def my_input_fn():
    this_path = dirname(realpath(__file__))
    data_path = this_path + "/cars_train/"

    # One class id per line, in the same order as the sorted image files.
    f = open(this_path + "/train_perfect_preds.txt")
    labels = [int(line.rstrip()) for line in f]
    f.close()

    files = [data_path + f for f in listdir(data_path)]
    files.sort()

    def _parse_function(filename, label):
        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_jpeg(image_string, channels=3)
        image_resized = tf.image.resize_images(image_decoded, [28, 28])
        return image_resized, label

    filenames = tf.constant(files)
    labels = tf.constant(labels)

    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
    dataset = dataset.map(_parse_function)

    iterator = dataset.make_one_shot_iterator()
    imagen, label = iterator.get_next()
    return imagen, label
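To check that the reader at least produces tensors of the expected shape, I run a quick inspection like the one below (a minimal sketch, assuming TensorFlow 1.x and the reader.py above; this is only for debugging and not part of the training code):

import tensorflow as tf
import reader as cars

# Build the pipeline once and pull a few examples to look at their shapes.
image, label = cars.my_input_fn()

with tf.Session() as sess:
    for _ in range(3):
        img_val, lbl_val = sess.run([image, label])
        # my_input_fn does not batch, so each run returns a single
        # (28, 28, 3) float image and one scalar class id.
        print(img_val.shape, lbl_val)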
I try to call it from this file https://github.com/tensorflow/tensorflow/blob/r1.7/tensorflow/examples/tutorials/layers/cnn_mnist.py, changing the main() function and the logits layer after dropout so it matches my number of classes (196):
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

import reader as cars

tf.logging.set_verbosity(tf.logging.INFO)


def cnn_model_fn(features, labels, mode):
    # Input images are 28x28 with 3 color channels.
    input_layer = tf.reshape(features, [-1, 28, 28, 3])

    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)

    # 196 classes in the Stanford Cars dataset.
    logits = tf.layers.dense(inputs=dropout, units=196)

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)


def main(unused_argv):
    # Create the Estimator
    classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)

    # Train the model
    classifier.train(
        input_fn=lambda: cars.my_input_fn(),
        steps=20000,
        hooks=[logging_hook])

    eval_results = classifier.evaluate(input_fn=lambda: cars.my_input_fn())
    print(eval_results)


if __name__ == "__main__":
    tf.app.run()
But I get this error:
INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_task_type': 'worker', '_global_id_in_cluster': 0, '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7fd4a5264790>, '_evaluation_master': '', '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_service': None, '_num_ps_replicas': 0, '_tf_random_seed': None, '_master': '', '_num_worker_replicas': 1, '_task_id': 0, '_log_step_count_steps': 100, '_model_dir': '/tmp/mnist_convnet_model', '_save_summary_steps': 100}
INFO:tensorflow:Calling model_fn.
Traceback (most recent call last):
File "test.py", line 116, in <module>
tf.app.run()
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 126, in run
_sys.exit(main(argv))
File "test.py", line 109, in main
hooks=[logging_hook])
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 352, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 812, in _train_model
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 793, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "test.py", line 73, in cnn_model_fn
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/ops/losses/losses_impl.py", line 831, in sparse_softmax_cross_entropy
labels, logits, weights, expected_rank_diff=1)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/ops/losses/losses_impl.py", line 760, in _remove_squeezable_dimensions
labels, predictions, expected_rank_diff=expected_rank_diff)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/ops/confusion_matrix.py", line 75, in remove_squeezable_dimensions
predictions = array_ops.squeeze(predictions, [-1])
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/ops/array_ops.py", line 2568, in squeeze
return gen_array_ops._squeeze(input, axis, name)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 5169, in _squeeze
"Squeeze", input=input, squeeze_dims=axis, name=name)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3273, in create_op
compute_device=compute_device)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3313, in _create_op_helper
set_shapes_for_outputs(op)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2501, in set_shapes_for_outputs
return _set_shapes_for_outputs(op)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2474, in _set_shapes_for_outputs
shapes = shape_func(op)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2404, in call_with_requiring
return call_cpp_shape_fn(op, require_shape_fn=True)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/framework/common_shapes.py", line 627, in call_cpp_shape_fn
require_shape_fn)
File "/home/satler/TFG/testCifar10/tensor/local/lib/python2.7/site-packages/tensorflow/python/framework/common_shapes.py", line 691, in _call_cpp_shape_fn_impl
raise ValueError(err.message)
ValueError: Can not squeeze dim[1], expected a dimension of 1, got 196 for 'sparse_softmax_cross_entropy_loss/remove_squeezable_dimensions/Squeeze' (op: 'Squeeze') with input shapes: [1,196].
I am not trying to get good results yet, just trying to get this to work so that I can start building my own network on top of something that runs. I am not sure whether the problem comes from how I load the dataset or from the network architecture, so any hints would be much appreciated. This is my first question here, so apologies if any information is missing.
Here cars_train contains the car JPGs with numeric names (00001.jpg, ...), and train_perfect_preds.txt contains one line per image, where the number is the class. The first image corresponds to the first line of the .txt, and so on.
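In other words, the pairing I rely on is roughly the one sketched below (an illustrative snippet only, assuming the alphabetically sorted file names line up with the label lines):

from os import listdir
from os.path import dirname, realpath

this_path = dirname(realpath(__file__))

# Sorted image names: 00001.jpg, 00002.jpg, ...
files = sorted(listdir(this_path + "/cars_train/"))

# One class id per line, in the same order as the sorted images.
with open(this_path + "/train_perfect_preds.txt") as f:
    labels = [int(line.rstrip()) for line in f]

# The n-th image is assumed to belong to the n-th label line.
for name, cls in zip(files[:3], labels[:3]):
    print(name, cls)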