TensorFlow convolution SoftmaxCrossEntropyWithLogits: logits and labels must be the same size: logits_size=[640,2] labels_size=[10,2]

Date: 2017-04-16 07:59:42

Tags: python tensorflow neural-network conv-neural-network softmax

I'm having trouble with the matrix calculations for each convolutional layer...

I get:

InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[640,2] labels_size=[10,2]

Can someone point me to a beginner-friendly resource that explains this in detail?

Thanks

Source code is based on https://github.com/martin-gorner/tensorflow-mnist-tutorial/blob/master/mnist_3.0_convolutional.py

import os
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
import numpy as np
import glob
import fnmatch
import matplotlib.pyplot as plt
from PIL import Image
import random
import threading
import math

tf.set_random_seed(0)

def convertToOneHot(vector, num_classes=None):

    assert isinstance(vector, np.ndarray)
    assert len(vector) > 0

    if num_classes is None:
        num_classes = np.max(vector)+1
    else:
        assert num_classes > 0
        assert num_classes >= np.max(vector)

    result = np.zeros(shape=(len(vector), num_classes))
    result[np.arange(len(vector)), vector] = 1

    return result.astype(np.float32)


def make_labels(filenames):

    n = len(filenames)

    #y = np.zeros((n,2), dtype = np.int32)
    #y = np.zeros(shape=[n], dtype = np.float32)
    label_y = np.zeros((n,2), dtype = np.float32)
    counter = 0
    dog = 0
    cat = 0

    for i in range(n):
        # If 'dog' string is in file name assign '1' 
        if fnmatch.fnmatch(filenames[i], '*dog*'):
            label_y[i,0] = 1
            #label_y[i] = 1
            dog += 1
        else:
            label_y[i,1] = 1
            #label_y[i] = 0
            cat += 1

    print("Dog: " , dog , " Cat: " , cat)

    return label_y

def make_test_labels(filenames):

    n = len(filenames)

    test_label_y = np.zeros([n], dtype = np.int32)

    for i in range(n):
        test_label_y[i] =   random.randrange(0,2)

    one_hot = convertToOneHot(test_label_y)

    return one_hot


train_path = "./data/train/*.jpg"

test_path = "./data/test1/*.jpg"

#Training Dataset

train_files = tf.gfile.Glob(train_path)

train_image_labels = make_labels(train_files)

train_filename_queue = tf.train.string_input_producer(train_files, shuffle=False)

train_image_reader = tf.WholeFileReader()

train_image_filename, train_image_file = train_image_reader.read(train_filename_queue)

train_image_file = tf.image.decode_jpeg(train_image_file, 1)

train_image_file = tf.image.resize_images(train_image_file, [224, 224])

train_image_file.set_shape((224, 224, 1))

train_image_file = tf.squeeze(train_image_file)


#Test or Eval Dataset

test_files = tf.gfile.Glob(test_path)

test_image_labels = make_test_labels(test_files)

test_filename_queue = tf.train.string_input_producer(test_files, shuffle=False)

test_image_reader = tf.WholeFileReader()

test_image_filename, test_image_file = test_image_reader.read(test_filename_queue)

test_image_file = tf.image.decode_jpeg(test_image_file, 1)

test_image_file = tf.image.resize_images(test_image_file, [224, 224])

test_image_file.set_shape((224, 224, 1))

test_image_file = tf.squeeze(test_image_file)


train_batch_size = 10

test_batch_size = 2

num_preprocess_threads = 1

min_queue_examples = 256


X = tf.placeholder(tf.float32, [None, 224, 224, 1])

Y_ = tf.placeholder(tf.float32, [None, 2])

lr = tf.placeholder(tf.float32)

pkeep = tf.placeholder(tf.float32)

# three convolutional layers with their channel counts, and a
# fully connected layer (the last layer has 2 softmax neurons)

K = 4  # first convolutional layer output depth
L = 8  # second convolutional layer output depth
M = 12  # third convolutional layer
N = 200  # fully connected layer

W1 = tf.Variable(tf.truncated_normal([5, 5, 1, K], stddev=0.1))  # 5x5 patch, 1 input channel, K output channels

print "W1: " , W1.get_shape()

B1 = tf.Variable(tf.ones([K])/10)

print "B1: " , B1.get_shape()

W2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.1))

print "W2: " , W2.get_shape()

B2 = tf.Variable(tf.ones([L])/10)

print "B2: " , B2.get_shape()

W3 = tf.Variable(tf.truncated_normal([4, 4, L, M], stddev=0.1))

print "W3: " , W3.get_shape()

B3 = tf.Variable(tf.ones([M])/10)

print "B3: " , B3.get_shape()

W4 = tf.Variable(tf.truncated_normal([7 * 7 * M, N], stddev=0.1))

print "W4: " , W4.get_shape()

B4 = tf.Variable(tf.ones([N])/10)

print "B4: " , B4.get_shape()

W5 = tf.Variable(tf.truncated_normal([N, 2], stddev=0.1))

print "W5: " , W5.get_shape()

B5 = tf.Variable(tf.ones([2])/10)

print "B5: " , B5.get_shape()


# The model

stride = 1  # output is 224x224

Y1 = tf.nn.relu(tf.nn.conv2d(X, W1, strides=[1, stride, stride, 1], padding='SAME') + B1)

print "Y1: " , Y1.get_shape()

stride = 2  # output is 112x112

Y2 = tf.nn.relu(tf.nn.conv2d(Y1, W2, strides=[1, stride, stride, 1], padding='SAME') + B2)

print "Y2: " , Y2.get_shape()

stride = 2  # output is 56x56

Y3 = tf.nn.relu(tf.nn.conv2d(Y2, W3, strides=[1, stride, stride, 1], padding='SAME') + B3)

print "Y3: " , Y3.get_shape()

# reshape the output from the third convolution for the fully connected layer

#YY = tf.reshape(Y3, shape=[-1, 7 * 7 * M])

YY = tf.reshape(Y3, shape=[-1, 7 * 7 * M])

print "YY: " , YY.get_shape()

Y4 = tf.nn.relu(tf.matmul(YY, W4) + B4)

print "Y4: " , Y4.get_shape()

Ylogits = tf.matmul(Y4, W5) + B5

print "Ylogits: " , Ylogits.get_shape()

Y = tf.nn.softmax(Ylogits)



# cross-entropy loss function (= -sum(Y_i * log(Yi)) ), normalised for batches of 10  images
# TensorFlow provides the softmax_cross_entropy_with_logits function to avoid numerical stability
# problems with log(0) which is NaN

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)

cross_entropy = tf.reduce_mean(cross_entropy) * 10

# accuracy of the trained model, between 0 (worst) and 1 (best)

correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))



allweights = tf.concat(0, [tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1]), tf.reshape(W5, [-1])])

allbiases  = tf.concat(0, [tf.reshape(B1, [-1]), tf.reshape(B2, [-1]), tf.reshape(B3, [-1]), tf.reshape(B4, [-1]), tf.reshape(B5, [-1])])

# training step, the learning rate is a placeholder

train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)


train_images = tf.train.batch([train_image_file], batch_size=train_batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * train_batch_size, allow_smaller_final_batch=True)  

test_images = tf.train.batch([test_image_file], batch_size=test_batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * test_batch_size, allow_smaller_final_batch=True)

train_labels = tf.train.batch([train_image_labels], batch_size=train_batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * train_batch_size, enqueue_many=True, allow_smaller_final_batch=True)

test_labels = tf.train.batch([test_image_labels], batch_size=test_batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * test_batch_size, enqueue_many=True, allow_smaller_final_batch=True) 


init = tf.global_variables_initializer()

sess = tf.Session()

sess.run(init)

def training_step(i, update_test_data, update_train_data):

    train_images_batch = train_images.eval(session=sess)

    train_images_batch = np.expand_dims(train_images_batch, axis=(3))

    train_labels_batch = train_labels.eval(session=sess)

    test_images_batch = test_images.eval(session=sess)

    test_images_batch = np.expand_dims(test_images_batch, axis=(3))

    test_labels_batch = test_labels.eval(session=sess)

    # learning rate decay
    max_learning_rate = 0.003
    min_learning_rate = 0.0001
    decay_speed = 2000.0
    learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i/decay_speed)


    if update_train_data:

        a, c, w, b = sess.run([accuracy, cross_entropy, allweights, allbiases], {X: train_images_batch, Y_: train_labels_batch})

        print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c) + " (lr:" + str(learning_rate) + ")")

    if update_test_data:

        a, c = sess.run([accuracy, cross_entropy], {X: test_images_batch, Y_: test_labels_batch})

        print(str(i) + ": ********* epoch " + " ********* test accuracy:" + str(a) + " test loss: " + str(c))

    # the backpropagation training step
    sess.run(train_step, {X: train_images_batch, Y_: train_labels_batch, lr: learning_rate})

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)  

for i in range(10000+1):

    training_step(i, i % 100 == 0, i % 20 == 0)

coord.request_stop()
coord.join(threads)

Output:

('Dog: ', 12500, ' Cat: ', 12500)
W1:  (5, 5, 1, 4)
B1:  (4,)
W2:  (5, 5, 4, 8)
B2:  (8,)
W3:  (4, 4, 8, 12)
B3:  (12,)
W4:  (588, 200)
B4:  (200,)
W5:  (200, 2)
B5:  (2,)
Y1:  (?, 224, 224, 4)
Y2:  (?, 112, 112, 8)
Y3:  (?, 56, 56, 12)
YY:  (?, 588)
Y4:  (?, 200)
Ylogits:  (?, 2)
Traceback (most recent call last):
  File "convolutional.py", line 306, in <module>
    training_step(i, i % 100 == 0, i % 20 == 0)
  File "convolutional.py", line 288, in training_step
    a, c, w, b = sess.run([accuracy, cross_entropy, allweights, allbiases], {X: train_images_batch, Y_: train_labels_batch})
  File "/home/dragon/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 766, in run
    run_metadata_ptr)
  File "/home/dragon/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 964, in _run
    feed_dict_string, options, run_metadata)
  File "/home/dragon/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1014, in _do_run
    target_list, options, run_metadata)
  File "/home/dragon/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1034, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: logits and labels must be same size: logits_size=[640,2] labels_size=[10,2]
     [[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](Reshape_1, Reshape_2)]]

Caused by op u'SoftmaxCrossEntropyWithLogits', defined at:
  File "convolutional.py", line 229, in <module>
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
  File "/home/dragon/.local/lib/python2.7/site-packages/tensorflow/python/ops/nn_ops.py", line 1449, in softmax_cross_entropy_with_logits
    precise_logits, labels, name=name)
  File "/home/dragon/.local/lib/python2.7/site-packages/tensorflow/python/ops/gen_nn_ops.py", line 2265, in _softmax_cross_entropy_with_logits
    features=features, labels=labels, name=name)
  File "/home/dragon/.local/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 759, in apply_op
    op_def=op_def)
  File "/home/dragon/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2240, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "/home/dragon/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1128, in __init__
    self._traceback = _extract_stack()

InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[640,2] labels_size=[10,2]
     [[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](Reshape_1, Reshape_2)]]

1 answer:

Answer 0 (score: 0)

Managed to fix it:
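The key change is the stride of the second and third convolutions (2 -> 6). With 224x224 inputs, strides of 1, 2, 2 leave Y3 at 56x56x12, so reshaping it to [-1, 7 * 7 * M] splits each image's activations into 64 rows of 588 values: a batch of 10 images becomes 640 logits rows against only 10 label rows, which is exactly the error above. With strides 1, 6, 6 and 'SAME' padding (output size = ceil(input size / stride)), Y3 comes out as 7x7x12, so the 7 * 7 * M reshape gives one row per image. A minimal sketch of that shape arithmetic (plain Python, separate from the model; the out_size helper is only for illustration):

import math

batch, in_size, M = 10, 224, 12                          # batch size, input size and 3rd-layer depth from the code below

def out_size(size, stride):
    # conv2d with padding='SAME': output size = ceil(input size / stride)
    return int(math.ceil(float(size) / stride))

# original strides 1, 2, 2: 224 -> 224 -> 112 -> 56
y3_old = out_size(out_size(out_size(in_size, 1), 2), 2)
rows_old = batch * y3_old * y3_old * M / (7 * 7 * M)     # 640 rows after tf.reshape(Y3, [-1, 7 * 7 * M])

# fixed strides 1, 6, 6: 224 -> 224 -> 38 -> 7
y3_new = out_size(out_size(out_size(in_size, 1), 6), 6)
rows_new = batch * y3_new * y3_new * M / (7 * 7 * M)     # 10 rows, matching the 10 labels

print "old:", y3_old, rows_old, "new:", y3_new, rows_new  # old: 56 640 new: 7 10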

import os
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
import numpy as np
import glob
import fnmatch
import matplotlib.pyplot as plt
from PIL import Image
import random
import threading
import math

tf.set_random_seed(0)

def convertToOneHot(vector, num_classes=None):

    assert isinstance(vector, np.ndarray)
    assert len(vector) > 0

    if num_classes is None:
        num_classes = np.max(vector)+1
    else:
        assert num_classes > 0
        assert num_classes >= np.max(vector)

    result = np.zeros(shape=(len(vector), num_classes))
    result[np.arange(len(vector)), vector] = 1

    return result.astype(np.float32)


def make_labels(filenames):

    n = len(filenames)

    #y = np.zeros((n,2), dtype = np.int32)
    #y = np.zeros(shape=[n], dtype = np.float32)
    label_y = np.zeros((n,2), dtype = np.float32)
    counter = 0
    dog = 0
    cat = 0

    for i in range(n):
        # If 'dog' string is in file name assign '1' 
        if fnmatch.fnmatch(filenames[i], '*dog*'):
            label_y[i,0] = 1
            #label_y[i] = 1
            dog += 1
        else:
            label_y[i,1] = 1
            #label_y[i] = 0
            cat += 1

    print("Dog: " , dog , " Cat: " , cat)

    return label_y

def make_test_labels(filenames):

    n = len(filenames)

    test_label_y = np.zeros([n], dtype = np.int32)

    for i in range(n):
        test_label_y[i] =   random.randrange(0,2)

    one_hot = convertToOneHot(test_label_y)

    return one_hot


train_path = "./data/train/*.jpg"

test_path = "./data/test1/*.jpg"

#Training Dataset

train_files = tf.gfile.Glob(train_path)

train_image_labels = make_labels(train_files)

train_filename_queue = tf.train.string_input_producer(train_files, shuffle=False)

train_image_reader = tf.WholeFileReader()

train_image_filename, train_image_file = train_image_reader.read(train_filename_queue)

train_image_file = tf.image.decode_jpeg(train_image_file, 1)

train_image_file = tf.image.resize_images(train_image_file, [224, 224])

train_image_file.set_shape((224, 224, 1))

train_image_file = tf.squeeze(train_image_file)


#Test or Eval Dataset

test_files = tf.gfile.Glob(test_path)

test_image_labels = make_test_labels(test_files)

test_filename_queue = tf.train.string_input_producer(test_files, shuffle=False)

test_image_reader = tf.WholeFileReader()

test_image_filename, test_image_file = test_image_reader.read(test_filename_queue)

test_image_file = tf.image.decode_jpeg(test_image_file, 1)

test_image_file = tf.image.resize_images(test_image_file, [224, 224])

test_image_file.set_shape((224, 224, 1))

test_image_file = tf.squeeze(test_image_file)


train_batch_size = 10

test_batch_size = 2

num_preprocess_threads = 1

min_queue_examples = 256


X = tf.placeholder(tf.float32, [None, 224, 224, 1])

Y_ = tf.placeholder(tf.float32, [None, 2])

lr = tf.placeholder(tf.float32)

pkeep = tf.placeholder(tf.float32)

# three convolutional layers with their channel counts, and a
# fully connected layer (the last layer has 2 softmax neurons)

K = 4  # first convolutional layer output depth
L = 8  # second convolutional layer output depth
M = 12  # third convolutional layer
N = 200  # fully connected layer

W1 = tf.Variable(tf.truncated_normal([5, 5, 1, K], stddev=0.1))  # 5x5 patch, 1 input channel, K output channels

print "W1: " , W1.get_shape()

B1 = tf.Variable(tf.ones([K])/10)

print "B1: " , B1.get_shape()

W2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.1))

print "W2: " , W2.get_shape()

B2 = tf.Variable(tf.ones([L])/10)

print "B2: " , B2.get_shape()

W3 = tf.Variable(tf.truncated_normal([4, 4, L, M], stddev=0.1))

print "W3: " , W3.get_shape()

B3 = tf.Variable(tf.ones([M])/10)

print "B3: " , B3.get_shape()

W4 = tf.Variable(tf.truncated_normal([7 * 7 * M, N], stddev=0.1))

print "W4: " , W4.get_shape()

B4 = tf.Variable(tf.ones([N])/10)

print "B4: " , B4.get_shape()

W5 = tf.Variable(tf.truncated_normal([N, 2], stddev=0.1))

print "W5: " , W5.get_shape()

B5 = tf.Variable(tf.ones([2])/10)

print "B5: " , B5.get_shape()


# The model

stride = 1

Y1 = tf.nn.relu(tf.nn.conv2d(X, W1, strides=[1, stride, stride, 1], padding='SAME') + B1)

print "Y1: " , Y1.get_shape()

stride = 6 #changed from 2 to 6

Y2 = tf.nn.relu(tf.nn.conv2d(Y1, W2, strides=[1, stride, stride, 1], padding='SAME') + B2)

print "Y2: " , Y2.get_shape()

stride = 6 #changed from 2 to 6

Y3 = tf.nn.relu(tf.nn.conv2d(Y2, W3, strides=[1, stride, stride, 1], padding='SAME') + B3)

print "Y3: " , Y3.get_shape()

# reshape the output from the third convolution for the fully connected layer

#YY = tf.reshape(Y3, shape=[-1, 7 * 7 * M])

YY = tf.reshape(Y3, shape=[-1, 7 * 7 * M])

print "YY: " , YY.get_shape()

Y4 = tf.nn.relu(tf.matmul(YY, W4) + B4)

print "Y4: " , Y4.get_shape()

Ylogits = tf.matmul(Y4, W5) + B5

print "Ylogits: " , Ylogits.get_shape()

Y = tf.nn.softmax(Ylogits)



# cross-entropy loss function (= -sum(Y_i * log(Yi)) ), normalised for batches of 10  images
# TensorFlow provides the softmax_cross_entropy_with_logits function to avoid numerical stability
# problems with log(0) which is NaN

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)

cross_entropy = tf.reduce_mean(cross_entropy) * 10

# accuracy of the trained model, between 0 (worst) and 1 (best)

correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))



allweights = tf.concat(0, [tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1]), tf.reshape(W5, [-1])])

allbiases  = tf.concat(0, [tf.reshape(B1, [-1]), tf.reshape(B2, [-1]), tf.reshape(B3, [-1]), tf.reshape(B4, [-1]), tf.reshape(B5, [-1])])

# training step, the learning rate is a placeholder

train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)


train_images = tf.train.batch([train_image_file], batch_size=train_batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * train_batch_size, allow_smaller_final_batch=True)  

test_images = tf.train.batch([test_image_file], batch_size=test_batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * test_batch_size, allow_smaller_final_batch=True)

train_labels = tf.train.batch([train_image_labels], batch_size=train_batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * train_batch_size, enqueue_many=True, allow_smaller_final_batch=True)

test_labels = tf.train.batch([test_image_labels], batch_size=test_batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * test_batch_size, enqueue_many=True, allow_smaller_final_batch=True) 


init = tf.global_variables_initializer()

sess = tf.Session()

sess.run(init)

def training_step(i, update_test_data, update_train_data):

    train_images_batch = train_images.eval(session=sess)

    train_images_batch = np.expand_dims(train_images_batch, axis=(3))

    train_labels_batch = train_labels.eval(session=sess)

    test_images_batch = test_images.eval(session=sess)

    test_images_batch = np.expand_dims(test_images_batch, axis=(3))

    test_labels_batch = test_labels.eval(session=sess)

    # learning rate decay
    max_learning_rate = 0.003
    min_learning_rate = 0.0001
    decay_speed = 2000.0
    learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i/decay_speed)


    if update_train_data:

        a, c, w, b = sess.run([accuracy, cross_entropy, allweights, allbiases], {X: train_images_batch, Y_: train_labels_batch})

        print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c) + " (lr:" + str(learning_rate) + ")")

    if update_test_data:

        a, c = sess.run([accuracy, cross_entropy], {X: test_images_batch, Y_: test_labels_batch})

        print(str(i) + ": ********* epoch " + " ********* test accuracy:" + str(a) + " test loss: " + str(c))

    # the backpropagation training step
    sess.run(train_step, {X: train_images_batch, Y_: train_labels_batch, lr: learning_rate})

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)  

for i in range(10000+1):

    training_step(i, i % 100 == 0, i % 20 == 0)

coord.request_stop()
coord.join(threads)

Output:

0: accuracy:0.6 loss: 11.1554 (lr:0.003)
0: ********* epoch  ********* test accuracy:0.5 test loss: 13.0221
20: accuracy:0.7 loss: 5.93612 (lr:0.00297114451787)
40: accuracy:0.5 loss: 7.40466 (lr:0.00294257615259)
60: accuracy:0.4 loss: 6.7717 (lr:0.00291429204729)
80: accuracy:0.5 loss: 7.07674 (lr:0.00288628937354)
100: accuracy:0.6 loss: 7.0582 (lr:0.00285856533105)
100: ********* epoch  ********* test accuracy:0.5 test loss: 7.8989
120: accuracy:0.3 loss: 6.95063 (lr:0.00283111714739)
140: accuracy:0.5 loss: 6.57362 (lr:0.00280394207773)
160: accuracy:0.4 loss: 7.82804 (lr:0.00277703740452)
180: accuracy:0.6 loss: 6.80486 (lr:0.00275040043729)
200: accuracy:0.6 loss: 6.98334 (lr:0.0027240285123)
200: ********* epoch  ********* test accuracy:0.5 test loss: 6.71705
220: accuracy:0.9 loss: 4.97062 (lr:0.00269791899236)
240: accuracy:0.5 loss: 5.81752 (lr:0.00267206926648)
260: accuracy:0.6 loss: 6.53955 (lr:0.00264647674967)
280: accuracy:0.3 loss: 8.13235 (lr:0.00262113888266)
300: accuracy:0.5 loss: 6.55135 (lr:0.00259605313163)
300: ********* epoch  ********* test accuracy:0.5 test loss: 6.69505
320: accuracy:0.7 loss: 6.20664 (lr:0.002571216988)
340: accuracy:0.6 loss: 6.23909 (lr:0.00254662796813)
360: accuracy:0.8 loss: 4.65571 (lr:0.00252228361309)
380: accuracy:0.5 loss: 8.66489 (lr:0.00249818148844)
400: accuracy:0.6 loss: 6.5182 (lr:0.00247431918393)
400: ********* epoch  ********* test accuracy:0.0 test loss: 7.87964
420: accuracy:0.8 loss: 5.66837 (lr:0.00245069431331)
440: accuracy:0.5 loss: 6.87555 (lr:0.00242730451409)
460: accuracy:0.7 loss: 5.9571 (lr:0.00240414744726)
480: accuracy:0.6 loss: 6.63313 (lr:0.00238122079709)
500: accuracy:0.7 loss: 6.31318 (lr:0.00235852227091)
500: ********* epoch  ********* test accuracy:0.0 test loss: 8.10029
520: accuracy:0.7 loss: 7.07258 (lr:0.00233604959883)
540: accuracy:0.5 loss: 7.05975 (lr:0.00231380053358)
560: accuracy:0.5 loss: 7.25364 (lr:0.00229177285022)
580: accuracy:0.6 loss: 6.73549 (lr:0.00226996434598)
600: accuracy:0.8 loss: 4.62922 (lr:0.00224837283998)
600: ********* epoch  ********* test accuracy:0.5 test loss: 6.42755
620: accuracy:0.6 loss: 6.97526 (lr:0.00222699617305)
640: accuracy:0.7 loss: 5.79574 (lr:0.00220583220751)
660: accuracy:0.7 loss: 6.3626 (lr:0.00218487882695)
680: accuracy:0.8 loss: 7.46478 (lr:0.00216413393601)
700: accuracy:0.6 loss: 5.08642 (lr:0.00214359546018)
700: ********* epoch  ********* test accuracy:0.5 test loss: 13.1772
720: accuracy:0.5 loss: 7.97948 (lr:0.00212326134561)
740: accuracy:0.5 loss: 7.21764 (lr:0.00210312955885)
760: accuracy:0.7 loss: 6.36856 (lr:0.00208319808672)
780: accuracy:0.7 loss: 5.55039 (lr:0.00206346493604)
800: accuracy:0.6 loss: 7.82716 (lr:0.0020439281335)
800: ********* epoch  ********* test accuracy:0.0 test loss: 8.27366
820: accuracy:0.5 loss: 6.67392 (lr:0.0020245857254)
840: accuracy:0.6 loss: 6.52732 (lr:0.00200543577746)
860: accuracy:0.6 loss: 6.47456 (lr:0.0019864763747)
880: accuracy:0.8 loss: 5.34777 (lr:0.00196770562114)
900: accuracy:0.5 loss: 6.67296 (lr:0.0019491216397)
900: ********* epoch  ********* test accuracy:0.0 test loss: 9.64283
920: accuracy:0.7 loss: 6.66137 (lr:0.00193072257197)
940: accuracy:0.9 loss: 5.7024 (lr:0.00191250657802)
960: accuracy:0.7 loss: 6.43189 (lr:0.00189447183624)
980: accuracy:0.7 loss: 5.07373 (lr:0.00187661654313)
1000: accuracy:0.7 loss: 6.38296 (lr:0.00185893891317)
1000: ********* epoch  ********* test accuracy:0.5 test loss: 8.52038
1020: accuracy:0.8 loss: 9.55432 (lr:0.00184143717856)
1040: accuracy:0.6 loss: 7.09334 (lr:0.00182410958911)
1060: accuracy:0.4 loss: 7.11957 (lr:0.00180695441207)
1080: accuracy:0.6 loss: 7.0127 (lr:0.00178996993188)
1100: accuracy:0.4 loss: 7.69579 (lr:0.0017731544501)
1100: ********* epoch  ********* test accuracy:0.5 test loss: 11.4886
1120: accuracy:0.7 loss: 6.882 (lr:0.00175650628516)
1140: accuracy:0.8 loss: 5.1929 (lr:0.00174002377223)
1160: accuracy:0.3 loss: 6.76535 (lr:0.00172370526304)
1180: accuracy:0.5 loss: 7.55367 (lr:0.00170754912573)
1200: accuracy:0.7 loss: 5.56166 (lr:0.00169155374467)
1200: ********* epoch  ********* test accuracy:0.5 test loss: 7.25217
1220: accuracy:0.8 loss: 5.60191 (lr:0.00167571752032)
1240: accuracy:0.6 loss: 6.82252 (lr:0.00166003886902)
1260: accuracy:0.9 loss: 5.644 (lr:0.00164451622292)
1280: accuracy:0.8 loss: 5.7729 (lr:0.00162914802972)
1300: accuracy:0.7 loss: 5.87659 (lr:0.00161393275261)
1300: ********* epoch  ********* test accuracy:0.5 test loss: 5.97413
1320: accuracy:0.5 loss: 7.00393 (lr:0.00159886887003)
1340: accuracy:0.5 loss: 6.74683 (lr:0.00158395487558)
1360: accuracy:0.5 loss: 6.34526 (lr:0.00156918927786)
1380: accuracy:0.5 loss: 7.20938 (lr:0.00155457060029)
1400: accuracy:0.5 loss: 6.90222 (lr:0.001540097381)
1400: ********* epoch  ********* test accuracy:1.0 test loss: 5.80986
1420: accuracy:0.7 loss: 6.27892 (lr:0.00152576817264)
1440: accuracy:0.6 loss: 7.12203 (lr:0.00151158154228)
1460: accuracy:0.6 loss: 6.3593 (lr:0.00149753607126)
1480: accuracy:0.5 loss: 6.73234 (lr:0.00148363035501)
1500: accuracy:0.7 loss: 5.93877 (lr:0.00146986300295)
1500: ********* epoch  ********* test accuracy:0.0 test loss: 13.2649
1520: accuracy:0.5 loss: 6.56755 (lr:0.00145623263833)
1540: accuracy:0.2 loss: 11.2914 (lr:0.0014427378981)
1560: accuracy:0.7 loss: 6.40623 (lr:0.00142937743279)
1580: accuracy:0.8 loss: 5.21643 (lr:0.00141614990632)
1600: accuracy:0.6 loss: 7.13112 (lr:0.00140305399594)
1600: ********* epoch  ********* test accuracy:0.5 test loss: 7.00448
1620: accuracy:0.8 loss: 6.04726 (lr:0.00139008839205)
1640: accuracy:0.6 loss: 5.6949 (lr:0.00137725179807)
1660: accuracy:0.4 loss: 8.35133 (lr:0.00136454293033)
1680: accuracy:0.7 loss: 5.67702 (lr:0.00135196051794)
1700: accuracy:0.7 loss: 5.75319 (lr:0.00133950330265)
1700: ********* epoch  ********* test accuracy:0.5 test loss: 4.69712
1720: accuracy:0.7 loss: 5.62581 (lr:0.00132717003872)
1740: accuracy:0.5 loss: 6.94164 (lr:0.00131495949282)
1760: accuracy:0.8 loss: 6.81993 (lr:0.00130287044388)
1780: accuracy:0.6 loss: 6.46852 (lr:0.00129090168298)
1800: accuracy:0.7 loss: 5.52436 (lr:0.00127905201325)
1800: ********* epoch  ********* test accuracy:1.0 test loss: 5.64173
1820: accuracy:0.6 loss: 5.64919 (lr:0.0012673202497)
1840: accuracy:0.6 loss: 6.43817 (lr:0.00125570521915)
1860: accuracy:0.6 loss: 6.09095 (lr:0.00124420576008)
1880: accuracy:0.7 loss: 5.98002 (lr:0.00123282072254)
1900: accuracy:0.8 loss: 5.04342 (lr:0.00122154896802)
1900: ********* epoch  ********* test accuracy:0.5 test loss: 5.80969
1920: accuracy:0.5 loss: 6.73896 (lr:0.00121038936933)
1940: accuracy:0.6 loss: 5.74824 (lr:0.0011993408105)
1960: accuracy:0.9 loss: 5.07425 (lr:0.00118840218667)
1980: accuracy:0.8 loss: 5.07815 (lr:0.00117757240396)
2000: accuracy:0.6 loss: 6.49868 (lr:0.0011668503794)
2000: ********* epoch  ********* test accuracy:0.5 test loss: 5.17815
2020: accuracy:0.7 loss: 6.32198 (lr:0.00115623504076)
2040: accuracy:0.5 loss: 6.81228 (lr:0.0011457253265)
2060: accuracy:0.7 loss: 6.27763 (lr:0.00113532018565)
2080: accuracy:0.4 loss: 6.48217 (lr:0.00112501857768)
2100: accuracy:0.8 loss: 5.47317 (lr:0.00111481947242)
2100: ********* epoch  ********* test accuracy:0.5 test loss: 6.97906
2120: accuracy:0.7 loss: 6.64062 (lr:0.00110472184996)
2140: accuracy:0.4 loss: 7.58298 (lr:0.00109472470051)
2160: accuracy:0.5 loss: 7.05855 (lr:0.00108482702437)
2180: accuracy:0.7 loss: 5.3101 (lr:0.00107502783175)
2200: accuracy:0.4 loss: 7.99556 (lr:0.00106532614272)
2200: ********* epoch  ********* test accuracy:0.0 test loss: 30.0326
2220: accuracy:0.5 loss: 5.50862 (lr:0.00105572098712)
2240: accuracy:0.7 loss: 7.57991 (lr:0.00104621140441)
2260: accuracy:0.9 loss: 4.57821 (lr:0.00103679644362)
2280: accuracy:0.5 loss: 6.55349 (lr:0.00102747516327)
2300: accuracy:0.4 loss: 7.27755 (lr:0.0010182466312)
2300: ********* epoch  ********* test accuracy:0.5 test loss: 8.20956
2320: accuracy:0.7 loss: 5.98563 (lr:0.00100910992456)
2340: accuracy:0.7 loss: 5.72076 (lr:0.00100006412967)
2360: accuracy:0.7 loss: 5.22523 (lr:0.000991108341943)
2380: accuracy:0.8 loss: 4.87689 (lr:0.000982241665793)
2400: accuracy:0.8 loss: 5.14703 (lr:0.000973463214545)
2400: ********* epoch  ********* test accuracy:0.5 test loss: 6.0281
2420: accuracy:0.7 loss: 6.03927 (lr:0.000964772110347)
2440: accuracy:0.7 loss: 4.91816 (lr:0.00095616748408)
2460: accuracy:0.5 loss: 6.61033 (lr:0.000947648475274)
2480: accuracy:0.7 loss: 4.98853 (lr:0.000939214232023)
2500: accuracy:0.5 loss: 6.02745 (lr:0.000930863910895)
2500: ********* epoch  ********* test accuracy:0.5 test loss: 6.64094
2520: accuracy:0.8 loss: 5.77956 (lr:0.000922596676849)
2540: accuracy:0.5 loss: 7.03326 (lr:0.000914411703157)
2560: accuracy:0.9 loss: 3.64503 (lr:0.000906308171314)
2580: accuracy:0.5 loss: 7.78428 (lr:0.00089828527096)

The accuracy doesn't seem very good... does this mean I need to add more layers?

Or is something else wrong?

Thanks