I'd be very grateful if someone could point out what I need to fix. I'm new to TensorFlow and I can't tell what's going wrong.
Code:
from PIL import ImageFont, Image, ImageDraw
import tensorflow as tf
import random
import numpy as np

x = tf.placeholder(tf.float32, [None, 48, 48, 1])
y = tf.placeholder(tf.float32, [None, 26])

def initW(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

w1 = initW([5, 5, 1, 32])
w2 = initW([5, 5, 32, 64])
w3 = initW([64 * 12 * 12, 26])
keep_prob = tf.placeholder(tf.float32)
saver = tf.train.Saver()

def model(x, w1, w2, w3, keep_prob):
    l1c = tf.nn.relu(tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding="SAME"))
    l1p = tf.nn.max_pool(l1c, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    l2c = tf.nn.relu(tf.nn.conv2d(l1p, w2, strides=[1, 1, 1, 1], padding="SAME"))
    l2p = tf.nn.max_pool(l2c, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    l2r = tf.reshape(l2p, [-1, 64 * 12 * 12])
    l2d = tf.nn.dropout(l2r, keep_prob)
    l3f = tf.matmul(l2d, w3)
    return l3f

model = model(x, w1, w2, w3, keep_prob)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=model))
op = tf.train.AdamOptimizer(0.000002).minimize(cost)

def imgData(char):
    img = Image.new("L", (48, 48), color=255)
    f = ImageFont.truetype("Arial.ttf", 50)
    ImageDraw.Draw(img).text((25 - f.getsize("W")[0] / 2, 0), "W", font=f)
    return np.reshape(img.getdata(), [48, 48, 1])

def oneHot(C, reshape=False):
    arr = [0] * 26
    arr[charToInt(C)] = 1
    if reshape:
        return np.reshape(arr, [1, 26])
    else:
        return arr

def charToInt(C):
    return ord(C) - ord("A")

def accuracy():
    alphabet = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    testSet = np.array([imgData(C) for C in alphabet])
    testLabels = np.array([oneHot(C) for C in alphabet])
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(model, 1), tf.argmax(y, 1)), tf.float32))
    print(sess.run(accuracy, feed_dict={x: testSet, y: testLabels, keep_prob: 1.0}))

def predict(char):
    probabilities = sess.run(model, feed_dict={x: [imgData(char)], keep_prob: 0.5})[0]
    for n in range(26):
        print(chr(ord("A") + n) + ": " + "{0:.2f}".format(probabilities[n]))

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    alphabet = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    random.shuffle(alphabet)
    train = True
    if train:
        for n in range(20):
            for C in alphabet:
                sess.run(op, feed_dict={
                    x: [imgData(C)],
                    y: oneHot(C, reshape=True),
                    keep_prob: 0.5
                })
            print("cost: " + str(sess.run(cost, feed_dict={
                x: [imgData(C)],
                y: oneHot(C, reshape=True),
                keep_prob: 1.0
            })))
        saver.save(sess, "./model")
    else:
        saver.restore(sess, "./model")
    accuracy()
    # predict("B")
This is a simple convolutional neural network program that takes images of uppercase letters and outputs the predicted character. x is the input tensor for the 48x48 images generated by imgData(char), and y is the output representing the uppercase character. The CNN model lives in model(x, w1, w2, w3, keep_prob). No matter what learning rates and epoch counts I try, the cost always converges to around 3.3. accuracy() prints the model's measured accuracy on a test set; it always prints 0.0384615. I expect predict() to show the probability that a given image is each of the characters, but right now it prints all sorts of strange numbers.
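For reference, the 64*12*12 used for w3 and the reshape follows from the two stride-2 max-pools (a quick check derived from the code above):

# each SAME max-pool with stride 2 halves the 48x48 spatial size: 48 -> 24 -> 12;
# the second conv layer outputs 64 channels, so the flattened vector has
print(64 * 12 * 12)  # 9216 elements, matching the first dimension of w3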
Many thanks.
Answer 0 (score: 1)
Your imgData method has a typo: if I understand correctly, you should draw char instead of "W" (see the documentation for ImageDraw.Draw.text). As written, the image you train on is always the letter "W", no matter which gold label it is paired with. That is why you always get an accuracy of 0.0384, which is 1/26: the model only ever learns to recognize one of the 26 characters (which is expected, since the network never sees an image of any letter other than "W"). The fixed method:
def imgData(char):
    img = Image.new("L", (48, 48), color=255)
    f = ImageFont.truetype("/tmp/arial.ttf", 50)
    ImageDraw.Draw(img).text((25 - f.getsize("W")[0] / 2, 0), char, font=f)
    return np.reshape(img.getdata(), [48, 48, 1])
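As a quick sanity check (my addition, not required by the results below), two different letters should now render to different pixel arrays:

# hypothetical check: with the typo fixed, "A" and "B" should produce different images
assert not np.array_equal(imgData("A"), imgData("B"))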
I also raised the learning rate by a factor of 10; the model then overfits in about 10 iterations.
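A minimal sketch of that change, assuming the 10x increase applies to the Adam learning rate in the question's code (the exact value is my assumption):

op = tf.train.AdamOptimizer(0.00002).minimize(cost)  # was 0.000002

The per-epoch cost and accuracy then looked like this: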
0, cost: 4.96586 accu 0.0769231
1, cost: 5.01186 accu 0.115385
2, cost: 3.9491 accu 0.230769
3, cost: 2.56 accu 0.384615
4, cost: 1.99173 accu 0.423077
5, cost: 2.51248 accu 0.576923
6, cost: 2.19388 accu 0.730769
7, cost: 3.79979 accu 0.769231
8, cost: 2.25762 accu 0.807692
9, cost: 1.27227 accu 0.884615
10, cost: 0.964877 accu 0.884615
11, cost: 2.15709 accu 0.923077
12, cost: 0.482007 accu 0.961538
13, cost: 0.733133 accu 1.0
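On the "strange numbers" from predict(): model returns raw logits, and the call also feeds keep_prob: 0.5 at inference time. A minimal sketch of how to get normalized probabilities instead, assuming you want dropout disabled at test time:

probs = tf.nn.softmax(model)  # squash the logits into probabilities that sum to 1

def predict(char):
    # keep_prob=1.0 disables dropout at inference
    probabilities = sess.run(probs, feed_dict={x: [imgData(char)], keep_prob: 1.0})[0]
    for n in range(26):
        print(chr(ord("A") + n) + ": " + "{0:.2f}".format(probabilities[n]))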