I am writing hand_writing_recognition code, but I can't figure out what is wrong with it. This is the error I get when I run the code:

An error occurred while starting the kernel
2019?? 08:37:49.344730: F T:\src\github\tensorflow\tensorflow/core/util/sparse/sparse_tensor.h:68] Check failed: order.size() == dims_ (2 vs. 1) Order length must be SparseTensor rank.

Please help.
import tensorflow as tf
from PIL import Image
import numpy as np
import re
import os
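# Suppress TensorFlow INFO and WARNING log messages.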
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
train_num = 1
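# dic_pattern pulls the hyphenated image id out of each line of vaild_data.txt;
# label_pattern matches the final whitespace-free token on the line (the transcription).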
dic_pattern = r'\w{3}-\w{3,4}-\d{2}-\d{2}'
label_pattern = r'\S+$'
data_file = open('./vaild_data.txt','r')
datas = data_file.readlines()
data_file.close()
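# Character set covered by the labels; num_dic maps each character to its class id
# (e.g. num_dic['i'] == 0).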
char_arr = [c for c in r"""i(jQXr).gA&t#enw75:0l6*2KVOz/hL-?UIP'bv,McTJ9pdY4xfy3+"N1ZHDSqGWCFE!skRoB;8uam"""]
num_dic = {n: i for i, n in enumerate(char_arr)}
tf.reset_default_graph()
global_step = tf.Variable(0,trainable=False)
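# Graph inputs: a single 256x1088 grayscale image, a sparse CTC label, and the dropout keep probability.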
X = tf.placeholder(tf.float32, [256,1088])
Xdata = tf.reshape(X, [1, 256, 1088, 1])
Y = tf.sparse_placeholder(tf.int32)
keep_prob = tf.placeholder(tf.float32)
learning_rate = 0.001
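# Five conv blocks follow: conv (3x3 or 2x2) -> ReLU -> 2x2 max-pool (stride 2) -> dropout.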
W1 = tf.Variable(tf.random_normal([3,3,1,8], stddev = 0.01))
L1 = tf.nn.conv2d(Xdata,W1, strides = [1,1,1,1], padding = 'SAME')
L1 = tf.nn.relu(L1)
L1 = tf.nn.max_pool(L1, ksize=[1,2,2,1], strides=[1,2,2,1], padding = 'SAME')
L1 = tf.nn.dropout(L1, keep_prob)
W2 = tf.Variable(tf.random_normal([3,3,8,32], stddev = 0.01))
L2 = tf.nn.conv2d(L1,W2, strides = [1,1,1,1], padding = 'SAME')
L2 = tf.nn.relu(L2)
L2 = tf.nn.max_pool(L2, ksize=[1,2,2,1], strides=[1,2,2,1], padding = 'SAME')
L2 = tf.nn.dropout(L2, keep_prob)
W3 = tf.Variable(tf.random_normal([2,2,32,64], stddev = 0.01))
L3 = tf.nn.conv2d(L2,W3, strides = [1,1,1,1], padding = 'SAME')
L3 = tf.nn.relu(L3)
L3 = tf.nn.max_pool(L3, ksize=[1,2,2,1], strides=[1,2,2,1], padding = 'SAME')
L3 = tf.nn.dropout(L3, keep_prob)
W4 = tf.Variable(tf.random_normal([2,2,64,128], stddev = 0.01))
L4 = tf.nn.conv2d(L3,W4, strides = [1,1,1,1], padding = 'SAME')
L4 = tf.nn.relu(L4)
L4 = tf.nn.max_pool(L4, ksize=[1,2,2,1], strides=[1,2,2,1], padding = 'SAME')
L4 = tf.nn.dropout(L4, keep_prob)
W5 = tf.Variable(tf.random_normal([2,2,128,256], stddev = 0.01))
L5 = tf.nn.conv2d(L4,W5, strides = [1,1,1,1], padding = 'SAME')
L5 = tf.nn.relu(L5)
L5 = tf.nn.max_pool(L5, ksize=[1,2,2,1], strides=[1,2,2,1], padding = 'SAME')
L5 = tf.nn.dropout(L5, keep_prob)
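# After five stride-2 pools, the 256x1088 input is reduced to 8x34 with 256 channels.
# Width (34) becomes the time axis: transpose to [1, 34, 8, 256], then flatten to [1, 34, 2048].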
L6 = tf.transpose(L5,[0,2,1,3])
L6 = tf.reshape(L6,[1,34,8*256])
GRU1 = tf.contrib.rnn.GRUBlockCellV2(200,name='gru1')
L7, _ = tf.nn.dynamic_rnn(GRU1, L6, dtype=tf.float32)
GRU2 = tf.contrib.rnn.GRUBlockCellV2(79,name='gru2')
model,_ = tf.nn.dynamic_rnn(GRU2, L7, dtype=tf.float32)
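# tf.nn.ctc_loss with time_major=False expects inputs of shape [batch, max_time, num_classes];
# model is [1, 34, 79]: 78 characters in char_arr plus one CTC blank.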
cost = tf.nn.ctc_loss(labels=Y,inputs=model,sequence_length=[34],time_major=False)
cost = tf.reduce_mean(cost)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost,global_step=global_step)
sess = tf.Session()
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state('./saver')
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    saver.restore(sess, ckpt.model_checkpoint_path)
else:
    sess.run(tf.global_variables_initializer())
for x in range(train_num):
    gs = sess.run(global_step)
    data = datas[gs]
    print("data=", data)
    # Split the line into the image id and its transcription.
    dic = ''.join(re.findall(dic_pattern, data))
    data_label = ''.join(re.findall(label_pattern, data))
    print("data_label=", data_label)
    splited = dic.split('-')
    print("splited=", splited)
    im = Image.open('./words/{0}/{0}-{1}/{0}-{1}-{2}-{3}.png'.format(splited[0], splited[1], splited[2], splited[3]))
    data_im = np.array(im)
    im.close()
    # Centre the word image on a white (255-filled) 256x1088 canvas.
    data_height, data_width = np.shape(data_im)[0], np.shape(data_im)[1]
    base = np.full((256, 1088), 255)
    base[128 - int(data_height/2):128 + int((data_height+1)/2), 544 - int(data_width/2):544 + int((data_width+1)/2)] = data_im
    print("base=", base)
    # Build the sparse label that is fed into the Y placeholder.
    indices = [[i] for i in range(0, len(data_label))]
    values = [char_arr.index(c) for c in list(data_label)]
    shape = [len(data_label), 1, 1]
    label = tf.SparseTensorValue(indices, values, shape)
    print(indices, values, label)
    sess.run(optimizer, feed_dict={X: base, Y: label, keep_prob: 0.6})
    print('Step :', gs, ', label :', data_label)
    print('cost :', sess.run(cost, feed_dict={X: base, Y: label, keep_prob: 1}))
    saver.save(sess, './saver/hcnn.ckpt', global_step=global_step)
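For reference, the tf.nn.ctc_loss documentation describes labels as a rank-2 int32 SparseTensor: each row of indices is a [batch, position] pair, and dense_shape must have length 2 (the "SparseTensor rank" named in the error). A minimal sketch of that layout for a batch of one, using the names above; this is only an illustration, not a confirmed fix:

indices = [[0, i] for i in range(len(data_label))]  # [batch, position] pairs -> rank 2
values = [num_dic[c] for c in data_label]           # character class ids
shape = [1, len(data_label)]                        # dense_shape of length 2 == rank 2
label = tf.SparseTensorValue(indices, values, shape)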