This is my first question on Stack Overflow, so I may not manage to give every detail of my problem in a single post.
I am trying to apply a CNN to an activity recognition dataset, but I am currently stuck on this error: logits and labels must be the same size: logits_size = [1200,7] labels_size = [600,7].
Link of dataset
Link to my github file
import numpy as np
import tensorflow as tf

batch_size = 600        # Batch size
seq_len = 200           # Number of steps
learning_rate = 0.0005
epochs = 200

n_classes = 7
n_channels = 3

inputs_ = tf.placeholder(tf.float32, [None, seq_len, n_channels], name='inputs')
labels_ = tf.placeholder(tf.float32, [None, n_classes], name='labels')
keep_prob_ = tf.placeholder(tf.float32, name='keep')
learning_rate_ = tf.placeholder(tf.float32, name='learning_rate')
cconv1 = tf.layers.conv1d(inputs=inputs_, filters=18, kernel_size=2, strides=1,
                          padding='same', activation=tf.nn.relu)                          # (batch, 200, 18)
pool_1 = tf.layers.max_pooling1d(inputs=cconv1, pool_size=4, strides=4, padding='same')  # (batch, 50, 18)

cconv2 = tf.layers.conv1d(inputs=pool_1, filters=36, kernel_size=2, strides=1,
                          padding='same', activation=tf.nn.relu)                          # (batch, 50, 36)
pool_2 = tf.layers.max_pooling1d(inputs=cconv2, pool_size=4, strides=4, padding='same')  # (batch, 13, 36)

cconv3 = tf.layers.conv1d(inputs=pool_2, filters=72, kernel_size=2, strides=1,
                          padding='same', activation=tf.nn.relu)                          # (batch, 13, 72)
pool_3 = tf.layers.max_pooling1d(inputs=cconv3, pool_size=4, strides=4, padding='same')  # (batch, 4, 72)

flat = tf.reshape(pool_3, (-1, 2*72))
print(flat.get_shape())

flat = tf.nn.dropout(flat, keep_prob=keep_prob_)
print(flat.get_shape())

logits = tf.layers.dense(flat, n_classes)
print(logits.get_shape())
# Cost function and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels_))
# print(cost.get_shape())
optimizer = tf.train.AdamOptimizer(learning_rate_).minimize(cost)
tf.summary.scalar("cost", cost)

# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tf.summary.scalar("accuracy", accuracy)
def get_batches(X, y, batch_size=100):
    """Return a generator for batches."""
    n_batches = len(X) // batch_size
    X, y = X[:n_batches*batch_size], y[:n_batches*batch_size]

    # Loop over batches and yield
    for b in range(0, len(X), batch_size):
        yield X[b:b+batch_size], y[b:b+batch_size]
Here is the code I use to run the model:
test_acc = []
test_loss = []
train_acc = []
train_loss = []

# with graph.as_default():
saver = tf.train.Saver()

# with tf.Session(graph=graph) as sess:
# with tf.Session() as sess:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# writer = tf.summary.FileWriter("logs/", sess.graph)
iteration = 1

for e in range(epochs):
    # tf.set_random_seed(123)
    # Loop over batches
    for x, y in get_batches(X_train, y_train, batch_size):
        # Feed dictionary
        feed = {inputs_: x, labels_: y, keep_prob_: 0.5, learning_rate_: learning_rate}

        # Loss
        loss, _, acc = sess.run([cost, optimizer, accuracy], feed_dict=feed)
        train_acc.append(acc)
        train_loss.append(loss)

        # Print at each 5 iters
        if iteration % 5 == 0:
            print("Epoch: {}/{}".format(e, epochs),
                  "Iteration: {:d}".format(iteration),
                  "Train loss: {:6f}".format(loss),
                  "Train acc: {:.6f}".format(acc))

        # Compute validation loss at every 10 iterations
        if iteration % 10 == 0:
            val_acc_ = []
            val_loss_ = []

            for x_t, y_t in get_batches(X_test, y_test, batch_size):
                # Feed
                feed = {inputs_: x_t, labels_: y_t, keep_prob_: 1.0}

                # Loss
                loss_v, acc_v = sess.run([cost, accuracy], feed_dict=feed)
                val_acc_.append(acc_v)
                val_loss_.append(loss_v)

            # Print info
            print("Epoch: {}/{}".format(e, epochs),
                  "Iteration: {:d}".format(iteration),
                  "Testing loss NOW: {:6f}".format(np.mean(val_loss_)),
                  "Testing acc NOW: {:.6f}".format(np.mean(val_acc_)))

            # Store
            test_acc.append(np.mean(val_acc_))
            test_loss.append(np.mean(val_loss_))

        # Iterate
        iteration += 1

print("Optimization Finished!")
print("Ended!")
Any help is appreciated, thanks in advance.
Answer 0 (score: 1)
I guess the problem is in the reshape. The output of pool_3 is probably of shape 4 * 1 * 72 per example, i.e. [batch, 4, 72]: when you use 'same' padding, zeros are padded at the end, so the sequence length goes 200 → 50 → 13 → 4 through the three pooling layers. Reshaping a [600, 4, 72] tensor with (-1, 2*72) therefore produces 1200 rows instead of 600, which is exactly the mismatch in your error message.
You need to change the reshape layer to flat = tf.reshape(pool_3, (-1, 4*72)).
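If you would rather not recompute that constant by hand every time seq_len or the pooling setup changes, here is a minimal sketch (assuming the graph variables defined above) that derives the flatten width from pool_3's own static shape:

# Derive the flatten width from pool_3 instead of hard-coding 4*72.
# With seq_len = 200 and three stride-4 'same' pools: 200 -> 50 -> 13 -> 4,
# so n_flat = 4 * 72 = 288.
shape = pool_3.get_shape().as_list()     # [None, 4, 72]
n_flat = shape[1] * shape[2]             # 288
flat = tf.reshape(pool_3, (-1, n_flat))  # (batch, 288); batch stays 600

Equivalently, flat = tf.layers.flatten(pool_3) keeps the batch dimension intact and computes the remaining size for you.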