The Keras code below runs perfectly, with the loss approaching zero. The input data is xData and the label data is yTrainData.
import numpy as np
import keras as k

# Each input sample becomes shape (1, sendLengthG * 4);
# each label becomes shape (sendLengthG, sentComponentTypeCount).
xData = np.reshape(xData, (-1, 1, sendLengthG * 4))
yTrainData = np.reshape(yTrainData, (-1, sendLengthG, sentComponentTypeCount))
# Dense layers, then a softmax over sentComponentTypeCount classes for each of the sendLengthG positions.
model = k.models.Sequential()
model.add(k.layers.Dense(512, input_shape=(1, sendLengthG * 4), activation='tanh'))
model.add(k.layers.Dense(sendLengthG * sentComponentTypeCount, activation='linear'))
model.add(k.layers.Reshape([sendLengthG, sentComponentTypeCount]))
model.add(k.layers.Dense(sentComponentTypeCount, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='RMSProp', metrics=['accuracy'])
model.summary()
model.fit(xData, yTrainData, epochs=roundCount, batch_size=1, verbose=2)
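For concreteness, here is a minimal, self-contained sketch of the shapes the snippet above expects. The values of sendLengthG and sentComponentTypeCount, the sample count, and the random data below are placeholders for illustration only, not my real data; the labels are assumed to be one-hot per position, consistent with categorical_crossentropy.

import numpy as np

# Placeholder sizes for illustration only (the real values come from elsewhere in my script).
sendLengthG = 6
sentComponentTypeCount = 5
sampleCount = 100

# Same shapes as xData / yTrainData after the reshapes above.
xDummy = np.random.rand(sampleCount, 1, sendLengthG * 4).astype(np.float32)
yDummy = np.eye(sentComponentTypeCount)[
    np.random.randint(0, sentComponentTypeCount, size=(sampleCount, sendLengthG))
].astype(np.float32)

print(xDummy.shape)  # (100, 1, 24)
print(yDummy.shape)  # (100, 6, 5)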
I have also written some TensorFlow code, shown below, but I cannot get its loss below 0.011.
import tensorflow as tf

# Placeholders for a single sample's input features and labels (TensorFlow 1.x graph API).
x = tf.placeholder(dtype=tf.float32)
yTrain = tf.placeholder(dtype=tf.float32)
# Flatten the sample into a 1 x (sendLengthG * 4) row vector.
x1 = tf.reshape(x, shape=[1, sendLengthG * 4])
# First hidden layer: 18 tanh units.
nodeCount1 = 18
w1 = tf.Variable(tf.random_normal([sendLengthG * 4, nodeCount1], mean=0.5, stddev=0.1), dtype=tf.float32)
b1 = tf.Variable(tf.zeros([nodeCount1]), dtype=tf.float32)
n1 = tf.nn.tanh(tf.matmul(x1, w1) + b1)
# Second hidden layer: 21 tanh units.
nodeCount2 = 21
w2 = tf.Variable(tf.random_normal([nodeCount1, nodeCount2], mean=1.5, stddev=0.1), dtype=tf.float32)
b2 = tf.Variable(tf.zeros([nodeCount2]), dtype=tf.float32)
n2 = tf.nn.tanh(tf.matmul(n1, w2) + b2)
# Output layer: one logit per (position, component type) pair.
wn = tf.Variable(tf.random_normal([nodeCount2, sendLengthG * sentComponentTypeCount], mean=0.5, stddev=0.1), dtype=tf.float32)
bn = tf.Variable(tf.zeros([sendLengthG * sentComponentTypeCount]), dtype=tf.float32)
y = tf.matmul(n2, wn) + bn
# Softmax over component types for each of the sendLengthG positions,
# followed by a hand-written categorical cross-entropy (clipped to avoid log(0)).
yResult = tf.nn.softmax(tf.reshape(y, [sendLengthG, -1]))
loss = -tf.reduce_mean(yTrain * tf.log(tf.clip_by_value(yResult, 1e-10, 1.0)))
optimizer = tf.train.RMSPropOptimizer(learnRate)
train = optimizer.minimize(loss)
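The snippet above only builds the graph. For context, a minimal TensorFlow 1.x training loop for it might look like the sketch below; it feeds one sample per step (matching the reshape to [1, sendLengthG * 4] and Keras' batch_size=1) and is illustrative rather than the exact loop from my script. roundCount, learnRate, xData, and yTrainData are defined elsewhere; totalLoss and lossValue are just local names for the sketch.

# Illustrative TF 1.x training loop: one sample per step.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(roundCount):
        totalLoss = 0.0
        for i in range(len(xData)):
            _, lossValue = sess.run(
                [train, loss],
                feed_dict={x: xData[i], yTrain: yTrainData[i]})
            totalLoss += lossValue
        print("epoch %d, mean loss: %f" % (epoch, totalLoss / len(xData)))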