我遇到了keras尺寸错误
输入形状是这样的
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
结果
(5739, 1, 8) (5739,) (1435, 1, 8) (1435,)
模型定义如下：
batch_size = 128
epochs = 20
from keras_self_attention import SeqSelfAttention
from keras.layers import Flatten

# Build the LSTM + self-attention regressor.
# BUG FIX: `input_shape` describes ONE sample, i.e. (timesteps, features) =
# (train_X.shape[1], train_X.shape[2]). The batch axis train_X.shape[0] must
# never appear in it — including it is what produced
# "expected lstm_33_input to have shape (5739, 8) but got array with shape (1, 8)".
# BUG FIX: the first LSTM argument is the number of hidden units; reusing the
# training hyperparameter `epochs` there was accidental — name it explicitly.
lstm_units = 20
model = keras.models.Sequential()
model.add(keras.layers.LSTM(lstm_units, input_shape=(train_X.shape[1], train_X.shape[2]), return_sequences=True))
model.add(SeqSelfAttention(attention_activation='sigmoid'))
model.add(Flatten())
model.add(keras.layers.Dense(units=1))  # single regression output
model.compile(loss='mse', optimizer='adam')
model.summary()
结果
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_33 (LSTM) (None, 5739, 20) 2320
_________________________________________________________________
seq_self_attention_35 (SeqSe (None, 5739, 20) 1345
_________________________________________________________________
flatten_8 (Flatten) (None, 114780) 0
_________________________________________________________________
dense_33 (Dense) (None, 1) 114781
=================================================================
Total params: 118,446
Trainable params: 118,446
Non-trainable params: 0
_________________________________________________________________
但是我在 fit（拟合/训练）步骤中出现了错误
# Train the model; Keras feeds train_X in (batch, timesteps, features) batches,
# so each sample seen by the layer has shape train_X.shape[1:], not train_X.shape.
history = model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size, validation_data=(test_X, test_y), verbose=2, shuffle=False)
错误
ValueError: Error when checking input: expected lstm_33_input to have shape (5739, 8) but got array with shape (1, 8)
但是我打印出的输入形状是 (5739, 8)，我不明白 (1, 8) 从何而来，也不知道该如何解决。
# NOTE(review): train_X.shape[0] is the NUMBER OF SAMPLES (5739), not the
# timestep count — the sample axis must not be part of a Keras input_shape.
input_shape=(train_X.shape[0], train_X.shape[2])
print(input_shape)
(5739, 8)
是test_X,test_Y或训练中的输入形状问题吗? 以及我该如何解决?
答案 0（得分：2）
// NOTE(review): this Android MediaCodec block appears to have been pasted into
// the Keras answer by an extraction glitch — it is unrelated to the surrounding
// text. The line below still carries a fragment of the interrupted Chinese sentence.
// encode(): queue one buffer of raw frame data into the H.264 encoder, then
// drain every currently-available encoded output buffer to `fos`.
Keras的LSTM层需要一批形状为private void encode(byte[] data) {
// Deprecated buffer-array API (pre-Lollipop style); arrays re-fetched on every call.
inputBuffers = mMediaCodec.getInputBuffers();
outputBuffers = mMediaCodec.getOutputBuffers();
// Timeout -1: block until an input buffer is free.
int inputBufferIndex = mMediaCodec.dequeueInputBuffer(-1);
if (inputBufferIndex >= 0) {
ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
inputBuffer.clear();
inputBuffer.put(data);
// presentationTimeUs is always 0 here — assumes the consumer ignores timestamps; verify.
mMediaCodec.queueInputBuffer(inputBufferIndex, 0, data.length, 0, 0);
} else {
// No input buffer available — drop this frame entirely.
return;
}
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
// Timeout 0: non-blocking drain; loop runs while encoded output is pending.
int outputBufferIndex = mMediaCodec.dequeueOutputBuffer(bufferInfo, 0);
Log.i(TAG, "outputBufferIndex-->" + outputBufferIndex);
do {
if (outputBufferIndex >= 0) {
ByteBuffer outBuffer = outputBuffers[outputBufferIndex];
System.out.println("buffer info-->" + bufferInfo.offset + "--"
+ bufferInfo.size + "--" + bufferInfo.flags + "--"
+ bufferInfo.presentationTimeUs);
// Copy bufferInfo.size bytes of encoded data out of the codec buffer.
byte[] outData = new byte[bufferInfo.size];
outBuffer.get(outData);
try {
// NOTE(review): outData was filled from the buffer's current position, yet
// bufferInfo.offset is re-applied to outData below — looks like it skips
// `offset` valid bytes; confirm against MediaCodec.BufferInfo semantics.
if (bufferInfo.offset != 0) {
fos.write(outData, bufferInfo.offset, outData.length
- bufferInfo.offset);
} else {
fos.write(outData, 0, outData.length);
}
fos.flush();
Log.i(TAG, "out data -- > " + outData.length);
// Hand the buffer back to the codec (no render), then try the next one.
mMediaCodec.releaseOutputBuffer(outputBufferIndex, false);
outputBufferIndex = mMediaCodec.dequeueOutputBuffer(bufferInfo,
0);
} catch (IOException e) {
// NOTE(review): on write failure the buffer is never released and
// outputBufferIndex is not advanced, so the loop retries the same stale
// index — potential livelock; verify.
e.printStackTrace();
}
} else if (outputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
// Output buffer set was re-allocated by the codec — refresh the cached array.
outputBuffers = mMediaCodec.getOutputBuffers();
} else if (outputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
// New output format available; fetched but deliberately unused here.
MediaFormat format = mMediaCodec.getOutputFormat();
}
} while (outputBufferIndex >= 0);
}
// initCodec(): open the output file and configure an H.264 ("video/avc")
// encoder: 320x240, 125000 bps, 15 fps, planar YUV420 input, I-frame interval 5.
private void initCodec() {
try {
// append=false: overwrite any previous recording at mVideoFile.
fos = new FileOutputStream(mVideoFile, false);
} catch (FileNotFoundException e) {
// NOTE(review): failure is only logged; later uses of `fos` would NPE — verify
// callers can tolerate this.
e.printStackTrace();
}
try {
mMediaCodec = MediaCodec.createEncoderByType("video/avc");
} catch (IOException e) {
// NOTE(review): same silent-continue pattern; mMediaCodec stays null on failure.
e.printStackTrace();
}
MediaFormat mediaFormat = MediaFormat.createVideoFormat("video/avc",
320,
240);
mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, 125000);
mediaFormat.setInteger(MediaFormat.KEY_FRAME_RATE, 15);
// Caller must supply frames in this planar YUV420 layout.
mediaFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT,
MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Planar);
mediaFormat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 5);
// null surface, null crypto; CONFIGURE_FLAG_ENCODE selects encoder mode.
mMediaCodec.configure(mediaFormat,
null,
null,
MediaCodec.CONFIGURE_FLAG_ENCODE);
mMediaCodec.start();
}
一批形状为 (n_data_points, n_timesteps, n_features) 的数据。您正在使用错误的尺寸构建图层。
首先，将您的训练数据重塑为 (n_data_points, n_timesteps, n_features) 的形状（此处为 (5739, 8, 1)），然后使用正确的尺寸指定模型：
# Move the feature axis into the timestep slot: (5739, 1, 8) -> (5739, 8, 1),
# so each of the 8 features is presented to the LSTM as one timestep.
train_X_ = np.transpose(train_X, (0, 2, 1))
train_X_.shape  # now of shape (5739, 8, 1)
这将正常工作:
# Each sample is (timesteps=8, features=1); Keras adds the batch axis itself,
# so no batch size appears in input_shape.
model = keras.models.Sequential([
    keras.layers.LSTM(20, input_shape=(train_X_.shape[1], train_X_.shape[2]), return_sequences=True),
    keras.layers.Flatten(),
    keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')