I'm trying to pass a bitmap from ffmpeg to Android. It already works, but it displays the picture on a surface that is passed from Java to the native code. How can I get the frame buffer bitmap data so I can pass it to Java?
I tried to save the out_frame buffer data:
unsigned char bmpFileHeader[14] = {'B', 'M', 0,0,0,0, 0,0, 0,0, 54, 0,0,0}; // "BM" magic, file size (filled in later), reserved, pixel data offset = 54
unsigned char bmpInfoHeader[40] = {40,0,0,0, 0,0,0,0, 0,0,0,0, 1,0, 24,0};  // BITMAPINFOHEADER: header size 40, width/height (filled in later), 1 plane, 24 bpp
unsigned char bmpPad[3] = {0, 0, 0}; // BMP rows are padded to a multiple of 4 bytes
void saveBuffer(int fileIndex, int width, int height, unsigned char *buffer, int buffer_size) {
    char filename[1024]; // sprintf expects char *, not unsigned char *
    sprintf(filename, "/storage/sdcard0/3d_player_%d.bmp", fileIndex);
    LOGI(10, "saving ffmpeg bitmap file: %d to %s", fileIndex, filename);
    FILE *bitmapFile = fopen(filename, "wb");
    if (!bitmapFile) {
        LOGE(10, "failed to create ffmpeg bitmap file");
        return;
    }
    int filesize = 54 + 3 * width * height; // 3 = (r,g,b); an unsigned char would truncate this
    bmpFileHeader[2] = (unsigned char) (filesize);
    bmpFileHeader[3] = (unsigned char) (filesize >> 8);
    bmpFileHeader[4] = (unsigned char) (filesize >> 16);
    bmpFileHeader[5] = (unsigned char) (filesize >> 24);
    bmpInfoHeader[4] = (unsigned char) (width);
    bmpInfoHeader[5] = (unsigned char) (width >> 8);
    bmpInfoHeader[6] = (unsigned char) (width >> 16);
    bmpInfoHeader[7] = (unsigned char) (width >> 24);
    bmpInfoHeader[8] = (unsigned char) (height);
    bmpInfoHeader[9] = (unsigned char) (height >> 8);
    bmpInfoHeader[10] = (unsigned char) (height >> 16);
    bmpInfoHeader[11] = (unsigned char) (height >> 24);
    fwrite(bmpFileHeader, 1, 14, bitmapFile);
    fwrite(bmpInfoHeader, 1, 40, bitmapFile);
    int i;
    for (i = 0; i < height; i++) {
        // BMP stores rows bottom-up, so write row (height - 1 - i) of the source buffer
        fwrite(buffer + width * (height - 1 - i) * 3, 3, width, bitmapFile);
        fwrite(bmpPad, 1, (4 - (width * 3) % 4) % 4, bitmapFile);
    }
    fflush(bitmapFile);
    fclose(bitmapFile);
}
int player_decode_video(struct DecoderData * decoder_data, JNIEnv * env,
        struct PacketData *packet_data) {
    int got_frame_ptr;
    struct Player *player = decoder_data->player;
    int stream_no = decoder_data->stream_no;
    AVCodecContext * ctx = player->input_codec_ctxs[stream_no];
    AVFrame * frame = player->input_frames[stream_no];
    AVStream * stream = player->input_streams[stream_no];
    int interrupt_ret;
    int to_write;
    int err = 0;
    AVFrame *rgb_frame = player->rgb_frame;
    ANativeWindow_Buffer buffer;
    ANativeWindow * window;
#ifdef MEASURE_TIME
    struct timespec timespec1, timespec2, diff;
#endif // MEASURE_TIME
    LOGI(10, "player_decode_video decoding");
    int frameFinished;
#ifdef MEASURE_TIME
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
    int ret = avcodec_decode_video2(ctx, frame, &frameFinished,
            packet_data->packet);
#ifdef MEASURE_TIME
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec2);
    diff = timespec_diff(timespec1, timespec2);
    LOGI(3, "decode_video timediff: %d.%9ld", diff.tv_sec, diff.tv_nsec);
#endif // MEASURE_TIME
    if (ret < 0) {
        LOGE(1, "player_decode_video Fail decoding video %d\n", ret);
        return -ERROR_WHILE_DECODING_VIDEO;
    }
    if (!frameFinished) {
        LOGI(10, "player_decode_video Video frame not finished\n");
        return 0;
    }
    // save the converted video frame into the buffer
    LOGI(7, "player_decode_video copy wait");
#ifdef MEASURE_TIME
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
    pthread_mutex_lock(&player->mutex_queue);
    window = player->window;
    if (window == NULL) {
        pthread_mutex_unlock(&player->mutex_queue);
        goto skip_frame;
    }
    ANativeWindow_setBuffersGeometry(window, ctx->width, ctx->height,
            WINDOW_FORMAT_RGBA_8888);
    if (ANativeWindow_lock(window, &buffer, NULL) != 0) {
        pthread_mutex_unlock(&player->mutex_queue);
        goto skip_frame;
    }
    pthread_mutex_unlock(&player->mutex_queue);
    int format = buffer.format;
    if (format < 0) {
        LOGE(1, "Could not get window format");
    }
    enum PixelFormat out_format;
    if (format == WINDOW_FORMAT_RGBA_8888) {
        out_format = PIX_FMT_RGBA;
        LOGI(6, "Format: WINDOW_FORMAT_RGBA_8888");
    } else if (format == WINDOW_FORMAT_RGBX_8888) {
        out_format = PIX_FMT_RGB0;
        LOGE(1, "Format: WINDOW_FORMAT_RGBX_8888 (not supported)");
    } else if (format == WINDOW_FORMAT_RGB_565) {
        out_format = PIX_FMT_RGB565;
        LOGE(1, "Format: WINDOW_FORMAT_RGB_565 (not supported)");
    } else {
        LOGE(1, "Unknown window format");
    }
    // rgb_frame wraps the window's own pixel memory (buffer.bits)
    avpicture_fill((AVPicture *) rgb_frame, buffer.bits, out_format,
            buffer.width, buffer.height);
    rgb_frame->data[0] = buffer.bits;
    if (format == WINDOW_FORMAT_RGBA_8888) {
        rgb_frame->linesize[0] = buffer.stride * 4; // stride is in pixels, 4 bytes each
    } else {
        LOGE(1, "Unknown window format");
    }
    LOGI(6,
            "Buffer: width: %d, height: %d, stride: %d",
            buffer.width, buffer.height, buffer.stride);
    int i = 0;
#ifdef MEASURE_TIME
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec2);
    diff = timespec_diff(timespec1, timespec2);
    LOGI(1,
            "lockPixels and fillimage timediff: %d.%9ld", diff.tv_sec, diff.tv_nsec);
#endif // MEASURE_TIME
#ifdef MEASURE_TIME
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
LOGI(7, "player_decode_video copying...");
AVFrame * out_frame;
int rescale;
if (ctx->width == buffer.width && ctx->height == buffer.height) {
// This always should be true
out_frame = rgb_frame;
rescale = FALSE;
} else {
out_frame = player->tmp_frame2;
rescale = TRUE;
}
if (ctx->pix_fmt == PIX_FMT_YUV420P) {
__I420ToARGB(frame->data[0], frame->linesize[0], frame->data[2],
frame->linesize[2], frame->data[1], frame->linesize[1],
out_frame->data[0], out_frame->linesize[0], ctx->width,
ctx->height);
} else if (ctx->pix_fmt == PIX_FMT_NV12) {
__NV21ToARGB(frame->data[0], frame->linesize[0], frame->data[1],
frame->linesize[1], out_frame->data[0], out_frame->linesize[0],
ctx->width, ctx->height);
} else {
LOGI(3, "Using slow conversion: %d ", ctx->pix_fmt);
struct SwsContext *sws_context = player->sws_context;
sws_context = sws_getCachedContext(sws_context, ctx->width, ctx->height,
ctx->pix_fmt, ctx->width, ctx->height, out_format,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
player->sws_context = sws_context;
if (sws_context == NULL) {
LOGE(1, "could not initialize conversion context from: %d"
", to :%d\n", ctx->pix_fmt, out_format);
// TODO some error
}
sws_scale(sws_context, (const uint8_t * const *) frame->data,
frame->linesize, 0, ctx->height, out_frame->data,
out_frame->linesize);
}
if (rescale) {
// Never occurs
__ARGBScale(out_frame->data[0], out_frame->linesize[0], ctx->width,
ctx->height, rgb_frame->data[0], rgb_frame->linesize[0],
buffer.width, buffer.height, __kFilterNone);
out_frame = rgb_frame;
}
// TODO: (4ntoine) frame decoded and rescaled, ready to call callback with frame picture from buffer
int bufferSize = buffer.width * buffer.height * 3; // 3 = (r,g,b);
static int bitmapCounter = 0;
if (bitmapCounter < 10) {
saveBuffer(bitmapCounter++, buffer.width, buffer.height, (unsigned char *)out_frame->data, bufferSize);
}
But out_frame is empty: the file gets a header and a body full of 0x00 bytes.
How can I get the picture buffer data from ffmpeg?
Answer 0 (score: 0)
In short: you should take the buffer from the ANativeWindow_Buffer, i.e. buffer.bits. Note that this buffer is RGBA (4 bytes per pixel) while a BMP is usually RGB (3 bytes per pixel), so to save it as a BMP you need to prepend the BMP headers and write each row with padding, dropping the alpha channel.
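A minimal sketch of that idea, reusing the bmpFileHeader/bmpInfoHeader/bmpPad arrays from the question. It assumes the window was locked as WINDOW_FORMAT_RGBA_8888 (so buffer.stride is counted in pixels, 4 bytes each); the function name saveWindowBufferAsBmp is just illustrative:

// Sketch: dump an RGBA_8888 window buffer as a 24-bit bottom-up BMP.
// Call it between ANativeWindow_lock() and ANativeWindow_unlockAndPost().
void saveWindowBufferAsBmp(const char *path, ANativeWindow_Buffer *wbuf) {
    int width = wbuf->width;
    int height = wbuf->height;
    int pad = (4 - (width * 3) % 4) % 4;        // BMP rows are padded to 4 bytes
    int filesize = 54 + (width * 3 + pad) * height;

    bmpFileHeader[2] = (unsigned char) (filesize);
    bmpFileHeader[3] = (unsigned char) (filesize >> 8);
    bmpFileHeader[4] = (unsigned char) (filesize >> 16);
    bmpFileHeader[5] = (unsigned char) (filesize >> 24);
    bmpInfoHeader[4] = (unsigned char) (width);
    bmpInfoHeader[5] = (unsigned char) (width >> 8);
    bmpInfoHeader[6] = (unsigned char) (width >> 16);
    bmpInfoHeader[7] = (unsigned char) (width >> 24);
    bmpInfoHeader[8] = (unsigned char) (height);
    bmpInfoHeader[9] = (unsigned char) (height >> 8);
    bmpInfoHeader[10] = (unsigned char) (height >> 16);
    bmpInfoHeader[11] = (unsigned char) (height >> 24);

    FILE *f = fopen(path, "wb");
    if (!f) {
        return;
    }
    fwrite(bmpFileHeader, 1, 14, f);
    fwrite(bmpInfoHeader, 1, 40, f);

    unsigned char *pixels = (unsigned char *) wbuf->bits;
    int x, y;
    for (y = height - 1; y >= 0; y--) {          // BMP stores rows bottom-up
        unsigned char *row = pixels + y * wbuf->stride * 4;  // stride is in pixels
        for (x = 0; x < width; x++) {
            unsigned char bgr[3];
            bgr[0] = row[x * 4 + 2];             // B: BMP pixels are BGR,
            bgr[1] = row[x * 4 + 1];             // G: the source is RGBA,
            bgr[2] = row[x * 4 + 0];             // R: the alpha byte is dropped
            fwrite(bgr, 1, 3, f);
        }
        fwrite(bmpPad, 1, pad, f);
    }
    fclose(f);
}

Called this way, it writes exactly the pixels that end up on the surface, because buffer.bits is the same memory the decoder fills through rgb_frame->data[0].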