My image comes out distorted when converting YUV420p to RGB24 with sws_scale.
Code:
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
    fprintf(stderr, "Error decoding video frame\n");
    return ret;
}
if (*got_frame)
{
    printf("video_frame%s n:%d coded_n:%d pts:%s\n",
           cached ? "(cached)" : "",
           video_frame_count++, frame->coded_picture_number,
           "#"/*av_ts2timestr(frame->pts, &video_dec_ctx->time_base)*/);

    /* copy decoded frame to destination buffer:
     * this is required since rawvideo expects non aligned data */
    av_image_copy(video_dst_data, video_dst_linesize,
                  (const uint8_t **)(frame->data), frame->linesize,
                  video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);

    /* write to rawvideo file */
    fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);

    AVPicture pic;
    avpicture_alloc(&pic, AV_PIX_FMT_RGB24, frame->width, frame->height);

    SwsContext *ctxt = sws_getContext(frame->width, frame->height, static_cast<AVPixelFormat>(frame->format),
                                      frame->width, frame->height, AV_PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);
    if (NULL == ctxt)
    {
        //Log("failed to get sws context");
    }

    if (0 < sws_scale(ctxt, frame->data, frame->linesize, 0, frame->height, pic.data, pic.linesize))
    {
        char szPic[256] = { 0 };
        sprintf(szPic, "decoded/%d.bmp", video_frame_count);
        FILE *pf = fopen(szPic, "w");
        if (NULL != pf)
        {
            BITMAPFILEHEADER bmpFileHeader = { 0 };
            bmpFileHeader.bfReserved1 = 0;
            bmpFileHeader.bfReserved2 = 0;
            bmpFileHeader.bfType = 0x4D42;
            bmpFileHeader.bfSize = sizeof(bmpFileHeader) + sizeof(BITMAPINFOHEADER) + pic.linesize[0] * frame->height;
            bmpFileHeader.bfOffBits = sizeof(bmpFileHeader) + sizeof(BITMAPINFOHEADER);

            BITMAPINFOHEADER bmiHeader = { 0 };
            bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
            bmiHeader.biWidth = frame->width;
            bmiHeader.biHeight = 0 - frame->height;
            bmiHeader.biPlanes = 1;
            bmiHeader.biBitCount = 24;
            bmiHeader.biCompression = BI_RGB;
            bmiHeader.biSizeImage = pic.linesize[0] * frame->height;
            bmiHeader.biXPelsPerMeter = 0;
            bmiHeader.biYPelsPerMeter = 0;
            bmiHeader.biClrUsed = 0;
            bmiHeader.biClrImportant = 0;

            fwrite(&bmpFileHeader, 1, sizeof(bmpFileHeader), pf);
            fwrite(&bmiHeader, 1, sizeof(bmiHeader), pf);
            fwrite(pic.data[0], 1, pic.linesize[0] * frame->height, pf);
            fclose(pf);
        }
    }

    // pic.data[0] now contains the image data in RGB format (3 bytes per pixel)
    // and pic.linesize[0] is the pitch of the data (i.e. size of a row in memory, which can be larger than width*sizeof(pixel))
    avpicture_free(&pic);
    sws_freeContext(ctxt);
}
The code above just decodes a frame, converts it to RGB24, and then writes it out as a bitmap. The raw video frame looks correct, but the converted image is distorted. Is some code missing, or is some of it wrong?
Thanks in advance.
Answer 0 (score: 2)
fwrite( pic.data[0], 1, pic.linesize[0] * frame->height, pf );
For a 1280x720 image, for example, the linesize is typically larger than the width, e.g. 1312. If you write linesize * height bytes, you write out more data than the image actually contains, and each row's trailing padding ends up inside the bitmap, skewing the picture. Instead you want to write, in a loop, one row of width pixels at a time, advancing the source pointer by linesize bytes:
uint8_t *ptr = pic.data[0];
for (int y = 0; y < frame->height; y++) {
    fwrite(ptr, 1, frame->width * 3, pf); /* 3 bytes per RGB24 pixel */
    ptr += pic.linesize[0];               /* rows are linesize bytes apart */
}
Then it should work correctly.
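A few more caveats beyond the stride issue, based on the BMP format rather than anything in the question: BMP requires every row to be padded to a multiple of 4 bytes, 24-bit BMP stores pixels in BGR order (so converting to AV_PIX_FMT_BGR24, as the other answer does, gives correct colors), and the file must be opened in binary mode ("wb"), or text-mode newline translation on Windows corrupts the data. A minimal sketch of a padded row-writing loop, reusing pic, frame, and szPic from the question's code:

const int rowBytes = frame->width * 3;       // one row of RGB24 pixels
const int padBytes = (4 - rowBytes % 4) % 4; // BMP rows must be 4-byte aligned
const uint8_t pad[3] = { 0, 0, 0 };
FILE *pf = fopen(szPic, "wb");               // binary mode, not "w"
if (NULL != pf)
{
    /* ... write BITMAPFILEHEADER / BITMAPINFOHEADER here, with
     * biSizeImage = (rowBytes + padBytes) * frame->height ... */
    const uint8_t *ptr = pic.data[0];
    for (int y = 0; y < frame->height; y++)
    {
        fwrite(ptr, 1, rowBytes, pf);        // pixel data only, no stride padding
        fwrite(pad, 1, padBytes, pf);        // row padding required by BMP
        ptr += pic.linesize[0];              // source rows are linesize bytes apart
    }
    fclose(pf);
}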
Answer 1 (score: -1)
Maybe this code can help you; it works well for me.
int got_frame = 0;
auto len = avcodec_decode_video2(m_avCodecContext
    , m_avFrame
    , &got_frame
    , &avpkt);
if (len < 0)
{
    return;
}
if (got_frame /*&& !silentMode*/)
{
    //if (videoRenderer != nullptr)
    {
        if (frameSize == NULL)
        {
            return;
        }
        uint8_t *dst_data[4];
        int dst_linesize[4];
        int dst_w, dst_h;
        int ret = 0;
        if (1)// avcodec_alloc_frame()
        {
            auto stride = m_avFrame->linesize;
            auto scan0 = m_avFrame->data;
            SwsContext *scaleContext = sws_getContext(m_avCodecContext->width
                , m_avCodecContext->height
                , m_avCodecContext->pix_fmt
                , m_avCodecContext->width
                , m_avCodecContext->height
                , PixelFormat::PIX_FMT_BGR24
                , SWS_FAST_BILINEAR, NULL, NULL, NULL);
            if (scaleContext == NULL)
            {
                //TODO: log error
                return;
            }
            try
            {
                //*vb->signal = 1;
                ret = avpicture_alloc(&m_dst_picture
                    , PixelFormat::PIX_FMT_BGR24
                    , m_avCodecContext->width
                    , m_avCodecContext->height);
                // AVFrame *picture_RGB;
                // uint8_t *bufferRGB;
                // picture_RGB = avcodec_alloc_frame();
                // bufferRGB = (uint8_t*)malloc(720*576*(24/8)/*avpicture_get_size(PIX_FMT_RGB24, 720, 576)*/);
                // avpicture_fill((AVPicture *)picture_RGB, bufferRGB, PIX_FMT_RGB24, 720, 576);
                if (ret < 0)
                {
                    return;
                }
                int retScale = sws_scale(scaleContext
                    , scan0
                    , stride
                    , 0
                    , m_avCodecContext->height
                    , m_dst_picture.data //picture_RGB->data
                    , m_dst_picture.linesize //picture_RGB->linesize
                    );
                if (1)
                {
                    HWND hwnd = m_pParent->GetSafeHwnd();
                    SetFocus(hwnd);
                    CRect rc;
                    m_pParent->GetClientRect(rc);
                    CDC *cdc = m_pParent->GetDC();
                    char* bitmap = (char*)m_dst_picture.data[0];
                    // static unsigned int i = 0;
                    // bmp_save(bitmap, m_avCodecContext->width, m_avCodecContext->height, i++);
                    BITMAPINFO bmpinfo;
                    bmpinfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
                    bmpinfo.bmiHeader.biWidth = m_avCodecContext->width;
                    bmpinfo.bmiHeader.biHeight = -m_avCodecContext->height;
                    bmpinfo.bmiHeader.biPlanes = 1;
                    bmpinfo.bmiHeader.biBitCount = 24;
                    bmpinfo.bmiHeader.biCompression = BI_RGB;
                    bmpinfo.bmiHeader.biSizeImage =
                        m_avCodecContext->width * m_avCodecContext->height * (24 / 8);
                    bmpinfo.bmiHeader.biXPelsPerMeter = 100;
                    bmpinfo.bmiHeader.biYPelsPerMeter = 100;
                    bmpinfo.bmiHeader.biClrUsed = 0;
                    bmpinfo.bmiHeader.biClrImportant = 0;
                    HBITMAP hBitmap = CreateDIBitmap(cdc->GetSafeHdc(), &bmpinfo.bmiHeader, CBM_INIT, bitmap, &bmpinfo/*bi*/, DIB_RGB_COLORS);
                    DrawBitmap(cdc, hBitmap, m_pParent);
                    ::DeleteObject(hBitmap);
                    m_pParent->ReleaseDC(cdc); // a DC obtained from GetDC() is released, not passed to DeleteObject
                }
                avpicture_free(&m_dst_picture);
                sws_freeContext(scaleContext);
            }
            catch (int e)
            {
                sws_freeContext(scaleContext);
            }
        }
    }
}
void DrawBitmap(CDC *pDC, HBITMAP hbitmap, CWnd *wnd)
{
    CBitmap *pBitmap = CBitmap::FromHandle(hbitmap); // temporary CBitmap; do not delete it here
    BITMAP bm;
    pBitmap->GetBitmap(&bm);
    CDC MemDC;
    MemDC.CreateCompatibleDC(pDC);
    HGDIOBJ gob = MemDC.SelectObject(pBitmap);
    CRect rc;
    wnd->GetClientRect(rc);
    pDC->SetStretchBltMode(COLORONCOLOR);
    pDC->StretchBlt(0, 0, rc.Width(), rc.Height(), &MemDC, 0, 0, bm.bmWidth, bm.bmHeight, SRCCOPY);
    MemDC.SelectObject(gob);
    MemDC.DeleteDC(); // a memory DC is deleted, not DeleteObject'd; BITMAP bm is a plain struct and needs no cleanup
}
void initDecoder()
{
    m_avCodecContext = avcodec_alloc_context();
    if (!m_avCodecContext)
    {
        //failed to allocate codec context
        Cleanup();
        return;
    }
    m_avCodecContext->flags = 0;
    uint8_t startCode[] = { 0x00, 0x00, 0x01 };
    //////////////////////////////////////////////////////////////////////////
    // I think for live video playback you can comment these lines out.
    if (m_sProps != NULL)
    {
        // USES_CONVERSION;
        // ::MessageBox(NULL, A2T(sprops), TEXT("sprops"), MB_OK);
        unsigned spropCount;
        SPropRecord* spropRecords = parseSPropParameterSets(m_sProps, spropCount);
        try
        {
            for (unsigned i = 0; i < spropCount; ++i)
            {
                AddExtraData(startCode, sizeof(startCode));
                AddExtraData(spropRecords[i].sPropBytes, spropRecords[i].sPropLength);
            }
        }
        catch (void*)
        {
            //extradata exceeds size limit
            delete[] spropRecords;
            Cleanup();
            return;
        }
        delete[] spropRecords;
        m_avCodecContext->extradata = extraDataBuffer;
        m_avCodecContext->extradata_size = extraDataSize;
    }
    AddExtraData(startCode, sizeof(startCode));
    bInitEx = true;
    av_register_all();
    avcodec_register_all();
    m_codecId = CODEC_ID_H264;
    m_avCodec = avcodec_find_decoder(m_codecId);
    if (m_avCodec == NULL)
    {
        return;
    }
    if (avcodec_open(m_avCodecContext, m_avCodec) < 0)
    {
        //failed to open codec
        Cleanup();
        return;
    }
    if (m_avCodecContext->codec_id == CODEC_ID_H264)
    {
        m_avCodecContext->flags2 |= CODEC_FLAG2_CHUNKS;
        //avCodecContext->flags2 |= CODEC_FLAG2_SHOW_ALL;
    }
    m_avFrame = avcodec_alloc_frame();
    if (!m_avFrame)
    {
        //failed to allocate frame
        Cleanup();
        return;
    }
}
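As an aside, the APIs used throughout this page (avcodec_decode_video2, avcodec_alloc_frame, AVPicture/avpicture_alloc) are deprecated in FFmpeg 3.x and removed in later releases. A minimal sketch of the equivalent decode-and-convert loop using the send/receive API, assuming dec_ctx is an opened AVCodecContext and pkt the input AVPacket (error handling trimmed):

AVFrame *frame = av_frame_alloc();
if (avcodec_send_packet(dec_ctx, pkt) == 0)
{
    while (avcodec_receive_frame(dec_ctx, frame) == 0)
    {
        uint8_t *rgb_data[4];
        int rgb_linesize[4];
        // av_image_alloc replaces the deprecated avpicture_alloc;
        // align = 1 gives rows with no stride padding
        av_image_alloc(rgb_data, rgb_linesize,
                       frame->width, frame->height, AV_PIX_FMT_BGR24, 1);
        SwsContext *sws = sws_getContext(frame->width, frame->height,
                                         static_cast<AVPixelFormat>(frame->format),
                                         frame->width, frame->height,
                                         AV_PIX_FMT_BGR24, SWS_BILINEAR, NULL, NULL, NULL);
        sws_scale(sws, frame->data, frame->linesize, 0, frame->height,
                  rgb_data, rgb_linesize);
        /* ... write rgb_data[0] row by row, rows rgb_linesize[0] bytes apart ... */
        sws_freeContext(sws);
        av_freep(&rgb_data[0]);
    }
}
av_frame_free(&frame);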