I am trying to integrate the NVIDIA Encoder SDK with the NVIDIA Frame Buffer Capture SDK. I capture frames into a CUDA buffer and then send them to the NVIDIA encoder to be encoded as H.264. However, the encoded video is not what I expect: it is a completely pink screen. What could be the possible causes?
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <dlfcn.h>
#include <string.h>
#include <getopt.h>
#include <unistd.h>
#include"nvEncodeAPI.h"
#include <NvFBC.h>
// If you have the CUDA Toolkit Installed, you can include the CUDA headers
// #include "cuda.h"
//#include "cuda_drvapi_dynlink_cuda.h"
#include"cuda.h"
#include "NvFBCUtils.h"
#define APP_VERSION 4
#define LIB_NVFBC_NAME "libnvidia-fbc.so.1"
#define LIB_CUDA_NAME "libcuda.so.1"
#define N_FRAMES 10
/*
* CUDA entry points
*/
typedef CUresult (* CUINITPROC) (unsigned int Flags);
typedef CUresult (* CUDEVICEGETPROC) (CUdevice *device, int ordinal);
typedef CUresult (* CUCTXCREATEV2PROC) (CUcontext *pctx, unsigned int flags, CUdevice dev);
typedef CUresult (* CUMEMCPYDTOHV2PROC) (void *dstHost, CUdeviceptr srcDevice, size_t ByteCount);
typedef CUresult (* CUGETDEVICEPTR) (CUdeviceptr *dptr, size_t bytesize);
static CUINITPROC cuInit_ptr = NULL;
static CUDEVICEGETPROC cuDeviceGet_ptr = NULL;
static CUCTXCREATEV2PROC cuCtxCreate_v2_ptr = NULL;
static CUMEMCPYDTOHV2PROC cuMemcpyDtoH_v2_ptr = NULL;
static CUGETDEVICEPTR cuGetDevicePtr=NULL;
typedef NVENCSTATUS (*PNVENCCREATEINSTANCE) (NV_ENCODE_API_FUNCTION_LIST* pFuncList);
/**
* Dynamically opens the CUDA library and resolves the symbols that are
* needed for this application.
*
* \param [out] libCUDA
* A pointer to the opened CUDA library.
*
* \return
* NVFBC_TRUE in case of success, NVFBC_FALSE otherwise.
*/
static NVFBC_BOOL cuda_load_library(void *libCUDA)
{
libCUDA = dlopen(LIB_CUDA_NAME, RTLD_NOW);
if (libCUDA == NULL) {
fprintf(stderr, "Unable to open '%s'\n", LIB_CUDA_NAME);
return NVFBC_FALSE;
}
cuInit_ptr = (CUINITPROC) dlsym(libCUDA, "cuInit");
if (cuInit_ptr == NULL) {
fprintf(stderr, "Unable to resolve symbol 'cuInit'\n");
return NVFBC_FALSE;
}
cuDeviceGet_ptr = (CUDEVICEGETPROC) dlsym(libCUDA, "cuDeviceGet");
if (cuDeviceGet_ptr == NULL) {
fprintf(stderr, "Unable to resolve symbol 'cuDeviceGet'\n");
return NVFBC_FALSE;
}
cuCtxCreate_v2_ptr = (CUCTXCREATEV2PROC) dlsym(libCUDA, "cuCtxCreate_v2");
if (cuCtxCreate_v2_ptr == NULL) {
fprintf(stderr, "Unable to resolve symbol 'cuCtxCreate_v2'\n");
return NVFBC_FALSE;
}
cuMemcpyDtoH_v2_ptr = (CUMEMCPYDTOHV2PROC) dlsym(libCUDA, "cuMemcpyDtoH_v2");
if (cuMemcpyDtoH_v2_ptr == NULL) {
fprintf(stderr, "Unable to resolve symbol 'cuMemcpyDtoH_v2'\n");
return NVFBC_FALSE;
}
cuGetDevicePtr = (CUGETDEVICEPTR) dlsym(libCUDA,"cuMemAlloc");
if(cuGetDevicePtr==NULL){
fprintf(stderr, "Unable to resolve symbol 'cuMemAlloc'\n");
return NVFBC_FALSE;
}
return NVFBC_TRUE;
}
/**
* Initializes CUDA and creates a CUDA context.
*
* \param [out] cuCtx
* A pointer to the created CUDA context.
*
* \return
* NVFBC_TRUE in case of success, NVFBC_FALSE otherwise.
*/
static NVFBC_BOOL cuda_init(CUcontext *cuCtx)
{
CUresult cuRes;
CUdevice cuDev;
cuRes = cuInit_ptr(0);
if (cuRes != CUDA_SUCCESS) {
fprintf(stderr, "Unable to initialize CUDA (result: %d)\n", cuRes);
return NVFBC_FALSE;
}
cuRes = cuDeviceGet_ptr(&cuDev, 0);
if (cuRes != CUDA_SUCCESS) {
fprintf(stderr, "Unable to get CUDA device (result: %d)\n", cuRes);
return NVFBC_FALSE;
}
cuRes = cuCtxCreate_v2_ptr(cuCtx, CU_CTX_SCHED_AUTO, cuDev);
if (cuRes != CUDA_SUCCESS) {
fprintf(stderr, "Unable to create CUDA context (result: %d)\n", cuRes);
return NVFBC_FALSE;
}
return NVFBC_TRUE;
}
/**
* Initializes the NvFBC and CUDA libraries and creates an NvFBC instance.
*
* Creates and sets up a capture session to video memory using the CUDA interop.
*
* Captures a bunch of frames every second, converts them to BMP and saves them
* to the disk.
*/
int main(int argc, char *argv[])
{
//handling the opening of the NVIDIA encoder library
void* handle = dlopen("libnvidia-encode.so",RTLD_NOW);
if(handle==NULL)
{
printf("Unable to load the nvidia encoder library");
return EXIT_FAILURE;
}
PNVENCCREATEINSTANCE NvEncCreateInstance_ptr;
NvEncCreateInstance_ptr = (PNVENCCREATEINSTANCE) dlsym(handle,"NvEncodeAPICreateInstance");
if(NvEncCreateInstance_ptr==NULL)
{
printf("Failure to load the nv encode lib");
}
static struct option longopts[] = {
{ "get-status", no_argument, NULL, 'g' },
{ "track", required_argument, NULL, 't' },
{ "frames", required_argument, NULL, 'f' },
{ "size", required_argument, NULL, 's' },
{ "format", required_argument, NULL, 'o' },
{ NULL, 0, NULL, 0 }
};
int opt, ret;
unsigned int i, nFrames = N_FRAMES;
NVFBC_SIZE frameSize = { 0, 0 };
NVFBC_BOOL printStatusOnly = NVFBC_FALSE;
NVFBC_TRACKING_TYPE trackingType = NVFBC_TRACKING_DEFAULT;
char outputName[NVFBC_OUTPUT_NAME_LEN];
uint32_t outputId = 0;
void *libNVFBC = NULL, *libCUDA = NULL;
PNVFBCCREATEINSTANCE NvFBCCreateInstance_ptr = NULL;
NVFBC_API_FUNCTION_LIST pFn;
CUcontext cuCtx;
NVFBCSTATUS fbcStatus;
NVFBC_BOOL fbcBool;
NVFBC_SESSION_HANDLE fbcHandle;
NVFBC_CREATE_HANDLE_PARAMS createHandleParams;
NVFBC_GET_STATUS_PARAMS statusParams;
NVFBC_CREATE_CAPTURE_SESSION_PARAMS createCaptureParams;
NVFBC_DESTROY_CAPTURE_SESSION_PARAMS destroyCaptureParams;
NVFBC_DESTROY_HANDLE_PARAMS destroyHandleParams;
NVFBC_TOCUDA_SETUP_PARAMS setupParams;
NVFBC_BUFFER_FORMAT bufferFormat = NVFBC_BUFFER_FORMAT_NV12;
char videoFile[64]="video.h264";
FILE* fd;
/*
* Parse the command line.
*/
while ((opt = getopt_long(argc, argv, "hgt:f:s:o:", longopts, NULL)) != -1) {
switch (opt) {
case 'g':
printStatusOnly = NVFBC_TRUE;
break;
case 't':
NvFBCUtilsParseTrackingType(optarg, &trackingType, outputName);
break;
case 'f':
nFrames = (unsigned int) atoi(optarg);
break;
case 's':
ret = sscanf(optarg, "%ux%u", &frameSize.w, &frameSize.h);
if (ret != 2) {
fprintf(stderr, "Invalid size format: '%s'\n", optarg);
return EXIT_FAILURE;
}
break;
case 'o':
if (!strcasecmp(optarg, "rgb")) {
bufferFormat = NVFBC_BUFFER_FORMAT_RGB;
} else if (!strcasecmp(optarg, "argb")) {
bufferFormat = NVFBC_BUFFER_FORMAT_ARGB;
} else if (!strcasecmp(optarg, "nv12")) {
bufferFormat = NVFBC_BUFFER_FORMAT_NV12;
} else if (!strcasecmp(optarg, "yuv444p")) {
bufferFormat = NVFBC_BUFFER_FORMAT_YUV444P;
} else {
fprintf(stderr, "Unknown buffer format: '%s'\n", optarg);
return EXIT_FAILURE;
}
break;
case 'h':
default:
usage(argv[0]);
return EXIT_SUCCESS;
}
}
NvFBCUtilsPrintVersions(APP_VERSION);
/*
* Dynamically load the NvFBC library.
*/
libNVFBC = dlopen(LIB_NVFBC_NAME, RTLD_NOW);
if (libNVFBC == NULL) {
fprintf(stderr, "Unable to open '%s'\n", LIB_NVFBC_NAME);
return EXIT_FAILURE;
}
fbcBool = cuda_load_library(libCUDA);
if (fbcBool != NVFBC_TRUE) {
return EXIT_FAILURE;
}
fbcBool = cuda_init(&cuCtx);
if (fbcBool != NVFBC_TRUE) {
return EXIT_FAILURE;
}
//input/output struct that contains all the function pointers for the API
NV_ENCODE_API_FUNCTION_LIST nvencFunctionList;
nvencFunctionList.version=NV_ENCODE_API_FUNCTION_LIST_VER;
//this function call populates the passed pointer to the nvidia api function list with
//the function pointers to the routines implemented by the encoder api
NVENCSTATUS status = NvEncCreateInstance_ptr(&nvencFunctionList);//NvEncodeAPICreateInstance(&nvencFunctionList);
//exit code if failed to create instance of the api
if(status!=NV_ENC_SUCCESS)
{
printf("The nvidia encoder could not be initialized");
EXIT_FAILURE;
}
//creating the encode session params
NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS openSessionExParams;
//initializing them and then adding the values
// memset(&openSessionExParams,0,sizeof(openSessionExParams));
//setting the version
openSessionExParams.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
openSessionExParams.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
//Note - Maybe change the next line
//adding the device pointer, which in this case is a CUDA context
openSessionExParams.device = (void*)cuCtx;
openSessionExParams.reserved = NULL;
openSessionExParams.apiVersion = NVENCAPI_VERSION;
//setting all values of the reserved 1 array to 0
//Note - check the next two lines
memset(openSessionExParams.reserved1,0,253*sizeof(uint32_t));//sizeof(openSessionExParams.reserved1)/sizeof(openSessionExParams.reserved1[0]));
memset(openSessionExParams.reserved2,0,64*sizeof(void*));//sizeof(openSessionExParams.reserved2)/sizeof(openSessionExParams.reserved2[0]));
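//Note - a single memset(&openSessionExParams,0,sizeof(openSessionExParams)) before filling in the
//fields would zero every reserved member at once; the two explicit memsets above rely on the
//hard-coded array sizes from this header version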
//handle for the encoder
void* encoder;
//Note- Seg fault
status = nvencFunctionList.nvEncOpenEncodeSessionEx(&openSessionExParams,&encoder);
//NvEncOpenEncodeSessionEx(&openSessionExParams,encoder);
if(status!=NV_ENC_SUCCESS)
{
printf("Failure opening encoding session: %u\n",status);
return EXIT_FAILURE;
}
//getting the count of the available encoder settings
uint32_t guidCount;
status =nvencFunctionList.nvEncGetEncodeGUIDCount(encoder,&guidCount);
if(status!=NV_ENC_SUCCESS)
{
printf("Failure getting GUID count");
return EXIT_FAILURE;
}
//allocating a buffer that holds that count
GUID* GUIDs = malloc(guidCount*sizeof(GUID));
//number of supported guid
uint32_t supportedGUID;
//adding all the guids to the previously allocated buffer
status =nvencFunctionList.nvEncGetEncodeGUIDs(encoder,GUIDs,guidCount,&supportedGUID);
if(status!=NV_ENC_SUCCESS)
{
printf("Failure filling the guid buffer");
return EXIT_FAILURE;
}
//setting the encode guid
GUID encodeGuid = NV_ENC_CODEC_H264_GUID;
//setting up the presets
uint32_t encodePresetCount;
status =nvencFunctionList.nvEncGetEncodePresetCount(encoder,encodeGuid,&encodePresetCount);
if(status!=NV_ENC_SUCCESS)
{
printf("Failure getting the encoder preset count");
return EXIT_FAILURE;
}
//allocating a buffer to hold the supported preset GUIDs
GUID *presetGUIDs = malloc(encodePresetCount*sizeof(GUID));
uint32_t supportedPresetGUIDs;
status =nvencFunctionList.nvEncGetEncodePresetGUIDs(encoder,encodeGuid,presetGUIDs,encodePresetCount,&supportedPresetGUIDs);
if(status!=NV_ENC_SUCCESS)
{
printf("Failure filling up the preset buffer");
return EXIT_FAILURE;
}
//getting a preset guid Note - check this line later
GUID presetGuid = NV_ENC_PRESET_LOW_LATENCY_HQ_GUID;
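//Note - NV_ENC_PRESET_LOW_LATENCY_HQ_GUID is the low-latency, high-quality preset; any of the
//GUIDs returned in presetGUIDs above could be used here instead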
//setting up a preset config Note - check this
NV_ENC_PRESET_CONFIG encPresetConfig;
encPresetConfig.version = NV_ENC_PRESET_CONFIG_VER;
encPresetConfig.presetCfg.version=NV_ENC_CONFIG_VER;
status = nvencFunctionList.nvEncGetEncodePresetConfig(encoder,encodeGuid,presetGuid,&encPresetConfig);
if(status!=NV_ENC_SUCCESS)
{
printf("Error getting the preset configuration");
return EXIT_FAILURE;
}
//getting the input format which is YUV for our solution
//retrieving the input formats
uint32_t inputFormatCount;
status = nvencFunctionList.nvEncGetInputFormatCount(encoder,encodeGuid,&inputFormatCount);
if(status!=NV_ENC_SUCCESS)
{
printf("Error getting the input buffer format count");
return EXIT_FAILURE;
}
//allocating a buffer for the input formats
NV_ENC_BUFFER_FORMAT* encodeBufferFormats = malloc(inputFormatCount*sizeof(NV_ENC_BUFFER_FORMAT));
//holding the size of the supported formats
uint32_t supportedInputFormats;
//filling up the above buffer
status = nvencFunctionList.nvEncGetInputFormats(encoder,encodeGuid, encodeBufferFormats,inputFormatCount,&supportedInputFormats);
if(status!=NV_ENC_SUCCESS)
{
printf("Error getting the supported input formats");
return EXIT_FAILURE;
}
//selecting a buffer format
//NV_ENC_BUFFER_FORMAT bufferFormat = NV_ENC_BUFFER_FORMAT_IYUV;
//querying the underlying hardware encoder capabilities can be done using the API
//initializing the hardware encoder
NV_ENC_INITIALIZE_PARAMS initializationParams;
/* NV_ENC_PRESET_CONFIG presetConfig;
presetConfig.version = NV_ENC_PRESET_CONFIG_VER;
NV_ENC_CONFIG encoderConfig;
encoderConfig.version = NV_ENC_CONFIG_VER;
encoderConfig.profileGUID=NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID;
presetConfig.presetCfg=encoderConfig;
status = nvencFunctionList.nvEncGetEncodePresetConfig(encoder,encodeGuid,presetGuid,&presetConfig);*/
if(status!=NV_ENC_SUCCESS)
{
printf("\nUnable to get preset config %u",status);
return EXIT_FAILURE;
}
//initializing the encoder params
initializationParams.version = NV_ENC_INITIALIZE_PARAMS_VER;
initializationParams.encodeConfig = &encPresetConfig.presetCfg;
initializationParams.encodeGUID = encodeGuid;
initializationParams.presetGUID=presetGuid;
initializationParams.encodeWidth = 600;
initializationParams.encodeHeight = 600;
initializationParams.frameRateNum = 60;
initializationParams.frameRateDen = 1;
initializationParams.enablePTD=1;
status = nvencFunctionList.nvEncInitializeEncoder(encoder,&initializationParams);
if(status!=NV_ENC_SUCCESS)
{
printf("Error initializing encoder, %u",status);
return EXIT_FAILURE;
}
//allocating the nvidia input output buffers
int num_macroblocks = ((600 + 15) / 16) * ((600 + 15) / 16);
int max_surfaces = (num_macroblocks >= 8160) ? 16 : 32;
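//Note - for 600x600 this gives ((600+15)/16)^2 = 1444 macroblocks, which is below the 8160
//threshold (about a 1920x1088 frame), so 32 surfaces are allocated here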
NV_ENC_INPUT_PTR* inputBuffers = (NV_ENC_INPUT_PTR*) malloc(max_surfaces * sizeof(NV_ENC_INPUT_PTR));
NV_ENC_OUTPUT_PTR* outputBuffers = (NV_ENC_OUTPUT_PTR*) malloc(max_surfaces * sizeof(NV_ENC_OUTPUT_PTR));
//CUdeviceptr* cudaDevicePtrs = (CUdeviceptr*) malloc(max_surfaces*sizeof(CUdeviceptr));
for(int i=0;i<max_surfaces;i++)
{
//creating the input buffer
NV_ENC_CREATE_INPUT_BUFFER inputBufferParams;
inputBufferParams.version = NV_ENC_CREATE_INPUT_BUFFER_VER;
inputBufferParams.width =(600 + 31) & ~31;
inputBufferParams.height=(600 + 31) & ~31;
inputBufferParams.bufferFmt = NV_ENC_BUFFER_FORMAT_IYUV;
inputBufferParams.reserved=0;
memset(inputBufferParams.reserved1,0,57*sizeof(uint32_t));
memset(inputBufferParams.reserved2,0,63*sizeof(void*));
status = nvencFunctionList.nvEncCreateInputBuffer(encoder,&inputBufferParams);
if(status!=NV_ENC_SUCCESS)
{
printf("Input buffer could not be created");
return EXIT_FAILURE;
}
//creating the output buffer
NV_ENC_CREATE_BITSTREAM_BUFFER bitstreamBufferParams;
bitstreamBufferParams.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
bitstreamBufferParams.reserved=0;
memset(bitstreamBufferParams.reserved1,0,58*sizeof(uint32_t));
memset(bitstreamBufferParams.reserved2,0,64*sizeof(void*));
//This line is giving me the error, NV_ENC_ERR_UNIMPLEMENTED
status = nvencFunctionList.nvEncCreateBitstreamBuffer(encoder,&bitstreamBufferParams);
if(status!=NV_ENC_SUCCESS)
{
printf("Bitstream could not be created %i",status);
return EXIT_FAILURE;
}
/* CUresult cuResult = cuGetDevicePtr(&cudaDevicePtrs[i],600);
if(cuResult!=CUDA_SUCCESS)
{
printf("Could not allocate cuda device pointer, %u\n",cuResult);
return EXIT_FAILURE;
}*/
inputBuffers[i]=inputBufferParams.inputBuffer;
outputBuffers[i]=bitstreamBufferParams.bitstreamBuffer;
}
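//Note - only the bitstream buffers created above are used later; the encode loop feeds the
//encoder through nvEncRegisterResource/nvEncMapInputResource on the CUDA pointer returned by
//NvFBC, so the surfaces stored in inputBuffers are never written to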
/*
* Resolve the 'NvFBCCreateInstance' symbol that will allow us to get
* the API function pointers.
*/
NvFBCCreateInstance_ptr =
(PNVFBCCREATEINSTANCE) dlsym(libNVFBC, "NvFBCCreateInstance");
if (NvFBCCreateInstance_ptr == NULL) {
fprintf(stderr, "Unable to resolve symbol 'NvFBCCreateInstance'\n");
return EXIT_FAILURE;
}
/*
* Create an NvFBC instance.
*
* API function pointers are accessible through pFn.
*/
memset(&pFn, 0, sizeof(pFn));
pFn.dwVersion = NVFBC_VERSION;
fbcStatus = NvFBCCreateInstance_ptr(&pFn);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "Unable to create NvFBC instance (status: %d)\n",
fbcStatus);
return EXIT_FAILURE;
}
/*
* Create a session handle that is used to identify the client.
*/
memset(&createHandleParams, 0, sizeof(createHandleParams));
createHandleParams.dwVersion = NVFBC_CREATE_HANDLE_PARAMS_VER;
fbcStatus = pFn.nvFBCCreateHandle(&fbcHandle, &createHandleParams);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "%s\n", pFn.nvFBCGetLastErrorStr(fbcHandle));
return EXIT_FAILURE;
}
/*
* Get information about the state of the display driver.
*
* This call is optional but helps the application decide what it should
* do.
*/
memset(&statusParams, 0, sizeof(statusParams));
statusParams.dwVersion = NVFBC_GET_STATUS_PARAMS_VER;
fbcStatus = pFn.nvFBCGetStatus(fbcHandle, &statusParams);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "%s\n", pFn.nvFBCGetLastErrorStr(fbcHandle));
return EXIT_FAILURE;
}
if (printStatusOnly) {
NvFBCUtilsPrintStatus(&statusParams);
return EXIT_SUCCESS;
}
if (statusParams.bCanCreateNow == NVFBC_FALSE) {
fprintf(stderr, "It is not possible to create a capture session "
"on this system.\n");
return EXIT_FAILURE;
}
if (trackingType == NVFBC_TRACKING_OUTPUT) {
if (!statusParams.bXRandRAvailable) {
fprintf(stderr, "The XRandR extension is not available.\n");
fprintf(stderr, "It is therefore not possible to track an RandR output.\n");
return EXIT_FAILURE;
}
outputId = NvFBCUtilsGetOutputId(statusParams.outputs,
statusParams.dwOutputNum,
outputName);
if (outputId == 0) {
fprintf(stderr, "RandR output '%s' not found.\n", outputName);
return EXIT_FAILURE;
}
}
/*
* Create a capture session.
*/
printf("Creating an asynchronous capture session of %u frames with 1 "
"second internal between captures.\n", nFrames);
memset(&createCaptureParams, 0, sizeof(createCaptureParams));
createCaptureParams.dwVersion = NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER;
createCaptureParams.eCaptureType = NVFBC_CAPTURE_SHARED_CUDA;
createCaptureParams.bWithCursor = NVFBC_TRUE;
createCaptureParams.frameSize = frameSize;
createCaptureParams.eTrackingType = trackingType;
if (trackingType == NVFBC_TRACKING_OUTPUT) {
createCaptureParams.dwOutputId = outputId;
}
fbcStatus = pFn.nvFBCCreateCaptureSession(fbcHandle, &createCaptureParams);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "%s\n", pFn.nvFBCGetLastErrorStr(fbcHandle));
return EXIT_FAILURE;
}
/*
* Set up the capture session.
*/
memset(&setupParams, 0, sizeof(setupParams));
setupParams.dwVersion = NVFBC_TOCUDA_SETUP_PARAMS_VER;
setupParams.eBufferFormat = bufferFormat;
fbcStatus = pFn.nvFBCToCudaSetUp(fbcHandle, &setupParams);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "%s\n", pFn.nvFBCGetLastErrorStr(fbcHandle));
return EXIT_FAILURE;
}
/*
* We are now ready to start grabbing frames.
*/
fd = fopen(videoFile, "wb");
if (fd == NULL)
{
fprintf(stderr,"Unable to create '%s'\n", videoFile);
return EXIT_FAILURE;
}
//configuring the per frame encode parameters
NV_ENC_PIC_PARAMS encoderPicParams;
//Codec specific params
/* NV_ENC_CODEC_PIC_PARAMS h264Params;
h264Params.h264PicParams.reserved3=0;
h264Params.h264PicParams.reservedBitFields=0;
h264Params.h264PicParams.ltrUsageMode=0;
memset(h264Params.h264PicParams.reserved,0,242*sizeof(uint32_t));
memset(h264Params.h264PicParams.reserved2,NULL,61*sizeof(void*));*/
encoderPicParams.version = NV_ENC_PIC_PARAMS_VER;
//encoderPicParams.codecPicParams=h264Params;
encoderPicParams.inputWidth=(600+ 31) & ~31;
encoderPicParams.inputHeight=(600+ 31) & ~31;
encoderPicParams.inputPitch = (600+ 31) & ~31;
//encoderPicParams.encodePicFlags=NV_ENC_PIC_FLAG_FORCEIDR;
//encoderPicParams.bufferFmt=NV_ENC_BUFFER_FORMAT_IYUV;
encoderPicParams.pictureStruct=NV_ENC_PIC_STRUCT_FRAME;
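//Note - these fields stay the same for every frame; inputBuffer, outputBitstream and bufferFmt
//are set per iteration inside the capture loop below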
for (i = 0; i < 60; i++) {
static CUdeviceptr cuDevicePtr;
static unsigned char *frame = NULL;
static uint32_t lastByteSize = 0;
//char filename[64];
int index=i;
if(i>=max_surfaces)
{
index=i%max_surfaces;
}
int res;
uint64_t t1, t2, t1_total, t2_total, t_delta, wait_time_ms;
CUresult cuRes;
NVFBC_TOCUDA_GRAB_FRAME_PARAMS grabParams;
NVFBC_FRAME_GRAB_INFO frameInfo;
t1 = NvFBCUtilsGetTimeInMillis();
t1_total = t1;
memset(&grabParams, 0, sizeof(grabParams));
memset(&frameInfo, 0, sizeof(frameInfo));
grabParams.dwVersion = NVFBC_TOCUDA_GRAB_FRAME_PARAMS_VER;
/*
* Use asynchronous calls.
*
* The application will not wait for a new frame to be ready. It will
* capture a frame that is already available. This might result in
* capturing several times the same frame. This can be detected by
* checking the frameInfo.bIsNewFrame structure member.
*/
grabParams.dwFlags = NVFBC_TOCUDA_GRAB_FLAGS_NOWAIT;
/*
* This structure will contain information about the captured frame.
*/
grabParams.pFrameGrabInfo = &frameInfo;
/*
* The frame will be mapped in video memory through this CUDA
* device pointer.
*/
grabParams.pCUDADeviceBuffer = &cuDevicePtr;
/*
* Capture a frame.
*/
fbcStatus = pFn.nvFBCToCudaGrabFrame(fbcHandle, &grabParams);
if (fbcStatus != NVFBC_SUCCESS) {
fprintf(stderr, "%s\n", pFn.nvFBCGetLastErrorStr(fbcHandle));
return EXIT_FAILURE;
}
t2 = NvFBCUtilsGetTimeInMillis();
/*
* Allocate or re-allocate the destination buffer in system memory
* when necessary.
*
* This is to handle change of resolution.
*/
if (lastByteSize < frameInfo.dwByteSize) {
frame = (unsigned char *)realloc(frame, frameInfo.dwByteSize);
if (frame == NULL) {
fprintf(stderr, "Unable to allocate system memory\n");
return EXIT_FAILURE;
}
printf("Reallocated %u KB of system memory\n",
frameInfo.dwByteSize / 1024);
lastByteSize = frameInfo.dwByteSize;
}
printf("%s id %u grabbed in %llu ms",
(frameInfo.bIsNewFrame ? "New frame" : "Frame"),
frameInfo.dwCurrentFrame,
(unsigned long long) (t2 - t1));
/*
* Download frame from video memory to system memory.
*/
t1 = NvFBCUtilsGetTimeInMillis();
cuRes = cuMemcpyDtoH_v2_ptr((void *) frame, cuDevicePtr,
frameInfo.dwByteSize);
if (cuRes != CUDA_SUCCESS) {
fprintf(stderr, "CUDA memcpy failure (result: %d)\n", cuRes);
return EXIT_FAILURE;
}
//creating the params for the nv enc lock input buffer
//inputBufferParams.pSysMemBuffer=(void*) frame;
//status = nvencFunctionList.nvEncCreateInputBuffer(encoder,&inputBufferParams);
//setting up the input params
NV_ENC_REGISTER_RESOURCE registerResource;
registerResource.version=NV_ENC_REGISTER_RESOURCE_VER;
registerResource.resourceType = NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR;
registerResource.width = (600+31)&~31;
registerResource.height=(600+31)&~31;
registerResource.pitch=(600+31)&~31;
registerResource.resourceToRegister = (void*)cuDevicePtr;
registerResource.bufferFormat = NV_ENC_BUFFER_FORMAT_NV12;
status = nvencFunctionList.nvEncRegisterResource(encoder,&registerResource);
if(status!=NV_ENC_SUCCESS)
{
printf("Failed to register resource, %u",status);
return EXIT_FAILURE;
}
//mapping these resources
NV_ENC_MAP_INPUT_RESOURCE mappedResource;
mappedResource.version = NV_ENC_MAP_INPUT_RESOURCE_VER;
mappedResource.registeredResource=registerResource.registeredResource;
status = nvencFunctionList.nvEncMapInputResource(encoder,&mappedResource);
if(status!=NV_ENC_SUCCESS)
{
printf("Could not map resource: %u,",status);
return EXIT_FAILURE;
}
encoderPicParams.outputBitstream=outputBuffers[index];
encoderPicParams.inputBuffer=mappedResource.mappedResource;
encoderPicParams.bufferFmt=mappedResource.mappedBufferFmt;
/* memset(encoderPicParams.reserved1,0,6*sizeof(uint32_t));
memset(encoderPicParams.reserved2,NULL,2*sizeof(void*));
memset(encoderPicParams.reserved3,0,286*sizeof(uint32_t));
memset(encoderPicParams.reserved4,0,60*sizeof(void*));*/
//sending the picture to be encoded
status=nvencFunctionList.nvEncEncodePicture(encoder,&encoderPicParams);
if(status!=NV_ENC_SUCCESS)
{
printf("\n Unable to encode video, Error code: %u",status);
return EXIT_FAILURE;
}
//setting up the lock bitstream params
//sending the data to the output stream
NV_ENC_LOCK_BITSTREAM lockBitstreamParams;
lockBitstreamParams.version=NV_ENC_LOCK_BITSTREAM_VER;
lockBitstreamParams.doNotWait=0;
lockBitstreamParams.outputBitstream=outputBuffers[index];
lockBitstreamParams.reservedBitFields=0;
memset(lockBitstreamParams.reserved,0,219*sizeof(uint32_t));
memset(lockBitstreamParams.reserved1,0,13*sizeof(uint32_t));
memset(lockBitstreamParams.reserved2,0,64*sizeof(void*));
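//Note - with doNotWait set to 0 the lock call below blocks until the encoded output for this
//frame is ready, so the bitstream pointer can be written to the file right after it returns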
status = nvencFunctionList.nvEncLockBitstream(encoder,&lockBitstreamParams);
if(status!=NV_ENC_SUCCESS)
{
printf("error locking bitstream");
return EXIT_FAILURE;
}
//writing the video data to the file
int bytes = fwrite(lockBitstreamParams.bitstreamBufferPtr,1,lockBitstreamParams.bitstreamSizeInBytes ,fd);
printf("\nNumber of bytes: %u\n",bytes);
if(bytes==0)
{
fprintf(stderr, "Unable to write to '%s'\n", videoFile);
return EXIT_FAILURE;
}
status = nvencFunctionList.nvEncUnlockBitstream(encoder,outputBuffers[index]);
status = nvencFunctionList.nvEncUnmapInputResource(encoder,mappedResource.mappedResource);
if(status!=NV_ENC_SUCCESS)
{
printf("failure to map resource, %u",status);
return EXIT_FAILURE;
}