Convert unsigned char* into a FlyCapture2 Image for cvShowImage (OpenCV)

Date: 2012-06-01 00:17:27

Tags: c++ c opencv flycapture

I am using three Point Grey cameras to acquire images and save them to my hard disk. I am using the MultiCameraWriteToDiskEx example and it works well, but I would also like to display the images while they are being acquired, so I am trying to convert the FlyCapture images into a usable OpenCV format that I can display with cvShowImage() or imshow().

I have a function, IplImage* ConvertImageToOpenCV(Image* pImage), that converts a FlyCapture2 Image* into an OpenCV IplImage*, but I do not know how to call it inside doGrabLoop(), the function that grabs the camera frames.

Can you help me? I am not very confident with C/C++ :( In particular, I do not know how to convert g_arImageplus[uiCamera].image.pData so that I can pass it to ConvertImageToOpenCV(). (Is using g_arImageplus[uiCamera].image.pData the right approach at all?)

IplImage* ConvertImageToOpenCV(Image* pImage)
{
    IplImage* cvImage = NULL;
    bool bColor = true;
    CvSize mySize;
    mySize.height = pImage->GetRows();
    mySize.width = pImage->GetCols();
    printf("ciao %d\n",  pImage->GetPixelFormat() );
    switch ( pImage->GetPixelFormat() )
    {
        case PIXEL_FORMAT_MONO8:     cvImage = cvCreateImageHeader(mySize, 8, 1 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 1;
                                     bColor = false;
                                     break;
        case PIXEL_FORMAT_411YUV8:   cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_422YUV8:   cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_444YUV8:   cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_RGB8:      cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_MONO16:    cvImage = cvCreateImageHeader(mySize, 16, 1 );
                                     cvImage->depth = IPL_DEPTH_16U;
                                     cvImage->nChannels = 1;
                                     bColor = false;
                                     break;
        case PIXEL_FORMAT_RGB16:     cvImage = cvCreateImageHeader(mySize, 16, 3 );
                                     cvImage->depth = IPL_DEPTH_16U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_S_MONO16:  cvImage = cvCreateImageHeader(mySize, 16, 1 );
                                     cvImage->depth = IPL_DEPTH_16U;
                                     cvImage->nChannels = 1;
                                     bColor = false;
                                     break;
        case PIXEL_FORMAT_S_RGB16:   cvImage = cvCreateImageHeader(mySize, 16, 3 );
                                     cvImage->depth = IPL_DEPTH_16U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_RAW8:      cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_RAW16:     cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_MONO12:    printf("Not supported by OpenCV");
                                     bColor = false;
                                     break;
        case PIXEL_FORMAT_RAW12:     printf("Not supported by OpenCV");
                                     break;
        case PIXEL_FORMAT_BGR:       cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_BGRU:      cvImage = cvCreateImageHeader(mySize, 8, 4 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 4;
                                     break;
        case PIXEL_FORMAT_RGBU:      cvImage = cvCreateImageHeader(mySize, 8, 4 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 4;
                                     break;
        default: printf("Some error occured...\n");
                 return NULL;
    }

    if(bColor) {
        if(!bInitialized)
        {
            colorImage.SetData(new unsigned char[pImage->GetCols() * pImage->GetRows()*3], pImage->GetCols() * pImage->GetRows()*3);
            bInitialized = true;
        }

        pImage->Convert(PIXEL_FORMAT_BGR, &colorImage); //needs to be as BGR to be saved

        cvImage->width = colorImage.GetCols();
        cvImage->height = colorImage.GetRows();
        cvImage->widthStep = colorImage.GetStride();

        cvImage->origin = 0; //interleaved color channels

        cvImage->imageDataOrigin = (char*)colorImage.GetData(); //DataOrigin and Data same pointer, no ROI
        cvImage->imageData         = (char*)(colorImage.GetData());
        cvImage->widthStep      = colorImage.GetStride();
        cvImage->nSize = sizeof (IplImage);
        cvImage->imageSize = cvImage->height * cvImage->widthStep;
    }
    else
    {
        cvImage->imageDataOrigin = (char*)(pImage->GetData());
        cvImage->imageData         = (char*)(pImage->GetData());
        cvImage->widthStep         = pImage->GetStride();
        cvImage->nSize             = sizeof (IplImage);
        cvImage->imageSize         = cvImage->height * cvImage->widthStep;

        //at this point cvImage contains a valid IplImage
     }
    return cvImage;
}
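
For display with imshow(), the IplImage header returned above can be wrapped in a cv::Mat without copying the pixels. A minimal sketch, assuming OpenCV 2.x and that 'image' stands for a FlyCapture2::Image that has already been filled:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

IplImage* frame = ConvertImageToOpenCV(&image);
if (frame != NULL)
{
    cv::Mat matFrame = cv::cvarrToMat(frame);  // wraps the existing data, no copy
    cv::imshow("prova", matFrame);
    cv::waitKey(1);
    cvReleaseImageHeader(&frame);              // frees the header only, not the pixel data
}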

//
// Grab and test loop
//
int doGrabLoop()
{
   FlyCaptureError   error = FLYCAPTURE_FAILED;
   unsigned int      aruiPrevSeqNum[ _MAX_CAMERAS ];
   unsigned int      aruiDelta[ _MAX_CAMERAS ];
   unsigned int      aruiCycles[ _MAX_CAMERAS ];
   HANDLE        arhFile[ _MAX_CAMERAS ];
   DWORD         ardwBytesWritten[ _MAX_CAMERAS ];
   DWORD         dwTotalKiloBytesWritten = 0;
   bool          bMissed = false;
   bool          bOutOfSync = false;
   unsigned int      uiMissedImages = 0;
   unsigned int      uiOutOfSyncImages = 0;
   __int64       nStartTime = 0;
   __int64       nEndTime = 0;
   __int64       nDifference = 0;
   __int64       nTotalTime = 0;
   __int64       nGlobalStartTime = 0;
   __int64       nGlobalEndTime = 0;
   __int64       nGlobalTotalTime = 0;
   __int64       nFrequency = 0;

   QueryPerformanceFrequency( (LARGE_INTEGER*)&nFrequency );
   QueryPerformanceCounter( (LARGE_INTEGER*)&nGlobalStartTime );

   printf( "Starting grab...\n" );

   // Create files to write to
   if ( createFiles( arhFile ) != 0 )
   {
      printf( "There was error creating the files\n" );
      return -1;
   }  

   BOOL bSuccess;

   //
   // Start grabbing the images
   //

   for( int iImage = 0; iImage < g_iNumImagesToGrab; iImage++ )
   {
#ifdef _VERBOSE
      printf( "Grabbing image %u\n", iImage );
#else
      printf( "." );
#endif

      unsigned int uiCamera = 0;

      // Grab an image from each camera
      for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
      {
     error = flycaptureLockNext( g_arContext[uiCamera], &g_arImageplus[uiCamera] );
         _HANDLE_ERROR( error, "flycaptureLockNext()" );

     // Save image dimensions & bayer info from first image for each camera
     if(iImage == 0)
     {
        g_arImageTemplate[uiCamera] = g_arImageplus[uiCamera].image;

        error = flycaptureGetColorTileFormat(g_arContext[uiCamera], &g_arBayerTile[uiCamera]);
        _HANDLE_ERROR( error, "flycaptureGetColorTileFormat()" );
     }
      }

      for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
      {      
     // Start timer
     QueryPerformanceCounter( (LARGE_INTEGER*)&nStartTime );

     // Calculate the size of the image to be written
     int iImageSize = 0;     
     int iRowInc = g_arImageplus[uiCamera].image.iRowInc;
     int iRows = g_arImageplus[uiCamera].image.iRows;
     iImageSize = iRowInc * iRows;
 // ERROR: HOW CAN I CONVERT g_arImageplus[ uiCamera ].image.pData IN ORDER TO USE IT WITH ConvertImageToOpenCV() function?
     IplImage* destImage = ConvertImageToOpenCV(g_arImageplus[ uiCamera ].image.pData);

        cvShowImage("prova", destImage);


     waitKey(1);

     // Write to the file
     bSuccess = WriteFile(
        arhFile[uiCamera], 
        g_arImageplus[ uiCamera ].image.pData, 
        iImageSize,
        &ardwBytesWritten[uiCamera], 
        NULL ); 

     // End timer
     QueryPerformanceCounter( (LARGE_INTEGER*)&nEndTime );  

     // Ensure that the write was successful
     if ( !bSuccess || ( ardwBytesWritten[uiCamera] != (unsigned)iImageSize ) ) 
     {
        printf( "Error writing to file for camera %u!\n", uiCamera );
        return -1;
     }

     // Update various counters
     dwTotalKiloBytesWritten += (ardwBytesWritten[uiCamera] / 1024);
     nDifference = nEndTime - nStartTime;
     nTotalTime += nDifference;

     // Keep track of the difference in image sequence numbers (uiSeqNum)
     // in order to determine if any images have been missed.  A difference
     // greater than 1 indicates that an image has been missed.
         if( iImage == 0 )
         {
        // This is the first image, set up the variables
            aruiPrevSeqNum[uiCamera] = g_arImageplus[uiCamera].uiSeqNum;
            aruiDelta[uiCamera] = 1;
         }
         else
         {
        // Get the difference in sequence numbers between the current
        // image and the last image we received
            aruiDelta[uiCamera] = 
               g_arImageplus[uiCamera].uiSeqNum - aruiPrevSeqNum[uiCamera];
         }         

         if( aruiDelta[uiCamera] != 1 )
         {
            // We have missed an image.
        bMissed = true;
            uiMissedImages += aruiDelta[uiCamera] - 1;
         }
     else
     {
        bMissed = false;
     }

         aruiPrevSeqNum[uiCamera] = g_arImageplus[uiCamera].uiSeqNum;

     // Calculate the cycle count for the camera
         aruiCycles[uiCamera] = 
            g_arImageplus[uiCamera].image.timeStamp.ulCycleSeconds * 8000 +
            g_arImageplus[uiCamera].image.timeStamp.ulCycleCount;

     // Determine the difference of the timestamp for every image from the
     // first camera.  If the difference is greater than 1 cycle count, 
     // register the camera as being out of synchronization.
     int iDeltaFrom0 = abs( (int)(aruiCycles[uiCamera] - aruiCycles[0]) );

         if( ( iDeltaFrom0 % ( 128 * 8000 - 1 ) ) > 1 )
         {
        bOutOfSync = true;
        uiOutOfSyncImages++;
     }
     else
     {
        bOutOfSync = false;
     }

#ifdef _VERBOSE
     // Output is in the following order:
     // - The index of the image being captured
     // - The index of the camera that is currently being captured
     // - The time taken to write the image to disk
     // - Number of kilobytes written
     // - Write speed (in MB/s)
     // - Sequence number
     // - Cycle seconds in timestamp
     // - Cycle count in timestamp
     // - Delta from 0th value
     // - Missed an image?
     // - Out of sync?
     printf( 
        "%04d: \t%02u\t%0.5f\t%.0lf\t%.2lf\t%04u\t%03u.%04u\t%d\t%s %s\n",
        iImage,
        uiCamera,
        nDifference,
        (double)ardwBytesWritten[ uiCamera ] / 1024.0,
        (double)ardwBytesWritten[ uiCamera ] / ( 1024 * 1024 * nDifference ),
        g_arImageplus[ uiCamera ].uiSeqNum,
        g_arImageplus[ uiCamera ].image.timeStamp.ulCycleSeconds,
            g_arImageplus[ uiCamera ].image.timeStamp.ulCycleCount,
        iDeltaFrom0,
        bMissed ? "Y" : "N",
        bOutOfSync ? "Y" : "N");
#endif
      }

      // Unlock image, handing the buffer back to the buffer pool.
      for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
      {
         error = flycaptureUnlock( 
            g_arContext[uiCamera], g_arImageplus[uiCamera].uiBufferIndex );
         _HANDLE_ERROR( error, "flycaptureUnlock()" );
      }
   }

   //
   // Done grabbing images
   //

   QueryPerformanceCounter( (LARGE_INTEGER*)&nGlobalEndTime );
   nGlobalTotalTime = nGlobalEndTime - nGlobalStartTime;

   double dGlobalTotalTime = (double)nGlobalTotalTime / (double)nFrequency;
   double dTotalTime = (double)nTotalTime / (double)nFrequency;

   // Report on the results
   // Burst time is the time that was spent writing to disk only
   // Overall time is total time taken, including image grabs, calculations etc
   printf( 
      "\nBurst: Wrote %.1lfMB in %0.2fs ( %.2lfMB/sec )\n", 
      (double)( dwTotalKiloBytesWritten / 1024 ), 
      dTotalTime,
      (double)( dwTotalKiloBytesWritten / ( 1024 * dTotalTime ) ) );

   printf(
      "Overall: Wrote %.1lfMB in %0.2fs ( %.2lfMB/sec )\n",
      (double)( dwTotalKiloBytesWritten / 1024 ), 
      dGlobalTotalTime,
      (double)( dwTotalKiloBytesWritten / ( 1024 * dGlobalTotalTime ) ) );  

   printf( g_bSyncSuccess ? "Sync success\n" : "Sync failed\n" );
   printf( "Missed images = %u.\n", uiMissedImages );
   printf( "Out of sync images = %u.\n", uiOutOfSyncImages );

   for ( unsigned int uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
   {
      // Close file handles
      CloseHandle(arhFile[uiCamera]);
   }

   return 0;
}

1 Answer:

Answer 0 (score: 1)

Your conversion function:

IplImage* ConvertImageToOpenCV(Image* pImage)

takes an Image pointer. Construct a new Image and pass it the data you already have. Here's the doc for the Image class. If you use this constructor, it will neither retain the buffer nor copy it:

Image( unsigned int    rows,
       unsigned int    cols,
       unsigned int    stride,
       unsigned char*  pData,
       unsigned int    dataSize,
       PixelFormat     format,
       BayerTileFormat bayerFormat = NONE )

So your code would look something like this:

// Pull these values from your other image structure
Image image(rows, cols, stride, pData, dataSize, format, bayerFormat);

// Pass the address of the temporary image to your conversion
IplImage* opencvImage = ConvertImageToOpenCV(&image);
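
Inside doGrabLoop() those values can be pulled straight from g_arImageplus[uiCamera].image. Below is a minimal sketch, not the example's own code: iRows, iRowInc and pData already appear in the question, while the FlyCaptureImage type name, the iCols field and PIXEL_FORMAT_MONO8 are assumptions, so substitute the pixel format your cameras are actually configured for.

// Sketch: wrap the locked FlyCapture buffer in a temporary FlyCapture2::Image,
// convert it, display it, then release the IplImage header.
FlyCaptureImage& fcImage = g_arImageplus[uiCamera].image;

Image image(
    fcImage.iRows,                     // rows
    fcImage.iCols,                     // cols (assumed field name)
    fcImage.iRowInc,                   // stride in bytes per row
    fcImage.pData,                     // pixel buffer, borrowed rather than copied
    fcImage.iRowInc * fcImage.iRows,   // dataSize in bytes
    PIXEL_FORMAT_MONO8 );              // assumption: replace with your actual format

IplImage* destImage = ConvertImageToOpenCV(&image);
if (destImage != NULL)
{
    cvShowImage("prova", destImage);
    cvWaitKey(1);
    cvReleaseImageHeader(&destImage);  // header only; the pixel data still belongs to FlyCapture
}

Because the Image constructor does not copy the buffer, do the conversion and the cvShowImage() call before flycaptureUnlock() hands that buffer back to the pool.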