How can I reduce the execution time of the following code?

Asked: 2015-03-11 04:18:07

Tags: c++ performance opencv image-processing sketching

I am trying to give an image a sketch effect using a Gaussian mixture modelling technique in OpenCV, but the code takes too long to run. With small images the time is acceptable; with larger images it grows considerably. Can anyone tell me how to reduce the execution time of the code below without changing the actual size of the image?

#include "opencv2/opencv.hpp"
#include <iostream>
#include <vector>
#include "opencv2/ml/ml.hpp"
#include <list>
#include <iostream>
using namespace cv;
using namespace std;

void clustrize_colors(Mat& src,Mat& dst)
{
	// Number of clusters
	int NrGMMComponents = 96;

	cv::GaussianBlur(src,src,Size(3,3),1);

	int srcHeight = src.rows;
	int srcWidth  = src.cols;

	// Get datapoints
	vector<Vec3d> ListSamplePoints;

	for (int y=0; y<srcHeight; y++)
	{
		for (int x=0; x<srcWidth; x++)
		{
			// Collecting points from image
			Vec3b bgrPixel = src.at<Vec3b>(y, x);

			uchar b = bgrPixel.val[0];
			uchar g = bgrPixel.val[1];
			uchar r = bgrPixel.val[2];
			if(rand()%25==0) // keep roughly every 25th pixel as a training sample
			{
				ListSamplePoints.push_back(Vec3d(b,g,r));
			}
		} // for (x)
	} // for (y)


	// Form training matrix
	int NrSamples = ListSamplePoints.size();    
	Mat samples( NrSamples, 3, CV_64FC1 );

	for (int s=0; s<NrSamples; s++)
	{
		Vec3d v = ListSamplePoints.at(s);
		samples.at<double>(s,0) = v[0];
		samples.at<double>(s,1) = v[1];
		samples.at<double>(s,2) = v[2];
	}    
	// 
	cout << "Learning to represent the sample distributions with " << NrGMMComponents << " gaussians." << endl;
	cout << "Started GMM training" << endl;

	Ptr<cv::ml::EM> em_model;
	cv::ml::EM::Params params(NrGMMComponents,cv::ml::EM::COV_MAT_GENERIC);

	Mat labels(NrSamples,1,CV_32SC1);
	Mat logLikelihoods( NrSamples, 1, CV_64FC1 );

	// Train classifier
	em_model=cv::ml::EM::train(samples,logLikelihoods,labels,noArray(),params);
	cout << "Finished GMM training" << endl;

	// result image
	Mat img  = Mat::zeros( Size( srcWidth, srcHeight ), CV_8UC3 );

	// predict cluster
	Mat sample( 1, 3, CV_64FC1 );

	Mat means=em_model->getMeans();

	for(int i = 0; i < img.rows; i++ )
	{
		for(int j = 0; j < img.cols; j++ )
		{
			Vec3b v=src.at<Vec3b>(i,j);
			sample.at<double>(0,0) = v[0];
			sample.at<double>(0,1) = v[1];
			sample.at<double>(0,2) = v[2];
			int response = cvRound(em_model->predict( sample ));
			img.at<Vec3b>(i,j)[0]=means.at<double>(response,0);
			img.at<Vec3b>(i,j)[1]=means.at<double>(response,1);
			img.at<Vec3b>(i,j)[2]=means.at<double>(response,2);
		}
	}

	img.convertTo(img,CV_8UC3);
        namedWindow("result",WINDOW_AUTOSIZE);
	imshow("result",img);
        imwrite("D:\\nfr.jpg",img);
	waitKey();
	dst=img;
}

void processLayer(Mat& src,Mat& dst)
{
	Mat tmp=src.clone();
	Mat gx,gy,mag,blurred;
	Sobel( src, gx, -1, 1, 0, 3);
	Sobel( src, gy, -1, 0, 1, 3);
	magnitude(gx,gy,mag);
	//GaussianBlur(mag,blurred,Size(3,3),2);
	//mag+=blurred;
	normalize(mag,mag,0,1,cv::NORM_MINMAX);
	//sqrt(mag,dst);
	dst=mag.clone();
	normalize(dst,dst,0,1,cv::NORM_MINMAX);
}

int main(int ac, char** av)
{
	Mat clusterized;
	Mat frame = imread("image path"); // load an image
	//resize(frame,frame,Size(256,256),0,0,INTER_LINEAR);
	clustrize_colors(frame,clusterized);
	clusterized.convertTo(clusterized,CV_32FC3,1.0/255.0);
	frame.convertTo(frame,CV_32FC3,1.0/255.0);
	Mat result1;
	vector<Mat> ch;
	split(frame, ch);

	processLayer(ch[0],ch[0]);
	processLayer(ch[1],ch[1]);
	processLayer(ch[2],ch[2]);

	merge(ch,result1);

	result1=(0.5*frame-0.9*result1+0.3*clusterized)*2.0;
        namedWindow("result1",WINDOW_AUTOSIZE);
	imshow("result1",result1);
        //cout<<result1;
        imwrite("D:\\finalresult.jpg",result1);
	waitKey(0);
	//destroyAllWindows();
	return 0;
}

1 Answer:

Answer 0 (score: 0)

The bottleneck is most likely OpenCV's cv::ml::EM::train method. Training a classifier is neither easy nor cheap; classification is far from a solved problem, which is why there are so many trade-offs and differences between algorithms, not to mention across different problem spaces.
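To confirm that training really is the hot spot, you can time the individual stages with OpenCV's standard tick helpers. A minimal sketch, reusing the variables from the question's clustrize_colors function:

// Rough timing sketch: wrap the suspected hot spot and print the elapsed time.
int64 t0 = cv::getTickCount();
em_model = cv::ml::EM::train(samples, logLikelihoods, labels, noArray(), params);
int64 t1 = cv::getTickCount();
cout << "EM training took " << (t1 - t0) / cv::getTickFrequency() << " s" << endl;

The same pattern around the per-pixel predict() loop will show how the remaining time is split.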

As for performance, if you insist on using EM, check the EM class documentation (and possibly its parent classes) for what you can modify, for example (see the sketch after this list):

  • the maximal number of training iterations, and/or
  • the termination criteria for stopping the training.
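For illustration, here is a minimal sketch under the assumption that you stay on the pre-3.0-final ml API the question code compiles against, where cv::ml::EM::Params accepts a cv::TermCriteria as its third argument; the concrete limits (20 iterations, 0.1 epsilon) are placeholders to tune, not recommendations:

// Hedged sketch: cap the EM iteration count and loosen the convergence threshold.
// Both limits below are illustrative and need tuning against your own images.
cv::TermCriteria termCrit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                          20,    // maximum number of EM iterations
                          0.1);  // stop once the log-likelihood change falls below this
cv::ml::EM::Params params(NrGMMComponents, cv::ml::EM::COV_MAT_GENERIC, termCrit);
em_model = cv::ml::EM::train(samples, logLikelihoods, labels, noArray(), params);

In OpenCV 3.0 final and later the Params struct was removed; the equivalent there would be a model created with cv::ml::EM::create() and configured through setClustersNumber(), setCovarianceMatrixType() and setTermCriteria() before calling trainEM().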

Beyond that, since the work is done by a third-party library, there is little you can do that would improve speed without sacrificing accuracy. On the other hand, the library is open source and is probably fairly well optimized; I would not recommend trying to optimize the actual library code.