I have done some preprocessing for a DFT, and I am trying to save the resulting image with imwrite.
My cropped image has this info:
output.type() 5
output.channels() 1
output.depth() 5
But whenever I save it, the output comes out black. I have checked the existing older Stack Overflow threads, but none of them seem to work for me, e.g. OpenCV2.3 imwrite saves black image.
I have also tried a lot of color conversions and depth conversions, but I can't figure out why it doesn't work.
std::vector<int> qualityType;
qualityType.push_back(CV_IMWRITE_JPEG_QUALITY);
qualityType.push_back(90);
Mat out1,out2;
cv::cvtColor(output, out1, CV_GRAY2BGR);
//out1.convertTo(out2,CV_8U,1./256); // I tried this too
cv::imwrite("dft1.jpg",out1,qualityType); // I even tried using the quality parameter
imshow displays this image just fine; the problem only appears when I save it.
Please help.
[Edit] Maybe there is a problem with the DFT class I wrote, because whenever I use the dft function the output only works for imshow; saving it does not work.
CDftRidgeAnalyses::CDftRidgeAnalyses(void)
{
}

CDftRidgeAnalyses::~CDftRidgeAnalyses(void)
{
}

Mat CDftRidgeAnalyses::GetRidgeAnalyses(Mat inpGray)
{
    Mat img = inpGray;
    int WidthPadded = 0, HeightPadded = 0;
    WidthPadded = img.cols * 2;
    HeightPadded = img.rows * 2;
    int M = getOptimalDFTSize(img.rows);

    // Create a Gaussian highpass filter 5% the height of the Fourier transform
    double db = 0.05 * HeightPadded;

    Mat fft = ForierTransform(img.clone(), HeightPadded, WidthPadded);
    Mat ghpf = CreateGaussianHighPassFilter(Size(WidthPadded, HeightPadded), db);
    Mat res;
    cv::mulSpectrums(fft, ghpf, res, DFT_COMPLEX_OUTPUT);

    Mat mag = GetDftToImage(res, img);

    int cx = mag.cols / 2;
    int cy = mag.rows / 2;
    cv::Mat croped = mag(cv::Rect(0, 0, cx, cy));

    cv::threshold(mag, mag, 0.019, 1, cv::THRESH_BINARY);

    Mat bgr;
    cvtColor(mag, bgr, CV_GRAY2RGB);

    //imshow("XXX",bgr);
    //imshow("croped", croped);
    //imshow("img",img);
    //cv::waitKey();

    return croped;
}
Mat CDftRidgeAnalyses::ForierTransform(Mat inpGray, int M, int N)
{
    Mat img = inpGray;
    int i = img.channels();

    Mat padded;
    Mat img2;
    img.convertTo(img2, CV_64F, 1./255);
    copyMakeBorder(img2, padded, 0, M - img2.rows, 0, N - img2.cols, BORDER_CONSTANT, Scalar::all(0));

    Mat element1 = Mat_<float>(padded);
    Mat element2 = Mat::zeros(padded.size(), CV_32F);
    Mat planes[] = {element1, element2};
    Mat complexImg;
    merge(planes, 2, complexImg);

    dft(complexImg, complexImg, 0, img.rows);
    //printMat(complexImg);
    return complexImg;
}
double CDftRidgeAnalyses::pixelDistance(double u, double v)
{
    return cv::sqrt(u*u + v*v);
}

double CDftRidgeAnalyses::gaussianCoeff(double u, double v, double d0)
{
    double d = pixelDistance(u, v);
    return 1.0 - cv::exp((-d*d) / (2*d0*d0));
}
cv::Mat CDftRidgeAnalyses::CreateGaussianHighPassFilter(cv::Size size, double cutoffInPixels)
{
    Mat ghpf(size, CV_32F);
    cv::Point center2((size.width*0.80), size.width/2);
    //cv::Point center2(0,0);

    for(int u = 0; u < ghpf.rows; u++)
    {
        for(int v = 0; v < ghpf.cols; v++)
        {
            ghpf.at<float>(u, v) = gaussianCoeff(u - center2.x, v - center2.y, cutoffInPixels);
        }
    }

    Mat bmp;
    int channels = ghpf.channels();
    int type = ghpf.type();
    int depth = ghpf.depth();
    cv::cvtColor(ghpf, bmp, CV_GRAY2RGB);
    cv::cvtColor(ghpf, bmp, CV_GRAY2BGRA);
    //imshow("XXX",bmp);

    int cx = ghpf.cols/2;
    int cy = ghpf.rows/2;
    Mat tmp;
    int iExactright = (size.width*0.59);
    int iExactbottom = (size.height*0.86);
    //full Mat q0(ghpf, Rect(69,10,400,290));
    // Mat whiteq(ghpf, Rect(0,390,270,330));

    int iMainleft = 0, iMainright = 0;
    int iMainBottom = 0, iMainTop = 0;
    Mat Quad;
    Mat ql(ghpf, Rect(190,0,270,330));

    /** Make the rectangle on middle default filter with respect to top right angle **/
    iMainleft = (size.width*0.111);
    iMainright = (size.width*0.402);
    iMainTop = (size.height*0.484);
    iMainBottom = (size.height*0.155);
    Quad = ghpf(Rect(iMainleft, iMainTop, iMainright+6, iMainBottom));
    Mat qTopRight(ghpf, Rect(iExactright, 0, iMainright+6, iMainBottom));
    Quad.copyTo(qTopRight);

    /** Make the rectangle on middle default filter with respect to top left angle **/
    iMainright = (size.width*0.402);
    Quad = ghpf(Rect(300, iMainTop, 300, iMainBottom));
    Mat qTopLeft(ghpf, Rect(0, 0, 300, iMainBottom));
    Quad.copyTo(qTopLeft);

    /** Make the rectangle on middle default filter with respect to bottom left angle **/
    iMainTop = iMainTop - iMainBottom;
    iExactbottom = size.height - iMainBottom;
    Quad = ghpf(Rect(300, iMainTop, 300, iMainBottom));
    Mat qBottomLeft(ghpf, Rect(0, iExactbottom, 300, iMainBottom));
    Quad.copyTo(qBottomLeft);

    /** Make the rectangle on middle default filter with respect to bottom right angle **/
    iMainleft = (size.width*0.111);
    iMainright = (size.width*0.402);
    Quad = ghpf(Rect(iMainleft, iMainTop, iMainright+6, iMainBottom));
    Mat qBottomRight(ghpf, Rect(iExactright, iExactbottom, iMainright+6, iMainBottom));
    Quad.copyTo(qBottomRight);

    // remove middle rectangle [ circle ]
    iMainright = (size.width*0.402);
    Quad = ghpf(Rect(0, iMainTop+iMainTop, size.width, iMainBottom+iMainBottom-130));
    Mat qMiddle(ghpf, Rect(0, iMainTop+150, size.width, iMainBottom+iMainBottom-130));
    Quad.copyTo(qMiddle);
    qMiddle = ghpf(Rect(0, iMainTop-10, size.width, iMainBottom+iMainBottom-130));
    Quad.copyTo(qMiddle);

    normalize(ghpf, ghpf, 0, 1, CV_MINMAX);

    /*Mat x;
    cv::resize(ghpf, x, cv::Size(400,700));
    imshow("fftXhighpass2", x);*/

    Filter = ghpf;

    Mat padded;
    copyMakeBorder(ghpf, padded, 0, size.height - ghpf.rows, 0, size.width - ghpf.cols, BORDER_CONSTANT, Scalar::all(0));
    Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    Mat complexImg;
    merge(planes, 2, complexImg);

    return complexImg;
}
Mat CDftRidgeAnalyses::GetDftToImage(Mat res, Mat orgImage)
{
    idft(res, res, DFT_COMPLEX_OUTPUT, orgImage.rows);

    Mat padded;
    copyMakeBorder(orgImage, padded, 0, orgImage.rows, 0, orgImage.cols, BORDER_CONSTANT, Scalar::all(0));
    Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    split(res, planes);
    magnitude(planes[0], planes[1], planes[0]);
    Mat mag = planes[0];
    mag += Scalar::all(1);
    // log(mag, mag);

    // crop the spectrum, if it has an odd number of rows or columns
    mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));

    // note: after this min-max normalization mag is a CV_32F image with values in [0, 1]
    normalize(mag, mag, 1, 0, CV_MINMAX);

    return mag;
}
The output I want to save comes from:
Mat org = imread("4.png",CV_LOAD_IMAGE_GRAYSCALE);
Mat re;
resize(org,re,cv::Size(311,519));
Mat xyz = CDftRidgeAnalyses::GetRidgeAnalyses(re);
cv::imwrite("dft1.jpg",xyz);
Here the matrix xyz has these values:
output.type() 5
output.channels() 1
output.depth() 5
I hope you can help me better now... maybe I am losing some precision after converting back from the complex filter?
Answer 0 (score: 14)
imwrite writes on a scale of 0 to 255, but your image is on a scale of 0 to 1. To scale it up, use the following line:
image.convertTo(image, CV_8UC3, 255.0);
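Applied to the code in the question, a minimal sketch of the full save path could look like this (assuming xyz is the single-channel CV_32F result in the 0 to 1 range that the reported type/channels/depth suggest; out8u is just a scratch Mat for the converted copy):
Mat xyz = CDftRidgeAnalyses::GetRidgeAnalyses(re);
Mat out8u;
xyz.convertTo(out8u, CV_8U, 255.0);   // scale the 0..1 floats to 0..255 and switch to 8-bit
cv::imwrite("dft1.jpg", out8u);
Note that convertTo only changes the depth; the number of channels stays the same as the input, so CV_8U is enough for a single-channel Mat and CV_8UC3 is only needed after converting to a 3-channel BGR image.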
Answer 1 (score: 6)
This feels like a float vs. integer problem. When your image has floating-point values, OpenCV's imshow() expects those values to be between 0 and 1:
http://opencv.itseez.com/modules/highgui/doc/user_interface.html?highlight=imshow#cv2.imshow
I am not quite sure what imwrite() does with floating-point images, since I could not tell from the documentation:
In any case, imwrite probably expects integer values between 0 and 255, and probably just casts the floats to integers. In that case almost everything gets cast to 0 (e.g., 0.8 is cast to 0), hence your black image.
Try converting your image to CV_8UCx. Or, here is what I use to debug these kinds of OpenCV problems:
void printType(Mat &mat) {
    if(mat.depth() == CV_8U)       printf("unsigned char(%d)", mat.channels());
    else if(mat.depth() == CV_8S)  printf("signed char(%d)", mat.channels());
    else if(mat.depth() == CV_16U) printf("unsigned short(%d)", mat.channels());
    else if(mat.depth() == CV_16S) printf("signed short(%d)", mat.channels());
    else if(mat.depth() == CV_32S) printf("signed int(%d)", mat.channels());
    else if(mat.depth() == CV_32F) printf("float(%d)", mat.channels());
    else if(mat.depth() == CV_64F) printf("double(%d)", mat.channels());
    else                           printf("unknown(%d)", mat.channels());
}

void printInfo(const char *prefix, Mat &mat) {
    printf("%s: ", prefix);
    printf("dim(%d, %d)", mat.rows, mat.cols);
    printType(mat);
    printf("\n");
}

void printInfo(Mat &mat) {
    printf("dim(%d, %d)", mat.rows, mat.cols);
    printType(mat);
    printf("\n");
}
This way you can find out what your cv::Mat actually holds in its data field.
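A possible way to use these helpers on the matrices from the question (just a sketch; xyz and re are the variables from the snippet at the end of the question, xyz8u is a new name for the converted copy):
Mat xyz = CDftRidgeAnalyses::GetRidgeAnalyses(re);
printInfo("xyz", xyz);               // expected to report a 1-channel float Mat, e.g. "xyz: dim(rows, cols)float(1)"

Mat xyz8u;
xyz.convertTo(xyz8u, CV_8U, 255.0);  // scale the 0..1 floats up to 0..255 8-bit values
printInfo("xyz8u", xyz8u);           // should now report "unsigned char(1)", which imwrite handles as expected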
PS: I did not debug your code thoroughly, so stay open to other causes of the problem.
Answer 2 (score: 3)
Python solution, for people coming here from Google:
import numpy as np
import cv2
frame_normed = 255 * (frame - frame.min()) / (frame.max() - frame.min())
frame_normed = np.array(frame_normed, np.uint8)  # cast to 8-bit so cv2.imwrite saves it correctly
cv2.imwrite("path/to/out/file", frame_normed)
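For completeness, the same min-max rescaling can be done on the C++ side in a single call (a sketch, assuming xyz is the single-channel float result from the question's code):
Mat out8u;
cv::normalize(xyz, out8u, 0, 255, cv::NORM_MINMAX, CV_8U);  // rescale to 0..255 and convert to 8-bit in one step
cv::imwrite("dft1.jpg", out8u);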