I am using OpenCV 2.4.8.2 on Mac OS 10.9.5. I have the following code snippet:
static void compute_weights(const vector<Mat>& images, vector<Mat>& weights)
{
    weights.clear();
    for (int i = 0; i < images.size(); i++) {
        Mat image = images[i];
        Mat mask = Mat::zeros(image.size(), CV_32F);
        int x_start = (i == 0) ? 0 : image.cols/2;
        int y_start = 0;
        int width = image.cols/2;
        int height = image.rows;
        Mat roi = mask(Rect(x_start, y_start, width, height)); // Set ROI
        roi.setTo(1);
        weights.push_back(mask);
    }
}

static void blend(const vector<Mat>& inputImages, Mat& outputImage)
{
    int maxPyrIndex = 6;
    vector<Mat> weights;
    compute_weights(inputImages, weights);

    // Find the fused pyramid:
    vector<Mat> fused_pyramid;
    for (int i = 0; i < inputImages.size(); i++) {
        Mat image = inputImages[i];

        // Build Gaussian pyramid for the weights
        vector<Mat> weight_gaussian_pyramid;
        buildPyramid(weights[i], weight_gaussian_pyramid, maxPyrIndex);

        // Build Laplacian pyramid for the original image
        Mat float_image;
        inputImages[i].convertTo(float_image, CV_32FC3, 1.0/255.0);
        vector<Mat> orig_guassian_pyramid;
        vector<Mat> orig_laplacian_pyramid;
        buildPyramid(float_image, orig_guassian_pyramid, maxPyrIndex);
        for (int j = 0; j < orig_guassian_pyramid.size() - 1; j++) {
            Mat sized_up;
            pyrUp(orig_guassian_pyramid[j+1], sized_up, Size(orig_guassian_pyramid[j].cols, orig_guassian_pyramid[j].rows));
            orig_laplacian_pyramid.push_back(orig_guassian_pyramid[j] - sized_up);
        }
        // The last Laplacian layer is the same as the Gaussian layer
        orig_laplacian_pyramid.push_back(orig_guassian_pyramid[orig_guassian_pyramid.size()-1]);

        // Weight the Laplacian pyramid of the image by the Gaussian weight pyramid
        vector<Mat> convolved;
        for (int j = 0; j < maxPyrIndex + 1; j++) {
            // Replicate the single-channel weight into 3 channels
            vector<Mat> gaussian_3d_vec;
            for (int k = 0; k < 3; k++) {
                gaussian_3d_vec.push_back(weight_gaussian_pyramid[j]);
            }
            Mat gaussian_3d;
            merge(gaussian_3d_vec, gaussian_3d);
            //Mat convolved_result = weight_gaussian_pyramid[j].clone();
            Mat convolved_result = gaussian_3d.clone();
            multiply(gaussian_3d, orig_laplacian_pyramid[j], convolved_result);
            convolved.push_back(convolved_result);
        }
        if (i == 0) {
            fused_pyramid = convolved;
        } else {
            for (int j = 0; j < maxPyrIndex + 1; j++) {
                fused_pyramid[j] += convolved[j];
            }
        }
    }

    // Blending: collapse the fused pyramid from the coarsest level upwards
    for (int i = (int)fused_pyramid.size()-1; i > 0; i--) {
        Mat sized_up;
        pyrUp(fused_pyramid[i], sized_up, Size(fused_pyramid[i-1].cols, fused_pyramid[i-1].rows));
        fused_pyramid[i-1] += sized_up;
    }

    Mat final_color_bgr;
    fused_pyramid[0].convertTo(final_color_bgr, CV_32F, 255);
    final_color_bgr.copyTo(outputImage);
    imshow("final", outputImage);
    waitKey(0);
    imwrite(outputImagePath, outputImage);
}
This code does some basic pyramid blending for two images. The key issue is with imshow and imwrite in the last lines: they give me drastically different results. I apologize for showing such long/messy code, but I am worried the difference comes from some other part of the code that then affects the imshow and imwrite calls.
The first picture shows the result of imwrite and the second the result of imshow, based on the code given. I am confused as to why this happens.
I also noticed that when I do this:
Mat float_image;
inputImages[i].convertTo(float_image, CV_32FC3, 1.0/255.0);
imshow("float image", float_image);
imshow("orig image", image);
they display exactly the same thing, i.e. both windows show the same picture as the original RGB image.
Answer 0 (score: 4)
IMWRITE function
By default, imwrite can only save 8-bit (or 16-bit unsigned, CV_16U, in the case of PNG, JPEG 2000, and TIFF) single-channel or 3-channel images, with the channels in 'BGR' order. So whatever you feed to imwrite, it is blindly converted to CV_8U in the range 0 (black) to 255 (white), in BGR format, before being written to disk.
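As a minimal sketch (assuming a variable named blended holding CV_32FC3 data already scaled to the 0.0-1.0 range, and "output.png" as a placeholder path), you can make that 8-bit conversion explicit yourself instead of relying on imwrite's implicit behaviour:

// Assumption: 'blended' is CV_32FC3 with values in [0.0, 1.0].
Mat blended_8u;
blended.convertTo(blended_8u, CV_8UC3, 255.0); // scale to [0, 255] and cast to 8-bit
imwrite("output.png", blended_8u);             // the file now contains exactly what you intended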
IMSHOW - the issue
So, looking at your function, in the line
fused_pyramid[0].convertTo(final_color_bgr, CV_32F, 255);
fused_pyramid is already of Mat type 21 (CV_32FC3, floating point). You convert it to float again with a scale factor of 255, and this scale factor of 255 is what causes the problem with imshow. For visualization you can pass fused_pyramid[0] to imshow directly, without the conversion, because it is already a floating-point image scaled between 0.0 (black) and 1.0 (white).
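Concretely, the end of blend() could look roughly like this (just a sketch based on the snippet above; it assumes fused_pyramid[0] really is CV_32FC3 in [0.0, 1.0] and reuses outputImagePath from your code):

// imshow treats CV_32F pixel values as 0.0 = black and 1.0 = white,
// so the blended float result can be displayed directly, without scaling by 255.
imshow("final", fused_pyramid[0]);
waitKey(0);

// For saving, convert explicitly to 8-bit so the file on disk
// matches what imshow displays.
Mat final_8u;
fused_pyramid[0].convertTo(final_8u, CV_8UC3, 255.0);
imwrite(outputImagePath, final_8u);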
Hope it helps.