I am implementing hybrid images in ImageJ and I am stuck at merging the low-pass filtered image with the high-pass filtered image to form the hybrid image.
This is what I have done so far: I have two images, one from a Gaussian Blur filter and one from a Laplacian of Gaussian filter. I now need to merge these two images to form the hybrid image. Any idea how to do this?
import ij.*;
import ij.process.*;
import ij.gui.*;
import java.awt.*;
import ij.plugin.filter.*;
import ij.plugin.*;
import ij.io.*;
import java.io.*;
public class HybridImage_Plugin implements PlugInFilter{
int cfsize=3;
String img_lowPass;
String img_highPass;
private double[][] filter;
private double sigma;
float w=2 ,delta=0 , thr=0;
int mode=0;
//dialogbox
private boolean GUI()
{
GenericDialog gd = new GenericDialog("Enter Values", IJ.getInstance());
gd.addNumericField("Sigma (3,5,9,17,35)", cfsize, 0);
gd.addStringField("Low-Pass", "/home/atrx/ImageJ/plugins/hybridimage/l1.tif");
gd.addStringField("High-Pass", "/home/atrx/ImageJ/plugins/hybridimage/l2.tif");
return getUserParams(gd);
}
//get parameters
private boolean getUserParams(GenericDialog gd)
{
gd.showDialog();
if (gd.wasCanceled())
{
return false;
}
cfsize = (int) gd.getNextNumber();
img_lowPass = gd.getNextString();
img_highPass= gd.getNextString();
return true;
}
public int setup(String arg, ImagePlus imp) {
return PlugInFilter.NO_IMAGE_REQUIRED;
}
public void run(ImageProcessor ip) {
int[][] result;
if(GUI() == false)
{
return;
}
else
{
Opener opener1 = new Opener();
Opener opener2 = new Opener();
ImagePlus imp1= opener1.openImage(img_lowPass);
ImagePlus imp2= opener2.openImage(img_highPass);
//imp1.show("Low Pass Image");
//imp2.show("HighPass Image");
ImageProcessor ip1 = imp1.getProcessor();
ImageProcessor ip2 = imp2.getProcessor();
//lowpass filter(Gaussian Blur)
ip1.blurGaussian(cfsize);
showProcessor(ip1,"Low Pass Filtered Image");
//highpass filter(LoG)
int csize = ip2.getHeight();
int rsize = ip2.getWidth();
Rectangle rect = ip2.getRoi();
int d0,a0,acr,dow,it;
int i,x,y;
double h12, h21, ft, h1h2, h2h1, fmu, dh, dv;
double r, dt, dmx, dmn;
float logaus[] = new float[(rect.width>rect.height)? rect.width : rect.height];
float gaus[] = new float[(rect.width>rect.height)? rect.width : rect.height];
float dgaus[] = new float[(rect.width>rect.height)? rect.width : rect.height];
long zcn =0;
byte pixels[] = (byte[])ip2.getPixels();
int img_in[] = new int[rect.width*rect.height];
if (cfsize<0) cfsize=3;
if (cfsize>35) cfsize=35;
if(w<0) w=0;
int fsize = (int)(cfsize*w);
if (fsize%2 == 0)
{
fsize += 1;
}
double dimg[] = new double[rect.height*rect.width];
double dr[] = new double[rect.height*rect.width];
i=0;
for(y=rect.y;y<(rect.y+rect.height);y++)
{
for(x=rect.x;x<(rect.x+rect.width);x++)
{
img_in[i] = (pixels[(y*rsize)+x]&0xff);
i++;
}
}
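// copy the ROI into a working array that is zero-padded by fsize/2 pixels on every side,
// so the convolution below can reach past the image border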
int size = rect.width + fsize -1;
int image[] = new int[(rect.width+fsize-1)*(rect.height+fsize-1)];
int extension= (fsize/2);
for( i=0; i<rect.height;i++)
{
System.arraycopy(img_in,(i*rect.width),image,( ((i+extension)*(rect.width+fsize-1))+ extension ),rect.width);
}
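// build the 1-D Gaussian (gaus), LoG (logaus) and derivative-of-Gaussian (dgaus) kernels;
// w is reused here as the effective sigma and ft = i / w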
h1h2= h2h1 = h12 =0.0;
for(i=1; i<( (fsize+1) /2);i++)
{
w = (float)cfsize/(float)2.0/(float)1.414;
ft = i/w;
gaus[i] = (float)Math.exp(-ft*ft/2);
h1h2 += 2.0 *(gaus[i]);
logaus[i] =(float)(1-ft*ft)*(float)Math.exp(-ft*ft/2);
h2h1 += 2.0*(logaus[i]);
dgaus[i] =(float)ft*(float)Math.exp(-ft*ft/2);
}
fmu = (h2h1 + 1)* (h1h2+1);
int prel[] = new int[rect.width+1];
dmx = -99999.9;
dmn = 99999.9;
int limit = ((rect.width+fsize-1)*(rect.height+fsize-1));
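// for every ROI pixel, combine the row/column kernels into a 2-D LoG response stored in dimg[],
// tracking its min/max; dh/dv accumulate gradient terms only when a threshold thr is set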
for(d0=0;d0<rect.height;d0++)
{
for(a0=0;a0<rect.width;a0++)
{
acr = a0 + fsize/2;
dow = d0 + fsize/2;
dh = dv = 0.0;
h1h2 = h2h1 = 0.0;
for (int j=1; j<(fsize+1)/2; j++)
{
int a0d0, a0d1, a1d0, a1d1;
h12=h21=0.0;
for(i=1;i<(fsize+1)/2;i++)
{
a0d0 = acr-i+((dow-j)*size);
a0d1 = acr-i+((dow+j)*size);
a1d0 = acr+i+((dow-j)*size);
a1d1 = acr+i+((dow+j)*size);
h12 += logaus[i]*(image[a0d0] + image[a0d1]+
image[a1d0] + image[a1d1]);
h21 += gaus[i]* (image[a0d0] + image[a0d1] +
image[a1d0] + image[a1d1]);
}
a0d0 = acr-j+dow*size;
a0d1 = acr+(dow-j)*size;
a1d0 = acr+j+dow*size;
a1d1 = acr+(dow+j)*size;
h1h2 += gaus[j] * (h12+ image[a0d0]+image[a0d1]+
image[a1d0]+image[a1d1]);
h2h1 += logaus[j]*(h21+ image[a0d0]+ image[a0d1] +
image[a1d0] + image[a1d1] );
if(thr != 0.0)
{
dh += dgaus[j] * ( image[a1d0] - image[a0d0] );
dv += dgaus[j] * ( image[a1d1] - image[a0d1] );
}
}
dt = dimg[d0*rect.width+a0] = h1h2 + h2h1 + (2*image[dow*size+acr]) ;
if (dt > dmx) dmx = dt;
if (dt < dmn) dmn = dt;
if( thr!= 0.0)
{
dr[(d0*rect.width)+a0] = Math.abs(dh) + Math.abs(dv);
}
}
}
dmx = (dmx-dmn) / 2;
dmn += dmx;
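// dmx is now half the response range and dmn its midpoint; both are used to scale the output below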
int row=0, column=0;
for(d0=0;d0<rect.height;d0++)
{
for(a0=0;a0<rect.width;a0++)
{
int id = (d0*rect.width) +a0;
int index = rsize*(rect.y+d0) + (a0+rect.x);
int k = 15;
it = (int)(dt = (dimg[id] - (dmn-delta*dmx))*255 / (dmx*(1+Math.abs(delta))));
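// mode selects how the response is written back: 0 = scaled response, 1 = absolute value,
// 2 = sign only, 3 = thresholded zero crossings (mode is fixed to 0 above, so only case 0 runs here)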
switch(mode){
case 0:
pixels[index] = (byte)((dt-dmn+dmx)/dmx*127);
break;
case 1:
pixels[index] = (byte)Math.abs(it);
break;
case 2:
pixels[index] = (byte)( ((dt!=0)?((dt>0) ? 1: -1) : 0) * 192);
break;
case 3:
default:
r = dr[id];
it = ( (dt!=0) ? ((dt>0) ? 1: -1) : 0);
if( it==0 && r>=thr)
{
k = 255;
zcn++;
}
else
{
if( (it*prel[a0]<0 || it*prel[a0+1]<0) && r>=thr)
{
k = 255;
zcn++;
}
}
prel[a0+1] = it;
if(k==255 || mode!=3)
pixels[index] = (byte)k;
break;
}
}
}
showProcessor(ip2,"High Pass Filtered Image");
}
}
static void showProcessor(ImageProcessor ip, String title){
ImagePlus win = new ImagePlus(title,ip);
win.show();
}
}
Answer (score: 1)
Have you tried a weighted sum?
OUT = w*LPF + (1 - w)*HPF
This kind of sum turns up everywhere, in particular in image blending, alpha matting, and even some optimization schemes.
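A minimal sketch of that weighted sum for ImageJ, assuming both inputs are greyscale processors of the same size (for example ip1 after the Gaussian blur and ip2 after the LoG pass above); blend and alpha are illustrative names, not part of your plugin:

// Pixel-wise weighted sum: OUT = w*LPF + (1 - w)*HPF.
// Accumulate in a FloatProcessor, then rescale back to 8-bit.
static ImageProcessor blend(ImageProcessor lp, ImageProcessor hp, double alpha) {
    int width = lp.getWidth(), height = lp.getHeight();
    FloatProcessor out = new FloatProcessor(width, height);
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            double v = alpha * lp.getPixelValue(x, y)
                     + (1.0 - alpha) * hp.getPixelValue(x, y);
            out.setf(x, y, (float) v);
        }
    }
    return out.convertToByte(true); // scale result into 0..255
}

Since the plugin already imports ij.process.*, the method could be dropped into the class and called as showProcessor(blend(ip1, ip2, 0.5), "Hybrid Image") right after the two filtering steps; alpha = 0.5 gives both bands equal weight.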
However, since different regions of the image contain patches of different spatial frequencies, you may have to adapt the weight. You also have to choose what to emphasize: do you want the low-pass or the high-pass information to be more prominent? Depending on your needs, you may want to take the information from one of the images and run it through some distance or similarity measure to obtain suitable weights.
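One hypothetical way to turn such a "distance or similarity measure" into code is to let the magnitude of the high-pass response set the weight pixel by pixel, so edge-heavy regions keep more of the high-pass detail. The adaptiveBlend name and the |hp|/255 mapping below are assumptions for illustration only, not a prescribed method:

// Spatially varying blend: alpha (the low-pass weight) shrinks where the
// high-pass response is strong, so edge regions favour the high-pass image.
static ImageProcessor adaptiveBlend(ImageProcessor lp, ImageProcessor hp) {
    int width = lp.getWidth(), height = lp.getHeight();
    FloatProcessor out = new FloatProcessor(width, height);
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            double hpVal = hp.getPixelValue(x, y);
            // arbitrary choice: map |hp| into [0,1] and use it to reduce the low-pass weight
            double alpha = 1.0 - Math.min(1.0, Math.abs(hpVal) / 255.0);
            out.setf(x, y, (float) (alpha * lp.getPixelValue(x, y) + (1.0 - alpha) * hpVal));
        }
    }
    return out.convertToByte(true);
}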