Is it correct to build a dictionary using multiple images?

Asked: 2017-03-24 08:09:06

Tags: machine-learning scikit-learn computer-vision

I wrote some code that builds a dictionary from multiple images.

Here is my code:

from time import time

import numpy as np
from PIL import Image

from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d

from scipy.misc import imsave

# Training images, converted to 512x512 grayscale
c = np.asarray(Image.open('047.jpg').convert('L').resize((512, 512), Image.ANTIALIAS))
d = np.asarray(Image.open('048.jpg').convert('L').resize((512, 512), Image.ANTIALIAS))
e = np.asarray(Image.open('049.jpg').convert('L').resize((512, 512), Image.ANTIALIAS))
f = np.asarray(Image.open('046.jpg').convert('L').resize((512, 512), Image.ANTIALIAS))
g = np.asarray(Image.open('038.jpg').convert('L').resize((512, 512), Image.ANTIALIAS))
h = np.asarray(Image.open('039.jpg').convert('L').resize((512, 512), Image.ANTIALIAS))

# Test image to be denoised, same preprocessing
n0 = np.asarray(Image.open('037.jpg').convert('L').resize((512, 512), Image.ANTIALIAS))

# Scale to [0, 1] and add Gaussian noise (sigma = 0.075)
n0 = n0 / 255.0
height, width = n0.shape
n0 = n0 + 0.075 * np.random.randn(height, width)

imsave('noise.png', n0)


patch_size = (8, 8)
t0 = time()

# Extract 8x8 patches from every training image and pool them
data1 = extract_patches_2d(c, patch_size)
data2 = extract_patches_2d(d, patch_size)
data3 = extract_patches_2d(e, patch_size)
data4 = extract_patches_2d(f, patch_size)
data5 = extract_patches_2d(g, patch_size)
data6 = extract_patches_2d(h, patch_size)

data = np.concatenate((data1, data2, data3, data4, data5, data6), axis=0)

# Flatten each patch into a row vector
data = data.reshape(data.shape[0], -1)

print('Extracted patch matrix shape:', data.shape)

# Normalize the patches: zero mean and unit variance per feature
data = data - np.mean(data, axis=0)
data = data / np.std(data, axis=0)

t1 = time()
print('Total time :', round(t1 - t0, 2), 'sec')

print('Learning the dictionary ....')

t2 = time()
n_iter = 1000

# Learn a dictionary of 100 atoms from the pooled patches
dico = MiniBatchDictionaryLearning(n_components=100, alpha=3, n_iter=n_iter)
V = dico.fit(data).components_

What I actually want is to train a well-learned dictionary that I can use to denoise images.
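
The denoising step I have in mind would look roughly like this (a minimal sketch modeled on the scikit-learn image-denoising example; it assumes V, n0, patch_size, height and width from the code above, and the OMP settings and intermediate names are only placeholders):

# Extract patches from the noisy image and remove their mean
noisy_patches = extract_patches_2d(n0, patch_size)
noisy_patches = noisy_patches.reshape(noisy_patches.shape[0], -1)
intercept = np.mean(noisy_patches, axis=0)
noisy_patches = noisy_patches - intercept

# Sparse-code the noisy patches against the learned dictionary V
coder = SparseCoder(dictionary=V, transform_algorithm='omp',
                    transform_n_nonzero_coefs=2)
code = coder.transform(noisy_patches)

# Reconstruct the patches from their sparse codes and rebuild the image
denoised_patches = np.dot(code, V) + intercept
denoised_patches = denoised_patches.reshape(len(denoised_patches), *patch_size)
denoised = reconstruct_from_patches_2d(denoised_patches, (height, width))

imsave('denoised.png', denoised)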

Is it correct to train the dictionary using multiple images?

0 Answers:

There are no answers yet.