I would like to use the lmfit module to fit a function to a variable number of data sets, with some shared and some individual parameters.

Here is an example that generates Gaussian data and fits each data set separately:
import numpy as np
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_fit
def func_gauss(params, x, data=None):
    """Gaussian model; returns the residual if data is given, otherwise the model"""
    A = params['A'].value
    mu = params['mu'].value
    sigma = params['sigma'].value
    model = A*np.exp(-(x-mu)**2/(2.*sigma**2))
    if data is None:
        return model
    return data - model
x = np.linspace(-1, 2, 100)
data = []
for i in np.arange(5):
    params = Parameters()
    params.add('A',     value=np.random.rand())
    params.add('mu',    value=np.random.rand()+0.1)
    params.add('sigma', value=0.2+np.random.rand()*0.1)
    data.append(func_gauss(params, x))
plt.figure()
for y in data:
    fit_params = Parameters()
    fit_params.add('A',     value=0.5, min=0, max=1)
    fit_params.add('mu',    value=0.4, min=0, max=1)
    fit_params.add('sigma', value=0.4, min=0, max=1)
    minimize(func_gauss, fit_params, args=(x, y))
    report_fit(fit_params)
    y_fit = func_gauss(fit_params, x)
    plt.plot(x, y, 'o', x, y_fit, '-')
plt.show()
# ideally I would like to write:
#
# fit_params = Parameters()
# fit_params.add( 'A' , value=0.5, min=0, max=1)
# fit_params.add( 'mu' , value=0.4, min=0, max=1)
# fit_params.add( 'sigma', value=0.4, min=0, max=1, shared=True)
# minimize(func_gauss, fit_params, args=(x, data))
#
# or:
#
# fit_params = Parameters()
# fit_params.add( 'A' , value=0.5, min=0, max=1)
# fit_params.add( 'mu' , value=0.4, min=0, max=1)
#
# fit_params_shared = Parameters()
# fit_params_shared.add( 'sigma', value=0.4, min=0, max=1)
# call_function(func_gauss, fit_params, fit_params_shared, args=(x, data))
Answer 0 (score: 16)
I think you're most of the way there. You need to put the data sets into an array or structure that can be used in a single, global objective function, which you pass to minimize() so that all data sets are fit with one set of Parameters. Within that set, you can share parameters across data sets as you like. Expanding your example a bit, the code below does a single fit to 5 different Gaussian data sets. As an example of tying parameters across data sets, the data are generated with nearly identical sigma values, and the fit forces them to be exactly equal: I create 5 different sigma parameters ('sig_1', 'sig_2', ..., 'sig_5'), but then constrain them to the same value with a mathematical expression. The problem therefore has 11 variables, not 15.
import numpy as np
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_fit
def gauss(x, amp, cen, sigma):
    """basic gaussian"""
    return amp*np.exp(-(x-cen)**2/(2.*sigma**2))
def gauss_dataset(params, i, x):
    """calc gaussian from params for data set i
    using simple, hardwired naming convention"""
    amp = params['amp_%i' % (i+1)].value
    cen = params['cen_%i' % (i+1)].value
    sig = params['sig_%i' % (i+1)].value
    return gauss(x, amp, cen, sig)
def objective(params, x, data):
    """calculate total residual for fits to several data sets held
    in a 2-D array, and modeled by Gaussian functions"""
    ndata, nx = data.shape
    resid = 0.0*data[:]
    # make residual per data set
    for i in range(ndata):
        resid[i, :] = data[i, :] - gauss_dataset(params, i, x)
    # now flatten this to a 1D array, as minimize() needs
    return resid.flatten()
# create 5 datasets
x = np.linspace(-1, 2, 151)
data = []
for i in np.arange(5):
    amp = 0.60 + 9.50*np.random.rand()
    cen = -0.20 + 1.20*np.random.rand()
    sig = 0.25 + 0.03*np.random.rand()
    dat = gauss(x, amp, cen, sig) + np.random.normal(size=len(x), scale=0.1)
    data.append(dat)

# data has shape (5, 151)
data = np.array(data)
assert data.shape == (5, 151)
# create 5 sets of parameters, one per data set
fit_params = Parameters()
for iy, y in enumerate(data):
    fit_params.add('amp_%i' % (iy+1), value=0.5, min=0.0,  max=200)
    fit_params.add('cen_%i' % (iy+1), value=0.4, min=-2.0, max=2.0)
    fit_params.add('sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0)

# but now constrain all values of sigma to have the same value
# by assigning sig_2, sig_3, .. sig_5 to be equal to sig_1
for iy in (2, 3, 4, 5):
    fit_params['sig_%i' % iy].expr = 'sig_1'
# run the global fit to all the data sets
result = minimize(objective, fit_params, args=(x, data))
report_fit(result.params)

# plot the data sets and fits
plt.figure()
for i in range(5):
    y_fit = gauss_dataset(result.params, i, x)
    plt.plot(x, data[i, :], 'o', x, y_fit, '-')
plt.show()
For what it's worth, I would consider holding the multiple data sets in a dictionary or a list of DataSet classes rather than in a multi-dimensional array; a rough sketch of that idea follows. In any case, I hope this helps you see what you actually need to do.
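The sketch below is purely illustrative and not part of lmfit: DataSet and objective_list are made-up names, gauss() and fit_params are reused from the code above, and one benefit of this layout is that each data set can carry its own x array.

import numpy as np
from collections import namedtuple
from lmfit import minimize

# hypothetical container for one data set: x values, y values, and the label
# used as the suffix of its parameter names ('amp_1', 'cen_1', 'sig_1', ...)
DataSet = namedtuple('DataSet', ['x', 'y', 'label'])

def objective_list(params, datasets):
    """residuals for a list of DataSet objects, concatenated to one 1-D array"""
    resid = []
    for ds in datasets:
        amp = params['amp_%s' % ds.label].value
        cen = params['cen_%s' % ds.label].value
        sig = params['sig_%s' % ds.label].value
        resid.append(ds.y - gauss(ds.x, amp, cen, sig))
    return np.concatenate(resid)

# usage:
# datasets = [DataSet(x, data[i, :], '%i' % (i+1)) for i in range(5)]
# result = minimize(objective_list, fit_params, args=(datasets,))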
Answer 1 (score: 0)
I used a simple approach: define a function whose first n (= cargsnum) parameters are common to all data sets, while the remaining parameters are individual to each set:
def likelihood_common(var, xlist, ylist, mlist, cargsnum):
    """total likelihood: the first cargsnum entries of var are shared by
    all model functions in mlist, the remaining entries are individual"""
    cvars = var[:cargsnum]
    # number of individual (non-shared) arguments of each model function
    iargnum = [model.__code__.co_argcount - 1 - cargsnum for model in mlist]
    # starting index of each model's individual arguments inside var
    argpos = [cargsnum, ] + list(np.cumsum(iargnum[:-1]) + cargsnum)
    args = [list(cvars) + list(var[pos:pos+iarg]) for pos, iarg in zip(argpos, iargnum)]
    # likelihood(args, x, y, model) is assumed to be defined elsewhere
    res = [likelihood(*arg) for arg in zip(args, xlist, ylist, mlist)]
    return np.sum(res)
Here it is assumed that every data set has the same weight. The problem I ran into with this approach is tediously slow computation and instability when there are many fit parameters and data sets.
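A possible variation that drops the equal-weight assumption, again only a sketch: pass a list of per-data-set weights and scale each term before summing. The wlist argument is hypothetical, and likelihood() is assumed to be defined elsewhere, as above.

def likelihood_common_weighted(var, xlist, ylist, mlist, wlist, cargsnum):
    """same shared/individual parameter splitting as above, but each
    data set's likelihood term is multiplied by its weight from wlist"""
    cvars = var[:cargsnum]
    iargnum = [model.__code__.co_argcount - 1 - cargsnum for model in mlist]
    argpos = [cargsnum, ] + list(np.cumsum(iargnum[:-1]) + cargsnum)
    args = [list(cvars) + list(var[pos:pos+iarg]) for pos, iarg in zip(argpos, iargnum)]
    # weight each per-data-set contribution before summing
    res = [w*likelihood(*arg) for w, arg in zip(wlist, zip(args, xlist, ylist, mlist))]
    return np.sum(res)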