如何使用optim.jl估计ARMA(p,q)MLE?

时间:2017-12-07 12:34:54

标签: julia

我试图用julia来估计ARMA(p,q)模型。我在优化对数似然函数时遇到问题。 我有一个名为" SsfLogLikConc"的函数。估计我的集中对数似然,我希望看到它作为vP的函数,即我的参数phi和theta的值,然后从值vP0=zeros(cp+cq,1)开始优化此函数,其中cp和cp代表我的ARMA(p,q)模型的p和q阶。我试着这样做:

vP0=zeros(cp+cq,1)
function f(vP)
  SsfLogLikConc(vP, vy, cp, cq)
end
using Optim
res=optimize(f, vP0, BFGS())

但即使更改方法,我仍然会从优化功能中获得错误。这是完整的代码:

function FisherInvTransform(Vp)
           vt = (exp(2*vP)-1) ./ (exp(2*vP)+1)
end

function  ReparAR(vr)
  cp = length(vr)
  vphi = zeros(cp,1);
  vphi[1] = vr[1]
 for k=2:cp
  vphi[1:k-1] = vphi[1:k-1] - vr[k] * vphi[k-1:-1:1]
   vphi[k] = vr[k]
 end
end

function ReparMA(vr)
  cq = length(vr)
  vtheta = zeros(cq,1)
  vtheta[1] = vr[1]
 for k = 2:cq
  vtheta[1:k-1] = vtheta[1:k-1] + vr[k] * vtheta[k-1:-1:1]
  vtheta[k] = vr[k]
 end
end

function SetStateSpaceModel(vP, cp, cq)
  cm = max(cp,cq)
  vphi = zeros(cm, 1)
  vtheta = zeros(cm, 1)
  if (cp > 0)
    vphi[1:cp] = ReparAR(FisherInvTransform(vP[1:cp]))
  end
  if (cq > 0)
    vtheta[1:cq] = ReparMA(FisherInvTransform(vP[cp+1:cp+cq]));
  end
 #Measurement equation: % y_{t} = Z_t \alpha_{t} +  G_t \epsilon_t
 mZ = [1, zeros(1, cm-1)]
 mGG = 1 #G_t * G_t'
 #Transition equation:  % \alpha_{t+1} = T_t \alpha_{t} + H_t \epsilon_t
 mT = [vphi,  [eye(cm-1); zeros(1, cm-1)] ]
 disp(mT)
 disp( vphi)
 mH = vtheta+vphi
 mHH = mH*mH' #H_t * H_t'
 mHG = mH
 #initial conditions
 va = zeros(cm,1)
 mP = reshape(inv(eye(cm^2)-kron(mT,mT)) * reshape(mHH, cm^2,1) , cm, cm)
end

function KalmanFilter(vy, mZ, mGG, mT, mHH, mHG, va, mP)
  cm = length(va) #n. of state elements and of diffuse elements
  ck = 1
  cn = length(vy)
  #Initialisation of matrices and scalars
  dLogf = 0
  dSumSquares = 0
  vInnovations = NaN(1, cn) #stores the KF innovations
  vVarInnovations = NaN(1, cn)
  mStatePred = NaN(cm, cn) #stores the states predictions
  mCovStatePred = NaN(cm, cn)
  for i = 1:cn
    dv = vy[i] - mZ * va;           dF = mZ * mP * mZ' + mGG;

                                    vK = (mT * mP * mZ' + mHG) / dF;
    va = mT * va + vK * dv;         mP = mT * mP * mT' + mHH - vK * vK' * dF ;
    if (i > ck)
        vInnovations[i] = dv;       vVarInnovations[i] = dF;
        dLogf = dLogf + log(dF);    dSumSquares = dSumSquares + dv^2 /dF;
    end
    mStatePred[:,i] = va;           mCovStatePred[:,i] = diag(mP);
  end
 dSigma = dSumSquares/(cn-ck);
 dLogLik = -0.5 * ((cn - ck) * log(2 * pi) + dLogf + dSumSquares );
 dLogLikConc = -0.5*((cn - ck)* (log(2 * pi * dSigma)+1) + dLogf );
 dPev =  dSigma *  dF ;    # Final prediction error variance
end

function SsfLogLikConc(vP, vy, cp, cq)
  SetStateSpaceModel(vP, cp, cq)
  dLogF = 0; dSumSquares = 0;
  cn = length(vy)
  for i = 1:cn
    dv = vy[i] - mZ * va;           dF = mZ * mP * mZ' + mGG;
                                    vK = (mT * mP * mZ' + mHG) / dF;
    va = mT * va + vK * dv;         mP = mT * mP * mT' + mHH - vK * dF * vK'  ;
    dLogF = dLogF + log(dF);
    dSumSquares = dSumSquares + dv^2 /dF;
  end
 dSigma2 = dSumSquares/cn;
 dLogLikConc = 0.5*( cn * (log(dSigma2)+1) + dLogF ); # concentrated LF (change of sign)
end

#Pkg.add("DataFrames")
using DataFrames;
cd("$(homedir())/Desktop")
pwd()
df=readtable("df.csv")
df
o=df[2]
mean(o)
vy=zeros(1000)
for i=1:length(o)
 vy[i]=o[i]-mean(o)
end

cp = 1; cq =1;
vP0 = zeros(cp+cq,1)

function f(vP)
  SsfLogLikConc(vP, vy, cp, cq)
end

#Pkg.add("Optim")
using Optim
res=optimize(f, vP0, BFGS())
谢谢你的阅读,希望有人能帮帮我!

0 个答案:

没有答案