SharedArray-未更新

时间:2018-06-19 15:29:24

标签: julia

我对ekwg_monte_carlo_bias函数有疑问。在此功能中,有SharedArray个未更新。有什么建议吗?

注意:通过将@parallel for i in 1: m替换为for i in 1: m并删除addprocs (Sys.CPU_CORES),该功能按预期工作。

朱莉娅代码

@everywhere using Distributions
@everywhere using Optim

@everywhere function gexp(x,par)
    λ = par[1]
    λ * exp(-λ * x)
end

# valor = hquadrature(x -> gexp(x,1), 0, 100)[1]

@everywhere function Gexp(x,par)
    λ = par[1]
    1- exp(-λ * x)
end

@everywhere function QGexp(x,par)
    λ = par[1]
    # A função Exponential no pacote Distributions é reparametrizada
    # como 1/lambda. Dessa forma, para trabalhar com densidade na forma
    # λ * exp(-λ*x) é preciso tomar 1/λ.
    quantile.(Exponential(1/λ),x)
end

@everywhere function sample_ekwg(QG, n, par0, par1...)
    a = par0[1]
    b = par0[2]
    c = par0[3]

   # r = randjump(MersenneTwister(0),8);   

    u = rand(n)

    p = (1 - (1 - u.^(1/c)).^(1/b)).^(1/a)

    QG(p, par1...)
end

@everywhere function cdf_ekwg(cdf, x, par0, par1...)
   a = par0[1]
   b = par0[2]
   c = par0[3]

   (1 - (1 - cdf.(x,par1...).^a).^b).^c
end

@everywhere function pdf_ekwg(cdf, pdf, x, par0, par1...)
   a = par0[1]
   b = par0[2]
   c = par0[3]

   g = pdf(x, par1...)
   G = cdf(x, par1...)

   a * b * c * g * G.^(a-1) * (1-G.^a).^(b-1) * (1 - (1-G.^a).^b).^(c-1)

end

@everywhere function loglike(cdf, pdf, x, par0, par1...)
   n = length(x)
   soma = 0
   for i = 1:n
      soma += log(pdf_ekwg(cdf, pdf, x[i], par0, par1...))
   end
   return -soma # Queremos minimizar loglike.
end

@everywhere function myoptimize(sample_boot)
   try
   # Array com as estimativas.
   optimize(par0 -> loglike(G, g, sample_boot, par0, par1...), starts,
            Optim.Options(g_tol = 1e-2))
   catch
      -1
   end
end

@everywhere function ekwg_bootstrap_bias(B, G, g, data, original_estimates, starts, par1...)

   result_boot = SharedArray{Float64}(length(original_estimates)*B)

   j = 1
   while j <= B

      sample_boot = sample(data, length(data), replace = true)

      result = myoptimize(sample_boot)

      if (result == -1) || (result.g_converged == false)
          continue
      end
       result_boot[(3*j-2):3*j] = result.minimizer
       j = j+1
   end 
   estimates_matrix = convert.(Float64,reshape(result_boot,length(starts),B))'

   error  = std(estimates_matrix,1)

   return error, (2.*original_estimates' .- mean(estimates_matrix,1))'
end 

@everywhere function ekwg_monte_carlo_bias(M, B, n, true_parameters, seed, par1...)

    result_mc_correct_vector = SharedArray{Float64}(length(true_parameters)*m)
    result_mc_vector = SharedArray{Float64}(length(true_parameters)*m)
    result_error_boot = SharedArray{Float64}(length(true_parameters)*m)
    output1 = SharedArray{Float64}(M,length(true_parameters))
    output2 = SharedArray{Float64}(M,length(true_parameters))
    output3 = SharedArray{Float64}(M,length(true_parameters))

    #for i in 1:M
    #Threads.@threads

    @parallel for i in 1:m
        true_sample = sample_ekwg(qgexp, n, true_parameters, par1...)
        result_mc = myoptimize(true_sample)

        if result_mc != -1
            result_mc_vector[(3*i-2):3*i] = result_mc.minimizer
            result_error_boot[(3*i-2):3*i],result_mc_correct_vector[(3*i-2):3*i] = ekwg_bootstrap_bias(b, g, g, true_sample,
                                                result_mc.minimizer, true_parameters, par1...)
        end
    end
    #rmprocs()
    output1 = convert.(Float64,reshape(result_mc_vector,length(true_parameters),M))'
    output2 = convert.(Float64,reshape(result_mc_correct_vector,length(true_parameters),M))'
    output3 = convert.(Float64,reshape(result_error_boot,length(true_parameters),M))'
    return (mean(output1,1),mean(output2,1),mean(output3,1))
end

g = gexp;
G = Gexp;
qgexp = QGexp;
starts = [1.0,1.0,1.0];
true_parameters = [1.0,1.0,1.0];
par1 = 1.5;
m = 10;
b = 50;
n = 100;
addprocs(Sys.CPU_CORES)
@time mc_estimates, mc_estimates_boot, mc_error_boot = ekwg_monte_carlo_bias(m, b, n, true_parameters, 0, par1)

0 个答案:

没有答案