Commit c06609c5 authored by Alberto Ramos

hyperd working, ForwardDiff better

After a proper configuration of the chunk size, ForwardDiff
outperforms hyperd basically always.

This commit stores the last version of hyperd.
parent eb1e915f
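
The speedup described in the commit message comes from fixing the ForwardDiff chunk size by hand instead of letting it be picked per call. A minimal sketch of the pattern used throughout the diff below, with an illustrative function f that is not part of the commit:

    import ForwardDiff
    import ForwardDiff: GradientConfig, HessianConfig, Chunk

    # Illustrative two-argument scalar function (stand-in for fvec/ccsq below).
    f(x::Vector) = x[1]^2 + sin(x[2])
    x = [1.0, 2.0]

    # Build the config once with an explicit chunk size; for small inputs the
    # chunk is matched to the input length, so a single forward pass yields
    # all partial derivatives.
    gcfg = GradientConfig(f, x, Chunk{2}())
    grad = ForwardDiff.gradient(f, x, gcfg)

    hcfg = HessianConfig(f, x, Chunk{2}())
    hess = ForwardDiff.hessian(f, x, hcfg)

The same idea appears below with Chunk{2}() for the two-operand gradients and Chunk{8}() for the Hessians of the chi-square functions.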
@@ -12,6 +12,7 @@
 module ADerrors
 import ForwardDiff, Statistics, FFTW, LinearAlgebra, QuadGK, BDIO, Printf
+import ForwardDiff: HessianConfig, GradientConfig, Chunk, hessian!
 # Include data types
 include("ADerrorsTypes.jl")
...
@@ -23,8 +23,9 @@ for op in (:+, :-, :*, :/, :^, :atan, :hypot)
     function fvec(x::Vector)
         return Base.$op(x[1], x[2])
     end
-    grad = ForwardDiff.gradient(fvec, [a.mean, b.mean])
+    x = [a.mean, b.mean]
+    cfg = GradientConfig(fvec, x, Chunk{2}());
+    grad = ForwardDiff.gradient(fvec, x, cfg)
     if (length(a.der) > length(b.der))
         p = similar(a.prop)
...
@@ -31,12 +31,17 @@ function chiexp(hess::Array{Float64, 2}, data::Vector{uwreal}, W::Vector{Float64
     n = size(hess, 1) - m
     hm = view(hess, 1:n, n+1:n+m)
-    sm = hm * LinearAlgebra.Diagonal(1.0 ./ sqrt.(W))
-    Px = LinearAlgebra.Symmetric(LinearAlgebra.Diagonal(W)) -
-         LinearAlgebra.Symmetric(hm' *
-         LinearAlgebra.inv(LinearAlgebra.Symmetric(sm * sm')) *
-         hm)
+    sm = Array{Float64, 2}(undef, n, m)
+    for i in 1:n, j in 1:m
+        sm[i,j] = hm[i,j] / sqrt.(W[j])
+    end
+    maux = sm * sm'
+    hi = LinearAlgebra.pinv(maux)
+    Px = -hm' * hi * hm
+    for i in 1:m
+        Px[i,i] = W[i] + Px[i,i]
+    end
     return trcov(Px, data)
 end
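
For orientation: the old and new lines of this hunk compute the same quantity; the new version builds it explicitly and uses a pseudo-inverse instead of LinearAlgebra.inv, presumably to tolerate a singular block. Reading the code above, with W the vector of weights and hm the mixed parameter-data block of the Hessian, the matrix handed to trcov in both versions is

    s_m = h_m \, \mathrm{diag}(W)^{-1/2}, \qquad
    P_x = \mathrm{diag}(W) - h_m^{T}\,\bigl(s_m s_m^{T}\bigr)^{+}\, h_m
        = \mathrm{diag}(W) - h_m^{T}\,\bigl(h_m\,\mathrm{diag}(W)^{-1} h_m^{T}\bigr)^{+}\, h_m .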
@@ -52,10 +57,9 @@ function chiexp(hess::Array{Float64, 2}, data::Vector{uwreal}, W::Array{Float64,
     hm = view(hess, 1:n, n+1:n+m)
     sm = hm * Li'
-    Px = LinearAlgebra.Symmetric(W) -
-         LinearAlgebra.Symmetric(hm' *
-         LinearAlgebra.inv(LinearAlgebra.Symmetric(sm * sm')) *
-         hm)
+    maux = sm * sm'
+    hi = LinearAlgebra.pinv(maux)
+    Px = W - hm' * hi * hm
     return trcov(Px, data)
 end
@@ -76,9 +80,10 @@ function chiexp(chisq::Function,
         xav[i] = data[i-n].mean
     end
     ccsq(x::Vector) = chisq(view(x, 1:n), view(x, n+1:n+m))
+    cfg = ForwardDiff.HessianConfig(ccsq, xav, Chunk{8}());
     hess = Array{Float64}(undef, n+m, n+m)
-    # @time ForwardDiff.hessian!(hess, ccsq, xav)
-    hyperd_hessian!(hess, ccsq, xav)
+    ForwardDiff.hessian!(hess, ccsq, xav, cfg)
     cse = 0.0
     if (m-n > 0)
@@ -119,10 +124,11 @@ function fit_error(chisq::Function,
         xav[i] = data[i-n].mean
     end
-    ccsq(x::Vector) = chisq(view(x, 1:n), view(x, n+1:n+m))
+    ccsq(x::Vector) = chisq(x[1:n], x[n+1:n+m])
+    cfg = ForwardDiff.HessianConfig(ccsq, xav, Chunk{8}());
     hess = Array{Float64}(undef, n+m, n+m)
-    # @time ForwardDiff.hessian!(hess, ccsq, xav)
-    hyperd_hessian!(hess, ccsq, xav)
+    ForwardDiff.hessian!(hess, ccsq, xav, cfg)
     hinv = LinearAlgebra.pinv(hess[1:n,1:n])
     grad = - hinv[1:n,1:n] * hess[1:n,n+1:n+m]
...
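
The two context lines closing the last hunk are the usual linearization of the chi-square minimum: the gradient of ccsq with respect to the fit parameters vanishes at the minimum, and differentiating that condition with respect to the data gives the sensitivity of the fitted parameters (a reading of the surrounding source, not something stated in the commit),

    \frac{\partial p}{\partial d}
      = -\left(\frac{\partial^2 \chi^2}{\partial p\,\partial p}\right)^{-1}
         \frac{\partial^2 \chi^2}{\partial p\,\partial d} ,

which is exactly grad = - hinv[1:n,1:n] * hess[1:n,n+1:n+m] and is why only the parameter-parameter block of the Hessian is pseudo-inverted.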