Commit

add Derivatives
matthieugomez committed Jun 12, 2024
1 parent 2ad92a3 commit 1001307
Showing 8 changed files with 146 additions and 8 deletions.
2 changes: 1 addition & 1 deletion Project.toml
@@ -1,6 +1,6 @@
name = "InfinitesimalGenerators"
uuid = "2fce0c6f-5f0b-5c85-85c9-2ffe1d5ee30d"
version = "0.5.2"
version = "1.0.0"

[deps]
Arpack = "7d9fca2a-8960-54d3-9f78-7d1dccf2cb97"
1 change: 1 addition & 0 deletions src/AdditiveFunctional.jl
@@ -74,3 +74,4 @@ end




7 changes: 6 additions & 1 deletion src/InfinitesimalGenerators.jl
@@ -10,10 +10,13 @@ using Roots: fzero





include("MarkovProcess.jl")
include("AdditiveFunctional.jl")
include("feynman_kac.jl")
include("principal_eigenvalue.jl")
include("derivatives.jl")



@@ -29,5 +32,7 @@ CoxIngersollRoss,
AdditiveFunctional,
cgf,
tail_index,
-AdditiveFunctionalDiffusion
+AdditiveFunctionalDiffusion,
+FirstDerivative,
+SecondDerivative
end
2 changes: 1 addition & 1 deletion src/MarkovProcess.jl
@@ -69,7 +69,7 @@ function generator(x::AbstractVector, μx::AbstractVector, σx::AbstractVector)
    Δxm = x[max(i-1, 1) + 1] - x[max(i-1, 1)]
    Δx = (Δxm + Δxp) / 2
    # upwinding to ensure off diagonals are positive
-   if μx[i] >= 0
+   if (μx[i] >= 0) | (i == 1)
        𝕋[i, min(i + 1, n)] += μx[i] / Δxp
        𝕋[i, i] -= μx[i] / Δxp
    else
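
As a quick, hypothetical illustration of what the upwind rule above is protecting, the sketch below builds a small generator on a made-up grid and checks that every off-diagonal entry is nonnegative. The grid, drift, and volatility values are invented, and it assumes that generator(x, μx, σx) returns the assembled matrix 𝕋, which the truncated hunk does not show in full.

using InfinitesimalGenerators
x = collect(range(0.0, 1.0, length = 5))
μx = 0.1 .- 0.2 .* x                     # drift changes sign in the interior of the grid
σx = fill(0.05, length(x))
𝕋 = InfinitesimalGenerators.generator(x, μx, σx)
all(𝕋[i, j] >= 0 for i in 1:length(x), j in 1:length(x) if i != j)   # true if upwinding keeps every off-diagonal nonnegative
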
79 changes: 79 additions & 0 deletions src/derivatives.jl
@@ -0,0 +1,79 @@

struct FirstDerivative{T} <: AbstractVector{T}
    x::AbstractVector{<:Real}
    y::AbstractVector{T}
    bc::NTuple{2}{T}
    direction::Symbol
    function FirstDerivative{T}(x, y, bc, direction) where {T}
        size(x) == size(y) || throw(DimensionMismatch(
            "cannot match grid of length $(length(x)) with vector of length $(length(y))"))
        direction ∈ (:upward, :downward) || throw(ArgumentError("direction must be :upward or :downward"))
        return new(x, y, bc, direction)
    end
end

function FirstDerivative(x::AbstractVector, y::AbstractVector; bc = (0, 0), direction = :upward)
    FirstDerivative{eltype(y)}(x, y, bc, direction)
end


Base.size(d::FirstDerivative) = (length(d.x), 1)

Base.IndexStyle(d::FirstDerivative) = IndexLinear()

function Base.getindex(d::FirstDerivative{T}, i::Int) where {T}
    (; x, y, bc, direction) = d
    if direction == :upward
        if i == length(x)
            return convert(T, bc[end])
        else
            Δxp = x[min(i, length(x)-1)+1] - x[min(i, length(x)-1)]
            return (y[i+1] - y[i]) / Δxp
        end
    else
        if i == 1
            return convert(T, bc[1])
        else
            Δxm = x[max(i-1, 1) + 1] - x[max(i-1, 1)]
            return (y[i] - y[i-1]) / Δxm
        end
    end
end


struct SecondDerivative{T} <: AbstractVector{T}
    x::AbstractVector{<:Real}
    y::AbstractVector{T}
    bc::NTuple{2}{T}
    function SecondDerivative{T}(x, y, bc) where {T}
        length(x) == length(y) || throw(DimensionMismatch(
            "cannot match grid of length $(length(x)) with vector of length $(length(y))"))
        return new(x, y, bc)
    end
end

function SecondDerivative(x::AbstractVector, y::AbstractVector; bc = (0, 0))
    SecondDerivative{eltype(y)}(x, y, bc)
end


Base.size(d::SecondDerivative) = (length(d.x), 1)

Base.IndexStyle(d::SecondDerivative) = IndexLinear()

function Base.getindex(d::SecondDerivative{T}, i::Int) where {T}
    (; x, y, bc) = d
    Δxp = x[min(i, length(x)-1)+1] - x[min(i, length(x)-1)]
    Δxm = x[max(i-1, 1) + 1] - x[max(i-1, 1)]
    Δx = (Δxm + Δxp) / 2
    if i == 1
        return y[2] / (Δxp * Δx) + (y[1] - bc[1] * Δxm) / (Δxm * Δx) - 2 * y[1] / (Δxp * Δxm)
    elseif i == length(x)
        return (y[end] + bc[end] * Δxp) / (Δxp * Δx) + y[end - 1] / (Δxm * Δx) - 2 * y[end] / (Δxp * Δxm)
    else
        return y[i + 1] / (Δxp * Δx) + y[i - 1] / (Δxm * Δx) - 2 * y[i] / (Δxp * Δxm)
    end
end
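
A minimal usage sketch of the two lazy vectors defined above; the grid and values are illustrative, not taken from the package's tests.

using InfinitesimalGenerators
x = [0.0, 1.0, 2.0, 3.0]
y = x .^ 2
dy = FirstDerivative(x, y)      # upward differences; the last entry falls back to bc[end] = 0
dy[1]                           # (y[2] - y[1]) / (x[2] - x[1]) == 1.0
dy[length(x)]                   # boundary value from bc, 0.0
d2y = SecondDerivative(x, y)
d2y[2]                          # central second difference on the interior, == 2.0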



12 changes: 7 additions & 5 deletions src/principal_eigenvalue.jl
@@ -18,17 +18,19 @@ In other words, all eigenvalues of 𝕋 have real part <= 0. This means that
"""
function principal_eigenvalue(𝕋; r0 = ones(size(𝕋, 1)))
    a, η, r = 0.0, 0.0, r0
-   if maximum(abs.(sum(𝕋, dims = 1))) < 1e-9
-       # if columns sum up to zero
+   if (maximum(abs.(sum(𝕋, dims = 1))) < 1e-9) | (maximum(abs.(sum(𝕋, dims = 2))) < 1e-9)
+       # if columns or rows sum up to zero
        # we know principal is associated with zero
        if 𝕋 isa Tridiagonal
            η = 0.0
            r = [1.0 ; - Tridiagonal(𝕋.dl[2:end], 𝕋.d[2:end], 𝕋.du[2:end]) \ vec(𝕋[2:end, 1])]
        else
+           η = 0.0
+           r = [1.0 ; - 𝕋[2:end, 2:end] \ vec(𝕋[2:end, 1])]
            # standard way of solving Ax = 0 is to do inverse iteration https://stackoverflow.com/questions/33563401/lapack-routines-for-solving-a-x-0
-           vals, vecs = Arpack.eigs(𝕋; v0 = collect(r0), nev = 1, which = :LM, sigma = 0.0)
-           η = vals[1]
-           r = vecs[:, 1]
+           # vals, vecs = Arpack.eigs(𝕋; v0 = collect(r0), nev = 1, which = :LM, sigma = 0.0)
+           # η = vals[1]
+           # r = vecs[:, 1]
        end
    else
        a = - minimum(diag(𝕋))
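
To see why the direct solve above yields the eigenvector for the zero eigenvalue, here is a standalone sketch with a made-up 3×3 intensity matrix whose rows sum to zero; it uses only LinearAlgebra, not the package's API.

using LinearAlgebra
𝕋 = [-0.5  0.5  0.0;
      0.2 -0.6  0.4;
      0.0  0.3 -0.3]                                # rows sum to zero, so 0 is an eigenvalue
r = [1.0 ; - 𝕋[2:end, 2:end] \ vec(𝕋[2:end, 1])]    # fix r[1] = 1 and solve the remaining rows of 𝕋r = 0
maximum(abs.(𝕋 * r))                                # ≈ 0: r spans the null space (here r ≈ [1, 1, 1])
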
50 changes: 50 additions & 0 deletions src/upwinding.jl
@@ -0,0 +1,50 @@

# Pointwise finite-difference helpers mirroring the vector types in derivatives.jl
# (this file is not include-d by the module).

struct Derivative
    x::Vector{Float64}
end

# First difference of y on the grid x at index i, with a one-sided fallback to bc at the boundary
# and a zero return when the local grid spacing is zero.
function FirstDerivative(x, y::AbstractVector{T}, i; bc = (0, 0), direction = :up) where {T}
    if direction == :up
        i == length(x) && return convert(T, bc[end])
        Δxp = x[min(i, length(x) - 1) + 1] - x[min(i, length(x) - 1)]
        return Δxp != 0 ? (y[i + 1] - y[i]) / Δxp : zero(T)
    elseif direction == :down
        i == 1 && return convert(T, bc[1])
        Δxm = x[max(i - 1, 1) + 1] - x[max(i - 1, 1)]
        return Δxm != 0 ? (y[i] - y[i - 1]) / Δxm : zero(T)
    end
end

# Second difference of y on the (possibly non-uniform) grid x at index i, using bc at the two edges.
function SecondDerivative(x, y::AbstractVector{T}, i; bc = (0, 0)) where {T}
    Δxp = x[min(i, length(x) - 1) + 1] - x[min(i, length(x) - 1)]
    Δxm = x[max(i - 1, 1) + 1] - x[max(i - 1, 1)]
    Δx = (Δxm + Δxp) / 2
    if i == 1
        return y[2] / (Δxp * Δx) + (y[1] - bc[1] * Δxm) / (Δxm * Δx) - 2 * y[1] / (Δxp * Δxm)
    elseif i == length(x)
        return (y[end] + bc[end] * Δxp) / (Δxp * Δx) + y[end - 1] / (Δxm * Δx) - 2 * y[end] / (Δxp * Δxm)
    else
        return y[i + 1] / (Δxp * Δx) + y[i - 1] / (Δxm * Δx) - 2 * y[i] / (Δxp * Δxm)
    end
end

struct ∂down
    grid::Vector{Float64}
end
1 change: 1 addition & 0 deletions test/runtests.jl
@@ -1,5 +1,6 @@
using InfinitesimalGenerators, Test, Statistics, LinearAlgebra, Expokit


xbar = 0.0
κ = 0.1
σ = 0.02

2 comments on commit 1001307

@matthieugomez (owner, author)

@JuliaRegistrator register()

@JuliaRegistrator

Registration pull request created: JuliaRegistries/General/108812

Tip: Release Notes

Did you know you can add release notes too? Just add markdown-formatted text underneath the comment after the text
"Release notes:" and it will be added to the registry PR. If TagBot is installed, it will also be added to the
release that TagBot creates. For example:

@JuliaRegistrator register

Release notes:

## Breaking changes

- blah

To add them here just re-invoke and the PR will be updated.

Tagging

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the GitHub interface, or via:

git tag -a v1.0.0 -m "<description of version>" 1001307452d21d587d31e531c83cb5881fb1f3eb
git push origin v1.0.0
