Add general linear second-order elliptic operator (#51)
* add general linear second-order elliptic operator and equation

* format

* fix test

* add comma [skip ci]
JoshuaLampert authored May 14, 2024
1 parent 45af2ef commit 7353f96
Showing 9 changed files with 165 additions and 21 deletions.
2 changes: 1 addition & 1 deletion docs/src/pde.md
@@ -4,5 +4,5 @@ TODO:

* Explain basic setup and basics of collocation for stationary and time-dependent PDEs
* Define custom differential operators and PDEs and solve them
* Stationary and time-dependent PDEs
* Stationary (example Laplace in L shape) and time-dependent PDEs
* AD vs analytic derivatives
36 changes: 36 additions & 0 deletions examples/PDEs/anisotropic_elliptic_2d_basic.jl
@@ -0,0 +1,36 @@
using KernelInterpolation
using Plots

# analytical solution of equation (manufactured solution)
u(x, equations) = exp(x[1] * x[2])

# coefficients of elliptic equation
A(x) = [x[1] * x[2]+1 sin(x[2])
sin(x[2]) 2]
b(x) = [x[1]^2, 1]
c(x) = 3 * x[2]^2 * x[1] + 4 * x[2]
# right-hand side of the elliptic equation (computed from the analytical solution)
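# (for u(x) = exp(x[1] * x[2]): ∂u/∂x₁ = x[2] * u, ∂u/∂x₂ = x[1] * u,
#  ∂²u/∂x₁² = x[2]^2 * u, ∂²u/∂x₂² = x[1]^2 * u, ∂²u/∂x₁∂x₂ = (1 + x[1] * x[2]) * u)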
function f(x, equations)
AA = equations.op.A(x)
bb = equations.op.b(x)
cc = equations.op.c(x)
return (-AA[1, 1] * x[2]^2 - (AA[1, 2] + AA[2, 1]) * (1 + x[1] * x[2]) -
AA[2, 2] * x[1]^2 + bb[1] * x[2] + bb[2] * x[1] + cc) * u(x, equations)
end
pde = EllipticEquation(A, b, c, f)

n = 10
nodeset_inner = homogeneous_hypercube(n, (0.1, 0.1), (0.9, 0.9); dim = 2)
n_boundary = 3
nodeset_boundary = homogeneous_hypercube_boundary(n_boundary; dim = 2)
# Dirichlet boundary condition (here taken from analytical solution)
g(x) = u(x, pde)

kernel = WendlandKernel{2}(3, shape_parameter = 0.8)
sd = SpatialDiscretization(pde, nodeset_inner, g, nodeset_boundary, kernel)
itp = solve_stationary(sd)

many_nodes = homogeneous_hypercube(20; dim = 2)

plot(many_nodes, itp)
plot!(many_nodes, x -> u(x, pde), label = "analytical solution")
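
A quick way to sanity-check this example against the manufactured solution — a minimal sketch, not part of the committed file, assuming the returned interpolation `itp` can be evaluated at a point and that a `NodeSet` is iterable:

```julia
# hypothetical accuracy check: maximum pointwise error on the evaluation nodes
errors = [abs(itp(x) - u(x, pde)) for x in many_nodes]
println("maximum pointwise error: ", maximum(errors))
```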
5 changes: 3 additions & 2 deletions src/KernelInterpolation.jl
@@ -36,8 +36,9 @@ export GaussKernel, MultiquadricKernel, InverseMultiquadricKernel,
Matern52Kernel, Matern72Kernel, RieszKernel,
TransformationKernel, ProductKernel, SumKernel
export phi, Phi, order
export Gradient, Laplacian
export PoissonEquation, AdvectionEquation, HeatEquation, AdvectionDiffusionEquation
export Gradient, Laplacian, EllipticOperator
export PoissonEquation, EllipticEquation, AdvectionEquation, HeatEquation,
AdvectionDiffusionEquation
export SpatialDiscretization, Semidiscretization, semidiscretize
export NodeSet, empty_nodeset, separation_distance, dim, eachdim, values_along_dim,
distance_matrix, random_hypercube, random_hypercube_boundary, homogeneous_hypercube,
4 changes: 1 addition & 3 deletions src/callbacks_step/alive.jl
@@ -1,9 +1,7 @@
# Adapted from Trixi.jl
# https://github.com/trixi-framework/Trixi.jl/blob/c221bca89b38d416fb49137b1b266cecd1646b52/src/callbacks_step/alive.jl
"""
AliveCallback(io::IO = stdout; interval::Integer=0,
dt=nothing)
AliveCallback(io::IO = stdout; interval::Integer=0, dt=nothing)
Inexpensive callback showing that a simulation is still running by printing
some information such as the current time to the screen every `interval`
40 changes: 38 additions & 2 deletions src/differential_operators.jl
@@ -19,7 +19,7 @@ end
"""
Gradient()
The gradient operator. It can be called with a `RadialSymmetricKernel` and points
The gradient operator. It can be called with a [`RadialSymmetricKernel`](@ref) and points
`x` and `y` to evaluate the gradient of the `kernel` at `x - y`.
"""
struct Gradient <: AbstractDifferentialOperator
@@ -36,7 +36,7 @@ end
"""
Laplacian()
The Laplacian operator. It can be called with a `RadialSymmetricKernel` and points
The Laplacian operator. It can be called with a [`RadialSymmetricKernel`](@ref) and points
`x` and `y` to evaluate the Laplacian of the `kernel` at `x - y`.
"""
struct Laplacian <: AbstractDifferentialOperator
@@ -50,3 +50,39 @@ function (::Laplacian)(kernel::RadialSymmetricKernel, x)
H = ForwardDiff.hessian(x -> Phi(kernel, x), x)
return tr(H)
end

@doc raw"""
EllipticOperator(A, b, c)
Linear second-order elliptic operator with matrix ``A(x)\in\mathbb{R}^{d\times d}``, vector ``b(x)\in\mathbb{R}^d``, and scalar ``c(x)``.
The operator is defined as
```math
\mathcal{L}u = -\sum_{i,j = 1}^d a_{ij}(x)\partial_{x_i,x_j}^2u + \sum_{i = 1}^db_i(x)\partial_{x_i}u + c(x)u.
```
`A`, `b` and `c` are space-dependent functions returning a matrix, a vector, and a scalar, respectively. The matrix `A` should be symmetric and
positive definite for any input `x`.
The operator can be called with a [`RadialSymmetricKernel`](@ref) and points `x` and `y` to evaluate the operator of the `kernel` at `x - y`.
"""
struct EllipticOperator{AType, BType, CType} <:
AbstractDifferentialOperator where {AType, BType, CType}
A::AType
b::BType
c::CType
end

function Base.show(io::IO, ::EllipticOperator)
print(io, "-∑_{i,j = 1}^d aᵢⱼ (x)∂_{x_i,x_j}^2 + ∑_{i = 1}^d bᵢ(x)∂_{x_i} + c(x)")
end

function (operator::EllipticOperator)(kernel::RadialSymmetricKernel, x)
@unpack A, b, c = operator
AA = A(x)
bb = b(x)
cc = c(x)
H = ForwardDiff.hessian(x -> Phi(kernel, x), x)
gr = ForwardDiff.gradient(x -> Phi(kernel, x), x)

return sum(-AA[i, j] * H[i, j] for i in 1:dim(kernel), j in 1:dim(kernel)) +
sum(bb[i] * gr[i] for i in 1:dim(kernel)) +
cc * Phi(kernel, x)
end
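
For orientation, a minimal usage sketch of the new operator (not part of the commit): with constant coefficients `A = I`, `b = 0`, `c = 0` it reduces to the negative Laplacian, which is also what the new unit tests check below via `el_l`.

```julia
using KernelInterpolation

kernel = GaussKernel{2}(shape_parameter = 0.5)
# constant-coefficient special case: A = I, b = 0, c = 0, i.e. the operator is -Δ
L = EllipticOperator(x -> [1.0 0.0; 0.0 1.0], x -> [0.0, 0.0], x -> 0.0)
x = [0.4, 0.6]
L(kernel, x) ≈ -Laplacian()(kernel, x)  # expected to hold
```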
6 changes: 3 additions & 3 deletions src/discretization.jl
@@ -112,12 +112,12 @@ end
dim(semi::Semidiscretization) = dim(semi.spatial_discretization)
Base.eltype(semi::Semidiscretization) = eltype(semi.spatial_discretization)

# right-hand side of the ODE
# M c' = -A c + b
# right-hand side of the ODE (N_I: number of inner nodes, N_B: number of boundary nodes, N = N_I + N_B)
# M c' = -A_{LB} c + b
# where M is the (singular) mass matrix
# M = (A_I; 0)∈R^{N x N}, A_I∈R^{N_I x N}
# A_{LB} = (A_L; A_B)∈R^{N x N}, A_L∈R^{N_I x N}, A_B∈R^{N_B x N}
# b = (g; g)∈R^N, f∈R^{N_I}, g∈R^{N_B}
# b = (f; g)∈R^N, f∈R^{N_I}, g∈R^{N_B}
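# (here f is the PDE right-hand side evaluated at the inner nodes and g the boundary data at the boundary nodes)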

# We can get u from c by
# u = A c
33 changes: 33 additions & 0 deletions src/equations.jl
@@ -18,6 +18,8 @@ defined as
```math
-\Delta u = f
```
See also [`Laplacian`](@ref).
"""
struct PoissonEquation{F} <: AbstractStationaryEquation where {F}
f::F
@@ -36,6 +38,37 @@ function (::PoissonEquation)(kernel::RadialSymmetricKernel, x, y)
return -Laplacian()(kernel, x, y)
end

@doc raw"""
EllipticEquation(A, b, c, f)
Linear second-order elliptic equation with matrix `A`, vector `b`, scalar `c`, and right-hand side `f`.
The elliptic equation is defined as
```math
\mathcal{L}u = -\sum_{i,j = 1}^d a_{ij}(x)\partial_{x_i,x_j}^2u + \sum_{i = 1}^d b_i(x)\partial_{x_i}u + c(x)u = f,
```
where `A`, `b` and `c` are space-dependent functions returning a matrix, a vector, and a scalar, respectively.
See also [`EllipticOperator`](@ref).
"""
struct EllipticEquation{AType, BType, CType, F} <:
AbstractStationaryEquation where {AType, BType, CType, F}
op::EllipticOperator{AType, BType, CType}
f::F

function EllipticEquation(A, b, c, f)
return new{typeof(A), typeof(b), typeof(c), typeof(f)}(EllipticOperator(A, b, c), f)
end
end

function Base.show(io::IO, ::EllipticEquation)
print(io,
"-∑_{i,j = 1}^d aᵢⱼ (x)∂_{x_i,x_j}^2u + ∑_{i = 1}^d bᵢ(x)∂_{x_i}u + c(x)u = f")
end

function (equations::EllipticEquation)(kernel::RadialSymmetricKernel, x, y)
return equations.op(kernel, x, y)
end
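
For reference, a sketch mirroring the new consistency tests below (not part of the committed file): with `A = I`, `b = 0`, and `c = 0` the elliptic equation coincides with the Poisson equation.

```julia
using KernelInterpolation

f(x, equations) = x[1] + x[2]
kernel = Matern52Kernel{2}(shape_parameter = 0.5)
x, y = rand(2), rand(2)

poisson = PoissonEquation(f)
el_laplace = EllipticEquation(x -> [1 0; 0 1], x -> [0, 0], x -> 0, f)
el_laplace(kernel, x, y) ≈ poisson(kernel, x, y)  # expected to hold
```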

abstract type AbstractTimeDependentEquation <: AbstractEquation end

function rhs(t, nodeset::NodeSet, equations::AbstractTimeDependentEquation)
6 changes: 6 additions & 0 deletions test/test_examples_pde.jl
@@ -17,6 +17,12 @@ EXAMPLES_DIR = joinpath(examples_dir(), "PDEs")
pde_test=true)
end

@ki_testset "anisotropic_elliptic_2d_basic.jl" begin
@test_include_example(joinpath(EXAMPLES_DIR, "anisotropic_elliptic_2d_basic.jl"),
l2=0.3680733486177618, linf=0.09329545570900866,
pde_test=true)
end

@ki_testset "heat_2d_basic.jl" begin
@test_include_example(joinpath(EXAMPLES_DIR, "heat_2d_basic.jl"),
l2=0.8163804598948964, linf=0.0751084002569955,
54 changes: 44 additions & 10 deletions test/test_unit.jl
@@ -2,7 +2,7 @@ module TestUnit

using Test
using KernelInterpolation
using LinearAlgebra: norm, Symmetric
using LinearAlgebra: norm, Symmetric, I
using OrdinaryDiffEq: solve, Rodas5P
using StaticArrays: MVector
using Plots
@@ -644,6 +644,13 @@ include("test_util.jl")
g = @test_nowarn Gradient()
@test_nowarn println(g)
@test_nowarn display(g)
A(x) = [x[1]*x[2] sin(x[1])
sin(x[2]) x[1]^2]
b(x) = [x[1]^2 + x[2], x[1] + x[2]^2]
c(x) = x[1] + x[2]
el = @test_nowarn EllipticOperator(A, b, c)
@test_nowarn println(el)
@test_nowarn display(el)
# Test if automatic differentiation gives same results as analytical derivatives
# Define derivatives of Gauss kernel analytically and use them in the solver
# instead of automatic differentiation
@@ -677,24 +684,30 @@ include("test_util.jl")
return (Dim - 1) * phi_deriv_over_r(kernel, r, 1) + phi_deriv(kernel, r, 2)
end
kernel = GaussKernel{2}(shape_parameter = 0.5)
el_l = EllipticOperator(x -> I, zero, x -> 0) # negative Laplacian expressed as a general elliptic operator

x1 = [0.4, 0.6]
@test isapprox(l(kernel, x1), AnalyticalLaplacian()(kernel, x1))
@test isapprox(g(kernel, x1), [-0.17561908618411226, -0.2634286292761684])
@test isapprox(el(kernel, x1), 0.6486985764273818)
@test isapprox(el_l(kernel, x1), -AnalyticalLaplacian()(kernel, x1))
kernel = GaussKernel{3}(shape_parameter = 0.5)
x2 = [0.1, 0.2, 0.3]
@test isapprox(l(kernel, x2), AnalyticalLaplacian()(kernel, x2))
@test isapprox(g(kernel, x2),
[-0.04828027081287833, -0.09656054162575665, -0.14484081243863497])
@test isapprox(el_l(kernel, x2), -AnalyticalLaplacian()(kernel, x2))
kernel = GaussKernel{4}(shape_parameter = 0.5)
x3 = rand(4)
@test isapprox(l(kernel, x3, x3), AnalyticalLaplacian()(kernel, x3, x3))
@test isapprox(el_l(kernel, x3, x3), -AnalyticalLaplacian()(kernel, x3, x3))
end

@testset "PDEs" begin
# stationary PDEs
# Passing a function
f(x, equations) = x[1] + x[2]
poisson = @test_nowarn PoissonEquation(f)
f1(x, equations) = x[1] + x[2]
poisson = @test_nowarn PoissonEquation(f1)
@test_nowarn println(poisson)
@test_nowarn display(poisson)
nodeset = NodeSet([0.0 0.0
@@ -706,28 +719,40 @@ include("test_util.jl")
@test_nowarn poisson = PoissonEquation([0.0, 1.0, 1.0, 3.0])
@test KernelInterpolation.rhs(nodeset, poisson) == [0.0, 1.0, 1.0, 3.0]

A(x) = [x[1]*x[2] sin(x[1])
sin(x[2]) x[1]^2]
b(x) = [x[1]^2 + x[2], x[1] + x[2]^2]
c(x) = x[1] + x[2]
elliptic = @test_nowarn EllipticEquation(A, b, c, f1)
@test_nowarn println(elliptic)
@test_nowarn display(elliptic)
@test KernelInterpolation.rhs(nodeset, elliptic) == [0.0, 1.0, 1.0, 2.0]
# Passing a vector
@test_nowarn elliptic = EllipticEquation(A, b, c, [0.0, 1.0, 1.0, 3.0])
@test KernelInterpolation.rhs(nodeset, elliptic) == [0.0, 1.0, 1.0, 3.0]

# time-dependent PDEs
# Passing a function
f(t, x, equations) = x[1] + x[2] + t
advection = @test_nowarn AdvectionEquation((2.0, 0.5), f)
advection = @test_nowarn AdvectionEquation([2.0, 0.5], f)
f2(t, x, equations) = x[1] + x[2] + t
advection = @test_nowarn AdvectionEquation((2.0, 0.5), f2)
advection = @test_nowarn AdvectionEquation([2.0, 0.5], f2)
@test_nowarn println(advection)
@test_nowarn display(advection)
@test KernelInterpolation.rhs(1.0, nodeset, advection) == [1.0, 2.0, 2.0, 3.0]
# Passing a vector
@test_nowarn advection = AdvectionEquation((2.0, 0.5), [1.0, 2.0, 2.0, 4.0])
@test KernelInterpolation.rhs(1.0, nodeset, advection) == [1.0, 2.0, 2.0, 4.0]

heat = @test_nowarn HeatEquation(2.0, f)
heat = @test_nowarn HeatEquation(2.0, f2)
@test_nowarn println(heat)
@test_nowarn display(heat)
@test KernelInterpolation.rhs(1.0, nodeset, heat) == [1.0, 2.0, 2.0, 3.0]
# Passing a vector
@test_nowarn heat = HeatEquation(2.0, [1.0, 2.0, 2.0, 4.0])
@test KernelInterpolation.rhs(1.0, nodeset, heat) == [1.0, 2.0, 2.0, 4.0]

advection_diffusion = @test_nowarn AdvectionDiffusionEquation(2.0, (2.0, 0.5), f)
advection_diffusion = @test_nowarn AdvectionDiffusionEquation(2.0, [2.0, 0.5], f)
advection_diffusion = @test_nowarn AdvectionDiffusionEquation(2.0, (2.0, 0.5), f2)
advection_diffusion = @test_nowarn AdvectionDiffusionEquation(2.0, [2.0, 0.5], f2)
@test_nowarn println(advection_diffusion)
@test_nowarn display(advection_diffusion)
@test KernelInterpolation.rhs(1.0, nodeset, advection_diffusion) ==
Expand All @@ -737,11 +762,20 @@ include("test_util.jl")
[1.0, 2.0, 2.0, 4.0])
@test KernelInterpolation.rhs(1.0, nodeset, advection_diffusion) ==
[1.0, 2.0, 2.0, 4.0]

# Test consistency between equations
kernel = Matern52Kernel{2}(shape_parameter = 0.5)
x = rand(2)
y = rand(2)
kernel = Matern52Kernel{2}(shape_parameter = 0.5)
@test advection_diffusion(kernel, x, y) ==
advection(kernel, x, y) + heat(kernel, x, y)
el_laplace = EllipticEquation(x -> [1 0; 0 1], x -> [0, 0], x -> 0, f1)
@test el_laplace(kernel, x, y) == poisson(kernel, x, y)
el_advection = EllipticEquation(x -> [0 0; 0 0], x -> [2, 0.5], x -> 0, f1)
@test el_advection(kernel, x, y) == advection(kernel, x, y)
el_advection_diffusion = EllipticEquation(x -> [2 0; 0 2], x -> [2, 0.5], x -> 0.0,
f1)
@test el_advection_diffusion(kernel, x, y) == advection_diffusion(kernel, x, y)
end

@testset "Discretization" begin
