
Merge pull request #68 from ashutosh-b-b/bb/NeuralPDE_port
Port DeepBSDE and DeepBSDEHan from NeuralPDE
ChrisRackauckas authored Jan 19, 2024
2 parents ad2410d + b2ab323 commit b3e2651
Showing 27 changed files with 2,138 additions and 824 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/documentation.yml
@@ -14,7 +14,7 @@ jobs:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@latest
with:
version: '1.6'
version: '1.10'
- name: Install dependencies
run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
- name: Build and deploy
9 changes: 7 additions & 2 deletions Project.toml
@@ -13,13 +13,18 @@ Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
SciMLSensitivity = "1ed8b502-d754-442c-8d5d-10ac956f44a1"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
StochasticDiffEq = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
TestEnv = "1e6cf692-eddd-4d53-88a5-2d735e33781b"
Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"

[compat]
CUDA = "3, 5"
CUDA = "3, 4, 5"
DiffEqBase = "6"
DocStringExtensions = "0.8, 0.9"
ExprTools = "0.1"
@@ -28,4 +33,4 @@ Functors = "0.2, 0.3, 0.4"
Reexport = "1"
Statistics = "1"
Zygote = "0.6"
julia = "1.6,1.7"
julia = "1.8, 1.9"
16 changes: 7 additions & 9 deletions benchmarks/reflect.jl
@@ -10,30 +10,28 @@ end
# testing reflection on batchsize
d = 100
batch_size = 10000
y0 = CUDA.zeros(d,batch_size)
y0 = CUDA.zeros(d, batch_size)
y1 = CUDA.randn(size(y0)...)
@btime _reflect_GPU2($y0,$y1,-1f0,1f0,_device)
@btime _reflect_GPU2($y0, $y1, -1.0f0, 1.0f0, _device)

@btime CUDA.similar(y0)


function imin2array()
y1 = CUDA.randn(1000,1000)
imin = argmin(y1,dims=1) |> Array
y1 = CUDA.randn(1000, 1000)
imin = argmin(y1, dims = 1) |> Array
n = zeros(size(y1))
n[imin] .= 1
n = n |> gpu # move the one-hot mask to the GPU
end
@btime imin2array()
@btime CUDA.zeros(size(y1))


n = CUDA.zeros(size(y1))
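# scalar indexing into CUDA arrays errors by default; enable it so the benchmark below can run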
CUDA.allowscalar(true)
function imin_scalar()
y1 = CUDA.randn(1000,1000)
imin = argmin(y1,dims=1)
n .= 0.
y1 = CUDA.randn(1000, 1000)
imin = argmin(y1, dims = 1)
n .= 0.0
n[imin] .= 1
end
@btime imin_scalar()
37 changes: 18 additions & 19 deletions benchmarks/sde_loop_benchmark.jl
@@ -14,39 +14,38 @@ else
device = Flux.cpu
end

function sde_loop(d,batch_size)
X0 = zeros(Float32,d)
y0 = repeat(X0[:],1,batch_size)
y1 = repeat(X0[:],1,batch_size)
function sde_loop(d, batch_size)
X0 = zeros(Float32, d)
y0 = repeat(X0[:], 1, batch_size)
y1 = repeat(X0[:], 1, batch_size)
dt = 0.1
dWall = sqrt(dt) * randn(d,batch_size,N)
dWall = sqrt(dt) * randn(d, batch_size, N)
for i in 1:N
dW = @view dWall[:,:,i]
dW = @view dWall[:, :, i]
y0 .= y1
y1 .= y0 .+ 0. * dt .+ 1. .* dW
y1 .= y0 .+ 0.0 * dt .+ 1.0 .* dW
end
return y0, y1
end


function sde_loop_CUDA(d,batch_size)
X0 = CUDA.zeros(Float32,d)
y0 = CUDA.repeat(X0[:],1,batch_size)
y1 = CUDA.repeat(X0[:],1,batch_size)
function sde_loop_CUDA(d, batch_size)
X0 = CUDA.zeros(Float32, d)
y0 = CUDA.repeat(X0[:], 1, batch_size)
y1 = CUDA.repeat(X0[:], 1, batch_size)
dt = 0.1
dWall = sqrt(dt) * CUDA.randn(d,batch_size,N)
dWall = sqrt(dt) * CUDA.randn(d, batch_size, N)
for i in 1:N
dW = @view dWall[:,:,i]
dW = @view dWall[:, :, i]
y0 .= y1
y1 .= y0 .+ 0. * dt .+ 1. .* dW
y1 .= y0 .+ 0.0 * dt .+ 1.0 .* dW
end
return y0, y1
end

using BenchmarkTools

@btime CUDA.randn(1000,1000)
@btime randn(1000,1000)
@btime CUDA.randn(1000, 1000)
@btime randn(1000, 1000)

@btime sde_loop(d,batch_size)
@btime sde_loop_CUDA(d,batch_size)
@btime sde_loop(d, batch_size)
@btime sde_loop_CUDA(d, batch_size)
16 changes: 8 additions & 8 deletions docs/make.jl
@@ -5,11 +5,11 @@ cp("./docs/Project.toml", "./docs/src/assets/Project.toml", force = true)

include("pages.jl")

makedocs(sitename="HighDimPDE.jl",
authors = "Victor Boussange",
pages = pages,
clean = true, doctest = false, linkcheck = true,
format = Documenter.HTML(assets = ["assets/favicon.ico"],
canonical = "https://docs.sciml.ai/HighDimPDE/stable/"),)

deploydocs(repo = "github.com/SciML/HighDimPDE.jl", devbranch="main")
makedocs(sitename = "HighDimPDE.jl",
authors = "Victor Boussange",
pages = pages,
clean = true, doctest = false, linkcheck = true,
format = Documenter.HTML(assets = ["assets/favicon.ico"],
canonical = "https://docs.sciml.ai/HighDimPDE/stable/"))

deploydocs(repo = "github.com/SciML/HighDimPDE.jl", devbranch = "main")
19 changes: 12 additions & 7 deletions docs/pages.jl
@@ -1,8 +1,13 @@
pages = [
"Home" => "index.md",
"Getting started" => "getting_started.md",
"Solver Algorithms" => ["MLP.md",
"DeepSplitting.md",
],
"Feynman Kac formula" => "Feynman_Kac.md",
]
"Home" => "index.md",
"Getting started" => "getting_started.md",
"Solver Algorithms" => ["MLP.md",
"DeepSplitting.md",
"DeepBSDE.md"],
"Tutorials" => [
"tutorials/deepsplitting.md",
"tutorials/deepbsde.md",
"tutorials/mlp.md",
],
"Feynman Kac formula" => "Feynman_Kac.md",
]
13 changes: 13 additions & 0 deletions docs/src/DeepBSDE.md
@@ -0,0 +1,13 @@
# [The `DeepBSDE` algorithm](@id deepbsde)

```@autodocs
Modules = [HighDimPDE]
Pages = ["DeepBSDE.jl", "DeepBSDE_Han.jl"]
```

## The general idea 💡
The `DeepBSDE` algorithm is similar in essence to the `DeepSplitting` algorithm, with the difference that
it uses two neural networks to approximate both the solution and its gradient.

## References
- Han, J., Jentzen, A., E, W., Solving high-dimensional partial differential equations using deep learning. [arXiv](https://arxiv.org/abs/1707.02568) (2018)
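
To make the two-network idea concrete, here is a minimal sketch (not part of this diff) of how such a pair of Flux networks can be built for a `d`-dimensional problem; the `DeepBSDE` constructor shown in the trailing comment is an assumption based on the interface ported from NeuralPDE.

```julia
# Illustrative sketch only: the two networks behind the DeepBSDE idea.
# `u0` approximates the solution u(t0, x); `σᵀ∇u` approximates σᵀ(x) ∇u(t, x),
# i.e. the gradient term driving the backward SDE.
using Flux

d = 100        # spatial dimension of the PDE
hls = 10 + d   # hidden layer size

u0 = Flux.Chain(Dense(d, hls, relu),
    Dense(hls, hls, relu),
    Dense(hls, 1))                          # scalar solution value at (t0, x)

σᵀ∇u = Flux.Chain(Dense(d + 1, hls, relu),  # input is (x, t), hence d + 1
    Dense(hls, hls, relu),
    Dense(hls, d))                          # d-dimensional gradient term

# Assumed usage, following the signature ported from NeuralPDE:
# alg = DeepBSDE(u0, σᵀ∇u, opt = Flux.Optimise.Adam(0.01))
```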
1 change: 0 additions & 1 deletion docs/src/DeepSplitting.md
@@ -99,4 +99,3 @@ u(t_{n+1}, X_{T - t_{n+1}}) \approx \sum_{j=1}^{\text{batch\_size}} \left[ u(t_{
## References
- Boussange, V., Becker, S., Jentzen, A., Kuckuck, B., Pellissier, L., Deep learning approximations for non-local nonlinear PDEs with Neumann boundary conditions. [arXiv](https://arxiv.org/abs/2205.03672) (2022)
- Beck, C., Becker, S., Cheridito, P., Jentzen, A., Neufeld, A., Deep splitting method for parabolic PDEs. [arXiv](https://arxiv.org/abs/1907.03452) (2019)
- Han, J., Jentzen, A., E, W., Solving high-dimensional partial differential equations using deep learning. [arXiv](https://arxiv.org/abs/1707.02568) (2018)
4 changes: 2 additions & 2 deletions docs/src/getting_started.md
@@ -6,7 +6,7 @@ The general workflow for using `HighDimPDE.jl` is as follows:

- Define a Partial Integro-Differential Equation problem
- Select a solver algorithm
- Solve the problem
- Solve the problem.

## Examples

@@ -125,5 +125,5 @@ sol = solve(prob,
abstol = 2e-3,
maxiters = 1000,
batch_size = 1000,
use_cuda = true)
use_cuda=true)
```
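
The three workflow steps shown in the diff above (define a problem, select a solver, solve) fit together roughly as follows. This is a hedged sketch, not part of the commit: the `PIDEProblem` argument order and the eight-argument signature of the nonlinearity `f` are assumptions based on the package documentation.

```julia
# Illustrative workflow sketch (the PIDEProblem argument order and the
# signature of `f` are assumptions, not taken from this commit).
using HighDimPDE

d = 10                    # dimension of the problem
x0 = fill(0.0f0, d)       # point where the solution is approximated
tspan = (0.0f0, 0.5f0)    # time horizon

g(x) = exp(-sum(x .^ 2))  # initial condition
μ(x, p, t) = 0.0f0        # drift coefficient
σ(x, p, t) = 0.1f0        # diffusion coefficient
# nonlinearity; assumed signature taking solution values and gradients
f(x, y, v_x, v_y, ∇v_x, ∇v_y, p, t) = max.(v_x, 0.0f0) .* (1.0f0 .- v_x)

prob = PIDEProblem(μ, σ, x0, tspan, g, f)     # assumed argument order
alg = MLP()                                   # Multi-Level Picard iterations
sol = solve(prob, alg, multithreading = true)
```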
25 changes: 14 additions & 11 deletions docs/src/index.md
@@ -17,7 +17,10 @@ where $u \colon [0,T] \times \Omega \to \R$, $\Omega \subseteq \R^d$ is subject

* the [Deep Splitting scheme](@ref deepsplitting)

* the [Multi-Level Picard iterations scheme](@ref mlp).
* the [Multi-Level Picard iterations scheme](@ref mlp)

* the [Deep BSDE scheme](@ref deepbsde).


To make the most out of **HighDimPDE.jl**, we advise you to first have a look at the

@@ -27,15 +30,15 @@ as all solver algorithms heavily rely on it.

## Algorithm overview

----------------------------------------------
Features | `DeepSplitting` | `MLP` |
----------|:----------------------:|:------------:
Time discretization free| ❌ | ✅ |
Mesh-free | ✅ | ✅ |
Single point $x \in \R^d$ approximation| ✅ | ✅ |
$d$-dimensional cube $[a,b]^d$ approximation| ✅ | ❌ |
GPU | ✅ | ❌ |
Gradient non-linearities | ✔️| ❌ |
------------------------------------------------------------
Features | `DeepSplitting` | `MLP` | `DeepBSDE` |
----------|:----------------------:|:------------:|:--------:
Time discretization free| ❌ | ✅ | ❌ |
Mesh-free | ✅ | ✅ | ✅ |
Single point $x \in \R^d$ approximation| ✅ | ✅ | ✅ |
$d$-dimensional cube $[a,b]^d$ approximation| ✅ | ❌ | ✔️ |
GPU | ✅ | ❌ | ✅ |
Gradient non-linearities | ✔️| ❌ | ✅ |

✔️ : will be supported in the future

@@ -85,4 +88,4 @@ file and the
[project]($link_project)
file.
""")
```
```