Allen-Cahn Equation with Sequential Training

In this tutorial we solve the Allen-Cahn equation with a periodic boundary condition from $t=0$ to $t=1$. The training process is split into four stages, namely $t\in [0, 0.25]$, $t\in [0, 0.5]$, $t\in [0, 0.75]$ and $t\in [0, 1]$.
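Written out from the definitions below, the problem is

\[ \begin{aligned} \frac{\partial u}{\partial t} - 0.0001\,\frac{\partial^2 u}{\partial x^2} + 5\,u\left(u^2 - 1\right) &= 0, \qquad x \in [-1, 1],\ t \in [0, 1],\\ u(x, 0) &= x^2 \cos(\pi x),\\ u(-1, t) &= u(1, t). \end{aligned} \]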

using ModelingToolkit, IntervalSets
using Sophon
using Optimization, OptimizationOptimJL, Zygote

@parameters t, x
@variables u(..)
Dₓ = Differential(x)
Dₓ² = Differential(x)^2
Dₜ = Differential(t)

eq = Dₜ(u(x, t)) - 0.0001 * Dₓ²(u(x, t)) + 5 * u(x,t) * (abs2(u(x,t)) - 1.0) ~ 0.0

domain = [x ∈ -1.0..1.0, t ∈ 0.0..0.25] # start with the first time window; later stages extend it

bcs = [u(x,0) ~ x^2 * cospi(x),
       u(-1,t) ~ u(1,t)]

@named allen = PDESystem(eq, bcs, domain, [x, t], [u(x, t)])

\[ \begin{align} - 0.0001 \frac{\mathrm{d}}{\mathrm{d}x} \frac{\mathrm{d}}{\mathrm{d}x} u\left( x, t \right) + \frac{\mathrm{d}}{\mathrm{d}t} u\left( x, t \right) + 5 u\left( x, t \right) \left( -1 + \left|u\left( x, t \right)\right|^{2} \right) =& 0 \end{align} \]

Then we define the neural net, the sampler, and the training strategy.

chain = FullyConnected(2, 1, tanh; hidden_dims=16, num_layers=4) # 2 inputs (x, t), 1 output, hidden layers of width 16 with tanh
pinn = PINN(chain)
sampler = QuasiRandomSampler(500, (300, 100)) # 500 points for the PDE residual; 300 and 100 for the two conditions in bcs
strategy = NonAdaptiveTraining(1, (50, 1))    # fixed loss weights: 1 for the PDE, 50 for the initial condition, 1 for the periodic BC
prob = Sophon.discretize(allen, pinn, sampler, strategy)
OptimizationProblem. In-place: true
u0: ComponentVector{Float64}(layer_1 = (weight = ..., bias = ...), layer_2 = (weight = ..., bias = ...), layer_3 = (weight = ..., bias = ...), layer_4 = (weight = ..., bias = ...), layer_5 = (weight = ..., bias = ...))
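
Here `Sophon.discretize` samples the domains with `sampler`, assembles the weighted PINN loss according to `strategy`, and returns an `OptimizationProblem` whose unknowns are the network parameters (the `ComponentVector` shown above).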

We now solve the equation sequentially in time: after training on $t \in [0, 0.25]$, the time domain is extended to each of the remaining stages, the training data are resampled, and the optimizer is warm-started from the parameters of the previous stage.

function train(allen, prob, sampler, strategy)
    bfgs = BFGS()
    # First stage: t ∈ [0, 0.25]
    res = Optimization.solve(prob, bfgs; maxiters=2000)

    # Remaining stages: enlarge the time domain, resample the data,
    # and warm-start from the parameters of the previous stage.
    for tmax in [0.5, 0.75, 1.0]
        allen.domain[2] = t ∈ 0.0..tmax
        data = Sophon.sample(allen, sampler)
        prob = remake(prob; u0=res.u, p=data)
        res = Optimization.solve(prob, bfgs; maxiters=2000)
    end
    return res
end

res = train(allen, prob, sampler, strategy)
retcode: Failure
u: ComponentVector{Float64}(layer_1 = (weight = ..., bias = ...), layer_2 = (weight = ..., bias = ...), layer_3 = (weight = ..., bias = ...), layer_4 = (weight = ..., bias = ...), layer_5 = (weight = ..., bias = ...))
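
The `Failure` return code here only means that BFGS hit the `maxiters` limit before satisfying its convergence tolerance; the trained parameters in `res.u` are still usable, and the final loss value is available as `res.objective`.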

Let's plot the result.

using CairoMakie

phi = pinn.phi
xs, ts = [infimum(d.domain):0.01:supremum(d.domain) for d in allen.domain]
axis = (xlabel="t", ylabel="x", title="Prediction")
u_pred = [sum(phi([x, t], res.u)) for x in xs, t in ts] # sum collapses the 1-element network output to a scalar
fig, ax, hm = heatmap(ts, xs, u_pred', axis=axis)
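
To keep the plot, the figure can be written to disk with Makie's `save`; the file name below is just an example.

save("allen_cahn.png", fig)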