diff --git a/Project.toml b/Project.toml index 3b9289a..7f2796a 100644 --- a/Project.toml +++ b/Project.toml @@ -22,9 +22,9 @@ Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8" [extras] -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] diff --git a/README.md b/README.md new file mode 100644 index 0000000..996ddde --- /dev/null +++ b/README.md @@ -0,0 +1,489 @@ +# Gutzwiller +_importance sampling and variational Monte Carlo for +[Rimu.jl](https://github.com/joachimbrand/Rimu.jl)_ + +## Installation + +Gutzwiller.jl is not yet registered. To install it, run + +```julia +import Pkg; Pkg.add("https://github.com/mtsch/Gutzwiller.jl") +``` + +## Usage guide + +````julia +using Rimu +using Gutzwiller +using CairoMakie +using LaTeXStrings +```` + +First, we set up a starting address and a Hamiltonian + +````julia +addr = near_uniform(BoseFS{10,10}) +H = HubbardReal1D(addr; u=2.0) +```` + +```` +HubbardReal1D(fs"|1 1 1 1 1 1 1 1 1 1⟩"; u=2.0, t=1.0) +```` + +In this example, we'll set up a Gutzwiller ansatz to importance-sample the Hamiltonian. + +````julia +ansatz = GutzwillerAnsatz(H) +```` + +```` +GutzwillerAnsatz{BoseFS{10, 10, BitString{19, 1, UInt32}}, Float64, HubbardReal1D{Float64, BoseFS{10, 10, BitString{19, 1, UInt32}}, 2.0, 1.0}}(HubbardReal1D(fs"|1 1 1 1 1 1 1 1 1 1⟩"; u=2.0, t=1.0)) +```` + +An ansatz is a struct that given a set of parameters and an address, produces the value +it would have if it was a vector. + +````julia +ansatz(addr, [1.0]) +```` + +```` +1.0 +```` + +In addition, the function `val_and_grad` can be used to compute both the value and its +gradient with respect to the parameters. 
+ +````julia +val_and_grad(ansatz, addr, [1.0]) +```` + +```` +(1.0, [-0.0]) +```` + +### Deterministic optimization + +For effective importance sampling, we want the ansatz to be as good of an approximation to +the ground state of the Hamiltonian as possible. As the value of the Rayleigh quotient of +a given ansatz is always larger than the Hamiltonian's ground state energy, we can use +an optimization algorithm to find the paramters that minimize its energy. + +When the basis of the Hamiltonian is small enough to fit into memory, it's best to use the +`LocalEnergyEvaluator` + +````julia +le = LocalEnergyEvaluator(H, ansatz) +```` + +```` +LocalEnergyEvaluator(HubbardReal1D(fs"|1 1 1 1 1 1 1 1 1 1⟩"; u=2.0, t=1.0), GutzwillerAnsatz{BoseFS{10, 10, BitString{19, 1, UInt32}}, Float64, HubbardReal1D{Float64, BoseFS{10, 10, BitString{19, 1, UInt32}}, 2.0, 1.0}}(HubbardReal1D(fs"|1 1 1 1 1 1 1 1 1 1⟩"; u=2.0, t=1.0))) +```` + +which can be used to evaulate the value of the Rayleigh quotient (or its gradient) for +given parameters. In the case of the Gutzwiller ansatz, there is only one parameter. + +````julia +le([1.0]) +```` + +```` +-7.825819465042905 +```` + +Like before, we can use `val_and_grad` to also evaluate its gradient. 
+ +````julia +val_and_grad(le, [1.0]) +```` + +```` +(-7.825819465042905, [10.614147776166838]) +```` + +Now, let's plot the energy landscape for this particular case + +````julia +begin + fig = Figure() + ax = Axis(fig[1, 1]; xlabel=L"p", ylabel=L"E") + ps = range(0, 2; length=100) + Es = [le([p]) for p in ps] + lines!(ax, ps, Es) + fig +end +```` +![](docs/README-21.png) + +To find the minimum, pass `le` to `optimize` from Optim.jl + +````julia +using Optim + +opt_nelder = optimize(le, [1.0]) +```` + +```` + * Status: success + + * Candidate solution + Final objective value: -1.300521e+01 + + * Found with + Algorithm: Nelder-Mead + + * Convergence measures + √(Σ(yᵢ-ȳ)²)/n ≤ 1.0e-08 + + * Work counters + Seconds run: 0 (vs limit Inf) + Iterations: 11 + f(x) calls: 25 + +```` + +To take advantage of the gradients, wrap the evaluator in `Optim.only_fg!`. This will +usually reduce the number of steps needed to reach the minimum. + +````julia +opt_lbgfs = optimize(Optim.only_fg!(le), [1.0]) +```` + +```` + * Status: success + + * Candidate solution + Final objective value: -1.300521e+01 + + * Found with + Algorithm: L-BFGS + + * Convergence measures + |x - x'| = 4.19e-05 ≰ 0.0e+00 + |x - x'|/|x'| = 1.26e-04 ≰ 0.0e+00 + |f(x) - f(x')| = 5.04e-08 ≰ 0.0e+00 + |f(x) - f(x')|/|f(x')| = 3.87e-09 ≰ 0.0e+00 + |g(x)| = 4.79e-09 ≤ 1.0e-08 + + * Work counters + Seconds run: 0 (vs limit Inf) + Iterations: 5 + f(x) calls: 16 + ∇f(x) calls: 16 + +```` + +We can inspect the parameters and the value at the minimum as + +````julia +opt_lbgfs.minimizer, opt_lbgfs.minimum +```` + +```` +([0.3331889106872666], -13.005208186380349) +```` + +### Variational quantum Monte Carlo + +When the Hamiltonian is too large to store its full basis in memory, we can use +variational QMC to sample addresses from the Hilbert space and evaluate their energy +at the same time. An important paramter we have tune is the number `steps`. 
More steps +will give us a better approximation of the energy, but take longer to evaluate. +Not taking enough samples can also result in producing a biased result. +Consider the following. + +````julia +p0 = [1.0] +@time kinetic_vqmc(H, ansatz, p0; steps=1e2) +```` + +```` +KineticVQMCResult + walkers: 24 + samples: 2400 + local energy: -7.7167 ± 0.081513 +```` + +````julia +@time kinetic_vqmc(H, ansatz, p0; steps=1e5) +```` + +```` +KineticVQMCResult + walkers: 24 + samples: 2400000 + local energy: -7.8291 ± 0.0033925 +```` + +````julia +@time kinetic_vqmc(H, ansatz, p0; steps=1e7) +```` + +```` +KineticVQMCResult + walkers: 24 + samples: 240000000 + local energy: -7.8259 ± 0.00034744 +```` + +For this simple example, `1e2` steps gives an energy that is significantly higher, while +`1e7` takes too long. `1e5` seems to work well enough. For more convenient evaluation, we +wrap VQMC into a struct that behaves much like the `LocalEnergyEvaluator`. + +````julia +qmc = KineticVQMC(H, ansatz; samples=1e4) +```` + +```` +KineticVQMC( + HubbardReal1D(fs"|1 1 1 1 1 1 1 1 1 1⟩"; u=2.0, t=1.0), + GutzwillerAnsatz{BoseFS{10, 10, BitString{19, 1, UInt32}}, Float64, HubbardReal1D{Float64, BoseFS{10, 10, BitString{19, 1, UInt32}}, 2.0, 1.0}}(HubbardReal1D(fs"|1 1 1 1 1 1 1 1 1 1⟩"; u=2.0, t=1.0)); + steps=417, + walkers=24, +) +```` + +````julia +qmc([1.0]), le([1.0]) +```` + +```` +(-7.764278136564674, -7.825819465042905) +```` + +Because the output of this procedure is noisy, optimizing it with Optim.jl will not work. +However, we can use a stochastic gradient descent (in this case +[AMSGrad](https://paperswithcode.com/method/amsgrad)). 
+ +````julia +grad_result = amsgrad(qmc, [1.0]) +```` + +```` +GradientDescentResult + iterations: 101 + converged: false (iterations) + last value: -12.959814613812123 + last params: [0.3341375361174239] +```` + +While `amsgrad` attempts to determine if the optimization converged, it will generally not +detect convergence due to the noise in the QMC evaluation. The best way to determine +convergence is to plot the results. `grad_result` can be converted to a `DataFrame`. + +````julia +grad_df = DataFrame(grad_result) +```` + +```` +101×10 DataFrame + Row │ α β1 β2 iter param value gradient first_moment second_moment param_delta + │ Float64 Float64 Float64 Int64 SArray… Float64 SArray… SArray… SArray… SArray… +─────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + 1 │ 0.01 0.1 0.01 1 [1.0] -7.8517 [10.5681] [10.4915] [1.11685] [-0.0992753] + 2 │ 0.01 0.1 0.01 2 [0.900725] -8.81397 [10.3845] [10.4808] [2.18406] [-0.0709192] + 3 │ 0.01 0.1 0.01 3 [0.829805] -9.64972 [10.1503] [10.4478] [3.19251] [-0.0584734] + 4 │ 0.01 0.1 0.01 4 [0.771332] -10.1564 [9.68553] [10.3716] [4.09868] [-0.0512297] + 5 │ 0.01 0.1 0.01 5 [0.720102] -10.57 [8.64895] [10.1993] [4.80574] [-0.0465254] + 6 │ 0.01 0.1 0.01 6 [0.673577] -11.101 [8.60619] [10.04] [5.49834] [-0.0428171] + 7 │ 0.01 0.1 0.01 7 [0.63076] -11.4535 [8.08636] [9.84463] [6.09725] [-0.0398687] + 8 │ 0.01 0.1 0.01 8 [0.590891] -11.7732 [7.72568] [9.63273] [6.63314] [-0.0374016] + 9 │ 0.01 0.1 0.01 9 [0.55349] -12.0944 [7.30056] [9.39952] [7.09979] [-0.0352763] + 10 │ 0.01 0.1 0.01 10 [0.518213] -12.2247 [6.44683] [9.10425] [7.44441] [-0.0333679] + 11 │ 0.01 0.1 0.01 11 [0.484845] -12.5246 [5.86906] [8.78073] [7.71443] [-0.0316139] + 12 │ 0.01 0.1 0.01 12 [0.453231] -12.6774 [4.88452] [8.39111] [7.87587] [-0.0298999] + 13 │ 0.01 0.1 0.01 13 [0.423331] -12.7854 [4.02343] [7.95434] [7.95899] [-0.0281952] + 14 │ 0.01 0.1 0.01 14 [0.395136] -12.9317 [2.98377] 
[7.45728] [7.96843] [-0.0264177] + 15 │ 0.01 0.1 0.01 15 [0.368719] -12.9544 [1.76278] [6.88783] [7.96843] [-0.0244004] + 16 │ 0.01 0.1 0.01 16 [0.344318] -12.9194 [0.445011] [6.24355] [7.96843] [-0.022118] + 17 │ 0.01 0.1 0.01 17 [0.3222] -12.9675 [-0.576934] [5.5615] [7.96843] [-0.0197018] + 18 │ 0.01 0.1 0.01 18 [0.302498] -12.9884 [-2.00539] [4.80481] [7.96843] [-0.0170212] + 19 │ 0.01 0.1 0.01 19 [0.285477] -12.8922 [-2.88221] [4.03611] [7.97181] [-0.014295] + 20 │ 0.01 0.1 0.01 20 [0.271182] -12.7798 [-3.94714] [3.23778] [8.0479] [-0.0114132] + 21 │ 0.01 0.1 0.01 21 [0.259769] -12.9013 [-4.77098] [2.43691] [8.19504] [-0.00851263] + 22 │ 0.01 0.1 0.01 22 [0.251256] -12.7883 [-5.93092] [1.60013] [8.46485] [-0.00549977] + 23 │ 0.01 0.1 0.01 23 [0.245757] -12.7912 [-6.41241] [0.798872] [8.79139] [-0.00269432] + 24 │ 0.01 0.1 0.01 24 [0.243062] -12.8012 [-6.5577] [0.0632146] [9.13351] [-0.00020917] + 25 │ 0.01 0.1 0.01 25 [0.242853] -12.6994 [-7.16865] [-0.659972] [9.55607] [0.00213494] + 26 │ 0.01 0.1 0.01 26 [0.244988] -12.7609 [-6.02553] [-1.19653] [9.82358] [0.00381758] + 27 │ 0.01 0.1 0.01 27 [0.248806] -12.7231 [-6.64482] [-1.74136] [10.1669] [0.00546127] + 28 │ 0.01 0.1 0.01 28 [0.254267] -12.8143 [-5.57447] [-2.12467] [10.376] [0.00659594] + 29 │ 0.01 0.1 0.01 29 [0.260863] -12.6247 [-5.8031] [-2.49251] [10.609] [0.00765245] + 30 │ 0.01 0.1 0.01 30 [0.268515] -12.9275 [-4.11193] [-2.65445] [10.6719] [0.00812556] + 31 │ 0.01 0.1 0.01 31 [0.276641] -12.9529 [-3.5771] [-2.74672] [10.6932] [0.00839964] + 32 │ 0.01 0.1 0.01 32 [0.285041] -12.8441 [-3.32014] [-2.80406] [10.6965] [0.00857367] + 33 │ 0.01 0.1 0.01 33 [0.293614] -12.9499 [-1.99052] [-2.72271] [10.6965] [0.00832492] + 34 │ 0.01 0.1 0.01 34 [0.301939] -12.9249 [-2.05429] [-2.65586] [10.6965] [0.00812055] + 35 │ 0.01 0.1 0.01 35 [0.31006] -13.0387 [-1.10435] [-2.50071] [10.6965] [0.00764616] + 36 │ 0.01 0.1 0.01 36 [0.317706] -12.9058 [-1.14945] [-2.36559] [10.6965] [0.007233] + 37 │ 0.01 0.1 0.01 37 
[0.324939] -12.9008 [-0.784072] [-2.20743] [10.6965] [0.00674943] + 38 │ 0.01 0.1 0.01 38 [0.331688] -13.017 [0.014757] [-1.98522] [10.6965] [0.00606998] + 39 │ 0.01 0.1 0.01 39 [0.337758] -13.0489 [0.431095] [-1.74358] [10.6965] [0.00533117] + 40 │ 0.01 0.1 0.01 40 [0.343089] -12.9353 [0.543217] [-1.5149] [10.6965] [0.00463196] + 41 │ 0.01 0.1 0.01 41 [0.347721] -12.9249 [0.256665] [-1.33775] [10.6965] [0.00409028] + 42 │ 0.01 0.1 0.01 42 [0.351812] -12.9855 [1.19472] [-1.0845] [10.6965] [0.00331596] + 43 │ 0.01 0.1 0.01 43 [0.355128] -12.9889 [1.22405] [-0.853646] [10.6965] [0.0026101] + 44 │ 0.01 0.1 0.01 44 [0.357738] -12.9676 [1.81621] [-0.58666] [10.6965] [0.00179377] + 45 │ 0.01 0.1 0.01 45 [0.359532] -13.0389 [1.69995] [-0.357999] [10.6965] [0.00109462] + 46 │ 0.01 0.1 0.01 46 [0.360626] -12.9846 [1.79403] [-0.142796] [10.6965] [0.000436613] + 47 │ 0.01 0.1 0.01 47 [0.361063] -13.0172 [1.897] [0.0611831] [10.6965] [-0.000187073] + 48 │ 0.01 0.1 0.01 48 [0.360876] -13.018 [1.95182] [0.250246] [10.6965] [-0.000765151] + 49 │ 0.01 0.1 0.01 49 [0.360111] -12.955 [1.2156] [0.346781] [10.6965] [-0.00106032] + 50 │ 0.01 0.1 0.01 50 [0.35905] -13.0011 [1.44412] [0.456515] [10.6965] [-0.00139584] + 51 │ 0.01 0.1 0.01 51 [0.357654] -13.0055 [1.69032] [0.579896] [10.6965] [-0.00177308] + 52 │ 0.01 0.1 0.01 52 [0.355881] -12.9531 [1.18625] [0.640531] [10.6965] [-0.00195848] + 53 │ 0.01 0.1 0.01 53 [0.353923] -13.0302 [1.12235] [0.688712] [10.6965] [-0.0021058] + 54 │ 0.01 0.1 0.01 54 [0.351817] -12.9214 [0.860403] [0.705881] [10.6965] [-0.0021583] + 55 │ 0.01 0.1 0.01 55 [0.349659] -13.035 [0.972663] [0.73256] [10.6965] [-0.00223987] + 56 │ 0.01 0.1 0.01 56 [0.347419] -13.0992 [1.17143] [0.776447] [10.6965] [-0.00237406] + 57 │ 0.01 0.1 0.01 57 [0.345045] -13.0368 [1.16839] [0.815641] [10.6965] [-0.0024939] + 58 │ 0.01 0.1 0.01 58 [0.342551] -13.0096 [0.526658] [0.786743] [10.6965] [-0.00240554] + 59 │ 0.01 0.1 0.01 59 [0.340145] -13.0312 [0.573331] [0.765402] 
[10.6965] [-0.00234029] + 60 │ 0.01 0.1 0.01 60 [0.337805] -13.1102 [0.683714] [0.757233] [10.6965] [-0.00231531] + 61 │ 0.01 0.1 0.01 61 [0.33549] -13.0502 [0.574572] [0.738967] [10.6965] [-0.00225946] + 62 │ 0.01 0.1 0.01 62 [0.33323] -12.9879 [0.219047] [0.686975] [10.6965] [-0.00210049] + 63 │ 0.01 0.1 0.01 63 [0.33113] -13.0849 [0.269882] [0.645266] [10.6965] [-0.00197296] + 64 │ 0.01 0.1 0.01 64 [0.329157] -12.9193 [-0.2817] [0.552569] [10.6965] [-0.00168953] + 65 │ 0.01 0.1 0.01 65 [0.327467] -13.0491 [-0.243852] [0.472927] [10.6965] [-0.00144602] + 66 │ 0.01 0.1 0.01 66 [0.326021] -12.9892 [-0.718302] [0.353804] [10.6965] [-0.00108179] + 67 │ 0.01 0.1 0.01 67 [0.32494] -13.0694 [-0.119883] [0.306435] [10.6965] [-0.000936954] + 68 │ 0.01 0.1 0.01 68 [0.324003] -13.0568 [-0.29773] [0.246019] [10.6965] [-0.000752225] + 69 │ 0.01 0.1 0.01 69 [0.32325] -13.0144 [-0.312828] [0.190134] [10.6965] [-0.000581353] + 70 │ 0.01 0.1 0.01 70 [0.322669] -13.0241 [-0.452879] [0.125833] [10.6965] [-0.000384745] + 71 │ 0.01 0.1 0.01 71 [0.322284] -13.0181 [-0.920708] [0.0211788] [10.6965] [-6.47561e-5] + 72 │ 0.01 0.1 0.01 72 [0.32222] -13.0224 [-0.619517] [-0.0428908] [10.6965] [0.000131143] + 73 │ 0.01 0.1 0.01 73 [0.322351] -13.0043 [-0.317985] [-0.0704002] [10.6965] [0.000215255] + 74 │ 0.01 0.1 0.01 74 [0.322566] -13.0609 [-0.460166] [-0.109377] [10.6965] [0.000334429] + 75 │ 0.01 0.1 0.01 75 [0.3229] -12.9247 [-0.738369] [-0.172276] [10.6965] [0.00052675] + 76 │ 0.01 0.1 0.01 76 [0.323427] -13.0772 [-0.548139] [-0.209862] [10.6965] [0.000641673] + 77 │ 0.01 0.1 0.01 77 [0.324069] -13.102 [-0.227004] [-0.211576] [10.6965] [0.000646914] + 78 │ 0.01 0.1 0.01 78 [0.324716] -12.9948 [-0.16287] [-0.206706] [10.6965] [0.000632022] + 79 │ 0.01 0.1 0.01 79 [0.325348] -13.0316 [-0.117584] [-0.197794] [10.6965] [0.000604772] + 80 │ 0.01 0.1 0.01 80 [0.325952] -13.0189 [-0.46716] [-0.22473] [10.6965] [0.000687133] + 81 │ 0.01 0.1 0.01 81 [0.32664] -13.0443 [-0.257545] [-0.228012] 
[10.6965] [0.000697166] + 82 │ 0.01 0.1 0.01 82 [0.327337] -13.0766 [-0.133085] [-0.218519] [10.6965] [0.000668142] + 83 │ 0.01 0.1 0.01 83 [0.328005] -13.0219 [-0.160378] [-0.212705] [10.6965] [0.000650365] + 84 │ 0.01 0.1 0.01 84 [0.328655] -13.0371 [-0.00947678] [-0.192382] [10.6965] [0.000588226] + 85 │ 0.01 0.1 0.01 85 [0.329243] -12.9564 [-0.105346] [-0.183679] [10.6965] [0.000561614] + 86 │ 0.01 0.1 0.01 86 [0.329805] -13.0115 [0.0302974] [-0.162281] [10.6965] [0.000496189] + 87 │ 0.01 0.1 0.01 87 [0.330301] -13.0708 [0.169606] [-0.129092] [10.6965] [0.000394711] + 88 │ 0.01 0.1 0.01 88 [0.330696] -13.0239 [-0.0109219] [-0.117275] [10.6965] [0.00035858] + 89 │ 0.01 0.1 0.01 89 [0.331055] -13.0459 [-0.131932] [-0.118741] [10.6965] [0.000363061] + 90 │ 0.01 0.1 0.01 90 [0.331418] -12.9393 [-0.141194] [-0.120986] [10.6965] [0.000369927] + 91 │ 0.01 0.1 0.01 91 [0.331788] -13.0257 [-0.113008] [-0.120188] [10.6965] [0.000367487] + 92 │ 0.01 0.1 0.01 92 [0.332155] -12.957 [-0.186209] [-0.126791] [10.6965] [0.000387674] + 93 │ 0.01 0.1 0.01 93 [0.332543] -13.0154 [0.201128] [-0.0939986] [10.6965] [0.000287409] + 94 │ 0.01 0.1 0.01 94 [0.33283] -12.8957 [-0.622455] [-0.146844] [10.6965] [0.00044899] + 95 │ 0.01 0.1 0.01 95 [0.333279] -12.9735 [0.107338] [-0.121426] [10.6965] [0.000371271] + 96 │ 0.01 0.1 0.01 96 [0.33365] -13.0728 [0.212622] [-0.0880212] [10.6965] [0.000269133] + 97 │ 0.01 0.1 0.01 97 [0.33392] -13.0786 [0.183407] [-0.0608785] [10.6965] [0.000186141] + 98 │ 0.01 0.1 0.01 98 [0.334106] -13.0088 [0.109653] [-0.0438253] [10.6965] [0.000134] + 99 │ 0.01 0.1 0.01 99 [0.33424] -13.0209 [0.327876] [-0.00665525] [10.6965] [2.0349e-5] + 100 │ 0.01 0.1 0.01 100 [0.33426] -13.0733 [0.460508] [0.040061] [10.6965] [-0.00012249] + 101 │ 0.01 0.1 0.01 101 [0.334138] -12.9598 [0.517111] [0.087766] [10.6965] [-0.000268353] +```` + +To plot it, we can either work with the `DataFrame` or access the fields directly + +````julia +begin + fig = Figure() + ax1 = 
Axis(fig[1, 1]; xlabel=L"i", ylabel=L"E_v") + ax2 = Axis(fig[2, 1]; xlabel=L"i", ylabel=L"p") + + lines!(ax1, grad_df.iter, grad_df.value) + lines!(ax2, first.(grad_result.param)) + fig +end +```` +![](docs/README-41.png) + +We see from the plot that the value of the energy is fluctiating around what appears to be +the minimum. While the parameter estimate here is probably good enough for importance +sampling, we can refine the result by creating a new `KineticVQMC` structure with +increased samples and use it to refine the result. + +````julia +qmc2 = KineticVQMC(H, ansatz; samples=1e6) +grad_result2 = amsgrad(qmc2, grad_result.param[end]) +```` + +```` +GradientDescentResult + iterations: 101 + converged: false (iterations) + last value: -13.01330709959126 + last params: [0.33239805300127506] +```` + +Now, let's plot the refined result next to the minimum found by Optim.jl + +````julia +begin + fig = Figure() + ax1 = Axis(fig[1, 1]; xlabel=L"i", ylabel=L"E_v") + ax2 = Axis(fig[2, 1]; xlabel=L"i", ylabel=L"p") + + lines!(ax1, grad_result2.value) + hlines!(ax1, [opt_lbgfs.minimum]; linestyle=:dot) + lines!(ax2, first.(grad_result2.param)) + hlines!(ax2, opt_lbgfs.minimizer; linestyle=:dot) + fig +end +```` +![](docs/README-45.png) + +### Importance sampling + +Finally, we have a good estimate for the parameter to use with importance +sampling. Gutzwiller.jl provides `AnsatzSampling`, which is similar to +`GutzwillerSampling` from Rimu, but can be used with different ansatze. 
+ +````julia +p = grad_result.param[end] +G = AnsatzSampling(H, ansatz, p) +```` + +```` +AnsatzSampling{Float64, 1, GutzwillerAnsatz{BoseFS{10, 10, BitString{19, 1, UInt32}}, Float64, HubbardReal1D{Float64, BoseFS{10, 10, BitString{19, 1, UInt32}}, 2.0, 1.0}}, HubbardReal1D{Float64, BoseFS{10, 10, BitString{19, 1, UInt32}}, 2.0, 1.0}}(HubbardReal1D(fs"|1 1 1 1 1 1 1 1 1 1⟩"; u=2.0, t=1.0), GutzwillerAnsatz{BoseFS{10, 10, BitString{19, 1, UInt32}}, Float64, HubbardReal1D{Float64, BoseFS{10, 10, BitString{19, 1, UInt32}}, 2.0, 1.0}}(HubbardReal1D(fs"|1 1 1 1 1 1 1 1 1 1⟩"; u=2.0, t=1.0)), [0.3341375361174239]) +```` + +This can now be used with FCIQMC. Let's compare the importance sampled time series to a +non-importance sampled one. + +````julia + +prob_standard = ProjectorMonteCarloProblem(H; target_walkers=15, last_step=2000) +sim_standard = solve(prob_standard) +shift_estimator(sim_standard; skip=1000) +```` + +```` +BlockingResult{Float64} + mean = -10.14 ± 0.59 + with uncertainty of ± 0.07654551002905478 + from 31 blocks after 5 transformations (k = 6). + +```` + +````julia +prob_sampled = ProjectorMonteCarloProblem(G; target_walkers=15, last_step=2000) +sim_sampled = solve(prob_sampled) +shift_estimator(sim_sampled; skip=1000) +```` + +```` +BlockingResult{Float64} + mean = -12.36 ± 0.38 + with uncertainty of ± 0.0345891146330015 + from 62 blocks after 4 transformations (k = 5). + +```` + +Note that the lower energy estimate in the sampled case is probably due to a reduced +population control bias. The effect importance sampling has on the statistic can be more +dramatic for larger systems and beter choices of anstaze. 
+
+---
+
+*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
diff --git a/docs/Project.toml b/docs/Project.toml
new file mode 100644
index 0000000..a6fc604
--- /dev/null
+++ b/docs/Project.toml
@@ -0,0 +1,4 @@
+[deps]
+CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
+LaTeXStrings = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
+Rimu = "c53c40cc-bd84-11e9-2cf4-a9fde2b9386e"
diff --git a/docs/README-21.png b/docs/README-21.png
new file mode 100644
index 0000000..97381b1
Binary files /dev/null and b/docs/README-21.png differ
diff --git a/docs/README-41.png b/docs/README-41.png
new file mode 100644
index 0000000..f956ec4
Binary files /dev/null and b/docs/README-41.png differ
diff --git a/docs/README.jl b/docs/README.jl
new file mode 100644
index 0000000..1148730
--- /dev/null
+++ b/docs/README.jl
@@ -0,0 +1,191 @@
+# # Gutzwiller
+# _importance sampling and variational Monte Carlo for
+# [Rimu.jl](https://github.com/joachimbrand/Rimu.jl)_
+#
+# ## Installation
+#
+# Gutzwiller.jl is not yet registered. To install it, run
+#
+# ```julia
+# import Pkg; Pkg.add(url="https://github.com/mtsch/Gutzwiller.jl")
+# ```
+#

+# ## Usage guide
+#
+#
+using Rimu
+using Gutzwiller
+using CairoMakie
+using LaTeXStrings
+
+# First, we set up a starting address and a Hamiltonian
+
+addr = near_uniform(BoseFS{10,10})
+H = HubbardReal1D(addr; u=2.0)
+
+# In this example, we'll set up a Gutzwiller ansatz to importance-sample the Hamiltonian.
+
+ansatz = GutzwillerAnsatz(H)
+
+# An ansatz is a struct that given a set of parameters and an address, produces the value
+# it would have if it was a vector.
+
+ansatz(addr, [1.0])
+
+# In addition, the function `val_and_grad` can be used to compute both the value and its
+# gradient with respect to the parameters.
+
+val_and_grad(ansatz, addr, [1.0])
+
+# ### Deterministic optimization
+
+# For effective importance sampling, we want the ansatz to be as good of an approximation to
+# the ground state of the Hamiltonian as possible. As the value of the Rayleigh quotient of
+# a given ansatz is always larger than the Hamiltonian's ground state energy, we can use
+# an optimization algorithm to find the parameters that minimize its energy.
+
+# When the basis of the Hamiltonian is small enough to fit into memory, it's best to use the
+# `LocalEnergyEvaluator`
+
+le = LocalEnergyEvaluator(H, ansatz)
+
+# which can be used to evaluate the value of the Rayleigh quotient (or its gradient) for
+# given parameters. In the case of the Gutzwiller ansatz, there is only one parameter.
+
+le([1.0])
+
+# Like before, we can use `val_and_grad` to also evaluate its gradient.
+
+val_and_grad(le, [1.0])
+
+# Now, let's plot the energy landscape for this particular case
+
+begin
+    fig = Figure()
+    ax = Axis(fig[1, 1]; xlabel=L"p", ylabel=L"E")
+    ps = range(0, 2; length=100)
+    Es = [le([p]) for p in ps]
+    lines!(ax, ps, Es)
+    fig
+end
+
+# To find the minimum, pass `le` to `optimize` from Optim.jl
+
+using Optim
+
+opt_nelder = optimize(le, [1.0])
+
+# To take advantage of the gradients, wrap the evaluator in `Optim.only_fg!`. This will
+# usually reduce the number of steps needed to reach the minimum.
+
+opt_lbgfs = optimize(Optim.only_fg!(le), [1.0])
+
+# We can inspect the parameters and the value at the minimum as
+
+opt_lbgfs.minimizer, opt_lbgfs.minimum
+
+# ### Variational quantum Monte Carlo
+
+# When the Hamiltonian is too large to store its full basis in memory, we can use
+# variational QMC to sample addresses from the Hilbert space and evaluate their energy
+# at the same time. An important parameter we have to tune is the number `steps`. More steps
+# will give us a better approximation of the energy, but take longer to evaluate.
+# Not taking enough samples can also result in producing a biased result. +# Consider the following. + +p0 = [1.0] +kinetic_vqmc(H, ansatz, p0; steps=1e2) #hide +@time kinetic_vqmc(H, ansatz, p0; steps=1e2) + +# + +@time kinetic_vqmc(H, ansatz, p0; steps=1e5) + +# + +@time kinetic_vqmc(H, ansatz, p0; steps=1e7) + +# For this simple example, `1e2` steps gives an energy that is significantly higher, while +# `1e7` takes too long. `1e5` seems to work well enough. For more convenient evaluation, we +# wrap VQMC into a struct that behaves much like the `LocalEnergyEvaluator`. + +qmc = KineticVQMC(H, ansatz; samples=1e4) + +# + +qmc([1.0]), le([1.0]) + +# Because the output of this procedure is noisy, optimizing it with Optim.jl will not work. +# However, we can use a stochastic gradient descent (in this case +# [AMSGrad](https://paperswithcode.com/method/amsgrad)). + +grad_result = amsgrad(qmc, [1.0]) + +# While `amsgrad` attempts to determine if the optimization converged, it will generally not +# detect convergence due to the noise in the QMC evaluation. The best way to determine +# convergence is to plot the results. `grad_result` can be converted to a `DataFrame`. + +grad_df = DataFrame(grad_result) + +# To plot it, we can either work with the `DataFrame` or access the fields directly + +begin + fig = Figure() + ax1 = Axis(fig[1, 1]; xlabel=L"i", ylabel=L"E_v") + ax2 = Axis(fig[2, 1]; xlabel=L"i", ylabel=L"p") + + lines!(ax1, grad_df.iter, grad_df.value) + lines!(ax2, first.(grad_result.param)) + fig +end + +# We see from the plot that the value of the energy is fluctiating around what appears to be +# the minimum. While the parameter estimate here is probably good enough for importance +# sampling, we can refine the result by creating a new `KineticVQMC` structure with +# increased samples and use it to refine the result. 
+
+qmc2 = KineticVQMC(H, ansatz; samples=1e6)
+grad_result2 = amsgrad(qmc2, grad_result.param[end])
+
+# Now, let's plot the refined result next to the minimum found by Optim.jl
+
+begin
+    fig = Figure()
+    ax1 = Axis(fig[1, 1]; xlabel=L"i", ylabel=L"E_v")
+    ax2 = Axis(fig[2, 1]; xlabel=L"i", ylabel=L"p")
+
+    lines!(ax1, grad_result2.value)
+    hlines!(ax1, [opt_lbgfs.minimum]; linestyle=:dot)
+    lines!(ax2, first.(grad_result2.param))
+    hlines!(ax2, opt_lbgfs.minimizer; linestyle=:dot)
+    fig
+end
+
+# ### Importance sampling
+
+# Finally, we have a good estimate for the parameter to use with importance
+# sampling. Gutzwiller.jl provides `AnsatzSampling`, which is similar to
+# `GutzwillerSampling` from Rimu, but can be used with different ansatze.
+
+p = grad_result.param[end]
+G = AnsatzSampling(H, ansatz, p)
+
+# This can now be used with FCIQMC. Let's compare the importance sampled time series to a
+# non-importance sampled one.
+
+using Random; Random.seed!(1337) #hide
+
+prob_standard = ProjectorMonteCarloProblem(H; target_walkers=15, last_step=2000)
+sim_standard = solve(prob_standard)
+shift_estimator(sim_standard; skip=1000)
+
+#
+
+prob_sampled = ProjectorMonteCarloProblem(G; target_walkers=15, last_step=2000)
+sim_sampled = solve(prob_sampled)
+shift_estimator(sim_sampled; skip=1000)
+
+# Note that the lower energy estimate in the sampled case is probably due to a reduced
+# population control bias. The effect importance sampling has on the statistic can be more
+# dramatic for larger systems and better choices of ansatze.
diff --git a/docs/make.jl b/docs/make.jl new file mode 100644 index 0000000..dc46425 --- /dev/null +++ b/docs/make.jl @@ -0,0 +1,10 @@ +for fn in EXAMPLES_FILES + fnmd_full = Literate.markdown( + joinpath(EXAMPLES_INPUT, fn), EXAMPLES_OUTPUT; + documenter = true, execute = true + ) + ex_num, margintitle = parse_header(fnmd_full) + push!(EXAMPLES_NUMS, ex_num) + fnmd = fn[1:end-2]*"md" # full path does not work + push!(EXAMPLES_PAIRS, margintitle => joinpath("generated", fnmd)) +end diff --git a/src/Gutzwiller.jl b/src/Gutzwiller.jl index 163c032..f112dce 100644 --- a/src/Gutzwiller.jl +++ b/src/Gutzwiller.jl @@ -13,7 +13,7 @@ using SpecialFunctions using Tables using VectorInterface -import Rimu.Hamiltonians.extended_bose_hubbard_interaction as ebh +import Rimu.Hamiltonians.extended_hubbard_interaction as ebh export num_parameters, val_and_grad include("ansatz/abstract.jl") @@ -25,9 +25,10 @@ export ExtendedGutzwillerAnsatz include("ansatz/extgutzwiller.jl") export MultinomialAnsatz include("ansatz/multinomial.jl") +export JastrowAnsatz, RelativeJastrowAnsatz +include("ansatz/jastrow.jl") export ExtendedAnsatz include("ansatz/combination.jl") -export kinetic_vqmc, kinetic_vqmc!, local_energy_estimator, KineticVQMC export LocalEnergyEvaluator include("localenergy.jl") @@ -39,7 +40,7 @@ include("kinetic-vqmc/state.jl") include("kinetic-vqmc/qmc.jl") include("kinetic-vqmc/wrapper.jl") -# Not done yet. +export gradient_descent, amsgrad include("amsgrad.jl") diff --git a/src/amsgrad.jl b/src/amsgrad.jl index f8ea905..161bc40 100644 --- a/src/amsgrad.jl +++ b/src/amsgrad.jl @@ -1,46 +1,197 @@ -function amsgrad(f, params; kwargs...) +""" + GradientDescentResult + +Return type of [`adaptive_gradient_descent`](@ref) variants such as +[`gradient_descent`](@ref) and [`amsgrad`](@ref). + +Can be used as a `Table` from Tables.jl. 
+""" +struct GradientDescentResult{T,P<:SVector{<:Any,T},F} + fun::F + hyperparameters::NamedTuple + initial_params::P + + iterations::Int + converged::Bool + reason::String + + param::Vector{P} + value::Vector{T} + gradient::Vector{P} + first_moment::Vector{P} + second_moment::Vector{P} + param_delta::Vector{P} +end + +function Base.show(io::IO, r::GradientDescentResult) + println(io, "GradientDescentResult") + println(io, " iterations: ", r.iterations) + println(io, " converged: ", r.converged, " (", r.reason, ")") + println(io, " last value: ", r.value[end]) + print(io, " last params: ", r.param[end]) +end + +Tables.istable(::Type{<:GradientDescentResult}) = true +Tables.rowaccess(::Type{<:GradientDescentResult}) = true +Tables.rows(r::GradientDescentResult) = GradientDescentResultRows(r) +function Tables.Schema(r::GradientDescentResult{T,P}) where {T,P} + hyper_keys = keys(r.hyperparameters) + hyper_types = typeof.(value.(r.hyperparameters)) + + return Tables.Schema( + (hyper_keys..., :iter, :param, :value, :gradient, + :first_moment, :second_moment, :param_delta), + (hyper_types..., Int, Int, P, T, P, P, P, P) + ) +end + +struct GradientDescentResultRows{T,P,F} <: AbstractVector{NamedTuple} + result::GradientDescentResult{T,P,F} +end +function Base.getindex(rows::GradientDescentResultRows, i) + r = rows.result + + return (; r.hyperparameters..., + iter=i, + param=r.param[i], + value=r.value[i], + gradient=r.gradient[i], + first_moment=r.first_moment[i], + second_moment=r.second_moment[i], + param_delta=r.param_delta[i], + ) +end +function Base.size(rows::GradientDescentResultRows) + return (length(rows.result.param), 1) +end + +""" + adaptive_gradient_descent( + φ, ψ, f, p_init::P; + maxiter=100, verbose=true, α=0.01, + grad_tol=√eps(T), param_tol=√eps(T), val_tol=√(eps(T)), + kwargs... + ) + +For immediately useful versions of this function, see [`gradient_descent`] and [`amsgrad`]. +Returns [`GradientDescentResult`](@ref). 
+""" +function adaptive_gradient_descent(φ, ψ, f, params; kwargs...) params_svec = SVector{length(params)}(params) - return amsgrad(f, params_svec; kwargs...) + return adaptive_gradient_descent(φ, ψ, f, params_svec; kwargs...) end -function amsgrad( - f, params::SVector{N,T}; - maxiter=100, α=0.01, β1=0.1, β2=β1^2, verbose=true, - gtol=√eps(Float64), - ptol=√eps(Float64), -) where {N,T} - df = DataFrame() +function adaptive_gradient_descent( + φ, ψ, f, p_init::P; + maxiter=100, verbose=true, α=0.01, + grad_tol=√eps(T), param_tol=√eps(T), val_tol=√(eps(T)), + kwargs... +) where {N,T,P<:SVector{N,T}} first_moment = zeros(SVector{N,T}) second_moment = zeros(SVector{N,T}) - - val, grad = val_and_grad(f, params) + val, grad = val_and_grad(f, p_init) first_moment = grad + old_val = Inf - for iter in 1:maxiter - val, grad = val_and_grad(f, params) - first_moment = (1 - β1) * first_moment + β1 * grad - second_moment = max.((1 - β2) * second_moment + β2 * grad.^2, second_moment) - Δparams = α .* first_moment ./ sqrt.(second_moment) - - if verbose - print(stderr, "Step $iter:") - print(stderr, " y: ", round(val, sigdigits=5)) - print(stderr, ", x: ", round.(params, sigdigits=5)) - print(stderr, ", Δx: ", round.(Δparams, sigdigits=5)) - println(stderr, ", ∇: ", round.(grad, sigdigits=5)) - end + iter = 0 + params = P[] + values = T[] + gradients = P[] + first_moments = P[] + second_moments = P[] + param_deltas = P[] + converged = false + reason = "iterations" - push!(df, (; iter, val, grad, params, Δparams)) - params -= Δparams + p = p_init - if norm(Δparams) < ptol + verbose && (prog = Progress(maxiter)) + + while iter ≤ maxiter + iter += 1 + val, grad = val_and_grad(f, p) + first_moment = φ(first_moment, grad; kwargs...) + second_moment = ψ(second_moment, grad; kwargs...) 
+ + δp = -α * first_moment ./ .√second_moment + δval = old_val - val + old_val = val + + push!(params, p) + push!(values, val) + push!(gradients, grad) + push!(first_moments, first_moment) + push!(second_moments, second_moment) + push!(param_deltas, δp) + + if norm(δp) < param_tol + verbose && @info "Converged (params)" + reason = "params" + converged = true break end - if norm(grad) < gtol + if norm(grad) < grad_tol + verbose && @info "Converged (grad)" + reason = "gradient" + converged = true break end + if abs(δval) < val_tol + verbose && @info "Converged (value)" + reason = "value" + converged = true + break + end + + p = p + δp + + verbose && next!( + prog; showvalues=(((:iter, iter), (:value, val), (:param, p))) + ) end - return df + iter == maxiter && verbose && @info "Aborted (maxiter)" + + verbose && finish!(prog) + + return GradientDescentResult( + f, (; α, kwargs...), p_init, length(params), converged, reason, + params, values, gradients, first_moments, second_moments, param_deltas, + ) +end + +""" + gradient_descent( + f, p0; + maxiter=100, verbose=true, α=0.01, + grad_tol=√eps(T), param_tol=√eps(T), val_tol=√(eps(T)), + ) + +Vanilla gradient descent on function `f` with initial parameters `p0`. + +See also [`adaptive_gradient_descent`](@ref). Returns [`GradientDescentResult`](@ref). +""" +function gradient_descent(f, params; kwargs...) + φ(m1, g; _...) = g + ψ(m2, g; _...) = ones(typeof(g)) + return adaptive_gradient_descent(φ, ψ, f, params; kwargs...) +end + +""" + amsgrad( + f, p0; + maxiter=100, verbose=true, α=0.01, β1=0.1, β2=0.01, + grad_tol=√eps(T), param_tol=√eps(T), val_tol=√(eps(T)), + ) + +[AMSGrad](https://paperswithcode.com/method/amsgrad) on function `f` with initial parameters +`p0`. + +See also [`adaptive_gradient_descent`](@ref). Returns [`GradientDescentResult`](@ref). +""" +function amsgrad(f, params; β1=0.1, β2=0.01, kwargs...) + φ(m1, g; β1, _...) = (1 - β1) * m1 + β1 * g + ψ(m2, g; β2, _...) 
= max.((1 - β2) * m2 + β2 * g.^2, m2) + return adaptive_gradient_descent(φ, ψ, f, params; β1, β2, kwargs...) end diff --git a/src/ansatz/jastrow.jl b/src/ansatz/jastrow.jl new file mode 100644 index 0000000..2106e8d --- /dev/null +++ b/src/ansatz/jastrow.jl @@ -0,0 +1,107 @@ +using Rimu.Hamiltonians: circshift_dot +# TODO: geometry for relative version, multicomponent support for both. + +""" + JastrowAnsatz(hamiltonian) <: AbstractAnsatz + +```math +J_i = exp(-∑_{k=1}^M ∑_{l=k}^M p_{k,l} n_k n_l) +``` + +With translationally invariant Hamiltonians, use [`RelativeJastrowAnsatz`](@ref) instead. +""" +struct JastrowAnsatz{A,T,N,H} <: AbstractAnsatz{A,T,N} + hamiltonian::H +end + +function JastrowAnsatz(hamiltonian) + address = starting_address(hamiltonian) + @assert address isa SingleComponentFockAddress + N = num_modes(address) * (num_modes(address) + 1) ÷ 2 + return JastrowAnsatz{typeof(address),Float64,N,typeof(hamiltonian)}(hamiltonian) +end + +Rimu.build_basis(j::JastrowAnsatz) = build_basis(j.hamiltonian) + +function val_and_grad(j::JastrowAnsatz, addr, params) + o = onr(addr) + M = num_modes(addr) + + val = 0.0 + grad = zeros(typeof(params)) + + p = 0 + for i in 1:M, j in i:M + p += 1 + + onproduct = o[i] * o[j] + local_val = params[p] * onproduct + val += local_val + grad = setindex(grad, -onproduct, p) + end + val = exp(-val) + grad = grad .* val + + return val, grad +end +function (j::JastrowAnsatz)(addr, params) + o = onr(addr) + M = num_modes(addr) + val = 0.0 + p = 0 + + for i in 1:M, j in i:M + p += 1 + + onproduct = o[i] * o[j] + val += params[p] * onproduct + end + return exp(-val) +end + +""" + RelativeJastrowAnsatz(hamiltonian) <: AbstractAnsatz + +For a translationally invariant Hamiltonian, this is equivalent to [`JastrowAnsatz`](@ref), +but has fewer parameters. 
+
+```math
+R_i = exp(-∑_{d=0}^{⌈M/2⌉-1} p_d ∑_{k=1}^{M} n_k n_{k + d})
+```
+
+"""
+struct RelativeJastrowAnsatz{A,T,N,H} <: AbstractAnsatz{A,T,N}
+    hamiltonian::H
+end
+
+function RelativeJastrowAnsatz(hamiltonian)
+    address = starting_address(hamiltonian)
+    @assert address isa SingleComponentFockAddress
+    N = cld(num_modes(address), 2)
+    return RelativeJastrowAnsatz{typeof(address),Float64,N,typeof(hamiltonian)}(hamiltonian)
+end
+
+Rimu.build_basis(rj::RelativeJastrowAnsatz) = build_basis(rj.hamiltonian)
+
+function val_and_grad(rj::RelativeJastrowAnsatz, addr, params)
+    o = onr(addr)
+    M = num_parameters(rj)
+
+    products = ntuple(i -> circshift_dot(o, o, i - 1), Val(M))
+    val = sum(1:M) do i
+        params[i] * products[i]
+    end
+    val = exp(-val)
+    grad = -SVector{M,Float64}(products .* val)
+
+    return val, grad
+end
+function (rj::RelativeJastrowAnsatz)(addr, params)
+    o = onr(addr)
+    M = num_parameters(rj)
+
+    val = sum(1:M) do i
+        params[i] * circshift_dot(o, o, i - 1)
+    end
+    return exp(-val)
+end
diff --git a/src/kinetic-vqmc/qmc.jl b/src/kinetic-vqmc/qmc.jl
index bbc0d45..ad22382 100644
--- a/src/kinetic-vqmc/qmc.jl
+++ b/src/kinetic-vqmc/qmc.jl
@@ -17,25 +17,31 @@ function kinetic_sample!(prob_buffer, hamiltonian, ansatz, params, addr1)
     resize!(prob_buffer, length(offdiags))
 
     val1, grad1 = val_and_grad(ansatz, addr1, params)
-    local_energy_num = diagonal_element(hamiltonian, addr1) * val1
+    diag = diagonal_element(hamiltonian, addr1)
+    local_energy_num = diag * val1
+
     residence_time_denom = 0.0
     for (i, (addr2, melem)) in enumerate(offdiags)
-        val2, _ = val_and_grad(ansatz, addr2, params)
+        val2 = ansatz(addr2, params)
         residence_time_denom += abs(val2)
         local_energy_num += melem * val2
         prob_buffer[i] = residence_time_denom
     end
     residence_time = abs(val1) / residence_time_denom
+    if !isfinite(residence_time)
+        error("Infinite residence time at $params")
+    end
     local_energy = local_energy_num / val1
+    gradient = grad1 / val1
     chosen = 
pick_random_from_cumsum(prob_buffer) new_addr, _ = offdiags[chosen] - return new_addr, residence_time, local_energy, grad1 + return new_addr, residence_time, local_energy, gradient end """ diff --git a/src/kinetic-vqmc/state.jl b/src/kinetic-vqmc/state.jl index b58cd52..eb56176 100644 --- a/src/kinetic-vqmc/state.jl +++ b/src/kinetic-vqmc/state.jl @@ -43,7 +43,7 @@ end function val_and_grad(res::KineticVQMCWalkerState) weights = FrequencyWeights(res.residence_times) val = mean(res.local_energies, weights) - grads = res.grad_ratios .* (res.local_energies .- val) + grads = res.grad_ratios .* (res.local_energies .- val)# ./ weights grad = 2 * mean(grads, weights) return val, grad diff --git a/test/qmc.jl b/test/qmc.jl index ac2f03f..bf25b7d 100644 --- a/test/qmc.jl +++ b/test/qmc.jl @@ -1,3 +1,9 @@ +using Gutzwiller +using KrylovKit +using Random +using Rimu +using Test + @testset "qmc" begin Random.seed!(17)