From f8f26eb0791a7ec0bf2aba70f29824229672a10f Mon Sep 17 00:00:00 2001 From: ThummeTo <83663542+ThummeTo@users.noreply.github.com> Date: Mon, 24 Jul 2023 15:35:12 +0200 Subject: [PATCH] V0.10.4 (#92) * minor fix in scheduler * minor fix scheduler * minor scheduler fix * scheduler fix * minor fix scheduler * changed dispatch for batch loss * scheduler... * intermediate save * intermediate tutorial * modified action * minor adjustments * tutorial 1st * minor mod --- .github/workflows/Example.yml | 4 +- Project.toml | 12 +- .../img/juliacon_2023/neuralfmu_topology.png | Bin 0 -> 59145 bytes docs/src/examples/overview.md | 11 +- examples/src/.gitignore | 3 +- examples/src/juliacon_2023.ipynb | 783 ++++++++++++++++++ examples/src/juliacon_2023.jld2 | Bin 0 -> 2447 bytes .../src/juliacon_2023_distributedhyperopt.jl | 50 ++ examples/src/juliacon_2023_helpers.jl | 182 ++++ examples/src/mdpi_2022.ipynb | 2 +- src/FMIFlux.jl | 22 +- src/batch.jl | 258 ++++-- src/losses.jl | 223 ++++- src/neural.jl | 372 +++++++-- src/scheduler.jl | 37 +- test/batching.jl | 109 +++ test/hybrid_ME_dis.jl | 38 +- test/multi.jl | 4 +- test/runtests.jl | 18 +- test/supported_sensitivities.jl | 57 ++ 20 files changed, 1962 insertions(+), 223 deletions(-) create mode 100644 docs/src/examples/img/juliacon_2023/neuralfmu_topology.png create mode 100644 examples/src/juliacon_2023.ipynb create mode 100644 examples/src/juliacon_2023.jld2 create mode 100644 examples/src/juliacon_2023_distributedhyperopt.jl create mode 100644 examples/src/juliacon_2023_helpers.jl create mode 100644 test/batching.jl create mode 100644 test/supported_sensitivities.jl diff --git a/.github/workflows/Example.yml b/.github/workflows/Example.yml index b3f7464c..0bf5d90b 100644 --- a/.github/workflows/Example.yml +++ b/.github/workflows/Example.yml @@ -19,8 +19,8 @@ jobs: fail-fast: false matrix: os: [windows-latest] # , ubuntu-latest] - file-name: [advanced_hybrid_ME, modelica_conference_2021, simple_hybrid_CS, simple_hybrid_ME, mdpi_2022] - julia-version: ['1.8'] # 1.7 + file-name: [advanced_hybrid_ME, modelica_conference_2021, simple_hybrid_CS, simple_hybrid_ME, mdpi_2022, juliacon_2023] + julia-version: ['1.9'] julia-arch: [x64] experimental: [false] diff --git a/Project.toml b/Project.toml index 5355cb08..33f0e2c8 100644 --- a/Project.toml +++ b/Project.toml @@ -1,11 +1,12 @@ name = "FMIFlux" uuid = "fabad875-0d53-4e47-9446-963b74cae21f" -version = "0.10.3" +version = "0.10.4" [deps] ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" Colors = "5ae59095-9a9b-59fe-a467-6f913c188581" DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" +DifferentiableEigen = "73a20539-4e65-4dcb-a56d-dc20f210a01b" DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" FMIImport = "9fcbc62e-52a0-44e9-a616-1359a0008194" Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" @@ -13,7 +14,6 @@ Optim = "429524aa-4258-5aef-a3af-852621145aeb" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca" Requires = "ae029012-a4dd-5104-9daa-d747884805df" -SciMLSensitivity = "1ed8b502-d754-442c-8d5d-10ac956f44a1" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" ThreadPools = "b189fb0b-2eb5-4ed4-bc0c-d34c51242431" @@ -21,12 +21,12 @@ ThreadPools = "b189fb0b-2eb5-4ed4-bc0c-d34c51242431" ChainRulesCore = "1.16.0" Colors = "0.12.8" DiffEqCallbacks = "2.26.0" -DifferentialEquations = "7.7.0" -FMIImport = "0.15.6" -Flux = "0.13.16" +DifferentiableEigen = "0.2.0" +DifferentialEquations = "7.8.0" +FMIImport = "0.15.7" +Flux = "0.13.17" 
 Optim = "1.7.0"
 ProgressMeter = "1.7.0"
 Requires = "1.3.0"
-SciMLSensitivity = "7.31.0"
 ThreadPools = "2.1.1"
 julia = "1.6"

diff --git a/docs/src/examples/img/juliacon_2023/neuralfmu_topology.png b/docs/src/examples/img/juliacon_2023/neuralfmu_topology.png
new file mode 100644
index 0000000000000000000000000000000000000000..9c682923d23f9b7a18198076344b7eae3f094264
GIT binary patch
[59145 bytes of base85-encoded binary PNG data omitted - the new NeuralFMU topology figure]
z4Oz@(?y0Ux*}=P&H15Kd2dbj_$0VToH%lJ;(A&blpHtL*hy2*mq4pNRRPiwy88V62JHC1?m_iCJUR6KK-FkRnb{?)t}BCOM&YQqBF3m)91k;N@0HnT{|69|*Gu^pi>!*T zEb|gCG^I8)dCv0I5)ZL{Ga_vGyvOAo}e z`GN_lFfL|Q;%SD91*nYxbow?# z`7ODAdku?29B+E(2<0=<(M!Kpov$@uDN=MT7xjb|R5`uHpc_`?gB2Rx_(4krqNDSB zwGZ4VZ`KIvrKhu7%|f8A>tAX%FD!7G2gEnb!q=yw{jQI3NpAJk#8cxP4Lbg^4JVr< zCMnaE8o?(wzB6%xE>$b{VXdd;6OGS29!qgDHGUR8As2m`%siSw-Av*5W%h+9(oaL0 zo*LlHWstmc%l;XITRsxI8Y)%)qd`!z=gN&9=Tg+`D=xEZ&W=X0pd_NbUmtZ7$P<7Z zYqYyNL_kPL_7SYVvq7VVqse^UM?j}u`bm^!*dVRUrll?A2SZB+6I4phosRyjkhZfX@WPHH zud4DQ2%tsl4kB4hW~@E0J}WJKPSseh{bXQYJ`m68|7B z_R078iTtO~wj8UIAd~85M)^Odl4C}N_~Dec>$COq z8-2BcqYnwb9IXf`=j_aD4Q+=oSZ}*~8FYLAzPs{aDXbjLnql6Z2QzTmT?j_T z>~38Rl;}GC{0d-riQ`yapugX2iGjtM7M(nOVX)RC&%~)o#ir&e$UK(@Aa?)m>+Vqh zr1V;=ut74VKYtF(8BKy4W}cBB}h9=cIVUq z!+rx(zgJ{ktc&n|p?X}~jK3Yubu0KSqi34E#g;C48>h?wVolUPiPGo-_7IN#MXA9( zN*-`})ROD3lbwb@!C7aw8+CDEZS>&dbz+cfu|`|D9Vlk-K`$7XY&_?!_BPs8C)xe; zz-hJ53NL@t>~vcX&nCSf-loztikM(Xb^0d9tj{@H-E399khynkMzwy#an7=k86(c9 z^}VU>+LOs#ToX|gG@a$x;>^gvhw)Co5Zj+_<~Z~tbi(Ib?bb2-U3;%~UOCv0 zHV_48CH?9mA|LSomp%ZobDh1I93O=8uzI2)Bgw)6qg&Qd>ovy9Ooo{E(OMLVtH?seEit zWXvUt;l*+Y>ZZ5hl?@jYx8mB;_-&pfWej%QwuVr6`3mQ#!lZR>u;A(D@}@an?mF7{ z-{it$1R@vmF{mP;@Z}5hg<(#mjcdt98Hi(7afPEGiQ7<~Edm2%${P)#`h7xNtN!om z;~qqB|899OaqVO(2utJoWAQ0Rfjo|zyr!^c&X%ou(T%Kzo8FjLItm7EW$Nm;8fiuD zlPbbG3;lPJ=FkYThKM$;Y`4n{sl@9QNyp=Lq?eW*{pGbMl-Hvu5oZfe<54W)+HB>b zC2y39a}$SKc+3KWps6O&yA<=54TDBy%Z-^PMugH$J%7VxOGhvc-CIaMfLyIq*tgxW znJJZBQJJmKrx%;I3LCt~=s6p|KGq*z{a-u(<%e|qRTHjDhs$H{J1GP`eiJAvYa9C6 z!dG&Eq4B$mO^K6279MWjzcQ{Sd&j%1LiumgGq2Tlt(TS8N$-3YH)WB}afVY-z&$Lf zyME7N7uo*dIPlJ&7ffGn)N=waFz}OvWUC9)C3wD8H&5+R@f%K7WlR=p)d|_Pl=-~)pE+fqRhgF?*tXg&+`ler#f9{HYvsLs_Jkgki`Q1a z|M6tcu3ro7v!1X1o_R>rH)M&8xBcvr_P8}GzB_cCy0Lc42SeXC&y7-bf0p0<7+=3W zGU3wO*JrD~Up%>M(NXc@xZ_6CUo7h=-Bj{uYy63L<^2aNWZzx*=iuJQ8)M~OeG%SQ z;yE1fcYph&t$)v2uu4Z~=Uh1=ubSyzoao&yVfxYi{L8n0%xCdO@0v5ar|zPcw$6oh zydSypj_1!>bnVsTWrxH(je0Je{l772f3=CnSFhS1kAdrEj>y}t4v(90y9C%(o45VN z!spLU-FWu5W@;YieC7SSmd{vu;=<}^&S;g)2DjGrA%4d;g}q7$DyZ4@X;$@HhF!L> z(TD|`d7Mk%{$2g$-1GX2(kJ^fj~!{7{?V6fhk2a--nonS=ftj>aIQ`F7{gu3uS@3r zUElkw_@4b1`^V2`y*A!5yG6I}_RGZ+W~jW~7qM#3Tgg)u+L3|F)NTO>Rh*BNz16Z< zdTER6tV>Q05)9&&pP0V=b^iM`p?~(8n{xQ{%(8gAJZj>JsReE?h<1_Z~_E+`-Ucj;W|ADb&HLqkhV=YGF>e$IcBJ>GNs z<4Y}S|Ht_~-})jw^%BoUvmYhr&YtfFS2hQ3{HZQrd{>vdL9amDy$e!hw6K8ZrWqP0 zKr18xA<&K)AaH7e^b;IBpkvV-oS>bBK%jUK(z#Ypfes484Y_ax+`y68z3w#+V?&25 yP7Qy+X|Ca7=2}T6hUQJs6o_dcRa}MJkQeOLx1(JO*ZY@){N?HD=d#Wzp$P#0o`)>} literal 0 HcmV?d00001 diff --git a/docs/src/examples/overview.md b/docs/src/examples/overview.md index 0f4b013c..611a44e0 100644 --- a/docs/src/examples/overview.md +++ b/docs/src/examples/overview.md @@ -1,11 +1,11 @@ # Examples - Overview -This section discusses the included examples of the FMIFlux.jl library. So you can execute them on your machine and get detailed information about all of the steps. +This section discusses the included examples of the FMIFlux.jl library. You can execute them on your machine and get detailed information about all of the steps. If you require further information about the function calls, see [library functions](https://thummeto.github.io/FMIFlux.jl/dev/library/) section. For more information related to the setup and simulation of an FMU see [FMI.jl library](https://thummeto.github.io/FMI.jl/dev/). -The examples are primarily intended for users who work in the field of first principle and/or data driven modeling and are further interested in hybrid model building. 
-The examples show how to combine FMUs with machine learning and illustrates the advantages of this approach.
+The examples are intended for users who work in the field of first principle and/or data driven modeling and are further interested in hybrid model building.
+The examples show how to combine FMUs with machine learning ("NeuralFMU") and illustrate the advantages of this approach.
 
 ## Examples
 - [__Simple CS-NeuralFMU__](https://thummeto.github.io/FMIFlux.jl/dev/examples/simple_hybrid_CS/): Showing how to train a NeuralFMU in Co-Simulation-Mode.
@@ -13,5 +13,6 @@ The examples show how to combine FMUs with machine learning and illustrates the
 - [__Advanced ME-NeuralFMU__](https://thummeto.github.io/FMIFlux.jl/dev/examples/advanced_hybrid_ME/): Advanced training techniques for a ME-NeuralFMU.
 
 ## Advanced examples: Demo applications
-- [__Modelica Conference 2021__](https://thummeto.github.io/FMIFlux.jl/dev/examples/modelica_conference_2021/): Showing basics on how to train a NeuralFMU (Contribution for the *Modelica Conference 2021*).
-- [__Physics-enhanced NeuralODEs in real-world applications__](https://thummeto.github.io/FMIFlux.jl/dev/examples/mdpi_2022/): An example for a NeuralODE in a real world modeling scenario (Contribution in *MDPI Electronics 2022*).
+- [__JuliaCon 2023: Using NeuralODEs in real life applications__](https://thummeto.github.io/FMIFlux.jl/dev/examples/juliacon_2023/): An example for a NeuralODE in a real world engineering scenario.
+- [__MDPI 2022: Physics-enhanced NeuralODEs in real-world applications__](https://thummeto.github.io/FMIFlux.jl/dev/examples/mdpi_2022/): An example for a NeuralODE in a real world modeling scenario (Contribution in *MDPI Electronics 2022*).
+- [__Modelica Conference 2021: NeuralFMUs__](https://thummeto.github.io/FMIFlux.jl/dev/examples/modelica_conference_2021/): Showing basics on how to train a NeuralFMU (Contribution for the *Modelica Conference 2021*).
diff --git a/examples/src/.gitignore b/examples/src/.gitignore
index 741e8cb1..ae9c2156 100644
--- a/examples/src/.gitignore
+++ b/examples/src/.gitignore
@@ -1 +1,2 @@
-*.jld2
\ No newline at end of file
+params/
+*.png
\ No newline at end of file
diff --git a/examples/src/juliacon_2023.ipynb b/examples/src/juliacon_2023.ipynb
new file mode 100644
index 00000000..43fc8c99
--- /dev/null
+++ b/examples/src/juliacon_2023.ipynb
@@ -0,0 +1,783 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Using NeuralODEs in real life applications\n",
+    "-----\n",
+    "Tutorial by Tobias Thummerer | Last edit: 07-21-2023\n",
+    "\n",
+    "This workshop was held at the JuliaCon 2023 | 07-25-2023 | MIT (Boston, USA)\n",
+    "\n",
+    "Keywords: *#NeuralODE, #NeuralFMU, #PeNODE, #HybridModeling*\n",
+    "\n",
+    "## Introduction\n",
+    "NeuralODEs lead to amazing results in academic examples. But expectations are often disappointed as soon as one tries to adapt this concept to real life use cases. Bad convergence behavior, handling of discontinuities and/or instabilities are just some of the stumbling blocks that might pop up during the first steps. During the workshop, we want to show how to integrate real life industrial models into NeuralODEs using FMI and present sophisticated training strategies.\n",
+    "\n",
+    "This tutorial can be used in two ways:\n",
+    "1. As a single script, showing how a NeuralFMU can be set up and trained. Results can be loaded from a precomputed hyperparameter optimization.\n",
+    "2. As a module (see section *Optional: Organize as module*) together with the file `juliacon_2023_distributedhyperopt.jl` to perform your own distributed hyperparameter optimization."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## License"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Copyright (c) 2023 Tobias Thummerer, Lars Mikelsons\n",
+    "# Licensed under the MIT license.\n",
+    "# See LICENSE (https://github.com/thummeto/FMIFlux.jl/blob/main/LICENSE) file in the project root for details.\n",
+    "\n",
+    "# This workshop was held at the JuliaCon2023 @ MIT (Boston)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Optional: Organize as module\n",
+    "If you want, you can place all code inside a module named `NODE_Training`; this simplifies hyperparameter optimization."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# for hyperparameter optimization, place the code in a `module`\n",
+    "# just uncomment the following lines (and the one at the very end, too!)\n",
+    "#module NODE_Training \n",
+    "#using DistributedHyperOpt\n",
+    "#using DistributedHyperOpt.Distributed"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 1. Loading the Packages\n",
+    "Before we start modeling our NeuralODE, we load all required packages. If some packages are still missing, install them by typing `import Pkg; Pkg.add(\"[PKG-NAME]\")`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Loading the required libraries\n",
+    "using FMIFlux      # for NeuralFMUs\n",
+    "using FMI          # import FMUs into Julia \n",
+    "using FMIZoo       # a collection of demo models, including the VLDM\n",
+    "using FMIFlux.Flux # Machine Learning in Julia\n",
+    "\n",
+    "import JLD2   # data format for saving/loading parameters\n",
+    "\n",
+    "import Random # for fixing the random seed\n",
+    "using Plots   # plotting results\n",
+    "\n",
+    "# A variable indicating the hyperparameter run\n",
+    "HPRUN = 0 "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Besides the packages, we use another little script that includes some nice plotting functions written specially for this workshop."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# a helper file with some predefined functions to make \"things look nicer\", but are not really relevant to the topic\n",
+    "include(joinpath(@__DIR__, \"juliacon_2023_helpers.jl\"));"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Because notebooks can't handle progress bars, we disable *progress bar printing* - but feel free to enable it if you are using the code outside of a Jupyter notebook. The progress bar gives further helpful information, like the estimated remaining computation time for simulation and training."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# disable progress bars in Jupyter notebook\n",
+    "showProgress=false;"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 2. Loading FMU & Data\n",
+    "Before starting with hybrid modeling, we load the used training data and our FMU of the VLDM (vehicle longitudinal dynamics model). We simulate the FMU, plot the results and compare them to data.\n",
\n", + "\n", + "We start by loading in the data (training and validation) used in this tutorial from *FMIZoo.jl* - a container library for different system model FMUs and corresponding data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# sample length for data is 0.1s \n", + "dt = 0.1 \n", + "\n", + "# load data (training and validation) from FMIZoo.jl, gather simulation parameters for FMU\n", + "data = VLDM(:train, dt=dt) \n", + "data_validation = VLDM(:validate, dt=dt)\n", + "\n", + "# start (`tStart`) and stop time (`tStop`) for simulation, saving time points for ODE solver (`tSave`)\n", + "tStart = data.consumption_t[1]\n", + "tStop = data.consumption_t[end]\n", + "tSave = data.consumption_t\n", + "\n", + "# have a look on the FMU parameters (these are the file paths to the characteristic maps, remaining parameters are set to default by the FMU)\n", + "display(data.params)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After that, we load the FMU and have a look on its model meta data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# load our FMU of the VLDM (we take it from the FMIZoo.jl, exported with Dymola 2020x)\n", + "fmu = fmiLoad(\"VLDM\", \"Dymola\", \"2020x\"; type=:ME, logLevel=:info) # \"Log everything that might be interesting!\", default is `:warn`\n", + "\n", + "# let's have a look on the model meta data\n", + "fmiInfo(fmu)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Simulating is as easy as calling `fmiSimulate`. Note, that we are putting in the paramter dictionary `data.params` from above. This FMU has many events, these are detected and handled automatically by *FMI.jl*." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# let's run a simulation from `tStart` to `tStop`, use the parameters we just viewed for the simulation run\n", + "resultFMU = fmiSimulate(fmu, (tStart, tStop); parameters=data.params, showProgress=showProgress)\n", + "display(resultFMU)\n", + "\n", + "# Plot the simulation results\n", + "fig = plot(resultFMU) # Plot it, but this is a bit too much, so ...\n", + "fig = plot(resultFMU; stateIndices=6:6) # ... only plot the state #6 (the cumulative consumption) and ...\n", + "fig = plot(resultFMU; stateIndices=6:6, ylabel=\"Cumulative consumption [Ws]\", label=\"FMU\") # ... add some helpful labels!\n", + "\n", + "# further plot the (measurement) data values `consumption_val` and deviation between measurements `consumption_dev`\n", + "plot!(fig, data.cumconsumption_t, data.cumconsumption_val; label=\"Data\", ribbon=data.cumconsumption_dev, fillalpha=0.3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We do another simulation run and record the derivative values, to have a good starting point for scaling the pre- and post-processing layers." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# variables we want to manipulate - why we are picking exactly these three is shown a few lines later ;-)\n", + "manipulatedDerVars = [\"der(dynamics.accelerationCalculation.integrator.y)\",\n", + " \"der(dynamics.accelerationCalculation.limIntegrator.y)\",\n", + " \"der(result.integrator.y)\"]\n", + "# alternative: manipulatedDerVars = fmu.modelDescription.derivativeValueReferences[4:6]\n", + "\n", + "# reference simulation to record the derivatives \n", + "resultFMU = fmiSimulate(fmu, (tStart, tStop), parameters=data.params, recordValues=:derivatives, saveat=tSave, showProgress=showProgress) \n", + "manipulatedDerVals = fmiGetSolutionValue(resultFMU, manipulatedDerVars)\n", + "\n", + "# what happens without proper transformation between FMU- and ANN-domain?\n", + "plot(resultFMU.values.t, manipulatedDerVals[1,:][1]; label=\"vehicle velocity [m/s]\");\n", + "plot!(resultFMU.values.t, tanh.(manipulatedDerVals[1,:][1]); label=\"tanh(velocity)\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "That's why we need shift- and scale layers!\n", "\n", "Finally, the FMU is unloaded again to release memory." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# unload FMU\n", "fmiUnload(fmu)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 3. NeuralFMU setup" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "![NeuralFMU](https://github.com/thummeto/FMIFlux.jl/blob/main/docs/src/examples/img/juliacon_2023/neuralfmu_topology.png?raw=true)\n", "\n", "Equipped with data and a simulation model, we can set up the NeuralFMU as introduced in the workshop." 
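A central piece of the topology below is the `gates` layer: per physical quantity, it blends the FMU-computed derivative with the ANN-computed one using scalar weights. A minimal sketch of this idea (illustrative values, not the FMIFlux-internal implementation):

# sketch: gating two candidate signals for the same quantity (illustrative only)
gate_blend(fmu_val, ann_val, g_fmu, g_ann) = g_fmu * fmu_val + g_ann * ann_val

# with the FMU gate open (1.0) and the ANN gate closed (0.0), the untrained
# NeuralFMU reproduces the pure FMU dynamics - a safe initialization
gate_blend(2.5, 0.1, 1.0, 0.0)   # == 2.5, the FMU acceleration passes through unchanged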
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# function that builds the considered NeuralFMU on the basis of a given FMU (FMI-Version 2.0) `f`\n", + "function build_NFMU(f::FMU2)\n", + " \n", + " # pre- and post-processing\n", + " preProcess = ShiftScale(manipulatedDerVals)\n", + " preProcess.scale[:] *= 0.25 # add some additional \"buffer\"\n", + " postProcess = ScaleShift(preProcess; indices=2:3) \n", + "\n", + " # cache\n", + " cache = CacheLayer()\n", + " cacheRetrieve = CacheRetrieveLayer(cache)\n", + "\n", + " # the gates layer, signals are:\n", + " # (1) acceleration from FMU (gate=1.0 | open)\n", + " # (2) consumption from FMU (gate=1.0 | open)\n", + " # (3) acceleration from ANN (gate=0.0 | closed)\n", + " # (4) consumption from ANN (gate=0.0 | closed)\n", + " # the accelerations [1,3] and consumptions [2,4] are paired\n", + " gates = ScaleSum([1.0, 1.0, 0.0, 0.0], [[1,3], [2,4]]) \n", + "\n", + " # setup the NeuralFMU topology\n", + " net = Chain(x -> f(; x=x), # take `x`, put it into the FMU, retrieve `dx`\n", + " dx -> cache(dx), # cache `dx`\n", + " dx -> dx[4:6], # forward only dx[4, 5, 6]\n", + " preProcess, # pre-process `dx`\n", + " Dense(3, 32, tanh), # Dense Layer 3 -> 32 with `tanh` activation\n", + " Dense(32, 2, tanh), # Dense Layer 32 -> 2 with `tanh` activation \n", + " postProcess, # post process `dx`\n", + " dx -> cacheRetrieve(5:6, dx), # dynamics FMU | dynamics ANN\n", + " gates, # compute resulting dx from ANN + FMU\n", + " dx -> cacheRetrieve(1:4, dx)) # stack together: dx[1,2,3,4] from cache + dx[5,6] from gates\n", + "\n", + " # new NeuralFMU \n", + " neuralFMU = ME_NeuralFMU(f, net, (tStart, tStop); saveat=tSave)\n", + " neuralFMU.modifiedState = false # speed optimization (NeuralFMU state equals FMU state)\n", + " \n", + " return neuralFMU \n", + "end" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's test the NeuralFMU: First, load the FMU and build a NeuralFMU from it." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# load our FMU (we take one from the FMIZoo.jl, exported with Dymola 2020x)\n", "fmu = fmiLoad(\"VLDM\", \"Dymola\", \"2020x\"; type=:ME)\n", "\n", "# build NeuralFMU\n", "neuralFMU = build_NFMU(fmu)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Next, do a simulation for a given start state `x0` from *FMIZoo.jl*." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# get start state vector from data (FMIZoo)\n", "x0 = FMIZoo.getStateVector(data, tStart)\n", "\n", "# simulate and plot the (uninitialized) NeuralFMU\n", "resultNFMU = neuralFMU(x0, # the start state to solve the ODE\n", " (tStart, tStop); # the simulation range\n", " parameters=data.params, # the parameters for the VLDM\n", " showProgress=showProgress) # show progress (or not)\n", "\n", "display(resultNFMU) " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now, let's have a look at the cumulative consumption plot ..." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# plot the NeuralFMU, original FMU and data (cumulative consumption)\n", + "fig = plot(resultNFMU; stateIndices=6:6, stateEvents=false, timeEvents=false, label=\"NeuralFMU (untrained)\", ylabel=\"cumulative consumption [Ws]\")\n", + "plot!(fig, resultFMU; stateIndices=6:6, values=false, stateEvents=false, timeEvents=false, label=\"FMU\")\n", + "plot!(fig, data.cumconsumption_t, data.cumconsumption_val, label=\"Data\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Finally, unload the FMU and invalidate the NeuralFMU." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# unload FMU / invalidate NeuralFMU\n", "fmiUnload(fmu)\n", "neuralFMU = nothing" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 4. Training the NeuralFMU" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "An untrained NeuralFMU is not that impressive - so let's train it a bit. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# prepare training data \n", "train_t = data.consumption_t \n", "\n", "# data is required as \"array of arrays\" (multidimensional data)\n", "train_data = collect([d] for d in data.cumconsumption_val);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Next, the loss function is defined. The loss is computed on the basis of a given `solution` and `data`. Depending on the hyperparameter `LOSS`, either `:MAE` or `:MSE` is used to compute the loss. The hyperparameter `LASTWEIGHT` determines how much the last solution point is weighted against the remaining solution points. For example, a value of $0.3$ determines that the last point of the solution contributes $30\\%$ to the loss, whereas all remaining solution points contribute $70\\%$ in total." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "function _lossFct(solution::FMU2Solution, data::VLDM_Data, LOSS::Symbol, LASTWEIGHT::Real)\n", "\n", " if !solution.success\n", " @warn \"The solution process was not successful, trying to compute the loss anyway.\"\n", " end\n", "\n", " # determine the indices in the data array (sampled with 10Hz)\n", " ts = 1+round(Int, solution.states.t[1]/dt)\n", " te = 1+round(Int, solution.states.t[end]/dt)\n", " \n", " nfmu_cumconsumption = fmiGetSolutionState(solution, 6; isIndex=true)\n", " cumconsumption = data.cumconsumption_val[ts:te]\n", " cumconsumption_dev = data.cumconsumption_dev[ts:te]\n", "\n", " Δcumconsumption = 0.0\n", " if LOSS == :MAE\n", " Δcumconsumption = FMIFlux.Losses.mae_last_element_rel_dev(nfmu_cumconsumption, cumconsumption, cumconsumption_dev, LASTWEIGHT)\n", " elseif LOSS == :MSE\n", " Δcumconsumption = FMIFlux.Losses.mse_last_element_rel_dev(nfmu_cumconsumption, cumconsumption, cumconsumption_dev, LASTWEIGHT)\n", " else\n", " @assert false \"Unknown LOSS: `$(LOSS)`\"\n", " end\n", " \n", " return Δcumconsumption \n", "end" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Finally, the function `train!` is defined, which triggers a new training run for a given set of hyperparameters `hyper_params`, a training resource `ressource` and the current training index `ind`. 
The following hyperparameters are used:\n", "- `TRAINDUR` equals the given `ressource` and specifies the training duration (measured on data) in seconds. Hyperparameter optimizers like *Hyperband* use different resources while searching for a minimum.\n", "- `ETA` the update rate $\\eta$ of the *Adam* optimizer.\n", "- `BETA1` the first momentum coefficient $\\beta_1$ of the *Adam* optimizer.\n", "- `BETA2` the second momentum coefficient $\\beta_2$ of the *Adam* optimizer. \n", "- `BATCHDUR` the duration of a single batch element (length) in seconds.\n", "- `LASTWEIGHT` a weighting factor between the last solution point and all remaining solution points. \n", "- `SCHEDULER` an identifier for the batch scheduler, can be `:Sequential`, `:Random` or `:LossAccumulation`.\n", "- `LOSS` an identifier for the loss function to use, `:MAE` or `:MSE`." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# ressource = training time horizon (duration of data seen)\n", "function train!(hyper_params, ressource, ind)\n", "\n", " # make the runs deterministic by fixing the random seed\n", " Random.seed!(1234)\n", "\n", " # training duration (in seconds) equals the given ressource\n", " TRAINDUR = ressource\n", "\n", " # unpack the hyperparameters\n", " ETA, BETA1, BETA2, BATCHDUR, LASTWEIGHT, SCHEDULER, LOSS = hyper_params\n", "\n", " # compute the number of training steps TRAINDUR / BATCHDUR, but do at least one step\n", " steps = max(round(Int, TRAINDUR/BATCHDUR), 1) \n", "\n", " # print a bit of info\n", " @info \"--------------\\nStarting run $(ind) with parameters: $(hyper_params) and ressource $(ressource) doing $(steps) step(s).\\n--------------------\"\n", "\n", " # load our FMU (we take one from the FMIZoo.jl, exported with Dymola 2020x)\n", " fmu = fmiLoad(\"VLDM\", \"Dymola\", \"2020x\"; type=:ME) \n", "\n", " # build the NeuralFMU on the basis of the loaded FMU `fmu`\n", " neuralFMU = build_NFMU(fmu)\n", "\n", " # switch to a more efficient execution configuration, allocate only a single FMU instance, see:\n", " # https://thummeto.github.io/FMI.jl/dev/features/#Execution-Configuration\n", " fmu.executionConfig = FMI.FMIImport.FMU2_EXECUTION_CONFIGURATION_NOTHING\n", " c, _ = FMIFlux.prepareSolveFMU(neuralFMU.fmu, nothing, neuralFMU.fmu.type, true, false, false, false, true, data.params; x0=x0)\n", "\n", " # batch the data (time, targets), train only on model output index 6, plot batch elements\n", " batch = batchDataSolution(neuralFMU, # our NeuralFMU model\n", " t -> FMIZoo.getStateVector(data, t), # a function returning a start state for a given time point `t`, to determine start states for batch elements\n", " train_t, # data time points\n", " train_data; # data cumulative consumption \n", " batchDuration=BATCHDUR, # duration of one batch element\n", " indicesModel=6:6, # model indices to train on (6 equals the state `cumulative consumption`)\n", " plot=false, # don't show intermediate plots (try this outside of Jupyter)\n", " parameters=data.params, # use the parameters (map file paths) from *FMIZoo.jl*\n", " showProgress=showProgress) # show or don't show progress bar, as specified at the very beginning\n", "\n", " # limit the maximum number of solver steps to 1000 * BATCHDUR (longer batch elements get more steps)\n", " # this allows the NeuralFMU to do 10x more steps (average) than the original FMU, but more should not be 
tolerated (too stiff system)\n", " solverKwargsTrain = Dict{Symbol, Any}(:maxiters => round(Int, 1000*BATCHDUR)) \n", " \n", " # a smaller dispatch for our custom loss function, only taking the solution object\n", " lossFct = (solution::FMU2Solution) -> _lossFct(solution, data, LOSS, LASTWEIGHT)\n", "\n", " # selecting a scheduler for training\n", " scheduler = nothing\n", " if SCHEDULER == :Random\n", " # a scheduler that picks a random batch element\n", " scheduler = RandomScheduler(neuralFMU, batch; applyStep=1, plotStep=0)\n", " elseif SCHEDULER == :Sequential\n", " # a scheduler that picks one batch element after another (in chronological order)\n", " scheduler = SequentialScheduler(neuralFMU, batch; applyStep=1, plotStep=0)\n", " elseif SCHEDULER == :LossAccumulation\n", " # a scheduler that picks the element with the largest accumulated loss:\n", " # - after every training step, the accumulated loss for every batch element is increased by the current loss value \n", " # - when picking a batch element, the accumulated loss is reset to zero\n", " # - this promotes selecting elements with larger losses more often, but also prevents starving of elements with small losses\n", " scheduler = LossAccumulationScheduler(neuralFMU, batch, lossFct; applyStep=1, plotStep=0, updateStep=1)\n", " else \n", " @error \"Unknown SCHEDULER: `$(SCHEDULER)`.\"\n", " return nothing\n", " end\n", "\n", " # loss for training, do a simulation run on a batch element taken from the scheduler\n", " loss = p -> FMIFlux.Losses.loss(neuralFMU, # the NeuralFMU to simulate\n", " batch; # the batch to take an element from\n", " p=p, # the NeuralFMU training parameters (given as input)\n", " parameters=data.params, # the FMU parameters\n", " lossFct=lossFct, # our custom loss function\n", " batchIndex=scheduler.elementIndex, # the index of the batch element to take, determined by the chosen scheduler\n", " logLoss=true, # log losses after every evaluation\n", " showProgress=showProgress, # show progress bar (or don't)\n", " solverKwargsTrain...) 
# the solver kwargs defined above\n", "\n", " # gather the parameters from the NeuralFMU\n", " params = FMIFlux.params(neuralFMU)\n", "\n", " # initialize the scheduler \n", " initialize!(scheduler; parameters=data.params, p=params[1], showProgress=showProgress)\n", " \n", " # initialize Adam optimizer with our hyperparameters\n", " optim = Adam(ETA, (BETA1, BETA2))\n", " \n", " # the actual training\n", " FMIFlux.train!(loss, # the loss function for training\n", " params, # the parameters to train\n", " Iterators.repeated((), steps), # an iterator repeating `steps` times\n", " optim; # the optimizer to train\n", " gradient=:ForwardDiff, # we use *ForwardDiff.jl* for gradient determination\n", " chunk_size=32, # the ForwardDiff chunk size is 32, so 32 parameter sensitivities are determined at a time\n", " cb=() -> update!(scheduler), # update the scheduler after every step \n", " proceed_on_assert=true) # proceed, even if assertions are thrown, with the next step\n", " \n", " # switch back to the default execution configuration, allocate a new FMU instance for every run, see:\n", " # https://thummeto.github.io/FMI.jl/dev/features/#Execution-Configuration\n", " fmu.executionConfig = FMI.FMIImport.FMU2_EXECUTION_CONFIGURATION_NO_RESET\n", " FMIFlux.finishSolveFMU(neuralFMU.fmu, c, false, true)\n", "\n", " # save our result parameters\n", " fmiSaveParameters(neuralFMU, joinpath(@__DIR__, \"params\", \"$(HPRUN)\", \"$(ind).jld2\"))\n", " \n", " # simulate the NeuralFMU on a validation trajectory\n", " resultNFMU = neuralFMU(x0, (data_validation.consumption_t[1], data_validation.consumption_t[end]); parameters=data_validation.params, showProgress=showProgress, maxiters=1e7, saveat=data_validation.consumption_t)\n", "\n", " # determine loss on validation data (if the simulation was successful)\n", " validation_loss = nothing \n", " if resultNFMU.success\n", " validation_loss = _lossFct(resultNFMU, data_validation, :MSE, LASTWEIGHT)\n", " end \n", "\n", " # unload FMU\n", " fmiUnload(fmu)\n", "\n", " # return the loss (or `nothing` if no loss can be determined)\n", " return validation_loss\n", "end" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's check if the train function is working for a given set of hyperparameters." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# check if the train function is working for a set of given hyperparameters\n", "# ([ ETA, BETA1, BETA2, BATCHDUR, LASTWEIGHT, SCHEDULER, LOSS], RESSOURCE, INDEX)\n", "train!([0.001, 0.9, 0.9999, 10.0, 0.7, :LossAccumulation, :MSE], 10.0, 1) " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 5. Results Discussion\n", "After hyperparameter optimization, results can be loaded (one is already prepared if you skipped the optimization)." 
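If you skipped the distributed optimization from `juliacon_2023_distributedhyperopt.jl`, a plain serial search over `train!` works as a small-scale substitute. A sketch, assuming illustrative (not tuned) value ranges:

# tiny serial grid search over two hyperparameters (ranges are assumptions, not the tuned ones)
results = Dict{Vector{Any}, Any}()
for (ind, (eta, batchdur)) in enumerate(Iterators.product((1e-3, 1e-4), (5.0, 10.0)))
    hp = Any[eta, 0.9, 0.999, batchdur, 0.7, :LossAccumulation, :MSE]
    results[hp] = train!(hp, 10.0, ind)   # ressource = 10 seconds of training data
end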
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# load our FMU (we take one from the FMIZoo.jl, exported with Dymola 2020x)\n", + "fmu = fmiLoad(\"VLDM\", \"Dymola\", \"2020x\"; type=:ME)\n", + "\n", + "# build NeuralFMU\n", + "neuralFMU = build_NFMU(fmu)\n", + "\n", + "# load parameters from hyperparameter optimization\n", + "fmiLoadParameters(neuralFMU, joinpath(@__DIR__, \"juliacon_2023.jld2\"))\n", + "\n", + "# simulate and plot the NeuralFMU\n", + "resultNFMU = neuralFMU(x0, (tStart, tStop); parameters=data.params, showProgress=showProgress, saveat=tSave) \n", + "resultFMU = fmiSimulate(fmu, (tStart, tStop); parameters=data.params, showProgress=showProgress, saveat=tSave) \n", + "\n", + "# plot the NeuralFMU, original FMU and data (cumulative consumption)\n", + "fig = plot(resultNFMU; stateIndices=6:6, stateEvents=false, timeEvents=false, label=\"NeuralFMU\", ylabel=\"cumulative consumption [Ws]\")\n", + "plot!(fig, resultFMU; stateIndices=6:6, values=false, stateEvents=false, timeEvents=false, label=\"FMU\")\n", + "plot!(fig, data.cumconsumption_t, data.cumconsumption_val, label=\"Data\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We also have a ready-to-use function that calculates different errors." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plotCumulativeConsumption(resultNFMU, resultFMU, data; filename=joinpath(@__DIR__, \"comparision_train_100.png\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Because the deviation is small, let's focus on the last 10% of the WLTC." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plotCumulativeConsumption(resultNFMU, resultFMU, data; range=(0.9, 1.0), filename=joinpath(@__DIR__, \"comparision_train_10.png\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Finally, we should check the results on validation data: the full WLTC cycle." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# get start and stop for the validation cycle (full WLTC)\n", "tStart_validation = data_validation.cumconsumption_t[1]\n", "tStop_validation = data_validation.cumconsumption_t[end]\n", "tSave_validation = data_validation.cumconsumption_t\n", "\n", "# simulate the NeuralFMU on validation data\n", "resultNFMU = neuralFMU(x0, (tStart_validation, tStop_validation); parameters=data_validation.params, showProgress=showProgress, saveat=tSave_validation) \n", "resultFMU = fmiSimulate(fmu, (tStart_validation, tStop_validation); parameters=data_validation.params, showProgress=showProgress, saveat=tSave_validation) \n", "\n", "plotCumulativeConsumption(resultNFMU, resultFMU, data_validation; filename=joinpath(@__DIR__, \"comparision_validation_100.png\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "... and the last 10% ..." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plotCumulativeConsumption(resultNFMU, resultFMU, data_validation; range=(0.9, 1.0), filename=joinpath(@__DIR__, \"comparision_validation_10.png\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "After we have finished, let's unload the FMU and invalidate the NeuralFMU." 
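One remark on the MSE, MAE and MAX values annotated in the plots above: they ignore errors that stay inside the measurement uncertainty band. A short sketch mirroring the `deviation` helper that this release adds to `FMIFlux.Losses` (the numbers are illustrative):

# deviation-aware error: errors inside the uncertainty band count as zero
dev_error(a, b, dev) = max.(abs.(a .- b) .- abs.(dev), 0.0)
dev_error([1.0, 2.0], [1.2, 3.0], [0.3, 0.3])   # -> [0.0, 0.7]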
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# unload FMU / invalidate NeuralFMU\n", + "fmiUnload(fmu)\n", + "neuralFMU = nothing" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Optional: Organize as module\n", + "If you want, you can place all code inside of a module named `NODE_Training`, this simplifies hyper parameter optimization (if you want to do one)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# for hyper parameter optimization, place the code in a `module`\n", + "# just uncomment the following line (and the one at the very beginning, too!) \n", + "#end # NODE_Training " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Julia 1.9.1", + "language": "julia", + "name": "julia-1.9" + }, + "language_info": { + "file_extension": ".jl", + "mimetype": "application/julia", + "name": "julia", + "version": "1.9.1" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/src/juliacon_2023.jld2 b/examples/src/juliacon_2023.jld2 new file mode 100644 index 0000000000000000000000000000000000000000..437926f2f9f045926b1f375cf6075b12eb11c44d GIT binary patch literal 2447 zcmeHJSy0o56aEDxKoAR9@Ir`MPB{fZT5!Xma>!ARj)GM=L?}YJq=INcR8-D@j7LM{ z2y#}f2;xyT0SZJSBqT8rRPYv1L5^02t618nKK`HnGyNX+o1OV~cXnUCb#~gi(KskD zZeIx1_27ZN(3Q&9mumKaW^=6W zt+67MJ=k%s-+25pWuRpj&DSCmH?9D%lv{CJ-~6JygmENsxaJr=nH}&fYqT5 zbEP?DI5S=AwJBHtJA3ulKj-`krRk~mUMPW`@5|kahHql5pe|&RTGx#6fu_Za81y=6ykbKQ)LcI#!r9&~D zxF)F1l*bjoc>L2!g$Op-7=EBz)eCWQ=CbuF5W-`JyJS1RMpQ9m>uFu!!oZhn<;m`1 zq;X_kY;nGaMz0>L-_{hu@}n1WxEWQr#$d+tWbi}eUvb^Q4rs=^vK-Z4PP1X>3u`7N zhzALb;8+8fCiHN_bvjC75Uh)->pLg{ddZ1M-p~WwG$JC5?WOSKyE!LR>P2ODajxk^ z8@T#5t?OUh0|9c4H!dkL!LhF^>RA{E&S_C}He75%gUNwY%?3gkD?7Y8`&kR5%&NKA zE8GEHS_w(+cMe$7jxYTqn+FpZ^UyXxfGZt+ST7HD!|}njBtEvooPRaFYe-RUS(&|x5cPfVS@c}-XRq|`3*Ww$ui~H}gSlAy|f5AU42J4c{>W&O|f}DBG z-QH__eA%Z^^3%@`fpqnG_Okgok=Cw>*F-Jwu_(!P+PnZ3UsOB0TP*?8M5>`n$C<#e z%UNZemWcs3`u7AJE5t&(E(aCTA*AcMDHX2D!S}02^S@Ha!-Q(tr`+HoxF4;s))=$l z%W;)T?QIN&-RSQrcOG}IupZ6t$O6wnB3&!u0%X@~^zYt9 zM`e54tFMn`V}_cI^1Tm*An{-XEaPXxetr=>WVZjgWPm>Ra9D}@eO&uJh!))RXRcYHAO?~Tpav$;#zxJQIT?x)^==gEPHds=%2;!^Q_ z(U~P(rrl7L|F!=0d;x?v9NtmRYJ#I1WGAa;i(%5BTDvGw4BqEdwIhcm@X?GnyZvS{ z_PI-3$-Dy4Pj%VfnAD3)2iq3+sfi(Hiq#la#71U0<%bpWi6F95EGbQt zf=-Ck`OtJL_yi~U(YPE`4B|?+s}!U6%Nnj{7ze$#ZTC|<+ye5M93qSnhX*T}m19$4 zF!g?IGOB+Y`W!D(oKz$btfqG=SE~!-_({19V>M85)5rbQWEU8ZjIiBQIGC$lc%VW_ ziW77n)qQDV{5VRxT77^6nHmJCSi-^^X(OSZDG4CGdxz<_gWY(Z=OR|B5dyh?SYF$X zi*Kjm@2s%u1crN?BmM1fFsLqfN#00`A4t@G4Q2}lReMt&cs~O3-Q9Y_X>2s=oTRPH zkidTa1?4znE)=G_No%6`@O=Md#{&i%wiUF>WQ9Ef1CNr=AjvbEf-05ac%zYZQ$p$TJe zPTF-pqB$&X=e^p_M4iK0>N4H;mxB>`GHRbJ>L087hbezqAX$STNGc>sbYN^? 0.0 ? max(0.0, ANN_error[i]-data.consumption_dev[i]) : min(0.0, ANN_error[i]+data.consumption_dev[i]) for i in 1:length(data.consumption_t)) + + label = L"consumption [W]" + colorMin=-610.0 + colorMax=610.0 + else # :acceleration + ANN_acceleration = collect(o[1] for o in ANNOutputs) + ANN_error = ANN_acceleration - acceleration_val + ANN_error = collect(ANN_error[i] > 0.0 ? 
max(0.0, ANN_error[i]-acceleration_dev[i]) : min(0.0, ANN_error[i]+acceleration_dev[i]) for i in 1:length(data.consumption_t)) + + label = L"acceleration [m/s^2]" + colorMin=-0.04 + colorMax=0.04 + end + + ANN_error = movavg(ANN_error, mov_avg) + + ANNInput_vel = collect(o[4] for o in ANNInputs) + ANNInput_acc = collect(o[5] for o in ANNInputs) + ANNInput_con = collect(o[6] for o in ANNInputs) + + _max = max(ANN_error...) + _min = min(ANN_error...) + neutral = -colorMin/(colorMax-colorMin) # -_min/(_max-_min) + + if _max > colorMax + @warn "max value ($(_max)) is larger than colorMax ($(colorMax)) - values will be cut" + end + + if _min < colorMin + @warn "min value ($(_min)) is smaller than colorMin ($(colorMin)) - values will be cut" + end + + ANN_error = collect(min(max(e, colorMin), colorMax) for e in ANN_error) + + @info "$(_min) $(_max) $(neutral)" + + anim = @animate for ang in 0:5:360 + l = Plots.@layout [Plots.grid(3,1) r{0.85w}] + fig = Plots.plot(layout=l, size=(1600,800), left_margin = 10Plots.mm, right_margin = 10Plots.mm, bottom_margin = 10Plots.mm) + + colorgrad = cgrad([:orange, :white, :blue], [0.0, 0.5, 1.0]) # , scale = :log) + + scatter!(fig[1], ANNInput_vel[1:reductionFactor:end], ANNInput_acc[1:reductionFactor:end], + xlabel=L"velocity [m/s]", ylabel=L"acceleration [m/s^2]", + color=colorgrad, zcolor=ANN_error[1:reductionFactor:end], label=:none, colorbar=:none) # + + scatter!(fig[2], ANNInput_acc[1:reductionFactor:end], ANNInput_con[1:reductionFactor:end], + xlabel=L"acceleration [m/s^2]", ylabel=L"consumption [W]", + color=colorgrad, zcolor=ANN_error[1:reductionFactor:end], label=:none, colorbar=:none) # + + scatter!(fig[3], ANNInput_vel[1:reductionFactor:end], ANNInput_con[1:reductionFactor:end], + xlabel=L"velocity [m/s]", ylabel=L"consumption [W]", + color=colorgrad, zcolor=ANN_error[1:reductionFactor:end], label=:none, colorbar=:none) # + + scatter!(fig[4], ANNInput_vel[1:reductionFactor:end], ANNInput_acc[1:reductionFactor:end], ANNInput_con[1:reductionFactor:end], + xlabel=L"velocity [m/s]", ylabel=L"acceleration [m/s^2]", zlabel=L"consumption [W]", + color=colorgrad, zcolor=ANN_error[1:reductionFactor:end], markersize=8, label=:none, camera=(ang,20), colorbar_title=" \n\n\n\n" * L"Δ" * label * " (smoothed)") # + + # draw invisible dummys to scale colorbar to fixed size + for i in 1:3 + scatter!(fig[i], [0.0,0.0], [0.0,0.0], + color=colorgrad, zcolor=[colorMin, colorMax], + markersize=0, label=:none) + end + for i in 4:4 + scatter!(fig[i], [0.0,0.0], [0.0,0.0], [0.0,0.0], + color=colorgrad, zcolor=[colorMin, colorMax], + markersize=0, label=:none) + end + end + + return gif(anim, filename; fps=10) +end + +function plotCumulativeConsumption(solutionNFMU::FMU2Solution, solutionFMU::FMU2Solution, data::FMIZoo.VLDM_Data; range=(0.0,1.0), filename=nothing) + + len = length(data.consumption_t) + steps = (1+round(Int, range[1]*len)):(round(Int, range[end]*len)) + + t = data.consumption_t + nfmu_val = fmiGetSolutionState(solutionNFMU, 6; isIndex=true) + fmu_val = fmiGetSolutionState(solutionFMU, 6; isIndex=true) + data_val = data.cumconsumption_val + data_dev = data.cumconsumption_dev + + mse_nfmu = FMIFlux.Losses.mse_dev(nfmu_val, data_val, data_dev) + mse_fmu = FMIFlux.Losses.mse_dev(fmu_val, data_val, data_dev) + + mae_nfmu = FMIFlux.Losses.mae_dev(nfmu_val, data_val, data_dev) + mae_fmu = FMIFlux.Losses.mae_dev(fmu_val, data_val, data_dev) + + max_nfmu = FMIFlux.Losses.max_dev(nfmu_val, data_val, data_dev) + max_fmu = FMIFlux.Losses.max_dev(fmu_val, data_val, 
data_dev) + + fig = plot(xlabel=L"t[s]", ylabel=L"x_6 [Ws]", dpi=600) + plot!(fig, t[steps], data_val[steps]; label="Data", ribbon=data_dev[steps], fillalpha=0.3) + plot!(fig, t[steps], fmu_val[steps]; label="FMU [ MSE:$(roundToLength(mse_fmu,10)) | MAE:$(roundToLength(mae_fmu,10)) | MAX:$(roundToLength(max_fmu,10)) ]") + plot!(fig, t[steps], nfmu_val[steps]; label="NeuralFMU [ MSE:$(roundToLength(mse_nfmu,10)) | MAE:$(roundToLength(mae_nfmu,10)) | MAX:$(roundToLength(max_nfmu,10)) ]") + + if !isnothing(filename) + savefig(fig, filename) + end + + return fig +end + +function simPlotCumulativeConsumption(cycle::Symbol, filename=nothing; kwargs...) + d = FMIZoo.VLDM(cycle) + tStart = d.consumption_t[1] + tStop = d.consumption_t[end] + tSave = d.consumption_t + + resultNFMU = neuralFMU(x0, (tStart, tStop); parameters=d.params, showProgress=false, saveat=tSave, maxiters=1e7) + resultFMU = fmiSimulate(fmu, (tStart, tStop), parameters=d.params, showProgress=false, saveat=tSave) + + fig = plotCumulativeConsumption(resultNFMU, resultFMU, d; kwargs...) + if !isnothing(filename) + savefig(fig, filename) + end + return fig +end + +function checkMSE(cycle; init::Bool=false) + + data = FMIZoo.VLDM(cycle) + tStart = data.consumption_t[1] + tStop = data.consumption_t[end] + tSave = data.consumption_t + + if init + c = FMI.FMIImport.getCurrentComponent(fmu) + FMI.FMIImport.fmi2SetFMUstate(c, batch[1].initialState) + c.eventInfo = deepcopy(batch[1].initialEventInfo) + c.t = batch[1].tStart + end + resultNFMU = neuralFMU(x0, (tStart, tStop); parameters=data.params, showProgress=true, maxiters=1e7, saveat=tSave) + + mse_NFMU = FMIFlux.Losses.mse_dev(data.cumconsumption_val, fmiGetSolutionState(resultNFMU, 6; isIndex=true), data.cumconsumption_dev) + + return mse_NFMU +end diff --git a/examples/src/mdpi_2022.ipynb b/examples/src/mdpi_2022.ipynb index d4b67e2f..f3eadf26 100644 --- a/examples/src/mdpi_2022.ipynb +++ b/examples/src/mdpi_2022.ipynb @@ -8,7 +8,7 @@ "# Physics-enhanced NeuralODEs in real-world applications\n", "Tutorial by *Tobias Thummerer* based on the paper [*NeuralFMU: presenting a workflow for integrating hybrid NeuralODEs into real-world applications*](https://doi.org/10.3390/electronics11193202)\n", - "🚧 Work in progress, last edit: 29.03.2023 🚧\n", + "📚 This tutorial is archived; for a more up-to-date version, see the [Workshop for JuliaCon2023](https://github.com/ThummeTo/FMIFlux.jl/blob/examples/examples/juliacon_2023.ipynb) 📚\n", "\n", "## Keywords\n", "PeNODE, NeuralODE, Universal Differential Equation, Hybrid Modeling, Functional Mock-up Unit, FMU, NeuralFMU\n", diff --git a/src/FMIFlux.jl b/src/FMIFlux.jl index bcbcd9c4..42330e5e 100644 --- a/src/FMIFlux.jl +++ b/src/FMIFlux.jl @@ -11,13 +11,27 @@ if VERSION < v"1.7.0" @warn "Training under Julia 1.6 is very slow, please consider using Julia 1.7 or newer." end +# Overwrite tag printing and limit partials length from ForwardDiff.jl +# import FMIImport.ForwardDiff +# function Base.show(io::IO, d::ForwardDiff.Dual{T,V,N}) where {T,V,N} +# print(io, "Dual(", ForwardDiff.value(d)) +# for i in 1:min(N, 5) +# print(io, ", ", ForwardDiff.partials(d, i)) +# end +# if N > 5 +# print(io, ", [$(N-5) more]...") +# end +# print(io, ")") +# end + # ToDo: Quick-fixes until patch release SciMLSensitivity v0.7.29 -import SciMLSensitivity: FakeIntegrator, u_modified!, TrackedAffect, set_u! +import FMIImport.SciMLSensitivity: FakeIntegrator, u_modified!, TrackedAffect +import FMIImport.SciMLSensitivity.DiffEqBase: set_u! 
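+# Note (an assumption, not stated in the original sources): both overloads below
+# are deliberately no-ops - during sensitivity replays the `FakeIntegrator` must
+# not actually be mutated, hence the empty bodies until the upstream patch release.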
function u_modified!(::FakeIntegrator, ::Bool) + return nothing end - -function set_u!(integrator::FakeIntegrator, u) - #integrator.u = u +function set_u!(::FakeIntegrator, u) + return nothing end # ToDo: Quick-fixes until patch release SciMLSensitivity v0.7.28 diff --git a/src/batch.jl b/src/batch.jl index 3fd69faa..2a9b4142 100644 --- a/src/batch.jl +++ b/src/batch.jl @@ -3,20 +3,36 @@ # Licensed under the MIT license. See LICENSE file in the project root for details. # -import FMIImport: fmi2Real, fmi2FMUstate, fmi2EventInfo +import FMIImport: fmi2Real, fmi2FMUstate, fmi2EventInfo, fmi2ComponentState import ChainRulesCore: ignore_derivatives using DiffEqCallbacks: FunctionCallingCallback using FMIImport.ForwardDiff +import FMIImport: unsense -struct FMU2Loss - loss::Real +struct FMU2Loss{T} + loss::T step::Integer time::Real - function FMU2Loss(loss::Real, step::Integer=0, time::Real=time()) - inst = new(ForwardDiff.value(loss), step, time) + function FMU2Loss{T}(loss::T, step::Integer=0, time::Real=time()) where {T} + inst = new{T}(loss, step, time) return inst end + + function FMU2Loss(loss, step::Integer=0, time::Real=time()) + loss = unsense(loss) + T = typeof(loss) + inst = new{T}(loss, step, time) + return inst + end +end + +function nominalLoss(l::FMU2Loss{T}) where T <: AbstractArray + return unsense(sum(l.loss)) +end + +function nominalLoss(l::FMU2Loss{T}) where T <: Real + return unsense(l.loss) end abstract type FMU2BatchElement end @@ -27,6 +43,7 @@ mutable struct FMU2SolutionBatchElement <: FMU2BatchElement tStop::fmi2Real initialState::Union{fmi2FMUstate, Nothing} + initialComponentState::fmi2ComponentState initialEventInfo::Union{fmi2EventInfo, Nothing} losses::Array{<:FMU2Loss} step::Integer @@ -38,7 +55,9 @@ mutable struct FMU2SolutionBatchElement <: FMU2BatchElement solution::FMU2Solution - function FMU2SolutionBatchElement() + scalarLoss::Bool + + function FMU2SolutionBatchElement(;scalarLoss::Bool=true) inst = new() inst.xStart = nothing inst.tStart = -Inf @@ -53,6 +72,7 @@ mutable struct FMU2SolutionBatchElement <: FMU2BatchElement inst.targets = nothing inst.indicesModel = nothing + inst.scalarLoss = scalarLoss return inst end @@ -74,7 +94,9 @@ mutable struct FMU2EvaluationBatchElement <: FMU2BatchElement result - function FMU2EvaluationBatchElement() + scalarLoss::Bool + + function FMU2EvaluationBatchElement(;scalarLoss::Bool=true) inst = new() inst.tStart = -Inf @@ -89,25 +111,44 @@ mutable struct FMU2EvaluationBatchElement <: FMU2BatchElement inst.indicesModel = nothing inst.result = nothing + inst.scalarLoss = scalarLoss return inst end end -function startStateCallback(fmu, batchElement) - #print("Setting state ... ") - +function copyState!(fmu::FMU2, batchElement::FMU2SolutionBatchElement) c = getCurrentComponent(fmu) - if batchElement.initialState != nothing - fmi2SetFMUstate(c, batchElement.initialState) - c.eventInfo = deepcopy(batchElement.initialEventInfo) - c.t = batchElement.tStart - else + if isnothing(batchElement.initialState) batchElement.initialState = fmi2GetFMUstate(c) - batchElement.initialEventInfo = deepcopy(c.eventInfo) - @warn "Batch element does not provide a `initialState`, I try to simulate anyway. InitialState is overwritten." + else + fmi2GetFMUstate!(c, Ref(batchElement.initialState)) end + batchElement.initialEventInfo = deepcopy(c.eventInfo) + batchElement.initialComponentState = c.state + + # don't overwrite fields that are initialized from data! 
+ # batchElement.tStart = c.t + # batchElement.xStart = c.x + + return nothing +end + +function pasteState!(fmu::FMU2, batchElement::FMU2SolutionBatchElement) + @assert !isnothing(batchElement.initialState) "Batch element does not provide a `initialState`." + c = getCurrentComponent(fmu) + + fmi2SetFMUstate(c, batchElement.initialState) + c.eventInfo = deepcopy(batchElement.initialEventInfo) + c.state = batchElement.initialComponentState + + # c.t = batchElement.tStart + # c.x = batchElement.xStart + FMI.fmi2SetContinuousStates(c, batchElement.xStart) + FMI.fmi2SetTime(c, batchElement.tStart) + + return nothing end function stopStateCallback(fmu, batchElement) @@ -127,39 +168,35 @@ end function run!(neuralFMU::ME_NeuralFMU, batchElement::FMU2SolutionBatchElement; lastBatchElement=nothing, kwargs...) - ignore_derivatives() do + neuralFMU.customCallbacksAfter = [] + neuralFMU.customCallbacksBefore = [] + + # STOP CALLBACK + if !isnothing(lastBatchElement) + stopcb = FunctionCallingCallback((u, t, integrator) -> copyState!(neuralFMU.fmu, lastBatchElement); + funcat=[batchElement.tStop]) + push!(neuralFMU.customCallbacksAfter, stopcb) + end - neuralFMU.customCallbacksAfter = [] - neuralFMU.customCallbacksBefore = [] + if isnothing(batchElement.initialState) + startcb = FunctionCallingCallback((u, t, integrator) -> copyState!(neuralFMU.fmu, batchElement); + funcat=[batchElement.tStart], func_start=true) + push!(neuralFMU.customCallbacksAfter, startcb) - # START CALLBACK - startcb = FunctionCallingCallback((u, t, integrator) -> startStateCallback(neuralFMU.fmu, batchElement); - funcat=[batchElement.tStart], func_start=true, func_everystep=false) - push!(neuralFMU.customCallbacksBefore, startcb) - - # STOP CALLBACK - if lastBatchElement != nothing - stopcb = FunctionCallingCallback((u, t, integrator) -> stopStateCallback(neuralFMU.fmu, lastBatchElement); - funcat=[batchElement.tStop]) - push!(neuralFMU.customCallbacksAfter, stopcb) - end + c = getCurrentComponent(neuralFMU.fmu) + FMI.fmi2SetContinuousStates(c, batchElement.xStart) + FMI.fmi2SetTime(c, batchElement.tStart) + else + pasteState!(neuralFMU.fmu, batchElement) end batchElement.solution = neuralFMU(batchElement.xStart, (batchElement.tStart, batchElement.tStop); saveat=batchElement.saveat, kwargs...) - ignore_derivatives() do - - if lastBatchElement != nothing - #lastBatchElement.tStart = ForwardDiff.value(batchElement.solution.states.t[end]) - #lastBatchElement.xStart = collect(ForwardDiff.value(u) for u in batchElement.solution.states.u[end]) - end - - neuralFMU.customCallbacksBefore = [] - neuralFMU.customCallbacksAfter = [] - - batchElement.step += 1 - end + neuralFMU.customCallbacksBefore = [] + neuralFMU.customCallbacksAfter = [] + batchElement.step += 1 + return batchElement.solution end @@ -173,7 +210,7 @@ end function plot(batchElement::FMU2SolutionBatchElement; targets::Bool=true, kwargs...) - fig = Plots.plot(; xlabel="t [s]") # , title="loss[$(batchElement.step)] = $(batchElement.losses[end].loss)") + fig = Plots.plot(; xlabel="t [s]") # , title="loss[$(batchElement.step)] = $(nominalLoss(batchElement.losses[end]))") for i in 1:length(batchElement.indicesModel) if batchElement.solution != nothing Plots.plot!(fig, batchElement.saveat, collect(ForwardDiff.value(u[batchElement.indicesModel[i]]) for u in batchElement.solution.states.u), label="Simulation #$(i)") @@ -188,7 +225,7 @@ end function plot(batchElement::FMU2BatchElement; targets::Bool=true, features::Bool=true, kwargs...) 
- fig = Plots.plot(; xlabel="t [s]") # , title="loss[$(batchElement.step)] = $(batchElement.losses[end].loss)") + fig = Plots.plot(; xlabel="t [s]") # , title="loss[$(batchElement.step)] = $(nominalLoss(batchElement.losses[end]))") if batchElement.features != nothing && features for i in 1:length(batchElement.features[1]) @@ -213,12 +250,12 @@ function plot(batch::AbstractArray{<:FMU2BatchElement}; plot_mean::Bool=true, pl num = length(batch) xs = 1:num - ys = collect((length(b.losses) > 0 ? b.losses[end].loss : 0.0) for b in batch) + ys = collect((length(b.losses) > 0 ? nominalLoss(b.losses[end]) : 0.0) for b in batch) fig = Plots.plot(; xlabel="Batch ID", ylabel="Loss", plotkwargs...) if plot_shadow - ys_shadow = collect((length(b.losses) > 1 ? b.losses[end-1].loss : 0.0) for b in batch) + ys_shadow = collect((length(b.losses) > 1 ? nominalLoss(b.losses[end-1]) : 0.0) for b in batch) Plots.bar!(fig, xs, ys_shadow; label="Previous loss", color=:green, bar_width=1.0); end @@ -256,23 +293,81 @@ function plotLoss(batchElement::FMU2BatchElement; xaxis::Symbol = :steps) return fig end -function loss!(batchElement::FMU2BatchElement, lossFct; logLoss::Bool=true) +function loss!(batchElement::FMU2SolutionBatchElement, lossFct; logLoss::Bool=true) - loss = 0.0 + loss = nothing - for i in 1:length(batchElement.indicesModel) - modelOutput = nothing - - if isa(batchElement, FMU2SolutionBatchElement) - modelOutput = collect(u[batchElement.indicesModel[i]] for u in batchElement.solution.states.u) + if hasmethod(lossFct, Tuple{FMU2Solution}) + loss = lossFct(batchElement.solution) + + elseif hasmethod(lossFct, Tuple{FMU2Solution, Union{}}) + loss = lossFct(batchElement.solution, batchElement.targets) + + else # hasmethod(lossFct, Tuple{Union{}, Union{}}) + + if batchElement.solution.success + if batchElement.scalarLoss + for i in 1:length(batchElement.indicesModel) + dataTarget = collect(d[i] for d in batchElement.targets) + modelOutput = collect(u[batchElement.indicesModel[i]] for u in batchElement.solution.states.u) + + if isnothing(loss) + loss = lossFct(modelOutput, dataTarget) + else + loss += lossFct(modelOutput, dataTarget) + end + end + else + dataTarget = batchElement.targets + modelOutput = collect(u[batchElement.indicesModel] for u in batchElement.solution.states.u) + + if isnothing(loss) + loss = lossFct(modelOutput, dataTarget) + else + loss += lossFct(modelOutput, dataTarget) + end + end else - modelOutput = collect(u[i] for u in batchElement.result) + @warn "Can't compute loss for batch element, because solution is invalid (`success=false`) for batch element\n$(batchElement)." 
end - dataTarget = collect(d[i] for d in batchElement.targets) + + end - loss += lossFct(modelOutput, dataTarget) + ignore_derivatives() do + if logLoss + push!(batchElement.losses, FMU2Loss(loss, batchElement.step)) + end end + return loss +end + +function loss!(batchElement::FMU2EvaluationBatchElement, lossFct; logLoss::Bool=true) + + loss = nothing + + if batchElement.scalarLoss + for i in 1:length(batchElement.indicesModel) + dataTarget = collect(d[i] for d in batchElement.targets) + modelOutput = collect(u[i] for u in batchElement.result) + + if isnothing(loss) + loss = lossFct(modelOutput, dataTarget) + else + loss += lossFct(modelOutput, dataTarget) + end + end + else + dataTarget = batchElement.targets + modelOutput = batchElement.result + + if isnothing(loss) + loss = lossFct(modelOutput, dataTarget) + else + loss += lossFct(modelOutput, dataTarget) + end + end + ignore_derivatives() do if logLoss push!(batchElement.losses, FMU2Loss(loss, batchElement.step)) @@ -283,7 +378,7 @@ function loss!(batchElement::FMU2BatchElement, lossFct; logLoss::Bool=true) end function batchDataSolution(neuralFMU::NeuralFMU, x0_fun, train_t::AbstractArray{<:Real}, targets::AbstractArray; - batchDuration::Real=(train_t[end]-train_t[1]), indicesModel=1:length(targets[1]), plot::Bool=false, solverKwargs...) + batchDuration::Real=(train_t[end]-train_t[1]), indicesModel=1:length(targets[1]), plot::Bool=false, scalarLoss::Bool=true, solverKwargs...) if fmi2CanGetSetState(neuralFMU.fmu) @assert !neuralFMU.fmu.executionConfig.instantiate "Batching not possible for auto-instanciating FMUs." @@ -293,31 +388,30 @@ function batchDataSolution(neuralFMU::NeuralFMU, x0_fun, train_t::AbstractArray{ batch = Array{FMIFlux.FMU2SolutionBatchElement,1}() - indicesData = 1:1 + # indicesData = 1:1 tStart = train_t[1] - iStart = timeToIndex(train_t, tStart) - iStop = timeToIndex(train_t, tStart + batchDuration) + # iStart = timeToIndex(train_t, tStart) + # iStop = timeToIndex(train_t, tStart + batchDuration) - startElement = FMIFlux.FMU2SolutionBatchElement() - startElement.tStart = train_t[iStart] - startElement.tStop = train_t[iStop] - startElement.xStart = x0_fun(tStart) + # startElement = FMIFlux.FMU2SolutionBatchElement(;scalarLoss=scalarLoss) + # startElement.tStart = train_t[iStart] + # startElement.tStop = train_t[iStop] + # startElement.xStart = x0_fun(tStart) - startElement.saveat = train_t[iStart:iStop] - startElement.targets = targets[iStart:iStop] + # startElement.saveat = train_t[iStart:iStop] + # startElement.targets = targets[iStart:iStop] - startElement.indicesModel = indicesModel + # startElement.indicesModel = indicesModel - push!(batch, startElement) - - for i in 2:floor(Integer, (train_t[end]-train_t[1])/batchDuration) - push!(batch, FMIFlux.FMU2SolutionBatchElement()) + # push!(batch, startElement) - FMIFlux.run!(neuralFMU, batch[i-1]; lastBatchElement=batch[i], solverKwargs...) 
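+ # (Structure of the rewritten code below: all batch elements are first created
+ # from the data grid and only then simulated in a second pass, so that each `run!`
+ # can hand its stop-time FMU state to the following element via `lastBatchElement`.)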
+ numElements = floor(Integer, (train_t[end]-train_t[1])/batchDuration) + + for i in 1:numElements + push!(batch, FMIFlux.FMU2SolutionBatchElement(;scalarLoss=scalarLoss)) - # overwrite start state iStart = timeToIndex(train_t, tStart + (i-1) * batchDuration) iStop = timeToIndex(train_t, tStart + i * batchDuration) batch[i].tStart = train_t[iStart] @@ -328,6 +422,16 @@ function batchDataSolution(neuralFMU::NeuralFMU, x0_fun, train_t::AbstractArray{ batch[i].targets = targets[iStart:iStop] batch[i].indicesModel = indicesModel + end + + for i in 1:numElements + + nextBatchElement = nothing + if i < numElements + nextBatchElement = batch[i+1] + end + + FMIFlux.run!(neuralFMU, batch[i]; lastBatchElement=nextBatchElement, solverKwargs...) if plot fig = FMIFlux.plot(batch[i-1]; solverKwargs...) @@ -339,7 +443,7 @@ function batchDataSolution(neuralFMU::NeuralFMU, x0_fun, train_t::AbstractArray{ end function batchDataEvaluation(train_t::AbstractArray{<:Real}, targets::AbstractArray, features::Union{AbstractArray, Nothing}=nothing; - batchDuration::Real=(train_t[end]-train_t[1]), indicesModel=1:length(targets[1]), plot::Bool=false, round_digits=3) + batchDuration::Real=(train_t[end]-train_t[1]), indicesModel=1:length(targets[1]), plot::Bool=false, round_digits=3, scalarLoss::Bool=true) batch = Array{FMIFlux.FMU2EvaluationBatchElement,1}() @@ -350,7 +454,7 @@ function batchDataEvaluation(train_t::AbstractArray{<:Real}, targets::AbstractAr iStart = timeToIndex(train_t, tStart) iStop = timeToIndex(train_t, tStart + batchDuration) - startElement = FMIFlux.FMU2EvaluationBatchElement() + startElement = FMIFlux.FMU2EvaluationBatchElement(;scalarLoss=scalarLoss) startElement.tStart = train_t[iStart] startElement.tStop = train_t[iStop] @@ -366,7 +470,7 @@ function batchDataEvaluation(train_t::AbstractArray{<:Real}, targets::AbstractAr push!(batch, startElement) for i in 2:floor(Integer, (train_t[end]-train_t[1])/batchDuration) - push!(batch, FMIFlux.FMU2EvaluationBatchElement()) + push!(batch, FMIFlux.FMU2EvaluationBatchElement(;scalarLoss=scalarLoss)) iStart = timeToIndex(train_t, tStart + (i-1) * batchDuration) iStop = timeToIndex(train_t, tStart + i * batchDuration) diff --git a/src/losses.jl b/src/losses.jl index 9c4c139b..b3193614 100644 --- a/src/losses.jl +++ b/src/losses.jl @@ -6,7 +6,8 @@ module Losses using Flux -import ..FMIFlux: FMU2BatchElement, NeuralFMU, loss!, run!, ME_NeuralFMU +import ..FMIFlux: FMU2BatchElement, NeuralFMU, loss!, run!, ME_NeuralFMU, FMU2Solution +import FMIImport: unsense mse = Flux.Losses.mse mae = Flux.Losses.mae @@ -32,27 +33,152 @@ function mae_last_element(a::AbstractArray, b::AbstractArray) return mae(a[end], b[end]) end +function deviation(a::AbstractArray, b::AbstractArray, dev::AbstractArray) + Δ = abs.(a .- b) + Δ -= abs.(dev) + Δ = collect(max(val, 0.0) for val in Δ) + + return Δ +end + +function mae_dev(a::AbstractArray, b::AbstractArray, dev::AbstractArray) + num = length(a) + Δ = deviation(a, b, dev) + Δ = sum(Δ) / num + return Δ +end + +function mse_dev(a::AbstractArray, b::AbstractArray, dev::AbstractArray) + num = length(a) + Δ = deviation(a, b, dev) + Δ = sum(Δ .^ 2) / num + return Δ +end + +function max_dev(a::AbstractArray, b::AbstractArray, dev::AbstractArray) + Δ = deviation(a, b, dev) + Δ = max(Δ...) 
+ return Δ +end + +function mae_last_element_rel_dev(a::AbstractArray, b::AbstractArray, dev::AbstractArray, lastElementRatio::Real) + num = length(a) + Δ = deviation(a, b, dev) + Δ[1:end-1] .*= (1.0-lastElementRatio) + Δ[ end ] *= lastElementRatio + Δ = sum(Δ) / num + return Δ +end + +function mse_last_element_rel_dev(a::AbstractArray, b::AbstractArray, dev::AbstractArray, lastElementRatio::Real) + num = length(a) + Δ = deviation(a, b, dev) + Δ = Δ .^ 2 + Δ[1:end-1] .*= (1.0-lastElementRatio) + Δ[ end ] *= lastElementRatio + Δ = sum(Δ) / num + return Δ +end + +function stiffness_corridor(solution::FMU2Solution, corridor::AbstractArray{<:AbstractArray{<:Tuple{Real, Real}}}; lossFct=Flux.Losses.mse) + @assert !isnothing(solution.eigenvalues) "stiffness_corridor: Need eigenvalue information, that is not present in the given `FMU2Solution`. Use keyword `recordEigenvalues=true` for FMU or NeuralFMU simulation." + + eigs_over_time = solution.eigenvalues.saveval + num_eigs_over_time = length(eigs_over_time) + + @assert num_eigs_over_time == length(corridor) "stiffness_corridor: length of time points with eigenvalues $(num_eigs_over_time) doesn't match time points in corridor $(length(corridor))." + + l = 0.0 + for i in 2:num_eigs_over_time + eigs = eigs_over_time[i] + num_eigs = Int(length(eigs)/2) + + for j in 1:num_eigs + re = eigs[(j-1)*2+1] + im = eigs[j*2] + + c_min, c_max = corridor[i][j] + if re > c_max + l += lossFct(re, c_max) / num_eigs / num_eigs_over_time + end + if re < c_min + l += lossFct(c_min, re) / num_eigs / num_eigs_over_time + end + end + end + + return l +end + +function stiffness_corridor(solution::FMU2Solution, corridor::AbstractArray{<:Tuple{Real, Real}}; lossFct=Flux.Losses.mse) + @assert !isnothing(solution.eigenvalues) "stiffness_corridor: Need eigenvalue information, that is not present in the given `FMU2Solution`. Use keyword `recordEigenvalues=true` for FMU or NeuralFMU simulation." + + eigs_over_time = solution.eigenvalues.saveval + num_eigs_over_time = length(eigs_over_time) + + @assert num_eigs_over_time == length(corridor) "stiffness_corridor: length of time points with eigenvalues $(num_eigs_over_time) doesn't match time points in corridor $(length(corridor))." + + l = 0.0 + for i in 2:num_eigs_over_time + eigs = eigs_over_time[i] + num_eigs = Int(length(eigs)/2) + c_min, c_max = corridor[i] + + for j in 1:num_eigs + re = eigs[(j-1)*2+1] + im = eigs[j*2] + + if re > c_max + l += lossFct(re, c_max) / num_eigs / num_eigs_over_time + end + if re < c_min + l += lossFct(c_min, re) / num_eigs / num_eigs_over_time + end + end + end + + return l +end + +function stiffness_corridor(solution::FMU2Solution, corridor::Tuple{Real, Real}; lossFct=Flux.Losses.mse) + @assert !isnothing(solution.eigenvalues) "stiffness_corridor: Need eigenvalue information, that is not present in the given `FMU2Solution`. Use keyword `recordEigenvalues=true` for FMU or NeuralFMU simulation." 
+ + eigs_over_time = solution.eigenvalues.saveval + num_eigs_over_time = length(eigs_over_time) + + c_min, c_max = corridor + + l = 0.0 + for i in 2:num_eigs_over_time + eigs = eigs_over_time[i] + num_eigs = Int(length(eigs)/2) + + for j in 1:num_eigs + re = eigs[(j-1)*2+1] + im = eigs[j*2] + + if re > c_max + l += lossFct(re, c_max) / num_eigs / num_eigs_over_time + end + if re < c_min + l += lossFct(re, c_min) / num_eigs / num_eigs_over_time + end + end + end + + return l +end + function loss(model, batchElement::FMU2BatchElement; logLoss::Bool=true, lossFct=Flux.Losses.mse, p=nothing) model = nfmu.neuralODE.model[layers] - loss = 0.0 - # evaluate model result = run!(model, batchElement, p=p) - # for i in 1:length(batchElement.targets[1]) - # targets_model = collect(r[batchElement.indicesModel[i]] for r in batchElement.result) - # targets_data = collect(td[i] for td in batchElement.targets) - - # loss += lossFct(targets_model, targets_data) - # end - - loss = loss!(batchElement, lossFct; logLoss=logLoss) - - return loss + return loss!(batchElement, lossFct; logLoss=logLoss) end function loss(nfmu::NeuralFMU, batch::AbstractArray{<:FMU2BatchElement}; @@ -71,13 +197,11 @@ function loss(nfmu::NeuralFMU, batch::AbstractArray{<:FMU2BatchElement}; solution = run!(nfmu, batch[batchIndex]; lastBatchElement=lastBatchElement, progressDescr="Sim. Batch $(batchIndex)/$(length(batch)) |", kwargs...) - if solution.success - return loss!(batch[batchIndex], lossFct; logLoss=logLoss) - else - @warn "Solving the NeuralFMU as part of the loss function failed. This is often because the ODE cannot be solved. Did you initialize the NeuralFMU model? Often additional solver errors/warnings are printed before this warning." - return Inf - end - + if !solution.success + @warn "Solving the NeuralFMU as part of the loss function failed with return code `$(solution.states.retcode)`.\nThis is often because the ODE cannot be solved. Did you initialize the NeuralFMU model?\nOften additional solver errors/warnings are printed before this warning.\nHowever, it is tried to compute a loss on the partial retrieved solution from $(unsense(solution.states.t[1]))s to $(unsense(solution.states.t[end]))s." + end + + return loss!(batch[batchIndex], lossFct; logLoss=logLoss) end function loss(model, batch::AbstractArray{<:FMU2BatchElement}; @@ -87,39 +211,45 @@ function loss(model, batch::AbstractArray{<:FMU2BatchElement}; run!(model, batch[batchIndex], p) - loss = loss!(batch[batchIndex], lossFct; logLoss=logLoss) - - return loss + return loss!(batch[batchIndex], lossFct; logLoss=logLoss) end function batch_loss(neuralFMU::ME_NeuralFMU, batch::AbstractArray{<:FMU2BatchElement}; update::Bool=false, logLoss::Bool=false, lossFct=nothing, kwargs...) - accu = 0.0 + accu = nothing if update @assert lossFct != nothing "update=true, but no keyword lossFct provided. Please provide one." numBatch = length(batch) for i in 1:numBatch b = batch[i] - b_next = nothing + b_next = nothing if i < numBatch b_next = batch[i+1] end - if b.xStart != nothing + if !isnothing(b.xStart) run!(neuralFMU, b; lastBatchElement=b_next, progressDescr="Sim. Batch $(i)/$(numBatch) |", kwargs...) 
end - l = loss!(b, lossFct; logLoss=logLoss) - - accu += l + if isnothing(accu) + accu = loss!(b, lossFct; logLoss=logLoss) + else + accu += loss!(b, lossFct; logLoss=logLoss) + end + end else for b in batch @assert length(b.losses) > 0 "batch_loss(): `update=false` but no existing losses for batch element $(b)" - accu += b.losses[end].loss + + if isnothing(accu) + accu = b.losses[end].loss + else + accu += b.losses[end].loss + end end end @@ -128,7 +258,7 @@ end function batch_loss(model, batch::AbstractArray{<:FMU2BatchElement}; update::Bool=false, logLoss::Bool=false, lossFct=nothing, p=nothing) - accu = 0.0 + accu = nothing if update @assert lossFct != nothing "update=true, but no keyword lossFct provided. Please provide one." @@ -138,13 +268,21 @@ function batch_loss(model, batch::AbstractArray{<:FMU2BatchElement}; update::Boo run!(model, b, p) - accu += loss!(b, lossFct; logLoss=logLoss) + if isnothing(accu) + accu = loss!(b, lossFct; logLoss=logLoss) + else + accu += loss!(b, lossFct; logLoss=logLoss) + end end else for b in batch - accu += b.losses[end].loss + if isnothing(accu) + accu = nominalLoss(b.losses[end]) + else + accu += nominalLoss(b.losses[end]) + end end end @@ -152,4 +290,23 @@ function batch_loss(model, batch::AbstractArray{<:FMU2BatchElement}; update::Boo return accu end +mutable struct ToggleLoss + index::Int + losses + + function ToggleLoss(losses...) + @assert length(losses) >= 2 "ToggleLoss needs at least 2 losses, $(length(losses)) given." + return new(1, losses) + end +end + +function (t::ToggleLoss)(args...; kwargs...) + ret = t.losses[t.index](args...; kwargs...) + t.index += 1 + if t.index > length(t.losses) + t.index = 1 + end + return ret +end + end # module diff --git a/src/neural.jl b/src/neural.jl index 59a69623..3625c2f4 100644 --- a/src/neural.jl +++ b/src/neural.jl @@ -8,12 +8,12 @@ import FMIImport: assert_integrator_valid, fd_eltypes, fd_set!, finishSolveFMU, handleEvents, isdual, istracked, prepareSolveFMU, rd_set!, undual, unsense, untrack import Optim import ProgressMeter -import SciMLSensitivity.SciMLBase: CallbackSet, ContinuousCallback, ODESolution, ReturnCode, RightRootFind, +import FMIImport.SciMLSensitivity.SciMLBase: CallbackSet, ContinuousCallback, ODESolution, ReturnCode, RightRootFind, VectorContinuousCallback, set_u!, terminate!, u_modified!, build_solution -import SciMLSensitivity.ForwardDiff -import SciMLSensitivity.ReverseDiff -using SciMLSensitivity.ReverseDiff: TrackedArray -import SciMLSensitivity: InterpolatingAdjoint, ReverseDiffVJP +import FMIImport.SciMLSensitivity.ForwardDiff +import FMIImport.SciMLSensitivity.ReverseDiff +using FMIImport.SciMLSensitivity.ReverseDiff: TrackedArray +import FMIImport.SciMLSensitivity: InterpolatingAdjoint, ReverseDiffVJP import ThreadPools using DiffEqCallbacks @@ -26,7 +26,7 @@ using FMIImport: FMU2Component, FMU2Event, FMU2Solution, fmi2ComponentState, fmi2TypeModelExchange, logError, logInfo, logWarning using Flux using Flux.Zygote -using SciMLSensitivity: +using FMIImport.SciMLSensitivity: ForwardDiffSensitivity, InterpolatingAdjoint, ReverseDiffVJP, ZygoteVJP zero_tgrad(u,p,t) = zero(u) @@ -156,6 +156,7 @@ end ##### EVENT HANDLING START function startCallback(integrator, nfmu::ME_NeuralFMU, c::Union{FMU2Component, Nothing}, t) + ignore_derivatives() do #nfmu.solveCycle += 1 #@debug "[$(nfmu.solveCycle)][FIRST STEP]" @@ -241,7 +242,7 @@ function condition(nfmu::ME_NeuralFMU, c::FMU2Component, out::SubArray{<:Forward @assert getCurrentComponent(nfmu.fmu) == c "Thread 
`$(Threads.threadid())` wants to evaluate wrong component!"
     @debug assert_integrator_valid(integrator)
 
-    @assert c.state == fmi2ComponentStateContinuousTimeMode "condition(...): Must be called in mode continuous time."
+    #@assert c.state == fmi2ComponentStateContinuousTimeMode "condition(...): Must be called in continuous time mode."
 
     # ToDo: set inputs here
     #fmiSetReal(myFMU, InputRef, Value)
@@ -270,7 +271,7 @@ function condition(nfmu::ME_NeuralFMU, c::FMU2Component, out::SubArray{<:Reverse
     @assert getCurrentComponent(nfmu.fmu) == c "Thread `$(Threads.threadid())` wants to evaluate wrong component!"
     @debug assert_integrator_valid(integrator)
 
-    @assert c.state == fmi2ComponentStateContinuousTimeMode "condition(...): Must be called in mode continuous time."
+    #@assert c.state == fmi2ComponentStateContinuousTimeMode "condition(...): Must be called in continuous time mode."
 
     # ToDo: set inputs here
     #fmiSetReal(myFMU, InputRef, Value)
@@ -298,7 +299,7 @@ function condition(nfmu::ME_NeuralFMU, c::FMU2Component, out, _x, t, integrator)
     @debug assert_integrator_valid(integrator)
     @assert getCurrentComponent(nfmu.fmu) == c "Thread `$(Threads.threadid())` wants to evaluate wrong component!"
 
-    @assert c.state == fmi2ComponentStateContinuousTimeMode "condition(...): Must be called in mode continuous time."
+    @debug @assert c.state == fmi2ComponentStateContinuousTimeMode "condition(...): Must be called in continuous time mode."
 
     #@debug "State condition..."
 
@@ -371,7 +372,7 @@ function affectFMU!(nfmu::ME_NeuralFMU, c::FMU2Component, integrator, idx)
     @assert getCurrentComponent(nfmu.fmu) == c "Thread `$(Threads.threadid())` wants to evaluate wrong component!"
     @debug assert_integrator_valid(integrator)
 
-    @assert c.state == fmi2ComponentStateContinuousTimeMode "affectFMU!(...): Must be in continuous time mode!"
+    @debug @assert c.state == fmi2ComponentStateContinuousTimeMode "affectFMU!(...): Must be in continuous time mode!"
 
     t = unsense(integrator.t)
     x = unsense(integrator.u)
@@ -440,11 +441,14 @@ function affectFMU!(nfmu::ME_NeuralFMU, c::FMU2Component, integrator, idx)
         T, V, N = fd_eltypes(integrator.u)
 
         new_x = collect(ForwardDiff.Dual{T, V, N}(V(right_x[i]), ForwardDiff.partials(integrator.u[i])) for i in 1:length(integrator.u))
-        set_u!(integrator, new_x)
+        #set_u!(integrator, new_x)
+        integrator.u .= new_x
 
         @debug "affectFMU!(_, _, $idx): NeuralFMU event with state change at $t. Indicator [$idx]. (ForwardDiff) "
     else
-        integrator.u = right_x
+        #set_u!(integrator, right_x)
+        integrator.u .= right_x
+
         @debug "affectFMU!(_, _, $idx): NeuralFMU event with state change at $t. Indicator [$idx]."
     end
 
@@ -556,6 +560,37 @@ function saveValues(nfmu::ME_NeuralFMU, c::FMU2Component, recordValues, _x, t, i
     return (fmi2GetReal(c, recordValues)...,)
 end
 
+# TODO
+import DifferentiableEigen
+function saveEigenvalues(nfmu::ME_NeuralFMU, c::FMU2Component, _x, p, _t, integrator, sensitivity)
+
+    #@assert c.state == fmi2ComponentStateContinuousTimeMode "saveEigenvalues(...): Must be in continuous time mode."
+
+    c.solution.evals_saveeigenvalues += 1
+
+    t = unsense(_t)
+
+    c.next_t = t
+
+    A = nothing
+    #if sensitivity == :ForwardDiff
+        A = ForwardDiff.jacobian(x -> evaluateReModel(nfmu, c, x, p), _x) # TODO: chunk_size!
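+        # note: `A` is the Jacobian d(dx/dt)/dx of the NeuralFMU right-hand side; the
+        # eigenvalues below are returned by `DifferentiableEigen.eigen` as a flat vector of
+        # interleaved real/imaginary parts (2 values per eigenvalue), differentiable w.r.t. `A`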
+ # elseif sensitivity == :ReverseDiff + # A = ReverseDiff.jacobian(x -> evaluateReModel(nfmu, c, x, p), _x) + # elseif sensitivity == :Zygote + # A = Zygote.jacobian(x -> evaluateReModel(nfmu, c, x, p), _x)[1] + # elseif sensitivity == :none + # A = ForwardDiff.jacobian(x -> evaluateReModel(nfmu, c, x, p), unsense(_x)) + # end + eigs, _ = DifferentiableEigen.eigen(A) + + # x = unsense(_x) + # c.next_t = t + # evaluateModel(nfmu, c, x) + + return (eigs...,) +end + function fx(nfmu::ME_NeuralFMU, c::FMU2Component, dx,#::Array{<:Real}, @@ -727,6 +762,10 @@ function checkExecTime(integrator, nfmu::ME_NeuralFMU, c, max_execution_duration return 1.0 end +function getComponent(nfmu::NeuralFMU) + return hasCurrentComponent(nfmu.fmu) ? getCurrentComponent(nfmu.fmu) : nothing +end + """ Evaluates the ME_NeuralFMU in the timespan given during construction or in a custom timespan from `t_start` to `t_stop` for a given start state `x_start`. @@ -750,7 +789,10 @@ function (nfmu::ME_NeuralFMU)(x_start::Union{Array{<:Real}, Nothing} = nfmu.x0, saveEventPositions::Bool=false, max_execution_duration::Real=-1.0, recordValues::fmi2ValueReferenceFormat=nfmu.recordValues, + recordEigenvaluesSensitivity::Symbol=:none, + recordEigenvalues::Bool=(recordEigenvaluesSensitivity != :none), saveat=nfmu.saveat, # ToDo: Data type + sensealg=nfmu.fmu.executionConfig.sensealg, # ToDo: AbstractSensitivityAlgorithm kwargs...) if saveat[1] != tspan[1] @@ -763,7 +805,6 @@ function (nfmu::ME_NeuralFMU)(x_start::Union{Array{<:Real}, Nothing} = nfmu.x0, recordValues = prepareValueReference(nfmu.fmu, recordValues) saving = (length(recordValues) > 0) - sense = nfmu.fmu.executionConfig.sensealg inPlace = nfmu.fmu.executionConfig.inPlace t_start = tspan[1] @@ -795,7 +836,7 @@ function (nfmu::ME_NeuralFMU)(x_start::Union{Array{<:Real}, Nothing} = nfmu.x0, callbacks = [] - c = (hasCurrentComponent(nfmu.fmu) ? getCurrentComponent(nfmu.fmu) : nothing) + c = getComponent(nfmu) c = startCallback(nothing, nfmu, c, t_start) ignore_derivatives() do @@ -804,6 +845,11 @@ function (nfmu::ME_NeuralFMU)(x_start::Union{Array{<:Real}, Nothing} = nfmu.x0, push!(callbacks, cb) end + # cb = FunctionCallingCallback((x, t, integrator) -> @info "Start"; # startCallback(integrator, nfmu, c, t); + # funcat=[t_start], + # func_start=true) + # push!(callbacks, cb) + nfmu.fmu.hasStateEvents = (c.fmu.modelDescription.numberOfEventIndicators > 0) nfmu.fmu.hasTimeEvents = (c.eventInfo.nextEventTimeDefined == fmi2True) @@ -860,6 +906,8 @@ function (nfmu::ME_NeuralFMU)(x_start::Union{Array{<:Real}, Nothing} = nfmu.x0, if showProgress c.progressMeter = ProgressMeter.Progress(1000; desc=progressDescr, color=:blue, dt=1.0) #, barglyphs=ProgressMeter.BarGlyphs("[=> ]")) ProgressMeter.update!(c.progressMeter, 0) # show it! 
+    else
+        c.progressMeter = nothing
     end
 
     # integrator step callback
@@ -883,6 +931,34 @@ function (nfmu::ME_NeuralFMU)(x_start::Union{Array{<:Real}, Nothing} = nfmu.x0,
         push!(callbacks, savingCB)
     end
 
+    if recordEigenvalues
+
+        @assert recordEigenvaluesSensitivity ∈ (:none, :ForwardDiff, :ReverseDiff, :Zygote) "Keyword `recordEigenvaluesSensitivity` must be one of (:none, :ForwardDiff, :ReverseDiff, :Zygote)"
+
+        recordEigenvaluesType = nothing
+        if recordEigenvaluesSensitivity == :ForwardDiff
+            recordEigenvaluesType = FMIImport.ForwardDiff.Dual
+        elseif recordEigenvaluesSensitivity == :ReverseDiff
+            recordEigenvaluesType = FMIImport.ReverseDiff.TrackedReal
+        elseif recordEigenvaluesSensitivity ∈ (:none, :Zygote)
+            recordEigenvaluesType = fmi2Real
+        end
+
+        dtypes = collect(recordEigenvaluesType for _ in 1:2*length(c.fmu.modelDescription.stateValueReferences))
+        c.solution.eigenvalues = SavedValues(recordEigenvaluesType, Tuple{dtypes...})
+
+        savingCB = nothing
+        if saveat === nothing
+            savingCB = SavingCallback((u,t,integrator) -> saveEigenvalues(nfmu, c, u, p, t, integrator, recordEigenvaluesSensitivity),
+                                      c.solution.eigenvalues)
+        else
+            savingCB = SavingCallback((u,t,integrator) -> saveEigenvalues(nfmu, c, u, p, t, integrator, recordEigenvaluesSensitivity),
+                                      c.solution.eigenvalues,
+                                      saveat=saveat)
+        end
+        push!(callbacks, savingCB)
+    end
+
     end # ignore_derivatives
 
     prob = nothing
@@ -897,16 +973,20 @@ function (nfmu::ME_NeuralFMU)(x_start::Union{Array{<:Real}, Nothing} = nfmu.x0,
         prob = ODEProblem{false}(ff, nfmu.x0, nfmu.tspan, p)
     end
 
-    if isnothing(sense)
-        sense = InterpolatingAdjoint(autojacvec=ReverseDiffVJP(false))
-    end
-
     # if (length(callbacks) == 2) # only start and stop callback, so the system is pure continuous
     #     startCallback(nfmu, nfmu.tspan[1])
-    #     c.solution.states = solve(prob, nfmu.args...; sensealg=sense, saveat=nfmu.saveat, nfmu.kwargs...)
+    #     c.solution.states = solve(prob, nfmu.args...; sensealg=sensealg, saveat=nfmu.saveat, nfmu.kwargs...)
     #     stopCallback(nfmu, nfmu.tspan[end])
     # else
-    #c.solution.states = solve(prob, nfmu.args...; sensealg=sense, saveat=nfmu.saveat, callback = CallbackSet(callbacks...), nfmu.kwargs...)
+    #c.solution.states = solve(prob, nfmu.args...; sensealg=sensealg, saveat=nfmu.saveat, callback = CallbackSet(callbacks...), nfmu.kwargs...)
+
+    if isnothing(sensealg)
+        #if length(callbacks) > 0 # currently, only ForwardDiffSensitivity works for hybrid NeuralODEs with multiple events triggered
+        #    sensealg = ForwardDiffSensitivity(; chunk_size=32, convert_tspan=true)
+        #else
+            sensealg = InterpolatingAdjoint(;autojacvec=ReverseDiffVJP(false), checkpointing=false) # ReverseDiffVJP()
+        #end
+    end
 
     args = Vector{Any}()
     kwargs = Dict{Symbol, Any}(kwargs...)
@@ -919,11 +999,12 @@ function (nfmu::ME_NeuralFMU)(x_start::Union{Array{<:Real}, Nothing} = nfmu.x0,
         push!(args, solver)
     end
 
-    c.solution.states = solve(prob, args...; sensealg=sense, callback=CallbackSet(callbacks...), nfmu.kwargs..., kwargs...)
-
-    #end
+    c.solution.states = solve(prob, args...; sensealg=sensealg, callback=CallbackSet(callbacks...), nfmu.kwargs..., kwargs...)
 
     ignore_derivatives() do
+
+        @assert !isnothing(c.solution.states) "Solving NeuralODE returned `nothing`!"
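+
+        # note: even if the integration stopped early with a failing return code, the partial
+        # trajectory is kept in `c.solution.states`, so a loss can still be computed on it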
+ # ReverseDiff returns an array instead of an ODESolution, this needs to be corrected if isa(c.solution.states, TrackedArray) @@ -1127,42 +1208,69 @@ end # return y, fmi2EvaluateME_pullback # end -function computeGradient(loss, params, gradient, chunk_size) +function computeGradient(loss, params, gradient, chunk_size, multiObjective::Bool) if gradient == :ForwardDiff if chunk_size == :auto_forwarddiff - grad_conf = ForwardDiff.GradientConfig(loss, params); - return ForwardDiff.gradient(loss, params, grad_conf); + if multiObjective + conf = ForwardDiff.JacobianConfig(loss, params) + jac = ForwardDiff.jacobian(loss, params, conf) + return collect(jac[i,:] for i in 1:size(jac)[1]) + else + conf = ForwardDiff.GradientConfig(loss, params) + return [ForwardDiff.gradient(loss, params, conf)] + end elseif chunk_size == :auto_fmiflux chunk_size = 32 - grad_conf = ForwardDiff.GradientConfig(loss, params, ForwardDiff.Chunk{min(chunk_size, length(params))}()); - return ForwardDiff.gradient(loss, params, grad_conf); + + if multiObjective + conf = ForwardDiff.JacobianConfig(loss, params, ForwardDiff.Chunk{min(chunk_size, length(params))}()); + jac = ForwardDiff.jacobian(loss, params, conf) + return collect(jac[i,:] for i in 1:size(jac)[1]) + else + conf = ForwardDiff.GradientConfig(loss, params, ForwardDiff.Chunk{min(chunk_size, length(params))}()); + return [ForwardDiff.gradient(loss, params, conf)] + end else - grad_conf = ForwardDiff.GradientConfig(loss, params, ForwardDiff.Chunk{min(chunk_size, length(params))}()); - return ForwardDiff.gradient(loss, params, grad_conf); + if multiObjective + conf = ForwardDiff.JacobianConfig(loss, params, ForwardDiff.Chunk{min(chunk_size, length(params))}()); + jac = ForwardDiff.jacobian(loss, params, conf) + return collect(jac[i,:] for i in 1:size(jac)[1]) + else + conf = ForwardDiff.GradientConfig(loss, params, ForwardDiff.Chunk{min(chunk_size, length(params))}()); + return [ForwardDiff.gradient(loss, params, conf)] + end end elseif gradient == :Zygote - return Zygote.gradient( - loss, - params)[1] + if multiObjective + jac = Zygote.jacobian(loss, params)[1] + return collect(jac[i,:] for i in 1:size(jac)[1]) + else + return [Zygote.gradient(loss, params)[1]] + end + elseif gradient == :ReverseDiff - return ReverseDiff.gradient( - loss, - params) + if multiObjective + jac = ReverseDiff.jacobian(loss, params) + return collect(jac[i,:] for i in 1:size(jac)[1]) + else + return [ReverseDiff.gradient(loss, params)] + end else @assert false "Unknown `gradient=$(gradient)`, supported are `:ForwardDiff`, `:Zygote` and `:ReverseDiff`." 
    end
 end
 
+# WIP
 function trainStep(loss, params, gradient, chunk_size, optim::Optim.AbstractOptimizer, printStep, proceed_on_assert, cb; state=nothing)
 
     try
@@ -1178,9 +1286,6 @@ function trainStep(loss, params, gradient, chunk_size, optim::Optim.AbstractOpti
 
             update_state!(d, state, optim)
-
-            
-
             step = Flux.Optimise.apply!(optim, params[j], grad)
             params[j] .-= step
 
@@ -1212,7 +1317,7 @@ function trainStep(loss, params, gradient, chunk_size, optim::Optim.AbstractOpti
 end
 
 lk_OptimApply = ReentrantLock()
-function trainStep(loss, params, gradient, chunk_size, optim::Flux.Optimise.AbstractOptimiser, printStep, proceed_on_assert, cb)
+function trainStep(loss, params, gradient, chunk_size, optim::Flux.Optimise.AbstractOptimiser, printStep, proceed_on_assert, cb, multiObjective)
 
     global lk_OptimApply
 
@@ -1220,19 +1325,22 @@ function trainStep(loss, params, gradient, chunk_size, optim::Flux.Optimise.Abst
 
         for j in 1:length(params)
 
-            grad = computeGradient(loss, params[j], gradient, chunk_size)
-
-            @assert !isnothing(grad) "Gradient nothing!"
+            grads = computeGradient(loss, params[j], gradient, chunk_size, multiObjective)
+
+            @assert !any(isnothing.(grads)) "Gradient nothing!"
 
             lock(lk_OptimApply) do
-                step = Flux.Optimise.apply!(optim, params[j], grad)
-                params[j] .-= step
+                for grad in grads
+                    step = Flux.Optimise.apply!(optim, params[j], grad)
+                    params[j] .-= step
+
+                    if printStep
+                        @info "Grad: Min = $(min(abs.(grad)...))   Max = $(max(abs.(grad)...))"
+                        @info "Step: Min = $(min(abs.(step)...))   Max = $(max(abs.(step)...))"
+                    end
+                end
             end
 
-            if printStep
-                @info "Grad: Min = $(min(abs.(grad)...))   Max = $(max(abs.(grad)...))"
-                @info "Step: Min = $(min(abs.(step)...))   Max = $(max(abs.(step)...))"
-            end
-
         end
 
     catch e
@@ -1275,8 +1383,9 @@ A function analogous to Flux.train! but with additional features and explicit pa
 - `printStep` a boolean determining whether the gradient min/max is printed after every step (for gradient debugging)
 - `proceed_on_assert` a boolean that determines whether to throw an exception on error or to proceed with training and just print the error
 - `multiThreading` [WIP]: a boolean determining whether multiple threads are used for training (gradients are generated in parallel)
+- `multiObjective`: set this to `true` if the loss function returns multiple values (multi-objective optimization); currently, gradients are applied to the optimizer one after another (default `false`)
 """
-function train!(loss, params::Union{Flux.Params, Zygote.Params, AbstractVector{<:AbstractVector{<:Real}}}, data, optim; gradient::Symbol=:ReverseDiff, cb=nothing, chunk_size::Union{Integer, Symbol}=:auto_forwarddiff, printStep::Bool=false, proceed_on_assert::Bool=false, multiThreading::Bool=false) # ::Flux.Optimise.AbstractOptimiser
+function train!(loss, params::Union{Flux.Params, Zygote.Params, AbstractVector{<:AbstractVector{<:Real}}}, data, optim; gradient::Symbol=:ReverseDiff, cb=nothing, chunk_size::Union{Integer, Symbol}=:auto_fmiflux, printStep::Bool=false, proceed_on_assert::Bool=false, multiThreading::Bool=false, multiObjective::Bool=false) # ::Flux.Optimise.AbstractOptimiser
 
     if multiThreading && Threads.nthreads() == 1
         @warn "train!(...): Multi-threading is set via flag `multiThreading=true`, but this Julia process does not have multiple threads. This will not result in a speed-up. Please spawn Julia in multi-thread mode to speed up training."
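+
+    # usage sketch (cf. test/batching.jl below): with `multiObjective=true`, the loss may
+    # return a vector of objectives; one gradient per objective is computed and applied
+    # to the optimizer one after another:
+    #
+    #   FMIFlux.train!(losssum_multi, p_net, Iterators.repeated((), parse(Int, ENV["NUMSTEPS"])), Adam(1e-3);
+    #                  gradient=:ReverseDiff, multiObjective=true)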
@@ -1287,7 +1396,7 @@ function train!(loss, params::Union{Flux.Params, Zygote.Params, AbstractVector{< return end - _trainStep = (i,) -> trainStep(loss, params, gradient, chunk_size, optim, printStep, proceed_on_assert, cb) + _trainStep = (i,) -> trainStep(loss, params, gradient, chunk_size, optim, printStep, proceed_on_assert, cb, multiObjective) if multiThreading ThreadPools.qforeach(_trainStep, 1:length(data)) @@ -1303,4 +1412,165 @@ end function train!(loss, neuralFMU::Union{ME_NeuralFMU, CS_NeuralFMU}, data, optim::Flux.Optimise.AbstractOptimiser; kwargs...) params = Flux.params(neuralFMU) train!(loss, params, data, optim; kwargs...) +end + +# checks gradient determination for all available sensitivity configurations, see: +# https://docs.sciml.ai/SciMLSensitivity/stable/manual/differential_equation_sensitivities/ +using FMIImport.SciMLSensitivity +function checkSensalgs!(loss, neuralFMU::Union{ME_NeuralFMU, CS_NeuralFMU}; + gradients=(:ForwardDiff, :ReverseDiff, :Zygote), + max_msg_len=192, chunk_size=32, + OtD_autojacvecs=(false, true, TrackerVJP(), ZygoteVJP(), EnzymeVJP(), ReverseDiffVJP()), + OtD_sensealgs=(BacksolveAdjoint, InterpolatingAdjoint, QuadratureAdjoint), + OtD_checkpointings=(true, false), + DtO_sensealgs=(ReverseDiffAdjoint, ZygoteAdjoint, TrackerAdjoint, ForwardDiffSensitivity), + multiObjective::Bool=false, + bestof::Int=2, + kwargs...) + + params = Flux.params(neuralFMU) + initial_sensalg = neuralFMU.fmu.executionConfig.sensealg + + best_timing = Inf + best_gradient = nothing + best_sensealg = nothing + + printstyled("Mode: Optimize-then-Discretize\n") + for gradient ∈ gradients + printstyled("\tGradient: $(gradient)\n") + + for sensealg ∈ OtD_sensealgs + printstyled("\t\tSensealg: $(sensealg)\n") + for checkpointing ∈ OtD_checkpointings + printstyled("\t\t\tCheckpointing: $(checkpointing)\n") + + if sensealg == QuadratureAdjoint && checkpointing + printstyled("\t\t\t\tQuadratureAdjoint doesn't implement checkpointing, skipping ...\n") + continue + end + + for autojacvec ∈ OtD_autojacvecs + printstyled("\t\t\t\tAutojacvec: $(autojacvec)\n") + + if sensealg ∈ (BacksolveAdjoint, InterpolatingAdjoint) + neuralFMU.fmu.executionConfig.sensealg = sensealg(; autojacvec=autojacvec, chunk_size=chunk_size, checkpointing=checkpointing) + else + neuralFMU.fmu.executionConfig.sensealg = sensealg(; autojacvec=autojacvec, chunk_size=chunk_size) + end + + call = () -> _tryrun(loss, params, gradient, chunk_size, 5, max_msg_len, multiObjective) + for i in 1:bestof + timing = call() + + if timing < best_timing + best_timing = timing + best_gradient = gradient + best_sensealg = neuralFMU.fmu.executionConfig.sensealg + end + end + + end + end + end + end + + printstyled("Mode: Discretize-then-Optimize\n") + for gradient ∈ gradients + printstyled("\tGradient: $(gradient)\n") + for sensealg ∈ DtO_sensealgs + printstyled("\t\tSensealg: $(sensealg)\n") + + if sensealg == ForwardDiffSensitivity + neuralFMU.fmu.executionConfig.sensealg = sensealg(; chunk_size=chunk_size, convert_tspan=true) + else + neuralFMU.fmu.executionConfig.sensealg = sensealg() + end + + call = () -> _tryrun(loss, params, gradient, chunk_size, 3, max_msg_len, multiObjective) + for i in 1:bestof + timing = call() + + if timing < best_timing + best_timing = timing + best_gradient = gradient + best_sensealg = neuralFMU.fmu.executionConfig.sensealg + end + end + + end + end + + neuralFMU.fmu.executionConfig.sensealg = initial_sensalg + + printstyled("------------------------------\nBest time: $(best_timing)\nBest 
gradient: $(best_gradient)\nBest sensealg: $(best_sensealg)\n", color=:blue)
+
+    return nothing
+end
+
+function _tryrun(loss, params, gradient, chunk_size, ts, max_msg_len, multiObjective::Bool=false; print_stdout::Bool=true, print_stderr::Bool=true)
+
+    spacing = ""
+    for t in 1:ts
+        spacing *= "\t"
+    end
+
+    message = ""
+    color = :black
+    timing = Inf
+
+    original_stdout = stdout
+    original_stderr = stderr
+    (rd_stdout, wr_stdout) = redirect_stdout();
+    (rd_stderr, wr_stderr) = redirect_stderr();
+
+    try
+        tstart = time()
+        grads = computeGradient(loss, params[1], gradient, chunk_size, multiObjective)
+        timing = time() - tstart
+
+        if length(grads[1]) == 1
+            grads = [grads]
+        end
+        val = collect(sum(abs.(grad)) for grad in grads)
+
+        message = spacing * "SUCCESS | $(round(timing; digits=2))s | GradAbsSum: $(round.(val; digits=3))\n"
+        color = :green
+    catch e
+        msg = "$(e)"
+        if length(msg) > max_msg_len
+            msg = msg[1:max_msg_len] * "..."
+        end
+
+        message = spacing * "$(msg)\n"
+        color = :red
+    end
+
+    redirect_stdout(original_stdout)
+    redirect_stderr(original_stderr)
+    close(wr_stdout)
+    close(wr_stderr)
+
+    if print_stdout
+        msg = read(rd_stdout, String)
+        if length(msg) > 0
+            if length(msg) > max_msg_len
+                msg = msg[1:max_msg_len] * "..."
+            end
+            printstyled(spacing * "STDOUT: $(msg)\n", color=:yellow)
+        end
+    end
+
+    if print_stderr
+        msg = read(rd_stderr, String)
+        if length(msg) > 0
+            if length(msg) > max_msg_len
+                msg = msg[1:max_msg_len] * "..."
+            end
+            printstyled(spacing * "STDERR: $(msg)\n", color=:yellow)
+        end
+    end
+
+    printstyled(message, color=color)
+
+    return timing
+end
\ No newline at end of file
diff --git a/src/scheduler.jl b/src/scheduler.jl
index 3acf0d3a..68e8bb39 100644
--- a/src/scheduler.jl
+++ b/src/scheduler.jl
@@ -182,7 +182,7 @@ function initialize!(scheduler::BatchScheduler; runkwargs...)
 
     lastIndex = 0
     scheduler.step = 0
-    scheduler.elementIndex = 1
+    scheduler.elementIndex = 0
 
     if hasfield(typeof(scheduler), :runkwargs)
         scheduler.runkwargs = runkwargs
@@ -214,8 +214,8 @@ function plot(scheduler::BatchScheduler, lastIndex::Integer)
 
     num = length(scheduler.batch)
     xs = 1:num
-    ys = collect((length(b.losses) > 0 ? b.losses[end].loss : 0.0) for b in scheduler.batch)
-    ys_shadow = collect((length(b.losses) > 1 ? b.losses[end-1].loss : 1e-16) for b in scheduler.batch)
+    ys = collect((length(b.losses) > 0 ? nominalLoss(b.losses[end]) : 0.0) for b in scheduler.batch)
+    ys_shadow = collect((length(b.losses) > 1 ? 
nominalLoss(b.losses[end-1]) : 1e-16) for b in scheduler.batch) title = "[$(scheduler.step)]" if hasfield(typeof(scheduler), :printMsg) @@ -248,9 +248,9 @@ function plot(scheduler::BatchScheduler, lastIndex::Integer) end if lastIndex > 0 - Plots.plot!(fig, [lastIndex], [0.0], color=:pink, marker=:circle, label="Current ID", markersize = 5.0) # current batch element + Plots.plot!(fig, [lastIndex], [0.0], color=:pink, marker=:circle, label="Current ID [$(lastIndex)]", markersize = 5.0) # current batch element end - Plots.plot!(fig, [scheduler.elementIndex], [0.0], color=:pink, marker=:circle, label="Next ID", markersize = 3.0) # next batch element + Plots.plot!(fig, [scheduler.elementIndex], [0.0], color=:pink, marker=:circle, label="Next ID [$(scheduler.elementIndex)]", markersize = 3.0) # next batch element display(fig) end @@ -294,7 +294,7 @@ function apply!(scheduler::WorstElementScheduler; print::Bool=true) num = length(scheduler.batch) for i in 1:num - #l = scheduler.batch[i].losses[end].loss + #l = nominalLoss(scheduler.batch[i].losses[end]) FMIFlux.run!(scheduler.neuralFMU, scheduler.batch[i]; scheduler.runkwargs...) l = FMIFlux.loss!(scheduler.batch[i], scheduler.lossFct; logLoss=true) @@ -325,7 +325,9 @@ function apply!(scheduler::LossAccumulationScheduler; print::Bool=true) nextind = 1 # reset current accu loss - scheduler.lossAccu[scheduler.elementIndex] = 0.0 + if scheduler.elementIndex > 0 + scheduler.lossAccu[scheduler.elementIndex] = 0.0 + end num = length(scheduler.batch) for i in 1:num @@ -333,12 +335,13 @@ function apply!(scheduler::LossAccumulationScheduler; print::Bool=true) l = 0.0 if length(scheduler.batch[i].losses) >= 1 - l = scheduler.batch[i].losses[end].loss + l = nominalLoss(scheduler.batch[i].losses[end]) end if scheduler.step % scheduler.updateStep == 0 FMIFlux.run!(scheduler.neuralFMU, scheduler.batch[i]; scheduler.runkwargs...) - l = FMIFlux.loss!(scheduler.batch[i], scheduler.lossFct; logLoss=true) + FMIFlux.loss!(scheduler.batch[i], scheduler.lossFct; logLoss=true) + l = nominalLoss(scheduler.batch[i].losses[end]) end scheduler.lossAccu[i] += l @@ -383,7 +386,7 @@ function apply!(scheduler::WorstGrowScheduler; print::Bool=true) l_der = l # fallback for first run (greatest error) if length(scheduler.batch[i].losses) >= 2 - l_der = (l - scheduler.batch[i].losses[end-1].loss) + l_der = (l - nominalLoss(scheduler.batch[i].losses[end-1])) end losssum += l @@ -410,23 +413,25 @@ end function apply!(scheduler::RandomScheduler; print::Bool=true) + next = rand(1:length(scheduler.batch)) + if print - @info "$(scheduler.elementIndex) [$(scheduler.step)]" + @info "Current step: $(scheduler.step) | Current element=$(scheduler.elementIndex) | Next element=$(next)" end - return rand(1:length(scheduler.batch)) + return next end function apply!(scheduler::SequentialScheduler; print::Bool=true) - if print - @info "$(scheduler.elementIndex) [$(scheduler.step)]" - end - next = scheduler.elementIndex+1 if next > length(scheduler.batch) next = 1 end + if print + @info "Current step: $(scheduler.step) | Current element=$(scheduler.elementIndex) | Next element=$(next)" + end + return next end \ No newline at end of file diff --git a/test/batching.jl b/test/batching.jl new file mode 100644 index 00000000..43bcf385 --- /dev/null +++ b/test/batching.jl @@ -0,0 +1,109 @@ +# +# Copyright (c) 2021 Tobias Thummerer, Lars Mikelsons +# Licensed under the MIT license. See LICENSE file in the project root for details. 
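+#
+# This test trains a NeuralFMU on generated spring-pendulum data, once with a
+# single-objective loss and once with a multi-objective loss (`multiObjective=true`).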
+# + +using FMI +using Flux + +import Random +Random.seed!(1234); + +t_start = 0.0 +t_step = 0.01 +t_stop = 50.0 +tData = t_start:t_step:t_stop + +# generate training data +posData = cos.(tData.*3.0)*2.0 +velData = sin.(tData.*3.0) +x0 = [0.5, 0.0] + +# load FMU for NeuralFMU +fmu = fmiLoad("SpringPendulum1D", ENV["EXPORTINGTOOL"], ENV["EXPORTINGVERSION"]; type=fmi2TypeModelExchange) + +using FMI.FMIImport +using FMI.FMIImport.FMICore +import FMI.FMIImport: unsense + +# loss function for training +function losssum_single(p) + global problem, x0, posData + solution = problem(x0; p=p, showProgress=true) + + if !solution.success + return Inf + end + + posNet = fmi2GetSolutionState(solution, 1; isIndex=true) + + return Flux.Losses.mse(posNet, posData) +end + +function losssum_multi(p) + global problem, x0, posData + solution = problem(x0; p=p, showProgress=true) + + if !solution.success + return [Inf, Inf] + end + + posNet = fmi2GetSolutionState(solution, 1; isIndex=true) + velNet = fmi2GetSolutionState(solution, 2; isIndex=true) + + return [Flux.Losses.mse(posNet, posData), Flux.Losses.mse(velNet, velData)] +end + +# callback function for training +global iterCB = 0 +global lastLoss = Inf +function callb(losssum, p) + global iterCB += 1 + global lastLoss + + if iterCB % 5 == 0 + newloss = losssum(p[1]) + loss = (length(newloss) > 1 ? sum(newloss) : newloss) + @info "[$(iterCB)] Loss: $loss" + @test loss < lastLoss + lastLoss = loss + end +end + +numStates = fmiGetNumberOfStates(fmu) + +# the "Chain" for training +net = Chain(x -> fmu(;x=x), + Dense(numStates, 12, tanh), + Dense(12, numStates, identity)) + +problem = ME_NeuralFMU(fmu, net, (t_start, t_stop); saveat=tData) +@test problem != nothing + +solutionBefore = problem(x0) + +# train it ... +p_net = Flux.params(problem) + +iterCB = 0 + +# single objective +lastLoss = losssum_single(p_net[1]) +optim = Adam(1e-3) +FMIFlux.train!(losssum_single, p_net, Iterators.repeated((), parse(Int, ENV["NUMSTEPS"])), optim; cb=()->callb(losssum_single, p_net), gradient=:ReverseDiff) + +# multi objective +lastLoss = sum(losssum_multi(p_net[1])) +optim = Adam(1e-3) +FMIFlux.train!(losssum_multi, p_net, Iterators.repeated((), parse(Int, ENV["NUMSTEPS"])), optim; cb=()->callb(losssum_multi, p_net), gradient=:ReverseDiff, multiObjective=true) + +# check results +solutionAfter = problem(x0) +@test solutionAfter.success +@test length(solutionAfter.states.t) == length(tData) +@test solutionAfter.states.t[1] == t_start +@test solutionAfter.states.t[end] == t_stop + +@test length(fmu.components) <= 1 + +fmiUnload(fmu) diff --git a/test/hybrid_ME_dis.jl b/test/hybrid_ME_dis.jl index d4548950..4bd370c9 100644 --- a/test/hybrid_ME_dis.jl +++ b/test/hybrid_ME_dis.jl @@ -16,15 +16,12 @@ t_stop = 3.0 tData = t_start:t_step:t_stop # generate training data -realFMU = fmiLoad("SpringFrictionPendulum1D", ENV["EXPORTINGTOOL"], ENV["EXPORTINGVERSION"]; type=:ME) +fmu = fmiLoad("SpringFrictionPendulum1D", ENV["EXPORTINGTOOL"], ENV["EXPORTINGVERSION"]; type=:ME) pdict = Dict("mass.m" => 1.3) -realSimData = fmiSimulate(realFMU, (t_start, t_stop); parameters=pdict, recordValues=["mass.s", "mass.v"], saveat=tData) +realSimData = fmiSimulate(fmu, (t_start, t_stop); parameters=pdict, recordValues=["mass.s", "mass.v"], saveat=tData) x0 = collect(realSimData.values.saveval[1]) @test x0 == [0.5, 0.0] -# load FMU for NeuralFMU -myFMU = fmiLoad("SpringPendulum1D", ENV["EXPORTINGTOOL"], ENV["EXPORTINGVERSION"]; type=fmi2TypeModelExchange) - # setup traing data velData = 
fmi2GetSolutionValue(realSimData, "mass.v") @@ -60,9 +57,9 @@ function callb(p) end end -vr = fmi2StringToValueReference(myFMU, "mass.m") +vr = fmi2StringToValueReference(fmu, "mass.m") -numStates = fmiGetNumberOfStates(myFMU) +numStates = fmiGetNumberOfStates(fmu) # some NeuralFMU setups nets = [] @@ -76,14 +73,14 @@ c4 = CacheRetrieveLayer(c3) net = Chain(x -> c1(x), Dense(numStates, numStates, identity; init=Flux.identity_init), x -> c2([1], x[2], []), - x -> myFMU(;x=x), + x -> fmu(;x=x), x -> c3(x), Dense(numStates, numStates, identity; init=Flux.identity_init), x -> c4([1], x[2], [])) push!(nets, net) # 2. default ME-NeuralFMU (learn dynamics) -net = Chain(x -> myFMU(;x=x), +net = Chain(x -> fmu(;x=x), x -> c1(x), Dense(numStates, 16, identity; init=Flux.identity_init), Dense(16, 16, identity; init=Flux.identity_init), @@ -97,7 +94,7 @@ net = Chain(x -> c1(x), Dense(16, 16, identity, init=Flux.identity_init), Dense(16, numStates, identity, init=Flux.identity_init), x -> c2([1], x[2], []), - x -> myFMU(;x=x)) + x -> fmu(;x=x)) push!(nets, net) # 4. default ME-NeuralFMU (learn dynamics and states) @@ -105,7 +102,7 @@ net = Chain(x -> c1(x), Dense(numStates, 16, identity; init=Flux.identity_init), Dense(16, numStates, identity; init=Flux.identity_init), x -> c2([1], x[2], []), - x -> myFMU(;x=x), + x -> fmu(;x=x), x -> c3(x), Dense(numStates, 16, identity, init=Flux.identity_init), Dense(16, 16, identity, init=Flux.identity_init), @@ -114,7 +111,7 @@ net = Chain(x -> c1(x), push!(nets, net) # 5. NeuralFMU with hard setting time to 0.0 -net = Chain(states -> myFMU(;x=states, t=0.0), +net = Chain(states -> fmu(;x=states, t=0.0), x -> c1(x), Dense(numStates, 8, identity; init=Flux.identity_init), Dense(8, 16, identity; init=Flux.identity_init), @@ -123,9 +120,9 @@ net = Chain(states -> myFMU(;x=states, t=0.0), push!(nets, net) # 6. NeuralFMU with additional getter -getVRs = [fmi2StringToValueReference(myFMU, "mass.s")] +getVRs = [fmi2StringToValueReference(fmu, "mass.s")] numGetVRs = length(getVRs) -net = Chain(x -> myFMU(;x=x, y_refs=getVRs), +net = Chain(x -> fmu(;x=x, y_refs=getVRs), x -> c1(x), Dense(numStates+numGetVRs, 8, identity; init=Flux.identity_init), Dense(8, 16, identity; init=Flux.identity_init), @@ -134,9 +131,9 @@ net = Chain(x -> myFMU(;x=x, y_refs=getVRs), push!(nets, net) # 7. NeuralFMU with additional setter -setVRs = [fmi2StringToValueReference(myFMU, "mass.m")] +setVRs = [fmi2StringToValueReference(fmu, "mass.m")] numSetVRs = length(setVRs) -net = Chain(x -> myFMU(;x=x, u_refs=setVRs, u=[1.1]), +net = Chain(x -> fmu(;x=x, u_refs=setVRs, u=[1.1]), x -> c1(x), Dense(numStates, 8, identity; init=Flux.identity_init), Dense(8, 16, identity; init=Flux.identity_init), @@ -145,7 +142,7 @@ net = Chain(x -> myFMU(;x=x, u_refs=setVRs, u=[1.1]), push!(nets, net) # 8. 
NeuralFMU with additional setter and getter -net = Chain(x -> myFMU(;x=x, u_refs=setVRs, u=[1.1], y_refs=getVRs), +net = Chain(x -> fmu(;x=x, u_refs=setVRs, u=[1.1], y_refs=getVRs), x -> c1(x), Dense(numStates+numGetVRs, 8, identity; init=Flux.identity_init), Dense(8, 16, identity; init=Flux.identity_init), @@ -161,7 +158,7 @@ for i in 1:length(nets) solver = Tsit5() net = nets[i] - problem = ME_NeuralFMU(myFMU, net, (t_start, t_stop), solver; saveat=tData) + problem = ME_NeuralFMU(fmu, net, (t_start, t_stop), solver; saveat=tData) @test problem !== nothing @@ -190,7 +187,6 @@ for i in 1:length(nets) end end -@test length(myFMU.components) <= 1 +@test length(fmu.components) <= 1 -fmiUnload(realFMU) -fmiUnload(myFMU) +fmiUnload(fmu) diff --git a/test/multi.jl b/test/multi.jl index 03ddbf2c..137cc7b0 100644 --- a/test/multi.jl +++ b/test/multi.jl @@ -62,8 +62,8 @@ end # Load FMUs fmus = Vector{FMU2}() for i in 1:2 # how many FMUs do you want? - fmu = fmiLoad("SpringPendulumExtForce1D", ENV["EXPORTINGTOOL"], ENV["EXPORTINGVERSION"]; type=fmi2TypeCoSimulation) - push!(fmus, fmu) + _fmu = fmiLoad("SpringPendulumExtForce1D", ENV["EXPORTINGTOOL"], ENV["EXPORTINGVERSION"]; type=fmi2TypeCoSimulation) + push!(fmus, _fmu) end # NeuralFMU setup diff --git a/test/runtests.jl b/test/runtests.jl index 7c8c8661..db8a7377 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -54,10 +54,10 @@ function runtests(exportingTool) include("train_modes.jl") end - @info "Multi-threading (multi_threading.jl)" - @testset "Multi-threading" begin - include("multi_threading.jl") - end + # @info "Multi-threading (multi_threading.jl)" + # @testset "Multi-threading" begin + # include("multi_threading.jl") + # end @info "CS-NeuralFMU (hybrid_CS.jl)" @testset "CS-NeuralFMU" begin @@ -68,6 +68,16 @@ function runtests(exportingTool) @testset "Multiple FMUs" begin include("multi.jl") end + + @info "Batching (batching.jl)" + @testset "Batching" begin + include("batching.jl") + end + + # @info "Benchmark: Supported sensitivities (supported_sensitivities.jl)" + # @testset "Benchmark: Supported sensitivities " begin + # include("supported_sensitivities.jl") + # end end end diff --git a/test/supported_sensitivities.jl b/test/supported_sensitivities.jl new file mode 100644 index 00000000..346d7261 --- /dev/null +++ b/test/supported_sensitivities.jl @@ -0,0 +1,57 @@ +# +# Copyright (c) 2021 Tobias Thummerer, Lars Mikelsons +# Licensed under the MIT license. See LICENSE file in the project root for details. 
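+#
+# This test builds an almost-neutral ME-NeuralFMU and runs `FMIFlux.checkSensalgs!`, which
+# benchmarks all supported sensitivity setups (Optimize-then-Discretize and
+# Discretize-then-Optimize) for the given loss function.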
+#
+
+using FMI
+using Flux
+using DifferentialEquations: Tsit5, Rosenbrock23
+
+import Random
+Random.seed!(5678);
+
+t_start = 0.0
+t_step = 0.1
+t_stop = 3.0
+tData = t_start:t_step:t_stop
+velData = sin.(tData)
+
+# load FMU for NeuralFMU
+fmu = fmiLoad("SpringFrictionPendulum1D", ENV["EXPORTINGTOOL"], ENV["EXPORTINGVERSION"]; type=fmi2TypeModelExchange)
+
+numStates = fmiGetNumberOfStates(fmu)
+x0 = [1.0, 1.0]
+
+c1 = CacheLayer()
+c2 = CacheRetrieveLayer(c1)
+c3 = CacheLayer()
+c4 = CacheRetrieveLayer(c3)
+
+# default ME-NeuralFMU (learn dynamics and states, almost-neutral setup, parameter count << 100)
+net = Chain(x -> c1(x),
+            Dense(numStates, numStates, identity; init=Flux.identity_init),
+            x -> c2([1], x[2], []),
+            x -> fmu(;x=x),
+            x -> c3(x),
+            Dense(numStates, numStates, identity; init=Flux.identity_init),
+            x -> c4([1], x[2], []))
+
+# loss function for training
+function losssum(p)
+    global nfmu, x0, velData
+    solution = nfmu(x0; p=p)
+
+    if !solution.success
+        return Inf
+    end
+
+    velNet = fmi2GetSolutionState(solution, 2; isIndex=true)
+
+    return FMIFlux.Losses.mse(velNet, velData)
+end
+
+nfmu = ME_NeuralFMU(fmu, net, (t_start, t_stop); saveat=tData)
+
+FMIFlux.checkSensalgs!(losssum, nfmu)
+
+fmiUnload(fmu)
\ No newline at end of file