From 69d66dfe06ae703ab8b937c3df3ac9195d68b64f Mon Sep 17 00:00:00 2001
From: Akosh Farkash
Date: Tue, 2 Mar 2021 22:24:02 +0000
Subject: [PATCH 01/48] INIT: Project description.

---
 README.md             | 32 ++++++++++++++++++++++++++++++--
 docs/architecture.png | Bin 0 -> 45305 bytes
 docs/master-based.png | Bin 0 -> 52422 bytes
 3 files changed, 30 insertions(+), 2 deletions(-)
 create mode 100644 docs/architecture.png
 create mode 100644 docs/master-based.png

diff --git a/README.md b/README.md
index d023585e..4563f8b3 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,30 @@
-# metronome
-Checkpointing PoW blockchains with HotStuff BFT
+# Metronome
+
+Metronome is a checkpointing component for Proof-of-Work blockchains, using the [HotStuff BFT](https://arxiv.org/pdf/1803.05069.pdf) algorithm.
+
+## Overview
+Checkpointing provides finality to blockchains by attesting to the hash of well-embedded blocks. A proper checkpointing system can secure the blockchain even under an adversary with super-majority mining power.
+
+The Metronome checkpointing system consists of a generic BFT Service (preferably HotStuff), a Checkpoint-assisted Blockchain, and a Checkpointing Interpreter that bridges the two. This structure enables many features, including flexible BFT choices, multi-chain support, a plug-and-play forensic monitoring platform via the BFT service, as well as the ability to bridge trust between two different blockchains.
+
+### Architecture
+
+BFT Service: A committee-based BFT service with a simple, generic interface: it takes consensus candidates (e.g., checkpoint candidates) as input and generates certificates for the elected ones.
+
+Checkpoint-assisted Blockchain: Maintains the main blockchain, which accepts and applies checkpointing results. The checkpointing logic is delegated to the checkpointing interpreter below.
+
+Checkpointing Interpreter: Maintains the checkpointing logic, including the creation and validation (via the blockchain) of checkpointing candidates, as well as checkpoint-related validation of new blockchain blocks.
+
+Each of these modules can be developed independently, with only minor data structure changes required for compatibility. This independence allows us to be flexible with the choice of BFT algorithm (e.g., variants of OBFT or HotStuff) and checkpointing interpreter (e.g., simple checkpoints or Advocate).
+
+The architecture also enables a convenient forensic monitoring module: by simply connecting to the BFT service, the forensics module can download the stream of consensus data, detect illegal behaviors such as collusion, and identify the offenders.
+
+![](docs/architecture.png)
+
+### BFT Algorithm
+
+The BFT service delegates checkpoint proposal and candidate validation to the Checkpointing Interpreter, using 2-way communication to allow asynchronous responses as and when the data becomes available:
+
+![](docs/master-based.png)
+
+When a winner is elected, a Checkpoint Certificate is compiled, comprising the checkpointed data (a block identity, or something more complex) as well as a witness for the BFT agreement, which proves that the decision is final and cannot be rolled back. Because of the need for this proof, low-latency BFT algorithms such as HotStuff are preferred.
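The Architecture section above pins down the BFT Service only by its boundary: consensus candidates go in, certificates for the elected ones come out. A minimal Scala sketch of such an interface might look as follows; every name in it (`BftService`, `CheckpointCandidate`, `CheckpointCertificate`) is an assumption for illustration, not taken from the project's code.

```scala
// Illustrative sketch only: names are assumptions, not the project's actual API.

// A consensus candidate, e.g. the identity of a well-embedded PoW block.
final case class CheckpointCandidate(blockHash: Vector[Byte])

// A certificate pairs the elected value with an opaque witness of agreement.
final case class CheckpointCertificate[A](value: A, witness: Vector[Byte])

// The generic BFT Service boundary: candidates in, certificates out.
trait BftService[A] {
  // Submit a consensus candidate (e.g. a checkpoint candidate) for election.
  def propose(candidate: A): Unit

  // Certificates for elected candidates, yielded as elections conclude.
  def certificates: Iterator[CheckpointCertificate[A]]
}
```

Hiding the service behind a boundary of this shape is what lets the BFT algorithm be swapped (OBFT variants, HotStuff) without touching the blockchain or the interpreter.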
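Similarly, the 2-way communication between the BFT service and the Checkpointing Interpreter can be pictured as an asynchronous message vocabulary: the service asks for a proposal or a validation, and the interpreter replies as and when the data becomes available. The sketch below uses hypothetical message names.

```scala
// Hypothetical message vocabulary; all names are assumptions for illustration.
final case class CheckpointCandidate(blockHash: Vector[Byte])

// Service -> Interpreter: requests, answered asynchronously.
sealed trait ToInterpreter
case object CreateProposal extends ToInterpreter
final case class ValidateCandidate(candidate: CheckpointCandidate)
    extends ToInterpreter

// Interpreter -> Service: responses, sent as and when the data is available.
sealed trait ToService
final case class ProposalCreated(candidate: CheckpointCandidate)
    extends ToService
final case class CandidateValidated(candidate: CheckpointCandidate, valid: Boolean)
    extends ToService
```

Because both directions are plain messages rather than synchronous calls, the interpreter can defer a validation verdict until the referenced block has actually arrived on the PoW chain.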
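The Checkpoint Certificate described under "BFT Algorithm" is the elected value plus a witness that the agreement is final. One way to model the witness, assuming a committee of known members and the usual BFT super-majority quorum (strictly more than two thirds), is a set of signatures over the elected value; the sketch below stubs out the signature check itself, and none of its names come from the project.

```scala
// A hypothetical model of certificate finality; assumed names throughout.
object CheckpointFinality {
  final case class Signature(signer: String, bytes: Vector[Byte])
  final case class CheckpointCertificate[A](value: A, witness: Set[Signature])

  // The decision counts as final once strictly more than 2/3 of the
  // committee has validly signed the elected value (BFT super-majority).
  def isFinal[A](
      cert: CheckpointCertificate[A],
      committee: Set[String],
      verify: (A, Signature) => Boolean // cryptographic check, stubbed here
  ): Boolean = {
    // Collect distinct committee members whose signatures verify.
    val validSigners = cert.witness.collect {
      case sig if committee.contains(sig.signer) && verify(cert.value, sig) =>
        sig.signer
    }
    3 * validSigners.size > 2 * committee.size
  }
}
```

A witness of this kind is exactly what makes the rollback-resistance claim checkable by third parties, which is also why the forensic monitoring module can work from the consensus data stream alone.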
diff --git a/docs/architecture.png b/docs/architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..d731716665e2cf52757e54bbf8f7815bc3b737e8
GIT binary patch
literal 45305
[binary image data omitted]

diff --git a/docs/master-based.png b/docs/master-based.png
new file mode 100644
index 0000000000000000000000000000000000000000..edd1ecc8765eb2dd0550165e013e358c174a749b
GIT binary patch
literal 52422
[binary image data omitted]
zOzrFgZoQwb8CzcxV@;>MkNeLK`gFhZjk`w9JB>_CE^XN8v1z_~hE=%QyidFK zi?~Nfs>#Q@b`G_vnb%|PTvHWOm4^kbX1opE6wsty=h@AYZfY(v4{ffqU0$$#z^Ah_ znj9LlW7`;OtE5+kSHha3-B+WE&GPqnbo=?h@aPaXb)+qYm6tpGVQZSL|8DqWgXN<` zuioDXDOtJ9EH-wH`yYGf%uP;6YdbqUx_+?%DLk+6#s}sU4RY|QJvi<}{K6GaZ1VDR z+p1YBg+;b%=naUnrL}y}oGA}249gPOzk@Ak*Vv^Oi0Csk(T@|NYuddb>=z^jsI^W0@>W~ zg=-c!4BFEoEO+Xn-jx-LT6}k*#3((su-yyKr}~GF#&oCrzmYN2OkH(~hTV&i+tvDS zhf%bfG2<5WV(zuxb`EW<^N*kK+IMiXyLM|W9WwJ2@_ipn%sc?RpEk6ouC}-T!}E5H zEW+dT57r1jc=hmE`e%(tn>fq%=u|e@s}=w1M8Ixnktxq+tskhJqIN=vz2k#KEZHP7 z{+-i-T(a@Ylr)>)POKmPG)p~IAv>C*tGP?_W>?HwUvXQ|`tsBAvQZ*P%P`Q@*Qspd zvtU5OSBgq|8MuyLlB3^1)6F#2U~Tq~`={>mi<~HgqxBX!Q@d>nXjP-zPv^#^O1G$h zVm^4Z3}_LnI<0SK%2Q3FtBjh04tEsdQn!jE6P9!o{9xh)_a(b*NP8RbZtD2)M*wz+ z=6Sn0y>&}3zp2ut&FEWMNgy?D1(>;V^uZFag;EepWdwRM|`*kN1NpO}gSiP`6GI3l(!tVEP@hcR_2 z>(H0n@n?a_HgRx1II*Ke9mn=`-@i;!OZxbB&(UhtGVnfEXVmQ%IXN4iE`J5RM=`n^ z=jNt3YpAtg!2%>sf*%U~Cwqn82mk(R!gY`YISQ) zXR*6p-ibEa9tZaLOgLD0L_gi&>B}FIXD^VuY@4+7d(9|NeCds8LgvhwvvsW*l7QwL zp;r5(Yc6=c>(C)3Q>9~voJ+oc{``5`yN$etFlo8xI_wX=q^y)&_CoIHDMG6pZ;gCe z;yXknjay)-q&~}G}@CeF9TwMA31NE*QT@h)$dc@JQ8Q@c7 zM($*#Wb6WuyFuqij-F(_!c;GGZJTa`Bk!#(nYnD!=I0;FMj!toks|eo=eR~F-0iAM ztkuJB6LYNwyE2x7w;{dgvH&vbZL+^2Q<__UhBgJN(038nyr)MuSh;p>-8ks{wsmw? zVK+C^p343bC|<}P3FSnU5)Wwu4G;%1!x z0HF$2|LDJV=u$h5- zjrJN!y!Tg8`0j2~12<`Fw(S)mmXirCrc(oyH3I75bpl76o3%{%EGNyLK@X3fpT5}J zXiML(f7FG{6*c}n*s9JcIXy2YK$*V2hkfgT8X8CI7Y)lRtMeKLzT!2!cJE%OE}!p7 zq{lv^)=Ia#N9=klD`G}n+e+XUp9xO$?OO zetucdv8uebzum%=xPh%|-JEMZb)WQIIml#c%}*wm2W;yGK|wUAq`EA?HqjMW2{O`{-fsnZ{aE*8D-*3Mdpmb3Ml@Na3A zJ>PBra)0uqre_Ti*1bp9mU;5^qs;UsFxk!{+UFD%c(^-c*TiYhiEpA+oAPj5>{H9X zs(Y;`oI3x_%J>(m^!enN>nXc;?Xpct4G$0ZWwC_lKwp$s*AomA&nEcGaojUZhD%)ZQ z5DT%8+OGRjt$Q=$Y1+*r+l{W*zf*#mU1^`b9pPR5jHmCv+-hE?i}U;UL7P4w_*7Wz z(ZnRGeLw3{CWcX`Kc)G7RTN1U12SUC28TXVaNIBA-zo_!q7cYDf<=hr;HBy=Js7?O zD#HnYc{U7l2~tyebFz}D@Ja|ib1~PDj0>7;_|}nU!)!egGO`L{8bkdSB6BBIb&BDJ! 
z=C1juHf6(K_|xqSy;#=zqh<-HnT!7T@$(y~FpWRjcw|hPRiMkd)XBp1NgiYfb|=64 z2R%KNNGN=PttF3zjOU9@$ZQ7vuEE69f{M|JFahWEGqIvK~B+vdr`)3M0y>(U^2i$uww>ohLHGVa0!Az;a9xJ zHX~UptzfU24kda7b0Xirk4ncMTC|9?*|&NDHbD$l&Igz?OWkv1(@AK9UH- ziUbzKiMOxNDhTa>>>{43SJ4o$5;0bi>MiaY*^b!Dt)dx*D>GW!cj-}{1jR{(K2%$^ zM6dis1E;}an*tiIqW!#zhn4nWFAj83Y|$S~#m`7UL+4n(+NykgY|}rfsEi{zBk;4t zE{h-mcz-7na{&A64<3Xk7Z2{+cMl#R+1P`RLN=M9(On1@z{O}?d~D_qoJ%=-p}Z$x zZj1p>B3}`82D2|K&HyWsxeh*-UtTliDw{W#zr_@qAZXJUm1I z9@!)to|pa0l6J<9ou+)x0AuGKwgf2dFWxO!_#5(3==cVLDdBNkj5to3X{6EK1lhdmM3z@1$90X@-hC{UM}|ijf2X6TRv08;&R7{Kjo!OoI3R+?e;LUrK4cA zY4C37Z10mQ42#IsWIYs#aCAB-=_Epwj6Q|z$H7m6Vj;vRDDBB~{@EYd0oAtM6t z9Fi=Nxv#+EgEl)wvi|w4;yHc-2E-vLX&CYYTn(|{0N9935gVBM<-)Kr`!Vt)(skC8 zpXN&GGj+TKNs@%_tt%4B&C{ZG1~i%d=;^ZwKh4X?>^MnK9uedbNYP1`Pu=R zHPxhc?xxLtGV#Fei|$w$+&@QFqW>TXAuIaUVp*w8GQr*pu1npNIlpbT<{r0`R^VhC zW##K)_`tZvip&Ai%G(zGZKN3>R^1shBy9mDh(wJ@*&%ASfu{4gC8$jm_y@K_rb$#h zlFfDl?T#n(OKNX+n!Ln)y-d3BH1bO&0f;}9HGPzzoPC~Y10LTY3$T;U-6MA%8~*vi zAo1;@M6ajIuj@3Iffo*_g6#WxvR{hJ0Z{6m{ z9dU0s63>xz$sNTrUF^-|WbkZv@rclQ^oc8F!ev+JQlTX&B9sk(eYb;*Lm?0KJvcIa zo{kOl|Cnwt%KsU6wK&Z|4;yV8vsq%BDr^6YkS~#NrOvdil1Avpb19Yx5P&D1F5L3+ zDcl&EMUzC-!0O*~jXF(|fW%xsbBuSh6P3|#a`_5-9Q~73f*nxVJ zowDL;!^;vvJ`D7!I~@dHFs9BhoZg3R|0ol*l43!-(~&e%+3AM;g-o!ndU|R^O^d6Z zD=xesYFAmV^76}+|1hM6I9HUkI@F6V0#Ar1=R@tEpWlnIG>eU|02is81zW7owmtv)$CE-J61batz`ny6+ug7 zB{cI*_|*vU<|gPgKgKCB#@YE2DiPTgzX-wbW;2yBYu8?6o$Zobc*o29(Fcq+?Q!ie zBk#|?>xyM*2NRA8)|d0eenT9?K5(0N@b&c_!$7hF{G=VPfv!Xs+txyr&1U$g@350A z?gN9p{E^bokGo{~@FGfk)C=QN&p(z<5AC7s!6xaHZAI7ZSWY_uM3#S&!y~hfj3*?(ErtmfF6)tpPf-3yc?D zZvd=2q~PI|lhZp@Zf{bU(1{?}kc57QkF&aJkU z(r2VpnX$~6#jQI3G)-mqN;|uE#U@-nE@41{voV+#w*R?v=Mt$?(0<4o+pSrRbS2vb ziF+poO$@j~%85noYm83ktD%Hy9ZR&a=NeNsBQD&q7kqMLqlyY=U8ly@{Vx5kcOvIi zaKGjWz0WHjI@Z5a?&%P}t%>~$)<2#1cBJV)`CG+-L-Zoj8uk3m@+a888gjD@G!pP6GMNqQ`bV{FxmIAK&L!RgzzHryxAe4APE`DCTiei(f{ig&k2f|oQk%u5Eg=#(laGT2 zcGiRI@E`g!K86p};-^txhr0l8Bc|%MXa%KGqRo(j2`S{G!V8zX*NL6RO`WhXt15a< zSXjJHyw0h#&Ld}wYc#ws&mRck0!R=7Nlu7XYI7#CRmziC#SV7-nFZheWY4!>;+}NT_#|zaRm&o zV|Ee>)I>>*&0QY2d)XW&i|QnZg0~TLcb!0rI=ty4>^@x=iZObbJtRJyR9F3X1^mlYm z?N2?p)X4tNsGoI*9#Getf8$MRtD8|qPb$CkSg3qs_Nvd&1^tb}Eq@LH;L<-zz=&{~(`eX5f+?Z29&`IJ2eoy;S#cPrs#FweHP1Hgi>1pdYSERISp~ Y?loR_507Z9;GglMr;Iu`!gRy`01vpkmH+?% literal 0 HcmV?d00001 From bc135f721558a3f4b4902b9d301b2d162dd5cb2d Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Thu, 4 Mar 2021 11:07:23 +0000 Subject: [PATCH 02/48] INIT: Project structure. 
 (#1)
---
 .gitignore          |   4 +
 .mill-version       |   1 +
 .scalafmt.conf      |   3 +
 README.md           |  48 ++++++++++
 build.sc            | 220 ++++++++++++++++++++++++++++++++++++++++++++
 versionFile/version |   1 +
 6 files changed, 277 insertions(+)
 create mode 100644 .mill-version
 create mode 100644 .scalafmt.conf
 create mode 100644 build.sc
 create mode 100644 versionFile/version

diff --git a/.gitignore b/.gitignore
index 9c07d4ae..e990eec4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,6 @@
 *.class
 *.log
+.bloop
+.metals
+.vscode
+out/

diff --git a/.mill-version b/.mill-version
new file mode 100644
index 00000000..a3df0a69
--- /dev/null
+++ b/.mill-version
@@ -0,0 +1 @@
+0.8.0

diff --git a/.scalafmt.conf b/.scalafmt.conf
new file mode 100644
index 00000000..8c23f5fc
--- /dev/null
+++ b/.scalafmt.conf
@@ -0,0 +1,3 @@
+version = "2.7.4"
+maxColumn = 80
+align.preset = more

diff --git a/README.md b/README.md
index 4563f8b3..e31fa422 100644
--- a/README.md
+++ b/README.md
@@ -28,3 +28,51 @@ The BFT service delegates checkpoint proposal and candidate validation to the Ch
 ![](docs/master-based.png)
 
 When a winner is elected a Checkpoint Certificate is compiled, comprising of the checkpointed data (a block identity, or something more complex) as well as a witness for the BFT agreement, which proves that the decision is final and cannot be rolled back. Because of the need for this proof, low latency BFT algorithms such as HotStuff are preferred.
+
+
+## Build
+
+The project is built using [mill](https://github.com/com-lihaoyi/mill), which works fine with [Metals](https://scalameta.org/metals/docs/build-tools/mill.html).
+
+To compile everything, use the `__` wildcard:
+
+```console
+mill __.compile
+```
+
+The project is set up to cross-build to all Scala versions for downstream projects that need to import the libraries. To build any specific version, put it in square brackets:
+
+```console
+mill metronome[2.12.10].checkpointing.app.compile
+```
+
+To run tests, use the wildcards again and the `.test` postfix:
+
+```console
+mill __.test
+mill metronome[2.12.10].checkpointing.app.test.test
+```
+
+To run a single test class, use the `.single` method with the full path to the spec:
+
+```console
+mill __.checkpointing.app.test.single io.iohk.metronome.app.config.ConfigSpec
+```
+
+### Formatting the codebase
+
+Please configure your editor to use `scalafmt` on save. CI will be configured to check formatting.
+
+
+## Publishing
+
+We're using the [VersionFile](https://com-lihaoyi.github.io/mill/page/contrib-modules.html#version-file) plugin to manage versions.
+
+The initial version has been written to the file without newlines:
+```console
+echo -n "0.1.0-SNAPSHOT" > versionFile/version
+```
+
+Builds on `develop` will publish the snapshot version to Sonatype, which can be overwritten if the version number isn't updated.
+
+During [publishing](https://com-lihaoyi.github.io/mill/page/common-project-layouts.html#publishing) on `master` we'll use `mill versionFile.setReleaseVersion` to remove the `-SNAPSHOT` postfix and make a release. After that the version number should be bumped on `develop`, e.g. `mill versionFile.setNextVersion --bump minor`.
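+
+For example, a full release cycle could look like this (a sketch only; the exact Sonatype publish task wired up in CI may differ from `publishLocal`):
+
+```console
+# on master: turn 0.1.0-SNAPSHOT into the 0.1.0 release and publish it
+mill versionFile.setReleaseVersion
+mill __.publishLocal
+
+# back on develop: start the next iteration as 0.2.0-SNAPSHOT
+mill versionFile.setNextVersion --bump minor
+```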
diff --git a/build.sc b/build.sc
new file mode 100644
index 00000000..d56d4005
--- /dev/null
+++ b/build.sc
@@ -0,0 +1,220 @@
+import mill._
+import mill.modules._
+import scalalib._
+import ammonite.ops._
+import coursier.maven.MavenRepository
+import mill.scalalib.{PublishModule, ScalaModule}
+import mill.scalalib.publish.{Developer, License, PomSettings, VersionControl}
+import $ivy.`com.lihaoyi::mill-contrib-versionfile:$MILL_VERSION`
+import mill.contrib.versionfile.VersionFileModule
+
+object versionFile extends VersionFileModule
+
+object VersionOf {
+  val cats = "2.3.1"
+  val config = "1.4.1"
+  val logback = "1.2.3"
+  val monix = "3.3.0"
+  val prometheus = "0.10.0"
+  val rocksdb = "6.15.2"
+  val scalacheck = "1.15.2"
+  val scalalogging = "3.9.2"
+  val scalatest = "3.2.5"
+  val scalanet = "0.7.0"
+}
+
+object metronome extends Cross[MetronomeModule]("2.12.10", "2.13.4")
+
+class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule {
+
+  // Get rid of the `metronome-2.12.10-` part from the artifact name. The JAR name suffix will show the Scala version.
+  // Check with `mill show metronome[2.12.10].__.artifactName` or `mill __.publishLocal`.
+  private def removeCrossVersion(artifactName: String): String =
+    "metronome-" + artifactName.split("-").drop(2).mkString("-")
+
+  // In objects inheriting this trait, use `override def moduleDeps: Seq[PublishModule]`
+  // to point at other modules that also get published. In other cases such as tests
+  // it can be `override def moduleDeps: Seq[JavaModule]`, i.e. point at any module.
+  trait Publishing extends PublishModule {
+    def description: String
+
+    // Make sure there's no newline in the file.
+    override def publishVersion = versionFile.currentVersion().toString
+
+    override def pomSettings = PomSettings(
+      description = description,
+      organization = "io.iohk",
+      url = "https://github.com/input-output-hk/metronome",
+      licenses = Seq(License.`Apache-2.0`),
+      versionControl = VersionControl.github("input-output-hk", "metronome"),
+      // Add yourself if you make a PR!
+      developers = Seq(
+        Developer("aakoshh", "Akosh Farkash", "https://github.com/aakoshh")
+      )
+    )
+  }
+
+  /** Common properties for all Scala modules. */
+  trait SubModule extends ScalaModule {
+    override def scalaVersion = crossScalaVersion
+    override def artifactName = removeCrossVersion(super.artifactName())
+
+    override def ivyDeps = Agg(
+      ivy"org.typelevel::cats-core:${VersionOf.cats}",
+      ivy"org.typelevel::cats-effect:${VersionOf.cats}"
+    )
+
+    // `extends Tests` uses the context of the module in which it's defined
+    trait TestModule extends Tests {
+      override def artifactName =
+        removeCrossVersion(super.artifactName())
+
+      override def testFrameworks =
+        Seq("org.scalatest.tools.Framework")
+
+      // It may be useful to see logs in tests.
+      override def moduleDeps: Seq[JavaModule] =
+        super.moduleDeps ++ Seq(logging)
+
+      override def ivyDeps = Agg(
+        ivy"org.scalatest::scalatest:${VersionOf.scalatest}",
+        ivy"org.scalacheck::scalacheck:${VersionOf.scalacheck}",
+        ivy"ch.qos.logback:logback-classic:${VersionOf.logback}"
+      )
+
+      def single(args: String*) = T.command {
+        super.runMain("org.scalatest.run", args: _*)
+      }
+    }
+  }
+
+  /** Storage abstractions, e.g. a generic key-value store. */
+  object storage extends SubModule
+
+  /** Emit trace events, abstracting away logs and metrics.
+ * + * Based on https://github.com/input-output-hk/iohk-monitoring-framework/tree/master/contra-tracer + */ + object tracing extends SubModule with Publishing { + override def description: String = + "Abstractions for contravariant tracing." + } + + /** Additional crypto utilities such as threshold signature. */ + object crypto extends SubModule with Publishing { + override def description: String = + "Cryptographic primitives to support HotStuff and BFT proof verification." + + // TODO: Use crypto library from Mantis. + object test extends TestModule + } + + /** Generic HotStuff BFT library. */ + object hotstuff extends SubModule { + + /** Pure consensus models. */ + object consensus extends SubModule { + object test extends TestModule + } + + /** Expose forensics events via tracing. */ + object forensics extends SubModule + + /** Implements peer-to-peer communication, state and block synchronisation. + * + * Includes the remote communication protocol messages and networking. + */ + object service extends SubModule { + override def moduleDeps: Seq[JavaModule] = + Seq(storage, tracing, crypto, hotstuff.consensus, hotstuff.forensics) + + override def ivyDeps = super.ivyDeps() ++ Agg( + ivy"io.iohk::scalanet:${VersionOf.scalanet}" + ) + + object test extends TestModule + } + } + + /** Components realising the checkpointing functionality using HotStuff. */ + object checkpointing extends SubModule { + + /** Library to be included on the PoW side to talk to the checkpointing service. + * + * Includes the certificate models, the local communication protocol messages and networking. + */ + object interpreter extends SubModule with Publishing { + override def description: String = + "Components to implement a PoW side checkpointing interpreter." + + override def ivyDeps = Agg( + ivy"io.iohk::scalanet:${VersionOf.scalanet}" + ) + + override def moduleDeps: Seq[PublishModule] = + Seq(tracing, crypto) + } + + /** Implements the checkpointing functionality and the ledger rules. + * + * If it was published, it could be directly included in the checkpoint assisted blockchain application, + * so the service and the interpreter can share data in memory. + */ + object service extends SubModule { + override def moduleDeps: Seq[JavaModule] = + Seq(tracing, hotstuff.service, checkpointing.interpreter) + + object test extends TestModule + } + + /** Executable application for running HotStuff and checkpointing as a stand-alone process, + * communicating with the interpreter over TCP. + */ + object app extends SubModule { + override def moduleDeps: Seq[JavaModule] = + Seq(hotstuff.service, checkpointing.service, rocksdb, logging, metrics) + + override def ivyDeps = super.ivyDeps() ++ Agg( + ivy"com.typesafe:config:${VersionOf.config}", + ivy"ch.qos.logback:logback-classic:${VersionOf.logback}", + ivy"io.iohk::scalanet-discovery:${VersionOf.scalanet}", + ivy"io.monix::monix:${VersionOf.monix}" + ) + + object test extends TestModule + } + } + + /** Implements tracing abstractions to do structured logging. */ + object logging extends SubModule { + override def moduleDeps: Seq[JavaModule] = + Seq(tracing) + + override def ivyDeps = super.ivyDeps() ++ Agg( + ivy"com.typesafe.scala-logging::scala-logging:${VersionOf.scalalogging}" + ) + } + + /** Implements tracing abstractions to expose metrics to Prometheus. 
*/ + object metrics extends SubModule { + override def moduleDeps: Seq[JavaModule] = + Seq(tracing) + + override def ivyDeps = super.ivyDeps() ++ Agg( + ivy"io.prometheus:simpleclient:${VersionOf.prometheus}", + ivy"io.prometheus:simpleclient_httpserver:${VersionOf.prometheus}" + ) + } + + /** Implements the storage abstractions using RocksDB. */ + object rocksdb extends SubModule { + override def moduleDeps: Seq[JavaModule] = + Seq(storage) + + override def ivyDeps = super.ivyDeps() ++ Agg( + ivy"org.rocksdb:rocksdbjni:${VersionOf.rocksdb}" + ) + + object test extends TestModule + } +} diff --git a/versionFile/version b/versionFile/version new file mode 100644 index 00000000..4ecb6644 --- /dev/null +++ b/versionFile/version @@ -0,0 +1 @@ +0.1.0-SNAPSHOT \ No newline at end of file From 034d10f078864e4cd23be09842cdde3b9f774cd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Paradzi=C5=84ski?= Date: Fri, 5 Mar 2021 10:19:55 +0100 Subject: [PATCH 03/48] contravariant tracing (#3) * add kind-projector for type patterns like Tracer[F, *] * add distributed tracer based on input-output-hk/contra-tracer * add lemastero to developers section * add missing space at the end * improve docs for tracer --- build.sc | 5 +- .../io/iohk/metronome/tracer/Tracer.scala | 105 ++++++++++++++++++ 2 files changed, 109 insertions(+), 1 deletion(-) create mode 100644 metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala diff --git a/build.sc b/build.sc index d56d4005..35e7eb4f 100644 --- a/build.sc +++ b/build.sc @@ -49,7 +49,8 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { versionControl = VersionControl.github("input-output-hk", "metronome"), // Add yourself if you make a PR! developers = Seq( - Developer("aakoshh", "Akosh Farkash", "https://github.com/aakoshh") + Developer("aakoshh", "Akosh Farkash", "https://github.com/aakoshh"), + Developer("lemastero", "Piotr Paradzinski", "https://github.com/lemastero") ) ) } @@ -98,6 +99,8 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { object tracing extends SubModule with Publishing { override def description: String = "Abstractions for contravariant tracing." + + def scalacPluginIvyDeps = Agg(ivy"org.typelevel:::kind-projector:0.11.3") } /** Additional crypto utilities such as threshold signature. 
 */
diff --git a/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala b/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala
new file mode 100644
index 00000000..7c9eed5b
--- /dev/null
+++ b/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala
@@ -0,0 +1,105 @@
+
+import cats.{Applicative, Contravariant, FlatMap, Id, Monad, Monoid, Show, ~>}
+
+/**
+ * Contravariant tracer
+ *
+ * Ported from https://github.com/input-output-hk/contra-tracer/blob/master/src/Control/Tracer.hs
+ */
+trait Tracer[F[_], -A] extends Function[A, F[Unit]]
+
+object Tracer {
+
+  /**
+   * If you know how to trace A and
+   * - how to enrich A to get value B
+   * - how to forget some details about B to get A
+   * then you can create Tracer for B
+   */
+  implicit def contraTracer[F[_]]: Contravariant[Tracer[F, *]] =
+    new Contravariant[Tracer[F, *]] {
+      override def contramap[A, B](fa: Tracer[F, A])(f: B => A): Tracer[F, B] =
+        a => fa(f(a))
+    }
+
+  def noOpTracer[M[_], A](implicit MA: Applicative[M]): Tracer[M, A] =
+    _ => MA.pure(())
+
+  implicit def monoidTracer[F[_], S](implicit MA: Applicative[F]): Monoid[Tracer[F, S]] =
+    new Monoid[Tracer[F, S]] {
+
+      /** Run sequentially two tracers */
+      override def combine(a1: Tracer[F, S], a2: Tracer[F, S]): Tracer[F, S] =
+        s => MA.productR(a1(s))(a2(s))
+
+      override def empty: Tracer[F, S] = noOpTracer
+    }
+
+  /** Trace value a using tracer tracer */
+  def traceWith[F[_], A](tracer: Tracer[F, A], a: A): F[Unit] = tracer(a)
+
+  /** contravariant Kleisli composition:
+    * if you can:
+    * - produce effect M[B] from A
+    * - trace B's
+    * then you can trace A's
+    */
+  def contramapM[F[_], A, B](f: A => F[B], tracer: Tracer[F, B])(implicit MM: FlatMap[F]): Tracer[F, A] =
+    (a: A) => MM.flatMap(f(a))(tracer)
+
+  /** change the effect F to G using natural transformation nat */
+  def natTracer[F[_], G[_], A](nat: F ~> G, tracer: Tracer[F, A]): Tracer[G, A] =
+    a => nat(tracer(a))
+
+  /** filter out values to trace if they do not pass predicate p */
+  def condTracing[F[_], A](p: A => Boolean, tr: Tracer[F, A])(implicit FM: Applicative[F]): Tracer[F, A] =
+    (a: A) =>
+      if (p(a)) tr(a)
+      else FM.pure(())
+
+  /** filter out values that were sent to trace using a side-effecting predicate */
+  def condTracingM[F[_], A](p: F[A => Boolean], tr: Tracer[F, A])(implicit FM: Monad[F]): Tracer[F, A] =
+    a =>
+      FM.flatMap(p) { pa =>
+        if (pa(a)) tr(a)
+        else FM.pure(())
+      }
+
+  def showTracing[F[_], A](
+    tracer: Tracer[F, String]
+  )(implicit S: Show[A], C: Contravariant[Tracer[F, *]]): Tracer[F, A] =
+    C.contramap(tracer)(S.show)
+
+  def traceAll[A, B](f: B => List[A], t: Tracer[Id, A]): Tracer[Id, B] =
+    event => f(event).foreach(t)
+}
+
+object TracerSyntax {
+
+  implicit class TracerOps[F[_], A](val tracer: Tracer[F, A]) extends AnyVal {
+
+    /** Trace value a using tracer */
+    def trace(a: A): F[Unit] = tracer(a)
+
+    /** contravariant Kleisli composition:
+      * if you can:
+      * - produce effect M[B] from A
+      * - trace B's
+      * then you can trace A's
+      */
+    def >=>[B](f: B => F[A])(implicit MM: FlatMap[F]): Tracer[F, B] =
+      Tracer.contramapM(f, tracer)
+
+    def nat[G[_]](nat: F ~> G): Tracer[G, A] =
+      Tracer.natTracer(nat, tracer)
+
+    def filter(p: A => Boolean)(implicit FM: Applicative[F]): Tracer[F, A] =
+      Tracer.condTracing[F, A](p, tracer)
+
+    def filterNot(p: A => Boolean)(implicit FM: Applicative[F]): Tracer[F, A] =
+      filter(a => !p(a))
+
+    def filterM(p: F[A => Boolean])(implicit FM: Monad[F]): Tracer[F, A] =
+      Tracer.condTracingM(p, tracer)
+  }
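+
+  // A minimal usage sketch (illustrative only, not part of the API; the
+  // `Event` type and console tracer below are hypothetical, and
+  // `showTracing` additionally needs a `Show[Event]` instance in scope):
+  //
+  //   import cats.Id
+  //   import TracerSyntax._
+  //
+  //   sealed trait Event
+  //   case class Connected(peer: String) extends Event
+  //
+  //   val stdout: Tracer[Id, String] = line => println(line)
+  //   val events: Tracer[Id, Event]  = Tracer.showTracing(stdout)
+  //
+  //   Tracer.traceWith(events, Connected("alice"))
+  //   events.filter(_.isInstanceOf[Connected]).trace(Connected("bob"))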
+} From 60f7b68c14d4d5602c7e31d201f6a5ef945edb6f Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Sat, 6 Mar 2021 21:33:54 +0000 Subject: [PATCH 04/48] PM-2906: Lazy tracing. (#4) --- .../io/iohk/metronome/tracer/Tracer.scala | 100 +++++++++++------- 1 file changed, 63 insertions(+), 37 deletions(-) diff --git a/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala b/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala index 7c9eed5b..412ac330 100644 --- a/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala +++ b/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala @@ -1,31 +1,39 @@ +package io.iohk.tracer +import language.higherKinds import cats.{Applicative, Contravariant, FlatMap, Id, Monad, Monoid, Show, ~>} -/** - * Contravariant tracer - * - * Ported from https://github.com/input-output-hk/contra-tracer/blob/master/src/Control/Tracer.hs - */ -trait Tracer[F[_], -A] extends Function[A, F[Unit]] +/** Contravariant tracer + * + * Ported from https://github.com/input-output-hk/contra-tracer/blob/master/src/Control/Tracer.hs + */ +trait Tracer[F[_], -A] { + def apply(a: => A): F[Unit] +} object Tracer { - /** - * If you know how to trace A and - * - how to enrich A to get value B - * - how to forget some details about B to get A - * then you can create Tracer for B - */ + /** If you know: + * - how to enrich type A that is traced + * - how to squeeze B's to create A's (possibly enrich B with extra stuff, or forget some details) + * then you have Tracer for B + */ implicit def contraTracer[F[_]]: Contravariant[Tracer[F, *]] = new Contravariant[Tracer[F, *]] { override def contramap[A, B](fa: Tracer[F, A])(f: B => A): Tracer[F, B] = - a => fa(f(a)) + new Tracer[F, B] { + override def apply(a: => B): F[Unit] = fa(f(a)) + } } def noOpTracer[M[_], A](implicit MA: Applicative[M]): Tracer[M, A] = - _ => MA.pure(()) + new Tracer[M, A] { + override def apply(a: => A): M[Unit] = MA.pure(()) + } - implicit def monoidTracer[F[_], S](implicit MA: Applicative[F]): Monoid[Tracer[F, S]] = + implicit def monoidTracer[F[_], S](implicit + MA: Applicative[F] + ): Monoid[Tracer[F, S]] = new Monoid[Tracer[F, S]] { /** Run sequentially two tracers */ @@ -39,26 +47,42 @@ object Tracer { def traceWith[F[_], A](tracer: Tracer[F, A], a: A): F[Unit] = tracer(a) /** contravariant Kleisli composition: - * if you can: - * - produce effect M[B] from A - * - trace B's - * then you can trace A's - */ - def contramapM[F[_], A, B](f: A => F[B], tracer: Tracer[F, B])(implicit MM: FlatMap[F]): Tracer[F, A] = - (a: A) => MM.flatMap(f(a))(tracer) + * if you can: + * - produce effect M[B] from A + * - trace B's + * then you can trace A's + */ + def contramapM[F[_], A, B](f: A => F[B], tracer: Tracer[F, B])(implicit + MM: FlatMap[F] + ): Tracer[F, A] = { + new Tracer[F, A] { + override def apply(a: => A): F[Unit] = + MM.flatMap(f(a))(tracer(_)) + } + } /** change the effect F to G using natural transformation nat */ - def natTracer[F[_], G[_], A](nat: F ~> G, tracer: Tracer[F, A]): Tracer[G, A] = + def natTracer[F[_], G[_], A]( + nat: F ~> G, + tracer: Tracer[F, A] + ): Tracer[G, A] = a => nat(tracer(a)) - /** filter out values to trace if they do not pass predicate p */ - def condTracing[F[_], A](p: A => Boolean, tr: Tracer[F, A])(implicit FM: Applicative[F]): Tracer[F, A] = - (a: A) => - if (p(a)) tr(a) - else FM.pure(()) + /** filter out values to trace if they do not pass predicate p */ + def condTracing[F[_], A](p: A => Boolean, tr: Tracer[F, 
A])(implicit
+      FM: Applicative[F]
+  ): Tracer[F, A] = {
+    new Tracer[F, A] {
+      override def apply(a: => A): F[Unit] =
+        if (p(a)) tr(a)
+        else FM.pure(())
+    }
+  }
 
   /** filter out values that were sent to trace using a side-effecting predicate */
-  def condTracingM[F[_], A](p: F[A => Boolean], tr: Tracer[F, A])(implicit FM: Monad[F]): Tracer[F, A] =
+  def condTracingM[F[_], A](p: F[A => Boolean], tr: Tracer[F, A])(implicit
+      FM: Monad[F]
+  ): Tracer[F, A] =
     a =>
       FM.flatMap(p) { pa =>
         if (pa(a)) tr(a)
@@ -66,27 +90,29 @@ object Tracer {
       }
 
   def showTracing[F[_], A](
-    tracer: Tracer[F, String]
+      tracer: Tracer[F, String]
   )(implicit S: Show[A], C: Contravariant[Tracer[F, *]]): Tracer[F, A] =
     C.contramap(tracer)(S.show)
 
   def traceAll[A, B](f: B => List[A], t: Tracer[Id, A]): Tracer[Id, B] =
-    event => f(event).foreach(t)
+    new Tracer[Id, B] {
+      override def apply(event: => B): Id[Unit] = f(event).foreach(t(_))
+    }
 }
 
 object TracerSyntax {
 
   implicit class TracerOps[F[_], A](val tracer: Tracer[F, A]) extends AnyVal {
 
-    /** Trace value a using tracer */
+    /** Trace value a using tracer tracer */
     def trace(a: A): F[Unit] = tracer(a)
 
     /** contravariant Kleisli composition:
-      * if you can:
-      * - produce effect M[B] from A
-      * - trace B's
-      * then you can trace A's
-      */
+      * if you can:
+      *  - produce effect M[B] from A
+      *  - trace B's
+      * then you can trace A's
+      */
     def >=>[B](f: B => F[A])(implicit MM: FlatMap[F]): Tracer[F, B] =
       Tracer.contramapM(f, tracer)

From 4ac22eaf23f1c0245285dcaa928cba293c79c8a2 Mon Sep 17 00:00:00 2001
From: Akosh Farkash
Date: Wed, 10 Mar 2021 09:36:42 +0000
Subject: [PATCH 05/48] PM-2903: Storage and RocksDB interface (#2)

* PM-2903: KVStore abstraction and in-memory implementation.
* PM-2903: Skeleton for RocksDBStore.
* PM-2903: Define compiler options (fixes some cats syntax).
* PM-2903: Switch to using ReaderT.
* PM-2903: Implement read-oriented version with fallback to writes.
* PM-2903: Using ReentrantReadWriteLock.
* PM-2903: Database creation.
* PM-2903: Update Scala version, ignore warning about .asJava
* PM-2930: Setup for testing RocksDB.
* PM-2903: Testing opening and closing the database.
* PM-2903: Testing that the RocksDB and in-memory implementations are equivalent.
* PM-2903: Test concurrent locking behaviour.
* PM-2903: Updated the README with actual test examples.
* PM-2903: Added KVStore.pure and KVStore.unit helpers.
* PM-2903: Update comment and change parameter order.
* PM-2903: Adjust test sizes so concurrency problems are always detected.
* PM-2903: Use the read options.
* PM-2903: Separated LockSupport to a base class.
* PM-2903: Added a DBSupport class.
* PM-2903: Reuse write options across all writes via DBSupport.
* PM-2903: Added a projection to read-only operations.
* PM-2903: Changed .readonly.get to just .read
* PM-2903: Renamings.
* PM-2903: Added more methods to do things without locking.
* PM-2903: Clarify the docs of withLockUpgrade
---
 README.md                                     |   4 +-
 build.sc                                      |  79 ++-
 .../iohk/metronome/rocksdb/RocksDBStore.scala | 455 +++++++++++++++
 .../metronome/rocksdb/RocksDBStoreSpec.scala  | 539 ++++++++++++++++++
 .../iohk/metronome/storage/KVCollection.scala |  37 ++
 .../io/iohk/metronome/storage/KVStore.scala   |  73 +++
 .../io/iohk/metronome/storage/KVStoreOp.scala |  34 ++
 .../iohk/metronome/storage/KVStoreRead.scala  |  40 ++
 .../iohk/metronome/storage/KVStoreState.scala |  79 +++
 .../io/iohk/metronome/storage/package.scala   |  18 +
 .../metronome/storage/KVStoreStateSpec.scala  |  45 ++
 11 files changed, 1385 insertions(+), 18 deletions(-)
 create mode 100644 metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala
 create mode 100644 metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala
 create mode 100644 metronome/storage/src/io/iohk/metronome/storage/KVCollection.scala
 create mode 100644 metronome/storage/src/io/iohk/metronome/storage/KVStore.scala
 create mode 100644 metronome/storage/src/io/iohk/metronome/storage/KVStoreOp.scala
 create mode 100644 metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala
 create mode 100644 metronome/storage/src/io/iohk/metronome/storage/KVStoreState.scala
 create mode 100644 metronome/storage/src/io/iohk/metronome/storage/package.scala
 create mode 100644 metronome/storage/test/src/io/iohk/metronome/storage/KVStoreStateSpec.scala

diff --git a/README.md b/README.md
index e31fa422..97e0af39 100644
--- a/README.md
+++ b/README.md
@@ -50,13 +50,13 @@ To run tests, use the wildcards again and the `.test` postfix:
 
 ```console
 mill __.test
-mill metronome[2.12.10].checkpointing.app.test.test
+mill --watch metronome[2.13.4].rocksdb.test
 ```
 
 To run a single test class, use the `.single` method with the full path to the spec:
 
 ```console
-mill __.checkpointing.app.test.single io.iohk.metronome.app.config.ConfigSpec
+mill __.storage.test.single io.iohk.metronome.storage.KVStoreStateSpec
 ```
 
 ### Formatting the codebase

diff --git a/build.sc b/build.sc
index 35e7eb4f..5f624682 100644
--- a/build.sc
+++ b/build.sc
@@ -11,24 +11,28 @@ import mill.contrib.versionfile.VersionFileModule
 object versionFile extends VersionFileModule
 
 object VersionOf {
-  val cats = "2.3.1"
-  val config = "1.4.1"
-  val logback = "1.2.3"
-  val monix = "3.3.0"
-  val prometheus = "0.10.0"
-  val rocksdb = "6.15.2"
-  val scalacheck = "1.15.2"
-  val scalalogging = "3.9.2"
-  val scalatest = "3.2.5"
-  val scalanet = "0.7.0"
+  val cats         = "2.3.1"
+  val config       = "1.4.1"
+  val logback      = "1.2.3"
+  val monix        = "3.3.0"
+  val prometheus   = "0.10.0"
+  val rocksdb      = "6.15.2"
+  val scalacheck   = "1.15.2"
+  val scalalogging = "3.9.2"
+  val scalatest    = "3.2.5"
+  val scalanet     = "0.7.0"
+  val `scodec-core` = "1.11.7"
+  val `scodec-bits` = "1.1.12"
 }
 
-object metronome extends Cross[MetronomeModule]("2.12.10", "2.13.4")
+// Using 2.12.13 instead of 2.12.10 to access @nowarn, to disable certain deprecation
+// warnings that come up in 2.13 but are too awkward to work around.
+object metronome extends Cross[MetronomeModule]("2.12.13", "2.13.4")
 
 class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule {
 
-  // Get rid of the `metronome-2.12.10-` part from the artifact name. The JAR name suffix will show the Scala version.
-  // Check with `mill show metronome[2.12.10].__.artifactName` or `mill __.publishLocal`.
+  // Get rid of the `metronome-2.13.4-` part from the artifact name. The JAR name suffix will show the Scala version.
+  // Check with `mill show metronome[2.13.4].__.artifactName` or `mill __.publishLocal`.
   private def removeCrossVersion(artifactName: String): String =
     "metronome-" + artifactName.split("-").drop(2).mkString("-")
@@ -65,13 +69,44 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule {
       ivy"org.typelevel::cats-effect:${VersionOf.cats}"
     )
 
+    override def scalacOptions = Seq(
+      "-unchecked",
+      "-deprecation",
+      "-feature",
+      "-encoding",
+      "utf-8",
+      "-Xfatal-warnings",
+      "-Ywarn-value-discard"
+    ) ++ {
+      crossScalaVersion.take(4) match {
+        case "2.12" =>
+          // These options don't work well with 2.13
+          Seq(
+            "-Xlint:unsound-match",
+            "-Ywarn-inaccessible",
+            "-Ywarn-unused-import",
+            "-Ypartial-unification", // Required for the `>>` syntax.
+            "-language:higherKinds",
+            "-language:postfixOps"
+          )
+        case "2.13" =>
+          Seq()
+      }
+    }
+
     // `extends Tests` uses the context of the module in which it's defined
     trait TestModule extends Tests {
       override def artifactName =
         removeCrossVersion(super.artifactName())
 
+      override def scalacOptions =
+        SubModule.this.scalacOptions
+
       override def testFrameworks =
-        Seq("org.scalatest.tools.Framework")
+        Seq(
+          "org.scalatest.tools.Framework",
+          "org.scalacheck.ScalaCheckFramework"
+        )
 
       // It may be useful to see logs in tests.
       override def moduleDeps: Seq[JavaModule] =
@@ -90,7 +125,15 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule {
   }
 
   /** Storage abstractions, e.g. a generic key-value store. */
-  object storage extends SubModule
+  object storage extends SubModule {
+    override def ivyDeps = super.ivyDeps() ++ Agg(
+      ivy"org.typelevel::cats-free:${VersionOf.cats}",
+      ivy"org.scodec::scodec-bits:${VersionOf.`scodec-bits`}",
+      ivy"org.scodec::scodec-core:${VersionOf.`scodec-core`}"
+    )
+
+    object test extends TestModule
+  }
 
   /** Emit trace events, abstracting away logs and metrics.
    *
    * Based on https://github.com/input-output-hk/iohk-monitoring-framework/tree/master/contra-tracer
@@ -218,6 +261,10 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule {
       ivy"org.rocksdb:rocksdbjni:${VersionOf.rocksdb}"
     )
 
-    object test extends TestModule
+    object test extends TestModule {
+      override def ivyDeps = super.ivyDeps() ++ Agg(
+        ivy"io.monix::monix:${VersionOf.monix}"
+      )
+    }
   }
 }
diff --git a/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala b/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala
new file mode 100644
index 00000000..cd8b2f77
--- /dev/null
+++ b/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala
@@ -0,0 +1,455 @@
+package io.iohk.metronome.rocksdb
+
+import cats._
+import cats.implicits._
+import cats.data.ReaderT
+import cats.effect.{Resource, Sync}
+import cats.free.Free.liftF
+import io.iohk.metronome.storage.{
+  KVStore,
+  KVStoreOp,
+  KVStoreRead,
+  KVStoreReadOp
+}
+import io.iohk.metronome.storage.KVStoreOp.{Put, Get, Delete}
+import java.util.concurrent.locks.ReentrantReadWriteLock
+import org.rocksdb.{
+  RocksDB,
+  WriteBatch,
+  WriteOptions,
+  ReadOptions,
+  Options,
+  DBOptions,
+  ColumnFamilyOptions,
+  ColumnFamilyDescriptor,
+  ColumnFamilyHandle,
+  BlockBasedTableConfig,
+  BloomFilter,
+  CompressionType,
+  ClockCache
+}
+import scodec.Codec
+import scodec.bits.BitVector
+import scala.collection.mutable
+import java.nio.file.Path
+import scala.annotation.nowarn
+
+/** Implementation of interpreters for `KVStore[N, A]` and `KVStoreRead[N, A]` operations
+  * with various semantics. Application code is not expected to interact with this class
+  * directly.
Instead, some middle layer should be passed as a dependency to code that + * delegates to the right interpreter in this class. + * + * For example if our data schema is append-only, there's no need to pay the performance + * penalty for using locking, or if two parts of the application are isolated from each other, + * locking could be performed in their respective middle-layers, before they forward the + * query for execution to this class. + */ +class RocksDBStore[F[_]: Sync]( + db: RocksDBStore.DBSupport[F], + lock: RocksDBStore.LockSupport[F], + handles: Map[RocksDBStore.Namespace, ColumnFamilyHandle] +) { + + import RocksDBStore.{Namespace, DBQuery, autoCloseableR} + + private val kvs = KVStore.instance[Namespace] + + // Batch execution needs these variables for accumulating operations + // and executing them against the database. They are going to be + // passed along in a Reader monad to the Free compiler. + type BatchEnv = WriteBatch + + // Type aliases to support the `~>` transformation with types that + // only have 1 generic type argument `A`. + type Batch[A] = + ({ type L[A] = ReaderT[F, BatchEnv, A] })#L[A] + + type KVNamespacedOp[A] = + ({ type L[A] = KVStoreOp[Namespace, A] })#L[A] + + type KVNamespacedReadOp[A] = + ({ type L[A] = KVStoreReadOp[Namespace, A] })#L[A] + + /** Execute the accumulated write operations in a batch. */ + private val writeBatch: ReaderT[F, BatchEnv, Unit] = + ReaderT { batch => + if (batch.hasPut() || batch.hasDelete()) + db.write(batch) >> + Sync[F].delay { + batch.clear() + } + else + ().pure[F] + } + + /** Execute one `Get` operation. */ + private def read[K, V](op: Get[Namespace, K, V]): F[Option[V]] = { + for { + kbs <- encode(op.key)(op.keyCodec) + mvbs <- db.read(handles(op.namespace), kbs) + mv <- mvbs match { + case None => + none.pure[F] + + case Some(bytes) => + decode(bytes)(op.valueCodec).map(_.some) + } + } yield mv + } + + /** Collect writes in a batch, until we either get to the end, or there's a read. + * This way writes are atomic, and reads can see their effect. + * + * In the next version of RocksDB we can use transactions to make reads and writes + * run in isolation. + */ + private val batchingCompiler: KVNamespacedOp ~> Batch = + new (KVNamespacedOp ~> Batch) { + def apply[A](fa: KVNamespacedOp[A]): Batch[A] = + fa match { + case op @ Put(n, k, v) => + ReaderT { batch => + for { + kbs <- encode(k)(op.keyCodec) + vbs <- encode(v)(op.valueCodec) + _ = batch.put(handles(n), kbs, vbs) + } yield () + } + + case op @ Get(_, _) => + // Execute any pending deletes and puts before performing the read. + writeBatch >> ReaderT.liftF(read(op)) + + case op @ Delete(n, k) => + ReaderT { batch => + for { + kbs <- encode(k)(op.keyCodec) + _ = batch.delete(handles(n), kbs) + } yield () + } + } + } + + /** Intended for reads, with fallback to writes. */ + private val nonBatchingCompiler: KVNamespacedOp ~> F = + new (KVNamespacedOp ~> F) { + def apply[A](fa: KVNamespacedOp[A]): F[A] = + fa match { + case op @ Get(_, _) => + read(op) + case op => + lock.withLockUpgrade { + runWithBatchingNoLock { + liftF[KVNamespacedOp, A](op) + } + } + } + } + + private def encode[T](value: T)(implicit ev: Codec[T]): F[Array[Byte]] = + Sync[F].fromTry(ev.encode(value).map(_.toByteArray).toTry) + + private def decode[T](bytes: Array[Byte])(implicit ev: Codec[T]): F[T] = + Sync[F].fromTry(ev.decodeValue(BitVector(bytes)).toTry) + + /** Mostly meant for writing batches atomically. 
+ * + * If a read is found the accumulated writes are performed, + * then the read happens, before batching carries on; + * this breaks the atomicity of writes. + * + * This version doesn't use any locking, so it's suitable for + * append-only data stores, or writing to independent stores + * in parallel. + */ + def runWithBatchingNoLock[A]( + program: KVStore[Namespace, A] + ): DBQuery[F, A] = { + autoCloseableR(new WriteBatch()).use { + (program.foldMap(batchingCompiler) <* writeBatch).run + } + } + + /** Same as `runWithBatchingNoLock`, but write lock is taken out + * to make sure concurrent reads are not affected. + * + * This version is suitable for cases where data may be deleted, + * which could result for example in foreign key references + * becoming invalid after they are read, before the data they + * point to is retrieved. + */ + def runWithBatching[A](program: KVStore[Namespace, A]): DBQuery[F, A] = + lock.withWriteLock { + runWithBatchingNoLock(program) + } + + /** Similar to `runWithBatching` in that it can contain both reads + * and writes, but the expectation is that it will mostly be reads. + * + * A read lock is taken out to make sure writes don't affect reads; + * if a write is found, it is executed as an individual operation, + * while a write lock is taken out to protect other reads. Note that + * this breaks the isolation of reads, because to acquire a write lock, + * the read lock has to be released, which gives a chance for other + * threads to get in before the write statement runs. + */ + def runWithoutBatching[A](program: KVStore[Namespace, A]): DBQuery[F, A] = + lock.withReadLock { + program.foldMap(nonBatchingCompiler) + } + + /** For strictly read-only operations. + * + * Doesn't use locking, so most suitable for append-only data schemas + * where reads don't need isolation from writes. + */ + def runReadOnlyNoLock[A](program: KVStoreRead[Namespace, A]): DBQuery[F, A] = + kvs.lift(program).foldMap(nonBatchingCompiler) + + /** Same as `runReadOnlyNoLock`, but a read lock is taken out + * to make sure concurrent writes cannot affect the results. + * + * This version is suitable for use cases where destructive + * updates are happening. + */ + def runReadOnly[A](program: KVStoreRead[Namespace, A]): DBQuery[F, A] = + lock.withReadLock { + runReadOnlyNoLock(program) + } +} + +object RocksDBStore { + type Namespace = IndexedSeq[Byte] + + /** Database operations may fail due to a couple of reasons: + * - database connection issues + * - obsolete format stored, codec unable to read data + * + * But it's not expected, so just using `F[A]` for now, + * rather than `EitherT[F, Throwable, A]`. + */ + type DBQuery[F[_], A] = F[A] + + case class Config( + path: Path, + createIfMissing: Boolean, + paranoidChecks: Boolean, + maxThreads: Int, + maxOpenFiles: Int, + verifyChecksums: Boolean, + levelCompaction: Boolean, + blockSizeBytes: Long, + blockCacheSizeBytes: Long + ) + object Config { + def default(path: Path): Config = + Config( + path = path, + // Create DB data directory if it's missing + createIfMissing = true, + // Should the DB raise an error as soon as it detects an internal corruption + paranoidChecks = true, + maxThreads = 1, + maxOpenFiles = 32, + // Force checksum verification of all data that is read from the file system on behalf of a particular read. + verifyChecksums = true, + // In this mode, size target of levels are changed dynamically based on size of the last level. 
+ // https://rocksdb.org/blog/2015/07/23/dynamic-level.html + levelCompaction = true, + // Approximate size of user data packed per block (16 * 1024) + blockSizeBytes = 16384, + // Amount of cache in bytes that will be used by RocksDB (32 * 1024 * 1024) + blockCacheSizeBytes = 33554432 + ) + } + + def apply[F[_]: Sync]( + config: Config, + namespaces: Seq[Namespace] + ): Resource[F, RocksDBStore[F]] = { + + @nowarn // JavaConverters are deprecated in 2.13 + def open( + opts: DBOptions, + cfds: Seq[ColumnFamilyDescriptor], + cfhs: mutable.Buffer[ColumnFamilyHandle] + ): RocksDB = { + import scala.collection.JavaConverters._ + RocksDB.open(opts, config.path.toString, cfds.asJava, cfhs.asJava) + } + + // There is a specific order for closing RocksDB with column families described in + // https://github.com/facebook/rocksdb/wiki/RocksJava-Basics#opening-a-database-with-column-families + // 1. Free all column families handles + // 2. Free DB and DB options + // 3. Free column families options + // So they are created in the opposite order. + for { + _ <- Resource.liftF[F, Unit](Sync[F].delay { + RocksDB.loadLibrary() + }) + + tableConf <- Resource.pure[F, BlockBasedTableConfig] { + mkTableConfig(config) + } + + cfOpts <- autoCloseableR[F, ColumnFamilyOptions] { + new ColumnFamilyOptions() + .setCompressionType(CompressionType.LZ4_COMPRESSION) + .setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION) + .setLevelCompactionDynamicLevelBytes(config.levelCompaction) + .setTableFormatConfig(tableConf) + } + + allNamespaces = RocksDB.DEFAULT_COLUMN_FAMILY.toIndexedSeq +: namespaces + + cfDescriptors = allNamespaces.map { n => + new ColumnFamilyDescriptor(n.toArray, cfOpts) + } + + dbOpts <- autoCloseableR[F, DBOptions] { + new DBOptions() + .setCreateIfMissing(config.createIfMissing) + .setParanoidChecks(config.paranoidChecks) + .setMaxOpenFiles(config.maxOpenFiles) + .setIncreaseParallelism(config.maxThreads) + .setCreateMissingColumnFamilies(true) + } + + readOpts <- autoCloseableR[F, ReadOptions] { + new ReadOptions().setVerifyChecksums(config.verifyChecksums) + } + writeOptions <- autoCloseableR[F, WriteOptions] { + new WriteOptions() + } + + // The handles will be filled as the database is opened. + columnFamilyHandleBuffer = mutable.Buffer.empty[ColumnFamilyHandle] + + db <- autoCloseableR[F, RocksDB] { + open( + dbOpts, + cfDescriptors, + columnFamilyHandleBuffer + ) + } + + columnFamilyHandles <- Resource.make( + (allNamespaces zip columnFamilyHandleBuffer).toMap.pure[F] + ) { _ => + // Make sure all handles are closed, and this happens before the DB is closed. + Sync[F].delay(columnFamilyHandleBuffer.foreach(_.close())) + } + + // Sanity check; if an exception is raised everything will be closed down. + _ = assert( + columnFamilyHandleBuffer.size == allNamespaces.size, + "Should have created a column family handle for each namespace." + + s" Expected ${allNamespaces.size}; got ${columnFamilyHandleBuffer.size}." + ) + + store = new RocksDBStore[F]( + new DBSupport(db, readOpts, writeOptions), + new LockSupport(new ReentrantReadWriteLock()), + columnFamilyHandles + ) + + } yield store + } + + /** Remove the database directory. 
*/
+  def destroy[F[_]: Sync](
+      config: Config
+  ): F[Unit] = {
+    autoCloseableR[F, Options] {
+      new Options()
+        .setCreateIfMissing(config.createIfMissing)
+        .setParanoidChecks(config.paranoidChecks)
+        .setCompressionType(CompressionType.LZ4_COMPRESSION)
+        .setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION)
+        .setLevelCompactionDynamicLevelBytes(config.levelCompaction)
+        .setMaxOpenFiles(config.maxOpenFiles)
+        .setIncreaseParallelism(config.maxThreads)
+        .setTableFormatConfig(mkTableConfig(config))
+    }.use { options =>
+      Sync[F].delay {
+        RocksDB.destroyDB(config.path.toString, options)
+      }
+    }
+  }
+
+  private def mkTableConfig(config: Config): BlockBasedTableConfig =
+    new BlockBasedTableConfig()
+      .setBlockSize(config.blockSizeBytes)
+      .setBlockCache(new ClockCache(config.blockCacheSizeBytes))
+      .setCacheIndexAndFilterBlocks(true)
+      .setPinL0FilterAndIndexBlocksInCache(true)
+      .setFilterPolicy(new BloomFilter(10, false))
+
+  private def autoCloseableR[F[_]: Sync, R <: AutoCloseable](
+      mk: => R
+  ): Resource[F, R] =
+    Resource.fromAutoCloseable[F, R](Sync[F].delay(mk))
+
+  /** Help run reads and writes isolated from each other. */
+  private class LockSupport[F[_]: Sync](rwlock: ReentrantReadWriteLock) {
+
+    // Batches can interleave multiple reads (and writes);
+    // to make sure they see a consistent view, writes are
+    // isolated from reads via locks, so for example if we
+    // read an ID, then retrieve the record from a different
+    // collection, we can be sure it hasn't been deleted in
+    // between the two operations.
+    private val lockRead    = Sync[F].delay(rwlock.readLock().lock())
+    private val unlockRead  = Sync[F].delay(rwlock.readLock().unlock())
+    private val lockWrite   = Sync[F].delay(rwlock.writeLock().lock())
+    private val unlockWrite = Sync[F].delay(rwlock.writeLock().unlock())
+
+    def withReadLock[A](fa: F[A]): F[A] =
+      Sync[F].bracket(lockRead)(_ => fa)(_ => unlockRead)
+
+    def withWriteLock[A](fa: F[A]): F[A] =
+      Sync[F].bracket(lockWrite)(_ => fa)(_ => unlockWrite)
+
+    /*
+     * In case there's a write operation among the reads and we haven't
+     * taken out a write lock, we can replace the read lock we have
+     * with a write lock, for the duration of the operation, then downgrade
+     * it back when we're done.
+     *
+     * Note that *technically* this is not an upgrade: to acquire the write
+     * lock, the read lock has to be released first, therefore other threads
+     * may get the write lock first. It works in the other direction though:
+     * the write lock can be turned into a read.
+     *
+     * See here for the rules of (non-)upgrading and downgrading:
+     * https://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html
+     */
+    def withLockUpgrade[A](fa: F[A]): F[A] =
+      Sync[F].bracket {
+        unlockRead >> lockWrite
+      }(_ => fa) { _ =>
+        lockRead >> unlockWrite
+      }
+  }
+
+  /** Wrap a RocksDB instance.
*/ + private class DBSupport[F[_]: Sync]( + db: RocksDB, + readOptions: ReadOptions, + writeOptions: WriteOptions + ) { + def read( + handle: ColumnFamilyHandle, + key: Array[Byte] + ): F[Option[Array[Byte]]] = Sync[F].delay { + Option(db.get(handle, readOptions, key)) + } + + def write( + batch: WriteBatch + ): F[Unit] = Sync[F].delay { + db.write(writeOptions, batch) + } + } +} diff --git a/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala b/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala new file mode 100644 index 00000000..242fb3ad --- /dev/null +++ b/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala @@ -0,0 +1,539 @@ +package io.iohk.metronome.rocksdb + +import cats.implicits._ +import cats.effect.Resource +import io.iohk.metronome.storage.{ + KVStoreState, + KVStore, + KVCollection, + KVStoreRead +} +import java.nio.file.Files +import monix.eval.Task +import org.scalacheck.commands.Commands +import org.scalacheck.{Properties, Gen, Prop, Test, Arbitrary} +import org.scalacheck.Arbitrary.arbitrary +import org.scalacheck.Prop.forAll +import scala.util.{Try, Success} +import scala.concurrent.duration._ +import scala.annotation.nowarn +import scodec.bits.ByteVector +import scodec.codecs.implicits._ + +// https://github.com/typelevel/scalacheck/blob/master/doc/UserGuide.md#stateful-testing +// https://github.com/typelevel/scalacheck/blob/master/examples/commands-redis/src/test/scala/CommandsRedis.scala + +object RocksDBStoreSpec extends Properties("RocksDBStoreCommands") { + + override def overrideParameters(p: Test.Parameters): Test.Parameters = + p.withMinSuccessfulTests(20).withMaxSize(100) + + // Equivalent to the in-memory model. + property("equivalent") = RocksDBStoreCommands.property() + + // Run reads and writes concurrently. + property("linearizable") = forAll { + import RocksDBStoreCommands._ + for { + empty <- genInitialState + // Generate some initial data. Puts are the only useful op. + init <- Gen.listOfN(50, genPut(empty)).map { ops => + ReadWriteProgram(ops.toList.sequence, batching = true) + } + state = init.nextState(empty) + // The first program is read/write, it takes a write lock. + prog1 <- genReadWriteProg(state).map(_.copy(batching = true)) + // The second program is read-only, it takes a read lock. + prog2 <- genReadOnlyProg(state) + } yield (init, state, prog1, prog2) + } { case (init, state, prog1, prog2) => + import RocksDBStoreCommands._ + + val sut = newSut(state) + try { + // Connect to the database. + ToggleConnected.run(sut) + // Initialize the database. + init.run(sut) + + // Run them concurrently. They should be serialised. + val (result1, result2) = await { + Task.parMap2(Task(prog1.run(sut)), Task(prog2.run(sut)))((_, _)) + } + + // Need to chain together Read-Write and Read-Only ops to test them as one program. + val liftedRO = + KVStore.instance[RocksDBStore.Namespace].lift(prog2.program) + + // Overall the results should correspond to either prog1 ++ prog2, or prog2 ++ prog1. + val prog12 = ReadWriteProgram((prog1.program, liftedRO).mapN(_ ++ _)) + val prog21 = ReadWriteProgram((liftedRO, prog1.program).mapN(_ ++ _)) + + // One of them should have run first. + val prop1 = prog1.postCondition(state, Success(result1)) + val prop2 = prog2.postCondition(state, Success(result2)) + // The other should run second, on top of the changes from the first. 
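+      // E.g. if prog1 happened to take the write lock first, then prop1 must
+      // hold for result1 on the initial state, and prop12 must hold for
+      // result1 ++ result2; symmetrically prop2 and prop21 if prog2 ran first.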
val prop12 = prog12.postCondition(state, Success(result1 ++ result2))
+      val prop21 = prog21.postCondition(state, Success(result2 ++ result1))
+
+      (prop1 && prop12) || (prop2 && prop21)
+    } finally {
+      destroySut(sut)
+    }
+  }
+}
+
+object RocksDBStoreCommands extends Commands {
+  import RocksDBStore.Namespace
+
+  // The in-memory implementation is our reference execution model.
+  object InMemoryKVS extends KVStoreState[Namespace]
+
+  // Some structured data to be stored in the database.
+  case class TestRecord(id: ByteVector, name: String, value: Int)
+
+  // Symbolic state of the test.
+  case class Model(
+      // Support opening/closing the database to see if it can read back the files it has created.
+      isConnected: Boolean,
+      namespaces: IndexedSeq[Namespace],
+      store: InMemoryKVS.Store,
+      deleted: Map[Namespace, Set[Any]],
+      // Some collections so we have typed access.
+      coll0: KVCollection[Namespace, String, Int],
+      coll1: KVCollection[Namespace, Int, ByteVector],
+      coll2: KVCollection[Namespace, ByteVector, TestRecord]
+  ) {
+
+    def storeOf(coll: Coll): Map[Any, Any] =
+      store.getOrElse(namespaces(coll.idx), Map.empty)
+
+    def nonEmptyColls: List[Coll] =
+      Colls.filter(c => storeOf(c).nonEmpty)
+  }
+  sealed trait Coll {
+    def idx: Int
+  }
+  case object Coll0 extends Coll { def idx = 0 }
+  case object Coll1 extends Coll { def idx = 1 }
+  case object Coll2 extends Coll { def idx = 2 }
+
+  val Colls = List(Coll0, Coll1, Coll2)
+
+  case class Allocated[T](value: T, release: Task[Unit])
+
+  class Database(
+      val namespaces: Seq[Namespace],
+      val config: Allocated[RocksDBStore.Config],
+      var maybeConnection: Option[Allocated[RocksDBStore[Task]]]
+  )
+
+  type State = Model
+  type Sut   = Database
+
+  def await[T](task: Task[T]): T = {
+    import monix.execution.Scheduler.Implicits.global
+    task.runSyncUnsafe(timeout = 10.seconds)
+  }
+
+  /** Run one database at any time. */
+  @nowarn // Traversable deprecated in 2.13
+  override def canCreateNewSut(
+      newState: State,
+      initSuts: Traversable[State],
+      runningSuts: Traversable[Sut]
+  ): Boolean =
+    initSuts.isEmpty && runningSuts.isEmpty
+
+  /** Start with an empty database. */
+  override def initialPreCondition(state: State): Boolean =
+    state.store.isEmpty && !state.isConnected
+
+  /** Create a new empty database. */
+  override def newSut(state: State): Sut = {
+    val res = for {
+      path <- Resource.make(Task {
+        Files.createTempDirectory("testdb")
+      }) { path =>
+        Task {
+          if (Files.exists(path)) Files.delete(path)
+        }
+      }
+
+      config = RocksDBStore.Config.default(path)
+
+      _ <- Resource.make(Task.unit) { _ =>
+        RocksDBStore.destroy[Task](config)
+      }
+    } yield config
+
+    await {
+      res.allocated.map { case (config, release) =>
+        new Database(
+          state.namespaces,
+          Allocated(config, release),
+          maybeConnection = None
+        )
+      }
+    }
+  }
+
+  /** Release the database and all resources. */
+  override def destroySut(sut: Sut): Unit =
+    await {
+      sut.maybeConnection
+        .fold(Task.unit)(_.release)
+        .guarantee(sut.config.release)
+    }
+
+  /** Initialise a fresh model state.
+  override def genInitialState: Gen[State] =
+    for {
+      n  <- Gen.choose(3, 10)
+      ns <- Gen.listOfN(n, arbitrary[Array[Byte]].suchThat(_.nonEmpty))
+      namespaces = ns.map(_.toIndexedSeq).toIndexedSeq
+    } yield Model(
+      isConnected = false,
+      namespaces = namespaces,
+      store = Map.empty,
+      deleted = Map.empty,
+      coll0 = new KVCollection[Namespace, String, Int](namespaces(0)),
+      coll1 = new KVCollection[Namespace, Int, ByteVector](namespaces(1)),
+      coll2 = new KVCollection[Namespace, ByteVector, TestRecord](namespaces(2))
+    )
+
+  /** Produce a Command based on the current model state. */
+  def genCommand(state: State): Gen[Command] =
+    if (!state.isConnected) Gen.const(ToggleConnected)
+    else
+      Gen.frequency(
+        (10, genReadWriteProg(state)),
+        (3, genReadOnlyProg(state)),
+        (1, Gen.const(ToggleConnected))
+      )
+
+  /** Generate a sequence of writes and reads. */
+  def genReadWriteProg(state: State): Gen[ReadWriteProgram] =
+    for {
+      batching <- arbitrary[Boolean]
+      n        <- Gen.choose(0, 30)
+      ops <- Gen.listOfN(
+        n,
+        Gen.frequency(
+          10 -> genPut(state),
+          30 -> genPutExisting(state),
+          5  -> genDel(state),
+          15 -> genDelExisting(state),
+          5  -> genGet(state),
+          30 -> genGetExisting(state),
+          5  -> genGetDeleted(state)
+        )
+      )
+      program = ops.toList.sequence
+    } yield ReadWriteProgram(program, batching)
+
+  /** Generate a sequence of read-only operations. */
+  def genReadOnlyProg(state: State): Gen[ReadOnlyProgram] =
+    for {
+      n <- Gen.choose(0, 10)
+      ops <- Gen.listOfN(
+        n,
+        Gen.frequency(
+          1 -> genRead(state),
+          4 -> genReadExisting(state)
+        )
+      )
+      program = ops.toList.sequence
+    } yield ReadOnlyProgram(program)
+
+  implicit val arbColl: Arbitrary[Coll] = Arbitrary {
+    Gen.oneOf(Coll0, Coll1, Coll2)
+  }
+
+  implicit val arbByteVector: Arbitrary[ByteVector] = Arbitrary {
+    arbitrary[Array[Byte]].map(ByteVector(_))
+  }
+
+  implicit val arbTestRecord: Arbitrary[TestRecord] = Arbitrary {
+    for {
+      id    <- arbitrary[ByteVector]
+      name  <- Gen.alphaNumStr
+      value <- arbitrary[Int]
+    } yield TestRecord(id, name, value)
+  }
+
+  def genPut(state: State): Gen[KVStore[Namespace, Any]] =
+    arbitrary[Coll] flatMap {
+      case Coll0 =>
+        for {
+          k <- Gen.alphaLowerStr.suchThat(_.nonEmpty)
+          v <- arbitrary[Int]
+        } yield state.coll0.put(k, v)
+
+      case Coll1 =>
+        for {
+          k <- arbitrary[Int]
+          v <- arbitrary[ByteVector]
+        } yield state.coll1.put(k, v)
+
+      case Coll2 =>
+        for {
+          k <- arbitrary[ByteVector].suchThat(_.nonEmpty)
+          v <- arbitrary[TestRecord]
+        } yield state.coll2.put(k, v)
+    } map {
+      _.map(_.asInstanceOf[Any])
+    }
+
+  def genPutExisting(state: State): Gen[KVStore[Namespace, Any]] =
+    state.nonEmptyColls match {
+      case Nil =>
+        genPut(state)
+
+      case colls =>
+        for {
+          c <- Gen.oneOf(colls)
+          k <- Gen.oneOf(state.storeOf(c).keySet)
+          op <- c match {
+            case Coll0 =>
+              arbitrary[Int].map { v =>
+                state.coll0.put(k.asInstanceOf[String], v)
+              }
+            case Coll1 =>
+              arbitrary[ByteVector].map { v =>
+                state.coll1.put(k.asInstanceOf[Int], v)
+              }
+            case Coll2 =>
+              arbitrary[TestRecord].map { v =>
+                state.coll2.put(k.asInstanceOf[ByteVector], v)
+              }
+          }
+        } yield op.map(_.asInstanceOf[Any])
+    }
+
+  def genDel(state: State): Gen[KVStore[Namespace, Any]] =
+    arbitrary[Coll] flatMap {
+      case Coll0 =>
+        arbitrary[String].map(state.coll0.delete)
+      case Coll1 =>
+        arbitrary[Int].map(state.coll1.delete)
+      case Coll2 =>
+        arbitrary[ByteVector].map(state.coll2.delete)
+    } map {
+      _.map(_.asInstanceOf[Any])
+    }
+
+  def genDelExisting(state: State): Gen[KVStore[Namespace, Any]] =
+    state.nonEmptyColls match {
+      case Nil =>
+
genGet(state) + + case colls => + for { + c <- Gen.oneOf(colls) + k <- Gen.oneOf(state.storeOf(c).keySet) + op = c match { + case Coll0 => + state.coll0.delete(k.asInstanceOf[String]) + case Coll1 => + state.coll1.delete(k.asInstanceOf[Int]) + case Coll2 => + state.coll2.delete(k.asInstanceOf[ByteVector]) + } + } yield op.map(_.asInstanceOf[Any]) + } + + def genGet(state: State): Gen[KVStore[Namespace, Any]] = + arbitrary[Coll] flatMap { + case Coll0 => + arbitrary[String].map(state.coll0.get) + case Coll1 => + arbitrary[Int].map(state.coll1.get) + case Coll2 => + arbitrary[ByteVector].map(state.coll2.get) + } map { + _.map(_.asInstanceOf[Any]) + } + + def genGetExisting(state: State): Gen[KVStore[Namespace, Any]] = + state.nonEmptyColls match { + case Nil => + genGet(state) + + case colls => + for { + c <- Gen.oneOf(colls) + k <- Gen.oneOf(state.storeOf(c).keySet) + op = c match { + case Coll0 => + state.coll0.get(k.asInstanceOf[String]) + case Coll1 => + state.coll1.get(k.asInstanceOf[Int]) + case Coll2 => + state.coll2.get(k.asInstanceOf[ByteVector]) + } + } yield op.map(_.asInstanceOf[Any]) + } + + def genGetDeleted(state: State): Gen[KVStore[Namespace, Any]] = { + val hasDeletes = + Colls + .map { c => + c -> state.namespaces(c.idx) + } + .filter { case (_, n) => + state.deleted.getOrElse(n, Set.empty).nonEmpty + } + + hasDeletes match { + case Nil => + genGet(state) + + case deletes => + for { + cn <- Gen.oneOf(deletes) + (c, n) = cn + k <- Gen.oneOf(state.deleted(n)) + op = c match { + case Coll0 => + state.coll0.get(k.asInstanceOf[String]) + case Coll1 => + state.coll1.get(k.asInstanceOf[Int]) + case Coll2 => + state.coll2.get(k.asInstanceOf[ByteVector]) + } + } yield op.map(_.asInstanceOf[Any]) + } + } + + def genRead(state: State): Gen[KVStoreRead[Namespace, Any]] = + arbitrary[Coll] flatMap { + case Coll0 => + arbitrary[String].map(state.coll0.read) + case Coll1 => + arbitrary[Int].map(state.coll1.read) + case Coll2 => + arbitrary[ByteVector].map(state.coll2.read) + } map { + _.map(_.asInstanceOf[Any]) + } + + def genReadExisting(state: State): Gen[KVStoreRead[Namespace, Any]] = + state.nonEmptyColls match { + case Nil => + genRead(state) + + case colls => + for { + c <- Gen.oneOf(colls) + k <- Gen.oneOf(state.storeOf(c).keySet) + op = c match { + case Coll0 => + state.coll0.read(k.asInstanceOf[String]) + case Coll1 => + state.coll1.read(k.asInstanceOf[Int]) + case Coll2 => + state.coll2.read(k.asInstanceOf[ByteVector]) + } + } yield op.map(_.asInstanceOf[Any]) + } + + /** Open or close the database. */ + case object ToggleConnected extends UnitCommand { + def run(sut: Sut) = { + sut.maybeConnection match { + case Some(connection) => + await(connection.release) + sut.maybeConnection = None + + case None => + val connection = await { + RocksDBStore[Task](sut.config.value, sut.namespaces).allocated + .map { case (db, release) => + Allocated(db, release) + } + } + sut.maybeConnection = Some(connection) + } + } + + def preCondition(state: State) = true + def nextState(state: State) = state.copy( + isConnected = !state.isConnected + ) + def postCondition(state: State, succeeded: Boolean) = succeeded + } + + case class ReadWriteProgram( + program: KVStore[Namespace, List[Any]], + batching: Boolean = false + ) extends Command { + // Collect all results from a batch of execution steps. 
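+    // (one entry per operation in the program, in submission order)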
+ type Result = List[Any] + + def run(sut: Sut): Result = { + val db = sut.maybeConnection + .getOrElse(sys.error("The database is not connected.")) + .value + + await { + if (batching) { + db.runWithBatching(program) + } else { + db.runWithoutBatching(program) + } + } + } + + def preCondition(state: State): Boolean = state.isConnected + + def nextState(state: State): State = { + val nextStore = InMemoryKVS.compile(program).runS(state.store).value + + // Leave only what's still deleted. Add what's been deleted now. + val nextDeleted = state.deleted.map { case (n, ks) => + val existing = nextStore.getOrElse(n, Map.empty).keySet + n -> ks.filterNot(existing) + } ++ state.store.map { case (n, kvs) => + val existing = nextStore.getOrElse(n, Map.empty).keySet + n -> (kvs.keySet -- existing) + } + + state.copy( + store = nextStore, + deleted = nextDeleted + ) + } + + def postCondition(state: Model, result: Try[Result]): Prop = { + val expected = InMemoryKVS.compile(program).runA(state.store).value + result == Success(expected) + } + } + + case class ReadOnlyProgram( + program: KVStoreRead[Namespace, List[Any]] + ) extends Command { + // Collect all results from a batch of execution steps. + type Result = List[Any] + + def run(sut: Sut): Result = { + val db = sut.maybeConnection + .getOrElse(sys.error("The database is not connected.")) + .value + + await { + db.runReadOnly(program) + } + } + + def preCondition(state: State): Boolean = state.isConnected + + def nextState(state: State): State = state + + def postCondition(state: Model, result: Try[Result]): Prop = { + val expected = InMemoryKVS.compile(program).run(state.store) + result == Success(expected) + } + } +} diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVCollection.scala b/metronome/storage/src/io/iohk/metronome/storage/KVCollection.scala new file mode 100644 index 00000000..f6f026ac --- /dev/null +++ b/metronome/storage/src/io/iohk/metronome/storage/KVCollection.scala @@ -0,0 +1,37 @@ +package io.iohk.metronome.storage + +import scodec.Codec + +/** Storage for a specific type of data, e.g. blocks, in a given namespace. + * + * We should be able to string together KVStore operations across multiple + * collections and execute them in one batch. + */ +class KVCollection[N, K: Codec, V: Codec](namespace: N) { + + private implicit val kvsRW = KVStore.instance[N] + private implicit val kvsRO = KVStoreRead.instance[N] + + /** Get a value by key, if it exists, for a read-only operation. */ + def read(key: K): KVStoreRead[N, Option[V]] = + KVStoreRead[N].read(namespace, key) + + /** Put a value under a key. */ + def put(key: K, value: V): KVStore[N, Unit] = + KVStore[N].put(namespace, key, value) + + /** Get a value by key, if it exists, for potentially doing + * updates based on its value, i.e. the result can be composed + * with `put` and `delete`. + */ + def get(key: K): KVStore[N, Option[V]] = + KVStore[N].get(namespace, key) + + /** Delete a value by key. */ + def delete(key: K): KVStore[N, Unit] = + KVStore[N].delete(namespace, key) + + /** Update a key by getting the value and applying a function on it, if the value exists. 
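+    *
+    * For example, with a hypothetical `counters: KVCollection[N, String, Int]`,
+    * `counters.update("hits", _ + 1)` increments the stored value if the key
+    * exists and leaves the store unchanged otherwise.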
*/ + def update(key: K, f: V => V): KVStore[N, Unit] = + KVStore[N].update(namespace, key, f) +} diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala new file mode 100644 index 00000000..650dd780 --- /dev/null +++ b/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala @@ -0,0 +1,73 @@ +package io.iohk.metronome.storage + +import cats.{~>} +import cats.free.Free +import cats.free.Free.liftF +import scodec.Codec + +/** Helper methods to read/write a key-value store. */ +object KVStore { + + def unit[N]: KVStore[N, Unit] = + pure(()) + + def pure[N, A](a: A): KVStore[N, A] = + Free.pure(a) + + def instance[N]: Ops[N] = new Ops[N] {} + + def apply[N: Ops] = implicitly[Ops[N]] + + /** Scope all operations under the `N` type, which can be more convenient, + * e.g. `KVStore[String].pure(1)` instead of `KVStore.pure[String, Int](1)` + */ + trait Ops[N] { + import KVStoreOp._ + + type KVNamespacedOp[A] = ({ type L[A] = KVStoreOp[N, A] })#L[A] + type KVNamespacedReadOp[A] = ({ type L[A] = KVStoreReadOp[N, A] })#L[A] + + def unit: KVStore[N, Unit] = KVStore.unit[N] + + def pure[A](a: A) = KVStore.pure[N, A](a) + + def put[K: Codec, V: Codec]( + namespace: N, + key: K, + value: V + ): KVStore[N, Unit] = + liftF[KVNamespacedOp, Unit]( + Put[N, K, V](namespace, key, value) + ) + + def get[K: Codec, V: Codec](namespace: N, key: K): KVStore[N, Option[V]] = + liftF[KVNamespacedOp, Option[V]]( + Get[N, K, V](namespace, key) + ) + + def delete[K: Codec](namespace: N, key: K): KVStore[N, Unit] = + liftF[KVNamespacedOp, Unit]( + Delete[N, K](namespace, key) + ) + + def update[K: Codec, V: Codec]( + namespace: N, + key: K, + f: V => V + ): KVStore[N, Unit] = + get[K, V](namespace, key).flatMap { + case None => unit + case Some(value) => put(namespace, key, f(value)) + } + + /** Lift a read-only operation into a read-write one, so that we can chain them together. */ + def lift[A](read: KVStoreRead[N, A]): KVStore[N, A] = + read.mapK(liftCompiler) + + private val liftCompiler: KVNamespacedReadOp ~> KVNamespacedOp = + new (KVNamespacedReadOp ~> KVNamespacedOp) { + def apply[A](fa: KVNamespacedReadOp[A]): KVNamespacedOp[A] = + fa + } + } +} diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStoreOp.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStoreOp.scala new file mode 100644 index 00000000..44f59287 --- /dev/null +++ b/metronome/storage/src/io/iohk/metronome/storage/KVStoreOp.scala @@ -0,0 +1,34 @@ +package io.iohk.metronome.storage + +import scodec.Codec + +/** Representing key-value storage operations as a Free Monad, + * so that we can pick an execution strategy that best fits + * the database technology at hand: + * - execute multiple writes atomically by batching + * - execute all reads and writes in a transaction + * + * The key-value store is expected to store binary data, + * so a scodec.Codec is required for all operations to + * serialize the keys and the values. 
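+  *
+  * For example (a sketch; nothing touches the database until an interpreter
+  * such as `KVStoreState` folds the free structure):
+  *
+  * ```
+  * import scodec.codecs.implicits._
+  * val op: KVStoreOp[String, Unit] = KVStoreOp.Put("namespace", "key", 42)
+  * ```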
+  *
+  * https://typelevel.org/cats/datatypes/freemonad.html
+  */
+sealed trait KVStoreOp[N, A]
+sealed trait KVStoreReadOp[N, A]  extends KVStoreOp[N, A]
+sealed trait KVStoreWriteOp[N, A] extends KVStoreOp[N, A]
+
+object KVStoreOp {
+  case class Put[N, K, V](namespace: N, key: K, value: V)(implicit
+      val keyCodec: Codec[K],
+      val valueCodec: Codec[V]
+  ) extends KVStoreWriteOp[N, Unit]
+
+  case class Get[N, K, V](namespace: N, key: K)(implicit
+      val keyCodec: Codec[K],
+      val valueCodec: Codec[V]
+  ) extends KVStoreReadOp[N, Option[V]]
+
+  case class Delete[N, K](namespace: N, key: K)(implicit val keyCodec: Codec[K])
+      extends KVStoreWriteOp[N, Unit]
+}
diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala
new file mode 100644
index 00000000..0a8afcca
--- /dev/null
+++ b/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala
@@ -0,0 +1,40 @@
+package io.iohk.metronome.storage
+
+import cats.free.Free
+import cats.free.Free.liftF
+import scodec.Codec
+
+/** Helper methods to compose operations that strictly only do reads, no writes.
+  *
+  * Basically the same as `KVStore` without `put` and `delete`.
+  */
+object KVStoreRead {
+
+  def unit[N]: KVStoreRead[N, Unit] =
+    pure(())
+
+  def pure[N, A](a: A): KVStoreRead[N, A] =
+    Free.pure(a)
+
+  def instance[N]: Ops[N] = new Ops[N] {}
+
+  def apply[N: Ops] = implicitly[Ops[N]]
+
+  trait Ops[N] {
+    import KVStoreOp._
+
+    type KVNamespacedOp[A] = ({ type L[A] = KVStoreReadOp[N, A] })#L[A]
+
+    def unit: KVStoreRead[N, Unit] = KVStoreRead.unit[N]
+
+    def pure[A](a: A) = KVStoreRead.pure[N, A](a)
+
+    def read[K: Codec, V: Codec](
+        namespace: N,
+        key: K
+    ): KVStoreRead[N, Option[V]] =
+      liftF[KVNamespacedOp, Option[V]](
+        Get[N, K, V](namespace, key)
+      )
+  }
+}
diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStoreState.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStoreState.scala
new file mode 100644
index 00000000..29996235
--- /dev/null
+++ b/metronome/storage/src/io/iohk/metronome/storage/KVStoreState.scala
@@ -0,0 +1,79 @@
+package io.iohk.metronome.storage
+
+import cats.{~>}
+import cats.data.{State, Reader}
+import io.iohk.metronome.storage.KVStoreOp.{Put, Get, Delete}
+
+/** A pure implementation of the Free interpreter using the State monad.
+  *
+  * It uses a specific namespace type, which is common to all collections.
+  */
+class KVStoreState[N] {
+
+  // Ignoring the Codec for the in-memory use case.
+  type Store = Map[N, Map[Any, Any]]
+  // Type aliases to support the `~>` transformation with types that
+  // only have 1 generic type argument `A`.
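+  // (`({ type L[A] = KVStoreOp[N, A] })#L` is the Scala 2 type-lambda
+  // encoding of the partially applied constructor `KVStoreOp[N, *]`.)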
+  type KVNamespacedState[A] = State[Store, A]
+  type KVNamespacedOp[A]    = ({ type L[A] = KVStoreOp[N, A] })#L[A]
+
+  type KVNamespacedReader[A] = Reader[Store, A]
+  type KVNamespacedReadOp[A] = ({ type L[A] = KVStoreReadOp[N, A] })#L[A]
+
+  private val stateCompiler: KVNamespacedOp ~> KVNamespacedState =
+    new (KVNamespacedOp ~> KVNamespacedState) {
+      def apply[A](fa: KVNamespacedOp[A]): KVNamespacedState[A] =
+        fa match {
+          case Put(n, k, v) =>
+            State.modify { nkvs =>
+              val kvs = nkvs.getOrElse(n, Map.empty)
+              nkvs.updated(n, kvs.updated(k, v))
+            }
+
+          case Get(n, k) =>
+            State.inspect { nkvs =>
+              for {
+                kvs <- nkvs.get(n)
+                v   <- kvs.get(k)
+                // NOTE: This should be fine as long as we access it through
+                // `KVCollection` which works with 1 kind of value;
+                // otherwise we could change the effect to allow errors:
+                // `State[Store, Either[Throwable, A]]`
+
+                // The following cast would work but it's not required:
+                // .asInstanceOf[A]
+              } yield v
+            }
+
+          case Delete(n, k) =>
+            State.modify { nkvs =>
+              val kvs = nkvs.getOrElse(n, Map.empty) - k
+              if (kvs.isEmpty) nkvs - n else nkvs.updated(n, kvs)
+            }
+        }
+    }
+
+  private val readerCompiler: KVNamespacedReadOp ~> KVNamespacedReader =
+    new (KVNamespacedReadOp ~> KVNamespacedReader) {
+      def apply[A](fa: KVNamespacedReadOp[A]): KVNamespacedReader[A] =
+        fa match {
+          case Get(n, k) =>
+            Reader { nkvs =>
+              for {
+                kvs <- nkvs.get(n)
+                v   <- kvs.get(k)
+              } yield v
+            }
+        }
+    }
+
+  /** Compile a KVStore program to a State monad, which can be executed like:
+    *
+    * `new KVStoreState[String].compile(program).run(Map.empty).value`
+    */
+  def compile[A](program: KVStore[N, A]): KVNamespacedState[A] =
+    program.foldMap(stateCompiler)
+
+  def compile[A](program: KVStoreRead[N, A]): KVNamespacedReader[A] =
+    program.foldMap(readerCompiler)
+}
diff --git a/metronome/storage/src/io/iohk/metronome/storage/package.scala b/metronome/storage/src/io/iohk/metronome/storage/package.scala
new file mode 100644
index 00000000..c340dc80
--- /dev/null
+++ b/metronome/storage/src/io/iohk/metronome/storage/package.scala
@@ -0,0 +1,18 @@
+package io.iohk.metronome
+
+import cats.free.Free
+
+package object storage {
+
+  /** Read/Write operations over a key-value store. */
+  type KVStore[N, A] = Free[({ type L[A] = KVStoreOp[N, A] })#L, A]
+
+  /** Read-only operations over a key-value store. */
+  type KVStoreRead[N, A] = Free[({ type L[A] = KVStoreReadOp[N, A] })#L, A]
+
+  /** Extension method to lift a read-only operation to read-write. */
+  implicit class KVStoreReadOps[N, A](val read: KVStoreRead[N, A])
+      extends AnyVal {
+    def lift: KVStore[N, A] = KVStore.instance[N].lift(read)
+  }
+}
diff --git a/metronome/storage/test/src/io/iohk/metronome/storage/KVStoreStateSpec.scala b/metronome/storage/test/src/io/iohk/metronome/storage/KVStoreStateSpec.scala
new file mode 100644
index 00000000..4778e4d2
--- /dev/null
+++ b/metronome/storage/test/src/io/iohk/metronome/storage/KVStoreStateSpec.scala
@@ -0,0 +1,45 @@
+package io.iohk.metronome.storage
+
+import org.scalatest.flatspec.AnyFlatSpec
+import org.scalatest.matchers.should.Matchers
+import scodec.codecs.implicits._
+
+class KVStoreStateSpec extends AnyFlatSpec with Matchers {
+  import KVStoreStateSpec._
+
+  behavior of "KVStoreState"
+
+  it should "compose multiple collections" in {
+    type Namespace = String
+    // Two independent collections with different types of keys and values.
+ val collA = new KVCollection[Namespace, Int, RecordA](namespace = "a") + val collB = new KVCollection[Namespace, String, RecordB](namespace = "b") + + val program: KVStore[Namespace, Option[RecordA]] = for { + _ <- collA.put(1, RecordA("one")) + _ <- collB.put("two", RecordB(2)) + b <- collB.get("three") + _ <- collB.put("three", RecordB(3)) + _ <- collB.delete("two") + _ <- + if (b.isEmpty) collA.put(4, RecordA("four")) + else KVStore.unit[Namespace] + a <- collA.read(1).lift + } yield a + + val compiler = new KVStoreState[Namespace] + + val (store, maybeA) = compiler.compile(program).run(Map.empty).value + + maybeA shouldBe Some(RecordA("one")) + store shouldBe Map( + "a" -> Map(1 -> RecordA("one"), 4 -> RecordA("four")), + "b" -> Map("three" -> RecordB(3)) + ) + } +} + +object KVStoreStateSpec { + case class RecordA(a: String) + case class RecordB(b: Int) +} From 35ab159661810ee3df86c0252300551954015c50 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Fri, 19 Mar 2021 15:00:58 +0000 Subject: [PATCH 06/48] PM-2921: Setup CI (#8) * Add .circleci/config.yml * PM-2921: Try installing newer version of Coursier that can install scalafmt. * PM-2921: Cannot write to /usr/local/bin * PM-2921: Compile and test in one step. * PM-2921: Try to cache more, compile and test separately. * PM-2921: Make sure the namespaces are unique. * PM-2921: Run compile and test as one step. Don't skip scalafmt. * PM-2921: Use BASH_ENV to install coursier. * PM-2921: Fix formatting. * PM-2921: Try to install mill into local/bin * PM-2921: Remove comment about older install style. --- .circleci/config.yml | 85 +++++++++++++++++++ build.sc | 6 +- .../metronome/rocksdb/RocksDBStoreSpec.scala | 8 +- 3 files changed, 96 insertions(+), 3 deletions(-) create mode 100644 .circleci/config.yml diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 00000000..c0a3834c --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,85 @@ +version: 2.1 +jobs: + build: + docker: + - image: circleci/openjdk:8-jdk + + working_directory: ~/repo + + environment: + JVM_OPTS: -Xmx3200m + TERM: dumb + + steps: + - checkout + + # Download and cache dependencies + - restore_cache: + keys: + - v1-dependencies-{{ checksum "build.sc" }} + # fallback to using the latest cache if no exact match is found + - v1-dependencies- + + # https://circleci.com/docs/2.0/env-vars/#using-bash_env-to-set-environment-variables + - run: + name: install coursier + command: | + curl -fLo cs https://git.io/coursier-cli-"$(uname | tr LD ld)" + chmod +x cs + ./cs install cs + rm cs + echo "export PATH=$PATH:/home/circleci/.local/share/coursier/bin" >> $BASH_ENV + + - run: + name: install scalafmt + command: cs install scalafmt + + - run: + name: install mill + command: | + mkdir -p ~/.local/bin + (echo "#!/usr/bin/env sh" && curl -L https://github.com/lihaoyi/mill/releases/download/0.8.0/0.8.0) > ~/.local/bin/mill + chmod +x ~/.local/bin/mill + + - run: + name: check that the code is formatted properly + command: scalafmt --test + + # For some reason if I try to separate compile and test, then the subsequent test step does nothing. 
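+      # (`__` is mill's recursive wildcard, so this runs the `test` target of every module.)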
+ - run: + name: compile and test + command: mill __.test + + - save_cache: + paths: + - ~/.ivy2 + - ~/.cache + key: v1-dependencies--{{ checksum "build.sc" }} + + - when: + condition: + or: + - equal: [ master, << pipeline.git.branch >> ] + - equal: [ develop, << pipeline.git.branch >> ] + steps: + - run: + name: install gpg2 + # GPG in docker needs to be run with some additional flags + # and we are not able to change how mill uses it + # this is why we're creating wrapper that adds the flags + command: sh -c "apt update && apt install -y gnupg2 && mv /usr/bin/gpg /usr/bin/gpg-vanilla && echo '#!/bin/sh\n\n/usr/bin/gpg-vanilla --no-tty --pinentry loopback \$@' > /usr/bin/gpg && chmod 755 /usr/bin/gpg && cat /usr/bin/gpg" + + - run: + name: install base64 + command: apt update && apt install -y cl-base64 + + # TODO: Configure Mantis' credentials + # - run: + # name: publish + # command: .circleci/publish + # no_output_timeout: 30m + +workflows: + build_and_publish: + jobs: + - build diff --git a/build.sc b/build.sc index 5f624682..e936e61a 100644 --- a/build.sc +++ b/build.sc @@ -54,7 +54,11 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { // Add yourself if you make a PR! developers = Seq( Developer("aakoshh", "Akosh Farkash", "https://github.com/aakoshh"), - Developer("lemastero", "Piotr Paradzinski", "https://github.com/lemastero") + Developer( + "lemastero", + "Piotr Paradzinski", + "https://github.com/lemastero" + ) ) ) } diff --git a/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala b/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala index 242fb3ad..99aff80f 100644 --- a/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala +++ b/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala @@ -189,8 +189,12 @@ object RocksDBStoreCommands extends Commands { /** Initialise a fresh model state. */ override def genInitialState: Gen[State] = for { - n <- Gen.choose(3, 10) - ns <- Gen.listOfN(n, arbitrary[Array[Byte]].suchThat(_.nonEmpty)) + // Generate at least 3 unique namespaces. + n <- Gen.choose(3, 10) + ns <- Gen + .listOfN(n, arbitrary[ByteVector].suchThat(_.nonEmpty)) + .map(_.distinct) + .suchThat(_.size >= 3) namespaces = ns.map(_.toIndexedSeq).toIndexedSeq } yield Model( isConnected = false, From a4ad463d3fcb38310676655afe0202ed8a61a3d0 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Wed, 24 Mar 2021 10:15:46 +0000 Subject: [PATCH 07/48] PM-2909: Basic HotStuff (#5) * PM-2909: Generic data types for the Basic HotStuff protocol. * PM-2909: Effect and Event to create a block. * PM-2909: Fix the comment about lockedQC. * PM-2909: Field to collect votes in the current phase. * PM-2909: Skeleton for message handling. * PM-2909: Combine Prepare, Commit and Pre-Commit messages into a Quroum message. * PM-2909: Validating the safe extension rule. * PM-2909: Cast votes. * PM-2909: Collect votes and broadcast QC * PM-2909: Register NewView and trigger block creation. * PM-2909: Check signatures. * PM-2909: Ignore extra votes. * PM-2909: Add extraVotes. * PM-2909: Remove unused imports. * PM-2909: Removed the behind-the-scenes prev-phase check in favor of explictly passing the phases. * PM-2909: Ascii state machine diagram. * PM-2909: Pass the signign key and federation to validation. * PM-2909: Removed unused errors. Ignore extra NewViews. * PM-2909: Fix leader check on NewView. * PM-2909: Keep the highest prepared per sender. Fix hiqhQC selection. 
* PM-2909: Fix quorum checks. * PM-2909: ProtocolError for unexpected block hash. * PM-2909: Extra vote check to use isBefore * PM-2909: Added ProtocolError.TooEarly * PM-2909: Added ProtocolError.TooEarly * PM-2900: Setup for HotStuff tests. * PM-2900: Testing timeout. * PM-2900: Test valid commands. * PM-2900: Test NewView. * PM-2900: Test HighQC, add labeling. * PM-2900: Structure for genValid. * PM-2900: Save the full HighQC, not just the view. * PM-2900: Test block creation. * PM-2900: Test Prepare. * PM-2900: Cover failures with labels as well. * PM-2900: Test votes. * PM-2900: Test Quorum * PM-2900: Test invalid commands. * PM-2900: Testing that the TooEarly classification is happening. * PM-2909: Fix compilation on 2.12. * PM-2909: Fix the phase to compare for early vote/quorum. --- build.sc | 52 +- .../core/src/metronome/core/Tagger.scala | 36 + .../core/src/metronome/core/Validated.scala | 3 + .../core/src/metronome/core/package.scala | 5 + .../src/metronome/crypto/GroupSignature.scala | 7 + .../metronome/crypto/PartialSignature.scala | 7 + .../hotstuff/consensus/Federation.scala | 20 + .../hotstuff/consensus/ViewNumber.scala | 13 + .../hotstuff/consensus/basic/Agreement.scala | 27 + .../hotstuff/consensus/basic/Block.scala | 19 + .../hotstuff/consensus/basic/Effect.scala | 57 ++ .../hotstuff/consensus/basic/Event.scala | 26 + .../hotstuff/consensus/basic/Message.scala | 69 ++ .../hotstuff/consensus/basic/Phase.scala | 31 + .../consensus/basic/ProtocolError.scala | 77 ++ .../consensus/basic/ProtocolState.scala | 492 +++++++++ .../consensus/basic/QuorumCertificate.scala | 14 + .../hotstuff/consensus/basic/Signing.scala | 74 ++ .../hotstuff/consensus/package.scala | 5 + .../basic/HotStuffProtocolProps.scala | 931 ++++++++++++++++++ ...StoreSpec.scala => RocksDBStorePropsscala} | 8 +- 21 files changed, 1955 insertions(+), 18 deletions(-) create mode 100644 metronome/core/src/metronome/core/Tagger.scala create mode 100644 metronome/core/src/metronome/core/Validated.scala create mode 100644 metronome/core/src/metronome/core/package.scala create mode 100644 metronome/crypto/src/metronome/crypto/GroupSignature.scala create mode 100644 metronome/crypto/src/metronome/crypto/PartialSignature.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/ViewNumber.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Agreement.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Block.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Effect.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Event.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Message.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Phase.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolError.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/QuorumCertificate.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Signing.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/package.scala 
create mode 100644 metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala rename metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/{RocksDBStoreSpec.scala => RocksDBStorePropsscala} (98%) diff --git a/build.sc b/build.sc index e936e61a..ceb6c14a 100644 --- a/build.sc +++ b/build.sc @@ -11,18 +11,20 @@ import mill.contrib.versionfile.VersionFileModule object versionFile extends VersionFileModule object VersionOf { - val cats = "2.3.1" - val config = "1.4.1" - val logback = "1.2.3" - val monix = "3.3.0" - val prometheus = "0.10.0" - val rocksdb = "6.15.2" - val scalacheck = "1.15.2" - val scalalogging = "3.9.2" - val scalatest = "3.2.5" - val scalanet = "0.7.0" - val `scodec-core` = "1.11.7" - val `scodec-bits` = "1.1.12" + val cats = "2.3.1" + val config = "1.4.1" + val `kind-projector` = "0.11.3" + val logback = "1.2.3" + val monix = "3.3.0" + val prometheus = "0.10.0" + val rocksdb = "6.15.2" + val scalacheck = "1.15.2" + val scalalogging = "3.9.2" + val scalatest = "3.2.5" + val scalanet = "0.7.0" + val shapeless = "2.3.3" + val `scodec-core` = "1.11.7" + val `scodec-bits` = "1.1.12" } // Using 2.12.13 instead of 2.12.10 to access @nowarn, to disable certain deperaction @@ -128,6 +130,21 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { } } + /** Data models shared between all modules. */ + object core extends SubModule with Publishing { + override def description: String = + "Common data models." + + override def ivyDeps = Agg( + ivy"com.chuusai::shapeless:${VersionOf.shapeless}" + ) + } + + /** Generic Peer-to-Peer components that can multiplex protocols + * from different modules over a single authenticated TLS connection. + */ + object networking extends SubModule + /** Storage abstractions, e.g. a generic key-value store. */ object storage extends SubModule { override def ivyDeps = super.ivyDeps() ++ Agg( @@ -147,7 +164,9 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { override def description: String = "Abstractions for contravariant tracing." - def scalacPluginIvyDeps = Agg(ivy"org.typelevel:::kind-projector:0.11.3") + def scalacPluginIvyDeps = Agg( + ivy"org.typelevel:::kind-projector:${VersionOf.`kind-projector`}" + ) } /** Additional crypto utilities such as threshold signature. */ @@ -164,6 +183,9 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { /** Pure consensus models. */ object consensus extends SubModule { + override def moduleDeps: Seq[PublishModule] = + Seq(core, crypto) + object test extends TestModule } @@ -205,7 +227,9 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { Seq(tracing, crypto) } - /** Implements the checkpointing functionality and the ledger rules. + /** Implements the checkpointing functionality, the ledger rules, + * and state synchronisation, which is not an inherent part of + * HotStuff, but applies to the checkpointing use case. * * If it was published, it could be directly included in the checkpoint assisted blockchain application, * so the service and the interpreter can share data in memory. 
diff --git a/metronome/core/src/metronome/core/Tagger.scala b/metronome/core/src/metronome/core/Tagger.scala
new file mode 100644
index 00000000..975e344e
--- /dev/null
+++ b/metronome/core/src/metronome/core/Tagger.scala
@@ -0,0 +1,36 @@
+package metronome.core
+
+import shapeless.tag, tag.@@
+
+/** Helper class to make it easier to tag raw types such as BitVector
+  * to specializations so that the compiler can help make sure we are
+  * passing the right values to methods.
+  *
+  * ```
+  * object MyType extends Tagger[ByteVector]
+  * type MyType = MyType.Tagged
+  *
+  * val myThing: MyType = MyType(ByteVector.empty)
+  * ```
+  */
+trait Tagger[U] {
+  trait Tag
+  type Tagged = U @@ Tag
+  def apply(underlying: U): Tagged =
+    tag[Tag][U](underlying)
+}
+
+/** Helper class to tag not a specific raw type, but to apply a common tag to any type.
+  *
+  * ```
+  * object Validated extends GenericTagger
+  * type Validated[U] = Validated.Tagged[U]
+  * ```
+  */
+trait GenericTagger {
+  trait Tag
+  type Tagged[U] = U @@ Tag
+
+  def apply[U](underlying: U): Tagged[U] =
+    tag[Tag][U](underlying)
+}
diff --git a/metronome/core/src/metronome/core/Validated.scala b/metronome/core/src/metronome/core/Validated.scala
new file mode 100644
index 00000000..aed95743
--- /dev/null
+++ b/metronome/core/src/metronome/core/Validated.scala
@@ -0,0 +1,3 @@
+package metronome.core
+
+object Validated extends GenericTagger
diff --git a/metronome/core/src/metronome/core/package.scala b/metronome/core/src/metronome/core/package.scala
new file mode 100644
index 00000000..7432ecba
--- /dev/null
+++ b/metronome/core/src/metronome/core/package.scala
@@ -0,0 +1,5 @@
+package metronome
+
+package object core {
+  type Validated[U] = Validated.Tagged[U]
+}
diff --git a/metronome/crypto/src/metronome/crypto/GroupSignature.scala b/metronome/crypto/src/metronome/crypto/GroupSignature.scala
new file mode 100644
index 00000000..1eccd169
--- /dev/null
+++ b/metronome/crypto/src/metronome/crypto/GroupSignature.scala
@@ -0,0 +1,7 @@
+package metronome.crypto
+
+/** Group signature of members with identity `K` over some content `H`,
+  * represented by type `G`, e.g. `G` could be a `List[Secp256k1Signature]`
+  * or a single combined threshold signature of some sort.
+  */
+case class GroupSignature[K, H, G](sig: G)
diff --git a/metronome/crypto/src/metronome/crypto/PartialSignature.scala b/metronome/crypto/src/metronome/crypto/PartialSignature.scala
new file mode 100644
index 00000000..c54223f1
--- /dev/null
+++ b/metronome/crypto/src/metronome/crypto/PartialSignature.scala
@@ -0,0 +1,7 @@
+package metronome.crypto
+
+/** An individual signature of a member with identity `K` over some content `H`,
+  * represented by type `P`, e.g. `P` could be a single `Secp256k1Signature`
+  * or a partial threshold signature of some sort.
+  */
+case class PartialSignature[K, H, P](sig: P)
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala
new file mode 100644
index 00000000..bd8bbe09
--- /dev/null
+++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala
@@ -0,0 +1,20 @@
+package metronome.hotstuff.consensus
+
+/** Collection of keys of the federation members. */
+case class Federation[PKey](
+    publicKeys: IndexedSeq[PKey]
+) {
+  private val publicKeySet = publicKeys.toSet
+
+  /** Size of the federation, `n`. */
+  val size: Int = publicKeys.size
+
+  /** Maximum number of Byzantine nodes, `f`, so that `n >= 3*f+1`.
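+    * For example, with `n = 10` federation members this is `(10 - 1) / 3 = 3`,
+    * so a quorum of `n - f = 7` members is required.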
*/ + val maxFaulty: Int = (size - 1) / 3 + + def contains(publicKey: PKey): Boolean = + publicKeySet.contains(publicKey) + + def leaderOf(viewNumber: ViewNumber): PKey = + publicKeys((viewNumber % size).toInt) +} diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/ViewNumber.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/ViewNumber.scala new file mode 100644 index 00000000..f92e1f26 --- /dev/null +++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/ViewNumber.scala @@ -0,0 +1,13 @@ +package metronome.hotstuff.consensus + +import metronome.core.Tagger + +object ViewNumber extends Tagger[Long] { + implicit class Ops(val vn: ViewNumber) extends AnyVal { + def next: ViewNumber = ViewNumber(vn + 1) + def prev: ViewNumber = ViewNumber(vn - 1) + } + + implicit val ord: Ordering[ViewNumber] = + Ordering.by(identity[Long]) +} diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Agreement.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Agreement.scala new file mode 100644 index 00000000..ce2d2266 --- /dev/null +++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Agreement.scala @@ -0,0 +1,27 @@ +package metronome.hotstuff.consensus.basic + +/** Capture all the generic types in the BFT agreement, + * so we don't have to commit to any particular set of content. + */ +trait Agreement { + + /** The container type that the agreement is about. */ + type Block + + /** The type we use for hashing blocks, + * so they don't have to be sent in entirety in votes. + */ + type Hash + + /** The concrete type that represents a partial signature. */ + type PSig + + /** The concrete type that represents a group signature. */ + type GSig + + /** The public key identity of federation members. */ + type PKey + + /** The secret key used for signing partial messages. */ + type SKey +} diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Block.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Block.scala new file mode 100644 index 00000000..32a8c231 --- /dev/null +++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Block.scala @@ -0,0 +1,19 @@ +package metronome.hotstuff.consensus.basic + +/** Type class to project the properties we need a HotStuff block to have + * from the generic `Block` type in the `Agreement`. + * + * This allows the block to include use-case specific details HotStuff doesn't + * care about, for example to build up a ledger state that can be synchronised + * directly, rather than just carry out a sequence of commands on all replicas. + * This would require the blocks to contain ledger state hashes, which other + * use cases may have no use for. 
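+  *
+  * A minimal sketch of an instance, with illustrative types:
+  *
+  * ```
+  * case class TestBlock(id: Int, parentId: Int)
+  *
+  * object TestAgreement extends Agreement {
+  *   type Block = TestBlock
+  *   type Hash  = Int
+  *   type PSig  = Long
+  *   type GSig  = List[Long]
+  *   type PKey  = Int
+  *   type SKey  = Int
+  * }
+  *
+  * implicit val testBlock: Block[TestAgreement.type] =
+  *   new Block[TestAgreement.type] {
+  *     def blockHash(b: TestBlock)       = b.id
+  *     def parentBlockHash(b: TestBlock) = b.parentId
+  *   }
+  * ```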
+ */ +trait Block[A <: Agreement] { + def blockHash(b: A#Block): A#Hash + def parentBlockHash(b: A#Block): A#Hash +} + +object Block { + def apply[A <: Agreement: Block]: Block[A] = implicitly[Block[A]] +} diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Effect.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Effect.scala new file mode 100644 index 00000000..5bc868c9 --- /dev/null +++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Effect.scala @@ -0,0 +1,57 @@ +package metronome.hotstuff.consensus.basic + +import scala.concurrent.duration.FiniteDuration + +import metronome.hotstuff.consensus.ViewNumber + +/** Represent all possible effects that a protocol transition can + * ask the host system to carry out, e.g. send messages to replicas. + */ +sealed trait Effect[+A <: Agreement] + +object Effect { + + /** Schedule a callback after a timeout to initiate the next view + * if the current rounds ends without an agreement. + */ + case class ScheduleNextView( + viewNumber: ViewNumber, + timeout: FiniteDuration + ) extends Effect[Nothing] + + /** Send a message to a federation member. + * + * The recipient can be the current member itself (i.e. the leader + * sending itself a message to trigger its own vote). It is best + * if the host system carries out these effects before it talks + * to the external world, to avoid any possible phase mismatches. + * + * The `ProtocolState` could do it on its own but this way it's + * slightly closer to the pseudo code. + */ + case class SendMessage[A <: Agreement]( + recipient: A#PKey, + message: Message[A] + ) extends Effect[A] + + /** The leader of the round wants to propose a new block + * on top of the last prepared one. The host environment + * should consult the mempool and create one, passing the + * result as an event. + * + * The block must be built as a child of `highQC.blockHash`. + */ + case class CreateBlock[A <: Agreement]( + viewNumber: ViewNumber, + highQC: QuorumCertificate[A] + ) extends Effect[A] + + /** Execute blocks after a decision, from the last executed hash + * up to the block included in the Quorum Certificate. + */ + case class ExecuteBlocks[A <: Agreement]( + lastExecutedBlockHash: A#Hash, + quorumCertificate: QuorumCertificate[A] + ) extends Effect[A] + +} diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Event.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Event.scala new file mode 100644 index 00000000..ade31767 --- /dev/null +++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Event.scala @@ -0,0 +1,26 @@ +package metronome.hotstuff.consensus.basic + +import metronome.hotstuff.consensus.ViewNumber + +/** Input events for the protocol model. */ +sealed trait Event[A <: Agreement] + +object Event { + + /** A scheduled timeout for the round, initiating the next view. */ + case class NextView(viewNumber: ViewNumber) extends Event[Nothing] + + /** A message received from a federation member. */ + case class MessageReceived[A <: Agreement]( + sender: A#PKey, + message: Message[A] + ) extends Event[A] + + /** The block the leader asked to be created is ready. */ + case class BlockCreated[A <: Agreement]( + viewNumber: ViewNumber, + block: A#Block, + // The certificate which the block extended. 
+      highQC: QuorumCertificate[A]
+  ) extends Event[A]
+}
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Message.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Message.scala
new file mode 100644
index 00000000..0fd7c8f0
--- /dev/null
+++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Message.scala
@@ -0,0 +1,69 @@
+package metronome.hotstuff.consensus.basic
+
+import metronome.crypto.PartialSignature
+import metronome.hotstuff.consensus.ViewNumber
+
+/** Basic HotStuff protocol messages. */
+sealed trait Message[A <: Agreement] {
+
+  /** Messages are only accepted if they match the node's current view number. */
+  def viewNumber: ViewNumber
+}
+
+/** Message from the leader to the replica. */
+sealed trait LeaderMessage[A <: Agreement] extends Message[A]
+
+/** Message from the replica to the leader. */
+sealed trait ReplicaMessage[A <: Agreement] extends Message[A]
+
+object Message {
+
+  /** The leader proposes a new block in the `Prepare` phase,
+    * using the High Q.C. gathered from `NewView` messages.
+    */
+  case class Prepare[A <: Agreement](
+      viewNumber: ViewNumber,
+      block: A#Block,
+      highQC: QuorumCertificate[A]
+  ) extends LeaderMessage[A]
+
+  /** Having received one of the leader messages, the replica
+    * casts its vote with its partial signature.
+    *
+    * The vote carries the hash of the block, which was either
+    * received in full in the `Prepare` message, or as part
+    * of a `QuorumCertificate`.
+    */
+  case class Vote[A <: Agreement](
+      viewNumber: ViewNumber,
+      phase: VotingPhase,
+      blockHash: A#Hash,
+      signature: PartialSignature[
+        A#PKey,
+        (VotingPhase, ViewNumber, A#Hash),
+        A#PSig
+      ]
+  ) extends ReplicaMessage[A]
+
+  /** Having collected enough votes from replicas,
+    * the leader combines the votes into a Q.C. and
+    * broadcasts it to replicas:
+    * - Prepare votes combine into a Prepare Q.C., expected in the PreCommit phase.
+    * - PreCommit votes combine into a PreCommit Q.C., expected in the Commit phase.
+    * - Commit votes combine into a Commit Q.C, expected in the Decide phase.
+    *
+    * The certificate contains the hash of the block to vote on.
+    */
+  case class Quorum[A <: Agreement](
+      viewNumber: ViewNumber,
+      quorumCertificate: QuorumCertificate[A]
+  ) extends LeaderMessage[A]
+
+  /** At the end of the round, replicas send the `NewView` message
+    * to the next leader with the last Prepare Q.C.
+    */
+  case class NewView[A <: Agreement](
+      viewNumber: ViewNumber,
+      prepareQC: QuorumCertificate[A]
+  ) extends ReplicaMessage[A]
+}
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Phase.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Phase.scala
new file mode 100644
index 00000000..5b6808f7
--- /dev/null
+++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Phase.scala
@@ -0,0 +1,31 @@
+package metronome.hotstuff.consensus.basic
+
+/** All phases of the basic HotStuff protocol. */
+sealed trait Phase {
+  import Phase._
+  def next: Phase =
+    this match {
+      case Prepare   => PreCommit
+      case PreCommit => Commit
+      case Commit    => Decide
+      case Decide    => Prepare
+    }
+
+  def prev: Phase =
+    this match {
+      case Prepare   => Decide
+      case PreCommit => Prepare
+      case Commit    => PreCommit
+      case Decide    => Commit
+    }
+}
+
+/** Subset of phases over which there can be a vote and a Quorum Certificate. */
+sealed trait VotingPhase extends Phase
+
+object Phase {
+  case object Prepare   extends VotingPhase
+  case object PreCommit extends VotingPhase
+  case object Commit    extends VotingPhase
+  case object Decide    extends Phase
+}
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolError.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolError.scala
new file mode 100644
index 00000000..7b48fa90
--- /dev/null
+++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolError.scala
@@ -0,0 +1,77 @@
+package metronome.hotstuff.consensus.basic
+
+import metronome.hotstuff.consensus.ViewNumber
+
+sealed trait ProtocolError[A <: Agreement]
+
+object ProtocolError {
+
+  /** A leader message was received from a replica that isn't the leader of the view. */
+  case class NotFromLeader[A <: Agreement](
+      event: Event.MessageReceived[A],
+      expected: A#PKey
+  ) extends ProtocolError[A]
+
+  /** A replica message was received in a view that this replica is not leading. */
+  case class NotToLeader[A <: Agreement](
+      event: Event.MessageReceived[A],
+      expected: A#PKey
+  ) extends ProtocolError[A]
+
+  /** A message coming from outside the federation members. */
+  case class NotFromFederation[A <: Agreement](
+      event: Event.MessageReceived[A]
+  ) extends ProtocolError[A]
+
+  /** The vote signature doesn't match the content. */
+  case class InvalidVote[A <: Agreement](
+      sender: A#PKey,
+      message: Message.Vote[A]
+  ) extends ProtocolError[A]
+
+  /** The Q.C. signature doesn't match the content. */
+  case class InvalidQuorumCertificate[A <: Agreement](
+      sender: A#PKey,
+      quorumCertificate: QuorumCertificate[A]
+  ) extends ProtocolError[A]
+
+  /** The block in the prepare message doesn't extend the previous Q.C. */
+  case class UnsafeExtension[A <: Agreement](
+      sender: A#PKey,
+      message: Message.Prepare[A]
+  ) extends ProtocolError[A]
+
+  /** The block hash in the message doesn't match the block being voted on in this view. */
+  case class UnexpectedBlockHash[A <: Agreement](
+      event: Event.MessageReceived[A],
+      expected: A#Hash
+  ) extends ProtocolError[A]
+
+  /** A message that we received slightly earlier than we expected.
+    *
+    * One reason for this could be that the peer is slightly ahead of us,
+    * e.g. already finished the `Decide` phase and sent out the `NewView`
+    * to us, the next leader, in which case the view number would not
+    * match up. Or maybe a quorum has already formed for the next round
+    * and we receive a `Prepare`, while we're still in `Decide`.
+    *
+    * The host system passing the events and processing the effects
+    * is expected to inspect `TooEarly` messages and decide what to do:
+    * - if the message is for the next round or next phase, then just re-deliver it after the view transition
+    * - if the message is far in the future, perhaps it's best to re-sync the status with everyone
+    */
+  case class TooEarly[A <: Agreement](
+      event: Event.MessageReceived[A],
+      expectedInViewNumber: ViewNumber,
+      expectedInPhase: Phase
+  ) extends ProtocolError[A]
+
+  /** A message we didn't expect to receive in the given state.
+    *
+    * The host system can maintain some metrics so we can see if we're completely out of
+    * alignment with all the other peers.
+ */ + case class Unexpected[A <: Agreement]( + event: Event.MessageReceived[A] + ) extends ProtocolError[A] +} diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala new file mode 100644 index 00000000..73fb58fa --- /dev/null +++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala @@ -0,0 +1,492 @@ +package metronome.hotstuff.consensus.basic + +import metronome.core.Validated +import metronome.hotstuff.consensus.{ViewNumber, Federation} +import scala.concurrent.duration.FiniteDuration + +/** Basic HotStuff protocol state machine. + * + * See https://arxiv.org/pdf/1803.05069.pdf + * + * ``` + * + * PHASE LEADER REPLICA + * | | + * | <--- NewView(prepareQC) ---- | + * # Prepare --------------- | | + * select highQC | | + * create block | | + * | ------ Prepare(block) -----> | + * | | check safety + * | <----- Vote(Prepare) ------- | + * # PreCommit ------------- | | + * | ------ Prepare Q.C. -------> | + * | | save as prepareQC + * | <----- Vote(PreCommit) ----- | + * # Commit ---------------- | | + * | ------ PreCommit Q.C. -----> | + * | | save as lockedQC + * | <----- Vote(Commit) -------- | + * # Decide ---------------- | | + * | ------ Commit Q.C. --------> | + * | | execute block + * | <--- NewView(prepareQC) ---- | + * | | + * + * ``` + */ +case class ProtocolState[A <: Agreement: Block: Signing]( + viewNumber: ViewNumber, + phase: Phase, + publicKey: A#PKey, + signingKey: A#SKey, + federation: Federation[A#PKey], + // Highest QC for which a replica voted Pre-Commit, because it received a Prepare Q.C. from the leader. + prepareQC: QuorumCertificate[A], + // Locked QC, for which a replica voted Commit, because it received a Pre-Commit Q.C. from leader. + lockedQC: QuorumCertificate[A], + // Hash of the block that was last decided upon. + lastExecutedBlockHash: A#Hash, + // Hash of the block the federation is currently voting on. + preparedBlockHash: A#Hash, + // Timeout for the view, so that it can be adjusted next time if necessary. + timeout: FiniteDuration, + // Votes gathered by the leader in this phase. They are guarenteed to be over the same content. + votes: Set[Message.Vote[A]], + // NewView messages gathered by the leader during the Prepare phase. Map so every sender can only give one. + newViews: Map[A#PKey, Message.NewView[A]] +) { + import Message._ + import Effect._ + import Event._ + import ProtocolState._ + import ProtocolError._ + + val leader = federation.leaderOf(viewNumber) + val isLeader = leader == publicKey + + /** The leader has to collect `n-f` signatures into a Q.C. */ + def quorumSize = federation.size - federation.maxFaulty + + /** No state transition. */ + private def stay: Transition[A] = + this -> Nil + + private def moveTo(phase: Phase): ProtocolState[A] = + copy( + viewNumber = if (phase == Phase.Prepare) viewNumber.next else viewNumber, + phase = phase, + votes = Set.empty, + newViews = Map.empty + ) + + /** The round has timed out; send `prepareQC` to the leader + * of the next view and move to that view now. 
+ */ + def handleNextView(e: NextView): Transition[A] = + if (e.viewNumber == viewNumber) { + val next = moveTo(Phase.Prepare) + val effects = Seq( + SendMessage(next.leader, NewView(viewNumber, prepareQC)), + ScheduleNextView(next.viewNumber, next.timeout) + ) + next -> effects + } else stay + + /** A block we asked the host system to create using `Effect.CreateBlock` is + * ready to be broadcasted, if we're still in the same view. + */ + def handleBlockCreated(e: BlockCreated[A]): Transition[A] = + if (e.viewNumber == viewNumber && isLeader && phase == Phase.Prepare) { + // TODO: If the block is empty, we could just repeat the agreement on + // the previous Q.C. to simulate being idle, without timing out. + val effects = broadcast { + Prepare(viewNumber, e.block, e.highQC) + } + this -> effects + } else stay + + /** Filter out messages that are completely invalid, + * independent of the current phase and view number, + * i.e. stateless validation. + * + * This check can be performed before for example the + * block contents in the `Prepare` message are validated, + * so that we don't waste time with spam. + */ + def validateMessage( + e: MessageReceived[A] + ): Either[ProtocolError[A], Validated[MessageReceived[A]]] = { + val currLeader = federation.leaderOf(e.message.viewNumber) + val nextLeader = federation.leaderOf(e.message.viewNumber.next) + + e.message match { + case _ if !federation.contains(e.sender) => + Left(NotFromFederation(e)) + + case m: LeaderMessage[_] if e.sender != currLeader => + Left(NotFromLeader(e, currLeader)) + + case m: ReplicaMessage[_] + if !m.isInstanceOf[NewView[_]] && publicKey != currLeader => + Left(NotToLeader(e, currLeader)) + + case m: NewView[_] if publicKey != nextLeader => + Left(NotToLeader(e, nextLeader)) + + case m: Vote[_] if !Signing[A].validate(e.sender, m) => + Left(InvalidVote(e.sender, m)) + + case m: Quorum[_] + if !Signing[A].validate(federation, m.quorumCertificate) => + Left(InvalidQuorumCertificate(e.sender, m.quorumCertificate)) + + case m: NewView[_] if m.prepareQC.phase != Phase.Prepare => + Left(InvalidQuorumCertificate(e.sender, m.prepareQC)) + + case m: NewView[_] if !Signing[A].validate(federation, m.prepareQC) => + Left(InvalidQuorumCertificate(e.sender, m.prepareQC)) + + case m: Prepare[_] if !Signing[A].validate(federation, m.highQC) => + Left(InvalidQuorumCertificate(e.sender, m.highQC)) + + case _ => + Right(Validated[MessageReceived[A]](e)) + } + } + + /** Handle an incoming message that has already gone through partial validation: + * + * The sender is verified by the network layer and retrieved from the + * lower level protocol message; we know the signatures are correct; + * and the contents of any proposed block have been validated as well, + * so they are safe to be voted on. + * + * Return the updated state and any effects to be carried out in response, + * or an error, so that mismatches can be traced. Discrepancies can arise + * from the state being different or have changed since the message originally + * received. + * + * The structure of the method tries to match the pseudo code of `Algorithm 2` + * in the HotStuff paper. + */ + def handleMessage( + e: Validated[MessageReceived[A]] + ): TransitionAttempt[A] = + phase match { + // Leader: Collect NewViews, create block, boradcast Prepare + // Replica: Wait for Prepare, check safe extension, vote Prepare, move to PreCommit. 
+ case Phase.Prepare => + matchingMsg(e) { + case m: NewView[_] if m.viewNumber == viewNumber.prev && isLeader => + Right(addNewViewAndMaybeCreateBlock(e.sender, m)) + + case m: Prepare[_] if matchingLeader(e) => + if (isSafe(m)) { + val blockHash = Block[A].blockHash(m.block) + val effects = Seq( + sendVote(Phase.Prepare, blockHash) + ) + val next = moveTo(Phase.PreCommit).copy( + preparedBlockHash = blockHash + ) + Right(next -> effects) + } else { + Left(UnsafeExtension(e.sender, m)) + } + } + + // Leader: Collect Prepare votes, broadcast Prepare Q.C. + // Replica: Wait for Prepare Q.C, save prepareQC, vote PreCommit, move to Commit. + case Phase.PreCommit => + matchingMsg(e) { + handleVotes(e, Phase.Prepare) orElse + handleQuorum(e, Phase.Prepare) { m => + val effects = Seq( + sendVote(Phase.PreCommit, m.quorumCertificate.blockHash) + ) + val next = moveTo(Phase.Commit).copy( + prepareQC = m.quorumCertificate + ) + next -> effects + } + } + + // Leader: Collect PreCommit votes, broadcast PreCommit Q.C. + // Replica: Wait for PreCommit Q.C., save lockedQC, vote Commit, move to Decide. + case Phase.Commit => + matchingMsg(e) { + handleVotes(e, Phase.PreCommit) orElse + handleQuorum(e, Phase.PreCommit) { m => + val effects = Seq( + sendVote(Phase.Commit, m.quorumCertificate.blockHash) + ) + val next = moveTo(Phase.Decide).copy( + lockedQC = m.quorumCertificate + ) + next -> effects + } + } + + // Leader: Collect Commit votes, broadcast Commit Q.C. + // Replica: Wait for Commit Q.C., execute block, send NewView, move to Prepare. + case Phase.Decide => + matchingMsg(e) { + handleVotes(e, Phase.Commit) orElse + handleQuorum(e, Phase.Commit) { m => + handleNextView(NextView(viewNumber)) match { + case (next, effects) => + val withExec = ExecuteBlocks( + lastExecutedBlockHash, + m.quorumCertificate + ) +: effects + + next -> withExec + } + } + } + } + + /** The leader's message handling is the same across all phases: + * add the vote to the list; if we reached `n-f` then combine + * into a Q.C. and broadcast. + * + * It can also receive messages beyond the `n-f` it needed, + * which it can ignore. + */ + private def handleVotes( + event: MessageReceived[A], + phase: VotingPhase + ): PartialFunction[Message[A], TransitionAttempt[A]] = { + // Check that a vote is compatible with our current expectations. + case v: Vote[_] + if isLeader && v.viewNumber == viewNumber && + v.phase == phase && + v.blockHash == preparedBlockHash => + Right(addVoteAndMaybeBroadcastQC(v)) + + // Once the leader moves on to the next phase, it can still receive votes + // for the previous one. These can be ignored, they are not unexpected. + case v: Vote[_] + if isLeader && + v.viewNumber == viewNumber && + v.phase.isBefore(phase) && + v.blockHash == preparedBlockHash => + Right(stay) + + // Ignore votes for other blocks. 
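+    // They are not counted; the mismatch is reported as `UnexpectedBlockHash`
+    // so it can be traced.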
+    case v: Vote[_]
+        if isLeader && v.viewNumber == viewNumber &&
+          v.phase == phase &&
+          v.blockHash != preparedBlockHash =>
+      Left(UnexpectedBlockHash(event, preparedBlockHash))
+
+    case v: NewView[_] if isLeader && v.viewNumber == viewNumber.prev =>
+      Right(stay)
+  }
+
+  private def handleQuorum(
+      event: Validated[MessageReceived[A]],
+      phase: VotingPhase
+  )(
+      f: Quorum[A] => Transition[A]
+  ): PartialFunction[Message[A], TransitionAttempt[A]] = {
+    case m: Quorum[_]
+        if matchingLeader(event) &&
+          m.quorumCertificate.viewNumber == viewNumber &&
+          m.quorumCertificate.phase == phase &&
+          m.quorumCertificate.blockHash == preparedBlockHash =>
+      Right(f(m))
+
+    case m: Quorum[_]
+        if matchingLeader(event) &&
+          m.quorumCertificate.viewNumber == viewNumber &&
+          m.quorumCertificate.phase == phase &&
+          m.quorumCertificate.blockHash != preparedBlockHash =>
+      Left(UnexpectedBlockHash(event, preparedBlockHash))
+  }
+
+  /** Categorize unexpected messages into ones that can be re-queued or discarded.
+    *
+    * At this point we already know that the messages have been validated once,
+    * so at least they are consistent with their own view, e.g. sent to the
+    * leader of their own view.
+    */
+  private def handleUnexpected(e: MessageReceived[A]): ProtocolError[A] = {
+    e.message match {
+      case m: NewView[_] if m.viewNumber >= viewNumber =>
+        TooEarly(e, m.viewNumber.next, Phase.Prepare)
+
+      case m: Prepare[_] if m.viewNumber > viewNumber =>
+        TooEarly(e, m.viewNumber, Phase.Prepare)
+
+      case m: Vote[_]
+          if m.viewNumber > viewNumber ||
+            m.viewNumber == viewNumber && m.phase.isAfter(phase.prev) =>
+        TooEarly(e, m.viewNumber, m.phase.next)
+
+      case m: Quorum[_]
+          if m.quorumCertificate.viewNumber > viewNumber ||
+            m.quorumCertificate.viewNumber == viewNumber &&
+            m.quorumCertificate.phase.isAfter(phase.prev) =>
+        TooEarly(e, m.viewNumber, m.quorumCertificate.phase.next)
+
+      case _ =>
+        Unexpected(e)
+    }
+  }
+
+  /** Try to match a message to expectations, or return Unexpected. */
+  private def matchingMsg(e: MessageReceived[A])(
+      pf: PartialFunction[Message[A], TransitionAttempt[A]]
+  ): TransitionAttempt[A] =
+    pf.lift(e.message).getOrElse(Left(handleUnexpected(e)))
+
+  /** Check that a message is coming from the view leader and is for the current view. */
+  private def matchingLeader(e: MessageReceived[A]): Boolean =
+    e.message.viewNumber == viewNumber &&
+      e.sender == federation.leaderOf(viewNumber)
+
+  /** Broadcast a message from the leader to all replicas.
+    *
+    * This includes the leader sending a message to itself,
+    * because the leader is a replica as well. The effect
+    * system should take care that these messages don't
+    * try to go over the network.
+    *
+    * NOTE: Some messages trigger transitions; it's best
+    * if the message sent to the leader by itself is handled
+    * before the other messages are sent out, to avoid any
+    * votes coming back in phases that don't yet expect them.
+    */
+  private def broadcast(m: Message[A]): Seq[Effect[A]] =
+    federation.publicKeys.map { pk =>
+      SendMessage(pk, m)
+    }
+
+  /** Produce a vote with the current view number. */
+  private def vote(phase: VotingPhase, blockHash: A#Hash): Vote[A] = {
+    val signature = Signing[A].sign(signingKey, phase, viewNumber, blockHash)
+    Vote(viewNumber, phase, blockHash, signature)
+  }
+
+  private def sendVote(phase: VotingPhase, blockHash: A#Hash): SendMessage[A] =
+    SendMessage(leader, vote(phase, blockHash))
+
+  /** Check that the proposed new block extends the locked Q.C.
(safety)
+    * or that the Quorum Certificate is newer than the locked Q.C. (liveness).
+    */
+  private def isSafe(m: Prepare[A]): Boolean = {
+    val valid = isExtension(m.block, m.highQC)
+    val safe = isExtension(m.block, lockedQC)
+    val live = m.highQC.viewNumber > lockedQC.viewNumber
+
+    valid && (safe || live)
+  }
+
+  /** Check that a block extends from the one in the Q.C.
+    *
+    * Currently only allows a direct parent-child relationship,
+    * which means each leader is expected to create at most one
+    * block on top of the previous high Q.C.
+    */
+  private def isExtension(block: A#Block, qc: QuorumCertificate[A]): Boolean =
+    qc.blockHash == Block[A].parentBlockHash(block)
+
+  /** Register a new vote; if there are enough to form a new Q.C.,
+    * do so and broadcast it.
+    */
+  private def addVoteAndMaybeBroadcastQC(vote: Vote[A]): Transition[A] = {
+    // The guards in `handleVotes` made sure all votes are for the same content,
+    // and `moveTo` clears the votes, so they should be uniform.
+    val next = copy(votes = votes + vote)
+
+    // Only make the quorum certificate once.
+    val effects =
+      if (votes.size < quorumSize && next.votes.size == quorumSize) {
+        val vs = next.votes.toSeq
+        val qc = QuorumCertificate(
+          phase = vs.head.phase,
+          viewNumber = vs.head.viewNumber,
+          blockHash = vs.head.blockHash,
+          signature = Signing[A].combine(vs.map(_.signature))
+        )
+        broadcast {
+          Quorum(viewNumber, qc)
+        }
+      } else Nil
+
+    // The move to the next phase will be triggered when the Q.C. is delivered.
+    next -> effects
+  }
+
+  /** Register a NewView from a replica; if there are enough, select the High Q.C. and create a block. */
+  private def addNewViewAndMaybeCreateBlock(
+      sender: A#PKey,
+      newView: NewView[A]
+  ): Transition[A] = {
+    // We already checked that these are for the current view.
+    val next = copy(newViews =
+      newViews.updated(
+        sender,
+        newViews.get(sender).fold(newView) { oldView =>
+          if (newView.prepareQC.viewNumber > oldView.prepareQC.viewNumber)
+            newView
+          else oldView
+        }
+      )
+    )
+
+    // Only make a block once.
+    val effects =
+      if (newViews.size < quorumSize && next.newViews.size == quorumSize) {
+        List(
+          CreateBlock(
+            viewNumber,
+            highQC = next.newViews.values.map(_.prepareQC).maxBy(_.viewNumber)
+          )
+        )
+      } else Nil
+
+    // The move to the next phase will be triggered when the block is created.
+    next -> effects
+  }
+}
+
+object ProtocolState {
+
+  /** The result of a state transition is the next state and some effects
+    * that can be carried out in parallel.
+    */
+  type Transition[A <: Agreement] = (ProtocolState[A], Seq[Effect[A]])
+
+  type TransitionAttempt[A <: Agreement] =
+    Either[ProtocolError[A], Transition[A]]
+
+  /** Return an initial set of effects; at the minimum, the timeout for the first round. */
+  def init[A <: Agreement](state: ProtocolState[A]): Seq[Effect[A]] =
+    List(Effect.ScheduleNextView(state.viewNumber, state.timeout))
+
+  private implicit class PhaseOps(val a: Phase) extends AnyVal {
+    import Phase._
+
+    /** Check that *within the same view* phase `a` precedes phase `b`. */
+    def isBefore(b: Phase): Boolean =
+      (a, b) match {
+        case (Prepare, PreCommit | Commit | Decide) => true
+        case (PreCommit, Commit | Decide) => true
+        case (Commit, Decide) => true
+        case _ => false
+      }
+
+    /** Check that *within the same view* phase `a` follows phase `b`.
*/
+    def isAfter(b: Phase): Boolean =
+      (a, b) match {
+        case (PreCommit, Prepare) => true
+        case (Commit, Prepare | PreCommit) => true
+        case (Decide, Prepare | PreCommit | Commit) => true
+        case _ => false
+      }
+  }
+}
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/QuorumCertificate.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/QuorumCertificate.scala
new file mode 100644
index 00000000..043dea65
--- /dev/null
+++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/QuorumCertificate.scala
@@ -0,0 +1,14 @@
+package metronome.hotstuff.consensus.basic
+
+import metronome.crypto.GroupSignature
+import metronome.hotstuff.consensus.ViewNumber
+
+/** A Quorum Certificate (QC) over a tuple (message-type, view-number, block-hash) is a data type
+  * that combines a collection of signatures for the same tuple signed by (n − f) replicas.
+  */
+case class QuorumCertificate[A <: Agreement](
+    phase: VotingPhase,
+    viewNumber: ViewNumber,
+    blockHash: A#Hash,
+    signature: GroupSignature[A#PKey, (VotingPhase, ViewNumber, A#Hash), A#GSig]
+)
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Signing.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Signing.scala
new file mode 100644
index 00000000..81a74f5e
--- /dev/null
+++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Signing.scala
@@ -0,0 +1,74 @@
+package metronome.hotstuff.consensus.basic
+
+import metronome.crypto.{PartialSignature, GroupSignature}
+import metronome.hotstuff.consensus.{ViewNumber, Federation}
+
+trait Signing[A <: Agreement] {
+
+  def sign(
+      signingKey: A#SKey,
+      phase: VotingPhase,
+      viewNumber: ViewNumber,
+      blockHash: A#Hash
+  ): Signing.PartialSig[A]
+
+  def combine(
+      signatures: Seq[Signing.PartialSig[A]]
+  ): Signing.GroupSig[A]
+
+  /** Validate that a partial signature was created by a given public key.
+    *
+    * Check that the signer is part of the federation.
+    */
+  def validate(
+      publicKey: A#PKey,
+      signature: Signing.PartialSig[A],
+      phase: VotingPhase,
+      viewNumber: ViewNumber,
+      blockHash: A#Hash
+  ): Boolean
+
+  /** Validate a group signature.
+    *
+    * Check that enough members of the federation signed,
+    * and only the members.
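+    * (Per the `QuorumCertificate` definition above, "enough" means
+    * signatures from `n - f` replicas.)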
+    */
+  def validate(
+      federation: Federation[A#PKey],
+      signature: Signing.GroupSig[A],
+      phase: VotingPhase,
+      viewNumber: ViewNumber,
+      blockHash: A#Hash
+  ): Boolean
+
+  def validate(sender: A#PKey, vote: Message.Vote[A]): Boolean =
+    validate(
+      sender,
+      vote.signature,
+      vote.phase,
+      vote.viewNumber,
+      vote.blockHash
+    )
+
+  def validate(
+      federation: Federation[A#PKey],
+      quorumCertificate: QuorumCertificate[A]
+  ): Boolean =
+    validate(
+      federation,
+      quorumCertificate.signature,
+      quorumCertificate.phase,
+      quorumCertificate.viewNumber,
+      quorumCertificate.blockHash
+    )
+}
+
+object Signing {
+  def apply[A <: Agreement: Signing]: Signing[A] = implicitly[Signing[A]]
+
+  type PartialSig[A <: Agreement] =
+    PartialSignature[A#PKey, (VotingPhase, ViewNumber, A#Hash), A#PSig]
+
+  type GroupSig[A <: Agreement] =
+    GroupSignature[A#PKey, (VotingPhase, ViewNumber, A#Hash), A#GSig]
+}
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/package.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/package.scala
new file mode 100644
index 00000000..ef8e0249
--- /dev/null
+++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/package.scala
@@ -0,0 +1,5 @@
+package metronome.hotstuff
+
+package object consensus {
+  type ViewNumber = ViewNumber.Tagged
+}
diff --git a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala
new file mode 100644
index 00000000..29b7881f
--- /dev/null
+++ b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala
@@ -0,0 +1,931 @@
+package metronome.hotstuff.consensus.basic
+
+import metronome.crypto.{GroupSignature, PartialSignature}
+import metronome.hotstuff.consensus.{ViewNumber, Federation}
+import org.scalacheck.commands.Commands
+import org.scalacheck.{Properties, Gen, Prop}
+import org.scalacheck.Arbitrary.arbitrary
+import org.scalacheck.Prop.{propBoolean, all, falsified}
+import scala.annotation.nowarn
+import scala.concurrent.duration._
+import scala.util.{Try, Failure, Success}
+
+object HotStuffProtocolProps extends Properties("Basic HotStuff") {
+
+  property("protocol") = HotStuffProtocolCommands.property()
+
+}
+
+/** State machine tests for the Basic HotStuff protocol.
+  *
+  * The `Model` class reflects enough of the state that we can generate valid
+  * and invalid commands using `genCommand`. Each `Command` has its own
+  * post-condition check comparing the model state to the actual protocol results.
+  */
+object HotStuffProtocolCommands extends Commands {
+
+  case class TestBlock(blockHash: Int, parentBlockHash: Int, command: String)
+
+  object TestAgreement extends Agreement {
+    type Block = TestBlock
+    type Hash = Int
+    type PSig = Long
+    type GSig = Seq[Long]
+    type PKey = Int
+    type SKey = Int
+  }
+  type TestAgreement = TestAgreement.type
+
+  val genesisQC = QuorumCertificate[TestAgreement](
+    phase = Phase.Prepare,
+    viewNumber = ViewNumber(0),
+    blockHash = 0,
+    signature = GroupSignature(Nil)
+  )
+
+  implicit val block: Block[TestAgreement] = new Block[TestAgreement] {
+    override def blockHash(b: TestBlock) = b.blockHash
+    override def parentBlockHash(b: TestBlock) = b.parentBlockHash
+  }
+
+  // Going to use publicKey == -1 * signingKey.
+  def mockSigningKey(pk: TestAgreement.PKey): TestAgreement.SKey = -1 * pk
+
+  // Mock signatures.
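+  // A tiny worked example of the scheme (values are hypothetical):
+  // for pk = 42 and some message hash h,
+  //   val sk  = mockSigningKey(42)  // -42
+  //   val sig = h + sk              // sign
+  //   val pk2 = (sig - h) * -1      // unsign, recovers 42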
+ implicit val mockSigning: Signing[TestAgreement] = + new Signing[TestAgreement] { + private def hash( + phase: VotingPhase, + viewNumber: ViewNumber, + blockHash: TestAgreement.Hash + ): TestAgreement.Hash = + (phase, viewNumber, blockHash).hashCode + + private def isGenesis( + phase: VotingPhase, + viewNumber: ViewNumber, + blockHash: TestAgreement.Hash + ): Boolean = + phase == genesisQC.phase && + viewNumber == genesisQC.viewNumber && + blockHash == genesisQC.blockHash + + private def sign( + sk: TestAgreement.SKey, + h: TestAgreement.Hash + ): TestAgreement.PSig = + h + sk + + private def unsign( + s: TestAgreement.PSig, + h: TestAgreement.Hash + ): TestAgreement.PKey = + ((s - h) * -1).toInt + + override def sign( + signingKey: TestAgreement#SKey, + phase: VotingPhase, + viewNumber: ViewNumber, + blockHash: TestAgreement.Hash + ): Signing.PartialSig[TestAgreement] = { + val h = hash(phase, viewNumber, blockHash) + val s = sign(signingKey, h) + PartialSignature(s) + } + + override def combine( + signatures: Seq[Signing.PartialSig[TestAgreement]] + ): Signing.GroupSig[TestAgreement] = + GroupSignature(signatures.map(_.sig)) + + override def validate( + publicKey: TestAgreement.PKey, + signature: Signing.PartialSig[TestAgreement], + phase: VotingPhase, + viewNumber: ViewNumber, + blockHash: TestAgreement.Hash + ): Boolean = { + val h = hash(phase, viewNumber, blockHash) + publicKey == unsign(signature.sig, h) + } + + override def validate( + federation: Federation[TestAgreement.PKey], + signature: Signing.GroupSig[TestAgreement], + phase: VotingPhase, + viewNumber: ViewNumber, + blockHash: TestAgreement.Hash + ): Boolean = { + if (isGenesis(phase, viewNumber, blockHash)) { + signature.sig.isEmpty + } else { + val h = hash(phase, viewNumber, blockHash) + + signature.sig.size == federation.size - federation.maxFaulty && + signature.sig.forall { sig => + federation.publicKeys.exists { publicKey => + publicKey == unsign(sig, h) + } + } + } + } + } + + case class Model( + n: Int, + f: Int, + viewNumber: ViewNumber, + phase: Phase, + federation: Vector[TestAgreement.PKey], + ownIndex: Int, + votesFrom: Set[TestAgreement.PKey], + newViewsFrom: Set[TestAgreement.PKey], + newViewsHighQC: QuorumCertificate[TestAgreement], + prepareQCs: List[QuorumCertificate[TestAgreement]], + maybeBlockHash: Option[TestAgreement.Hash] + ) { + def publicKey = federation(ownIndex) + + // Using a signing key that works with the mock validation. + def signingKey = mockSigningKey(publicKey) + + def isLeader = viewNumber % n == ownIndex + def leader = federation((viewNumber % n).toInt) + + def `n - f` = n - f + } + + // Keep a variable state in our System Under Test. 
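+  // The scalacheck `Commands` framework drives the real `ProtocolState`
+  // (wrapped in `Protocol` below) and the simplified `Model` in lockstep:
+  // `genCommand` picks a command based on the model, `run` applies it to the
+  // protocol, `nextState` advances the model, and each command's
+  // `postCondition` compares the two.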
+  class Protocol(var state: ProtocolState[TestAgreement])
+
+  type Sut = Protocol
+  type State = Model
+
+  @nowarn
+  override def canCreateNewSut(
+      newState: State,
+      initSuts: Traversable[State],
+      runningSuts: Traversable[Sut]
+  ): Boolean = true
+
+  override def initialPreCondition(state: State): Boolean =
+    state.viewNumber == 1 &&
+      state.phase == Phase.Prepare &&
+      state.votesFrom.isEmpty &&
+      state.newViewsFrom.isEmpty
+
+  override def newSut(state: State): Sut =
+    new Protocol(
+      ProtocolState[TestAgreement](
+        viewNumber = ViewNumber(state.viewNumber),
+        phase = state.phase,
+        publicKey = state.publicKey,
+        signingKey = state.signingKey,
+        federation = Federation(state.federation),
+        prepareQC = genesisQC,
+        lockedQC = genesisQC,
+        lastExecutedBlockHash = genesisQC.blockHash,
+        preparedBlockHash = genesisQC.blockHash,
+        timeout = 10.seconds,
+        votes = Set.empty,
+        newViews = Map.empty
+      )
+    )
+
+  override def destroySut(sut: Sut): Unit = ()
+
+  override def genInitialState: Gen[State] =
+    for {
+      // Pick the max Byzantine nodes first, then size the federation based on that.
+      f <- Gen.choose(0, 3)
+      n = 3 * f + 1
+
+      ownIndex <- Gen.choose(0, n - 1)
+
+      // Create unique keys.
+      publicKeys <- Gen
+        .listOfN(n, Gen.posNum[Int])
+        .map { ns =>
+          ns.tail.scan(ns.head)(_ + _)
+        }
+        .retryUntil(_.size == n)
+
+    } yield Model(
+      n,
+      f,
+      viewNumber = ViewNumber(1),
+      phase = Phase.Prepare,
+      federation = publicKeys.toVector,
+      ownIndex = ownIndex,
+      votesFrom = Set.empty,
+      newViewsFrom = Set.empty,
+      newViewsHighQC = genesisQC,
+      prepareQCs = List(genesisQC),
+      maybeBlockHash = None
+    )
+
+  /** Generate valid and invalid commands depending on state.
+    *
+    * Invalid commands are marked as such, so we don't have to repeat validations here
+    * to tell what we expect the response to be. We can send invalid commands from up
+    * to `f` Byzantine members of the federation. The rest should be honest, but they
+    * might still send commands which are delivered in a different state, e.g. because
+    * they didn't have the data available to validate a proposal.
+    */
+  override def genCommand(state: State): Gen[Command] =
+    Gen.frequency(
+      7 -> genValid(state),
+      2 -> genInvalid(state),
+      1 -> genTimeout(state)
+    )
+
+  def fail(msg: String) = msg |: falsified
+
+  def votingPhaseFor(phase: Phase): Option[VotingPhase] =
+    phase match {
+      case Phase.Prepare => None
+      case Phase.PreCommit => Some(Phase.Prepare)
+      case Phase.Commit => Some(Phase.PreCommit)
+      case Phase.Decide => Some(Phase.Commit)
+    }
+
+  def genTimeout(state: State): Gen[NextViewCmd] =
+    Gen.const(NextViewCmd(state.viewNumber))
+
+  /** Generate a valid input for the given state. */
+  def genValid(state: State): Gen[Command] = {
+    val usables: List[Gen[Command]] =
+      List(
+        // The leader may receive NewView any time.
+        genValidNewView(state) ->
+          state.isLeader,
+        // The leader can get a block generated by the host system in Prepare.
+        genValidBlock(state) ->
+          (state.phase == Phase.Prepare && state.isLeader && state.maybeBlockHash.isEmpty),
+        // Replicas can get a Prepared block in Prepare (for the leader this should match the created block).
+        genValidPrepare(state) ->
+          (state.phase == Phase.Prepare &&
+            (state.isLeader && state.maybeBlockHash.isDefined ||
+              !state.isLeader && state.maybeBlockHash.isEmpty)),
+        // The leader can get votes on the block it created, except in Prepare.
+        genValidVote(state) ->
+          (state.phase != Phase.Prepare && state.isLeader && state.maybeBlockHash.isDefined),
+        // Replicas can get a Quorum on the block that was Prepared, except in Prepare.
+        genValidQuorum(state) ->
+          (state.phase != Phase.Prepare && state.maybeBlockHash.isDefined)
+      ).collect {
+        case (gen, usable) if usable => gen
+      }
+
+    usables match {
+      case Nil => genTimeout(state)
+      case one :: Nil => one
+      case one :: two :: rest => Gen.oneOf(one, two, rest: _*)
+    }
+  }
+
+  /** Take a valid command and turn it invalid. */
+  def genInvalid(state: State): Gen[Command] = {
+    def nextVoting(phase: Phase): VotingPhase = {
+      phase.next match {
+        case p: VotingPhase => p
+        case p => nextVoting(p)
+      }
+    }
+
+    def invalidateHash(h: TestAgreement.Hash) = h * 2 + 1
+    def invalidateSig(s: TestAgreement.PSig) = s * 2 + 1
+    def invalidateViewNumber(v: ViewNumber) = ViewNumber(v + 1000)
+    def invalidSender = state.federation.sum + 1
+
+    def invalidateQC(
+        qc: QuorumCertificate[TestAgreement]
+    ): Gen[QuorumCertificate[TestAgreement]] = {
+      Gen.oneOf(
+        genLazy(
+          qc.copy[TestAgreement](blockHash = invalidateHash(qc.blockHash))
+        ),
+        genLazy(qc.copy[TestAgreement](phase = nextVoting(qc.phase))),
+        genLazy(
+          qc.copy[TestAgreement](viewNumber =
+            invalidateViewNumber(qc.viewNumber)
+          )
+        ),
+        genLazy(
+          qc.copy[TestAgreement](signature =
+            // The quorum cert has no items, so add one to make it different.
+            qc.signature.copy(sig = 0L +: qc.signature.sig.map(invalidateSig))
+          )
+        )
+      )
+    }
+
+    implicit class StringOps(label: String) {
+      def `!`(gen: Gen[MessageCmd]): Gen[InvalidCmd] =
+        gen.map(cmd => InvalidCmd(label, cmd, isEarly = label == "viewNumber"))
+    }
+
+    genValid(state) flatMap {
+      case msg: MessageCmd =>
+        msg match {
+          case cmd @ NewViewCmd(_, m) =>
+            Gen.oneOf(
+              "sender" ! genLazy(cmd.copy(sender = invalidSender)),
+              "viewNumber" ! genLazy(
+                cmd.copy(message =
+                  m.copy(viewNumber = invalidateViewNumber(m.viewNumber))
+                )
+              ),
+              "prepareQC" ! invalidateQC(m.prepareQC).map { qc =>
+                cmd.copy(message = m.copy(prepareQC = qc))
+              }
+            )
+
+          case cmd @ PrepareCmd(_, m) =>
+            Gen.oneOf(
+              "sender" ! genLazy(cmd.copy(sender = invalidSender)),
+              "viewNumber" ! genLazy(
+                cmd.copy(message = m.copy(viewNumber = m.viewNumber.next))
+              ),
+              "parentBlockHash" ! genLazy(
+                cmd.copy(message =
+                  m.copy[TestAgreement](block =
+                    m.block
+                      .copy(parentBlockHash =
+                        invalidateHash(m.block.parentBlockHash)
+                      )
+                  )
+                )
+              ),
+              "highQC" ! invalidateQC(m.highQC).map { qc =>
+                cmd.copy(message = m.copy(highQC = qc))
+              }
+            )
+
+          case cmd @ VoteCmd(_, m) =>
+            Gen.oneOf(
+              "sender" ! genLazy(cmd.copy(sender = invalidSender)),
+              "viewNumber" ! genLazy(
+                cmd.copy(message =
+                  m.copy[TestAgreement](viewNumber =
+                    invalidateViewNumber(m.viewNumber)
+                  )
+                )
+              ),
+              "phase" ! genLazy(
+                cmd.copy(message =
+                  m.copy[TestAgreement](phase = nextVoting(m.phase))
+                )
+              ),
+              "blockHash" ! genLazy(
+                cmd.copy(message =
+                  m.copy[TestAgreement](blockHash = invalidateHash(m.blockHash))
+                )
+              ),
+              "signature" ! genLazy(
+                cmd.copy(message =
+                  m.copy[TestAgreement](signature =
+                    m.signature.copy(sig = invalidateSig(m.signature.sig))
+                  )
+                )
+              )
+            )
+
+          case cmd @ QuorumCmd(_, m) =>
+            Gen.oneOf(
+              "sender" ! genLazy(cmd.copy(sender = invalidSender)),
+              "quorumCertificate" ! invalidateQC(m.quorumCertificate).map {
+                qc =>
+                  cmd.copy(message = m.copy(quorumCertificate = qc))
+              }
+            )
+        }
+
+      // Leave anything else alone.
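+      // (Only network messages are invalidated: locally produced inputs such
+      // as `NextViewCmd` and `BlockCreatedCmd` don't model data a Byzantine
+      // peer could forge.)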
+      case other => Gen.const(other)
+    }
+  }
+
+  /** A constant expression, but only evaluated if the generator is chosen,
+    * which allows us to have conditions attached to it.
+    */
+  def genLazy[A](a: => A): Gen[A] = Gen.lzy(Gen.const(a))
+
+  /** Replica sends a new view with an arbitrary prepare QC. */
+  def genValidNewView(state: State): Gen[NewViewCmd] =
+    for {
+      s <- Gen.oneOf(state.federation)
+      qc <- Gen.oneOf(state.prepareQCs)
+      m = Message.NewView(ViewNumber(state.viewNumber - 1), qc)
+    } yield NewViewCmd(s, m)
+
+  /** Leader creates a valid block on top of the saved High Q.C. */
+  def genValidBlock(state: State): Gen[BlockCreatedCmd] =
+    for {
+      c <- arbitrary[String]
+      h <- genHash
+      qc = state.prepareQCs.head // So that it's a safe extension.
+      p = qc.blockHash
+      b = TestBlock(h, p, c)
+      e = Event
+        .BlockCreated[TestAgreement](state.viewNumber, b, qc)
+    } yield BlockCreatedCmd(e)
+
+  /** Leader sends a valid Prepare command with the generated block. */
+  def genValidPrepare(state: State): Gen[PrepareCmd] =
+    for {
+      blockCreated <- genValidBlock(state).map(_.event).map { bc =>
+        bc.copy[TestAgreement](
+          block = bc.block.copy(
+            blockHash = state.maybeBlockHash.getOrElse(bc.block.blockHash)
+          )
+        )
+      }
+    } yield {
+      PrepareCmd(
+        sender = state.leader,
+        message = Message.Prepare(
+          state.viewNumber,
+          blockCreated.block,
+          blockCreated.highQC
+        )
+      )
+    }
+
+  /** Replica sends a valid vote for the current phase and prepared block. */
+  def genValidVote(state: State): Gen[VoteCmd] =
+    for {
+      blockHash <- genLazy {
+        state.maybeBlockHash.getOrElse(sys.error("No block to vote on."))
+      }
+      // The leader is expecting votes for the previous phase.
+      phase = votingPhaseFor(state.phase).getOrElse(
+        sys.error(s"No voting phase for ${state.phase}")
+      )
+      sender <- Gen.oneOf(state.federation)
+      vote = Message.Vote[TestAgreement](
+        state.viewNumber,
+        phase,
+        blockHash,
+        signature = mockSigning.sign(
+          mockSigningKey(sender),
+          phase,
+          state.viewNumber,
+          blockHash
+        )
+      )
+    } yield VoteCmd(sender, vote)
+
+  /** Leader sends a valid quorum from the collected votes. */
+  def genValidQuorum(state: State): Gen[QuorumCmd] =
+    for {
+      blockHash <- genLazy {
+        state.maybeBlockHash.getOrElse(sys.error("No block for quorum."))
+      }
+      pks <- Gen.pick(state.`n - f`, state.federation)
+      // The replica is expecting the Q.C. for the previous phase.
+      phase = votingPhaseFor(state.phase).getOrElse(
+        sys.error(s"No voting phase for ${state.phase}")
+      )
+      qc = QuorumCertificate[TestAgreement](
+        phase,
+        state.viewNumber,
+        blockHash,
+        signature = mockSigning.combine(
+          pks.toList.map { pk =>
+            mockSigning.sign(
+              mockSigningKey(pk),
+              phase,
+              state.viewNumber,
+              blockHash
+            )
+          }
+        )
+      )
+      q = Message.Quorum(state.viewNumber, qc)
+    } yield QuorumCmd(state.leader, q)
+
+  // A positive hash, not the same as Genesis.
+  val genHash: Gen[TestAgreement.Hash] =
+    arbitrary[Int].map(math.abs(_) + 1)
+
+  /** Timeout. */
+  case class NextViewCmd(viewNumber: ViewNumber) extends Command {
+    type Result = ProtocolState.Transition[TestAgreement]
+
+    def run(sut: Sut): Result = {
+      sut.state.handleNextView(Event.NextView(viewNumber)) match {
+        case result @ (next, _) =>
+          sut.state = next
+          result
+      }
+    }
+
+    def nextState(state: State): State =
+      state.copy(
+        viewNumber = ViewNumber(state.viewNumber + 1),
+        phase = Phase.Prepare,
+        votesFrom = Set.empty,
+        // In this model there's no guaranteed message from the leader to itself.
+        newViewsFrom = Set.empty,
+        newViewsHighQC = genesisQC,
+        maybeBlockHash = None
+      )
+
+    def preCondition(state: State): Boolean =
+      viewNumber == state.viewNumber
+
+    def postCondition(state: Model, result: Try[Result]): Prop =
+      "NextView" |: {
+        result match {
+          case Failure(exception) =>
+            fail(s"unexpected $exception")
+
+          case Success((next, effects)) =>
+            val propNewView = effects
+              .collectFirst {
+                case Effect.SendMessage(
+                      recipient,
+                      Message.NewView(viewNumber, prepareQC)
+                    ) =>
+                  "sends the new view to the next leader" |:
+                    recipient == next.leader &&
+                    viewNumber == state.viewNumber &&
+                    prepareQC == next.prepareQC
+              }
+              .getOrElse(fail("didn't send the new view"))
+
+            val propSchedule = effects
+              .collectFirst {
+                case Effect.ScheduleNextView(
+                      viewNumber,
+                      timeout
+                    ) =>
+                  "schedules the next view" |:
+                    viewNumber == next.viewNumber &&
+                    timeout == next.timeout
+              }
+              .getOrElse(fail("didn't schedule the next view"))
+
+            val propNext = "goes to the next phase" |:
+              next.phase == Phase.Prepare &&
+              next.viewNumber == state.viewNumber + 1 &&
+              next.votes.isEmpty &&
+              next.newViews.isEmpty
+
+            propNext &&
+            propNewView &&
+            propSchedule &&
+            ("only has the expected effects" |: effects.size == 2)
+        }
+      }
+  }
+
+  /** Common logic of handling a received message. */
+  sealed trait MessageCmd extends Command {
+    type Result = ProtocolState.TransitionAttempt[TestAgreement]
+
+    def sender: TestAgreement.PKey
+    def message: Message[TestAgreement]
+
+    override def run(sut: Protocol): Result = {
+      val event = Event.MessageReceived(sender, message)
+      sut.state.validateMessage(event).flatMap(sut.state.handleMessage).map {
+        case result @ (next, _) =>
+          sut.state = next
+          result
+      }
+    }
+  }
+
+  /** NewView from a replica to the leader. */
+  case class NewViewCmd(
+      sender: TestAgreement.PKey,
+      message: Message.NewView[TestAgreement]
+  ) extends MessageCmd {
+    override def nextState(state: State): State =
+      state.copy(
+        newViewsFrom = state.newViewsFrom + sender,
+        newViewsHighQC =
+          if (message.prepareQC.viewNumber > state.newViewsHighQC.viewNumber)
+            message.prepareQC
+          else state.newViewsHighQC
+      )
+
+    override def preCondition(state: State): Boolean =
+      state.isLeader && state.viewNumber == message.viewNumber + 1
+
+    override def postCondition(
+        state: Model,
+        result: Try[Result]
+    ): Prop = {
+      val nextS = nextState(state)
+      "NewView" |: {
+        if (
+          state.phase == Phase.Prepare &&
+          state.newViewsFrom.size != state.`n - f` &&
+          nextS.newViewsFrom.size == state.`n - f`
+        ) {
+          result match {
+            case Success(Right((next, effects))) =>
+              val newViewsMax = nextS.newViewsHighQC.viewNumber
+              val highestView = effects.headOption match {
+                case Some(Effect.CreateBlock(_, highQC)) =>
+                  highQC.viewNumber.toInt
+                case _ => -1
+              }
+
+              "n-f collected" |: all(
+                s"stays in the phase (${state.phase} -> ${next.phase})" |: next.phase == state.phase,
+                "records newView" |: next.newViews.size == state.`n - f`,
+                "creates a block and nothing else" |: effects.size == 1 &&
+                  effects.head.isInstanceOf[Effect.CreateBlock[_]],
+                s"selects the highest QC: $highestView ?= $newViewsMax" |: highestView == newViewsMax
+              )
+            case err =>
+              fail(s"unexpected $err")
+          }
+        } else {
+          result match {
+            case Success(Right((next, effects))) =>
+              "n-f not collected" |: all(
+                s"stays in the same phase (${state.phase} -> ${next.phase})" |: next.phase == state.phase,
+                "doesn't create more effects" |: effects.isEmpty
+              )
+            case err =>
+              fail(s"unexpected $err")
+          }
+        }
+      }
+    }
+  }
+
+  /** The leader is handed the block
created by the host system. */ + case class BlockCreatedCmd(event: Event.BlockCreated[TestAgreement]) + extends Command { + type Result = ProtocolState.Transition[TestAgreement] + + override def run(sut: Protocol): Result = { + sut.state.handleBlockCreated(event) match { + case result @ (next, _) => + sut.state = next + result + } + } + + override def nextState(state: State): State = + state.copy( + maybeBlockHash = Some(event.block.blockHash) + ) + + override def preCondition(state: State): Boolean = + event.viewNumber == state.viewNumber + + override def postCondition( + state: State, + result: Try[Result] + ): Prop = { + "BlockCreated" |: { + result match { + case Success((next, effects)) => + all( + "stay in Prepare" |: next.phase == Phase.Prepare, + "broadcast to all" |: effects.size == state.federation.size, + all( + effects.map { + case Effect.SendMessage(_, m: Message.Prepare[_]) => + all( + "send prepared block" |: m.block == event.block, + "send highQC" |: m.highQC == event.highQC + ) + case other => + fail(s"expected Prepare message: $other") + }: _* + ) + ) + case Failure(ex) => + fail(s"failed with $ex") + } + } + } + } + + /** Prepare from leader to a replica. */ + case class PrepareCmd( + sender: TestAgreement.PKey, + message: Message.Prepare[TestAgreement] + ) extends MessageCmd { + override def nextState(state: State): State = { + state.copy( + phase = Phase.PreCommit, + maybeBlockHash = Some(message.block.blockHash) + ) + } + + override def preCondition(state: State): Boolean = { + message.viewNumber == state.viewNumber && + state.phase == Phase.Prepare && + (state.isLeader && state.maybeBlockHash.isDefined || + !state.isLeader && state.maybeBlockHash.isEmpty) + } + + override def postCondition( + state: Model, + result: Try[Result] + ): Prop = { + "Prepare" |: { + result match { + case Success(Right((next, effects))) => + all( + "move to PreCommit" |: next.phase == Phase.PreCommit, + "cast a vote" |: effects.size == 1, + effects.head match { + case Effect.SendMessage( + recipient, + Message.Vote(_, phase, blockHash, _) + ) => + all( + "vote Prepare" |: phase == Phase.Prepare, + "send to leader" |: recipient == state.leader, + "vote on block" |: blockHash == message.block.blockHash + ) + case other => + fail(s"unexpected effect $other") + } + ) + case other => + fail(s"unexpected result $other") + } + } + } + } + + /** A Vote from a replica to the leader. 
*/ + case class VoteCmd( + sender: TestAgreement.PKey, + message: Message.Vote[TestAgreement] + ) extends MessageCmd { + override def nextState(state: State): State = + state.copy( + votesFrom = state.votesFrom + sender + ) + + override def preCondition(state: State): Boolean = + state.isLeader && + state.viewNumber == message.viewNumber && + votingPhaseFor(state.phase).contains(message.phase) && + state.maybeBlockHash.contains(message.blockHash) + + override def postCondition(state: Model, result: Try[Result]): Prop = { + "Vote" |: { + result match { + case Success(Right((next, effects))) => + val nextS = nextState(state) + val maybeBroadcast = + if ( + state.votesFrom.size < state.`n - f` && + nextS.votesFrom.size == state.`n - f` + ) { + "n - f collected" |: all( + "broadcast to all" |: effects.size == state.federation.size, + "all messages are quorums" |: all( + effects.map { + case Effect.SendMessage(_, Message.Quorum(_, qc)) => + all( + "quorum is about the current phase" |: qc.phase == message.phase, + "quorum is about the block" |: qc.blockHash == message.blockHash + ) + case other => + fail(s"unexpected effect $other") + }: _* + ) + ) + } else { + "not n - f" |: "not broadcast" |: effects.isEmpty + } + + all( + "stay in the same phase" |: next.phase == state.phase, + maybeBroadcast + ) + + case other => + fail(s"unexpected result $other") + } + } + } + } + + /** A Quorum from the leader to a replica. */ + case class QuorumCmd( + sender: TestAgreement.PKey, + message: Message.Quorum[TestAgreement] + ) extends MessageCmd { + override def nextState(state: State): State = + state.copy( + viewNumber = + if (state.phase == Phase.Decide) state.viewNumber.next + else state.viewNumber, + phase = state.phase match { + case Phase.Prepare => Phase.PreCommit + case Phase.PreCommit => Phase.Commit + case Phase.Commit => Phase.Decide + case Phase.Decide => Phase.Prepare + }, + votesFrom = Set.empty, + newViewsFrom = Set.empty, + maybeBlockHash = + if (state.phase == Phase.Decide) None else state.maybeBlockHash, + prepareQCs = + if (message.quorumCertificate.phase == Phase.Prepare) + message.quorumCertificate :: state.prepareQCs + else state.prepareQCs, + newViewsHighQC = + if (state.phase == Phase.Decide) genesisQC else state.newViewsHighQC + ) + + override def preCondition(state: State): Boolean = + state.viewNumber == message.viewNumber && + votingPhaseFor(state.phase).contains(message.quorumCertificate.phase) && + state.maybeBlockHash.contains(message.quorumCertificate.blockHash) + + override def postCondition( + state: Model, + result: Try[Result] + ): Prop = { + "Quorum" |: { + result match { + case Success(Right((next, effects))) => + val nextS = nextState(state) + all( + "moves to the next state" |: next.phase == nextS.phase, + if (state.phase != Phase.Decide) { + effects + .collectFirst { + case Effect + .SendMessage( + recipient, + Message.Vote(_, phase, _, _) + ) => + "votes for the next phase" |: phase == state.phase + } + .getOrElse(fail("expected to vote")) + } else { + "executes the block" |: effects.collectFirst { + case _: Effect.ExecuteBlocks[_] => + }.isDefined + } + ) + + case other => + fail(s"unexpected result $other") + } + } + } + } + + /** Check that a deliberately invalidated command returns a protocol error. 
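+    * (Commands invalidated via the `viewNumber` label may still pass static
+    * validation, since they are merely early; when they do, they must surface
+    * as `TooEarly`. Any other invalidation only needs to produce some
+    * protocol error.)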
*/
+  case class InvalidCmd(label: String, cmd: MessageCmd, isEarly: Boolean)
+      extends Command {
+    type Result = (Boolean, ProtocolState.TransitionAttempt[TestAgreement])
+
+    // The underlying command should return a `Left`,
+    // which means it shouldn't update the state.
+    override def run(sut: Protocol): Result = {
+      val event = Event.MessageReceived(cmd.sender, cmd.message)
+      val isStaticallyValid = sut.state.validateMessage(event).isRight
+      isStaticallyValid -> cmd.run(sut)
+    }
+
+    // The model state validation is not as sophisticated,
+    // but because we know this is invalid, we know
+    // it should not cause any change in state.
+    override def nextState(state: State): State =
+      state
+
+    // The invalidation should be strong enough that it doesn't
+    // become valid during shrinking.
+    override def preCondition(state: State): Boolean =
+      true
+
+    override def postCondition(
+        state: State,
+        result: Try[Result]
+    ): Prop =
+      s"Invalid $label" |: {
+        result match {
+          case Success((isStaticallyValid, Left(error))) =>
+            // Ensure that some errors are marked as TooEarly.
+            "is early" |:
+              isEarly && isStaticallyValid && error
+                .isInstanceOf[ProtocolError.TooEarly[_]] ||
+              !isStaticallyValid ||
+              !isEarly
+
+          case other =>
+            fail(s"unexpected result $other")
+        }
+      }
+
+  }
+}
diff --git a/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala b/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreProps.scala
similarity index 98%
rename from metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala
rename to metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreProps.scala
index 99aff80f..e1790122 100644
--- a/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreSpec.scala
+++ b/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreProps.scala
@@ -23,7 +23,7 @@ import scodec.codecs.implicits._
 // https://github.com/typelevel/scalacheck/blob/master/doc/UserGuide.md#stateful-testing
 // https://github.com/typelevel/scalacheck/blob/master/examples/commands-redis/src/test/scala/CommandsRedis.scala
 
-object RocksDBStoreSpec extends Properties("RocksDBStoreCommands") {
+object RocksDBStoreProps extends Properties("RocksDBStore") {
 
   override def overrideParameters(p: Test.Parameters): Test.Parameters =
     p.withMinSuccessfulTests(20).withMaxSize(100)
@@ -211,9 +211,9 @@ object RocksDBStoreCommands extends Commands {
     if (!state.isConnected) Gen.const(ToggleConnected)
     else
      Gen.frequency(
-        (10, genReadWriteProg(state)),
-        (3, genReadOnlyProg(state)),
-        (1, Gen.const(ToggleConnected))
+        10 -> genReadWriteProg(state),
+        3 -> genReadOnlyProg(state),
+        1 -> Gen.const(ToggleConnected)
      )
 
   /** Generate a sequence of writes and reads. */

From 612d3825373411bc16e34e0b25f422a31a36b455 Mon Sep 17 00:00:00 2001
From: KonradStaniec
Date: Mon, 29 Mar 2021 08:57:48 +0200
Subject: [PATCH 08/48] [PM-2966] Remote connection manager (#7)

* [PM-2966] Add initial files and tests
* [PM-2966] Add happy path for handling incoming/outgoing connections
* [PM-2966] Add happy path for sending/receiving messages
* [PM-2966] Handle fibers cancellation.
Add scalanet facade
* [PM-2966] Generalize connection manager
* [PM-2966] Add smarter reconnection logic
* [PM-2966] Allow only correct static topology
* [PM-2966] Refactor basic integration tests
* [PM-2966] Add integration test for reconnection
* [PM-2966] Add basic unit tests
* [PM-2966] Add more test cases
* [PM-2966] Move MockConnectionProvider to separate file
* [PM-2966] Add description to remote connections manager
* [PM-2966] Refactor unit tests
* [PM-2966] Handle possible duplicated incoming peers
* [PM-2966] Refactor integration tests by introducing cluster abstraction
* [PM-2966] Handle decoding error in scalanetConnectionProvider
* [PM-2966] Move networking to separate top module
* [PM-2966] Fix scalafmt
* [PM-2966] Change cluster config design
* [PM-2966] fix unit tests
* [PM-2966] bump openjdk image
* [PM-2966] Minor formatting fixes
* [PM-2966] Refactor connections manager into smaller components
* [PM-2966] Add more test cases to extracted connection handler
* [PM-2966] Fix test naming
* [PM-2966] Use crypto primitives from mantis-crypto
* [PM-2966] Various code improvements

Fix naming in crypto primitives
Fix naming in ConnectionRegister
Keep track of failed handshake ip address
Add docs to HandledConnection
Remove unnecessary () from methods

* [PM-2966] Move send message to connection handler
* [PM-2966] Add random jitter in relation to provided delay
* [PM-2966] Update developers list
* [PM-2966] Add some docs to finish callback
---
 .circleci/config.yml | 2 +-
 build.sc | 27 +-
 .../src/metronome/crypto/Secp256k1Utils.scala | 28 ++
 .../networking/ConnectionHandler.scala | 278 ++++++++++++++
 .../networking/ConnectionsRegister.scala | 53 +++
 .../EncryptedConnectionProvider.scala | 37 ++
 .../networking/RemoteConnectionManager.scala | 355 ++++++++++++++++++
 .../ScalanetConnectionProvider.scala | 178 +++++++++
 .../networking/ConnectionHandlerSpec.scala | 283 ++++++++++++++
 .../MockEncryptedConnectionProvider.scala | 264 +++++++++++++
 .../RemoteConnectionManagerTestUtils.scala | 78 ++++
 ...onnectionManagerWithMockProviderSpec.scala | 353 +++++++++++++++++
 ...ctionManagerWithScalanetProviderSpec.scala | 329 ++++++++++++++++
 13 files changed, 2262 insertions(+), 3 deletions(-)
 create mode 100644 metronome/crypto/src/metronome/crypto/Secp256k1Utils.scala
 create mode 100644 metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala
 create mode 100644 metronome/networking/src/io/iohk/metronome/networking/ConnectionsRegister.scala
 create mode 100644 metronome/networking/src/io/iohk/metronome/networking/EncryptedConnectionProvider.scala
 create mode 100644 metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala
 create mode 100644 metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala
 create mode 100644 metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala
 create mode 100644 metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala
 create mode 100644 metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala
 create mode 100644 metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala
 create mode 100644 metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala

diff --git a/.circleci/config.yml b/.circleci/config.yml
index c0a3834c..8cb92561 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -2,7 +2,7 @@ version: 2.1 jobs: build: docker: - - image: circleci/openjdk:8-jdk + - image: circleci/openjdk:11-jdk working_directory: ~/repo diff --git a/build.sc b/build.sc index ceb6c14a..fd563d08 100644 --- a/build.sc +++ b/build.sc @@ -25,6 +25,7 @@ object VersionOf { val shapeless = "2.3.3" val `scodec-core` = "1.11.7" val `scodec-bits` = "1.1.12" + val `mantis-crypto` = "3.2.1-SNAPSHOT" } // Using 2.12.13 instead of 2.12.10 to access @nowarn, to disable certain deperaction @@ -60,6 +61,11 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { "lemastero", "Piotr Paradzinski", "https://github.com/lemastero" + ), + Developer( + "KonradStaniec", + "Konrad Staniec", + "https://github.com/KonradStaniec" ) ) ) @@ -75,6 +81,10 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { ivy"org.typelevel::cats-effect:${VersionOf.cats}" ) + override def repositories = super.repositories ++ Seq( + MavenRepository("https://oss.sonatype.org/content/repositories/snapshots") + ) + override def scalacOptions = Seq( "-unchecked", "-deprecation", @@ -143,7 +153,16 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { /** Generic Peer-to-Peer components that can multiplex protocols * from different modules over a single authenticated TLS connection. */ - object networking extends SubModule + object networking extends SubModule { + override def moduleDeps: Seq[JavaModule] = + Seq(tracing, crypto) + + override def ivyDeps = super.ivyDeps() ++ Agg( + ivy"io.iohk::scalanet:${VersionOf.scalanet}" + ) + + object test extends TestModule + } /** Storage abstractions, e.g. a generic key-value store. */ object storage extends SubModule { @@ -174,7 +193,11 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { override def description: String = "Cryptographic primitives to support HotStuff and BFT proof verification." - // TODO: Use crypto library from Mantis. 
+    override def ivyDeps = super.ivyDeps() ++ Agg(
+      ivy"io.iohk::mantis-crypto:${VersionOf.`mantis-crypto`}",
+      ivy"org.scodec::scodec-bits:${VersionOf.`scodec-bits`}"
+    )
+
     object test extends TestModule
   }

diff --git a/metronome/crypto/src/metronome/crypto/Secp256k1Utils.scala b/metronome/crypto/src/metronome/crypto/Secp256k1Utils.scala
new file mode 100644
index 00000000..380d5845
--- /dev/null
+++ b/metronome/crypto/src/metronome/crypto/Secp256k1Utils.scala
@@ -0,0 +1,28 @@
+package metronome.crypto
+
+import java.security.SecureRandom
+import org.bouncycastle.crypto.AsymmetricCipherKeyPair
+import org.bouncycastle.crypto.params.ECPublicKeyParameters
+import scodec.bits.BitVector
+
+object Secp256k1Utils {
+
+  def generateKeyPair(
+      secureRandom: SecureRandom
+  ): AsymmetricCipherKeyPair = {
+    io.iohk.ethereum.crypto.generateKeyPair(secureRandom)
+  }
+
+  /** Returns the secp256k1 public key bytes in uncompressed form, with the compression indicator stripped.
+    */
+  def keyPairToUncompressed(keyPair: AsymmetricCipherKeyPair): BitVector = {
+    BitVector(
+      keyPair.getPublic
+        .asInstanceOf[ECPublicKeyParameters]
+        .getQ
+        .getEncoded(false)
+        .drop(1)
+    )
+  }
+
+}
diff --git a/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala b/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala
new file mode 100644
index 00000000..13b9c6b6
--- /dev/null
+++ b/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala
@@ -0,0 +1,278 @@
+package io.iohk.metronome.networking
+
+import cats.effect.{Concurrent, ContextShift, Resource, Sync}
+import cats.effect.concurrent.{Deferred, TryableDeferred}
+import io.iohk.metronome.networking.RemoteConnectionManager.withCancelToken
+import monix.catnap.ConcurrentQueue
+import monix.execution.atomic.AtomicInt
+import monix.tail.Iterant
+import cats.implicits._
+import cats.effect.implicits._
+import io.iohk.metronome.networking.ConnectionHandler.{
+  ConnectionAlreadyClosedException,
+  HandledConnection,
+  MessageReceived,
+  UnexpectedConnectionError
+}
+import io.iohk.metronome.networking.EncryptedConnectionProvider.{
+  ConnectionAlreadyClosed,
+  ConnectionError
+}
+
+import java.net.InetSocketAddress
+import scala.util.control.NoStackTrace
+
+class ConnectionHandler[F[_]: Concurrent, K, M](
+    connectionQueue: ConcurrentQueue[F, HandledConnection[F, K, M]],
+    connectionsRegister: ConnectionsRegister[F, K, M],
+    messageQueue: ConcurrentQueue[F, MessageReceived[K, M]],
+    cancelToken: TryableDeferred[F, Unit],
+    connectionFinishCallback: HandledConnection[F, K, M] => F[Unit]
+) {
+
+  private val numberOfRunningConnections = AtomicInt(0)
+
+  private def closeAndDeregisterConnection(
+      handledConnection: HandledConnection[F, K, M]
+  ): F[Unit] = {
+    for {
+      _ <- Concurrent[F].delay(numberOfRunningConnections.decrement())
+      _ <- connectionsRegister.deregisterConnection(handledConnection)
+      _ <- handledConnection.close
+    } yield ()
+  }
+
+  /** Registers the connection and starts handling its incoming messages in the
+    * background; in case the connection is already handled, it closes the new one.
+    *
+    * @param possibleNewConnection, the possible new connection to handle
+    */
+  def registerOrClose(
+      possibleNewConnection: HandledConnection[F, K, M]
+  ): F[Unit] = {
+    connectionsRegister.registerIfAbsent(possibleNewConnection).flatMap {
+      case Some(_) =>
+        //TODO [PM-3092] for now we are closing any new connection in case of conflict; we may investigate other strategies
+        // like keeping the old one for outgoing and replacing it for incoming
possibleNewConnection.close
+      case None =>
+        connectionQueue.offer(possibleNewConnection)
+    }
+  }
+
+  /** Checks if the handler already handles a connection to the peer with the provided key.
+    *
+    * @param connectionKey, key of the remote peer
+    */
+  def isNewConnection(connectionKey: K): F[Boolean] = {
+    connectionsRegister.isNewConnection(connectionKey)
+  }
+
+  /** Retrieves the set of keys of all connected and handled peers.
+    */
+  def getAllActiveConnections: F[Set[K]] =
+    connectionsRegister.getAllRegisteredConnections.map { connections =>
+      connections.map(_.key)
+    }
+
+  /** Number of connections actively read in the background.
+    */
+  def numberOfActiveConnections: F[Int] = {
+    Concurrent[F].delay(numberOfRunningConnections.get())
+  }
+
+  /** Stream of all messages received from all remote peers.
+    */
+  def incomingMessages: Iterant[F, MessageReceived[K, M]] =
+    Iterant.repeatEvalF(messageQueue.poll)
+
+  /** Retrieves a handled connection, if one exists.
+    *
+    * @param key, key of the remote peer
+    */
+  def getConnection(key: K): F[Option[HandledConnection[F, K, M]]] =
+    connectionsRegister.getConnection(key)
+
+  def sendMessage(
+      recipient: K,
+      message: M
+  ): F[Either[ConnectionAlreadyClosedException[K], Unit]] = {
+    getConnection(recipient).flatMap {
+      case Some(connection) =>
+        connection
+          .sendMessage(message)
+          .attemptNarrow[ConnectionAlreadyClosed]
+          .flatMap {
+            case Left(_) =>
+              connection.close.as(
+                Left(ConnectionAlreadyClosedException(recipient))
+              )
+
+            case Right(_) =>
+              Concurrent[F].pure(Right(()))
+          }
+      case None =>
+        Concurrent[F].pure(Left(ConnectionAlreadyClosedException(recipient)))
+    }
+  }
+
+  private def callCallBackIfNotClosed(
+      handledConnection: HandledConnection[F, K, M]
+  ): F[Unit] = {
+    cancelToken.tryGet.flatMap {
+      case Some(_) => Sync[F].unit
+      case None => connectionFinishCallback(handledConnection)
+    }
+  }
+
+  /** Connection multiplexer: it receives both incoming and outgoing connections and starts
+    * reading incoming messages from them concurrently, putting them on the received-messages queue.
+    * In case of an error or stream completion it cleans up all resources.
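+    * (Each connection polled from the queue is read on its own background
+    * fiber, started with `.start`; `numberOfRunningConnections` tracks how
+    * many of these are live.)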
+ */ + private def handleConnections: F[Unit] = { + Iterant + .repeatEvalF(connectionQueue.poll) + .mapEval { connection => + Sync[F].delay(numberOfRunningConnections.increment()).flatMap { _ => + Iterant + .repeatEvalF( + withCancelToken(cancelToken, connection.incomingMessage) + ) + .takeWhile(_.isDefined) + .map(_.get) + .mapEval { + case Right(m) => + messageQueue.offer( + MessageReceived(connection.key, m) + ) + case Left(e) => + Concurrent[F].raiseError[Unit]( + UnexpectedConnectionError(e, connection.key) + ) + } + .guarantee( + closeAndDeregisterConnection(connection) + .flatMap(_ => callCallBackIfNotClosed(connection)) + ) + .completedL + .start + } + } + .completedL + } + + // for now shutdown of all connections is completed in background + private def shutdown: F[Unit] = cancelToken.complete(()).attempt.void +} + +object ConnectionHandler { + case class ConnectionAlreadyClosedException[K](key: K) + extends RuntimeException( + s"Connection with node ${key}, has already closed" + ) + with NoStackTrace + + private def getConnectionErrorMessage[K]( + e: ConnectionError, + connectionKey: K + ): String = { + e match { + case EncryptedConnectionProvider.DecodingError => + s"Unexpected decoding error on connection with ${connectionKey}" + case EncryptedConnectionProvider.UnexpectedError(ex) => + s"Unexpected error ${ex.getMessage} on connection with ${connectionKey}" + } + } + + case class UnexpectedConnectionError[K](e: ConnectionError, connectionKey: K) + extends RuntimeException(getConnectionErrorMessage(e, connectionKey)) + + case class MessageReceived[K, M](from: K, message: M) + + /** Connection which is already handled by connection handler i.e it is registered in registry and handler is subscribed + * for incoming messages of that connection + * + * @param key, key of remote node + * @param serverAddress, address of the server of remote node. 
In case of an incoming connection it will be different from the
+    * remote address of the `underlyingConnection`.
+    * @param underlyingConnection, encrypted connection to send and receive messages
+    */
+  case class HandledConnection[F[_], K, M](
+      key: K,
+      serverAddress: InetSocketAddress,
+      underlyingConnection: EncryptedConnection[F, K, M]
+  ) {
+    def sendMessage(m: M): F[Unit] = {
+      underlyingConnection.sendMessage(m)
+    }
+
+    def close: F[Unit] = {
+      underlyingConnection.close
+    }
+
+    def incomingMessage: F[Option[Either[ConnectionError, M]]] = {
+      underlyingConnection.incomingMessage
+    }
+  }
+
+  object HandledConnection {
+    def outgoing[F[_], K, M](
+        encryptedConnection: EncryptedConnection[F, K, M]
+    ): HandledConnection[F, K, M] = {
+      HandledConnection(
+        encryptedConnection.remotePeerInfo._1,
+        encryptedConnection.remotePeerInfo._2,
+        encryptedConnection
+      )
+    }
+
+    def incoming[F[_], K, M](
+        serverAddress: InetSocketAddress,
+        encryptedConnection: EncryptedConnection[F, K, M]
+    ): HandledConnection[F, K, M] = {
+      HandledConnection(
+        encryptedConnection.remotePeerInfo._1,
+        serverAddress,
+        encryptedConnection
+      )
+    }
+
+  }
+
+  private def buildHandler[F[_]: Concurrent: ContextShift, K, M](
+      connectionFinishCallback: HandledConnection[F, K, M] => F[Unit]
+  ): F[ConnectionHandler[F, K, M]] = {
+    for {
+      cancelToken <- Deferred.tryable[F, Unit]
+      acquiredConnections <- ConnectionsRegister.empty[F, K, M]
+      messageQueue <- ConcurrentQueue.unbounded[F, MessageReceived[K, M]]()
+      connectionQueue <- ConcurrentQueue
+        .unbounded[F, HandledConnection[F, K, M]]()
+    } yield new ConnectionHandler[F, K, M](
+      connectionQueue,
+      acquiredConnections,
+      messageQueue,
+      cancelToken,
+      connectionFinishCallback
+    )
+  }
+
+  /** Starts the connection handler and the polling for connections.
+    *
+    * @param connectionFinishCallback, callback to be called when a connection is finished and gets deregistered
+    */
+  def apply[F[_]: Concurrent: ContextShift, K, M](
+      connectionFinishCallback: HandledConnection[F, K, M] => F[Unit]
+  ): Resource[F, ConnectionHandler[F, K, M]] = {
+    Resource
+      .make(buildHandler(connectionFinishCallback)) { handler =>
+        handler.shutdown
+      }
+      .flatMap { handler =>
+        for {
+          _ <- handler.handleConnections.background
+        } yield handler
+      }
+  }
+
+}
diff --git a/metronome/networking/src/io/iohk/metronome/networking/ConnectionsRegister.scala b/metronome/networking/src/io/iohk/metronome/networking/ConnectionsRegister.scala
new file mode 100644
index 00000000..9c93414e
--- /dev/null
+++ b/metronome/networking/src/io/iohk/metronome/networking/ConnectionsRegister.scala
@@ -0,0 +1,53 @@
+package io.iohk.metronome.networking
+
+import cats.effect.Concurrent
+import cats.effect.concurrent.Ref
+import io.iohk.metronome.networking.ConnectionHandler.HandledConnection
+import cats.implicits._
+
+class ConnectionsRegister[F[_]: Concurrent, K, M](
+    registerRef: Ref[F, Map[K, HandledConnection[F, K, M]]]
+) {
+
+  def registerIfAbsent(
+      connection: HandledConnection[F, K, M]
+  ): F[Option[HandledConnection[F, K, M]]] = {
+    registerRef.modify { register =>
+      val connectionKey = connection.key
+
+      if (register.contains(connectionKey)) {
+        (register, register.get(connectionKey))
+      } else {
+        (register.updated(connectionKey, connection), None)
+      }
+    }
+  }
+
+  def isNewConnection(connectionKey: K): F[Boolean] = {
+    registerRef.get.map(register => !register.contains(connectionKey))
+  }
+
+  def deregisterConnection(
+      connection: HandledConnection[F, K, M]
+  ): F[Unit] = {
+    registerRef.update(register => register -
(connection.key)) + } + + def getAllRegisteredConnections: F[Set[HandledConnection[F, K, M]]] = { + registerRef.get.map(register => register.values.toSet) + } + + def getConnection( + connectionKey: K + ): F[Option[HandledConnection[F, K, M]]] = + registerRef.get.map(register => register.get(connectionKey)) + +} + +object ConnectionsRegister { + def empty[F[_]: Concurrent, K, M]: F[ConnectionsRegister[F, K, M]] = { + Ref + .of(Map.empty[K, HandledConnection[F, K, M]]) + .map(ref => new ConnectionsRegister[F, K, M](ref)) + } +} diff --git a/metronome/networking/src/io/iohk/metronome/networking/EncryptedConnectionProvider.scala b/metronome/networking/src/io/iohk/metronome/networking/EncryptedConnectionProvider.scala new file mode 100644 index 00000000..3e0adda3 --- /dev/null +++ b/metronome/networking/src/io/iohk/metronome/networking/EncryptedConnectionProvider.scala @@ -0,0 +1,37 @@ +package io.iohk.metronome.networking + +import io.iohk.metronome.networking.EncryptedConnectionProvider.{ + ConnectionError, + HandshakeFailed +} + +import java.net.InetSocketAddress + +trait EncryptedConnection[F[_], K, M] { + def remotePeerInfo: (K, InetSocketAddress) + def sendMessage(m: M): F[Unit] + def incomingMessage: F[Option[Either[ConnectionError, M]]] + def close: F[Unit] +} + +trait EncryptedConnectionProvider[F[_], K, M] { + def localPeerInfo: (K, InetSocketAddress) + def connectTo( + k: K, + address: InetSocketAddress + ): F[EncryptedConnection[F, K, M]] + def incomingConnection + : F[Option[Either[HandshakeFailed, EncryptedConnection[F, K, M]]]] +} + +object EncryptedConnectionProvider { + case class HandshakeFailed(ex: Throwable, remoteAddress: InetSocketAddress) + + sealed trait ConnectionError + case object DecodingError extends ConnectionError + case class UnexpectedError(ex: Throwable) extends ConnectionError + + case class ConnectionAlreadyClosed(address: InetSocketAddress) + extends RuntimeException + +} diff --git a/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala b/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala new file mode 100644 index 00000000..e4119adf --- /dev/null +++ b/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala @@ -0,0 +1,355 @@ +package io.iohk.metronome.networking + +import cats.effect.concurrent.Deferred +import cats.effect.implicits._ +import cats.effect.{Concurrent, ContextShift, Resource, Sync, Timer} +import cats.implicits._ +import io.iohk.metronome.networking.ConnectionHandler.{ + HandledConnection, + MessageReceived +} +import io.iohk.metronome.networking.RemoteConnectionManager.RetryConfig.RandomJitterConfig +import monix.catnap.ConcurrentQueue +import monix.eval.{TaskLift, TaskLike} +import monix.reactive.Observable +import monix.tail.Iterant +import scodec.Codec + +import java.net.InetSocketAddress +import java.util.concurrent.{ThreadLocalRandom, TimeUnit} +import scala.concurrent.duration.FiniteDuration + +class RemoteConnectionManager[F[_]: Sync, K, M: Codec]( + connectionHandler: ConnectionHandler[F, K, M], + localInfo: (K, InetSocketAddress) +) { + + def getLocalPeerInfo: (K, InetSocketAddress) = localInfo + + def getAcquiredConnections: F[Set[K]] = { + connectionHandler.getAllActiveConnections + } + + def incomingMessages: Iterant[F, MessageReceived[K, M]] = + connectionHandler.incomingMessages + + def sendMessage( + recipient: K, + message: M + ): F[Either[ConnectionHandler.ConnectionAlreadyClosedException[K], Unit]] = { + 
connectionHandler.sendMessage(recipient, message) + } +} +//TODO add logging +object RemoteConnectionManager { + case class ConnectionSuccess[F[_], K, M]( + encryptedConnection: EncryptedConnection[F, K, M] + ) + + case class ConnectionFailure[K]( + connectionRequest: OutGoingConnectionRequest[K], + err: Throwable + ) + + private def connectTo[ + F[_]: Sync, + K: Codec, + M: Codec + ]( + encryptedConnectionProvider: EncryptedConnectionProvider[F, K, M], + connectionRequest: OutGoingConnectionRequest[K] + ): F[Either[ConnectionFailure[K], ConnectionSuccess[F, K, M]]] = { + encryptedConnectionProvider + .connectTo(connectionRequest.key, connectionRequest.address) + .redeemWith( + e => Sync[F].pure(Left(ConnectionFailure(connectionRequest, e))), + connection => Sync[F].pure(Right(ConnectionSuccess(connection))) + ) + } + + case class RetryConfig( + initialDelay: FiniteDuration, + backOffFactor: Long, + maxDelay: FiniteDuration, + randomJitterConfig: RandomJitterConfig + ) + + object RetryConfig { + sealed abstract case class RandomJitterConfig private ( + fractionOfDelay: Double + ) + + object RandomJitterConfig { + import scala.concurrent.duration._ + + /** Builds a random jitter config + * @param fractionOfTheDelay, the fraction of the delay within which the computed jitter should lie; it should be in the range 0..1 + */ + def buildJitterConfig( + fractionOfTheDelay: Double + ): Option[RandomJitterConfig] = { + if (fractionOfTheDelay >= 0 && fractionOfTheDelay <= 1) { + Some(new RandomJitterConfig(fractionOfTheDelay) {}) + } else { + None + } + } + + /** Computes a new duration with additional random jitter added. Works with millisecond precision, i.e. if the provided duration + * is less than 1 millisecond then no jitter will be added + * @param config, jitter config + * @param delay, duration to randomize; it should be positive, otherwise no randomization will happen + */ + def randomizeWithJitter( + config: RandomJitterConfig, + delay: FiniteDuration + ): FiniteDuration = { + val fractionDuration = + (delay.max(0.milliseconds) * config.fractionOfDelay).toMillis + if (fractionDuration == 0) { + delay + } else { + val randomized = ThreadLocalRandom + .current() + .nextLong(-fractionDuration, fractionDuration) + val randomFactor = FiniteDuration(randomized, TimeUnit.MILLISECONDS) + delay + randomFactor + } + } + + /** Default jitter config, which will keep the random jitter in the +/-20% range + */ + val defaultConfig: RandomJitterConfig = buildJitterConfig(0.2).get + } + + import scala.concurrent.duration._ + def default: RetryConfig = { + RetryConfig( + 500.milliseconds, + 2, + 30.seconds, + RandomJitterConfig.defaultConfig + ) + } + + } + + private def retryConnection[F[_]: Timer: Concurrent, K]( + config: RetryConfig, + failedConnectionRequest: OutGoingConnectionRequest[K] + ): F[OutGoingConnectionRequest[K]] = { + val updatedFailureCount = + failedConnectionRequest.numberOfFailures + 1 + val exponentialBackoff = + math.pow(config.backOffFactor.toDouble, updatedFailureCount).toLong + + val newDelay = + ((config.initialDelay * exponentialBackoff).min(config.maxDelay)) + + val newDelayWithJitter = RandomJitterConfig.randomizeWithJitter( + config.randomJitterConfig, + newDelay + ) + + Timer[F] + .sleep(newDelayWithJitter) + .as(failedConnectionRequest.copy(numberOfFailures = updatedFailureCount)) + + } + + /** Connections are acquired in a linear fashion, i.e. there can be at most one concurrent call to a remote peer. 
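+ * (Concretely: requests are polled one by one from the connections-to-acquire queue and dialled sequentially; only the retry timers in the pipeline below run concurrently.)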
+ * In case of failure, each connection will be retried an infinite number of times with exponential backoff between + * attempts. + */ + private def acquireConnections[ + F[_]: Concurrent: TaskLift: TaskLike: Timer, + K: Codec, + M: Codec + ]( + encryptedConnectionProvider: EncryptedConnectionProvider[F, K, M], + connectionsToAcquire: ConcurrentQueue[F, OutGoingConnectionRequest[K]], + connectionsHandler: ConnectionHandler[F, K, M], + retryConfig: RetryConfig + ): F[Unit] = { + + /** Observable is used here as the streaming primitive as it has a richer API than Iterant and has the mapParallelUnorderedF + * combinator, which makes it possible to have multiple concurrent retry timers, which are cancelled when the whole + * outer stream is cancelled + */ + Observable + .repeatEvalF(connectionsToAcquire.poll) + .filterEvalF(request => connectionsHandler.isNewConnection(request.key)) + .mapEvalF { connectionToAcquire => + connectTo(encryptedConnectionProvider, connectionToAcquire) + } + .mapParallelUnorderedF(Integer.MAX_VALUE) { + case Left(failure) => + //TODO add logging of failure + val failureToLog = failure.err + retryConnection(retryConfig, failure.connectionRequest).flatMap( + updatedRequest => connectionsToAcquire.offer(updatedRequest) + ) + case Right(connection) => + val newOutgoingConnections = + HandledConnection.outgoing(connection.encryptedConnection) + connectionsHandler.registerOrClose(newOutgoingConnections) + + } + .completedF + } + + /** Reads incoming connections in a linear fashion and checks whether they are on the cluster allowed list. + */ + private def handleServerConnections[F[_]: Concurrent: TaskLift, K, M: Codec]( + pg: EncryptedConnectionProvider[F, K, M], + connectionsHandler: ConnectionHandler[F, K, M], + clusterConfig: ClusterConfig[K] + ): F[Unit] = { + Iterant + .repeatEvalF(pg.incomingConnection) + .takeWhile(_.isDefined) + .map(_.get) + .collect { case Right(value) => + value + } + .mapEval { encryptedConnection => + clusterConfig.getIncomingConnectionServerInfo( + encryptedConnection.remotePeerInfo._1 + ) match { + case Some(incomingConnectionServerAddress) => + val handledConnection = HandledConnection.incoming( + incomingConnectionServerAddress, + encryptedConnection + ) + connectionsHandler.registerOrClose(handledConnection) + + case None => + // unknown connection, just close it + encryptedConnection.close + } + } + .completedL + } + + def withCancelToken[F[_]: Concurrent, A]( + token: Deferred[F, Unit], + ops: F[Option[A]] + ): F[Option[A]] = + Concurrent[F].race(token.get, ops).map { + case Left(()) => None + case Right(x) => x + } + + class HandledConnectionFinisher[F[_]: Concurrent: Timer, K, M]( + connectionsToAcquire: ConcurrentQueue[F, OutGoingConnectionRequest[K]], + retryConfig: RetryConfig + ) { + def finish(handledConnection: HandledConnection[F, K, M]): F[Unit] = { + retryConnection( + retryConfig, + OutGoingConnectionRequest.initial( + handledConnection.key, + handledConnection.serverAddress + ) + ).flatMap(req => connectionsToAcquire.offer(req)) + } + } + + case class OutGoingConnectionRequest[K]( + key: K, + address: InetSocketAddress, + numberOfFailures: Int + ) + + object OutGoingConnectionRequest { + def initial[K]( + key: K, + address: InetSocketAddress + ): OutGoingConnectionRequest[K] = { + OutGoingConnectionRequest(key, address, 0) + } + } + + case class ClusterConfig[K]( + clusterNodes: Set[(K, InetSocketAddress)] + ) { + val clusterNodesKeys = clusterNodes.map(_._1) + + val serverAddresses = clusterNodes.toMap + + def isAllowedIncomingConnection(k: K): 
Boolean = + clusterNodesKeys.contains(k) + + def getIncomingConnectionServerInfo(k: K): Option[InetSocketAddress] = + serverAddresses.get(k) + + } + + /** Connection manager for a static-topology cluster. It starts 3 concurrent background processes: + * 1. Calling process - tries to connect to remote nodes specified in the cluster config. In case of failure, retries with + * exponential backoff. + * 2. Server process - reads incoming connections from the server socket. Validates that each incoming connection is from a known + * remote peer specified in the cluster config. + * 3. Message reading process - receives connections from both the Calling and Server processes, and for each connection + * starts a concurrent process reading messages from that connection. In case of an error on a connection, it closes the + * connection. Upon discovering that one of the outgoing connections has failed, it requests the Calling process to establish the + * connection once again. + * + * @param encryptedConnectionsProvider component which makes it possible to receive and acquire encrypted connections + * @param clusterConfig static cluster topology configuration + * @param retryConfig retry configuration for outgoing connections (incoming connections are not retried) + */ + def apply[ + F[_]: Concurrent: TaskLift: TaskLike: Timer, + K: Codec, + M: Codec + ]( + encryptedConnectionsProvider: EncryptedConnectionProvider[F, K, M], + clusterConfig: ClusterConfig[K], + retryConfig: RetryConfig + )(implicit + cs: ContextShift[F] + ): Resource[F, RemoteConnectionManager[F, K, M]] = { + for { + connectionsToAcquireQueue <- Resource.liftF( + ConcurrentQueue.unbounded[F, OutGoingConnectionRequest[K]]() + ) + _ <- Resource.liftF( + connectionsToAcquireQueue.offerMany( + clusterConfig.clusterNodes.collect { + case toConnect + if toConnect != encryptedConnectionsProvider.localPeerInfo => + OutGoingConnectionRequest.initial(toConnect._1, toConnect._2) + } + ) + ) + + handledConnectionFinisher = new HandledConnectionFinisher[F, K, M]( + connectionsToAcquireQueue, + retryConfig + ) + + connectionsHandler <- ConnectionHandler.apply( + // when a connection is finished, the callback will be called and the connection will be put on the connections-to-acquire + // queue + handledConnectionFinisher.finish + ) + + _ <- acquireConnections( + encryptedConnectionsProvider, + connectionsToAcquireQueue, + connectionsHandler, + retryConfig + ).background + _ <- handleServerConnections( + encryptedConnectionsProvider, + connectionsHandler, + clusterConfig + ).background + } yield new RemoteConnectionManager[F, K, M]( + connectionsHandler, + encryptedConnectionsProvider.localPeerInfo + ) + + } +} diff --git a/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala b/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala new file mode 100644 index 00000000..02003afc --- /dev/null +++ b/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala @@ -0,0 +1,178 @@ +package io.iohk.metronome.networking + +import cats.effect.{Resource, Sync} +import io.iohk.metronome.networking.EncryptedConnectionProvider.{ + ConnectionAlreadyClosed, + ConnectionError, + DecodingError, + HandshakeFailed, + UnexpectedError +} +import io.iohk.scalanet.peergroup.PeerGroup.{ + ChannelBrokenException, + ServerEvent +} +import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSPeerGroup.{ + Config, + FramingConfig, + PeerInfo +} +import io.iohk.scalanet.peergroup.dynamictls.{DynamicTLSPeerGroup, Secp256k1} 
+import io.iohk.scalanet.peergroup.{Channel, InetMultiAddress} +import monix.eval.{Task, TaskLift} +import monix.execution.Scheduler +import org.bouncycastle.crypto.AsymmetricCipherKeyPair +import scodec.Codec + +import java.net.InetSocketAddress +import java.security.SecureRandom + +object ScalanetConnectionProvider { + private class ScalanetEncryptedConnection[F[_]: TaskLift, K: Codec, M: Codec]( + underlyingChannel: Channel[PeerInfo, M], + underlyingChannelRelease: F[Unit], + channelKey: K + ) extends EncryptedConnection[F, K, M] { + + override def close: F[Unit] = underlyingChannelRelease + + override val remotePeerInfo: (K, InetSocketAddress) = ( + channelKey, + underlyingChannel.to.address.inetSocketAddress + ) + + override def sendMessage(m: M): F[Unit] = { + TaskLift[F].apply(underlyingChannel.sendMessage(m).onErrorRecoverWith { + case _: ChannelBrokenException[_] => + Task.raiseError( + ConnectionAlreadyClosed( + underlyingChannel.to.address.inetSocketAddress + ) + ) + }) + } + + override def incomingMessage: F[Option[Either[ConnectionError, M]]] = { + TaskLift[F].apply(underlyingChannel.nextChannelEvent.map { + case Some(event) => + event match { + case Channel.MessageReceived(m) => Some(Right(m)) + case Channel.UnexpectedError(e) => Some(Left(UnexpectedError(e))) + case Channel.DecodingError => Some(Left(DecodingError)) + } + case None => None + }) + } + } + + private object ScalanetEncryptedConnection { + def apply[F[_]: TaskLift, K: Codec, M: Codec]( + channel: Channel[PeerInfo, M], + channelRelease: Task[Unit] + ): Task[EncryptedConnection[F, K, M]] = { + + Task + .fromTry(Codec[K].decodeValue(channel.to.id).toTry) + .map { key => + new ScalanetEncryptedConnection[F, K, M]( + channel, + TaskLift[F].apply(channelRelease), + key + ) + } + .onErrorHandleWith { e => + channelRelease.flatMap(_ => Task.raiseError(e)) + } + + } + + } + + // Codec constraint for K is necessary as scalanet requires the peer key to be in BitVector format + def scalanetProvider[F[_]: Sync: TaskLift, K: Codec, M: Codec]( + bindAddress: InetSocketAddress, + nodeKeyPair: AsymmetricCipherKeyPair, + secureRandom: SecureRandom, + useNativeTlsImplementation: Boolean, + framingConfig: FramingConfig, + maxIncomingQueueSizePerPeer: Int + )(implicit + sch: Scheduler + ): Resource[F, EncryptedConnectionProvider[F, K, M]] = { + for { + config <- Resource.liftF[F, Config]( + Sync[F].fromTry( + DynamicTLSPeerGroup + .Config( + bindAddress, + Secp256k1, + nodeKeyPair, + secureRandom, + useNativeTlsImplementation, + framingConfig, + maxIncomingQueueSizePerPeer, + None + ) + ) + ) + pg <- DynamicTLSPeerGroup[M](config).mapK(TaskLift.apply) + local <- Resource.pure( + ( + Codec[K].decodeValue(pg.processAddress.id).require, + pg.processAddress.address.inetSocketAddress + ) + ) + + } yield new EncryptedConnectionProvider[F, K, M] { + override def localPeerInfo: (K, InetSocketAddress) = local + + import cats.implicits._ + + /** Connects to a remote node, creating a new connection with each call + * + * @param k, key of the remote node + * @param address, address of the remote node + */ + override def connectTo( + k: K, + address: InetSocketAddress + ): F[EncryptedConnection[F, K, M]] = { + val encodedKey = Codec[K].encode(k).require + pg.client(PeerInfo(encodedKey, InetMultiAddress(address))) + .mapK[Task, F](TaskLift[F]) + .allocated + .map { case (channel, release) => + new ScalanetEncryptedConnection(channel, release, k) + } + } + + override def incomingConnection + : F[Option[Either[HandshakeFailed, EncryptedConnection[F, K, 
M]]]] = { + TaskLift[F].apply(pg.nextServerEvent.flatMap { + case Some(ev) => + ev match { + case ServerEvent.ChannelCreated(channel, release) => + ScalanetEncryptedConnection[F, K, M](channel, release).map { + connection => + Some(Right(connection)) + } + + case ServerEvent.HandshakeFailed(failure) => + Task.now( + Some( + Left( + HandshakeFailed( + failure, + failure.to.address.inetSocketAddress + ) + ) + ) + ) + + } + case None => Task.now(None) + }) + } + } + } +} diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala new file mode 100644 index 00000000..e2bee90b --- /dev/null +++ b/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala @@ -0,0 +1,283 @@ +package io.iohk.metronome.networking + +import monix.execution.Scheduler +import org.scalatest.flatspec.AsyncFlatSpecLike +import org.scalatest.matchers.should.Matchers + +import scala.concurrent.duration._ +import RemoteConnectionManagerTestUtils._ +import cats.effect.Resource +import cats.effect.concurrent.Deferred +import io.iohk.metronome.networking.ConnectionHandler.{ + ConnectionAlreadyClosedException, + HandledConnection +} +import io.iohk.metronome.networking.ConnectionHandlerSpec.{ + buildHandlerResource, + buildNConnections, + newHandledConnection +} +import io.iohk.metronome.networking.MockEncryptedConnectionProvider.MockEncryptedConnection +import monix.eval.Task +import ConnectionHandlerSpec._ +import io.iohk.metronome.networking.EncryptedConnectionProvider.DecodingError +import io.iohk.metronome.networking.RemoteConnectionManagerWithMockProviderSpec.fakeLocalAddress + +import java.net.InetSocketAddress + +class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { + implicit val testScheduler = + Scheduler.fixedPool("ConnectionHandlerSpec", 16) + implicit val timeOut = 5.seconds + + behavior of "ConnectionHandler" + + it should "register new connections" in customTestCaseResourceT( + buildHandlerResource() + ) { handler => + for { + handledConnection1 <- newHandledConnection() + _ <- handler.registerOrClose(handledConnection1._1) + connections <- handler.getAllActiveConnections + } yield { + assert(connections.contains(handledConnection1._1.key)) + } + } + + it should "send message to registered connection" in customTestCaseResourceT( + buildHandlerResource() + ) { handler => + for { + handledConnection1 <- newHandledConnection() + _ <- handler.registerOrClose(handledConnection1._1) + connections <- handler.getAllActiveConnections + sendResult <- handler.sendMessage(handledConnection1._1.key, MessageA(1)) + } yield { + assert(connections.contains(handledConnection1._1.key)) + assert(sendResult.isRight) + } + } + + it should "fail to send message to un-registered connection" in customTestCaseResourceT( + buildHandlerResource() + ) { handler => + for { + handledConnection1 <- newHandledConnection() + connections <- handler.getAllActiveConnections + sendResult <- handler.sendMessage(handledConnection1._1.key, MessageA(1)) + } yield { + assert(connections.isEmpty) + assert(sendResult.isLeft) + assert( + sendResult.left.getOrElse(null) == ConnectionAlreadyClosedException( + handledConnection1._1.key + ) + ) + } + } + + it should "fail to send message silently failed peer" in customTestCaseResourceT( + buildHandlerResource() + ) { handler => + for { + handledConnection1 <- newHandledConnection() + (handled, underLaying) = handledConnection1 + _ <- 
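// simulate a silent remote failure: the remote side is closed without emitting the final None event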
underLaying.closeRemoteWithoutInfo + _ <- handler.registerOrClose(handledConnection1._1) + connections <- handler.getAllActiveConnections + sendResult <- handler.sendMessage(handledConnection1._1.key, MessageA(1)) + } yield { + assert(connections.contains(handledConnection1._1.key)) + assert(sendResult.isLeft) + assert( + sendResult.left.getOrElse(null) == ConnectionAlreadyClosedException( + handledConnection1._1.key + ) + ) + } + } + + it should "not register and close duplicated connection" in customTestCaseResourceT( + buildHandlerResource() + ) { handler => + for { + handledConnection <- newHandledConnection() + duplicatedConnection <- newHandledConnection(remotePeerInfo = + (handledConnection._1.key, handledConnection._1.serverAddress) + ) + (handled, underlyingEncrypted) = handledConnection + _ <- handler.registerOrClose(handled) + connections <- handler.getAllActiveConnections + _ <- handler.registerOrClose(duplicatedConnection._1) + connectionsAfterDuplication <- handler.getAllActiveConnections + closedAfterDuplication <- duplicatedConnection._2.isClosed + } yield { + assert(connections.contains(handled.key)) + assert(connectionsAfterDuplication.contains(handled.key)) + assert(closedAfterDuplication) + + } + } + + it should "close all connections in background when released" in customTestCaseT { + val expectedNumberOfConnections = 4 + for { + handlerAndRelease <- buildHandlerResource().allocated + (handler, release) = handlerAndRelease + connections <- buildNConnections(expectedNumberOfConnections) + _ <- Task.traverse(connections)(connection => + handler.registerOrClose(connection._1) + ) + maxNumberOfActiveConnections <- handler.numberOfActiveConnections + .waitFor(numOfConnections => + numOfConnections == expectedNumberOfConnections + ) + + _ <- release + connectionsAfterClose <- handler.getAllActiveConnections.waitFor( + connections => connections.isEmpty + ) + } yield { + assert(maxNumberOfActiveConnections == expectedNumberOfConnections) + assert(connectionsAfterClose.isEmpty) + } + } + + it should "call provided callback when connection is closed" in customTestCaseT { + for { + cb <- Deferred.tryable[Task, Unit] + handlerAndRelease <- buildHandlerResource(_ => cb.complete(())).allocated + (handler, release) = handlerAndRelease + connection <- newHandledConnection() + (handledConnection, underlyingEncrypted) = connection + _ <- handler.registerOrClose(handledConnection) + numberOfActive <- handler.numberOfActiveConnections.waitFor(_ == 1) + _ <- underlyingEncrypted.pushRemoteEvent(None) + numberOfActiveAfterDisconnect <- handler.numberOfActiveConnections + .waitFor(_ == 0) + callbackCompleted <- cb.tryGet.waitFor(_.isDefined) + _ <- release + } yield { + assert(numberOfActive == 1) + assert(numberOfActiveAfterDisconnect == 0) + assert(callbackCompleted.isDefined) + } + } + + it should "call provided callback and close connection in case of error" in customTestCaseT { + for { + cb <- Deferred.tryable[Task, Unit] + handlerAndRelease <- buildHandlerResource(_ => cb.complete(())).allocated + (handler, release) = handlerAndRelease + connection <- newHandledConnection() + (handledConnection, underlyingEncrypted) = connection + _ <- handler.registerOrClose(handledConnection) + numberOfActive <- handler.numberOfActiveConnections.waitFor(_ == 1) + _ <- underlyingEncrypted.pushRemoteEvent(Some(Left(DecodingError))) + numberOfActiveAfterError <- handler.numberOfActiveConnections + .waitFor(_ == 0) + callbackCompleted <- cb.tryGet.waitFor(_.isDefined) + _ <- release + } yield { + 
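// the decoding error should close and deregister the connection and fire the finish callback +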
assert(numberOfActive == 1) + assert(numberOfActiveAfterError == 0) + assert(callbackCompleted.isDefined) + } + } + + it should "try not to call callback in case of closing manager" in customTestCaseT { + for { + cb <- Deferred.tryable[Task, Unit] + handlerAndRelease <- buildHandlerResource(_ => cb.complete(())).allocated + (handler, release) = handlerAndRelease + connection <- newHandledConnection() + (handledConnection, underlyingEncrypted) = connection + _ <- handler.registerOrClose(handledConnection) + numberOfActive <- handler.numberOfActiveConnections.waitFor(_ == 1) + _ <- release + numberOfActiveAfterDisconnect <- handler.numberOfActiveConnections + .waitFor(_ == 0) + callbackCompleted <- cb.tryGet.waitFor(_.isDefined).attempt + } yield { + assert(numberOfActive == 1) + assert(numberOfActiveAfterDisconnect == 0) + assert(callbackCompleted.isLeft) + } + } + + it should "multiplex messages from all open channels" in customTestCaseResourceT( + buildHandlerResource() + ) { handler => + val expectedNumberOfConnections = 4 + for { + connections <- buildNConnections(expectedNumberOfConnections) + _ <- Task.traverse(connections)(connection => + handler.registerOrClose(connection._1) + ) + maxNumberOfActiveConnections <- handler.numberOfActiveConnections + .waitFor(numOfConnections => + numOfConnections == expectedNumberOfConnections + ) + _ <- Task.traverse(connections) { case (_, encConnection) => + encConnection.pushRemoteEvent(Some(Right(MessageA(1)))) + } + receivedMessages <- handler.incomingMessages + .take(expectedNumberOfConnections) + .toListL + } yield { + + val senders = connections.map(_._1.key).toSet + val receivedFrom = receivedMessages.map(_.from).toSet + assert(receivedMessages.size == expectedNumberOfConnections) + assert(maxNumberOfActiveConnections == expectedNumberOfConnections) + assert( + senders.intersect(receivedFrom).size == expectedNumberOfConnections + ) + } + } + +} + +object ConnectionHandlerSpec { + implicit class TaskOps[A](task: Task[A]) { + def waitFor(condition: A => Boolean)(implicit timeOut: FiniteDuration) = { + task.restartUntil(condition).timeout(timeOut) + } + } + + def buildHandlerResource( + cb: HandledConnection[Task, Secp256k1Key, TestMessage] => Task[Unit] = + _ => Task(()) + ): Resource[Task, ConnectionHandler[Task, Secp256k1Key, TestMessage]] = { + ConnectionHandler + .apply[Task, Secp256k1Key, TestMessage](cb) + } + + def newHandledConnection( + remotePeerInfo: (Secp256k1Key, InetSocketAddress) = + (Secp256k1Key.getFakeRandomKey, fakeLocalAddress) + )(implicit + s: Scheduler + ): Task[ + ( + HandledConnection[Task, Secp256k1Key, TestMessage], + MockEncryptedConnection + ) + ] = { + for { + enc <- MockEncryptedConnection(remotePeerInfo) + } yield (HandledConnection.outgoing(enc), enc) + } + + def buildNConnections(n: Int)(implicit + s: Scheduler + ): Task[List[ + ( + HandledConnection[Task, Secp256k1Key, TestMessage], + MockEncryptedConnection + ) + ]] = { + Task.traverse((0 until n).toList)(_ => newHandledConnection()) + } + +} diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala b/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala new file mode 100644 index 00000000..71463239 --- /dev/null +++ b/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala @@ -0,0 +1,264 @@ +package io.iohk.metronome.networking + +import cats.effect.concurrent.{Deferred, Ref, TryableDeferred} +import 
cats.implicits.toFlatMapOps +import io.iohk.metronome.networking.EncryptedConnectionProvider.ConnectionAlreadyClosed +import io.iohk.metronome.networking.MockEncryptedConnectionProvider._ +import io.iohk.metronome.networking.RemoteConnectionManagerTestUtils.{ + Secp256k1Key, + TestMessage +} +import io.iohk.metronome.networking.RemoteConnectionManagerWithMockProviderSpec.fakeLocalAddress +import monix.catnap.ConcurrentQueue +import monix.eval.Task + +import java.net.InetSocketAddress + +class MockEncryptedConnectionProvider( + private val incomingConnections: ConcurrentQueue[Task, IncomingServerEvent], + private val onlineConnections: Ref[ + Task, + Map[Secp256k1Key, MockEncryptedConnection] + ], + private val connectionStatistics: ConnectionStatisticsHolder, + val localPeerInfo: (Secp256k1Key, InetSocketAddress) = + (Secp256k1Key.getFakeRandomKey, fakeLocalAddress) +) extends EncryptedConnectionProvider[Task, Secp256k1Key, TestMessage] { + + private def connect(k: Secp256k1Key) = { + onlineConnections.get.flatMap { state => + state.get(k) match { + case Some(value) => Task.now(value) + case None => + Task.raiseError(new RuntimeException("Failed connections")) + } + } + } + + override def connectTo( + k: Secp256k1Key, + address: InetSocketAddress + ): Task[MockEncryptedConnection] = { + (for { + _ <- connectionStatistics.incrementInFlight(k) + connection <- connect(k) + } yield connection).doOnFinish(_ => connectionStatistics.decrementInFlight) + } + + override def incomingConnection: Task[IncomingServerEvent] = + incomingConnections.poll +} + +object MockEncryptedConnectionProvider { + def apply(): Task[MockEncryptedConnectionProvider] = { + for { + queue <- ConcurrentQueue.unbounded[Task, IncomingServerEvent]() + connections <- Ref.of[Task, Map[Secp256k1Key, MockEncryptedConnection]]( + Map.empty + ) + connectionsStatistics <- Ref.of[Task, ConnectionStatistics]( + ConnectionStatistics(0, 0, Map.empty) + ) + } yield new MockEncryptedConnectionProvider( + queue, + connections, + new ConnectionStatisticsHolder(connectionsStatistics) + ) + } + + implicit class MockEncryptedConnectionProviderTestMethodsOps( + provider: MockEncryptedConnectionProvider + ) { + + private def disconnect( + withFailure: Boolean, + chosenPeer: Option[Secp256k1Key] = None + ): Task[MockEncryptedConnection] = { + provider.onlineConnections + .modify { current => + chosenPeer.fold { + val peer = current.head + (current - peer._1, peer._2) + } { keyToFail => + val peer = current(keyToFail) + (current - keyToFail, peer) + } + } + .flatTap { connection => + if (withFailure) { + connection.closeRemoteWithoutInfo + } else { + connection.close + } + } + } + + def randomPeerDisconnect(): Task[MockEncryptedConnection] = { + disconnect(withFailure = false) + } + + def specificPeerDisconnect( + key: Secp256k1Key + ): Task[MockEncryptedConnection] = { + disconnect(withFailure = false, Some(key)) + } + + def failRandomPeer(): Task[MockEncryptedConnection] = { + disconnect(withFailure = true) + } + + def registerOnlinePeer(key: Secp256k1Key): Task[MockEncryptedConnection] = { + for { + connection <- MockEncryptedConnection((key, fakeLocalAddress)) + _ <- provider.onlineConnections.update { connections => + connections.updated( + key, + connection + ) + } + } yield connection + } + + def getAllRegisteredPeers: Task[Set[MockEncryptedConnection]] = { + provider.onlineConnections.get.map(connections => + connections.values.toSet + ) + } + + def newIncomingPeer(key: Secp256k1Key): Task[MockEncryptedConnection] = { + 
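// register the peer as online first, then surface its connection through the provider's incoming-connections queue +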
registerOnlinePeer(key).flatMap { connection => + provider.incomingConnections + .offer(Some(Right(connection))) + .map(_ => connection) + } + } + + def getReceivedMessagesPerPeer + : Task[Set[(Secp256k1Key, List[TestMessage])]] = { + provider.onlineConnections.get.flatMap { connections => + Task.traverse(connections.toSet) { case (key, connection) => + connection.getReceivedMessages.map(received => (key, received)) + } + } + } + + def getStatistics: Task[ConnectionStatistics] = + provider.connectionStatistics.stats.get + + } + + case class ConnectionStatistics( + inFlightConnections: Long, + maxInFlightConnections: Long, + connectionCounts: Map[Secp256k1Key, Long] + ) + + class ConnectionStatisticsHolder(val stats: Ref[Task, ConnectionStatistics]) { + def incrementInFlight(connectionTo: Secp256k1Key): Task[Unit] = { + stats.update { current => + val newInFlight = current.inFlightConnections + 1 + val newMax = + if (newInFlight > current.maxInFlightConnections) newInFlight + else current.maxInFlightConnections + + val newPerConnectionStats = + current.connectionCounts.get(connectionTo) match { + case Some(value) => + current.connectionCounts.updated(connectionTo, value + 1L) + case None => current.connectionCounts.updated(connectionTo, 0L) + } + + ConnectionStatistics(newInFlight, newMax, newPerConnectionStats) + } + } + + def decrementInFlight: Task[Unit] = { + stats.update(current => + current.copy(inFlightConnections = current.inFlightConnections - 1) + ) + } + } + + type IncomingServerEvent = Option[Either[ + EncryptedConnectionProvider.HandshakeFailed, + EncryptedConnection[Task, Secp256k1Key, TestMessage] + ]] + + type IncomingConnectionEvent = + Option[Either[EncryptedConnectionProvider.ConnectionError, TestMessage]] + + class MockEncryptedConnection( + private val incomingEvents: ConcurrentQueue[ + Task, + IncomingConnectionEvent + ], + private val closeToken: TryableDeferred[Task, Unit], + private val sentMessages: Ref[Task, List[TestMessage]], + val remotePeerInfo: (Secp256k1Key, InetSocketAddress) = + (Secp256k1Key.getFakeRandomKey, fakeLocalAddress) + ) extends EncryptedConnection[Task, Secp256k1Key, TestMessage] { + + override def close: Task[Unit] = { + Task + .parZip2(incomingEvents.offer(None), closeToken.complete(()).attempt) + .void + } + + override def incomingMessage: Task[IncomingConnectionEvent] = + incomingEvents.poll + + override def sendMessage(m: TestMessage): Task[Unit] = + Task + .race(closeToken.get, sentMessages.update(current => m :: current)) + .flatMap { + case Left(_) => + Task.raiseError(ConnectionAlreadyClosed(remotePeerInfo._2)) + case Right(_) => Task.now(()) + } + } + + object MockEncryptedConnection { + def apply( + remotePeerInfo: (Secp256k1Key, InetSocketAddress) = + (Secp256k1Key.getFakeRandomKey, fakeLocalAddress) + ): Task[MockEncryptedConnection] = { + for { + incomingEvents <- ConcurrentQueue + .unbounded[Task, IncomingConnectionEvent]() + closeToken <- Deferred.tryable[Task, Unit] + sentMessages <- Ref.of[Task, List[TestMessage]](List.empty[TestMessage]) + } yield new MockEncryptedConnection( + incomingEvents, + closeToken, + sentMessages, + remotePeerInfo + ) + } + + implicit class MockEncryptedConnectionTestMethodsOps( + connection: MockEncryptedConnection + ) { + lazy val key = connection.remotePeerInfo._1 + + def pushRemoteEvent( + ev: Option[ + Either[EncryptedConnectionProvider.ConnectionError, TestMessage] + ] + ): Task[Unit] = { + connection.incomingEvents.offer(ev) + } + + def getReceivedMessages: Task[List[TestMessage]] = + 
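// "received" from the remote peer's perspective: these are the messages that the code under test sent through this mock connection +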
connection.sentMessages.get + + // it is possible that in some cases remote peer will be closed without generating final None event in incoming events + // queue + def closeRemoteWithoutInfo: Task[Unit] = + connection.closeToken.complete(()) + + def isClosed: Task[Boolean] = + connection.closeToken.tryGet.map(closed => closed.isDefined) + } + } + +} diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala new file mode 100644 index 00000000..30b005b2 --- /dev/null +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala @@ -0,0 +1,78 @@ +package io.iohk.metronome.networking + +import cats.effect.Resource +import monix.eval.Task +import monix.execution.Scheduler +import org.bouncycastle.crypto.AsymmetricCipherKeyPair +import org.scalatest.Assertion +import scodec.Codec +import scodec.bits.BitVector + +import java.net.{InetSocketAddress, ServerSocket} +import java.security.SecureRandom +import scala.concurrent.Future +import scala.util.Random + +object RemoteConnectionManagerTestUtils { + def customTestCaseResourceT[T]( + fixture: Resource[Task, T] + )(theTest: T => Task[Assertion])(implicit s: Scheduler): Future[Assertion] = { + fixture.use(fix => theTest(fix)).runToFuture + } + + def customTestCaseT[T]( + test: => Task[Assertion] + )(implicit s: Scheduler): Future[Assertion] = { + test.runToFuture + } + + def randomAddress(): InetSocketAddress = { + val s = new ServerSocket(0) + try { + new InetSocketAddress("localhost", s.getLocalPort) + } finally { + s.close() + } + } + + import scodec.codecs._ + + sealed abstract class TestMessage + case class MessageA(i: Int) extends TestMessage + case class MessageB(s: String) extends TestMessage + + object TestMessage { + implicit val messageCodec: Codec[TestMessage] = discriminated[TestMessage] + .by(uint8) + .typecase(1, int32.as[MessageA]) + .typecase(2, utf8.as[MessageB]) + } + + case class Secp256k1Key(key: BitVector) + + object Secp256k1Key { + implicit val codec: Codec[Secp256k1Key] = bits.as[Secp256k1Key] + + def getFakeRandomKey: Secp256k1Key = { + val array = new Array[Byte](64) + Random.nextBytes(array) + Secp256k1Key(BitVector(array)) + } + + } + + case class NodeInfo(keyPair: AsymmetricCipherKeyPair, publicKey: Secp256k1Key) + + object NodeInfo { + def generateRandom(secureRandom: SecureRandom): NodeInfo = { + val keyPair = + metronome.crypto.Secp256k1Utils.generateKeyPair(secureRandom) + NodeInfo( + keyPair, + Secp256k1Key( + metronome.crypto.Secp256k1Utils.keyPairToUncompressed(keyPair) + ) + ) + } + } +} diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala new file mode 100644 index 00000000..7694efa7 --- /dev/null +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala @@ -0,0 +1,353 @@ +package io.iohk.metronome.networking + +import cats.effect.Resource +import io.iohk.metronome.networking.ConnectionHandler.ConnectionAlreadyClosedException +import io.iohk.metronome.networking.EncryptedConnectionProvider.DecodingError +import io.iohk.metronome.networking.MockEncryptedConnectionProvider._ +import io.iohk.metronome.networking.RemoteConnectionManager.RetryConfig.RandomJitterConfig +import 
io.iohk.metronome.networking.RemoteConnectionManager.{ + ClusterConfig, + RetryConfig +} +import io.iohk.metronome.networking.RemoteConnectionManagerTestUtils._ +import io.iohk.metronome.networking.RemoteConnectionManagerWithMockProviderSpec.{ + RemoteConnectionManagerOps, + buildConnectionsManagerWithMockProvider, + buildTestCaseWithNPeers, + defaultToMake, + fakeLocalAddress, + longRetryConfig +} +import monix.eval.Task +import monix.execution.Scheduler +import org.scalatest.flatspec.AsyncFlatSpecLike +import org.scalatest.matchers.should.Matchers + +import java.net.InetSocketAddress +import scala.concurrent.duration._ + +class RemoteConnectionManagerWithMockProviderSpec + extends AsyncFlatSpecLike + with Matchers { + implicit val testScheduler = + Scheduler.fixedPool("RemoteConnectionManagerUtSpec", 16) + implicit val timeOut = 5.seconds + + behavior of "RemoteConnectionManagerWithMockProvider" + + it should "continue to make connections to unresponsive peer with exponential backoff" in customTestCaseT { + MockEncryptedConnectionProvider().flatMap(provider => + buildConnectionsManagerWithMockProvider(provider) + .use { connectionManager => + for { + _ <- Task.sleep(800.milliseconds) + stats <- provider.getStatistics + acquiredConnections <- connectionManager.getAcquiredConnections + } yield { + assert(stats.maxInFlightConnections == 1) + assert(stats.connectionCounts.get(defaultToMake).contains(3)) + assert(acquiredConnections.isEmpty) + } + } + ) + } + + it should "continue to make connections to unresponsive peers one connection at the time" in customTestCaseT { + val connectionToMake = + (0 to 3).map(_ => (Secp256k1Key.getFakeRandomKey, fakeLocalAddress)).toSet + MockEncryptedConnectionProvider().flatMap(provider => + buildConnectionsManagerWithMockProvider( + provider, + nodesInCluster = connectionToMake + ) + .use { connectionManager => + for { + _ <- Task.sleep(800.milliseconds) + stats <- provider.getStatistics + acquiredConnections <- connectionManager.getAcquiredConnections + } yield { + assert( + connectionToMake.forall(connection => + stats.connectionCounts + .get(connection._1) + .exists(count => count == 2 || count == 3) + ) + ) + assert(stats.maxInFlightConnections == 1) + assert(acquiredConnections.isEmpty) + } + } + ) + } + + it should "connect to online peers" in customTestCaseResourceT( + buildTestCaseWithNPeers(4) + ) { case (provider, manager, _) => + for { + stats <- provider.getStatistics + acquiredConnections <- manager.getAcquiredConnections + } yield { + assert(stats.maxInFlightConnections == 1) + assert(acquiredConnections.size == 4) + } + } + + it should "send messages to online peers" in customTestCaseResourceT( + buildTestCaseWithNPeers(4) + ) { case (provider, manager, _) => + for { + acquiredConnections <- manager.getAcquiredConnections + _ <- manager.getAcquiredConnections.flatMap(keys => + Task.traverse(keys)(key => manager.sendMessage(key, MessageA(2))) + ) + received <- provider.getReceivedMessagesPerPeer.map(_.map(_._2)) + stats <- provider.getStatistics + } yield { + assert(stats.maxInFlightConnections == 1) + assert(acquiredConnections.size == 4) + assert( + received.forall(peerMessages => peerMessages.contains(MessageA(2))) + ) + } + } + + it should "try to reconnect disconnected peer" in customTestCaseResourceT( + buildTestCaseWithNPeers(2) + ) { case (provider, manager, _) => + for { + disconnectedPeer <- provider.randomPeerDisconnect() + _ <- manager.waitForNConnections(1) + notContainDisconnectedPeer <- manager.notContainsConnection( + 
disconnectedPeer + ) + _ <- provider.registerOnlinePeer(disconnectedPeer.key) + _ <- manager.waitForNConnections(2) + containsAfterReconnect <- manager.containsConnection(disconnectedPeer) + } yield { + assert(notContainDisconnectedPeer) + assert(containsAfterReconnect) + } + } + + it should "try to reconnect to failed peer after failed send" in customTestCaseResourceT( + buildTestCaseWithNPeers(2) + ) { case (provider, manager, _) => + for { + disconnectedPeer <- provider.failRandomPeer() + _ <- Task.sleep(100.milliseconds) + // remote peer failed without any notice, we still have it in our acquired connections + containsFailedPeer <- manager.containsConnection(disconnectedPeer) + sendResult <- manager + .sendMessage(disconnectedPeer.key, MessageA(1)) + .map(result => result.left.getOrElse(null)) + _ <- Task( + assert( + sendResult == ConnectionAlreadyClosedException(disconnectedPeer.key) + ) + ) + notContainsFailedPeerAfterSend <- manager.notContainsConnection( + disconnectedPeer + ) + _ <- provider.registerOnlinePeer(disconnectedPeer.key) + _ <- manager.waitForNConnections(2) + containsFailedAfterReconnect <- manager.containsConnection( + disconnectedPeer + ) + } yield { + assert(containsFailedPeer) + assert(notContainsFailedPeerAfterSend) + assert(containsFailedAfterReconnect) + } + } + + it should "fail sending message to unknown peer" in customTestCaseResourceT( + buildTestCaseWithNPeers(2) + ) { case (provider, manager, _) => + val randomKey = Secp256k1Key.getFakeRandomKey + for { + sendResult <- manager.sendMessage(randomKey, MessageA(1)) + } yield { + assert(sendResult.isLeft) + assert( + sendResult.left.getOrElse(null) == ConnectionAlreadyClosedException( + randomKey + ) + ) + } + } + + it should "deny not allowed incoming connections " in customTestCaseResourceT( + buildTestCaseWithNPeers(2) + ) { case (provider, manager, _) => + for { + incomingPeerConnection <- provider.newIncomingPeer( + Secp256k1Key.getFakeRandomKey + ) + _ <- Task.sleep(100.milliseconds) + notContainsNotAllowedIncoming <- manager.notContainsConnection( + incomingPeerConnection + ) + closedIncoming <- incomingPeerConnection.isClosed + } yield { + assert(notContainsNotAllowedIncoming) + assert(closedIncoming) + } + } + + it should "allow configured incoming connections" in customTestCaseResourceT( + buildTestCaseWithNPeers(2, shouldBeOnline = false, longRetryConfig) + ) { case (provider, manager, clusterPeers) => + for { + initialAcquired <- manager.getAcquiredConnections + incomingConnection <- provider.newIncomingPeer(clusterPeers.head) + _ <- manager.waitForNConnections(1) + containsIncoming <- manager.containsConnection(incomingConnection) + } yield { + assert(initialAcquired.isEmpty) + assert(containsIncoming) + } + } + + it should "not allow duplicated incoming peer" in customTestCaseResourceT( + buildTestCaseWithNPeers(2, shouldBeOnline = false, longRetryConfig) + ) { case (provider, manager, clusterPeers) => + for { + initialAcquired <- manager.getAcquiredConnections + incomingConnection <- provider.newIncomingPeer(clusterPeers.head) + _ <- manager.waitForNConnections(1) + containsIncoming <- manager.containsConnection(incomingConnection) + duplicatedIncoming <- provider.newIncomingPeer(clusterPeers.head) + duplicatedIncomingClosed <- duplicatedIncoming.isClosed + } yield { + assert(initialAcquired.isEmpty) + assert(containsIncoming) + assert(duplicatedIncomingClosed) + } + } + + it should "disconnect from peer on which connection error happened" in customTestCaseResourceT( + 
buildTestCaseWithNPeers(2) + ) { case (provider, manager, _) => + for { + initialAcquired <- manager.getAcquiredConnections + randomAcquiredConnection <- provider.getAllRegisteredPeers.map(_.head) + _ <- randomAcquiredConnection.pushRemoteEvent(Some(Left(DecodingError))) + _ <- manager.waitForNConnections(1) + errorIsClosed <- randomAcquiredConnection.isClosed + } yield { + assert(initialAcquired.size == 2) + assert(errorIsClosed) + } + } + + it should "receive messages from all connections" in customTestCaseResourceT( + buildTestCaseWithNPeers(2) + ) { case (provider, manager, _) => + for { + acquiredConnections <- manager.getAcquiredConnections + connections <- provider.getAllRegisteredPeers + _ <- Task.traverse(connections)(conn => + conn.pushRemoteEvent(Some(Right(MessageA(1)))) + ) + received <- manager.incomingMessages.take(2).toListL + } yield { + assert(acquiredConnections.size == 2) + assert(received.size == 2) + } + } + +} + +object RemoteConnectionManagerWithMockProviderSpec { + implicit class RemoteConnectionManagerOps( + manager: RemoteConnectionManager[Task, Secp256k1Key, TestMessage] + ) { + def waitForNConnections( + n: Int + )(implicit timeOut: FiniteDuration): Task[Unit] = { + manager.getAcquiredConnections + .restartUntil(connections => connections.size == n) + .timeout(timeOut) + .void + } + + def containsConnection( + connection: MockEncryptedConnection + ): Task[Boolean] = { + manager.getAcquiredConnections.map(connections => + connections.contains(connection.remotePeerInfo._1) + ) + } + + def notContainsConnection( + connection: MockEncryptedConnection + ): Task[Boolean] = { + containsConnection(connection).map(contains => !contains) + } + } + + val noJitterConfig = RandomJitterConfig.buildJitterConfig(0).get + val quickRetryConfig = + RetryConfig(50.milliseconds, 2, 2.seconds, noJitterConfig) + val longRetryConfig: RetryConfig = + RetryConfig(5.seconds, 2, 20.seconds, noJitterConfig) + + def buildTestCaseWithNPeers( + n: Int, + shouldBeOnline: Boolean = true, + retryConfig: RetryConfig = quickRetryConfig + )(implicit timeOut: FiniteDuration): Resource[ + Task, + ( + MockEncryptedConnectionProvider, + RemoteConnectionManager[Task, Secp256k1Key, TestMessage], + Set[Secp256k1Key] + ) + ] = { + val keys = (0 until n).map(_ => (Secp256k1Key.getFakeRandomKey)).toSet + + for { + provider <- Resource.liftF(MockEncryptedConnectionProvider()) + _ <- Resource.liftF { + if (shouldBeOnline) { + Task.traverse(keys)(key => provider.registerOnlinePeer(key)) + } else { + Task.unit + } + } + manager <- buildConnectionsManagerWithMockProvider( + provider, + retryConfig = retryConfig, + nodesInCluster = keys.map(key => (key, fakeLocalAddress)) + ) + _ <- Resource.liftF { + if (shouldBeOnline) { + manager.waitForNConnections(n) + } else { + Task.unit + } + } + } yield (provider, manager, keys) + } + + val fakeLocalAddress = new InetSocketAddress("localhost", 127) + + val defalutAllowed = Secp256k1Key.getFakeRandomKey + val defaultToMake = Secp256k1Key.getFakeRandomKey + + def buildConnectionsManagerWithMockProvider( + ec: MockEncryptedConnectionProvider, + retryConfig: RetryConfig = quickRetryConfig, + nodesInCluster: Set[(Secp256k1Key, InetSocketAddress)] = Set( + (defaultToMake, fakeLocalAddress) + ) + ): Resource[ + Task, + RemoteConnectionManager[Task, Secp256k1Key, TestMessage] + ] = { + val clusterConfig = ClusterConfig(nodesInCluster) + + RemoteConnectionManager(ec, clusterConfig, retryConfig) + } + +} diff --git 
a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala new file mode 100644 index 00000000..81033157 --- /dev/null +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala @@ -0,0 +1,329 @@ +package io.iohk.metronome.networking + +import cats.data.NonEmptyList +import cats.effect.concurrent.Ref +import cats.effect.{Concurrent, ContextShift, Resource, Timer} +import io.iohk.metronome.networking.ConnectionHandler.MessageReceived +import io.iohk.metronome.networking.RemoteConnectionManager.{ + ClusterConfig, + RetryConfig +} +import io.iohk.metronome.networking.RemoteConnectionManagerTestUtils._ +import io.iohk.metronome.networking.RemoteConnectionManagerWithScalanetProviderSpec.{ + Cluster, + buildTestConnectionManager +} +import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSPeerGroup.FramingConfig +import monix.eval.{Task, TaskLift, TaskLike} +import monix.execution.Scheduler +import org.bouncycastle.crypto.AsymmetricCipherKeyPair +import org.scalatest.flatspec.AsyncFlatSpecLike +import org.scalatest.matchers.should.Matchers +import scodec.Codec + +import java.net.InetSocketAddress +import java.security.SecureRandom +import scala.concurrent.duration._ + +class RemoteConnectionManagerWithScalanetProviderSpec + extends AsyncFlatSpecLike + with Matchers { + implicit val testScheduler = + Scheduler.fixedPool("RemoteConnectionManagerSpec", 16) + + implicit val timeOut = 10.seconds + + behavior of "RemoteConnectionManagerWithScalanetProvider" + + it should "start connectionManager without any connections" in customTestCaseResourceT( + buildTestConnectionManager[Task, Secp256k1Key, TestMessage]() + ) { connectionManager => + for { + connections <- connectionManager.getAcquiredConnections + } yield assert(connections.isEmpty) + } + + it should "build fully connected cluster of 3 nodes" in customTestCaseResourceT( + Cluster.buildCluster(3) + ) { cluster => + for { + size <- cluster.clusterSize + eachNodeCount <- cluster.getEachNodeConnectionsCount + } yield { + assert(eachNodeCount.forall(count => count == 2)) + assert(size == 3) + } + } + + it should "build fully connected cluster of 4 nodes" in customTestCaseResourceT( + Cluster.buildCluster(4) + ) { cluster => + for { + size <- cluster.clusterSize + eachNodeCount <- cluster.getEachNodeConnectionsCount + } yield { + assert(eachNodeCount.forall(count => count == 3)) + assert(size == 4) + } + } + + it should "send and receive messages with other nodes in cluster" in customTestCaseResourceT( + Cluster.buildCluster(3) + ) { cluster => + for { + eachNodeCount <- cluster.getEachNodeConnectionsCount + sendResult <- cluster.sendMessageFromRandomNodeToAllOthers(MessageA(1)) + (sender, receivers) = sendResult + received <- Task.traverse(receivers.toList)(receiver => + cluster.getMessageFromNode(receiver) + ) + } yield { + assert(eachNodeCount.forall(count => count == 2)) + assert(receivers.size == 2) + assert(received.size == 2) + //every node should have received the same message + assert( + received.forall(receivedMessage => + receivedMessage == MessageReceived(sender, MessageA(1)) + ) + ) + } + } + + it should "eventually reconnect to offline node" in customTestCaseResourceT( + Cluster.buildCluster(3) + ) { cluster => + for { + size <- cluster.clusterSize + killed <- cluster.shutdownRandomNode + _ <- 
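// generate some traffic while the node is down (sends towards the killed peer may fail; the result is ignored)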
cluster.sendMessageFromRandomNodeToAllOthers(MessageA(1)) + (address, keyPair, clusterConfig) = killed + _ <- cluster.waitUntilEveryNodeHaveNConnections(1) + // be offline for a moment + _ <- Task.sleep(3.seconds) + connectionAfterFailure <- cluster.getEachNodeConnectionsCount + _ <- cluster.startNode(address, keyPair, clusterConfig) + _ <- cluster.waitUntilEveryNodeHaveNConnections(2) + } yield { + assert(size == 3) + assert(connectionAfterFailure.forall(connections => connections == 1)) + } + } +} +object RemoteConnectionManagerWithScalanetProviderSpec { + val secureRandom = new SecureRandom() + val standardFraming = + FramingConfig.buildStandardFrameConfig(1000000, 4).getOrElse(null) + val testIncomingQueueSize = 20 + + def buildTestConnectionManager[ + F[_]: Concurrent: TaskLift: TaskLike: Timer, + K: Codec, + M: Codec + ]( + bindAddress: InetSocketAddress = randomAddress(), + nodeKeyPair: AsymmetricCipherKeyPair = + metronome.crypto.Secp256k1Utils.generateKeyPair(secureRandom), + secureRandom: SecureRandom = secureRandom, + useNativeTlsImplementation: Boolean = false, + framingConfig: FramingConfig = standardFraming, + maxIncomingQueueSizePerPeer: Int = testIncomingQueueSize, + clusterConfig: ClusterConfig[K] = ClusterConfig( + Set.empty[(K, InetSocketAddress)] + ), + retryConfig: RetryConfig = RetryConfig.default + )(implicit + s: Scheduler, + cs: ContextShift[F] + ): Resource[F, RemoteConnectionManager[F, K, M]] = { + ScalanetConnectionProvider + .scalanetProvider[F, K, M]( + bindAddress, + nodeKeyPair, + secureRandom, + useNativeTlsImplementation, + framingConfig, + maxIncomingQueueSizePerPeer + ) + .flatMap(prov => + RemoteConnectionManager(prov, clusterConfig, retryConfig) + ) + } + + type ClusterNodes = Map[ + Secp256k1Key, + ( + RemoteConnectionManager[Task, Secp256k1Key, TestMessage], + AsymmetricCipherKeyPair, + ClusterConfig[Secp256k1Key], + Task[Unit] + ) + ] + + def buildClusterNodes( + keys: NonEmptyList[NodeInfo] + )(implicit + s: Scheduler, + timeOut: FiniteDuration + ): Task[Ref[Task, ClusterNodes]] = { + val keyWithAddress = keys.toList.map(key => (key, randomAddress())).toSet + + for { + nodes <- Ref.of[Task, ClusterNodes](Map.empty) + _ <- Task.traverse(keyWithAddress) { case (info, address) => + val clusterConfig = ClusterConfig(clusterNodes = + keyWithAddress.map(keyWithAddress => + (keyWithAddress._1.publicKey, keyWithAddress._2) + ) + ) + + buildTestConnectionManager[Task, Secp256k1Key, TestMessage]( + bindAddress = address, + nodeKeyPair = info.keyPair, + clusterConfig = clusterConfig + ).allocated.flatMap { case (manager, release) => + nodes.update(map => + map + (manager.getLocalPeerInfo._1 -> (manager, info.keyPair, clusterConfig, release)) + ) + } + } + + } yield nodes + } + + class Cluster(nodes: Ref[Task, ClusterNodes]) { + + private def broadcastToAllConnections( + manager: RemoteConnectionManager[Task, Secp256k1Key, TestMessage], + message: TestMessage + ) = { + manager.getAcquiredConnections.flatMap { connections => + Task + .parTraverseUnordered(connections)(connectionKey => + manager.sendMessage(connectionKey, message) + ) + .map { _ => + connections + } + } + + } + + def clusterSize: Task[Int] = nodes.get.map(_.size) + + def getEachNodeConnectionsCount: Task[List[Int]] = { + for { + runningNodes <- nodes.get.flatMap(nodes => + Task.traverse(nodes.values.map(_._1))(manager => + manager.getAcquiredConnections + ) + ) + + } yield runningNodes.map(_.size).toList + } + + def waitUntilEveryNodeHaveNConnections( + n: Int + )(implicit timeOut: 
FiniteDuration): Task[List[Int]] = { + getEachNodeConnectionsCount + .restartUntil(counts => + counts.forall(currentNodeConnectionCount => + currentNodeConnectionCount == n + ) + ) + .timeout(timeOut) + } + + def closeAllNodes: Task[Unit] = { + nodes.get.flatMap { nodes => + Task + .parTraverseUnordered(nodes.values) { case (node, _, _, release) => + release + } + .void + } + } + + def sendMessageFromRandomNodeToAllOthers( + message: TestMessage + ): Task[(Secp256k1Key, Set[Secp256k1Key])] = { + for { + runningNodes <- nodes.get + (key, (node, _, _, _)) = runningNodes.head + nodesReceivingMessage <- broadcastToAllConnections(node, message) + } yield (key, nodesReceivingMessage) + } + + def sendMessageFromAllClusterNodesToTheirConnections( + message: TestMessage + ): Task[List[(Secp256k1Key, Set[Secp256k1Key])]] = { + nodes.get.flatMap { current => + Task.parTraverseUnordered(current.values) { case (manager, _, _, _) => + broadcastToAllConnections(manager, message).map { receivers => + (manager.getLocalPeerInfo._1 -> receivers) + } + } + } + } + + def getMessageFromNode(key: Secp256k1Key) = { + nodes.get.flatMap { runningNodes => + runningNodes(key)._1.incomingMessages.take(1).toListL.map(_.head) + } + } + + def shutdownRandomNode: Task[ + (InetSocketAddress, AsymmetricCipherKeyPair, ClusterConfig[Secp256k1Key]) + ] = { + for { + current <- nodes.get + ( + randomNodeKey, + (randomManager, nodeKeyPair, clusterConfig, randomRelease) + ) = current.head + _ <- randomRelease + _ <- nodes.update(current => current - randomNodeKey) + } yield (randomManager.getLocalPeerInfo._2, nodeKeyPair, clusterConfig) + } + + def startNode( + bindAddress: InetSocketAddress, + key: AsymmetricCipherKeyPair, + clusterConfig: ClusterConfig[Secp256k1Key] + )(implicit s: Scheduler): Task[Unit] = { + buildTestConnectionManager[Task, Secp256k1Key, TestMessage]( + bindAddress = bindAddress, + nodeKeyPair = key, + clusterConfig = clusterConfig + ).allocated.flatMap { case (manager, release) => + nodes.update { current => + current + (manager.getLocalPeerInfo._1 -> (manager, key, clusterConfig, release)) + } + } + } + + } + + object Cluster { + def buildCluster(size: Int)(implicit + s: Scheduler, + timeOut: FiniteDuration + ): Resource[Task, Cluster] = { + val nodeInfos = NonEmptyList.fromListUnsafe( + ((0 until size).map(_ => NodeInfo.generateRandom(secureRandom)).toList) + ) + + Resource.make { + for { + nodes <- buildClusterNodes(nodeInfos) + cluster = new Cluster(nodes) + _ <- cluster.getEachNodeConnectionsCount + .restartUntil(counts => counts.forall(count => count == size - 1)) + .timeout(timeOut) + } yield cluster + } { cluster => cluster.closeAllNodes } + } + + } + +} From 057c691422858c08fd6d60f222c57a61b1f0a28b Mon Sep 17 00:00:00 2001 From: NeilBurgess42 Date: Mon, 29 Mar 2021 18:59:18 +1000 Subject: [PATCH 09/48] PM-2773 Update GitHub Doc, cosmetic changes to readme (#10) --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 97e0af39..179c5040 100644 --- a/README.md +++ b/README.md @@ -3,36 +3,36 @@ Metronome is a checkpointing component for Proof-of-Work blockchains, using the [HotStuff BFT](https://arxiv.org/pdf/1803.05069.pdf) algorithm. ## Overview -Checkpoints provides finality to blockchains by attesting to the hash of well-embedded blocks. A proper checkpointing system can secure the blockchain even under adversary with super-majority mining power. 
+Checkpointing provides finality to blockchains by attesting to the hash of well-embedded blocks. A proper checkpointing system can secure the blockchain even against an adversary with super-majority mining power. -The Metronome checkpointing system consists of a generic BFT Service (preferrably HotStuff), a Checkpoint-assisted Blockchain, and a Checkpointing Intepreter that bridges the two. This structure enables many features, including flexible BFT choices, multi-chain support, plug-and-play forensic monitoring platform via the BFT service, as well as the ability of bridging trust between two different blockchains. +The Metronome checkpointing system consists of a generic BFT Service (preferably HotStuff), a Checkpoint-assisted Blockchain, and a Checkpointing Interpreter that bridges the two. This structure enables many features, including flexible BFT choices, multi-chain support, plug-and-play forensic monitoring platform via the BFT service, and the capability of bridging trust between two different blockchains. ### Architecture -BFT Service: Committee-based BFT service with a simple and generic interface: It takes consensus candidates (e.g., checkpoint candidates) as input and generates certificates for the elected ones. +BFT Service: A committee-based BFT service with a simple and generic interface. It takes consensus candidates (e.g., checkpoint candidates) as input and generates certificates for the elected ones. Checkpoint-assisted Blockchain: Maintains the main blockchain that accepts and applies checkpointing results. The checkpointing logic is delegated to the checkpointing interpreter below. Checkpointing Interpreter: Maintains checkpointing logic, including the creation and validation (via blockchain) of checkpointing candidates, as well as checkpoint-related validation of new blockchain blocks. -Each of these modules can be developed independently with only minor data structure changes required for compatibility. This independence allows us to be flexible with the choice of BFT algorithm (e.g., variants of OBFT or Hotstuff) and checkpointing interpreter (e.g., simple checkpoints or Advocate). +Each of these modules can be developed independently with only minor data structure changes required for compatibility. This independence allows flexibility with the choice of BFT algorithm (e.g., variants of OBFT or Hotstuff) and checkpointing interpreter (e.g., simple checkpoints or Advocate). -The architecture also enables a convenient forensic monitoring module: By simply connecting to the BFT service, the forensics module can download the stream of consensus data and detect illegal behaviors such as collusion and identify the offenders. +The architecture also enables a convenient forensic monitoring module. By simply connecting to the BFT service, the forensics module can download the stream of consensus data and detect illegal behaviors such as collusion, and identify the offenders. -![](docs/architecture.png) +![Architecture diagram](docs/architecture.png) ### BFT Algorithm -The BFT service delegates checkpoint proposal and candidate validation to the Checkpointing Interpreter using 2-way communication to allow asynchronous responses as and when the data becomes available: +The BFT service delegates checkpoint proposal and candidate validation to the Checkpointing Interpreter using 2-way communication to allow asynchronous responses as and when the data becomes available. 
-![](docs/master-based.png) +![Algorithm diagram](docs/master-based.png) -When a winner is elected a Checkpoint Certificate is compiled, comprising of the checkpointed data (a block identity, or something more complex) as well as a witness for the BFT agreement, which proves that the decision is final and cannot be rolled back. Because of the need for this proof, low latency BFT algorithms such as HotStuff are preferred. +When a winner is elected, a Checkpoint Certificate is compiled, comprising the checkpointed data (a block identity, or something more complex) and a witness for the BFT agreement, which proves that the decision is final and cannot be rolled back. Because of the need for this proof, low latency BFT algorithms such as HotStuff are preferred. ## Build -The project is built using [mill](https://github.com/com-lihaoyi/mill), which works fine with [Metals](https://scalameta.org/metals/docs/build-tools/mill.html). +The project is built using [Mill](https://github.com/com-lihaoyi/mill), which works fine with [Metals](https://scalameta.org/metals/docs/build-tools/mill.html). To compile everything, use the `__` wildcard: @@ -73,6 +73,6 @@ The initial version has been written to the file without newlines: echo -n "0.1.0-SNAPSHOT" > versionFile/version ``` -Builds on `develop` will publish the snapshot version to Sonatype, which can be overwritten if the version number isn't updated. +Builds on `develop` will publish the snapshot version to Sonatype, which can be overwritten if the version number is not updated. -During [publishing](https://com-lihaoyi.github.io/mill/page/common-project-layouts.html#publishing) on `master` we'll use `mill versionFile.setReleaseVersion` to remove the `-SNAPSHOT` postfix and make a release. After that the version number should be bumped on `develop`, e.g. `mill versionFile.setNextVersion --bump minor`. +During [publishing](https://com-lihaoyi.github.io/mill/page/common-project-layouts.html#publishing) on `master` we will use `mill versionFile.setReleaseVersion` to remove the `-SNAPSHOT` postfix and make a release. After that the version number should be bumped on `develop`, e.g. `mill versionFile.setNextVersion --bump minor`. From 345f72e07ffe254967ef1d2d6f7d123c9538d6a2 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Mon, 29 Mar 2021 10:50:35 +0100 Subject: [PATCH 10/48] PM-3058: Allow specifying the number of Byzantine nodes. (#12) * PM-3058: Allow specifying the number of Byzantine nodes. 
* PM-2909: Undo accidental removal of the extension of RocksDBStoreProps.scala
---
 .../hotstuff/consensus/Federation.scala       | 71 +++++++++++++++++--
 .../consensus/basic/ProtocolState.scala       |  7 +-
 .../hotstuff/consensus/FederationSpec.scala   | 58 +++++++++++++++
 .../basic/HotStuffProtocolProps.scala         | 24 +++----
 ...torePropsscala => RocksDBStoreProps.scala} |  0
 5 files changed, 140 insertions(+), 20 deletions(-)
 create mode 100644 metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala
 rename metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/{RocksDBStorePropsscala => RocksDBStoreProps.scala} (100%)

diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala
index bd8bbe09..6e07eddf 100644
--- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala
+++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala
@@ -1,16 +1,40 @@
 package metronome.hotstuff.consensus

-/** Collection of keys of the federation members. */
-case class Federation[PKey](
-    publicKeys: IndexedSeq[PKey]
+/** Collection of keys of the federation members.
+  *
+  * There are two inequalities that decide the quorum size `q`:
+  *
+  * 1.) Safety inequality:
+  * There should not be two conflicting quorums.
+  * If two quorums conflict, their intersection has a size of at least `2q-n`.
+  * The intersection represents equivocation, and will have a size of
+  * at most `f` (since honest nodes don't equivocate).
+  * Thus for safety we need `2q-n > f` => `q > (n+f)/2`
+  *
+  * 2.) Liveness inequality:
+  * The quorum size should be small enough that adversaries cannot deadlock
+  * the system by not voting. If the quorum size is greater than `n-f`,
+  * adversaries may decide not to vote, and hence we will not have any quorum certificate.
+  * Thus, we need `q <= n-f`
+  *
+  * So any `q` between `(n+f)/2+1` and `n-f` should work.
+  * A smaller `q` is preferred as it improves speed.
+  * We can set it to `(n+f)/2+1` or fix it to `2/3n+1`.
+  * (For example, `n = 10` and `f = 3` give `q = (10+3)/2 + 1 = 7`.)
+  *
+  * Extra: the above two inequalities `(n+f)/2 < q <= n-f` lead to the constraint `f < n/3`, or `n >= 3*f+1`.
+  */
+abstract case class Federation[PKey](
+    publicKeys: IndexedSeq[PKey],
+    // Maximum number of Byzantine nodes.
+    maxFaulty: Int
 ) {
   private val publicKeySet = publicKeys.toSet

-  /** Size of the federation, `n`. */
+  /** Size of the federation. */
   val size: Int = publicKeys.size

-  /** Maximum number of Byzantine nodes, `f`, so that `n >= 3*f+1`. */
-  val maxFaulty: Int = (size - 1) / 3
+  /** Number of signatures required for a Quorum Certificate. */
+  val quorumSize: Int = (size + maxFaulty) / 2 + 1

   def contains(publicKey: PKey): Boolean =
     publicKeySet.contains(publicKey)
@@ -18,3 +42,38 @@ case class Federation[PKey](
   def leaderOf(viewNumber: ViewNumber): PKey =
     publicKeys((viewNumber % size).toInt)
 }
+
+object Federation {
+
+  /** Create a federation with the highest possible fault tolerance. */
+  def apply[PKey](
+      publicKeys: IndexedSeq[PKey]
+  ): Either[String, Federation[PKey]] =
+    apply(publicKeys, maxByzantine(publicKeys.size))
+
+  /** Create a federation with the fault tolerance possibly reduced from the theoretical
+    * maximum, which can allow smaller quorum sizes and improved speed.
+    *
+    * Returns an error if the configured value is higher than the theoretically tolerable maximum.
+ */ + def apply[PKey]( + publicKeys: IndexedSeq[PKey], + maxFaulty: Int + ): Either[String, Federation[PKey]] = { + val f = maxByzantine(publicKeys.size) + if (publicKeys.isEmpty) { + Left("The federation cannot be empty!") + } else if (publicKeys.distinct.size < publicKeys.size) { + Left("The keys in the federation must be unique!") + } else if (maxFaulty > f) { + Left( + s"The maximum tolerable number of Byzantine members is $f, less than the specified $maxFaulty." + ) + } else { + Right(new Federation(publicKeys, maxFaulty) {}) + } + } + + /** Maximum number of Byzantine nodes in a federation of size `n` */ + private def maxByzantine(n: Int): Int = (n - 1) / 3 +} diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala index 73fb58fa..e437edc2 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala +++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala @@ -65,8 +65,11 @@ case class ProtocolState[A <: Agreement: Block: Signing]( val leader = federation.leaderOf(viewNumber) val isLeader = leader == publicKey - /** The leader has to collect `n-f` signatures into a Q.C. */ - def quorumSize = federation.size - federation.maxFaulty + /** The leader has to collect `n-f` signatures into a Q.C. + * + * This value can be lower if we have higher trust in the federation. + */ + def quorumSize = federation.quorumSize /** No state transition. */ private def stay: Transition[A] = diff --git a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala new file mode 100644 index 00000000..aa75d63b --- /dev/null +++ b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala @@ -0,0 +1,58 @@ +package metronome.hotstuff.consensus + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import org.scalatest.Inside +import org.scalatest.prop.TableDrivenPropertyChecks._ + +class FederationSpec extends AnyFlatSpec with Matchers with Inside { + + behavior of "Federation" + + it should "not create an empty federation" in { + Federation(Vector.empty).isLeft shouldBe true + } + + it should "not create a federation with duplicate keys" in { + Federation(Vector(1, 2, 1)).isLeft shouldBe true + } + + it should "not create a federation with too high configured f" in { + Federation(1 to 4, maxFaulty = 2).isLeft shouldBe true + } + + it should "determine the correct f and q based on n" in { + val examples = Table( + ("n", "f", "q"), + (10, 3, 7), + (1, 0, 1), + (3, 0, 2), + (4, 1, 3) + ) + forAll(examples) { case (n, f, q) => + inside(Federation(1 to n)) { case Right(federation) => + federation.maxFaulty shouldBe f + federation.quorumSize shouldBe q + } + } + } + + it should "use lower quorum size if there are less faulties" in { + val examples = Table( + ("n", "f", "q"), + (10, 2, 7), + (10, 1, 6), + (10, 0, 6), + (9, 0, 5), + (100, 0, 51), + (100, 1, 51) + ) + forAll(examples) { case (n, f, q) => + inside(Federation(1 to n, f)) { case Right(federation) => + federation.maxFaulty shouldBe f + federation.quorumSize shouldBe q + } + } + } + +} diff --git a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala 
b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala
index 29b7881f..1191e397 100644
--- a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala
+++ b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala
@@ -121,7 +121,7 @@ object HotStuffProtocolCommands extends Commands {
       } else {
         val h = hash(phase, viewNumber, blockHash)

-        signature.sig.size == federation.size - federation.maxFaulty &&
+        signature.sig.size == federation.quorumSize &&
         signature.sig.forall { sig =>
           federation.publicKeys.exists { publicKey =>
             publicKey == unsign(sig, h)
@@ -152,7 +152,7 @@ object HotStuffProtocolCommands extends Commands {
     def isLeader = viewNumber % n == ownIndex
     def leader   = federation((viewNumber % n).toInt)

-    def `n - f` = n - f
+    def quorumSize = (n + f) / 2 + 1
   }

   // Keep a variable state in our System Under Test.
@@ -181,7 +181,8 @@ object HotStuffProtocolCommands extends Commands {
         phase = state.phase,
         publicKey = state.publicKey,
         signingKey = state.signingKey,
-        federation = Federation(state.federation),
+        federation = Federation(state.federation, state.f)
+          .getOrElse(sys.error("Invalid federation!")),
         prepareQC = genesisQC,
         lockedQC = genesisQC,
         lastExecutedBlockHash = genesisQC.blockHash,
@@ -196,9 +197,8 @@ object HotStuffProtocolCommands extends Commands {

   override def genInitialState: Gen[State] =
     for {
-      // Pick the max Byzantine nodes first, then size the federation based on that.
-      f <- Gen.choose(0, 3)
-      n = 3 * f + 1
+      n <- Gen.choose(1, 10)
+      f <- Gen.choose(0, (n - 1) / 3)

       ownIndex <- Gen.choose(0, n - 1)

@@ -482,7 +482,7 @@ object HotStuffProtocolCommands extends Commands {
       blockHash <- genLazy {
         state.maybeBlockHash.getOrElse(sys.error("No block for quorum."))
       }
-      pks <- Gen.pick(state.`n - f`, state.federation)
+      pks <- Gen.pick(state.quorumSize, state.federation)

       // The replica is expecting the Q.C. for the previous phase.
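      // (E.g. while in PreCommit, a replica expects a Q.C. formed from Prepare votes.)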
phase = votingPhaseFor(state.phase).getOrElse( sys.error(s"No voting phase for ${state.phase}") @@ -623,8 +623,8 @@ object HotStuffProtocolCommands extends Commands { "NewView" |: { if ( state.phase == Phase.Prepare && - state.newViewsFrom.size != state.`n - f` && - nextS.newViewsFrom.size == state.`n - f` + state.newViewsFrom.size != state.quorumSize && + nextS.newViewsFrom.size == state.quorumSize ) { result match { case Success(Right((next, effects))) => @@ -637,7 +637,7 @@ object HotStuffProtocolCommands extends Commands { "n-f collected" |: all( s"stays in the phase (${state.phase} -> ${next.phase})" |: next.phase == state.phase, - "records newView" |: next.newViews.size == state.`n - f`, + "records newView" |: next.newViews.size == state.quorumSize, "creates a block and nothing else" |: effects.size == 1 && effects.head.isInstanceOf[Effect.CreateBlock[_]], s"selects the highest QC: $highestView ?= $newViewsMax" |: highestView == newViewsMax @@ -783,8 +783,8 @@ object HotStuffProtocolCommands extends Commands { val nextS = nextState(state) val maybeBroadcast = if ( - state.votesFrom.size < state.`n - f` && - nextS.votesFrom.size == state.`n - f` + state.votesFrom.size < state.quorumSize && + nextS.votesFrom.size == state.quorumSize ) { "n - f collected" |: all( "broadcast to all" |: effects.size == state.federation.size, diff --git a/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStorePropsscala b/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreProps.scala similarity index 100% rename from metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStorePropsscala rename to metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreProps.scala From 35a1f2f8091224bb7296f868dcfbd96086aa57da Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Mon, 29 Mar 2021 12:40:03 +0100 Subject: [PATCH 11/48] PM-2938: Leader selection (#9) * PM-2938: Added Hash type and Keccak256. * PM-2938: Add LeaderSelection and RoundRobin. * PM-2938: LeaderSelection.Hashing * PM-2938: Use hashing leader selection in HotStuff basic tests. * PM-2938: Silence logs from networking in tests. * PM-2966: Add some sleep in tests. 
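As a rough usage sketch of the leader selection API introduced in the diff below (the string keys and the `LeaderSelectionDemo` wrapper are illustrative assumptions, not part of the patch):

```scala
import metronome.hotstuff.consensus.{Federation, LeaderSelection, ViewNumber}

object LeaderSelectionDemo extends App {
  // The hash-based strategy, so that keys which are consecutive in the
  // federation are not necessarily consecutive in the leader schedule.
  implicit val ls: LeaderSelection = LeaderSelection.Hashing

  // Federation.apply validates the keys and returns an Either.
  val federation = Federation(Vector("alice", "bob", "carol", "dave"))
    .getOrElse(sys.error("invalid federation"))

  (0L to 7L).foreach { v =>
    println(s"view $v -> leader ${federation.leaderOf(ViewNumber(v))}")
  }
}
```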
--- README.md | 6 +++ build.sc | 44 +++++++++------ .../core/src/metronome/core/Validated.scala | 11 ++++ .../src/metronome/crypto/hash/Hash.scala | 6 +++ .../src/metronome/crypto/hash/Keccak256.scala | 20 +++++++ .../src/metronome/crypto/hash/package.scala | 5 ++ .../metronome/crypto/hash/Keccak256Spec.scala | 21 ++++++++ .../hotstuff/consensus/Federation.scala | 8 +-- .../hotstuff/consensus/LeaderSelection.scala | 53 +++++++++++++++++++ .../hotstuff/consensus/FederationSpec.scala | 2 + .../consensus/LeaderSelectionProps.scala | 44 +++++++++++++++ .../basic/HotStuffProtocolProps.scala | 9 ++-- .../networking/test/resources/logback.xml | 18 +++++++ .../MockEncryptedConnectionProvider.scala | 19 ++++--- ...onnectionManagerWithMockProviderSpec.scala | 12 +++-- ...ctionManagerWithScalanetProviderSpec.scala | 15 +++++- 16 files changed, 258 insertions(+), 35 deletions(-) create mode 100644 metronome/crypto/src/metronome/crypto/hash/Hash.scala create mode 100644 metronome/crypto/src/metronome/crypto/hash/Keccak256.scala create mode 100644 metronome/crypto/src/metronome/crypto/hash/package.scala create mode 100644 metronome/crypto/test/src/metronome/crypto/hash/Keccak256Spec.scala create mode 100644 metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/LeaderSelection.scala create mode 100644 metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/LeaderSelectionProps.scala create mode 100644 metronome/networking/test/resources/logback.xml diff --git a/README.md b/README.md index 179c5040..ee0f34a2 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,12 @@ To run a single test class, use the `.single` method with the full path to the s mill __.storage.test.single io.iohk.metronome.storage.KVStoreStateSpec ``` +To experiment with the code, start an interactive session: + +```console +mill -i metronome[2.13.4].hotstuff.consensus.console +``` + ### Formatting the codebase Please configure your editor to use `scalafmt` on save. CI will be configured to check formatting. diff --git a/build.sc b/build.sc index fd563d08..33c4d6a3 100644 --- a/build.sc +++ b/build.sc @@ -15,6 +15,7 @@ object VersionOf { val config = "1.4.1" val `kind-projector` = "0.11.3" val logback = "1.2.3" + val mantis = "3.2.1-SNAPSHOT" val monix = "3.3.0" val prometheus = "0.10.0" val rocksdb = "6.15.2" @@ -25,7 +26,6 @@ object VersionOf { val shapeless = "2.3.3" val `scodec-core` = "1.11.7" val `scodec-bits` = "1.1.12" - val `mantis-crypto` = "3.2.1-SNAPSHOT" } // Using 2.12.13 instead of 2.12.10 to access @nowarn, to disable certain deperaction @@ -150,20 +150,6 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { ) } - /** Generic Peer-to-Peer components that can multiplex protocols - * from different modules over a single authenticated TLS connection. - */ - object networking extends SubModule { - override def moduleDeps: Seq[JavaModule] = - Seq(tracing, crypto) - - override def ivyDeps = super.ivyDeps() ++ Agg( - ivy"io.iohk::scalanet:${VersionOf.scalanet}" - ) - - object test extends TestModule - } - /** Storage abstractions, e.g. a generic key-value store. */ object storage extends SubModule { override def ivyDeps = super.ivyDeps() ++ Agg( @@ -193,14 +179,31 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { override def description: String = "Cryptographic primitives to support HotStuff and BFT proof verification." 
+ override def moduleDeps: Seq[PublishModule] = + Seq(core) + override def ivyDeps = super.ivyDeps() ++ Agg( - ivy"io.iohk::mantis-crypto:${VersionOf.`mantis-crypto`}", + ivy"io.iohk::mantis-crypto:${VersionOf.mantis}", ivy"org.scodec::scodec-bits:${VersionOf.`scodec-bits`}" ) object test extends TestModule } + /** Generic Peer-to-Peer components that can multiplex protocols + * from different modules over a single authenticated TLS connection. + */ + object networking extends SubModule { + override def moduleDeps: Seq[JavaModule] = + Seq(tracing, crypto) + + override def ivyDeps = super.ivyDeps() ++ Agg( + ivy"io.iohk::scalanet:${VersionOf.scalanet}" + ) + + object test extends TestModule + } + /** Generic HotStuff BFT library. */ object hotstuff extends SubModule { @@ -221,7 +224,14 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { */ object service extends SubModule { override def moduleDeps: Seq[JavaModule] = - Seq(storage, tracing, crypto, hotstuff.consensus, hotstuff.forensics) + Seq( + storage, + tracing, + crypto, + networking, + hotstuff.consensus, + hotstuff.forensics + ) override def ivyDeps = super.ivyDeps() ++ Agg( ivy"io.iohk::scalanet:${VersionOf.scalanet}" diff --git a/metronome/core/src/metronome/core/Validated.scala b/metronome/core/src/metronome/core/Validated.scala index aed95743..d6895e5a 100644 --- a/metronome/core/src/metronome/core/Validated.scala +++ b/metronome/core/src/metronome/core/Validated.scala @@ -1,3 +1,14 @@ package metronome.core +/** Can be used to tag any particular type as validated, for example: + * + * ``` + * def validateBlock(block: Block): Either[Error, Validated[Block]] + * def storeBlock(block: Validated[Block]) + * ``` + * + * It's a bit more lightweight than opting into the `ValidatedNel` from `cats`, + * mostly just serves as control that the right methods have been called in a + * pipeline. 
+ */ object Validated extends GenericTagger diff --git a/metronome/crypto/src/metronome/crypto/hash/Hash.scala b/metronome/crypto/src/metronome/crypto/hash/Hash.scala new file mode 100644 index 00000000..9fb44d4d --- /dev/null +++ b/metronome/crypto/src/metronome/crypto/hash/Hash.scala @@ -0,0 +1,6 @@ +package metronome.crypto.hash + +import metronome.core.Tagger +import scodec.bits.ByteVector + +object Hash extends Tagger[ByteVector] diff --git a/metronome/crypto/src/metronome/crypto/hash/Keccak256.scala b/metronome/crypto/src/metronome/crypto/hash/Keccak256.scala new file mode 100644 index 00000000..4a16720d --- /dev/null +++ b/metronome/crypto/src/metronome/crypto/hash/Keccak256.scala @@ -0,0 +1,20 @@ +package metronome.crypto.hash + +import org.bouncycastle.crypto.digests.KeccakDigest +import scodec.bits.{BitVector, ByteVector} + +object Keccak256 { + def apply(data: Array[Byte]): Hash = { + val output = new Array[Byte](32) + val digest = new KeccakDigest(256) + digest.update(data, 0, data.length) + digest.doFinal(output, 0) + Hash(ByteVector(output)) + } + + def apply(data: ByteVector): Hash = + apply(data.toArray) + + def apply(data: BitVector): Hash = + apply(data.toByteArray) +} diff --git a/metronome/crypto/src/metronome/crypto/hash/package.scala b/metronome/crypto/src/metronome/crypto/hash/package.scala new file mode 100644 index 00000000..df00e947 --- /dev/null +++ b/metronome/crypto/src/metronome/crypto/hash/package.scala @@ -0,0 +1,5 @@ +package metronome.crypto + +package object hash { + type Hash = Hash.Tagged +} diff --git a/metronome/crypto/test/src/metronome/crypto/hash/Keccak256Spec.scala b/metronome/crypto/test/src/metronome/crypto/hash/Keccak256Spec.scala new file mode 100644 index 00000000..24bccd88 --- /dev/null +++ b/metronome/crypto/test/src/metronome/crypto/hash/Keccak256Spec.scala @@ -0,0 +1,21 @@ +package metronome.crypto.hash + +import scodec.bits._ +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class Keccak256Spec extends AnyFlatSpec with Matchers { + behavior of "Keccak256" + + it should "hash empty data" in { + Keccak256( + "".getBytes + ) shouldBe hex"c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + } + + it should "hash non-empty data" in { + Keccak256( + "abc".getBytes + ) shouldBe hex"4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45" + } +} diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala index 6e07eddf..1ca46c75 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala +++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala @@ -27,7 +27,7 @@ abstract case class Federation[PKey]( publicKeys: IndexedSeq[PKey], // Maximum number of Byzantine nodes. maxFaulty: Int -) { +)(implicit ls: LeaderSelection) { private val publicKeySet = publicKeys.toSet /** Size of the federation. */ @@ -40,7 +40,7 @@ abstract case class Federation[PKey]( publicKeySet.contains(publicKey) def leaderOf(viewNumber: ViewNumber): PKey = - publicKeys((viewNumber % size).toInt) + publicKeys(implicitly[LeaderSelection].leaderOf(viewNumber, size)) } object Federation { @@ -48,7 +48,7 @@ object Federation { /** Create a federation with the highest possible fault tolerance. 
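+    * (With `maxByzantine(n) = (n - 1) / 3`, the largest `f` satisfying `n >= 3*f+1`.)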
    */
   def apply[PKey](
       publicKeys: IndexedSeq[PKey]
-  ): Either[String, Federation[PKey]] =
+  )(implicit ls: LeaderSelection): Either[String, Federation[PKey]] =
     apply(publicKeys, maxByzantine(publicKeys.size))

   /** Create a federation with the fault tolerance possibly reduced from the theoretical
@@ -59,7 +59,7 @@ object Federation {
   def apply[PKey](
       publicKeys: IndexedSeq[PKey],
       maxFaulty: Int
-  ): Either[String, Federation[PKey]] = {
+  )(implicit ls: LeaderSelection): Either[String, Federation[PKey]] = {
     val f = maxByzantine(publicKeys.size)
     if (publicKeys.isEmpty) {
       Left("The federation cannot be empty!")
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/LeaderSelection.scala b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/LeaderSelection.scala
new file mode 100644
index 00000000..ecd795f5
--- /dev/null
+++ b/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/LeaderSelection.scala
@@ -0,0 +1,53 @@
+package metronome.hotstuff.consensus
+
+import metronome.crypto.hash.Keccak256
+import scodec.bits.ByteVector
+
+/** Strategy to pick the leader for a given view number from
+  * a federation with a fixed size.
+  */
+trait LeaderSelection {
+
+  /** Return the index of the federation member who should lead the view. */
+  def leaderOf(viewNumber: ViewNumber, size: Int): Int
+}
+
+object LeaderSelection {
+
+  /** Simple strategy cycling through leaders in a static order. */
+  object RoundRobin extends LeaderSelection {
+    override def leaderOf(viewNumber: ViewNumber, size: Int): Int =
+      (viewNumber % size).toInt
+  }
+
+  /** Leader assignment based on view number is not discussed in the HotStuff
+    * paper, and in general it does not affect safety or liveness.
+    * However, it does affect worst-case latency.
+    *
+    * Consider a static adversary under a round-robin leader change scheme.
+    * All the f adversarial nodes can set their public keys so that they are consecutive.
+    * In such a scenario those f consecutive leaders can create timeouts, leading
+    * to an O(f) confirmation latency. (Recall that in the normal case the latency is O(1).)
+    *
+    * A minor improvement to this is to assign leaders based on
+    * "publicKeys((H256(viewNumber).toInt % size).toInt)".
+    *
+    * This leader order randomization via a hash function ensures that even
+    * if adversarial public keys are consecutive in the public key set, they are not
+    * necessarily consecutive in the leader order.
+    *
+    * Note that the above policy will not ensure that adversarial leaders are never consecutive,
+    * but the probability of such an occurrence is lower under a static adversary.
+    */
+  object Hashing extends LeaderSelection {
+    override def leaderOf(viewNumber: ViewNumber, size: Int): Int = {
+      val bytes = ByteVector.fromLong(viewNumber) // big-endian
+      val hash  = Keccak256(bytes)
+      // If we prepended 0.toByte it would be treated as unsigned, at the cost of an array copy.
+      // Instead of doing that we just make sure we deal with the negative modulo below.
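+      // (In Scala, `%` keeps the sign of the dividend, e.g. -7 % 5 == -2, hence the fix-up below.)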
+ val num = BigInt(hash.toArray) + val mod = (num % size).toInt + if (mod < 0) mod + size else mod + } + } +} diff --git a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala index aa75d63b..eb3d6737 100644 --- a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala +++ b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala @@ -7,6 +7,8 @@ import org.scalatest.prop.TableDrivenPropertyChecks._ class FederationSpec extends AnyFlatSpec with Matchers with Inside { + implicit val ls = LeaderSelection.RoundRobin + behavior of "Federation" it should "not create an empty federation" in { diff --git a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/LeaderSelectionProps.scala b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/LeaderSelectionProps.scala new file mode 100644 index 00000000..0fbfe1bb --- /dev/null +++ b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/LeaderSelectionProps.scala @@ -0,0 +1,44 @@ +package metronome.hotstuff.consensus + +import metronome.core.Tagger +import org.scalacheck._ +import org.scalacheck.Prop.forAll + +abstract class LeaderSelectionProps(name: String, val selector: LeaderSelection) + extends Properties(name) { + + object Size extends Tagger[Int] + type Size = Size.Tagged + + implicit val arbViewNumber: Arbitrary[ViewNumber] = Arbitrary { + Gen.posNum[Long].map(ViewNumber(_)) + } + + implicit val arbFederationSize: Arbitrary[Size] = Arbitrary { + Gen.posNum[Int].map(Size(_)) + } + + property("leaderOf") = forAll { (viewNumber: ViewNumber, size: Size) => + val idx = selector.leaderOf(viewNumber, size) + 0 <= idx && idx < size + } +} + +object RoundRobinSelectionProps + extends LeaderSelectionProps( + "LeaderSelection.RoundRobin", + LeaderSelection.RoundRobin + ) { + + property("round-robin") = forAll { (viewNumber: ViewNumber, size: Size) => + val idx0 = selector.leaderOf(viewNumber, size) + val idx1 = selector.leaderOf(viewNumber.next, size) + idx1 == idx0 + 1 || idx0 == size - 1 && idx1 == 0 + } +} + +object HashingSelectionProps + extends LeaderSelectionProps( + "LeaderSelection.Hashing", + LeaderSelection.Hashing + ) diff --git a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala index 1191e397..1a5bfdfe 100644 --- a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala +++ b/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala @@ -1,7 +1,7 @@ package metronome.hotstuff.consensus.basic import metronome.crypto.{GroupSignature, PartialSignature} -import metronome.hotstuff.consensus.{ViewNumber, Federation} +import metronome.hotstuff.consensus.{ViewNumber, Federation, LeaderSelection} import org.scalacheck.commands.Commands import org.scalacheck.{Properties, Gen, Prop} import org.scalacheck.Arbitrary.arbitrary @@ -48,6 +48,8 @@ object HotStuffProtocolCommands extends Commands { override def parentBlockHash(b: TestBlock) = b.parentBlockHash } + implicit val leaderSelection = LeaderSelection.Hashing + // Going to use publicKey == -1 * signingKey. 
def mockSigningKey(pk: TestAgreement.PKey): TestAgreement.SKey = -1 * pk @@ -149,8 +151,9 @@ object HotStuffProtocolCommands extends Commands { // Using a signing key that works with the mock validation. def signingKey = mockSigningKey(publicKey) - def isLeader = viewNumber % n == ownIndex - def leader = federation((viewNumber % n).toInt) + def leaderIndex = leaderSelection.leaderOf(viewNumber, n) + def isLeader = leaderIndex == ownIndex + def leader = federation(leaderIndex) def quorumSize = (n + f) / 2 + 1 } diff --git a/metronome/networking/test/resources/logback.xml b/metronome/networking/test/resources/logback.xml new file mode 100644 index 00000000..244621d7 --- /dev/null +++ b/metronome/networking/test/resources/logback.xml @@ -0,0 +1,18 @@ + + + + + + %d{HH:mm:ss.SSS} %-5level %logger{36} %msg%n + + + + + + + + + + + + diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala b/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala index 71463239..46ca8893 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala @@ -208,13 +208,18 @@ object MockEncryptedConnectionProvider { incomingEvents.poll override def sendMessage(m: TestMessage): Task[Unit] = - Task - .race(closeToken.get, sentMessages.update(current => m :: current)) - .flatMap { - case Left(_) => - Task.raiseError(ConnectionAlreadyClosed(remotePeerInfo._2)) - case Right(_) => Task.now(()) - } + closeToken.tryGet.flatMap { + case Some(_) => + Task.raiseError(ConnectionAlreadyClosed(remotePeerInfo._2)) + case None => + Task + .race(closeToken.get, sentMessages.update(current => m :: current)) + .flatMap { + case Left(_) => + Task.raiseError(ConnectionAlreadyClosed(remotePeerInfo._2)) + case Right(_) => Task.now(()) + } + } } object MockEncryptedConnection { diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala index 7694efa7..da61d0ad 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala @@ -40,12 +40,16 @@ class RemoteConnectionManagerWithMockProviderSpec buildConnectionsManagerWithMockProvider(provider) .use { connectionManager => for { - _ <- Task.sleep(800.milliseconds) + _ <- Task.sleep(1.second) stats <- provider.getStatistics acquiredConnections <- connectionManager.getAcquiredConnections } yield { assert(stats.maxInFlightConnections == 1) - assert(stats.connectionCounts.get(defaultToMake).contains(3)) + assert( + stats.connectionCounts + .get(defaultToMake) + .exists(count => count == 2 || count == 3) + ) assert(acquiredConnections.isEmpty) } } @@ -62,7 +66,7 @@ class RemoteConnectionManagerWithMockProviderSpec ) .use { connectionManager => for { - _ <- Task.sleep(800.milliseconds) + _ <- Task.sleep(1.second) stats <- provider.getStatistics acquiredConnections <- connectionManager.getAcquiredConnections } yield { @@ -217,6 +221,7 @@ class RemoteConnectionManagerWithMockProviderSpec _ <- manager.waitForNConnections(1) containsIncoming <- manager.containsConnection(incomingConnection) duplicatedIncoming <- 
provider.newIncomingPeer(clusterPeers.head) + _ <- Task.sleep(500.millis) // Let the offered connection be processed. duplicatedIncomingClosed <- duplicatedIncoming.isClosed } yield { assert(initialAcquired.isEmpty) @@ -233,6 +238,7 @@ class RemoteConnectionManagerWithMockProviderSpec randomAcquiredConnection <- provider.getAllRegisteredPeers.map(_.head) _ <- randomAcquiredConnection.pushRemoteEvent(Some(Left(DecodingError))) _ <- manager.waitForNConnections(1) + _ <- Task.sleep(500.millis) // Let the offered connection be processed. errorIsClosed <- randomAcquiredConnection.isClosed } yield { assert(initialAcquired.size == 2) diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala index 81033157..79437aed 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala @@ -13,6 +13,7 @@ import io.iohk.metronome.networking.RemoteConnectionManagerWithScalanetProviderS Cluster, buildTestConnectionManager } +import io.iohk.scalanet.peergroup.PeerGroup import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSPeerGroup.FramingConfig import monix.eval.{Task, TaskLift, TaskLike} import monix.execution.Scheduler @@ -24,12 +25,24 @@ import scodec.Codec import java.net.InetSocketAddress import java.security.SecureRandom import scala.concurrent.duration._ +import monix.execution.UncaughtExceptionReporter class RemoteConnectionManagerWithScalanetProviderSpec extends AsyncFlatSpecLike with Matchers { implicit val testScheduler = - Scheduler.fixedPool("RemoteConnectionManagerSpec", 16) + Scheduler.fixedPool( + "RemoteConnectionManagerSpec", + 16, + reporter = UncaughtExceptionReporter { + case ex: IllegalStateException + if ex.getMessage.contains("executor not accepting a task") => + case ex: PeerGroup.ChannelBrokenException[_] => + // Probably test already closed with some task running in the background. + case ex => + UncaughtExceptionReporter.default.reportFailure(ex) + } + ) implicit val timeOut = 10.seconds From f25712d9a64423217c680d3ce8df837fb12afbb3 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Mon, 29 Mar 2021 12:42:18 +0100 Subject: [PATCH 12/48] PM-3093: Add publish script. (#11) * PM-3093: Add publish script. * PM-3093: Perhaps BASH_ENV needs circleci image. * PM-3093: Try with sudo. * PM-3093: Use sudo for base64 * PM-3093: Fix publish script name. * PM-3093: Remove publishing on test branch. 
--- .circleci/config.yml | 19 ++++++++++++------- .circleci/publish.sh | 31 +++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 7 deletions(-) create mode 100755 .circleci/publish.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 8cb92561..88ab8dc3 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -67,17 +67,22 @@ jobs: # GPG in docker needs to be run with some additional flags # and we are not able to change how mill uses it # this is why we're creating wrapper that adds the flags - command: sh -c "apt update && apt install -y gnupg2 && mv /usr/bin/gpg /usr/bin/gpg-vanilla && echo '#!/bin/sh\n\n/usr/bin/gpg-vanilla --no-tty --pinentry loopback \$@' > /usr/bin/gpg && chmod 755 /usr/bin/gpg && cat /usr/bin/gpg" + command: | + sudo apt update + sudo apt install -y gnupg2 + sudo mv /usr/bin/gpg /usr/bin/gpg-vanilla + sudo sh -c "echo '#!/bin/sh\n\n/usr/bin/gpg-vanilla --no-tty --pinentry loopback \$@' > /usr/bin/gpg" + sudo chmod 755 /usr/bin/gpg + cat /usr/bin/gpg - run: name: install base64 - command: apt update && apt install -y cl-base64 + command: sudo apt update && sudo apt install -y cl-base64 - # TODO: Configure Mantis' credentials - # - run: - # name: publish - # command: .circleci/publish - # no_output_timeout: 30m + - run: + name: publish + command: .circleci/publish.sh + no_output_timeout: 30m workflows: build_and_publish: diff --git a/.circleci/publish.sh b/.circleci/publish.sh new file mode 100755 index 00000000..63b8197a --- /dev/null +++ b/.circleci/publish.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -euv + +echo $GPG_KEY | base64 --decode | gpg --batch --import + +gpg --passphrase $GPG_PASSPHRASE --batch --yes -a -b LICENSE + +if [[ "$CIRCLE_BRANCH" == "develop" ]]; then + +mill mill.scalalib.PublishModule/publishAll \ + __.publishArtifacts \ + "$OSS_USERNAME":"$OSS_PASSWORD" \ + --gpgArgs --passphrase="$GPG_PASSPHRASE",--batch,--yes,-a,-b + +elif [[ "$CIRCLE_BRANCH" == "master" ]]; then + +mill versionFile.setReleaseVersion +mill mill.scalalib.PublishModule/publishAll \ + __.publishArtifacts \ + "$OSS_USERNAME":"$OSS_PASSWORD" \ + --gpgArgs --passphrase="$GPG_PASSPHRASE",--batch,--yes,-a,-b \ + --readTimeout 600000 \ + --awaitTimeout 600000 \ + --release true + +else + + echo "Skipping publish step" + +fi From b03dba58c831cb225ef72aa8c9cdc72473ba6098 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Tue, 30 Mar 2021 15:31:20 +0100 Subject: [PATCH 13/48] PM-2907: Emit trace events from networking. (#13) * PM-2907: Emit trace events from networking. * PM-2907: Demonstrate logging in remote connection tests. * PM-2907: Log with SLF4J in test. * PM-2907: In-memory log collector for tests. * PM-2907: Add Tracer.instance and Tracer.const * PM-2907: Fix comment. * PM-2907: Add NetworkEvent.ConnectionError * PM-2907: Added ConnectionUnknown * PM-2907: Add MessageReceived, MessageSent and ConnectionUnknown. 
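A rough sketch of how the new tracing pieces compose; the `AppEvent` type and `TracerDemo` object are made-up illustrations, while `Tracer.instance` and the contravariant `contramap` syntax are what this patch introduces in the diff below:

```scala
import cats.syntax.contravariant._
import io.iohk.metronome.tracer.Tracer
import monix.eval.Task

// Hypothetical event type, for illustration only.
sealed trait AppEvent
case class PeerConnected(id: String) extends AppEvent

object TracerDemo {
  // A base tracer that writes strings to stdout.
  val stringTracer: Tracer[Task, String] =
    Tracer.instance(s => Task(println(s)))

  // Derive an AppEvent tracer by describing how to render an event as a string;
  // this is the same pattern NetworkTracers uses to adapt a NetworkEvent tracer.
  val eventTracer: Tracer[Task, AppEvent] =
    stringTracer.contramap[AppEvent](e => s"event: $e")
}
```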
--- build.sc | 20 ++++-- .../io/iohk/metronome/logging/HybridLog.scala | 36 ++++++++++ .../metronome/logging/HybridLogObject.scala | 38 ++++++++++ .../metronome/logging/InMemoryLogTracer.scala | 50 +++++++++++++ .../io/iohk/metronome/logging/LogTracer.scala | 38 ++++++++++ .../networking/ConnectionHandler.scala | 45 ++++++++---- .../metronome/networking/NetworkEvent.scala | 54 ++++++++++++++ .../metronome/networking/NetworkTracers.scala | 71 +++++++++++++++++++ .../networking/RemoteConnectionManager.scala | 22 +++--- .../networking/test/resources/logback.xml | 2 +- .../networking/ConnectionHandlerSpec.scala | 5 +- ...onnectionManagerWithMockProviderSpec.scala | 4 ++ ...ctionManagerWithScalanetProviderSpec.scala | 45 +++++++++++- .../io/iohk/metronome/tracer/Tracer.scala | 20 +++++- 14 files changed, 415 insertions(+), 35 deletions(-) create mode 100644 metronome/logging/src/io/iohk/metronome/logging/HybridLog.scala create mode 100644 metronome/logging/src/io/iohk/metronome/logging/HybridLogObject.scala create mode 100644 metronome/logging/src/io/iohk/metronome/logging/InMemoryLogTracer.scala create mode 100644 metronome/logging/src/io/iohk/metronome/logging/LogTracer.scala create mode 100644 metronome/networking/src/io/iohk/metronome/networking/NetworkEvent.scala create mode 100644 metronome/networking/src/io/iohk/metronome/networking/NetworkTracers.scala diff --git a/build.sc b/build.sc index 33c4d6a3..9b20c525 100644 --- a/build.sc +++ b/build.sc @@ -12,6 +12,7 @@ object versionFile extends VersionFileModule object VersionOf { val cats = "2.3.1" + val circe = "0.12.3" val config = "1.4.1" val `kind-projector` = "0.11.3" val logback = "1.2.3" @@ -20,10 +21,10 @@ object VersionOf { val prometheus = "0.10.0" val rocksdb = "6.15.2" val scalacheck = "1.15.2" - val scalalogging = "3.9.2" val scalatest = "3.2.5" val scalanet = "0.7.0" val shapeless = "2.3.3" + val slf4j = "1.7.30" val `scodec-core` = "1.11.7" val `scodec-bits` = "1.1.12" } @@ -128,6 +129,9 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { override def moduleDeps: Seq[JavaModule] = super.moduleDeps ++ Seq(logging) + // Enable logging in tests. + // Control the visibility using ./test/resources/logback.xml + // Alternatively, capture logs in memory. override def ivyDeps = Agg( ivy"org.scalatest::scalatest:${VersionOf.scalatest}", ivy"org.scalacheck::scalacheck:${VersionOf.scalacheck}", @@ -201,7 +205,10 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { ivy"io.iohk::scalanet:${VersionOf.scalanet}" ) - object test extends TestModule + object test extends TestModule { + override def moduleDeps: Seq[JavaModule] = + super.moduleDeps ++ Seq(logging) + } } /** Generic HotStuff BFT library. */ @@ -292,13 +299,18 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { } } - /** Implements tracing abstractions to do structured logging. */ + /** Implements tracing abstractions to do structured logging. + * + * To actually emit logs, a dependant module also has to add + * a dependency on e.g. logback. 
+ */ object logging extends SubModule { override def moduleDeps: Seq[JavaModule] = Seq(tracing) override def ivyDeps = super.ivyDeps() ++ Agg( - ivy"com.typesafe.scala-logging::scala-logging:${VersionOf.scalalogging}" + ivy"org.slf4j:slf4j-api:${VersionOf.slf4j}", + ivy"io.circe::circe-core:${VersionOf.circe}" ) } diff --git a/metronome/logging/src/io/iohk/metronome/logging/HybridLog.scala b/metronome/logging/src/io/iohk/metronome/logging/HybridLog.scala new file mode 100644 index 00000000..5ec61ecb --- /dev/null +++ b/metronome/logging/src/io/iohk/metronome/logging/HybridLog.scala @@ -0,0 +1,36 @@ +package io.iohk.metronome.logging + +import io.circe.JsonObject +import java.time.Instant +import scala.reflect.ClassTag + +/** Type class to transform instances of `T` to `HybridLogObject`. */ +trait HybridLog[T] { + def apply(value: T): HybridLogObject +} + +object HybridLog { + def apply[T](implicit ev: HybridLog[T]): HybridLog[T] = ev + + /** Create an instance of `HybridLog` for a type `T` by passing + * functions to transform instances of `T` to message and JSON. + */ + def instance[T: ClassTag]( + level: T => HybridLogObject.Level, + message: T => String, + event: T => JsonObject + ): HybridLog[T] = + new HybridLog[T] { + val source = implicitly[ClassTag[T]].runtimeClass.getName + + override def apply(value: T): HybridLogObject = { + HybridLogObject( + level = level(value), + timestamp = Instant.now(), + source = source, + message = message(value), + event = event(value) + ) + } + } +} diff --git a/metronome/logging/src/io/iohk/metronome/logging/HybridLogObject.scala b/metronome/logging/src/io/iohk/metronome/logging/HybridLogObject.scala new file mode 100644 index 00000000..aacb42ed --- /dev/null +++ b/metronome/logging/src/io/iohk/metronome/logging/HybridLogObject.scala @@ -0,0 +1,38 @@ +package io.iohk.metronome.logging + +import io.circe.JsonObject +import io.circe.syntax._ +import java.time.Instant +import cats.Show + +/** A hybrid log has a human readable message, which is intended to be static, + * and some key-value paramters that vary by events. + * + * See https://medium.com/unomaly/logging-wisdom-how-to-log-5a19145e35ec + */ +case class HybridLogObject( + timestamp: Instant, + source: String, + level: HybridLogObject.Level, + // Something captured about what emitted this event. + // Human readable message, which typically shouldn't + // change between events emitted at the same place. + message: String, + // Key-Value pairs that capture arbitrary data. + event: JsonObject +) +object HybridLogObject { + sealed trait Level + object Level { + case object Error extends Level + case object Warn extends Level + case object Info extends Level + case object Debug extends Level + case object Trace extends Level + } + + implicit val show: Show[HybridLogObject] = Show.show { + case HybridLogObject(t, s, l, m, e) => + s"$t ${l.toString.toUpperCase.padTo(5, ' ')} - $s: $m ${e.asJson.noSpaces}" + } +} diff --git a/metronome/logging/src/io/iohk/metronome/logging/InMemoryLogTracer.scala b/metronome/logging/src/io/iohk/metronome/logging/InMemoryLogTracer.scala new file mode 100644 index 00000000..d67dd7f2 --- /dev/null +++ b/metronome/logging/src/io/iohk/metronome/logging/InMemoryLogTracer.scala @@ -0,0 +1,50 @@ +package io.iohk.metronome.logging + +import cats.implicits._ +import cats.effect.Sync +import cats.effect.concurrent.Ref +import io.iohk.metronome.tracer.Tracer + +/** Collect logs in memory, so we can inspect them in tests. 
*/ +object InMemoryLogTracer { + + class HybridLogTracer[F[_]: Sync]( + logRef: Ref[F, Vector[HybridLogObject]] + ) extends Tracer[F, HybridLogObject] { + + override def apply(a: => HybridLogObject): F[Unit] = + logRef.update(_ :+ a) + + def getLogs: F[Seq[HybridLogObject]] = + logRef.get.map(_.toSeq) + + def getLevel(l: HybridLogObject.Level) = + getLogs.map(_.filter(_.level == l)) + + def getErrors = getLevel(HybridLogObject.Level.Error) + def getWarns = getLevel(HybridLogObject.Level.Warn) + def getInfos = getLevel(HybridLogObject.Level.Info) + def getDebugs = getLevel(HybridLogObject.Level.Debug) + def getTraces = getLevel(HybridLogObject.Level.Trace) + } + + /** For example: + * + * ``` + * val logTracer = InMemoryLogTracer.hybrid[Task] + * val networkEventTracer = logTracer.contramap(implicitly[HybridLog[NetworkEvent]].apply _) + * val consensusEventTracer = logTracer.contramap(implicitly[HybridLog[ConsensusEvent]].apply _) + * + * val test = for { + * msg <- network.nextMessage + * _ <- consensus.handleMessage(msg) + * warns <- logTracer.getWarns + * } yield { + * warns shouldBe empty + * } + * + * ``` + */ + def hybrid[F[_]: Sync]: Tracer[F, HybridLogObject] = + new HybridLogTracer[F](Ref.unsafe[F, Vector[HybridLogObject]](Vector.empty)) +} diff --git a/metronome/logging/src/io/iohk/metronome/logging/LogTracer.scala b/metronome/logging/src/io/iohk/metronome/logging/LogTracer.scala new file mode 100644 index 00000000..aa1ab995 --- /dev/null +++ b/metronome/logging/src/io/iohk/metronome/logging/LogTracer.scala @@ -0,0 +1,38 @@ +package io.iohk.metronome.logging + +import cats.syntax.contravariant._ +import cats.effect.Sync +import io.circe.syntax._ +import io.iohk.metronome.tracer.Tracer +import org.slf4j.LoggerFactory + +/** Forward traces to SLF4J logs. */ +object LogTracer { + + /** Create a logger for `HybridLogObject` that delegates to SLF4J. */ + def hybrid[F[_]: Sync]: Tracer[F, HybridLogObject] = + new Tracer[F, HybridLogObject] { + override def apply(log: => HybridLogObject): F[Unit] = Sync[F].delay { + val logger = LoggerFactory.getLogger(log.source) + + def message = s"${log.message} ${log.event.asJson.noSpaces}" + + log.level match { + case HybridLogObject.Level.Error => + if (logger.isErrorEnabled) logger.error(message) + case HybridLogObject.Level.Warn => + if (logger.isWarnEnabled) logger.warn(message) + case HybridLogObject.Level.Info => + if (logger.isInfoEnabled) logger.info(message) + case HybridLogObject.Level.Debug => + if (logger.isDebugEnabled) logger.debug(message) + case HybridLogObject.Level.Trace => + if (logger.isTraceEnabled) logger.trace(message) + } + } + } + + /** Create a logger for a type that can be transformed to a `HybridLogObject`. 
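+    *
+    * For example, `LogTracer.hybrid[Task, NetworkEvent[K, M]]`, given an implicit
+    * `HybridLog[NetworkEvent[K, M]]` instance (as set up in the networking tests below).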
*/ + def hybrid[F[_]: Sync, T: HybridLog]: Tracer[F, T] = + hybrid[F].contramap(implicitly[HybridLog[T]].apply _) +} diff --git a/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala b/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala index 13b9c6b6..62f80d24 100644 --- a/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala +++ b/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala @@ -28,18 +28,22 @@ class ConnectionHandler[F[_]: Concurrent, K, M]( messageQueue: ConcurrentQueue[F, MessageReceived[K, M]], cancelToken: TryableDeferred[F, Unit], connectionFinishCallback: HandledConnection[F, K, M] => F[Unit] -) { +)(implicit tracers: NetworkTracers[F, K, M]) { private val numberOfRunningConnections = AtomicInt(0) private def closeAndDeregisterConnection( handledConnection: HandledConnection[F, K, M] ): F[Unit] = { - for { + val close = for { _ <- Concurrent[F].delay(numberOfRunningConnections.decrement()) _ <- connectionsRegister.deregisterConnection(handledConnection) _ <- handledConnection.close } yield () + + close.guarantee { + tracers.deregistered(handledConnection) + } } /** Registers connections and start handling incoming messages in background, in case connection is already handled @@ -54,9 +58,12 @@ class ConnectionHandler[F[_]: Concurrent, K, M]( case Some(_) => //TODO [PM-3092] for now we are closing any new connections in case of conflict, we may investigate other strategies // like keeping old for outgoing and replacing for incoming - possibleNewConnection.close + tracers.discarded(possibleNewConnection) >> + possibleNewConnection.close + case None => - connectionQueue.offer(possibleNewConnection) + tracers.registered(possibleNewConnection) >> + connectionQueue.offer(possibleNewConnection) } } @@ -104,12 +111,14 @@ class ConnectionHandler[F[_]: Concurrent, K, M]( .attemptNarrow[ConnectionAlreadyClosed] .flatMap { case Left(_) => - connection.close.as( - Left(ConnectionAlreadyClosedException(recipient)) - ) + // Closing the connection will cause it to be re-queued for reconnection. 
+ tracers.sendError(connection) >> + connection.close.as( + Left(ConnectionAlreadyClosedException(recipient)) + ) case Right(_) => - Concurrent[F].pure(Right(())) + tracers.sent((connection, message)).as(Right(())) } case None => Concurrent[F].pure(Left(ConnectionAlreadyClosedException(recipient))) @@ -140,15 +149,17 @@ class ConnectionHandler[F[_]: Concurrent, K, M]( ) .takeWhile(_.isDefined) .map(_.get) - .mapEval { + .mapEval[Unit] { case Right(m) => - messageQueue.offer( - MessageReceived(connection.key, m) - ) + tracers.received((connection, m)) >> + messageQueue.offer( + MessageReceived(connection.key, m) + ) case Left(e) => - Concurrent[F].raiseError[Unit]( - UnexpectedConnectionError(e, connection.key) - ) + tracers.receiveError((connection, e)) >> + Concurrent[F].raiseError[Unit]( + UnexpectedConnectionError(e, connection.key) + ) } .guarantee( closeAndDeregisterConnection(connection) @@ -241,6 +252,8 @@ object ConnectionHandler { private def buildHandler[F[_]: Concurrent: ContextShift, K, M]( connectionFinishCallback: HandledConnection[F, K, M] => F[Unit] + )(implicit + tracers: NetworkTracers[F, K, M] ): F[ConnectionHandler[F, K, M]] = { for { cancelToken <- Deferred.tryable[F, Unit] @@ -263,6 +276,8 @@ object ConnectionHandler { */ def apply[F[_]: Concurrent: ContextShift, K, M]( connectionFinishCallback: HandledConnection[F, K, M] => F[Unit] + )(implicit + tracers: NetworkTracers[F, K, M] ): Resource[F, ConnectionHandler[F, K, M]] = { Resource .make(buildHandler(connectionFinishCallback)) { handler => diff --git a/metronome/networking/src/io/iohk/metronome/networking/NetworkEvent.scala b/metronome/networking/src/io/iohk/metronome/networking/NetworkEvent.scala new file mode 100644 index 00000000..ba8bc477 --- /dev/null +++ b/metronome/networking/src/io/iohk/metronome/networking/NetworkEvent.scala @@ -0,0 +1,54 @@ +package io.iohk.metronome.networking + +import java.net.InetSocketAddress + +/** Events we want to trace. */ +sealed trait NetworkEvent[K, +M] + +object NetworkEvent { + + case class Peer[K](key: K, address: InetSocketAddress) + + /** The connection to/from the peer has been added to the register. */ + case class ConnectionRegistered[K](peer: Peer[K]) + extends NetworkEvent[K, Nothing] + + /** The connection to/from the peer has been closed and removed from the register. */ + case class ConnectionDeregistered[K](peer: Peer[K]) + extends NetworkEvent[K, Nothing] + + /** We had two connections to/from the peer and discarded one of them. */ + case class ConnectionDiscarded[K](peer: Peer[K]) + extends NetworkEvent[K, Nothing] + + /** Failed to establish connection to remote peer. */ + case class ConnectionFailed[K]( + peer: Peer[K], + numberOfFailures: Int, + error: Throwable + ) extends NetworkEvent[K, Nothing] + + /** Error reading data from a connection. */ + case class ConnectionReceiveError[K]( + peer: Peer[K], + error: EncryptedConnectionProvider.ConnectionError + ) extends NetworkEvent[K, Nothing] + + /** Error sending data over a connection, already disconnected. */ + case class ConnectionSendError[K]( + peer: Peer[K] + ) extends NetworkEvent[K, Nothing] + + /** Incoming connection from someone outside the federation. */ + case class ConnectionUnknown[K](peer: Peer[K]) + extends NetworkEvent[K, Nothing] + + /** Received incoming message from peer. */ + case class MessageReceived[K, M](peer: Peer[K], message: M) + extends NetworkEvent[K, M] + + /** Sent outgoing message to peer. 
*/ + case class MessageSent[K, M](peer: Peer[K], message: M) + extends NetworkEvent[K, M] + +} diff --git a/metronome/networking/src/io/iohk/metronome/networking/NetworkTracers.scala b/metronome/networking/src/io/iohk/metronome/networking/NetworkTracers.scala new file mode 100644 index 00000000..c463ae9c --- /dev/null +++ b/metronome/networking/src/io/iohk/metronome/networking/NetworkTracers.scala @@ -0,0 +1,71 @@ +package io.iohk.metronome.networking + +import cats.implicits._ +import io.iohk.metronome.tracer.Tracer + +case class NetworkTracers[F[_], K, M]( + unknown: Tracer[F, EncryptedConnection[F, K, M]], + registered: Tracer[F, ConnectionHandler.HandledConnection[F, K, M]], + deregistered: Tracer[F, ConnectionHandler.HandledConnection[F, K, M]], + discarded: Tracer[F, ConnectionHandler.HandledConnection[F, K, M]], + failed: Tracer[F, RemoteConnectionManager.ConnectionFailure[K]], + receiveError: Tracer[F, NetworkTracers.HandledConnectionError[F, K, M]], + sendError: Tracer[F, ConnectionHandler.HandledConnection[F, K, M]], + received: Tracer[F, NetworkTracers.HandledConnectionMessage[F, K, M]], + sent: Tracer[F, NetworkTracers.HandledConnectionMessage[F, K, M]] +) + +object NetworkTracers { + import NetworkEvent._ + import ConnectionHandler.HandledConnection + + type HandledConnectionError[F[_], K, M] = ( + ConnectionHandler.HandledConnection[F, K, M], + EncryptedConnectionProvider.ConnectionError + ) + type HandledConnectionMessage[F[_], K, M] = ( + ConnectionHandler.HandledConnection[F, K, M], + M + ) + + def apply[F[_], K, M]( + tracer: Tracer[F, NetworkEvent[K, M]] + ): NetworkTracers[F, K, M] = + NetworkTracers[F, K, M]( + unknown = tracer.contramap[EncryptedConnection[F, K, M]] { conn => + ConnectionUnknown((Peer.apply[K] _).tupled(conn.remotePeerInfo)) + }, + registered = tracer.contramap[HandledConnection[F, K, M]] { conn => + ConnectionRegistered(Peer(conn.key, conn.serverAddress)) + }, + deregistered = tracer.contramap[HandledConnection[F, K, M]] { conn => + ConnectionDeregistered(Peer(conn.key, conn.serverAddress)) + }, + discarded = tracer.contramap[HandledConnection[F, K, M]] { conn => + ConnectionDiscarded(Peer(conn.key, conn.serverAddress)) + }, + failed = + tracer.contramap[RemoteConnectionManager.ConnectionFailure[K]] { fail => + ConnectionFailed( + Peer(fail.connectionRequest.key, fail.connectionRequest.address), + fail.connectionRequest.numberOfFailures, + fail.err + ) + }, + receiveError = + tracer.contramap[HandledConnectionError[F, K, M]] { case (conn, err) => + ConnectionReceiveError(Peer(conn.key, conn.serverAddress), err) + }, + sendError = tracer.contramap[HandledConnection[F, K, M]] { conn => + ConnectionSendError(Peer(conn.key, conn.serverAddress)) + }, + received = tracer.contramap[HandledConnectionMessage[F, K, M]] { + case (conn, msg) => + MessageReceived(Peer(conn.key, conn.serverAddress), msg) + }, + sent = tracer.contramap[HandledConnectionMessage[F, K, M]] { + case (conn, msg) => + MessageSent(Peer(conn.key, conn.serverAddress), msg) + } + ) +} diff --git a/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala b/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala index e4119adf..e40ecb16 100644 --- a/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala +++ b/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala @@ -40,7 +40,7 @@ class RemoteConnectionManager[F[_]: Sync, K, M: Codec]( connectionHandler.sendMessage(recipient, 
message) } } -//TODO add logging + object RemoteConnectionManager { case class ConnectionSuccess[F[_], K, M]( encryptedConnection: EncryptedConnection[F, K, M] @@ -170,7 +170,7 @@ object RemoteConnectionManager { connectionsToAcquire: ConcurrentQueue[F, OutGoingConnectionRequest[K]], connectionsHandler: ConnectionHandler[F, K, M], retryConfig: RetryConfig - ): F[Unit] = { + )(implicit tracers: NetworkTracers[F, K, M]): F[Unit] = { /** Observable is used here as streaming primitive as it has richer api than Iterant and have mapParallelUnorderedF * combinator, which makes it possible to have multiple concurrent retry timers, which are cancelled when whole @@ -184,11 +184,11 @@ object RemoteConnectionManager { } .mapParallelUnorderedF(Integer.MAX_VALUE) { case Left(failure) => - //TODO add logging of failure val failureToLog = failure.err - retryConnection(retryConfig, failure.connectionRequest).flatMap( - updatedRequest => connectionsToAcquire.offer(updatedRequest) - ) + tracers.failed(failure) >> + retryConnection(retryConfig, failure.connectionRequest).flatMap( + updatedRequest => connectionsToAcquire.offer(updatedRequest) + ) case Right(connection) => val newOutgoingConnections = HandledConnection.outgoing(connection.encryptedConnection) @@ -204,7 +204,7 @@ object RemoteConnectionManager { pg: EncryptedConnectionProvider[F, K, M], connectionsHandler: ConnectionHandler[F, K, M], clusterConfig: ClusterConfig[K] - ): F[Unit] = { + )(implicit tracers: NetworkTracers[F, K, M]): F[Unit] = { Iterant .repeatEvalF(pg.incomingConnection) .takeWhile(_.isDefined) @@ -225,7 +225,8 @@ object RemoteConnectionManager { case None => // unknown connection, just close it - encryptedConnection.close + tracers.unknown(encryptedConnection) >> + encryptedConnection.close } } .completedL @@ -308,7 +309,8 @@ object RemoteConnectionManager { clusterConfig: ClusterConfig[K], retryConfig: RetryConfig )(implicit - cs: ContextShift[F] + cs: ContextShift[F], + tracers: NetworkTracers[F, K, M] ): Resource[F, RemoteConnectionManager[F, K, M]] = { for { connectionsToAcquireQueue <- Resource.liftF( @@ -341,11 +343,13 @@ object RemoteConnectionManager { connectionsHandler, retryConfig ).background + _ <- handleServerConnections( encryptedConnectionsProvider, connectionsHandler, clusterConfig ).background + } yield new RemoteConnectionManager[F, K, M]( connectionsHandler, encryptedConnectionsProvider.localPeerInfo diff --git a/metronome/networking/test/resources/logback.xml b/metronome/networking/test/resources/logback.xml index 244621d7..d3406af3 100644 --- a/metronome/networking/test/resources/logback.xml +++ b/metronome/networking/test/resources/logback.xml @@ -11,7 +11,7 @@ - + diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala index e2bee90b..a48c6f3c 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala @@ -22,7 +22,7 @@ import monix.eval.Task import ConnectionHandlerSpec._ import io.iohk.metronome.networking.EncryptedConnectionProvider.DecodingError import io.iohk.metronome.networking.RemoteConnectionManagerWithMockProviderSpec.fakeLocalAddress - +import io.iohk.metronome.tracer.Tracer import java.net.InetSocketAddress class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { @@ -245,6 +245,9 @@ object ConnectionHandlerSpec { } } + 
implicit val tracers: NetworkTracers[Task, Secp256k1Key, TestMessage] = + NetworkTracers(Tracer.noOpTracer) + def buildHandlerResource( cb: HandledConnection[Task, Secp256k1Key, TestMessage] => Task[Unit] = _ => Task(()) diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala index da61d0ad..2d4677cd 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala @@ -18,6 +18,7 @@ import io.iohk.metronome.networking.RemoteConnectionManagerWithMockProviderSpec. fakeLocalAddress, longRetryConfig } +import io.iohk.metronome.tracer.Tracer import monix.eval.Task import monix.execution.Scheduler import org.scalatest.flatspec.AsyncFlatSpecLike @@ -341,6 +342,9 @@ object RemoteConnectionManagerWithMockProviderSpec { val defalutAllowed = Secp256k1Key.getFakeRandomKey val defaultToMake = Secp256k1Key.getFakeRandomKey + implicit val tracers: NetworkTracers[Task, Secp256k1Key, TestMessage] = + NetworkTracers(Tracer.noOpTracer) + def buildConnectionsManagerWithMockProvider( ec: MockEncryptedConnectionProvider, retryConfig: RetryConfig = quickRetryConfig, diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala index 79437aed..1d708a5a 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala @@ -2,7 +2,8 @@ package io.iohk.metronome.networking import cats.data.NonEmptyList import cats.effect.concurrent.Ref -import cats.effect.{Concurrent, ContextShift, Resource, Timer} +import cats.effect.{Concurrent, ContextShift, Resource, Timer, Sync} +import io.circe.{Json, JsonObject, Encoder} import io.iohk.metronome.networking.ConnectionHandler.MessageReceived import io.iohk.metronome.networking.RemoteConnectionManager.{ ClusterConfig, @@ -13,6 +14,7 @@ import io.iohk.metronome.networking.RemoteConnectionManagerWithScalanetProviderS Cluster, buildTestConnectionManager } +import io.iohk.metronome.logging.{HybridLogObject, HybridLog, LogTracer} import io.iohk.scalanet.peergroup.PeerGroup import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSPeerGroup.FramingConfig import monix.eval.{Task, TaskLift, TaskLike} @@ -21,7 +23,6 @@ import org.bouncycastle.crypto.AsymmetricCipherKeyPair import org.scalatest.flatspec.AsyncFlatSpecLike import org.scalatest.matchers.should.Matchers import scodec.Codec - import java.net.InetSocketAddress import java.security.SecureRandom import scala.concurrent.duration._ @@ -30,6 +31,8 @@ import monix.execution.UncaughtExceptionReporter class RemoteConnectionManagerWithScalanetProviderSpec extends AsyncFlatSpecLike with Matchers { + import RemoteConnectionManagerWithScalanetProviderSpec.secp256k1Encoder + implicit val testScheduler = Scheduler.fixedPool( "RemoteConnectionManagerSpec", @@ -129,9 +132,45 @@ object RemoteConnectionManagerWithScalanetProviderSpec { FramingConfig.buildStandardFrameConfig(1000000, 4).getOrElse(null) val testIncomingQueueSize = 20 + implicit val 
secp256k1Encoder: Encoder[Secp256k1Key] = + Encoder.instance(key => Json.fromString(key.key.toHex)) + + // Just an example of setting up logging. + implicit def tracers[F[_]: Sync, K: io.circe.Encoder, M] + : NetworkTracers[F, K, M] = { + import io.circe.syntax._ + import NetworkEvent._ + + implicit val peerEncoder: Encoder.AsObject[Peer[K]] = + Encoder.AsObject.instance { case Peer(key, address) => + JsonObject("key" -> key.asJson, "address" -> address.toString.asJson) + } + + implicit val hybridLog: HybridLog[NetworkEvent[K, M]] = + HybridLog.instance[NetworkEvent[K, M]]( + level = _ => HybridLogObject.Level.Debug, + message = _.getClass.getSimpleName, + event = { + case e: ConnectionUnknown[_] => e.peer.asJsonObject + case e: ConnectionRegistered[_] => e.peer.asJsonObject + case e: ConnectionDeregistered[_] => e.peer.asJsonObject + case e: ConnectionDiscarded[_] => e.peer.asJsonObject + case e: ConnectionSendError[_] => e.peer.asJsonObject + case e: ConnectionFailed[_] => + e.peer.asJsonObject.add("error", e.error.toString.asJson) + case e: ConnectionReceiveError[_] => + e.peer.asJsonObject.add("error", e.error.toString.asJson) + case e: NetworkEvent.MessageReceived[_, _] => e.peer.asJsonObject + case e: NetworkEvent.MessageSent[_, _] => e.peer.asJsonObject + } + ) + + NetworkTracers(LogTracer.hybrid[F, NetworkEvent[K, M]]) + } + def buildTestConnectionManager[ F[_]: Concurrent: TaskLift: TaskLike: Timer, - K: Codec, + K: Codec: Encoder, M: Codec ]( bindAddress: InetSocketAddress = randomAddress(), diff --git a/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala b/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala index 412ac330..09bb2ace 100644 --- a/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala +++ b/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala @@ -1,9 +1,9 @@ -package io.iohk.tracer +package io.iohk.metronome.tracer import language.higherKinds import cats.{Applicative, Contravariant, FlatMap, Id, Monad, Monoid, Show, ~>} -/** Contravariant tracer +/** Contravariant tracer. * * Ported from https://github.com/input-output-hk/contra-tracer/blob/master/src/Control/Tracer.hs */ @@ -13,10 +13,26 @@ trait Tracer[F[_], -A] { object Tracer { + def instance[F[_], A](f: (=> A) => F[Unit]): Tracer[F, A] = + new Tracer[F, A] { + override def apply(a: => A): F[Unit] = f(a) + } + + def const[F[_], A](f: F[Unit]): Tracer[F, A] = + instance(_ => f) + /** If you know: * - how to enrich type A that is traced * - how to squeeze B's to create A's (possibly enrich B with extra stuff, or forget some details) * then you have Tracer for B + * + * Example + * ``` + * import cats.syntax.contravariant._ + * + * val atracer: Tracer[F, A] = ??? + * val btracer: Tracer[F, B] = atracer.contramap[B](b => b.toA) + * ```. */ implicit def contraTracer[F[_]]: Contravariant[Tracer[F, *]] = new Contravariant[Tracer[F, *]] { From 5b8d46ff5794be2c52c6d2c5bf3d7ce2ffe57d24 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Tue, 30 Mar 2021 17:38:12 +0100 Subject: [PATCH 14/48] FIX: Add io.iohk prefix to packages. 
(#16) --- .../src/{ => io/iohk}/metronome/core/Tagger.scala | 2 +- .../{ => io/iohk}/metronome/core/Validated.scala | 2 +- .../src/{ => io/iohk}/metronome/core/package.scala | 2 +- .../iohk}/metronome/crypto/GroupSignature.scala | 2 +- .../iohk}/metronome/crypto/PartialSignature.scala | 2 +- .../iohk}/metronome/crypto/Secp256k1Utils.scala | 2 +- .../src/io/iohk/metronome/crypto/hash/Hash.scala | 6 ++++++ .../iohk}/metronome/crypto/hash/Keccak256.scala | 2 +- .../iohk}/metronome/crypto/hash/package.scala | 2 +- .../crypto/src/metronome/crypto/hash/Hash.scala | 6 ------ .../metronome/crypto/hash/Keccak256Spec.scala | 2 +- .../metronome/hotstuff/consensus/Federation.scala | 2 +- .../hotstuff/consensus/LeaderSelection.scala | 4 ++-- .../metronome/hotstuff/consensus/ViewNumber.scala | 4 ++-- .../hotstuff/consensus/basic/Agreement.scala | 2 +- .../metronome/hotstuff/consensus/basic/Block.scala | 2 +- .../hotstuff/consensus/basic/Effect.scala | 4 ++-- .../metronome/hotstuff/consensus/basic/Event.scala | 4 ++-- .../hotstuff/consensus/basic/Message.scala | 6 +++--- .../metronome/hotstuff/consensus/basic/Phase.scala | 2 +- .../hotstuff/consensus/basic/ProtocolError.scala | 4 ++-- .../hotstuff/consensus/basic/ProtocolState.scala | 6 +++--- .../consensus/basic/QuorumCertificate.scala | 6 +++--- .../hotstuff/consensus/basic/Signing.scala | 6 +++--- .../metronome/hotstuff/consensus/package.scala | 2 +- .../hotstuff/consensus/FederationSpec.scala | 2 +- .../hotstuff/consensus/LeaderSelectionProps.scala | 4 ++-- .../consensus/basic/HotStuffProtocolProps.scala | 10 +++++++--- .../RemoteConnectionManagerTestUtils.scala | 14 +++++++------- ...ConnectionManagerWithScalanetProviderSpec.scala | 3 ++- .../io/iohk/metronome/tracer/Tracer.scala | 0 31 files changed, 61 insertions(+), 56 deletions(-) rename metronome/core/src/{ => io/iohk}/metronome/core/Tagger.scala (96%) rename metronome/core/src/{ => io/iohk}/metronome/core/Validated.scala (93%) rename metronome/core/src/{ => io/iohk}/metronome/core/package.scala (72%) rename metronome/crypto/src/{ => io/iohk}/metronome/crypto/GroupSignature.scala (88%) rename metronome/crypto/src/{ => io/iohk}/metronome/crypto/PartialSignature.scala (88%) rename metronome/crypto/src/{ => io/iohk}/metronome/crypto/Secp256k1Utils.scala (95%) create mode 100644 metronome/crypto/src/io/iohk/metronome/crypto/hash/Hash.scala rename metronome/crypto/src/{ => io/iohk}/metronome/crypto/hash/Keccak256.scala (92%) rename metronome/crypto/src/{ => io/iohk}/metronome/crypto/hash/package.scala (60%) delete mode 100644 metronome/crypto/src/metronome/crypto/hash/Hash.scala rename metronome/crypto/test/src/{ => io/iohk}/metronome/crypto/hash/Keccak256Spec.scala (93%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/Federation.scala (98%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/LeaderSelection.scala (95%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/ViewNumber.scala (77%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/basic/Agreement.scala (92%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/basic/Block.scala (93%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/basic/Effect.scala (94%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/basic/Event.scala (86%) rename metronome/hotstuff/consensus/src/{ => 
io/iohk}/metronome/hotstuff/consensus/basic/Message.scala (93%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/basic/Phase.scala (93%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/basic/ProtocolError.scala (96%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/basic/ProtocolState.scala (99%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/basic/QuorumCertificate.scala (72%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/basic/Signing.scala (90%) rename metronome/hotstuff/consensus/src/{ => io/iohk}/metronome/hotstuff/consensus/package.scala (66%) rename metronome/hotstuff/consensus/test/src/{ => io/iohk}/metronome/hotstuff/consensus/FederationSpec.scala (97%) rename metronome/hotstuff/consensus/test/src/{ => io/iohk}/metronome/hotstuff/consensus/LeaderSelectionProps.scala (93%) rename metronome/hotstuff/consensus/test/src/{ => io/iohk}/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala (99%) rename metronome/tracing/src/{main/scala => }/io/iohk/metronome/tracer/Tracer.scala (100%) diff --git a/metronome/core/src/metronome/core/Tagger.scala b/metronome/core/src/io/iohk/metronome/core/Tagger.scala similarity index 96% rename from metronome/core/src/metronome/core/Tagger.scala rename to metronome/core/src/io/iohk/metronome/core/Tagger.scala index 975e344e..7b3a5a76 100644 --- a/metronome/core/src/metronome/core/Tagger.scala +++ b/metronome/core/src/io/iohk/metronome/core/Tagger.scala @@ -1,4 +1,4 @@ -package metronome.core +package io.iohk.metronome.core import shapeless.tag, tag.@@ diff --git a/metronome/core/src/metronome/core/Validated.scala b/metronome/core/src/io/iohk/metronome/core/Validated.scala similarity index 93% rename from metronome/core/src/metronome/core/Validated.scala rename to metronome/core/src/io/iohk/metronome/core/Validated.scala index d6895e5a..96c902f8 100644 --- a/metronome/core/src/metronome/core/Validated.scala +++ b/metronome/core/src/io/iohk/metronome/core/Validated.scala @@ -1,4 +1,4 @@ -package metronome.core +package io.iohk.metronome.core /** Can be used to tag any particular type as validated, for example: * diff --git a/metronome/core/src/metronome/core/package.scala b/metronome/core/src/io/iohk/metronome/core/package.scala similarity index 72% rename from metronome/core/src/metronome/core/package.scala rename to metronome/core/src/io/iohk/metronome/core/package.scala index 7432ecba..7b803e5a 100644 --- a/metronome/core/src/metronome/core/package.scala +++ b/metronome/core/src/io/iohk/metronome/core/package.scala @@ -1,4 +1,4 @@ -package metronome +package io.iohk.metronome package object core { type Validated[U] = Validated.Tagged[U] diff --git a/metronome/crypto/src/metronome/crypto/GroupSignature.scala b/metronome/crypto/src/io/iohk/metronome/crypto/GroupSignature.scala similarity index 88% rename from metronome/crypto/src/metronome/crypto/GroupSignature.scala rename to metronome/crypto/src/io/iohk/metronome/crypto/GroupSignature.scala index 1eccd169..a453c135 100644 --- a/metronome/crypto/src/metronome/crypto/GroupSignature.scala +++ b/metronome/crypto/src/io/iohk/metronome/crypto/GroupSignature.scala @@ -1,4 +1,4 @@ -package metronome.crypto +package io.iohk.metronome.crypto /** Group signature of members with identity `K` over some content `H`, * represented by type `G`, e.g. 
`G` could be a `List[Secp256k1Signature]` diff --git a/metronome/crypto/src/metronome/crypto/PartialSignature.scala b/metronome/crypto/src/io/iohk/metronome/crypto/PartialSignature.scala similarity index 88% rename from metronome/crypto/src/metronome/crypto/PartialSignature.scala rename to metronome/crypto/src/io/iohk/metronome/crypto/PartialSignature.scala index c54223f1..de6eeeef 100644 --- a/metronome/crypto/src/metronome/crypto/PartialSignature.scala +++ b/metronome/crypto/src/io/iohk/metronome/crypto/PartialSignature.scala @@ -1,4 +1,4 @@ -package metronome.crypto +package io.iohk.metronome.crypto /** An individual signature of a member with identity `K` over some content `H`, * represented by type `P`, e.g. `P` could be a single `Secp256k1Signature` diff --git a/metronome/crypto/src/metronome/crypto/Secp256k1Utils.scala b/metronome/crypto/src/io/iohk/metronome/crypto/Secp256k1Utils.scala similarity index 95% rename from metronome/crypto/src/metronome/crypto/Secp256k1Utils.scala rename to metronome/crypto/src/io/iohk/metronome/crypto/Secp256k1Utils.scala index 380d5845..0223b2a1 100644 --- a/metronome/crypto/src/metronome/crypto/Secp256k1Utils.scala +++ b/metronome/crypto/src/io/iohk/metronome/crypto/Secp256k1Utils.scala @@ -1,4 +1,4 @@ -package metronome.crypto +package io.iohk.metronome.crypto import java.security.SecureRandom import org.bouncycastle.crypto.AsymmetricCipherKeyPair diff --git a/metronome/crypto/src/io/iohk/metronome/crypto/hash/Hash.scala b/metronome/crypto/src/io/iohk/metronome/crypto/hash/Hash.scala new file mode 100644 index 00000000..b4d0df14 --- /dev/null +++ b/metronome/crypto/src/io/iohk/metronome/crypto/hash/Hash.scala @@ -0,0 +1,6 @@ +package io.iohk.metronome.crypto.hash + +import io.iohk.metronome.core.Tagger +import scodec.bits.ByteVector + +object Hash extends Tagger[ByteVector] diff --git a/metronome/crypto/src/metronome/crypto/hash/Keccak256.scala b/metronome/crypto/src/io/iohk/metronome/crypto/hash/Keccak256.scala similarity index 92% rename from metronome/crypto/src/metronome/crypto/hash/Keccak256.scala rename to metronome/crypto/src/io/iohk/metronome/crypto/hash/Keccak256.scala index 4a16720d..b6e96a8b 100644 --- a/metronome/crypto/src/metronome/crypto/hash/Keccak256.scala +++ b/metronome/crypto/src/io/iohk/metronome/crypto/hash/Keccak256.scala @@ -1,4 +1,4 @@ -package metronome.crypto.hash +package io.iohk.metronome.crypto.hash import org.bouncycastle.crypto.digests.KeccakDigest import scodec.bits.{BitVector, ByteVector} diff --git a/metronome/crypto/src/metronome/crypto/hash/package.scala b/metronome/crypto/src/io/iohk/metronome/crypto/hash/package.scala similarity index 60% rename from metronome/crypto/src/metronome/crypto/hash/package.scala rename to metronome/crypto/src/io/iohk/metronome/crypto/hash/package.scala index df00e947..e9df1174 100644 --- a/metronome/crypto/src/metronome/crypto/hash/package.scala +++ b/metronome/crypto/src/io/iohk/metronome/crypto/hash/package.scala @@ -1,4 +1,4 @@ -package metronome.crypto +package io.iohk.metronome.crypto package object hash { type Hash = Hash.Tagged diff --git a/metronome/crypto/src/metronome/crypto/hash/Hash.scala b/metronome/crypto/src/metronome/crypto/hash/Hash.scala deleted file mode 100644 index 9fb44d4d..00000000 --- a/metronome/crypto/src/metronome/crypto/hash/Hash.scala +++ /dev/null @@ -1,6 +0,0 @@ -package metronome.crypto.hash - -import metronome.core.Tagger -import scodec.bits.ByteVector - -object Hash extends Tagger[ByteVector] diff --git 
a/metronome/crypto/test/src/metronome/crypto/hash/Keccak256Spec.scala b/metronome/crypto/test/src/io/iohk/metronome/crypto/hash/Keccak256Spec.scala similarity index 93% rename from metronome/crypto/test/src/metronome/crypto/hash/Keccak256Spec.scala rename to metronome/crypto/test/src/io/iohk/metronome/crypto/hash/Keccak256Spec.scala index 24bccd88..ce87ac86 100644 --- a/metronome/crypto/test/src/metronome/crypto/hash/Keccak256Spec.scala +++ b/metronome/crypto/test/src/io/iohk/metronome/crypto/hash/Keccak256Spec.scala @@ -1,4 +1,4 @@ -package metronome.crypto.hash +package io.iohk.metronome.crypto.hash import scodec.bits._ import org.scalatest.flatspec.AnyFlatSpec diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala similarity index 98% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala index 1ca46c75..08a6fded 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/Federation.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala @@ -1,4 +1,4 @@ -package metronome.hotstuff.consensus +package io.iohk.metronome.hotstuff.consensus /** Collection of keys of the federation members. * diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/LeaderSelection.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/LeaderSelection.scala similarity index 95% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/LeaderSelection.scala rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/LeaderSelection.scala index ecd795f5..a4940711 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/LeaderSelection.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/LeaderSelection.scala @@ -1,6 +1,6 @@ -package metronome.hotstuff.consensus +package io.iohk.metronome.hotstuff.consensus -import metronome.crypto.hash.Keccak256 +import io.iohk.metronome.crypto.hash.Keccak256 import scodec.bits.ByteVector /** Strategy to pick the leader for a given view number from diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/ViewNumber.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/ViewNumber.scala similarity index 77% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/ViewNumber.scala rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/ViewNumber.scala index f92e1f26..1fc1ab33 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/ViewNumber.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/ViewNumber.scala @@ -1,6 +1,6 @@ -package metronome.hotstuff.consensus +package io.iohk.metronome.hotstuff.consensus -import metronome.core.Tagger +import io.iohk.metronome.core.Tagger object ViewNumber extends Tagger[Long] { implicit class Ops(val vn: ViewNumber) extends AnyVal { diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Agreement.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Agreement.scala similarity index 92% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Agreement.scala rename to 
metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Agreement.scala index ce2d2266..4361bf4e 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Agreement.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Agreement.scala @@ -1,4 +1,4 @@ -package metronome.hotstuff.consensus.basic +package io.iohk.metronome.hotstuff.consensus.basic /** Capture all the generic types in the BFT agreement, * so we don't have to commit to any particular set of content. diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Block.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala similarity index 93% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Block.scala rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala index 32a8c231..dff0db04 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Block.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala @@ -1,4 +1,4 @@ -package metronome.hotstuff.consensus.basic +package io.iohk.metronome.hotstuff.consensus.basic /** Type class to project the properties we need a HotStuff block to have * from the generic `Block` type in the `Agreement`. diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Effect.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Effect.scala similarity index 94% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Effect.scala rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Effect.scala index 5bc868c9..2e467b0e 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Effect.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Effect.scala @@ -1,8 +1,8 @@ -package metronome.hotstuff.consensus.basic +package io.iohk.metronome.hotstuff.consensus.basic import scala.concurrent.duration.FiniteDuration -import metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.hotstuff.consensus.ViewNumber /** Represent all possible effects that a protocol transition can * ask the host system to carry out, e.g. send messages to replicas. diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Event.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Event.scala similarity index 86% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Event.scala rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Event.scala index ade31767..130d7a3a 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Event.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Event.scala @@ -1,6 +1,6 @@ -package metronome.hotstuff.consensus.basic +package io.iohk.metronome.hotstuff.consensus.basic -import metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.hotstuff.consensus.ViewNumber /** Input events for the protocol model. 
*/ sealed trait Event[A <: Agreement] diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Message.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Message.scala similarity index 93% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Message.scala rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Message.scala index 0fd7c8f0..9980905e 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Message.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Message.scala @@ -1,7 +1,7 @@ -package metronome.hotstuff.consensus.basic +package io.iohk.metronome.hotstuff.consensus.basic -import metronome.crypto.PartialSignature -import metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.crypto.PartialSignature +import io.iohk.metronome.hotstuff.consensus.ViewNumber /** Basic HotStuff protocol messages. */ sealed trait Message[A <: Agreement] { diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Phase.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Phase.scala similarity index 93% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Phase.scala rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Phase.scala index 5b6808f7..2506fb0d 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Phase.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Phase.scala @@ -1,4 +1,4 @@ -package metronome.hotstuff.consensus.basic +package io.iohk.metronome.hotstuff.consensus.basic /** All phases of the basic HotStuff protocol. 
*/
 sealed trait Phase {
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolError.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolError.scala
similarity index 96%
rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolError.scala
rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolError.scala
index 7b48fa90..08b437ed 100644
--- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolError.scala
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolError.scala
@@ -1,6 +1,6 @@
-package metronome.hotstuff.consensus.basic
+package io.iohk.metronome.hotstuff.consensus.basic

-import metronome.hotstuff.consensus.ViewNumber
+import io.iohk.metronome.hotstuff.consensus.ViewNumber

 sealed trait ProtocolError[A <: Agreement]
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala
similarity index 99%
rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala
rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala
index e437edc2..de667699 100644
--- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/ProtocolState.scala
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala
@@ -1,7 +1,7 @@
-package metronome.hotstuff.consensus.basic
+package io.iohk.metronome.hotstuff.consensus.basic

-import metronome.core.Validated
-import metronome.hotstuff.consensus.{ViewNumber, Federation}
+import io.iohk.metronome.core.Validated
+import io.iohk.metronome.hotstuff.consensus.{ViewNumber, Federation}
 import scala.concurrent.duration.FiniteDuration

 /** Basic HotStuff protocol state machine.
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/QuorumCertificate.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/QuorumCertificate.scala
similarity index 72%
rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/QuorumCertificate.scala
rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/QuorumCertificate.scala
index 043dea65..1390e062 100644
--- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/QuorumCertificate.scala
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/QuorumCertificate.scala
@@ -1,7 +1,7 @@
-package metronome.hotstuff.consensus.basic
+package io.iohk.metronome.hotstuff.consensus.basic

-import metronome.crypto.GroupSignature
-import metronome.hotstuff.consensus.ViewNumber
+import io.iohk.metronome.crypto.GroupSignature
+import io.iohk.metronome.hotstuff.consensus.ViewNumber

 /** A Quorum Certificate (QC) over a tuple (message-type, view-number, block-hash) is a data type
   * that combines a collection of signatures for the same tuple signed by (n − f) replicas.
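The scaladoc above pins down the shape of the certificate. As a rough illustration only (the hunk does not show the file's actual body, so the field names below are hypothetical, built from the `GroupSignature[K, H, G]` and `ViewNumber` types renamed in this patch):

```
// Hypothetical sketch inferred from the scaladoc above; not the file's actual code.
case class QuorumCertificate[K, H, G](
    viewNumber: ViewNumber, // the view in which the (n − f) votes were collected
    blockHash: H,           // hash of the block the votes certify
    // aggregate signature over the (message-type, view-number, block-hash) tuple;
    // the message-type tag is elided here for brevity
    signature: GroupSignature[K, (ViewNumber, H), G]
)
```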
diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Signing.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Signing.scala similarity index 90% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Signing.scala rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Signing.scala index 81a74f5e..57c8c0d6 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/basic/Signing.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Signing.scala @@ -1,7 +1,7 @@ -package metronome.hotstuff.consensus.basic +package io.iohk.metronome.hotstuff.consensus.basic -import metronome.crypto.{PartialSignature, GroupSignature} -import metronome.hotstuff.consensus.{ViewNumber, Federation} +import io.iohk.metronome.crypto.{PartialSignature, GroupSignature} +import io.iohk.metronome.hotstuff.consensus.{ViewNumber, Federation} trait Signing[A <: Agreement] { diff --git a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/package.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/package.scala similarity index 66% rename from metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/package.scala rename to metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/package.scala index ef8e0249..94f9988f 100644 --- a/metronome/hotstuff/consensus/src/metronome/hotstuff/consensus/package.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/package.scala @@ -1,4 +1,4 @@ -package metronome.hotstuff +package io.iohk.metronome.hotstuff package object consensus { type ViewNumber = ViewNumber.Tagged diff --git a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/FederationSpec.scala similarity index 97% rename from metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala rename to metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/FederationSpec.scala index eb3d6737..4e94f78f 100644 --- a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/FederationSpec.scala +++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/FederationSpec.scala @@ -1,4 +1,4 @@ -package metronome.hotstuff.consensus +package io.iohk.metronome.hotstuff.consensus import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers diff --git a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/LeaderSelectionProps.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/LeaderSelectionProps.scala similarity index 93% rename from metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/LeaderSelectionProps.scala rename to metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/LeaderSelectionProps.scala index 0fbfe1bb..7880cf1e 100644 --- a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/LeaderSelectionProps.scala +++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/LeaderSelectionProps.scala @@ -1,6 +1,6 @@ -package metronome.hotstuff.consensus +package io.iohk.metronome.hotstuff.consensus -import metronome.core.Tagger +import io.iohk.metronome.core.Tagger import org.scalacheck._ import org.scalacheck.Prop.forAll diff --git 
a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala similarity index 99% rename from metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala rename to metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala index 1a5bfdfe..88a2ef0f 100644 --- a/metronome/hotstuff/consensus/test/src/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala +++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala @@ -1,7 +1,11 @@ -package metronome.hotstuff.consensus.basic +package io.iohk.metronome.hotstuff.consensus.basic -import metronome.crypto.{GroupSignature, PartialSignature} -import metronome.hotstuff.consensus.{ViewNumber, Federation, LeaderSelection} +import io.iohk.metronome.crypto.{GroupSignature, PartialSignature} +import io.iohk.metronome.hotstuff.consensus.{ + ViewNumber, + Federation, + LeaderSelection +} import org.scalacheck.commands.Commands import org.scalacheck.{Properties, Gen, Prop} import org.scalacheck.Arbitrary.arbitrary diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala index 30b005b2..18bcc686 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala @@ -1,17 +1,17 @@ package io.iohk.metronome.networking import cats.effect.Resource +import io.iohk.metronome.crypto.Secp256k1Utils +import java.net.{InetSocketAddress, ServerSocket} +import java.security.SecureRandom import monix.eval.Task import monix.execution.Scheduler import org.bouncycastle.crypto.AsymmetricCipherKeyPair import org.scalatest.Assertion -import scodec.Codec -import scodec.bits.BitVector - -import java.net.{InetSocketAddress, ServerSocket} -import java.security.SecureRandom import scala.concurrent.Future import scala.util.Random +import scodec.bits.BitVector +import scodec.Codec object RemoteConnectionManagerTestUtils { def customTestCaseResourceT[T]( @@ -66,11 +66,11 @@ object RemoteConnectionManagerTestUtils { object NodeInfo { def generateRandom(secureRandom: SecureRandom): NodeInfo = { val keyPair = - metronome.crypto.Secp256k1Utils.generateKeyPair(secureRandom) + Secp256k1Utils.generateKeyPair(secureRandom) NodeInfo( keyPair, Secp256k1Key( - metronome.crypto.Secp256k1Utils.keyPairToUncompressed(keyPair) + Secp256k1Utils.keyPairToUncompressed(keyPair) ) ) } diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala index 1d708a5a..3788bbf4 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala @@ -4,6 +4,7 @@ import cats.data.NonEmptyList import cats.effect.concurrent.Ref import cats.effect.{Concurrent, ContextShift, Resource, Timer, Sync} import io.circe.{Json, JsonObject, Encoder} +import 
io.iohk.metronome.crypto.Secp256k1Utils
 import io.iohk.metronome.networking.ConnectionHandler.MessageReceived
 import io.iohk.metronome.networking.RemoteConnectionManager.{
   ClusterConfig,
@@ -175,7 +176,7 @@ object RemoteConnectionManagerWithScalanetProviderSpec {
   ](
       bindAddress: InetSocketAddress = randomAddress(),
       nodeKeyPair: AsymmetricCipherKeyPair =
-        metronome.crypto.Secp256k1Utils.generateKeyPair(secureRandom),
+        Secp256k1Utils.generateKeyPair(secureRandom),
       secureRandom: SecureRandom = secureRandom,
       useNativeTlsImplementation: Boolean = false,
       framingConfig: FramingConfig = standardFraming,
diff --git a/metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala b/metronome/tracing/src/io/iohk/metronome/tracer/Tracer.scala
similarity index 100%
rename from metronome/tracing/src/main/scala/io/iohk/metronome/tracer/Tracer.scala
rename to metronome/tracing/src/io/iohk/metronome/tracer/Tracer.scala

From 58f728b910a9f4f7d97058938c303e104c58711c Mon Sep 17 00:00:00 2001
From: KonradStaniec
Date: Fri, 2 Apr 2021 12:01:44 +0200
Subject: [PATCH 15/48] [PM-3092] Replace duplicated connections (#19)

* [PM-3092] Make HandledConnection internal to ConnectionHandler

* [PM-3092] Refactor incrementing and decrementing connections

* [PM-3092] Decouple pushing connections from handling them

* [PM-3092] Re-add registered and discarded events after merge

* [PM-3092] Move conflict handling to background

* [PM-3092] Make it possible to discern different disconnect reasons

* [PM-3092] Add test showing replacing incoming connections

* [PM-3092] Fix outgoing test

* [PM-3092] Add test case showing that in case of multiple conflicts the last connection wins

* [PM-3092] PR comments

Add replace method to registry
Add proper tracing events
Use clearer constructors for effect F
---
 .../networking/ConnectionHandler.scala        | 370 ++++++++++++++----
 .../networking/ConnectionsRegister.scala      |  10 +
 .../networking/RemoteConnectionManager.scala  |  48 ++-
 .../networking/ConnectionHandlerSpec.scala    | 213 ++++++----
 .../MockEncryptedConnectionProvider.scala     |   2 +
 ...onnectionManagerWithMockProviderSpec.scala |  10 +-
 6 files changed, 470 insertions(+), 183 deletions(-)

diff --git a/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala b/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala
index 62f80d24..342a7f87 100644
--- a/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala
+++ b/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala
@@ -1,42 +1,51 @@
 package io.iohk.metronome.networking

+import cats.effect.concurrent.Deferred
+import cats.effect.implicits._
 import cats.effect.{Concurrent, ContextShift, Resource, Sync}
-import cats.effect.concurrent.{Deferred, TryableDeferred}
-import io.iohk.metronome.networking.RemoteConnectionManager.withCancelToken
-import monix.catnap.ConcurrentQueue
-import monix.execution.atomic.AtomicInt
-import monix.tail.Iterant
 import cats.implicits._
-import cats.effect.implicits._
+import io.iohk.metronome.networking.ConnectionHandler.HandledConnection._
 import io.iohk.metronome.networking.ConnectionHandler.{
   ConnectionAlreadyClosedException,
+  ConnectionWithConflictFlag,
+  FinishedConnection,
   HandledConnection,
-  MessageReceived,
-  UnexpectedConnectionError
+  MessageReceived
 }
 import io.iohk.metronome.networking.EncryptedConnectionProvider.{
   ConnectionAlreadyClosed,
   ConnectionError
 }
+import monix.catnap.ConcurrentQueue
+import monix.execution.atomic.AtomicInt
+import monix.tail.Iterant
+import 
java.net.InetSocketAddress
 import scala.util.control.NoStackTrace

 class ConnectionHandler[F[_]: Concurrent, K, M](
-    connectionQueue: ConcurrentQueue[F, HandledConnection[F, K, M]],
+    connectionQueue: ConcurrentQueue[F, ConnectionWithConflictFlag[F, K, M]],
     connectionsRegister: ConnectionsRegister[F, K, M],
     messageQueue: ConcurrentQueue[F, MessageReceived[K, M]],
-    cancelToken: TryableDeferred[F, Unit],
-    connectionFinishCallback: HandledConnection[F, K, M] => F[Unit]
+    cancelToken: Deferred[F, Unit],
+    connectionFinishCallback: FinishedConnection[K] => F[Unit]
 )(implicit tracers: NetworkTracers[F, K, M]) {

   private val numberOfRunningConnections = AtomicInt(0)

+  private def incrementRunningConnections: F[Unit] = {
+    Concurrent[F].delay(numberOfRunningConnections.increment())
+  }
+
+  private def decrementRunningConnections: F[Unit] = {
+    Concurrent[F].delay(numberOfRunningConnections.decrement())
+  }
+
   private def closeAndDeregisterConnection(
       handledConnection: HandledConnection[F, K, M]
   ): F[Unit] = {
     val close = for {
-      _ <- Concurrent[F].delay(numberOfRunningConnections.decrement())
+      _ <- decrementRunningConnections
       _ <- connectionsRegister.deregisterConnection(handledConnection)
       _ <- handledConnection.close
     } yield ()
@@ -46,25 +55,45 @@ class ConnectionHandler[F[_]: Concurrent, K, M](
     }
   }

-  /** Registers connections and start handling incoming messages in background, in case connection is already handled
+  private def register(
+      possibleNewConnection: HandledConnection[F, K, M]
+  ): F[Unit] = {
+    connectionsRegister.registerIfAbsent(possibleNewConnection).flatMap {
+      maybeConflicting =>
+        // in case of conflict we deal with it in the background
+        connectionQueue.offer(
+          (possibleNewConnection, maybeConflicting.isDefined)
+        )
+    }
+  }
+
+  /** Registers an incoming connection and starts handling incoming messages in the background; in case the connection is already handled
    * it closes it
    *
-   * @param possibleNewConnection, possible connection to handle
+   * @param serverAddress, server address of the incoming connection, which should already be known
+   * @param encryptedConnection, established connection
    */
-  def registerOrClose(
-      possibleNewConnection: HandledConnection[F, K, M]
+  def registerIncoming(
+      serverAddress: InetSocketAddress,
+      encryptedConnection: EncryptedConnection[F, K, M]
   ): F[Unit] = {
-    connectionsRegister.registerIfAbsent(possibleNewConnection).flatMap {
-      case Some(_) =>
-        //TODO [PM-3092] for now we are closing any new connections in case of conflict, we may investigate other strategies
-        // like keeping old for outgoing and replacing for incoming
-        tracers.discarded(possibleNewConnection) >>
-          possibleNewConnection.close
+    HandledConnection
+      .incoming(cancelToken, serverAddress, encryptedConnection)
+      .flatMap(connection => register(connection))

-      case None =>
-        tracers.registered(possibleNewConnection) >>
-          connectionQueue.offer(possibleNewConnection)
-    }
+  }
+
+  /** Registers an outgoing connection and starts handling incoming messages in the background; in case the connection is already handled
+    * it closes it
+    *
+    * @param encryptedConnection, established connection
+    */
+  def registerOutgoing(
+      encryptedConnection: EncryptedConnection[F, K, M]
+  ): F[Unit] = {
+    HandledConnection
+      .outgoing(cancelToken, encryptedConnection)
+      .flatMap(connection => register(connection))
   }

   /** Checks if the handler already handles a connection to the peer with the provided key
@@ -100,6 +129,11 @@ class ConnectionHandler[F[_]: Concurrent, K, M](
   def getConnection(key: K): F[Option[HandledConnection[F, K, M]]] =
connectionsRegister.getConnection(key)

+  /** Sends a message to the remote peer if it is connected
+    *
+    * @param recipient, key of the remote peer
+    * @param message, message to send
+    */
   def sendMessage(
       recipient: K,
       message: M
@@ -113,7 +147,7 @@
         case Left(_) =>
           // Closing the connection will cause it to be re-queued for reconnection.
           tracers.sendError(connection) >>
-            connection.close.as(
+            connection.closeAlreadyClosed.as(
               Left(ConnectionAlreadyClosedException(recipient))
             )

@@ -125,12 +159,108 @@
     }
   }

+  private def handleConflict(
+      newConnectionWithPossibleConflict: ConnectionWithConflictFlag[F, K, M]
+  ): F[Option[HandledConnection[F, K, M]]] = {
+    val (newConnection, conflictHappened) =
+      newConnectionWithPossibleConflict
+
+    if (conflictHappened) {
+      connectionsRegister.registerIfAbsent(newConnection).flatMap {
+        case Some(oldConnection) =>
+          newConnection.connectionDirection match {
+            case HandledConnection.IncomingConnection =>
+              // even though we have a connection to this peer, they are calling us. One reason may be that they failed and
+              // we did not notice. Let's try to replace the old connection with the new one.
+              replaceConnection(newConnection, oldConnection)
+
+            case HandledConnection.OutgoingConnection =>
+              // for some reason we were calling while we already had a connection; most probably we received an incoming
+              // connection during the call. Close this new connection and keep the old one
+              tracers.discarded(newConnection) >> newConnection.close.as(none)
+          }
+        case None =>
+          // in the meantime, between detecting the conflict and processing it, the old connection dropped. Register the new one
+          tracers.registered(newConnection) >> newConnection.some.pure[F]
+      }
+    } else {
+      tracers.registered(newConnection) >> newConnection.some.pure[F]
+    }
+  }
+
+  /** Safely replaces an old connection from a remote peer with a new connection to the same remote peer.
+    *
+    * 1. The callback for the old connection will not be called, as from the perspective of the outside world the connection never
+    * finished.
+    * 2. 
From the point of view of the outside world, the connection never leaves the connection registry, i.e. during replacement all calls to
+    * registerOutgoing or registerIncoming will report conflicts to be handled
+    */
+  private def replaceConnection(
+      newConnection: HandledConnection[F, K, M],
+      oldConnection: HandledConnection[F, K, M]
+  ): F[Option[HandledConnection[F, K, M]]] = {
+    for {
+      result <- oldConnection.requestReplace(newConnection)
+      maybeNew <- result match {
+        case ConnectionHandler.ReplaceFinished =>
+          // Replace succeeded, the old connection should already be closed and discarded, pass the new one forward
+          tracers.registered(newConnection) >>
+            newConnection.some.pure[F]
+        case ConnectionHandler.ConnectionAlreadyDisconnected =>
+          // during or just before the replace, the old connection disconnected for some other reason;
+          // the reconnect callback will be fired either way, so close the new connection
+          tracers.discarded(newConnection) >>
+            newConnection.close.as(None: Option[HandledConnection[F, K, M]])
+      }
+
+    } yield maybeNew
+  }
+
-  private def callCallBackIfNotClosed(
+  private def callCallBackWithConnection(
       handledConnection: HandledConnection[F, K, M]
   ): F[Unit] = {
-    cancelToken.tryGet.flatMap {
-      case Some(_) => Sync[F].unit
-      case None    => connectionFinishCallback(handledConnection)
+    connectionFinishCallback(
+      FinishedConnection(
+        handledConnection.key,
+        handledConnection.serverAddress
+      )
+    )
+  }
+
+  private def handleReplace(
+      replaceRequest: ReplaceRequested[F, K, M]
+  ): F[Unit] = {
+    connectionsRegister.replace(replaceRequest.newConnection).flatMap {
+      case Some(oldConnection) =>
+        // close the old connection just in case whoever requested the replace forgot to do it
+        oldConnection.close
+      case None =>
+        // this case should not happen, as we handle each connection in a separate fiber, and only that fiber can remove
+        // the connection with the given key. 
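+        // Either way the match falls through to signalReplaceSuccess below, so
+        // the fiber blocked in requestReplace is always woken up.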
+ ().pure[F] + } >> replaceRequest.signalReplaceSuccess + } + + private def handleConnectionFinish( + connection: HandledConnection[F, K, M] + ): F[Unit] = { + // at this point closeReason will always be filled + connection.getCloseReason.flatMap { + case HandledConnection.RemoteClosed => + closeAndDeregisterConnection( + connection + ) >> callCallBackWithConnection(connection) + case RemoteError(e) => + tracers.receiveError( + (connection, e) + ) >> closeAndDeregisterConnection( + connection + ) >> callCallBackWithConnection(connection) + case HandledConnection.ManagerShutdown => + closeAndDeregisterConnection(connection) + case replaceRequest: ReplaceRequested[F, K, M] => + // override old connection with new one, connection count is not changed, and callback is not called + handleReplace(replaceRequest) } } @@ -141,33 +271,27 @@ class ConnectionHandler[F[_]: Concurrent, K, M]( private def handleConnections: F[Unit] = { Iterant .repeatEvalF(connectionQueue.poll) + .mapEval(handleConflict) + .collect { case Some(newConnection) => newConnection } .mapEval { connection => - Sync[F].delay(numberOfRunningConnections.increment()).flatMap { _ => + incrementRunningConnections >> Iterant .repeatEvalF( - withCancelToken(cancelToken, connection.incomingMessage) + connection.incomingMessage ) .takeWhile(_.isDefined) .map(_.get) - .mapEval[Unit] { - case Right(m) => - tracers.received((connection, m)) >> - messageQueue.offer( - MessageReceived(connection.key, m) - ) - case Left(e) => - tracers.receiveError((connection, e)) >> - Concurrent[F].raiseError[Unit]( - UnexpectedConnectionError(e, connection.key) - ) + .mapEval[Unit] { m => + tracers.received((connection, m)) >> + messageQueue.offer( + MessageReceived(connection.key, m) + ) } .guarantee( - closeAndDeregisterConnection(connection) - .flatMap(_ => callCallBackIfNotClosed(connection)) + handleConnectionFinish(connection) ) .completedL .start - } } .completedL } @@ -177,6 +301,9 @@ class ConnectionHandler[F[_]: Concurrent, K, M]( } object ConnectionHandler { + type ConnectionWithConflictFlag[F[_], K, M] = + (HandledConnection[F, K, M], Boolean) + case class ConnectionAlreadyClosedException[K](key: K) extends RuntimeException( s"Connection with node ${key}, has already closed" @@ -200,6 +327,10 @@ object ConnectionHandler { case class MessageReceived[K, M](from: K, message: M) + sealed abstract class ReplaceResult + case object ReplaceFinished extends ReplaceResult + case object ConnectionAlreadyDisconnected extends ReplaceResult + /** Connection which is already handled by connection handler i.e it is registered in registry and handler is subscribed * for incoming messages of that connection * @@ -208,10 +339,13 @@ object ConnectionHandler { * underlyingConnection remoteAddress * @param underlyingConnection, encrypted connection to send and receive messages */ - case class HandledConnection[F[_], K, M]( - key: K, - serverAddress: InetSocketAddress, - underlyingConnection: EncryptedConnection[F, K, M] + class HandledConnection[F[_]: Concurrent, K, M] private ( + val connectionDirection: HandledConnectionDirection, + globalCancelToken: Deferred[F, Unit], + val key: K, + val serverAddress: InetSocketAddress, + underlyingConnection: EncryptedConnection[F, K, M], + closeReason: Deferred[F, HandledConnectionCloseReason] ) { def sendMessage(m: M): F[Unit] = { underlyingConnection.sendMessage(m) @@ -221,46 +355,131 @@ object ConnectionHandler { underlyingConnection.close } - def incomingMessage: F[Option[Either[ConnectionError, M]]] = { - 
underlyingConnection.incomingMessage + def closeAlreadyClosed: F[Unit] = { + completeWithReason(RemoteClosed) >> underlyingConnection.close + } + + def requestReplace( + newConnection: HandledConnection[F, K, M] + ): F[ReplaceResult] = { + ReplaceRequested.requestReplace(newConnection).flatMap { request => + closeReason.complete(request).attempt.flatMap { + case Left(_) => + (ConnectionAlreadyDisconnected: ReplaceResult).pure[F] + case Right(_) => + underlyingConnection.close >> + request.waitForReplaceToFinish >> (ReplaceFinished: ReplaceResult) + .pure[F] + } + } + } + + private def completeWithReason(r: HandledConnectionCloseReason): F[Unit] = + closeReason.complete(r).attempt.void + + def getCloseReason: F[HandledConnectionCloseReason] = closeReason.get + + private def handleIncomingEvent( + incomingEvent: Option[Either[ConnectionError, M]] + ): F[Option[M]] = { + incomingEvent match { + case Some(Right(m)) => m.some.pure[F] + case Some(Left(e)) => completeWithReason(RemoteError(e)).as(None) + case None => completeWithReason(RemoteClosed).as(None) + } + } + + def incomingMessage: F[Option[M]] = { + Concurrent[F] + .race(globalCancelToken.get, underlyingConnection.incomingMessage) + .flatMap { + case Left(_) => completeWithReason(ManagerShutdown).as(None) + case Right(e) => handleIncomingEvent(e) + } } } object HandledConnection { - def outgoing[F[_], K, M]( + sealed abstract class HandledConnectionCloseReason + case object RemoteClosed extends HandledConnectionCloseReason + case class RemoteError(e: ConnectionError) + extends HandledConnectionCloseReason + case object ManagerShutdown extends HandledConnectionCloseReason + class ReplaceRequested[F[_]: Sync, K, M]( + val newConnection: HandledConnection[F, K, M], + replaced: Deferred[F, Unit] + ) extends HandledConnectionCloseReason { + def signalReplaceSuccess: F[Unit] = replaced.complete(()).attempt.void + def waitForReplaceToFinish: F[Unit] = replaced.get + } + + object ReplaceRequested { + def requestReplace[F[_]: Concurrent, K, M]( + newConnection: HandledConnection[F, K, M] + ): F[ReplaceRequested[F, K, M]] = { + for { + signal <- Deferred[F, Unit] + } yield new ReplaceRequested(newConnection, signal) + } + } + + sealed abstract class HandledConnectionDirection + case object IncomingConnection extends HandledConnectionDirection + case object OutgoingConnection extends HandledConnectionDirection + + private def buildLifeCycleListener[F[_]: Concurrent] + : F[Deferred[F, HandledConnectionCloseReason]] = { + for { + closeReason <- Deferred[F, HandledConnectionCloseReason] + } yield closeReason + } + + private[ConnectionHandler] def outgoing[F[_]: Concurrent, K, M]( + globalCancelToken: Deferred[F, Unit], encryptedConnection: EncryptedConnection[F, K, M] - ): HandledConnection[F, K, M] = { - HandledConnection( - encryptedConnection.remotePeerInfo._1, - encryptedConnection.remotePeerInfo._2, - encryptedConnection - ) + ): F[HandledConnection[F, K, M]] = { + buildLifeCycleListener[F].map { closeReason => + new HandledConnection[F, K, M]( + OutgoingConnection, + globalCancelToken, + encryptedConnection.remotePeerInfo._1, + encryptedConnection.remotePeerInfo._2, + encryptedConnection, + closeReason + ) {} + } } - def incoming[F[_], K, M]( + private[ConnectionHandler] def incoming[F[_]: Concurrent, K, M]( + globalCancelToken: Deferred[F, Unit], serverAddress: InetSocketAddress, encryptedConnection: EncryptedConnection[F, K, M] - ): HandledConnection[F, K, M] = { - HandledConnection( - encryptedConnection.remotePeerInfo._1, - serverAddress, - 
encryptedConnection
-      )
+    ): F[HandledConnection[F, K, M]] = {
+      buildLifeCycleListener[F].map { closeReason =>
+        new HandledConnection[F, K, M](
+          IncomingConnection,
+          globalCancelToken,
+          encryptedConnection.remotePeerInfo._1,
+          serverAddress,
+          encryptedConnection,
+          closeReason
+        ) {}
+      }
     }
   }

   private def buildHandler[F[_]: Concurrent: ContextShift, K, M](
-      connectionFinishCallback: HandledConnection[F, K, M] => F[Unit]
+      connectionFinishCallback: FinishedConnection[K] => F[Unit]
   )(implicit
       tracers: NetworkTracers[F, K, M]
   ): F[ConnectionHandler[F, K, M]] = {
     for {
-      cancelToken <- Deferred.tryable[F, Unit]
+      cancelToken <- Deferred[F, Unit]
       acquiredConnections <- ConnectionsRegister.empty[F, K, M]
       messageQueue <- ConcurrentQueue.unbounded[F, MessageReceived[K, M]]()
       connectionQueue <- ConcurrentQueue
-        .unbounded[F, HandledConnection[F, K, M]]()
+        .unbounded[F, ConnectionWithConflictFlag[F, K, M]]()
     } yield new ConnectionHandler[F, K, M](
       connectionQueue,
       acquiredConnections,
@@ -270,17 +489,22 @@ object ConnectionHandler {
     )
   }

+  case class FinishedConnection[K](
+      connectionKey: K,
+      connectionServerAddress: InetSocketAddress
+  )
+
   /** Starts the connection handler and polls for connections
    *
-   * @param connectionFinishCallback, callback to be called when connection is finished and get deregistred
+   * @param connectionFinishCallback, callback to be called when a connection is finished and gets de-registered
    */
   def apply[F[_]: Concurrent: ContextShift, K, M](
-      connectionFinishCallback: HandledConnection[F, K, M] => F[Unit]
+      connectionFinishCallback: FinishedConnection[K] => F[Unit]
   )(implicit
       tracers: NetworkTracers[F, K, M]
   ): Resource[F, ConnectionHandler[F, K, M]] = {
     Resource
-      .make(buildHandler(connectionFinishCallback)) { handler =>
+      .make(buildHandler[F, K, M](connectionFinishCallback)) { handler =>
         handler.shutdown
       }
       .flatMap { handler =>
diff --git a/metronome/networking/src/io/iohk/metronome/networking/ConnectionsRegister.scala b/metronome/networking/src/io/iohk/metronome/networking/ConnectionsRegister.scala
index 9c93414e..59ad6620 100644
--- a/metronome/networking/src/io/iohk/metronome/networking/ConnectionsRegister.scala
+++ b/metronome/networking/src/io/iohk/metronome/networking/ConnectionsRegister.scala
@@ -42,6 +42,16 @@ class ConnectionsRegister[F[_]: Concurrent, K, M](
   ): F[Option[HandledConnection[F, K, M]]] =
     registerRef.get.map(register => register.get(connectionKey))

+  def replace(
+      connection: HandledConnection[F, K, M]
+  ): F[Option[HandledConnection[F, K, M]]] = {
+    registerRef.modify { register =>
+      register.updated(connection.key, connection) -> register.get(
+        connection.key
+      )
+    }
+  }
+
 }

 object ConnectionsRegister {
diff --git a/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala b/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala
index e40ecb16..75357bc8 100644
--- a/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala
+++ b/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala
@@ -1,11 +1,10 @@
 package io.iohk.metronome.networking

-import cats.effect.concurrent.Deferred
 import cats.effect.implicits._
 import cats.effect.{Concurrent, ContextShift, Resource, Sync, Timer}
 import cats.implicits._
 import io.iohk.metronome.networking.ConnectionHandler.{
-  HandledConnection,
+  FinishedConnection,
   MessageReceived
 }
 import io.iohk.metronome.networking.RemoteConnectionManager.RetryConfig.RandomJitterConfig
@@ -172,6 +171,19 @@ object 
RemoteConnectionManager {
     retryConfig: RetryConfig
 )(implicit tracers: NetworkTracers[F, K, M]): F[Unit] = {

+    def connectWithErrors(
+        connectionToAcquire: OutGoingConnectionRequest[K]
+    ): F[Either[ConnectionFailure[K], Unit]] = {
+      connectTo(encryptedConnectionProvider, connectionToAcquire).flatMap {
+        case Left(err) =>
+          Concurrent[F].pure(Left(err))
+        case Right(connection) =>
+          connectionsHandler
+            .registerOutgoing(connection.encryptedConnection)
+            .as(Right(()))
+      }
+    }
+
    /** Observable is used here as the streaming primitive because it has a richer API than Iterant and has the mapParallelUnorderedF
      * combinator, which makes it possible to have multiple concurrent retry timers that are cancelled when the whole
      * outer stream is cancelled
@@ -179,21 +191,15 @@ object RemoteConnectionManager {
     Observable
       .repeatEvalF(connectionsToAcquire.poll)
       .filterEvalF(request => connectionsHandler.isNewConnection(request.key))
-      .mapEvalF { connectionToAcquire =>
-        connectTo(encryptedConnectionProvider, connectionToAcquire)
-      }
+      .mapEvalF(connectWithErrors)
       .mapParallelUnorderedF(Integer.MAX_VALUE) {
         case Left(failure) =>
-          val failureToLog = failure.err
           tracers.failed(failure) >>
             retryConnection(retryConfig, failure.connectionRequest).flatMap(
               updatedRequest => connectionsToAcquire.offer(updatedRequest)
             )
-        case Right(connection) =>
-          val newOutgoingConnections =
-            HandledConnection.outgoing(connection.encryptedConnection)
-          connectionsHandler.registerOrClose(newOutgoingConnections)
-
+        case Right(_) =>
+          Concurrent[F].pure(())
       }
       .completedF
   }
@@ -217,11 +223,10 @@ object RemoteConnectionManager {
           encryptedConnection.remotePeerInfo._1
         ) match {
           case Some(incomingConnectionServerAddress) =>
-            val handledConnection = HandledConnection.incoming(
+            connectionsHandler.registerIncoming(
               incomingConnectionServerAddress,
               encryptedConnection
             )
-            connectionsHandler.registerOrClose(handledConnection)

           case None =>
             // unknown connection, just close it
@@ -232,25 +237,16 @@ object RemoteConnectionManager {
       .completedL
   }

-  def withCancelToken[F[_]: Concurrent, A](
-      token: Deferred[F, Unit],
-      ops: F[Option[A]]
-  ): F[Option[A]] =
-    Concurrent[F].race(token.get, ops).map {
-      case Left(()) => None
-      case Right(x) => x
-    }
-
   class HandledConnectionFinisher[F[_]: Concurrent: Timer, K, M](
       connectionsToAcquire: ConcurrentQueue[F, OutGoingConnectionRequest[K]],
       retryConfig: RetryConfig
   ) {
-    def finish(handledConnection: HandledConnection[F, K, M]): F[Unit] = {
+    def finish(finishedConnection: FinishedConnection[K]): F[Unit] = {
       retryConnection(
         retryConfig,
         OutGoingConnectionRequest.initial(
-          handledConnection.key,
-          handledConnection.serverAddress
+          finishedConnection.connectionKey,
+          finishedConnection.connectionServerAddress
         )
       ).flatMap(req => connectionsToAcquire.offer(req))
     }
@@ -331,7 +327,7 @@ object RemoteConnectionManager {
         retryConfig
       )

-      connectionsHandler <- ConnectionHandler.apply(
+      connectionsHandler <- ConnectionHandler.apply[F, K, M](
        // when a connection finishes, the callback will be called and the connection
        // will be put back on the connections-to-acquire queue
        handledConnectionFinisher.finish
diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala
index a48c6f3c..44e1a1f6 100644
--- a/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala
+++ b/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala
@@ -1,29 +1,27
@@ package io.iohk.metronome.networking -import monix.execution.Scheduler -import org.scalatest.flatspec.AsyncFlatSpecLike -import org.scalatest.matchers.should.Matchers - -import scala.concurrent.duration._ -import RemoteConnectionManagerTestUtils._ import cats.effect.Resource -import cats.effect.concurrent.Deferred +import cats.effect.concurrent.{Deferred, Ref} import io.iohk.metronome.networking.ConnectionHandler.{ ConnectionAlreadyClosedException, - HandledConnection + FinishedConnection } import io.iohk.metronome.networking.ConnectionHandlerSpec.{ buildHandlerResource, buildNConnections, - newHandledConnection + _ } +import io.iohk.metronome.networking.EncryptedConnectionProvider.DecodingError import io.iohk.metronome.networking.MockEncryptedConnectionProvider.MockEncryptedConnection +import io.iohk.metronome.networking.RemoteConnectionManagerTestUtils._ import monix.eval.Task -import ConnectionHandlerSpec._ -import io.iohk.metronome.networking.EncryptedConnectionProvider.DecodingError -import io.iohk.metronome.networking.RemoteConnectionManagerWithMockProviderSpec.fakeLocalAddress +import monix.execution.Scheduler +import org.scalatest.flatspec.AsyncFlatSpecLike +import org.scalatest.matchers.should.Matchers import io.iohk.metronome.tracer.Tracer + import java.net.InetSocketAddress +import scala.concurrent.duration._ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { implicit val testScheduler = @@ -36,11 +34,11 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { buildHandlerResource() ) { handler => for { - handledConnection1 <- newHandledConnection() - _ <- handler.registerOrClose(handledConnection1._1) - connections <- handler.getAllActiveConnections + newConnection <- MockEncryptedConnection() + _ <- handler.registerOutgoing(newConnection) + connections <- handler.getAllActiveConnections } yield { - assert(connections.contains(handledConnection1._1.key)) + assert(connections.contains(newConnection.key)) } } @@ -48,12 +46,12 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { buildHandlerResource() ) { handler => for { - handledConnection1 <- newHandledConnection() - _ <- handler.registerOrClose(handledConnection1._1) - connections <- handler.getAllActiveConnections - sendResult <- handler.sendMessage(handledConnection1._1.key, MessageA(1)) + newConnection <- MockEncryptedConnection() + _ <- handler.registerOutgoing(newConnection) + connections <- handler.getAllActiveConnections + sendResult <- handler.sendMessage(newConnection.key, MessageA(1)) } yield { - assert(connections.contains(handledConnection1._1.key)) + assert(connections.contains(newConnection.key)) assert(sendResult.isRight) } } @@ -62,15 +60,15 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { buildHandlerResource() ) { handler => for { - handledConnection1 <- newHandledConnection() - connections <- handler.getAllActiveConnections - sendResult <- handler.sendMessage(handledConnection1._1.key, MessageA(1)) + newConnection <- MockEncryptedConnection() + connections <- handler.getAllActiveConnections + sendResult <- handler.sendMessage(newConnection.key, MessageA(1)) } yield { assert(connections.isEmpty) assert(sendResult.isLeft) assert( sendResult.left.getOrElse(null) == ConnectionAlreadyClosedException( - handledConnection1._1.key + newConnection.key ) ) } @@ -80,41 +78,107 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { buildHandlerResource() ) { handler => for { - handledConnection1 <- newHandledConnection() 
- (handled, underLaying) = handledConnection1 - _ <- underLaying.closeRemoteWithoutInfo - _ <- handler.registerOrClose(handledConnection1._1) - connections <- handler.getAllActiveConnections - sendResult <- handler.sendMessage(handledConnection1._1.key, MessageA(1)) + newConnection <- MockEncryptedConnection() + _ <- newConnection.closeRemoteWithoutInfo + _ <- handler.registerOutgoing(newConnection) + connections <- handler.getAllActiveConnections + sendResult <- handler.sendMessage(newConnection.key, MessageA(1)) } yield { - assert(connections.contains(handledConnection1._1.key)) + assert(connections.contains(newConnection.key)) assert(sendResult.isLeft) assert( sendResult.left.getOrElse(null) == ConnectionAlreadyClosedException( - handledConnection1._1.key + newConnection.key ) ) } } - it should "not register and close duplicated connection" in customTestCaseResourceT( + it should "not register and close duplicated outgoing connection" in customTestCaseResourceT( buildHandlerResource() ) { handler => for { - handledConnection <- newHandledConnection() - duplicatedConnection <- newHandledConnection(remotePeerInfo = - (handledConnection._1.key, handledConnection._1.serverAddress) + initialConnection <- MockEncryptedConnection() + duplicatedConnection <- MockEncryptedConnection( + (initialConnection.key, initialConnection.address) + ) + _ <- handler.registerOutgoing(initialConnection) + connections <- handler.getAllActiveConnections + _ <- handler.registerOutgoing(duplicatedConnection) + connectionsAfterDuplication <- handler.getAllActiveConnections + _ <- duplicatedConnection.isClosed.waitFor(closed => closed) + duplicatedClosed <- duplicatedConnection.isClosed + initialClosed <- initialConnection.isClosed + } yield { + assert(connections.contains(initialConnection.key)) + assert(connectionsAfterDuplication.contains(initialConnection.key)) + assert(duplicatedClosed) + assert(!initialClosed) + } + } + + it should "replace incoming connections" in customTestCaseResourceT( + buildHandlerResourceWithCallbackCounter + ) { case (handler, counter) => + for { + initialConnection <- MockEncryptedConnection() + duplicatedConnection <- MockEncryptedConnection( + (initialConnection.key, initialConnection.address) ) - (handled, underlyingEncrypted) = handledConnection - _ <- handler.registerOrClose(handled) + _ <- handler.registerIncoming(fakeLocalAddress, initialConnection) connections <- handler.getAllActiveConnections - _ <- handler.registerOrClose(duplicatedConnection._1) + _ <- handler.registerIncoming(fakeLocalAddress, duplicatedConnection) + _ <- initialConnection.isClosed.waitFor(closed => closed) connectionsAfterDuplication <- handler.getAllActiveConnections - closedAfterDuplication <- duplicatedConnection._2.isClosed + initialClosed <- initialConnection.isClosed + duplicatedClosed <- duplicatedConnection.isClosed + numberOfCalledCallbacks <- counter.get + } yield { + assert(connections.contains(initialConnection.key)) + assert(connectionsAfterDuplication.contains(initialConnection.key)) + assert(initialClosed) + assert(!duplicatedClosed) + assert(numberOfCalledCallbacks == 0) + } + } + + it should "treat last conflicting incoming connection as live one" in customTestCaseResourceT( + buildHandlerResourceWithCallbackCounter + ) { case (handler, counter) => + val numberOfConflictingConnections = 4 + + for { + initialConnection <- MockEncryptedConnection() + duplicatedConnections <- Task.traverse( + (0 until numberOfConflictingConnections).toList + )(_ => + MockEncryptedConnection( + 
(initialConnection.key, initialConnection.address) + ) + ) + + _ <- handler.registerIncoming(fakeLocalAddress, initialConnection) + connections <- handler.getAllActiveConnections + (closed, last) = ( + duplicatedConnections.dropRight(1), + duplicatedConnections.last + ) + _ <- Task.traverse(duplicatedConnections)(duplicated => + handler.registerIncoming(fakeLocalAddress, duplicated) + ) + allDuplicatesClosed <- Task + .sequence(closed.map(connection => connection.isClosed)) + .map(statusList => statusList.forall(closed => closed)) + .waitFor(allClosed => allClosed) + lastClosed <- last.isClosed + numberOfCalledCallbacks <- counter.get + activeConnectionsAfterConflicts <- handler.getAllActiveConnections } yield { - assert(connections.contains(handled.key)) - assert(connectionsAfterDuplication.contains(handled.key)) - assert(closedAfterDuplication) + assert(connections.contains(initialConnection.key)) + assert(allDuplicatesClosed) + assert(!lastClosed) + assert(numberOfCalledCallbacks == 0) + assert(activeConnectionsAfterConflicts.size == 1) } } @@ -126,7 +190,7 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { (handler, release) = handlerAndRelease connections <- buildNConnections(expectedNumberOfConnections) _ <- Task.traverse(connections)(connection => - handler.registerOrClose(connection._1) + handler.registerOutgoing(connection) ) maxNumberOfActiveConnections <- handler.numberOfActiveConnections .waitFor(numOfConnections => @@ -148,11 +212,10 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { cb <- Deferred.tryable[Task, Unit] handlerAndRelease <- buildHandlerResource(_ => cb.complete(())).allocated (handler, release) = handlerAndRelease - connection <- newHandledConnection() - (handledConnection, underlyingEncrypted) = connection - _ <- handler.registerOrClose(handledConnection) + newConnection <- MockEncryptedConnection() + _ <- handler.registerOutgoing(newConnection) numberOfActive <- handler.numberOfActiveConnections.waitFor(_ == 1) - _ <- underlyingEncrypted.pushRemoteEvent(None) + _ <- newConnection.pushRemoteEvent(None) numberOfActiveAfterDisconnect <- handler.numberOfActiveConnections .waitFor(_ == 0) callbackCompleted <- cb.tryGet.waitFor(_.isDefined) @@ -169,11 +232,10 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { cb <- Deferred.tryable[Task, Unit] handlerAndRelease <- buildHandlerResource(_ => cb.complete(())).allocated (handler, release) = handlerAndRelease - connection <- newHandledConnection() - (handledConnection, underlyingEncrypted) = connection - _ <- handler.registerOrClose(handledConnection) + newConnection <- MockEncryptedConnection() + _ <- handler.registerOutgoing(newConnection) numberOfActive <- handler.numberOfActiveConnections.waitFor(_ == 1) - _ <- underlyingEncrypted.pushRemoteEvent(Some(Left(DecodingError))) + _ <- newConnection.pushRemoteEvent(Some(Left(DecodingError))) numberOfActiveAfterError <- handler.numberOfActiveConnections .waitFor(_ == 0) callbackCompleted <- cb.tryGet.waitFor(_.isDefined) @@ -190,9 +252,8 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { cb <- Deferred.tryable[Task, Unit] handlerAndRelease <- buildHandlerResource(_ => cb.complete(())).allocated (handler, release) = handlerAndRelease - connection <- newHandledConnection() - (handledConnection, underlyingEncrypted) = connection - _ <- handler.registerOrClose(handledConnection) + newConnection <- MockEncryptedConnection() + _ <- handler.registerOutgoing(newConnection) numberOfActive <- 
handler.numberOfActiveConnections.waitFor(_ == 1) _ <- release numberOfActiveAfterDisconnect <- handler.numberOfActiveConnections @@ -212,13 +273,13 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { for { connections <- buildNConnections(expectedNumberOfConnections) _ <- Task.traverse(connections)(connection => - handler.registerOrClose(connection._1) + handler.registerOutgoing(connection) ) maxNumberOfActiveConnections <- handler.numberOfActiveConnections .waitFor(numOfConnections => numOfConnections == expectedNumberOfConnections ) - _ <- Task.traverse(connections) { case (_, encConnection) => + _ <- Task.traverse(connections) { encConnection => encConnection.pushRemoteEvent(Some(Right(MessageA(1)))) } receivedMessages <- handler.incomingMessages @@ -226,7 +287,7 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { .toListL } yield { - val senders = connections.map(_._1.key).toSet + val senders = connections.map(_.key).toSet val receivedFrom = receivedMessages.map(_.from).toSet assert(receivedMessages.size == expectedNumberOfConnections) assert(maxNumberOfActiveConnections == expectedNumberOfConnections) @@ -239,6 +300,8 @@ class ConnectionHandlerSpec extends AsyncFlatSpecLike with Matchers { } object ConnectionHandlerSpec { + val fakeLocalAddress = new InetSocketAddress("localhost", 9081) + implicit class TaskOps[A](task: Task[A]) { def waitFor(condition: A => Boolean)(implicit timeOut: FiniteDuration) = { task.restartUntil(condition).timeout(timeOut) @@ -249,38 +312,28 @@ object ConnectionHandlerSpec { NetworkTracers(Tracer.noOpTracer) def buildHandlerResource( - cb: HandledConnection[Task, Secp256k1Key, TestMessage] => Task[Unit] = - _ => Task(()) + cb: FinishedConnection[Secp256k1Key] => Task[Unit] = _ => Task(()) ): Resource[Task, ConnectionHandler[Task, Secp256k1Key, TestMessage]] = { ConnectionHandler .apply[Task, Secp256k1Key, TestMessage](cb) } - def newHandledConnection( - remotePeerInfo: (Secp256k1Key, InetSocketAddress) = - (Secp256k1Key.getFakeRandomKey, fakeLocalAddress) - )(implicit - s: Scheduler - ): Task[ - ( - HandledConnection[Task, Secp256k1Key, TestMessage], - MockEncryptedConnection - ) + def buildHandlerResourceWithCallbackCounter: Resource[ + Task, + (ConnectionHandler[Task, Secp256k1Key, TestMessage], Ref[Task, Long]) ] = { for { - enc <- MockEncryptedConnection(remotePeerInfo) - } yield (HandledConnection.outgoing(enc), enc) + counter <- Resource.liftF(Ref.of[Task, Long](0L)) + handler <- buildHandlerResource(_ => + counter.update(current => current + 1) + ) + } yield (handler, counter) } def buildNConnections(n: Int)(implicit s: Scheduler - ): Task[List[ - ( - HandledConnection[Task, Secp256k1Key, TestMessage], - MockEncryptedConnection - ) - ]] = { - Task.traverse((0 until n).toList)(_ => newHandledConnection()) + ): Task[List[MockEncryptedConnection]] = { + Task.traverse((0 until n).toList)(_ => MockEncryptedConnection()) } } diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala b/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala index 46ca8893..e4183784 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala @@ -245,6 +245,8 @@ object MockEncryptedConnectionProvider { ) { lazy val key = connection.remotePeerInfo._1 + lazy val address = 
connection.remotePeerInfo._2
+
   def pushRemoteEvent(
       ev: Option[
         Either[EncryptedConnectionProvider.ConnectionError, TestMessage]
diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala
index 2d4677cd..73062941 100644
--- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala
+++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala
@@ -213,21 +213,23 @@ class RemoteConnectionManagerWithMockProviderSpec
     }
   }

-  it should "not allow duplicated incoming peer" in customTestCaseResourceT(
+  it should "prefer the most recent incoming connection" in customTestCaseResourceT(
     buildTestCaseWithNPeers(2, shouldBeOnline = false, longRetryConfig)
   ) { case (provider, manager, clusterPeers) =>
     for {
       initialAcquired <- manager.getAcquiredConnections
-      incomingConnection <- provider.newIncomingPeer(clusterPeers.head)
+      firstIncoming <- provider.newIncomingPeer(clusterPeers.head)
       _ <- manager.waitForNConnections(1)
-      containsIncoming <- manager.containsConnection(incomingConnection)
+      containsIncoming <- manager.containsConnection(firstIncoming)
       duplicatedIncoming <- provider.newIncomingPeer(clusterPeers.head)
       _ <- Task.sleep(500.millis) // Let the offered connection be processed.
       duplicatedIncomingClosed <- duplicatedIncoming.isClosed
+      firstIncomingClosed <- firstIncoming.isClosed
     } yield {
       assert(initialAcquired.isEmpty)
       assert(containsIncoming)
-      assert(duplicatedIncomingClosed)
+      assert(!duplicatedIncomingClosed)
+      assert(firstIncomingClosed)
     }
   }

From 949e3f33c75fa55058911492cb6d2c297407d242 Mon Sep 17 00:00:00 2001
From: Akosh Farkash
Date: Mon, 5 Apr 2021 14:07:56 +0100
Subject: [PATCH 16/48] PM-2929: Transaction data structure. (#15)

---
 .../interpreter/models/Transaction.scala | 54 +++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100644 metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/models/Transaction.scala

diff --git a/metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/models/Transaction.scala b/metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/models/Transaction.scala
new file mode 100644
index 00000000..6c60b359
--- /dev/null
+++ b/metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/models/Transaction.scala
@@ -0,0 +1,54 @@
+package io.iohk.metronome.checkpointing.interpreter.models
+
+import scodec.bits.BitVector
+
+/** Transactions are what comprise the block body used by the Checkpointing Service.
+  *
+  * The HotStuff BFT Agreement doesn't need to know about them; their execution and
+  * validation are delegated to the Checkpointing Service, which, in turn, delegates
+  * to the interpreter. The only component that truly has to understand the contents
+  * is the PoW specific interpreter.
+  *
+  * What the Checkpointing Service does have to know is the different kinds of
+  * transactions we support: registering proposer blocks in the ledger (required
+  * by Advocate), and registering checkpoint candidates.
+  */
+sealed trait Transaction
+
+object Transaction {
+
+  /** In PoW chains that support Advocate checkpointing, the Checkpoint Certificate
+    * can enforce the inclusion of proposed blocks on the chain via references; think
+    * uncle blocks that also get executed.
+    *
+    * In order to know which proposed blocks can be enforced, i.e. ones that are valid
+    * and have saturated the network, first the federation members need to reach BFT
+    * agreement over the list of existing proposer blocks.
+    *
+    * The `ProposerBlock` transaction adds one of these blocks that exist on the PoW
+    * chain to the Checkpointing Ledger, iff it can be validated by the members.
+    *
+    * The contents of the transaction are opaque; they only need to be understood
+    * by the PoW side interpreter.
+    *
+    * Using Advocate is optional; if the PoW chain doesn't support references,
+    * it will just use `CheckpointCandidate` transactions.
+    */
+  case class ProposerBlock(value: BitVector) extends Transaction
+
+  /** When a federation member is leading a round, it will ask the PoW side interpreter
+    * if it wants to propose a checkpoint candidate. The interpreter decides if the
+    * circumstances are right, e.g. enough new blocks have been built on the previous
+    * checkpoint that a new one has to be issued. If so, a `CheckpointCandidate`
+    * transaction is added to the next block, which is sent to the HotStuff replicas
+    * in a `Prepare` message, to be validated and committed.
+    *
+    * If the BFT agreement is successful, a Checkpoint Certificate will be formed
+    * during block execution which will include the `CheckpointCandidate`.
+    *
+    * The contents of the transaction are opaque; they only need to be understood
+    * by the PoW side interpreter, either for validation, or for following the
+    * fork indicated by the checkpoint.
+    */
+  case class CheckpointCandidate(value: BitVector) extends Transaction
+}
From 6a88c7ed2480c791ce71d66ecbb3f7b21640f10e Mon Sep 17 00:00:00 2001
From: Akosh Farkash
Date: Mon, 5 Apr 2021 14:37:24 +0100
Subject: [PATCH 17/48] PM-2931: Ledger data structure (#17)

* PM-2931: Basic ledger.
* PM-2931: Use a Vector, maybe we can save some time on sorting for deterministic hashes.
* PM-2931: Testing the update method.
* PM-2931: Add Ledger.hash using RLP.
* PM-2931: Test RLP roundtrip for a ledger.
* PM-2931: RLPCodecsSpec with example for Ledger.
* PM-2931: Simplify ledger update check.
* PM-2931: Simplify property declaration.
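
For illustration, a minimal usage sketch of the ledger introduced below; this
is a hedged example, not part of the change itself (the payload bytes are made
up, the names come from the diff):

    import scodec.bits.BitVector
    import io.iohk.metronome.core.Validated
    import io.iohk.metronome.checkpointing.interpreter.models.Transaction

    val pb = Transaction.ProposerBlock(BitVector(Array[Byte](1, 2, 3)))
    val cp = Transaction.CheckpointCandidate(BitVector(Array[Byte](4, 5)))

    // Proposer blocks accumulate in the ledger state.
    val l1 = Ledger.empty.update(Validated[Transaction](pb))
    // l1 == Ledger(None, Vector(pb))

    // A checkpoint candidate becomes the last checkpoint and
    // clears the accumulated proposer blocks.
    val l2 = l1.update(Validated[Transaction](cp))
    // l2 == Ledger(Some(cp), Vector.empty)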
--- build.sc | 2 +- .../checkpointing/service/models/Ledger.scala | 52 ++++++++++ .../service/models/RLPCodecs.scala | 29 ++++++ .../service/models/ArbitraryInstances.scala | 40 ++++++++ .../service/models/LedgerProps.scala | 34 +++++++ .../service/models/RLPCodecsProps.scala | 23 +++++ .../service/models/RLPCodecsSpec.scala | 95 +++++++++++++++++++ 7 files changed, 274 insertions(+), 1 deletion(-) create mode 100644 metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala create mode 100644 metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala create mode 100644 metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala create mode 100644 metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/LedgerProps.scala create mode 100644 metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala create mode 100644 metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala diff --git a/build.sc b/build.sc index 9b20c525..4f6035cd 100644 --- a/build.sc +++ b/build.sc @@ -241,7 +241,7 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { ) override def ivyDeps = super.ivyDeps() ++ Agg( - ivy"io.iohk::scalanet:${VersionOf.scalanet}" + ivy"io.iohk::mantis-rlp:${VersionOf.mantis}" ) object test extends TestModule diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala new file mode 100644 index 00000000..246cede7 --- /dev/null +++ b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala @@ -0,0 +1,52 @@ +package io.iohk.metronome.checkpointing.service.models + +import io.iohk.ethereum.rlp +import io.iohk.metronome.core.Validated +import io.iohk.metronome.checkpointing.interpreter.models.Transaction +import io.iohk.metronome.crypto.hash.{Hash, Keccak256} + +/** Current state of the ledger after applying all previous blocks. + * + * Basically it's the last checkpoint, plus any accumulated proposer blocks + * since then. Initially the last checkpoint is empty; conceptually it could + * the the genesis block of the PoW chain, but we don't know what that is + * until we talk to the interpreter, and we also can't produce it on our + * own since it's opaque data. + */ +case class Ledger( + maybeLastCheckpoint: Option[Transaction.CheckpointCandidate], + proposerBlocks: Vector[Transaction.ProposerBlock] +) { + + /** Calculate the hash of the ledger so we can put it in blocks + * and refer to it when syncing state between federation members. + */ + lazy val hash: Hash = Ledger.hash(this) + + /** Apply a validated transaction to produce the next ledger state. + * + * The transaction should have been validated against the PoW ledger + * by this point, so we know for example that the new checkpoint is + * a valid extension of the previous one. 
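+    *
+    * For illustration (a hedged sketch; `pb` stands for any `ProposerBlock`
+    * value), re-applying a proposer block that is already in the ledger
+    * leaves the state unchanged:
+    * {{{
+    * val once  = ledger.update(Validated[Transaction](pb))
+    * val twice = once.update(Validated[Transaction](pb))
+    * // once == twice: duplicate proposer blocks are ignored
+    * }}}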
+ */ + def update(transaction: Validated[Transaction]): Ledger = + (transaction: Transaction) match { + case t @ Transaction.ProposerBlock(_) => + if (proposerBlocks.contains(t)) + this + else + copy(proposerBlocks = proposerBlocks :+ t) + + case t @ Transaction.CheckpointCandidate(_) => + Ledger(Some(t), Vector.empty) + } +} + +object Ledger { + val empty = Ledger(None, Vector.empty) + + def hash(ledger: Ledger): Hash = { + import RLPCodecs._ + Keccak256(rlp.encode(ledger)) + } +} diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala new file mode 100644 index 00000000..1ed91901 --- /dev/null +++ b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala @@ -0,0 +1,29 @@ +package io.iohk.metronome.checkpointing.service.models + +import io.iohk.ethereum.rlp.RLPCodec +import io.iohk.ethereum.rlp.RLPCodec.Ops +import io.iohk.ethereum.rlp.RLPImplicitDerivations._ +import io.iohk.ethereum.rlp.RLPImplicits._ +import io.iohk.metronome.checkpointing.interpreter.models.Transaction +import scodec.bits.{BitVector, ByteVector} + +object RLPCodecs { + implicit val rlpBitVector: RLPCodec[BitVector] = + implicitly[RLPCodec[Array[Byte]]].xmap(BitVector(_), _.toByteArray) + + implicit val rlpByteVector: RLPCodec[ByteVector] = + implicitly[RLPCodec[Array[Byte]]].xmap(ByteVector(_), _.toArray) + + implicit val rlpProposerBlock: RLPCodec[Transaction.ProposerBlock] = + deriveLabelledGenericRLPCodec + + implicit val rlpCheckpointCandidate + : RLPCodec[Transaction.CheckpointCandidate] = + deriveLabelledGenericRLPCodec + + implicit def rlpVector[T: RLPCodec]: RLPCodec[Vector[T]] = + seqEncDec[T]().xmap(_.toVector, _.toSeq) + + implicit val rlpLedger: RLPCodec[Ledger] = + deriveLabelledGenericRLPCodec +} diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala new file mode 100644 index 00000000..df62c32c --- /dev/null +++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala @@ -0,0 +1,40 @@ +package io.iohk.metronome.checkpointing.service.models + +import io.iohk.metronome.checkpointing.interpreter.models.Transaction +import org.scalacheck._ +import org.scalacheck.Arbitrary.arbitrary +import scodec.bits.BitVector + +object ArbitraryInstances { + implicit val arbBitVector: Arbitrary[BitVector] = + Arbitrary { + arbitrary[Array[Byte]].map(BitVector(_)) + } + + implicit val arbProposerBlock: Arbitrary[Transaction.ProposerBlock] = + Arbitrary { + arbitrary[BitVector].map(Transaction.ProposerBlock(_)) + } + + implicit val arbCheckpointCandidate + : Arbitrary[Transaction.CheckpointCandidate] = + Arbitrary { + arbitrary[BitVector].map(Transaction.CheckpointCandidate(_)) + } + + implicit val arbTransaction: Arbitrary[Transaction] = + Arbitrary { + Gen.frequency( + 4 -> arbitrary[Transaction.ProposerBlock], + 1 -> arbitrary[Transaction.CheckpointCandidate] + ) + } + + implicit val arbLeger: Arbitrary[Ledger] = + Arbitrary { + for { + mcp <- arbitrary[Option[Transaction.CheckpointCandidate]] + pbs <- arbitrary[Set[Transaction.ProposerBlock]].map(_.toVector) + } yield Ledger(mcp, pbs) + } +} diff --git 
a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/LedgerProps.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/LedgerProps.scala new file mode 100644 index 00000000..de412bb5 --- /dev/null +++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/LedgerProps.scala @@ -0,0 +1,34 @@ +package io.iohk.metronome.checkpointing.service.models + +import io.iohk.metronome.core.Validated +import io.iohk.metronome.checkpointing.interpreter.models.Transaction +import org.scalacheck._ +import org.scalacheck.Prop.forAll + +object LedgerProps extends Properties("Ledger") { + import ArbitraryInstances._ + + property("update") = forAll { (ledger: Ledger, transaction: Transaction) => + val updated = ledger.update(Validated[Transaction](transaction)) + + transaction match { + case _: Transaction.ProposerBlock + if ledger.proposerBlocks.contains(transaction) => + updated == ledger + + case _: Transaction.ProposerBlock => + updated.proposerBlocks.last == transaction && + updated.proposerBlocks.distinct == updated.proposerBlocks && + updated.maybeLastCheckpoint == ledger.maybeLastCheckpoint + + case _: Transaction.CheckpointCandidate => + updated.maybeLastCheckpoint == Some(transaction) && + updated.proposerBlocks.isEmpty + } + } + + property("hash") = forAll { (ledger1: Ledger, ledger2: Ledger) => + ledger1 == ledger2 && ledger1.hash == ledger2.hash || + ledger1 != ledger2 && ledger1.hash != ledger2.hash + } +} diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala new file mode 100644 index 00000000..d9e8b068 --- /dev/null +++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala @@ -0,0 +1,23 @@ +package io.iohk.metronome.checkpointing.service.models + +import io.iohk.ethereum.rlp +import io.iohk.ethereum.rlp.RLPCodec +import org.scalacheck._ +import org.scalacheck.Prop.forAll +import scala.reflect.ClassTag + +object RLPCodecsProps extends Properties("RLPCodecs") { + import ArbitraryInstances._ + import RLPCodecs._ + + /** Test that encoding to and decoding from RLP preserves the value. */ + def propRoundTrip[T: RLPCodec: Arbitrary: ClassTag] = + property(implicitly[ClassTag[T]].runtimeClass.getSimpleName) = forAll { + (value0: T) => + val bytes = rlp.encode(value0) + val value1 = rlp.decode[T](bytes) + value0 == value1 + } + + propRoundTrip[Ledger] +} diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala new file mode 100644 index 00000000..b3eae079 --- /dev/null +++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala @@ -0,0 +1,95 @@ +package io.iohk.metronome.checkpointing.service.models + +import io.iohk.ethereum.rlp._ +import io.iohk.metronome.checkpointing.interpreter.models.Transaction +import org.scalactic.Equality +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import scala.reflect.ClassTag +import org.scalacheck.Arbitrary +import org.scalacheck.Arbitrary.arbitrary + +/** Concrete examples of RLP encoding, so we can make sure the structure is what we expect. 
+  *
+  * Complements `RLPCodecsProps` which works with arbitrary data.
+  */
+class RLPCodecsSpec extends AnyFlatSpec with Matchers {
+  import ArbitraryInstances._
+  import RLPCodecs._
+
+  def sample[T: Arbitrary] = arbitrary[T].sample.get
+
+  // Structural equality checker for RLPEncodeable.
+  // It has different wrappers for items based on whether it was hand-crafted or generated
+  // by codecs, and the RLPValue has mutable arrays inside.
+  implicit val eqRLPList = new Equality[RLPEncodeable] {
+    override def areEqual(a: RLPEncodeable, b: Any): Boolean =
+      (a, b) match {
+        case (a: RLPList, b: RLPList) =>
+          a.items.size == b.items.size && a.items.zip(b.items).forall {
+            case (a, b) =>
+              areEqual(a, b)
+          }
+        case (a: RLPValue, b: RLPValue) =>
+          a.bytes.sameElements(b.bytes)
+        case other =>
+          false
+      }
+  }
+
+  abstract class Example[T: RLPCodec: ClassTag] {
+    def decoded: T
+    def encoded: RLPEncodeable
+
+    def name =
+      s"RLPCodec[${implicitly[ClassTag[T]].runtimeClass.getSimpleName}]"
+
+    def encode = RLPEncoder.encode(decoded)
+    def decode = RLPDecoder.decode[T](encoded)
+  }
+
+  def exampleBehavior[T](example: Example[T]) = {
+    it should "encode the example value to the expected RLP data" in {
+      example.encode shouldEqual example.encoded
+    }
+
+    it should "decode the example RLP data to the expected value" in {
+      example.decode shouldEqual example.decoded
+    }
+  }
+
+  def test[T](example: Example[T]) = {
+    example.name should behave like exampleBehavior(example)
+  }
+
+  test {
+    new Example[Ledger] {
+      val ledger = Ledger(
+        maybeLastCheckpoint = Some(
+          sample[Transaction.CheckpointCandidate]
+        ),
+        proposerBlocks = Vector(
+          sample[Transaction.ProposerBlock],
+          sample[Transaction.ProposerBlock]
+        )
+      )
+
+      override val decoded: Ledger = ledger
+
+      override val encoded: RLPEncodeable =
+        RLPList( // Ledger
+          RLPList( // Option
+            RLPList( // CheckpointCandidate
+              RLPValue(ledger.maybeLastCheckpoint.get.value.toByteArray)
+            )
+          ),
+          RLPList( // Vector
+            RLPList( // ProposerBlock
+              RLPValue(ledger.proposerBlocks(0).value.toByteArray)
+            ),
+            RLPList(RLPValue(ledger.proposerBlocks(1).value.toByteArray))
+          )
+        )
+    }
+  }
+}
From 42b8d9dff6d0b3e45613a861738fcf4a8d39f932 Mon Sep 17 00:00:00 2001
From: Akosh Farkash
Date: Mon, 5 Apr 2021 22:30:41 +0100
Subject: [PATCH 18/48] PM-2930: Block data structure (#18)

* PM-2930: Skeleton block data structure with RLP.
* PM-2930: Add pre/post state hash to the header.
* PM-2930: Separate hash types.
* PM-2930: Comments about adding a Merkle root.
* PM-2930: Remove preStateHash
---
 .../checkpointing/service/models/Block.scala  | 60 +++++++++++++++++
 .../checkpointing/service/models/Ledger.scala | 16 +----
 .../service/models/RLPCodecs.scala            | 67 +++++++++++++++++++
 .../service/models/RLPHash.scala              | 45 +++++++++++++
 .../service/models/ArbitraryInstances.scala   | 31 +++++++++
 .../service/models/RLPCodecsProps.scala       |  3 +
 .../service/models/RLPCodecsSpec.scala        | 18 ++++-
 7 files changed, 224 insertions(+), 16 deletions(-)
 create mode 100644 metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Block.scala
 create mode 100644 metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPHash.scala

diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Block.scala b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Block.scala
new file mode 100644
index 00000000..6445aab9
--- /dev/null
+++ b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Block.scala
@@ -0,0 +1,60 @@
+package io.iohk.metronome.checkpointing.service.models
+
+import io.iohk.metronome.checkpointing.interpreter.models.Transaction
+
+/** Represents what the HotStuff paper called "nodes" as the "tree",
+  * with the transactions in the body being the "commands".
+  *
+  * The block contents are specific to the checkpointing application.
+  *
+  * The header and body are separated because headers have to be part
+  * of the Checkpoint Certificate; there's no need to repeat all
+  * the transactions there, as the Merkle root will make it possible
+  * to prove that a given CheckpointCandidate transaction was
+  * indeed part of the block. The headers are needed for parent-child
+  * validation in the certificate as well.
+  */
+sealed abstract case class Block(
+    header: Block.Header,
+    body: Block.Body
+) {
+  def hash: Block.Header.Hash = header.hash
+}
+
+object Block {
+
+  /** Create a block from a header and body we received from the network.
+    * It will need to be validated before it can be used, to make sure
+    * the header really belongs to the body.
+    */
+  def makeUnsafe(header: Header, body: Body): Block =
+    new Block(header, body) {}
+
+  /** Create a block from a header and a body, updating the `bodyHash` in the
+    * header to make sure the final block hash is valid.
+    */
+  def make(
+      header: Header,
+      body: Body
+  ) = makeUnsafe(header = header.copy(bodyHash = body.hash), body = body)
+
+  case class Header(
+      parentHash: Header.Hash,
+      // Hash of the Ledger after executing the block.
+      postStateHash: Ledger.Hash,
+      // Hash of the transactions in the body.
+      bodyHash: Body.Hash
+      // TODO (PM-3102): Add merkle root for contents.
+      // Instead of the hash of the body, should we use
+      // the Merkle root of the transactions?
+      // Or should that be an additional field?
+ ) extends RLPHash[Header, Header.Hash] + + object Header extends RLPHashCompanion[Header]()(RLPCodecs.rlpBlockHeader) + + case class Body( + transactions: Vector[Transaction] + ) extends RLPHash[Body, Body.Hash] + + object Body extends RLPHashCompanion[Body]()(RLPCodecs.rlpBlockBody) +} diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala index 246cede7..580b0337 100644 --- a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala +++ b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala @@ -1,9 +1,7 @@ package io.iohk.metronome.checkpointing.service.models -import io.iohk.ethereum.rlp import io.iohk.metronome.core.Validated import io.iohk.metronome.checkpointing.interpreter.models.Transaction -import io.iohk.metronome.crypto.hash.{Hash, Keccak256} /** Current state of the ledger after applying all previous blocks. * @@ -16,12 +14,7 @@ import io.iohk.metronome.crypto.hash.{Hash, Keccak256} case class Ledger( maybeLastCheckpoint: Option[Transaction.CheckpointCandidate], proposerBlocks: Vector[Transaction.ProposerBlock] -) { - - /** Calculate the hash of the ledger so we can put it in blocks - * and refer to it when syncing state between federation members. - */ - lazy val hash: Hash = Ledger.hash(this) +) extends RLPHash[Ledger, Ledger.Hash] { /** Apply a validated transaction to produce the next ledger state. * @@ -42,11 +35,6 @@ case class Ledger( } } -object Ledger { +object Ledger extends RLPHashCompanion[Ledger]()(RLPCodecs.rlpLedger) { val empty = Ledger(None, Vector.empty) - - def hash(ledger: Ledger): Hash = { - import RLPCodecs._ - Keccak256(rlp.encode(ledger)) - } } diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala index 1ed91901..cae92a22 100644 --- a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala +++ b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala @@ -4,6 +4,8 @@ import io.iohk.ethereum.rlp.RLPCodec import io.iohk.ethereum.rlp.RLPCodec.Ops import io.iohk.ethereum.rlp.RLPImplicitDerivations._ import io.iohk.ethereum.rlp.RLPImplicits._ +import io.iohk.ethereum.rlp.{RLPEncoder, RLPList} +import io.iohk.metronome.crypto.hash.Hash import io.iohk.metronome.checkpointing.interpreter.models.Transaction import scodec.bits.{BitVector, ByteVector} @@ -14,6 +16,18 @@ object RLPCodecs { implicit val rlpByteVector: RLPCodec[ByteVector] = implicitly[RLPCodec[Array[Byte]]].xmap(ByteVector(_), _.toArray) + implicit val hashRLPCodec: RLPCodec[Hash] = + implicitly[RLPCodec[ByteVector]].xmap(Hash(_), identity) + + implicit val headerHashRLPCodec: RLPCodec[Block.Header.Hash] = + implicitly[RLPCodec[ByteVector]].xmap(Block.Header.Hash(_), identity) + + implicit val bodyHashRLPCodec: RLPCodec[Block.Body.Hash] = + implicitly[RLPCodec[ByteVector]].xmap(Block.Body.Hash(_), identity) + + implicit val ledgerHashRLPCodec: RLPCodec[Ledger.Hash] = + implicitly[RLPCodec[ByteVector]].xmap(Ledger.Hash(_), identity) + implicit val rlpProposerBlock: RLPCodec[Transaction.ProposerBlock] = deriveLabelledGenericRLPCodec @@ -26,4 +40,57 @@ object RLPCodecs { implicit val rlpLedger: RLPCodec[Ledger] = 
deriveLabelledGenericRLPCodec + + implicit val rlpTransaction: RLPCodec[Transaction] = { + import Transaction._ + + val ProposerBlockTag: Short = 1 + val CheckpointCandidateTag: Short = 2 + + def encodeWithTag[T: RLPEncoder](tag: Short, value: T) = { + val t = RLPEncoder.encode(tag) + val l = RLPEncoder.encode(value).asInstanceOf[RLPList] + t +: l + } + + RLPCodec.instance[Transaction]( + { + case tx: ProposerBlock => + encodeWithTag(ProposerBlockTag, tx) + case tx: CheckpointCandidate => + encodeWithTag(CheckpointCandidateTag, tx) + }, + { case RLPList(tag, items @ _*) => + val rest = RLPList(items: _*) + tag.decodeAs[Short]("tag") match { + case ProposerBlockTag => + rest.decodeAs[ProposerBlock]("transaction") + case CheckpointCandidateTag => + rest.decodeAs[CheckpointCandidate]("transaction") + } + } + ) + } + + implicit val rlpBlockBody: RLPCodec[Block.Body] = + deriveLabelledGenericRLPCodec + + implicit val rlpBlockHeader: RLPCodec[Block.Header] = + deriveLabelledGenericRLPCodec + + // Cannot use derivation because Block is a sealed abstract case class, + // so it doesn't allow creation of an invalid block. + implicit val rlpBlock: RLPCodec[Block] = + RLPCodec.instance[Block]( + block => + RLPList( + RLPEncoder.encode(block.header), + RLPEncoder.encode(block.body) + ), + { case RLPList(header, body) => + val h = header.decodeAs[Block.Header]("header") + val b = body.decodeAs[Block.Body]("body") + Block.makeUnsafe(h, b) + } + ) } diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPHash.scala b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPHash.scala new file mode 100644 index 00000000..d2388ced --- /dev/null +++ b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPHash.scala @@ -0,0 +1,45 @@ +package io.iohk.metronome.checkpointing.service.models + +import io.iohk.ethereum.rlp +import io.iohk.ethereum.rlp.RLPEncoder +import io.iohk.metronome.crypto +import io.iohk.metronome.crypto.hash.Keccak256 +import io.iohk.metronome.core.Tagger +import scodec.bits.ByteVector +import scala.language.implicitConversions + +/** Type class to produce a specific type of hash based on the RLP + * representation of a type, where the hash type is typically + * defined in the companion object of the type. + */ +trait RLPHasher[T] { + type Hash + def hash(value: T): Hash +} +object RLPHasher { + type Aux[T, H] = RLPHasher[T] { + type Hash = H + } +} + +/** Base class for types that have a hash value based on their RLP representation. */ +abstract class RLPHash[T, H](implicit ev: RLPHasher.Aux[T, H]) { self: T => + lazy val hash: H = ev.hash(self) +} + +/** Base class for companion objects for types that need hashes based on RLP. + * + * Every companion will define a separate `Hash` type, so we don't mix them up. 
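+  *
+  * A hedged sketch of the intended usage pattern (hypothetical type `Foo`
+  * and codec `fooRLPCodec`, mirroring how `Block.Header` and `Ledger` extend
+  * these classes):
+  * {{{
+  * case class Foo(n: Int) extends RLPHash[Foo, Foo.Hash]
+  * object Foo extends RLPHashCompanion[Foo]()(fooRLPCodec)
+  * // Foo(1).hash is a Foo.Hash: the Keccak256 of Foo's RLP encoding
+  * }}}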
+ */ +abstract class RLPHashCompanion[T: RLPEncoder] extends RLPHasher[T] { self => + object Hash extends Tagger[ByteVector] + override type Hash = Hash.Tagged + + override def hash(value: T): Hash = + Hash(Keccak256(rlp.encode(value))) + + implicit val hasher: RLPHasher.Aux[T, Hash] = this + + implicit def `Hash => crypto.Hash`(h: Hash): crypto.hash.Hash = + crypto.hash.Hash(h) +} diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala index df62c32c..b259ec31 100644 --- a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala +++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala @@ -1,9 +1,11 @@ package io.iohk.metronome.checkpointing.service.models +import io.iohk.metronome.crypto.hash.Hash import io.iohk.metronome.checkpointing.interpreter.models.Transaction import org.scalacheck._ import org.scalacheck.Arbitrary.arbitrary import scodec.bits.BitVector +import scodec.bits.ByteVector object ArbitraryInstances { implicit val arbBitVector: Arbitrary[BitVector] = @@ -11,6 +13,20 @@ object ArbitraryInstances { arbitrary[Array[Byte]].map(BitVector(_)) } + implicit val arbHash: Arbitrary[Hash] = + Arbitrary { + Gen.listOfN(32, arbitrary[Byte]).map(ByteVector(_)).map(Hash(_)) + } + + implicit val arbHeaderHash: Arbitrary[Block.Header.Hash] = + Arbitrary(arbitrary[Hash].map(Block.Header.Hash(_))) + + implicit val arbBodyHash: Arbitrary[Block.Body.Hash] = + Arbitrary(arbitrary[Hash].map(Block.Body.Hash(_))) + + implicit val arbLedgerHash: Arbitrary[Ledger.Hash] = + Arbitrary(arbitrary[Hash].map(Ledger.Hash(_))) + implicit val arbProposerBlock: Arbitrary[Transaction.ProposerBlock] = Arbitrary { arbitrary[BitVector].map(Transaction.ProposerBlock(_)) @@ -37,4 +53,19 @@ object ArbitraryInstances { pbs <- arbitrary[Set[Transaction.ProposerBlock]].map(_.toVector) } yield Ledger(mcp, pbs) } + + implicit val arbBlock: Arbitrary[Block] = + Arbitrary { + for { + parentHash <- arbitrary[Block.Header.Hash] + postStateHash <- arbitrary[Ledger.Hash] + transactions <- arbitrary[Vector[Transaction]] + body = Block.Body(transactions) + header = Block.Header( + parentHash, + postStateHash, + body.hash + ) + } yield Block.make(header, body) + } } diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala index d9e8b068..e34cfc34 100644 --- a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala +++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala @@ -2,6 +2,7 @@ package io.iohk.metronome.checkpointing.service.models import io.iohk.ethereum.rlp import io.iohk.ethereum.rlp.RLPCodec +import io.iohk.metronome.checkpointing.interpreter.models.Transaction import org.scalacheck._ import org.scalacheck.Prop.forAll import scala.reflect.ClassTag @@ -20,4 +21,6 @@ object RLPCodecsProps extends Properties("RLPCodecs") { } propRoundTrip[Ledger] + propRoundTrip[Transaction] + propRoundTrip[Block] } diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala 
b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala index b3eae079..88544e98 100644 --- a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala +++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala @@ -74,9 +74,9 @@ class RLPCodecsSpec extends AnyFlatSpec with Matchers { ) ) - override val decoded: Ledger = ledger + override val decoded = ledger - override val encoded: RLPEncodeable = + override val encoded = RLPList( // Ledger RLPList( // Option RLPList( // CheckpointCandidate @@ -92,4 +92,18 @@ class RLPCodecsSpec extends AnyFlatSpec with Matchers { ) } } + + test { + new Example[Transaction] { + val transaction = sample[Transaction.ProposerBlock] + + override val decoded = transaction + + override val encoded = + RLPList( // ProposerBlock + RLPValue(Array(1.toByte)), // Tag + RLPValue(transaction.value.toByteArray) + ) + } + } } From f6cc6e23d03fba9b35522f3ca3042d6292ac4455 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Mon, 5 Apr 2021 22:57:10 +0100 Subject: [PATCH 19/48] PM-2932: Checkpoint certificate data structure (#20) * PM-2932: Create checkpointing.models module, move classes. * PM-2932: Checkpoint Certificate type. * PM-2932: RLP format for CheckpointCertificate. * PM-2932: Testing the RLP formats. * PM-2932: Change subject to class name. * PM-2932: Block.genesis and better smart constructor. * PM-2932: Rename arbitrary variable. * PM-2932: Fix typo. * PM-2932: Remove preStateHash * PM-2932: Use Matchers and Inspectors in flaky network tests. --- build.sc | 47 ++-- .../CheckpointingAgreement.scala | 26 +++ .../checkpointing}/models/Block.scala | 51 +++-- .../models/CheckpointCertificate.scala | 30 +++ .../checkpointing}/models/Ledger.scala | 7 +- .../checkpointing/models/MerkleTree.scala | 66 ++++++ .../checkpointing/models/RLPCodecs.scala | 200 ++++++++++++++++++ .../checkpointing}/models/RLPHash.scala | 2 +- .../checkpointing}/models/Transaction.scala | 2 +- .../metronome/checkpointing/package.scala | 5 + .../models/ArbitraryInstances.scala | 129 +++++++++++ .../checkpointing}/models/LedgerProps.scala | 3 +- .../models/RLPCodecsProps.scala | 6 +- .../checkpointing/models/RLPCodecsSpec.scala | 182 ++++++++++++++++ .../service/models/RLPCodecs.scala | 96 --------- .../service/models/ArbitraryInstances.scala | 71 ------- .../service/models/RLPCodecsSpec.scala | 109 ---------- ...ctionManagerWithScalanetProviderSpec.scala | 39 ++-- 18 files changed, 739 insertions(+), 332 deletions(-) create mode 100644 metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala rename metronome/checkpointing/{service/src/io/iohk/metronome/checkpointing/service => models/src/io/iohk/metronome/checkpointing}/models/Block.scala (54%) create mode 100644 metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/CheckpointCertificate.scala rename metronome/checkpointing/{service/src/io/iohk/metronome/checkpointing/service => models/src/io/iohk/metronome/checkpointing}/models/Ledger.scala (83%) create mode 100644 metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/MerkleTree.scala create mode 100644 metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/RLPCodecs.scala rename metronome/checkpointing/{service/src/io/iohk/metronome/checkpointing/service => models/src/io/iohk/metronome/checkpointing}/models/RLPHash.scala (96%) rename 
metronome/checkpointing/{interpreter/src/io/iohk/metronome/checkpointing/interpreter => models/src/io/iohk/metronome/checkpointing}/models/Transaction.scala (97%)
 create mode 100644 metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/package.scala
 create mode 100644 metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala
 rename metronome/checkpointing/{service/test/src/io/iohk/metronome/checkpointing/service => models/test/src/io/iohk/metronome/checkpointing}/models/LedgerProps.scala (89%)
 rename metronome/checkpointing/{service/test/src/io/iohk/metronome/checkpointing/service => models/test/src/io/iohk/metronome/checkpointing}/models/RLPCodecsProps.scala (80%)
 create mode 100644 metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala
 delete mode 100644 metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala
 delete mode 100644 metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala
 delete mode 100644 metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala

diff --git a/build.sc b/build.sc
index 4f6035cd..5d5bb5df 100644
--- a/build.sc
+++ b/build.sc
@@ -215,7 +215,10 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule {
   object hotstuff extends SubModule {

     /** Pure consensus models. */
-    object consensus extends SubModule {
+    object consensus extends SubModule with Publishing {
+      override def description: String =
+        "Pure HotStuff consensus models."
+
       override def moduleDeps: Seq[PublishModule] =
         Seq(core, crypto)

@@ -240,10 +243,6 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule {
         hotstuff.forensics
       )

-      override def ivyDeps = super.ivyDeps() ++ Agg(
-        ivy"io.iohk::mantis-rlp:${VersionOf.mantis}"
-      )
-
      object test extends TestModule
    }
  }

  /** Components realising the checkpointing functionality using HotStuff. */
  object checkpointing extends SubModule {

+    /** Library to be included on the PoW side to validate checkpoint certificates.
+      *
+      * Includes the certificate model and the checkpoint ledger and chain models.
+      */
+    object models extends SubModule with Publishing {
+      override def description: String =
+        "Checkpointing domain models, including the checkpoint certificate and its validation logic."
+
+      override def ivyDeps = super.ivyDeps() ++ Agg(
+        ivy"io.iohk::mantis-rlp:${VersionOf.mantis}"
+      )
+
+      override def moduleDeps: Seq[PublishModule] =
+        Seq(core, crypto, hotstuff.consensus)
+
+      object test extends TestModule
+    }
+
     /** Library to be included on the PoW side to talk to the checkpointing service.
      *
-     * Includes the certificate models, the local communication protocol messages and networking.
+     * Includes the local communication protocol messages and networking.
*/
     object interpreter extends SubModule with Publishing {
       override def description: String =
@@ -264,19 +281,25 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule {
       )

       override def moduleDeps: Seq[PublishModule] =
-        Seq(tracing, crypto)
+        Seq(tracing, crypto, checkpointing.models)
     }

-    /** Implements the checkpointing functionality, the ledger rules,
-      * and state synchronisation, which is not an inherent part of
+    /** Implements the checkpointing functionality, validation rules, and
+      * state synchronisation; anything that is not an inherent part of
       * HotStuff, but applies to the checkpointing use case.
       *
-      * If it was published, it could be directly included in the checkpoint assisted blockchain application,
-      * so the service and the interpreter can share data in memory.
+      * If it were published, it could be directly included in the checkpoint
+      * assisted blockchain application, so the service and the interpreter
+      * can share data in memory.
       */
     object service extends SubModule {
       override def moduleDeps: Seq[JavaModule] =
-        Seq(tracing, hotstuff.service, checkpointing.interpreter)
+        Seq(
+          tracing,
+          hotstuff.service,
+          checkpointing.models,
+          checkpointing.interpreter
+        )

       object test extends TestModule
     }
diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala
new file mode 100644
index 00000000..d1335553
--- /dev/null
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala
@@ -0,0 +1,26 @@
+package io.iohk.metronome.checkpointing
+
+import io.iohk.ethereum.crypto.ECDSASignature
+import io.iohk.metronome.crypto
+import io.iohk.metronome.hotstuff.consensus.ViewNumber
+import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, VotingPhase}
+import org.bouncycastle.crypto.params.{
+  ECPublicKeyParameters,
+  ECPrivateKeyParameters
+}
+
+object CheckpointingAgreement extends Agreement {
+  override type Block = models.Block
+  override type Hash = models.Block.Header.Hash
+  override type PSig = ECDSASignature
+  // TODO (PM-2935): Replace list with threshold signatures.
+  override type GSig = List[ECDSASignature]
+  override type PKey = ECPublicKeyParameters
+  override type SKey = ECPrivateKeyParameters
+
+  type GroupSignature = crypto.GroupSignature[
+    PKey,
+    (VotingPhase, ViewNumber, Hash),
+    GSig
+  ]
+}
diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Block.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala
similarity index 54%
rename from metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Block.scala
rename to metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala
index 6445aab9..932da9c4 100644
--- a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Block.scala
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala
@@ -1,6 +1,6 @@
-package io.iohk.metronome.checkpointing.service.models
+package io.iohk.metronome.checkpointing.models

-import io.iohk.metronome.checkpointing.interpreter.models.Transaction
+import scodec.bits.ByteVector

 /** Represents what the HotStuff paper called "nodes" as the "tree",
   * with the transactions in the body being the "commands".
diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Block.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala
similarity index 54%
rename from metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Block.scala
rename to metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala
index 6445aab9..932da9c4 100644
--- a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Block.scala
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala
@@ -1,6 +1,6 @@
-package io.iohk.metronome.checkpointing.service.models
+package io.iohk.metronome.checkpointing.models

-import io.iohk.metronome.checkpointing.interpreter.models.Transaction
+import scodec.bits.ByteVector

 /** Represents what the HotStuff paper calls "nodes" in the "tree",
   * with the transactions in the body being the "commands".
@@ -14,7 +14,7 @@ import io.iohk.metronome.checkpointing.interpreter.models.Transaction
   * indeed part of the block. The headers are needed for parent-child
   * validation in the certificate as well.
   */
-sealed abstract case class Block(
+sealed abstract case class Block private (
     header: Block.Header,
     body: Block.Body
 ) {
@@ -24,36 +24,57 @@ sealed abstract case class Block(
 object Block {

   /** Create a block from a header and body we received from the network.
+    *
     * It will need to be validated before it can be used, to make sure
     * the header really belongs to the body.
     */
   def makeUnsafe(header: Header, body: Body): Block =
     new Block(header, body) {}

-  /** Create a block from a header and a body, updating the `bodyHash` in the
-    * header to make sure the final block hash is valid.
-    */
+  /** Smart constructor for a block, setting the correct hashes in the header. */
   def make(
-      header: Header,
-      body: Body
-  ) = makeUnsafe(header = header.copy(bodyHash = body.hash), body = body)
+      parent: Block,
+      postStateHash: Ledger.Hash,
+      transactions: IndexedSeq[Transaction]
+  ): Block = {
+    val body = Body(transactions)
+    val header = Header(
+      parentHash = parent.hash,
+      postStateHash = postStateHash,
+      bodyHash = body.hash,
+      // TODO (PM-3102): Compute Root Hash over the transactions.
+      contentMerkleRoot = MerkleTree.Hash.empty
+    )
+    makeUnsafe(header, body)
+  }
+
+  /** The first, empty block. */
+  val genesis: Block = {
+    val body = Body(Vector.empty)
+    val header = Header(
+      parentHash = Block.Header.Hash(ByteVector.empty),
+      postStateHash = Ledger.empty.hash,
+      bodyHash = body.hash,
+      contentMerkleRoot = MerkleTree.Hash.empty
+    )
+    makeUnsafe(header, body)
+  }

   case class Header(
       parentHash: Header.Hash,
       // Hash of the Ledger after executing the block.
       postStateHash: Ledger.Hash,
       // Hash of the transactions in the body.
-      bodyHash: Body.Hash
-      // TODO (PM-3102): Add merkle root for contents.
-      // Instead of the hash of the body, should we use the
-      // the Merkle root of the transactions?
-      // Or should that be an additional field?
+      bodyHash: Body.Hash,
+      // Merkle root of the transactions in the body.
+      // TODO (PM-3102): Should this just replace the `bodyHash`?
+      contentMerkleRoot: MerkleTree.Hash
   ) extends RLPHash[Header, Header.Hash]

   object Header extends RLPHashCompanion[Header]()(RLPCodecs.rlpBlockHeader)

   case class Body(
-      transactions: Vector[Transaction]
+      transactions: IndexedSeq[Transaction]
   ) extends RLPHash[Body, Body.Hash]

   object Body extends RLPHashCompanion[Body]()(RLPCodecs.rlpBlockBody)
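A brief usage sketch of the new smart constructor and `genesis`. The candidate bits are made up, and `Ledger.empty.hash` stands in for the post-state hash that real code would derive by applying the transactions to the ledger:

```scala
// Sketch: extend the chain by one block carrying a single checkpoint candidate.
// The post-state hash below is a placeholder, not a real ledger transition.
import io.iohk.metronome.checkpointing.models.{Block, Ledger, Transaction}
import scodec.bits.BitVector

val candidate = Transaction.CheckpointCandidate(BitVector.fromValidHex("abcd"))

val child: Block = Block.make(
  parent = Block.genesis,
  postStateHash = Ledger.empty.hash,
  transactions = Vector(candidate)
)

// The smart constructor wires the hashes so the block is structurally valid.
assert(child.header.parentHash == Block.genesis.hash)
assert(child.header.bodyHash == child.body.hash)
```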
diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/CheckpointCertificate.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/CheckpointCertificate.scala
new file mode 100644
index 00000000..040e7c56
--- /dev/null
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/CheckpointCertificate.scala
@@ -0,0 +1,30 @@
+package io.iohk.metronome.checkpointing.models
+
+import cats.data.NonEmptyList
+import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate
+import io.iohk.metronome.checkpointing.CheckpointingAgreement
+
+/** The Checkpoint Certificate is a proof of the BFT agreement
+  * over a given Checkpoint Candidate.
+  *
+  * It contains the group signature over the block that the
+  * federation committed, together with the sequence of blocks
+  * from the one that originally introduced the Candidate.
+  *
+  * The interpreter can follow the parent-child relationships,
+  * validate the hashes and the inclusion of the Candidate in
+  * the original block, check the group signature, then unpack
+  * the contents of the Candidate to interpret it according to
+  * whatever rules apply on the checkpointed PoW chain.
+  */
+case class CheckpointCertificate(
+    // `head` is the `Block.Header` that has the Commit Q.C.;
+    // `last` is the `Block.Header` that had the `CheckpointCandidate` in its `Body`.
+    headers: NonEmptyList[Block.Header],
+    // The opaque contents of the checkpoint that has been agreed upon.
+    checkpoint: Transaction.CheckpointCandidate,
+    // Proof that `checkpoint` is part of `headers.last.contentMerkleRoot`.
+    proof: MerkleTree.Proof,
+    // Commit Q.C. over `headers.head`.
+    commitQC: QuorumCertificate[CheckpointingAgreement]
+)
diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Ledger.scala
similarity index 83%
rename from metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala
rename to metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Ledger.scala
index 580b0337..f73611c3 100644
--- a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/Ledger.scala
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Ledger.scala
@@ -1,19 +1,18 @@
-package io.iohk.metronome.checkpointing.service.models
+package io.iohk.metronome.checkpointing.models

 import io.iohk.metronome.core.Validated
-import io.iohk.metronome.checkpointing.interpreter.models.Transaction

 /** Current state of the ledger after applying all previous blocks.
   *
   * Basically it's the last checkpoint, plus any accumulated proposer blocks
   * since then. Initially the last checkpoint is empty; conceptually it could
-  * the the genesis block of the PoW chain, but we don't know what that is
+  * be the genesis block of the PoW chain, but we don't know what that is
   * until we talk to the interpreter, and we also can't produce it on our
   * own since it's opaque data.
   */
 case class Ledger(
     maybeLastCheckpoint: Option[Transaction.CheckpointCandidate],
-    proposerBlocks: Vector[Transaction.ProposerBlock]
+    proposerBlocks: IndexedSeq[Transaction.ProposerBlock]
 ) extends RLPHash[Ledger, Ledger.Hash] {

   /** Apply a validated transaction to produce the next ledger state.
diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/MerkleTree.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/MerkleTree.scala
new file mode 100644
index 00000000..7ec3ed85
--- /dev/null
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/MerkleTree.scala
@@ -0,0 +1,66 @@
+package io.iohk.metronome.checkpointing.models
+
+import io.iohk.metronome.core.Tagger
+import scodec.bits.ByteVector
+
+object MerkleTree {
+  object Hash extends Tagger[ByteVector] {
+    val empty = apply(ByteVector.empty)
+  }
+  type Hash = Hash.Tagged
+
+  /** Merkle proof that some leaf content is part of the tree.
+    *
+    * It is expected that the root hash and the leaf itself are available to
+    * the verifier, so the proof only contains things the verifier doesn't
+    * know, which is the overall size of the tree and the position of the leaf
+    * among its sibling leaves. Based on that it is possible to use the sibling
+    * hash path to check whether they add up to the root hash.
+    *
+    * `leafCount` gives the height of the binary tree: `leafCount = 2^h`
+    * `leafIndex` can be interpreted as a binary number, which represents
+    * the path from the root of the tree down to the leaf, with the bits
+    * indicating whether to go left or right in each fork, while descending
+    * the levels.
+    *
+    * For example, take the following Merkle tree:
+    * ```
+    *      h0123
+    *     /     \
+    *   h01     h23
+    *   / \     / \
+    *  h0  h1  h2  h3
+    * ```
+    *
+    * Say we want to prove that leaf 2 is part of the tree. The binary
+    * representation of 2 is `10`, which we can interpret as: go right,
+    * then go left.
+    *
+    * The sibling path in the proof would be: `Vector(h3, h01)`.
+    *
+    * Based on this we can take the leaf value we know, reconstruct the hashes
+    * from the bottom to the top, and compare it against the root hash we know:
+    *
+    * ```
+    * h2    = h(value)
+    * h23   = h(h2, path(0))
+    * h0123 = h(path(1), h23)
+    * assert(h0123 == root)
+    * ```
+    *
+    * The right/left decisions we gleaned from the `leafIndex` tell us the order
+    * we have to pass the arguments to the hash function.
+    *
+    * Note that if `leafCount` were higher, the binary representation of 2
+    * would conceptually be longer, e.g. `0010` for a tree with 16 leaves.
+    */
+  case class Proof(
+      // Position of the leaf in the lowest level.
+      leafIndex: Int,
+      // Number of leaves in the lowest level.
+      leafCount: Int,
+      // Hashes of the "other" side of the tree, level by level,
+      // starting from the lowest up to the highest.
+      siblingPath: IndexedSeq[MerkleTree.Hash]
+  )
+}
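The verification walk described in the comment above can be written down directly. A sketch follows; the inner-node `combine` function is passed in as a parameter because this file doesn't fix a concrete hash for inner nodes, and the `verify` helper itself is not part of this patch:

```scala
// Sketch of Merkle proof verification following the comment above.
// `combine` is a stand-in for the real inner-node hash (e.g. a hash over
// the concatenated child hashes); this patch doesn't define one.
import io.iohk.metronome.checkpointing.models.MerkleTree

def verify(
    root: MerkleTree.Hash,
    leafHash: MerkleTree.Hash,
    proof: MerkleTree.Proof
)(combine: (MerkleTree.Hash, MerkleTree.Hash) => MerkleTree.Hash): Boolean = {
  // Bit `i` of `leafIndex` says whether, at level `i` from the bottom,
  // the running hash is the right (1) or the left (0) child.
  val reconstructed = proof.siblingPath.zipWithIndex.foldLeft(leafHash) {
    case (acc, (sibling, level)) =>
      val isRightChild = ((proof.leafIndex >> level) & 1) == 1
      if (isRightChild) combine(sibling, acc) else combine(acc, sibling)
  }
  reconstructed == root
}
```

For the example above, `verify(root, h2, Proof(2, 4, Vector(h3, h01)))(h)` reproduces `h23 = h(h2, h3)` and then `h0123 = h(h01, h23)`, matching the worked derivation in the comment.

diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/RLPCodecs.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/RLPCodecs.scala
new file mode 100644
index 00000000..98d83cb5
--- /dev/null
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/RLPCodecs.scala
@@ -0,0 +1,200 @@
+package io.iohk.metronome.checkpointing.models
+
+import cats.data.NonEmptyList
+import io.iohk.ethereum.crypto.ECDSASignature
+import io.iohk.ethereum.rlp.{RLPEncoder, RLPList}
+import io.iohk.ethereum.rlp.RLPCodec
+import io.iohk.ethereum.rlp.RLPCodec.Ops
+import io.iohk.ethereum.rlp.RLPException
+import io.iohk.ethereum.rlp.RLPImplicitDerivations._
+import io.iohk.ethereum.rlp.RLPImplicits._
+import io.iohk.metronome.checkpointing.CheckpointingAgreement
+import io.iohk.metronome.crypto.hash.Hash
+import io.iohk.metronome.hotstuff.consensus.ViewNumber
+import io.iohk.metronome.hotstuff.consensus.basic.{
+  Phase,
+  VotingPhase,
+  QuorumCertificate
+}
+import scodec.bits.{BitVector, ByteVector}
+
+object RLPCodecs {
+  implicit val rlpBitVector: RLPCodec[BitVector] =
+    implicitly[RLPCodec[Array[Byte]]].xmap(BitVector(_), _.toByteArray)
+
+  implicit val rlpByteVector: RLPCodec[ByteVector] =
+    implicitly[RLPCodec[Array[Byte]]].xmap(ByteVector(_), _.toArray)
+
+  implicit val hashRLPCodec: RLPCodec[Hash] =
+    implicitly[RLPCodec[ByteVector]].xmap(Hash(_), identity)
+
+  implicit val headerHashRLPCodec: RLPCodec[Block.Header.Hash] =
+    implicitly[RLPCodec[ByteVector]].xmap(Block.Header.Hash(_), identity)
+
+  implicit val bodyHashRLPCodec: RLPCodec[Block.Body.Hash] =
+    implicitly[RLPCodec[ByteVector]].xmap(Block.Body.Hash(_), identity)
+
+  implicit val ledgerHashRLPCodec: RLPCodec[Ledger.Hash] =
+    implicitly[RLPCodec[ByteVector]].xmap(Ledger.Hash(_), identity)
+
+  implicit val merkleHashRLPCodec: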
RLPCodec[MerkleTree.Hash] = + implicitly[RLPCodec[ByteVector]].xmap(MerkleTree.Hash(_), identity) + + implicit val rlpProposerBlock: RLPCodec[Transaction.ProposerBlock] = + deriveLabelledGenericRLPCodec + + implicit val rlpCheckpointCandidate + : RLPCodec[Transaction.CheckpointCandidate] = + deriveLabelledGenericRLPCodec + + implicit def rlpIndexedSeq[T: RLPCodec]: RLPCodec[IndexedSeq[T]] = + seqEncDec[T]().xmap(_.toVector, _.toSeq) + + implicit def rlpNonEmptyList[T: RLPCodec]: RLPCodec[NonEmptyList[T]] = + seqEncDec[T]().xmap( + xs => + NonEmptyList.fromList(xs.toList).getOrElse { + RLPException.decodeError("NonEmptyList", "List cannot be empty.") + }, + _.toList + ) + + implicit val rlpLedger: RLPCodec[Ledger] = + deriveLabelledGenericRLPCodec + + implicit val rlpTransaction: RLPCodec[Transaction] = { + import Transaction._ + + val ProposerBlockTag: Short = 1 + val CheckpointCandidateTag: Short = 2 + + def encodeWithTag[T: RLPEncoder](tag: Short, value: T) = { + val t = RLPEncoder.encode(tag) + val l = RLPEncoder.encode(value).asInstanceOf[RLPList] + t +: l + } + + RLPCodec.instance[Transaction]( + { + case tx: ProposerBlock => + encodeWithTag(ProposerBlockTag, tx) + case tx: CheckpointCandidate => + encodeWithTag(CheckpointCandidateTag, tx) + }, + { case RLPList(tag, items @ _*) => + val rest = RLPList(items: _*) + tag.decodeAs[Short]("tag") match { + case ProposerBlockTag => + rest.decodeAs[ProposerBlock]("transaction") + case CheckpointCandidateTag => + rest.decodeAs[CheckpointCandidate]("transaction") + case unknown => + RLPException.decodeError( + "Transaction", + s"Unknown tag: $unknown", + List(tag) + ) + } + } + ) + } + + implicit val rlpBlockBody: RLPCodec[Block.Body] = + deriveLabelledGenericRLPCodec + + implicit val rlpBlockHeader: RLPCodec[Block.Header] = + deriveLabelledGenericRLPCodec + + // Cannot use derivation because Block is a sealed abstract case class, + // so it doesn't allow creation of an invalid block. 
+  implicit val rlpBlock: RLPCodec[Block] =
+    RLPCodec.instance[Block](
+      block =>
+        RLPList(
+          RLPEncoder.encode(block.header),
+          RLPEncoder.encode(block.body)
+        ),
+      { case RLPList(header, body) =>
+        val h = header.decodeAs[Block.Header]("header")
+        val b = body.decodeAs[Block.Body]("body")
+        Block.makeUnsafe(h, b)
+      }
+    )
+
+  implicit val rlpMerkleProof: RLPCodec[MerkleTree.Proof] =
+    deriveLabelledGenericRLPCodec
+
+  implicit val rlpViewNumber: RLPCodec[ViewNumber] =
+    implicitly[RLPCodec[Long]].xmap(ViewNumber(_), identity)
+
+  implicit val rlpVotingPhase: RLPCodec[VotingPhase] =
+    RLPCodec.instance[VotingPhase](
+      phase => {
+        val tag: Short = phase match {
+          case Phase.Prepare   => 1
+          case Phase.PreCommit => 2
+          case Phase.Commit    => 3
+        }
+        RLPEncoder.encode(tag)
+      },
+      { case tag =>
+        tag.decodeAs[Short]("phase") match {
+          case 1 => Phase.Prepare
+          case 2 => Phase.PreCommit
+          case 3 => Phase.Commit
+          case u =>
+            RLPException.decodeError(
+              "VotingPhase",
+              s"Unknown phase tag: $u",
+              List(tag)
+            )
+        }
+      }
+    )
+
+  implicit val rlpECDSASignature: RLPCodec[ECDSASignature] =
+    RLPCodec.instance[ECDSASignature](
+      sig => RLPEncoder.encode(sig.toBytes),
+      { case enc =>
+        val bytes = enc.decodeAs[ByteVector]("signature")
+        ECDSASignature
+          .fromBytes(akka.util.ByteString.fromArrayUnsafe(bytes.toArray))
+          .getOrElse {
+            RLPException.decodeError(
+              "ECDSASignature",
+              "Invalid signature format.",
+              List(enc)
+            )
+          }
+      }
+    )
+
+  implicit val rlpGroupSignature
+      : RLPCodec[CheckpointingAgreement.GroupSignature] =
+    deriveLabelledGenericRLPCodec
+
+  // Derivation doesn't seem to work on a generic case class.
+  implicit val rlpQuorumCertificate
+      : RLPCodec[QuorumCertificate[CheckpointingAgreement]] =
+    RLPCodec.instance[QuorumCertificate[CheckpointingAgreement]](
+      { case QuorumCertificate(phase, viewNumber, blockHash, signature) =>
+        RLPList(
+          RLPEncoder.encode(phase),
+          RLPEncoder.encode(viewNumber),
+          RLPEncoder.encode(blockHash),
+          RLPEncoder.encode(signature)
+        )
+      },
+      { case RLPList(phase, viewNumber, blockHash, signature) =>
+        QuorumCertificate[CheckpointingAgreement](
+          phase.decodeAs[VotingPhase]("phase"),
+          viewNumber.decodeAs[ViewNumber]("viewNumber"),
+          blockHash.decodeAs[CheckpointingAgreement.Hash]("blockHash"),
+          signature.decodeAs[CheckpointingAgreement.GroupSignature]("signature")
+        )
+      }
+    )
+
+  implicit val rlpCheckpointCertificate: RLPCodec[CheckpointCertificate] =
+    deriveLabelledGenericRLPCodec
+}
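The codecs can be exercised end to end with the `rlp.encode`/`rlp.decode` entry points that the test modules in this series use. A minimal round-trip sketch for the tagged `Transaction` codec; the sample bits are made up:

```scala
// Round-trip sketch for the tagged Transaction codec defined above.
// rlp.encode/rlp.decode are the entry points used by RLPCodecsProps/Spec.
import io.iohk.ethereum.rlp
import io.iohk.metronome.checkpointing.models.{RLPCodecs, Transaction}
import scodec.bits.BitVector

import RLPCodecs._

val tx: Transaction = Transaction.ProposerBlock(BitVector.fromValidHex("01"))

// Encodes as RLPList(tag, value), with tag = 1 for ProposerBlock.
val bytes = rlp.encode(tx)
val back  = rlp.decode[Transaction](bytes)

assert(back == tx)
```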
diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPHash.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/RLPHash.scala
similarity index 96%
rename from metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPHash.scala
rename to metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/RLPHash.scala
index d2388ced..785da739 100644
--- a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPHash.scala
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/RLPHash.scala
@@ -1,4 +1,4 @@
-package io.iohk.metronome.checkpointing.service.models
+package io.iohk.metronome.checkpointing.models

 import io.iohk.ethereum.rlp
 import io.iohk.ethereum.rlp.RLPEncoder
diff --git a/metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/models/Transaction.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Transaction.scala
similarity index 97%
rename from metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/models/Transaction.scala
rename to metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Transaction.scala
index 6c60b359..683b589a 100644
--- a/metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/models/Transaction.scala
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Transaction.scala
@@ -1,4 +1,4 @@
-package io.iohk.metronome.checkpointing.interpreter.models
+package io.iohk.metronome.checkpointing.models

 import scodec.bits.BitVector
diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/package.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/package.scala
new file mode 100644
index 00000000..e9a57fb3
--- /dev/null
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/package.scala
@@ -0,0 +1,5 @@
+package io.iohk.metronome
+
+package object checkpointing {
+  type CheckpointingAgreement = CheckpointingAgreement.type
+}
diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala
new file mode 100644
index 00000000..7178b107
--- /dev/null
+++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala
@@ -0,0 +1,129 @@
+package io.iohk.metronome.checkpointing.models
+
+import cats.data.NonEmptyList
+import io.iohk.ethereum.crypto.ECDSASignature
+import io.iohk.metronome.checkpointing.CheckpointingAgreement
+import io.iohk.metronome.crypto.hash.Hash
+import io.iohk.metronome.hotstuff.consensus.basic.Phase
+import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate
+import io.iohk.metronome.hotstuff.consensus.ViewNumber
+import org.scalacheck._
+import org.scalacheck.Arbitrary.arbitrary
+import scodec.bits.BitVector
+import scodec.bits.ByteVector
+import io.iohk.metronome.crypto.GroupSignature
+
+object ArbitraryInstances {
+  implicit val arbBitVector: Arbitrary[BitVector] =
+    Arbitrary {
+      arbitrary[Array[Byte]].map(BitVector(_))
+    }
+
+  implicit val arbHash: Arbitrary[Hash] =
+    Arbitrary {
+      Gen.listOfN(32, arbitrary[Byte]).map(ByteVector(_)).map(Hash(_))
+    }
+
+  implicit val arbHeaderHash: Arbitrary[Block.Header.Hash] =
+    Arbitrary(arbitrary[Hash].map(Block.Header.Hash(_)))
+
+  implicit val arbBodyHash: Arbitrary[Block.Body.Hash] =
+    Arbitrary(arbitrary[Hash].map(Block.Body.Hash(_)))
+
+  implicit val arbLedgerHash: Arbitrary[Ledger.Hash] =
+    Arbitrary(arbitrary[Hash].map(Ledger.Hash(_)))
+
+  implicit val arbMerkleHash: Arbitrary[MerkleTree.Hash] =
+    Arbitrary(arbitrary[Hash].map(MerkleTree.Hash(_)))
+
+  implicit val arbProposerBlock: Arbitrary[Transaction.ProposerBlock] =
+    Arbitrary {
+      arbitrary[BitVector].map(Transaction.ProposerBlock(_))
+    }
+
+  implicit val arbCheckpointCandidate
+      : Arbitrary[Transaction.CheckpointCandidate] =
+    Arbitrary {
+      arbitrary[BitVector].map(Transaction.CheckpointCandidate(_))
+    }
+
+  implicit val arbTransaction: Arbitrary[Transaction] =
+    Arbitrary {
+      Gen.frequency(
+        4 -> arbitrary[Transaction.ProposerBlock],
+        1 -> arbitrary[Transaction.CheckpointCandidate]
+      )
+    }
+
+  implicit val arbLeger: Arbitrary[Ledger] =
+    Arbitrary {
+      for {
+        mcp <- arbitrary[Option[Transaction.CheckpointCandidate]]
+        pbs <- arbitrary[Set[Transaction.ProposerBlock]].map(_.toVector)
+      } yield Ledger(mcp, pbs)
+    }
+
+  implicit val
arbBlock: Arbitrary[Block] = + Arbitrary { + for { + parentHash <- arbitrary[Block.Header.Hash] + postStateHash <- arbitrary[Ledger.Hash] + transactions <- arbitrary[Vector[Transaction]] + contentMerkleRoot <- arbitrary[MerkleTree.Hash] + body = Block.Body(transactions) + header = Block.Header( + parentHash, + postStateHash, + body.hash, + contentMerkleRoot + ) + } yield Block.makeUnsafe(header, body) + } + + implicit val arbBlockHeader: Arbitrary[Block.Header] = + Arbitrary(arbitrary[Block].map(_.header)) + + implicit val arbECDSASignature: Arbitrary[ECDSASignature] = + Arbitrary { + for { + r <- Gen.posNum[BigInt] + s <- Gen.posNum[BigInt] + v <- Gen.oneOf( + ECDSASignature.positivePointSign, + ECDSASignature.negativePointSign + ) + } yield ECDSASignature(r, s, v) + } + + implicit val arbCheckpointCertificate: Arbitrary[CheckpointCertificate] = + Arbitrary { + for { + n <- Gen.posNum[Int] + headers <- Gen + .listOfN(n, arbitrary[Block.Header]) + .map(NonEmptyList.fromListUnsafe(_)) + + checkpoint <- arbitrary[Transaction.CheckpointCandidate] + + leafCount <- Gen.choose(1, 10) + leafIndex <- Gen.choose(0, leafCount - 1) + siblings <- arbitrary[Vector[MerkleTree.Hash]] + proof = MerkleTree.Proof(leafIndex, leafCount, siblings) + + viewNumber <- Gen.posNum[Long].map(x => ViewNumber(x + n)) + signature <- arbitrary[CheckpointingAgreement.GSig] + commitQC = QuorumCertificate[CheckpointingAgreement]( + phase = Phase.Commit, + viewNumber = viewNumber, + blockHash = headers.head.hash, + signature = GroupSignature(signature) + ) + + } yield CheckpointCertificate( + headers, + checkpoint, + proof, + commitQC + ) + } +} diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/LedgerProps.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/LedgerProps.scala similarity index 89% rename from metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/LedgerProps.scala rename to metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/LedgerProps.scala index de412bb5..eca124c0 100644 --- a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/LedgerProps.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/LedgerProps.scala @@ -1,7 +1,6 @@ -package io.iohk.metronome.checkpointing.service.models +package io.iohk.metronome.checkpointing.models import io.iohk.metronome.core.Validated -import io.iohk.metronome.checkpointing.interpreter.models.Transaction import org.scalacheck._ import org.scalacheck.Prop.forAll diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsProps.scala similarity index 80% rename from metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala rename to metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsProps.scala index e34cfc34..5b8d6d82 100644 --- a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsProps.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsProps.scala @@ -1,8 +1,8 @@ -package io.iohk.metronome.checkpointing.service.models +package io.iohk.metronome.checkpointing.models +import io.iohk.ethereum.crypto.ECDSASignature import 
io.iohk.ethereum.rlp
 import io.iohk.ethereum.rlp.RLPCodec
-import io.iohk.metronome.checkpointing.interpreter.models.Transaction
 import org.scalacheck._
 import org.scalacheck.Prop.forAll
 import scala.reflect.ClassTag
@@ -23,4 +23,6 @@ object RLPCodecsProps extends Properties("RLPCodecs") {
   propRoundTrip[Ledger]
   propRoundTrip[Transaction]
   propRoundTrip[Block]
+  propRoundTrip[ECDSASignature]
+  propRoundTrip[CheckpointCertificate]
 }
diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala
new file mode 100644
index 00000000..95e92dc0
--- /dev/null
+++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala
@@ -0,0 +1,182 @@
+package io.iohk.metronome.checkpointing.models
+
+import cats.data.NonEmptyList
+import io.iohk.ethereum.crypto.ECDSASignature
+import io.iohk.ethereum.rlp._
+import io.iohk.metronome.crypto.GroupSignature
+import io.iohk.metronome.checkpointing.CheckpointingAgreement
+import io.iohk.metronome.hotstuff.consensus.basic.{Phase, QuorumCertificate}
+import io.iohk.metronome.hotstuff.consensus.ViewNumber
+import org.scalacheck.Arbitrary
+import org.scalacheck.Arbitrary.arbitrary
+import org.scalactic.Equality
+import org.scalatest.flatspec.AnyFlatSpec
+import org.scalatest.matchers.should.Matchers
+import scala.reflect.ClassTag
+
+/** Concrete examples of RLP encoding, so we can make sure the structure is what we expect.
+  *
+  * Complements `RLPCodecsProps` which works with arbitrary data.
+  */
+class RLPCodecsSpec extends AnyFlatSpec with Matchers {
+  import ArbitraryInstances._
+  import RLPCodecs._
+
+  def sample[T: Arbitrary] = arbitrary[T].sample.get
+
+  // Structural equality checker for RLPEncodeable.
+  // It has different wrappers for items based on whether it was hand-crafted or generated
+  // by codecs, and the RLPValue has mutable arrays inside.
+ implicit val eqRLPList = new Equality[RLPEncodeable] { + override def areEqual(a: RLPEncodeable, b: Any): Boolean = + (a, b) match { + case (a: RLPList, b: RLPList) => + a.items.size == b.items.size && a.items.zip(b.items).forall { + case (a, b) => + areEqual(a, b) + } + case (a: RLPValue, b: RLPValue) => + a.bytes.sameElements(b.bytes) + case other => + false + } + } + + abstract class Example[T: RLPCodec: ClassTag] { + def decoded: T + def encoded: RLPEncodeable + + def name = + s"RLPCodec[${implicitly[ClassTag[T]].runtimeClass.getSimpleName}]" + + def encode = RLPEncoder.encode(decoded) + def decode = RLPDecoder.decode[T](encoded) + } + + def exampleBehavior[T](example: Example[T]) = { + it should "encode the example value to the expected RLP data" in { + example.encode shouldEqual example.encoded + } + + it should "decode the example RLP data to the expected value" in { + example.decode shouldEqual example.decoded + } + } + + def test[T](example: Example[T]) = { + example.name should behave like exampleBehavior(example) + } + + test { + new Example[Ledger] { + override val decoded = Ledger( + maybeLastCheckpoint = Some( + sample[Transaction.CheckpointCandidate] + ), + proposerBlocks = Vector( + sample[Transaction.ProposerBlock], + sample[Transaction.ProposerBlock] + ) + ) + + override val encoded = + RLPList( // Ledger + RLPList( // Option + RLPList( // CheckpointCandidate + RLPValue(decoded.maybeLastCheckpoint.get.value.toByteArray) + ) + ), + RLPList( // Vector + RLPList( // ProposerBlock + RLPValue(decoded.proposerBlocks(0).value.toByteArray) + ), + RLPList(RLPValue(decoded.proposerBlocks(1).value.toByteArray)) + ) + ) + } + } + + test { + new Example[Transaction] { + override val decoded = sample[Transaction.ProposerBlock] + + override val encoded = + RLPList( // ProposerBlock + RLPValue(Array(1.toByte)), // Tag + RLPValue(decoded.value.toByteArray) + ) + } + } + + test { + new Example[CheckpointCertificate] { + val decoded = CheckpointCertificate( + headers = NonEmptyList.of( + sample[Block.Header], + sample[Block.Header] + ), + checkpoint = sample[Transaction.CheckpointCandidate], + proof = MerkleTree.Proof( + leafIndex = 2, + leafCount = 4, + siblingPath = Vector(sample[MerkleTree.Hash], sample[MerkleTree.Hash]) + ), + commitQC = QuorumCertificate[CheckpointingAgreement]( + phase = Phase.Commit, + viewNumber = ViewNumber(10), + blockHash = sample[Block.Header.Hash], + signature = GroupSignature( + List( + sample[ECDSASignature], + sample[ECDSASignature] + ) + ) + ) + ) + + override val encoded = + RLPList( // CheckpointCertificate + RLPList( // NonEmptyList + RLPList( // BlockHeader + RLPValue(decoded.headers.head.parentHash.toArray), + RLPValue(decoded.headers.head.postStateHash.toArray), + RLPValue(decoded.headers.head.bodyHash.toArray), + RLPValue(decoded.headers.head.contentMerkleRoot.toArray) + ), + RLPList( // BlockHeader + RLPValue(decoded.headers.last.parentHash.toArray), + RLPValue(decoded.headers.last.postStateHash.toArray), + RLPValue(decoded.headers.last.bodyHash.toArray), + RLPValue(decoded.headers.last.contentMerkleRoot.toArray) + ) + ), + RLPList( // CheckpointCandidate + RLPValue(decoded.checkpoint.value.toByteArray) + ), + RLPList( // Proof + RLPValue(Array(decoded.proof.leafIndex.toByte)), + RLPValue(Array(decoded.proof.leafCount.toByte)), + RLPList( // siblingPath + RLPValue(decoded.proof.siblingPath.head.toArray), + RLPValue(decoded.proof.siblingPath.last.toArray) + ) + ), + RLPList( // QuorumCertificate + RLPValue(Array(3.toByte)), // Commit + 
RLPValue(Array(10.toByte)), // ViewNumber + RLPValue(decoded.commitQC.blockHash.toArray), + RLPList( // GroupSignature + RLPList( // sig + RLPValue( // ECDSASignature + decoded.commitQC.signature.sig.head.toBytes.toArray[Byte] + ), + RLPValue( + decoded.commitQC.signature.sig.last.toBytes.toArray[Byte] + ) + ) + ) + ) + ) + } + } +} diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala deleted file mode 100644 index cae92a22..00000000 --- a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/models/RLPCodecs.scala +++ /dev/null @@ -1,96 +0,0 @@ -package io.iohk.metronome.checkpointing.service.models - -import io.iohk.ethereum.rlp.RLPCodec -import io.iohk.ethereum.rlp.RLPCodec.Ops -import io.iohk.ethereum.rlp.RLPImplicitDerivations._ -import io.iohk.ethereum.rlp.RLPImplicits._ -import io.iohk.ethereum.rlp.{RLPEncoder, RLPList} -import io.iohk.metronome.crypto.hash.Hash -import io.iohk.metronome.checkpointing.interpreter.models.Transaction -import scodec.bits.{BitVector, ByteVector} - -object RLPCodecs { - implicit val rlpBitVector: RLPCodec[BitVector] = - implicitly[RLPCodec[Array[Byte]]].xmap(BitVector(_), _.toByteArray) - - implicit val rlpByteVector: RLPCodec[ByteVector] = - implicitly[RLPCodec[Array[Byte]]].xmap(ByteVector(_), _.toArray) - - implicit val hashRLPCodec: RLPCodec[Hash] = - implicitly[RLPCodec[ByteVector]].xmap(Hash(_), identity) - - implicit val headerHashRLPCodec: RLPCodec[Block.Header.Hash] = - implicitly[RLPCodec[ByteVector]].xmap(Block.Header.Hash(_), identity) - - implicit val bodyHashRLPCodec: RLPCodec[Block.Body.Hash] = - implicitly[RLPCodec[ByteVector]].xmap(Block.Body.Hash(_), identity) - - implicit val ledgerHashRLPCodec: RLPCodec[Ledger.Hash] = - implicitly[RLPCodec[ByteVector]].xmap(Ledger.Hash(_), identity) - - implicit val rlpProposerBlock: RLPCodec[Transaction.ProposerBlock] = - deriveLabelledGenericRLPCodec - - implicit val rlpCheckpointCandidate - : RLPCodec[Transaction.CheckpointCandidate] = - deriveLabelledGenericRLPCodec - - implicit def rlpVector[T: RLPCodec]: RLPCodec[Vector[T]] = - seqEncDec[T]().xmap(_.toVector, _.toSeq) - - implicit val rlpLedger: RLPCodec[Ledger] = - deriveLabelledGenericRLPCodec - - implicit val rlpTransaction: RLPCodec[Transaction] = { - import Transaction._ - - val ProposerBlockTag: Short = 1 - val CheckpointCandidateTag: Short = 2 - - def encodeWithTag[T: RLPEncoder](tag: Short, value: T) = { - val t = RLPEncoder.encode(tag) - val l = RLPEncoder.encode(value).asInstanceOf[RLPList] - t +: l - } - - RLPCodec.instance[Transaction]( - { - case tx: ProposerBlock => - encodeWithTag(ProposerBlockTag, tx) - case tx: CheckpointCandidate => - encodeWithTag(CheckpointCandidateTag, tx) - }, - { case RLPList(tag, items @ _*) => - val rest = RLPList(items: _*) - tag.decodeAs[Short]("tag") match { - case ProposerBlockTag => - rest.decodeAs[ProposerBlock]("transaction") - case CheckpointCandidateTag => - rest.decodeAs[CheckpointCandidate]("transaction") - } - } - ) - } - - implicit val rlpBlockBody: RLPCodec[Block.Body] = - deriveLabelledGenericRLPCodec - - implicit val rlpBlockHeader: RLPCodec[Block.Header] = - deriveLabelledGenericRLPCodec - - // Cannot use derivation because Block is a sealed abstract case class, - // so it doesn't allow creation of an invalid block. 
- implicit val rlpBlock: RLPCodec[Block] = - RLPCodec.instance[Block]( - block => - RLPList( - RLPEncoder.encode(block.header), - RLPEncoder.encode(block.body) - ), - { case RLPList(header, body) => - val h = header.decodeAs[Block.Header]("header") - val b = body.decodeAs[Block.Body]("body") - Block.makeUnsafe(h, b) - } - ) -} diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala deleted file mode 100644 index b259ec31..00000000 --- a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/ArbitraryInstances.scala +++ /dev/null @@ -1,71 +0,0 @@ -package io.iohk.metronome.checkpointing.service.models - -import io.iohk.metronome.crypto.hash.Hash -import io.iohk.metronome.checkpointing.interpreter.models.Transaction -import org.scalacheck._ -import org.scalacheck.Arbitrary.arbitrary -import scodec.bits.BitVector -import scodec.bits.ByteVector - -object ArbitraryInstances { - implicit val arbBitVector: Arbitrary[BitVector] = - Arbitrary { - arbitrary[Array[Byte]].map(BitVector(_)) - } - - implicit val arbHash: Arbitrary[Hash] = - Arbitrary { - Gen.listOfN(32, arbitrary[Byte]).map(ByteVector(_)).map(Hash(_)) - } - - implicit val arbHeaderHash: Arbitrary[Block.Header.Hash] = - Arbitrary(arbitrary[Hash].map(Block.Header.Hash(_))) - - implicit val arbBodyHash: Arbitrary[Block.Body.Hash] = - Arbitrary(arbitrary[Hash].map(Block.Body.Hash(_))) - - implicit val arbLedgerHash: Arbitrary[Ledger.Hash] = - Arbitrary(arbitrary[Hash].map(Ledger.Hash(_))) - - implicit val arbProposerBlock: Arbitrary[Transaction.ProposerBlock] = - Arbitrary { - arbitrary[BitVector].map(Transaction.ProposerBlock(_)) - } - - implicit val arbCheckpointCandidate - : Arbitrary[Transaction.CheckpointCandidate] = - Arbitrary { - arbitrary[BitVector].map(Transaction.CheckpointCandidate(_)) - } - - implicit val arbTransaction: Arbitrary[Transaction] = - Arbitrary { - Gen.frequency( - 4 -> arbitrary[Transaction.ProposerBlock], - 1 -> arbitrary[Transaction.CheckpointCandidate] - ) - } - - implicit val arbLeger: Arbitrary[Ledger] = - Arbitrary { - for { - mcp <- arbitrary[Option[Transaction.CheckpointCandidate]] - pbs <- arbitrary[Set[Transaction.ProposerBlock]].map(_.toVector) - } yield Ledger(mcp, pbs) - } - - implicit val arbBlock: Arbitrary[Block] = - Arbitrary { - for { - parentHash <- arbitrary[Block.Header.Hash] - postStateHash <- arbitrary[Ledger.Hash] - transactions <- arbitrary[Vector[Transaction]] - body = Block.Body(transactions) - header = Block.Header( - parentHash, - postStateHash, - body.hash - ) - } yield Block.make(header, body) - } -} diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala deleted file mode 100644 index 88544e98..00000000 --- a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/models/RLPCodecsSpec.scala +++ /dev/null @@ -1,109 +0,0 @@ -package io.iohk.metronome.checkpointing.service.models - -import io.iohk.ethereum.rlp._ -import io.iohk.metronome.checkpointing.interpreter.models.Transaction -import org.scalactic.Equality -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import scala.reflect.ClassTag -import org.scalacheck.Arbitrary -import 
org.scalacheck.Arbitrary.arbitrary - -/** Concrete examples of RLP encoding, so we can make sure the structure is what we expect. - * - * Complements `RLPCodecsProps` which works with arbitrary data. - */ -class RLPCodecsSpec extends AnyFlatSpec with Matchers { - import ArbitraryInstances._ - import RLPCodecs._ - - def sample[T: Arbitrary] = arbitrary[T].sample.get - - // Structrual equality checker for RLPEncodeable. - // It has different wrappers for items based on whether it was hand crafted or generated - // by codecs, and the RLPValue has mutable arrays inside. - implicit val eqRLPList = new Equality[RLPEncodeable] { - override def areEqual(a: RLPEncodeable, b: Any): Boolean = - (a, b) match { - case (a: RLPList, b: RLPList) => - a.items.size == b.items.size && a.items.zip(b.items).forall { - case (a, b) => - areEqual(a, b) - } - case (a: RLPValue, b: RLPValue) => - a.bytes.sameElements(b.bytes) - case other => - false - } - } - - abstract class Example[T: RLPCodec: ClassTag] { - def decoded: T - def encoded: RLPEncodeable - - def name = - s"RLPCodec[${implicitly[ClassTag[T]].runtimeClass.getSimpleName}]" - - def encode = RLPEncoder.encode(decoded) - def decode = RLPDecoder.decode[T](encoded) - } - - def exampleBehavior[T](example: Example[T]) = { - it should "encode the example value to the expected RLP data" in { - example.encode shouldEqual example.encoded - } - - it should "decode the example RLP data to the expected value" in { - example.decode shouldEqual example.decoded - } - } - - def test[T](example: Example[T]) = { - example.name should behave like exampleBehavior(example) - } - - test { - new Example[Ledger] { - val ledger = Ledger( - maybeLastCheckpoint = Some( - sample[Transaction.CheckpointCandidate] - ), - proposerBlocks = Vector( - sample[Transaction.ProposerBlock], - sample[Transaction.ProposerBlock] - ) - ) - - override val decoded = ledger - - override val encoded = - RLPList( // Ledger - RLPList( // Option - RLPList( // CheckpointCandidate - RLPValue(ledger.maybeLastCheckpoint.get.value.toByteArray) - ) - ), - RLPList( // Vector - RLPList( // ProposerBlock - RLPValue(ledger.proposerBlocks(0).value.toByteArray) - ), - RLPList(RLPValue(ledger.proposerBlocks(1).value.toByteArray)) - ) - ) - } - } - - test { - new Example[Transaction] { - val transaction = sample[Transaction.ProposerBlock] - - override val decoded = transaction - - override val encoded = - RLPList( // ProposerBlock - RLPValue(Array(1.toByte)), // Tag - RLPValue(transaction.value.toByteArray) - ) - } - } -} diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala index 3788bbf4..427050b8 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala @@ -16,18 +16,19 @@ import io.iohk.metronome.networking.RemoteConnectionManagerWithScalanetProviderS buildTestConnectionManager } import io.iohk.metronome.logging.{HybridLogObject, HybridLog, LogTracer} -import io.iohk.scalanet.peergroup.PeerGroup import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSPeerGroup.FramingConfig +import io.iohk.scalanet.peergroup.PeerGroup +import java.net.InetSocketAddress +import java.security.SecureRandom import monix.eval.{Task, TaskLift, TaskLike} import 
monix.execution.Scheduler +import monix.execution.UncaughtExceptionReporter import org.bouncycastle.crypto.AsymmetricCipherKeyPair import org.scalatest.flatspec.AsyncFlatSpecLike +import org.scalatest.Inspectors import org.scalatest.matchers.should.Matchers -import scodec.Codec -import java.net.InetSocketAddress -import java.security.SecureRandom import scala.concurrent.duration._ -import monix.execution.UncaughtExceptionReporter +import scodec.Codec class RemoteConnectionManagerWithScalanetProviderSpec extends AsyncFlatSpecLike @@ -67,8 +68,8 @@ class RemoteConnectionManagerWithScalanetProviderSpec size <- cluster.clusterSize eachNodeCount <- cluster.getEachNodeConnectionsCount } yield { - assert(eachNodeCount.forall(count => count == 2)) - assert(size == 3) + Inspectors.forAll(eachNodeCount)(count => count shouldEqual 2) + size shouldEqual 3 } } @@ -79,8 +80,8 @@ class RemoteConnectionManagerWithScalanetProviderSpec size <- cluster.clusterSize eachNodeCount <- cluster.getEachNodeConnectionsCount } yield { - assert(eachNodeCount.forall(count => count == 3)) - assert(size == 4) + Inspectors.forAll(eachNodeCount)(count => count shouldEqual 3) + size shouldEqual 4 } } @@ -95,15 +96,13 @@ class RemoteConnectionManagerWithScalanetProviderSpec cluster.getMessageFromNode(receiver) ) } yield { - assert(eachNodeCount.forall(count => count == 2)) - assert(receivers.size == 2) - assert(received.size == 2) + Inspectors.forAll(eachNodeCount)(count => count shouldEqual 2) + receivers.size shouldEqual 2 + received.size shouldEqual 2 //every node should have received the same message - assert( - received.forall(receivedMessage => - receivedMessage == MessageReceived(sender, MessageA(1)) - ) - ) + Inspectors.forAll(received) { receivedMessage => + receivedMessage shouldBe MessageReceived(sender, MessageA(1)) + } } } @@ -122,8 +121,10 @@ class RemoteConnectionManagerWithScalanetProviderSpec _ <- cluster.startNode(address, keyPair, clusterConfig) _ <- cluster.waitUntilEveryNodeHaveNConnections(2) } yield { - assert(size == 3) - assert(connectionAfterFailure.forall(connections => connections == 1)) + size shouldEqual 3 + Inspectors.forAll(connectionAfterFailure) { connections => + connections shouldEqual 1 + } } } } From 8824d4726e8a28c7199fdd41115211c0f9790992 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Mon, 5 Apr 2021 23:07:55 +0100 Subject: [PATCH 20/48] PM-[2929,2931,2932]: Golden files for RLP formats. 
(#21) --- .../RLPCodec[CheckpointCertificate].rlp | 1 + .../RLPCodec[CheckpointCertificate].txt | 1 + .../resources/golden/RLPCodec[Ledger].rlp | 1 + .../resources/golden/RLPCodec[Ledger].txt | 1 + .../golden/RLPCodec[Transaction].rlp | 1 + .../golden/RLPCodec[Transaction].txt | 1 + .../models/ArbitraryInstances.scala | 8 ++- .../checkpointing/models/RLPCodecsSpec.scala | 56 ++++++++++++++++++- 8 files changed, 67 insertions(+), 3 deletions(-) create mode 100644 metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp create mode 100644 metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt create mode 100644 metronome/checkpointing/models/test/resources/golden/RLPCodec[Ledger].rlp create mode 100644 metronome/checkpointing/models/test/resources/golden/RLPCodec[Ledger].txt create mode 100644 metronome/checkpointing/models/test/resources/golden/RLPCodec[Transaction].rlp create mode 100644 metronome/checkpointing/models/test/resources/golden/RLPCodec[Transaction].txt diff --git a/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp new file mode 100644 index 00000000..cfb94fc7 --- /dev/null +++ b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp @@ -0,0 +1 @@ +f9020bf9010cf884a0004b6b79006d007e8100018001b28018017f00530d7f3a009e7f44ff34920100a0001dbe01ee39800001a180ff35e8007fff015a0f4f80c74d39755a01a79dff7fa011bf30c5de133d08efadc7586c8490171de86fa429031d44441f2e84827e5b79a001d1046600ff80ff01c4008d038043ff805975f901806a0101ff73ff80807f80f884a0b101ff927401d400ff7fbc007fdb282db1ffb37f940099ab7f801bf9017c7fe8a0d2737f3100007f00328680ccc16001ff0037ac417a5720ff007f01a63101be0ea0d9b7f9d47a106a6c178873aff3e71990274315b2edc595384824fbaca2e86932a00098007765ffc41a8001a700cd7f018e007f2750325c800000c91d2080008083c48301e400f8460204f842a0005180ff80806f009880f670b58025b98fd6ff800151ff8149baf70115ff0800a024f9f25480007f87bc6c3c677f1c808080eb416a473b010101447c0b807f01def8ad030aa068cc81000e7fa4a2017f39ff01ff007f65507f004eff5a83ff403293a8d47f00f888f886b841000000000000000000000000000000000000000000000000000000000000005e00000000000000000000000000000000000000000000000000000000000000411cb841000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000003f1c \ No newline at end of file diff --git a/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt new file mode 100644 index 00000000..bbca5a93 --- /dev/null +++ b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt @@ -0,0 +1 @@ +CheckpointCertificate(NonEmptyList(Header(ByteVector(32 bytes, 0x004b6b79006d007e8100018001b28018017f00530d7f3a009e7f44ff34920100),ByteVector(32 bytes, 0x001dbe01ee39800001a180ff35e8007fff015a0f4f80c74d39755a01a79dff7f),ByteVector(32 bytes, 0x11bf30c5de133d08efadc7586c8490171de86fa429031d44441f2e84827e5b79),ByteVector(32 bytes, 0x01d1046600ff80ff01c4008d038043ff805975f901806a0101ff73ff80807f80)), Header(ByteVector(32 bytes, 0xb101ff927401d400ff7fbc007fdb282db1ffb37f940099ab7f801bf9017c7fe8),ByteVector(32 bytes, 0xd2737f3100007f00328680ccc16001ff0037ac417a5720ff007f01a63101be0e),ByteVector(32 bytes, 0xd9b7f9d47a106a6c178873aff3e71990274315b2edc595384824fbaca2e86932),ByteVector(32 bytes, 
0x0098007765ffc41a8001a700cd7f018e007f2750325c800000c91d2080008083))),CheckpointCandidate(BitVector(24 bits, 0x01e400)),Proof(2,4,Vector(ByteVector(32 bytes, 0x005180ff80806f009880f670b58025b98fd6ff800151ff8149baf70115ff0800), ByteVector(32 bytes, 0x24f9f25480007f87bc6c3c677f1c808080eb416a473b010101447c0b807f01de))),QuorumCertificate(Commit,10,ByteVector(32 bytes, 0x68cc81000e7fa4a2017f39ff01ff007f65507f004eff5a83ff403293a8d47f00),GroupSignature(List(ECDSASignature(94,65,28), ECDSASignature(11,63,28))))) \ No newline at end of file diff --git a/metronome/checkpointing/models/test/resources/golden/RLPCodec[Ledger].rlp b/metronome/checkpointing/models/test/resources/golden/RLPCodec[Ledger].rlp new file mode 100644 index 00000000..0d950b6d --- /dev/null +++ b/metronome/checkpointing/models/test/resources/golden/RLPCodec[Ledger].rlp @@ -0,0 +1 @@ +f874e9e8a780a2017fe6000001f6ff6562991fa96676ab000100c6eaff7fb080d1017f4900047f00fbb1ff17f848de9d80d80100567f8f7f4d00ff27843963ffff7aff7f4101ff7f00ffff8001e8a700cb05f2ffff2dd91fff57446e803f3001d7cf80e3b5007f7601ff0708808001e000a0ff6e8057 \ No newline at end of file diff --git a/metronome/checkpointing/models/test/resources/golden/RLPCodec[Ledger].txt b/metronome/checkpointing/models/test/resources/golden/RLPCodec[Ledger].txt new file mode 100644 index 00000000..0fb67def --- /dev/null +++ b/metronome/checkpointing/models/test/resources/golden/RLPCodec[Ledger].txt @@ -0,0 +1 @@ +Ledger(Some(CheckpointCandidate(BitVector(312 bits, 0x80a2017fe6000001f6ff6562991fa96676ab000100c6eaff7fb080d1017f4900047f00fbb1ff17))),Vector(ProposerBlock(BitVector(232 bits, 0x80d80100567f8f7f4d00ff27843963ffff7aff7f4101ff7f00ffff8001)), ProposerBlock(BitVector(312 bits, 0x00cb05f2ffff2dd91fff57446e803f3001d7cf80e3b5007f7601ff0708808001e000a0ff6e8057)))) \ No newline at end of file diff --git a/metronome/checkpointing/models/test/resources/golden/RLPCodec[Transaction].rlp b/metronome/checkpointing/models/test/resources/golden/RLPCodec[Transaction].rlp new file mode 100644 index 00000000..0a293a78 --- /dev/null +++ b/metronome/checkpointing/models/test/resources/golden/RLPCodec[Transaction].rlp @@ -0,0 +1 @@ +ef01adcbff7913ff0000ac1a01009bb245579601b680016500cf02597f070080c318000004ad002faa27b58001ea7f00 \ No newline at end of file diff --git a/metronome/checkpointing/models/test/resources/golden/RLPCodec[Transaction].txt b/metronome/checkpointing/models/test/resources/golden/RLPCodec[Transaction].txt new file mode 100644 index 00000000..66fadbbb --- /dev/null +++ b/metronome/checkpointing/models/test/resources/golden/RLPCodec[Transaction].txt @@ -0,0 +1 @@ +ProposerBlock(BitVector(360 bits, 0xcbff7913ff0000ac1a01009bb245579601b680016500cf02597f070080c318000004ad002faa27b58001ea7f00)) \ No newline at end of file diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala index 7178b107..dda779c6 100644 --- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala @@ -16,7 +16,13 @@ import io.iohk.metronome.crypto.GroupSignature object ArbitraryInstances { implicit val arbBitVector: Arbitrary[BitVector] = Arbitrary { - arbitrary[Array[Byte]].map(BitVector(_)) + for { + // Choose a size that BitVector still renders as hex in toString, + // so the exact value is 
easy to see in test output or golden files.
+        // Above that size it renders the hashCode, which can differ between Scala versions.
+        n  <- Gen.choose(0, 64)
+        bs <- Gen.listOfN(n, arbitrary[Byte])
+      } yield BitVector(bs.toArray)
     }

   implicit val arbHash: Arbitrary[Hash] =
diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala
index 95e92dc0..d2e4273d 100644
--- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala
+++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala
@@ -3,16 +3,19 @@ package io.iohk.metronome.checkpointing.models
 import cats.data.NonEmptyList
 import io.iohk.ethereum.crypto.ECDSASignature
 import io.iohk.ethereum.rlp._
+import io.iohk.ethereum.rlp
 import io.iohk.metronome.crypto.GroupSignature
 import io.iohk.metronome.checkpointing.CheckpointingAgreement
 import io.iohk.metronome.hotstuff.consensus.basic.{Phase, QuorumCertificate}
 import io.iohk.metronome.hotstuff.consensus.ViewNumber
+import java.nio.file.{Files, Path, StandardOpenOption}
 import org.scalacheck.Arbitrary
 import org.scalacheck.Arbitrary.arbitrary
 import org.scalactic.Equality
 import org.scalatest.flatspec.AnyFlatSpec
 import org.scalatest.matchers.should.Matchers
 import scala.reflect.ClassTag
+import scodec.bits.BitVector

 /** Concrete examples of RLP encoding, so we can make sure the structure is what we expect.
  *
@@ -49,8 +52,11 @@ class RLPCodecsSpec extends AnyFlatSpec with Matchers {
     def name =
       s"RLPCodec[${implicitly[ClassTag[T]].runtimeClass.getSimpleName}]"

-    def encode = RLPEncoder.encode(decoded)
-    def decode = RLPDecoder.decode[T](encoded)
+    def encode: RLPEncodeable = RLPEncoder.encode(decoded)
+    def decode: T = RLPDecoder.decode[T](encoded)
+
+    def decode(bytes: BitVector): T =
+      rlp.decode[T](bytes.toByteArray)
   }

   def exampleBehavior[T](example: Example[T]) = {
@@ -63,8 +69,54 @@ class RLPCodecsSpec extends AnyFlatSpec with Matchers {
     }
   }

+  /** When the example is first executed, create a golden file that we can
+    * check in with the code for future reference, and to detect any regression.
+    *
+    * If there are intentional changes, just delete it and let it be recreated.
+    * This could be used as a starter for implementing the same format in a
+    * different language.
+    *
+    * The String format is not expected to match other implementations, but it's
+    * easy enough to read, and should be as good as a hard-coded example either
+    * in code or a README file.
+    */
+  def goldenBehavior[T](example: Example[T]) = {
+
+    def resourcePath(extension: String): Path = {
+      val goldenPath = Path.of(getClass.getResource("/golden").toURI)
+      goldenPath.resolve(s"${example.name}.${extension}")
+    }
+
+    def maybeCreateResource(path: Path, content: => String) = {
+      if (!Files.exists(path)) {
+        Files.writeString(path, content, StandardOpenOption.CREATE_NEW)
+      }
+    }
+
+    val goldenRlpPath = resourcePath("rlp")
+    val goldenTxtPath = resourcePath("txt")
+
+    maybeCreateResource(
+      goldenRlpPath,
+      BitVector(rlp.encode(example.encoded)).toHex
+    )
+
+    maybeCreateResource(
+      goldenTxtPath,
+      example.decoded.toString
+    )
+
+    it should "decode the golden RLP content to a value that matches the golden String" in {
+      val goldenRlp = BitVector.fromHex(Files.readString(goldenRlpPath)).get
+      val goldenTxt = Files.readString(goldenTxtPath)
+
+      example.decode(goldenRlp).toString shouldBe goldenTxt
+    }
+  }
+
   def test[T](example: Example[T]) = {
     example.name should behave like exampleBehavior(example)
+    example.name should behave like goldenBehavior(example)
   }

   test {

From bc125e431a7cfc205f716cbe4e95fa34d4d1e1ab Mon Sep 17 00:00:00 2001
From: Akosh Farkash
Date: Wed, 14 Apr 2021 10:53:49 +0100
Subject: [PATCH 21/48] PM-3104: Generic BlockStorage component for HotStuff
 (#22)

* PM-3104: Added a generic BlockStorage component to the hotstuff.service module.
* PM-3104: Test putting blocks in the store.
* PM-3104: Test all methods on BlockStorage.
* PM-3104: Test by constructing subtrees.
* PM-3104: Use KVStoreRead for read-only operations.
---
 build.sc                                      |   1 +
 .../checkpointing/models/Block.scala          |   1 +
 .../service/storage/BlockStorage.scala        | 190 +++++++++++++
 .../service/storage/BlockStorageProps.scala   | 250 ++++++++++++++++++
 .../iohk/metronome/storage/KVStoreRead.scala  |   8 +-
 .../iohk/metronome/storage/KVStoreState.scala |   4 +
 6 files changed, 450 insertions(+), 4 deletions(-)
 create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala
 create mode 100644 metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala

diff --git a/build.sc b/build.sc
index 5d5bb5df..2b3913a5 100644
--- a/build.sc
+++ b/build.sc
@@ -296,6 +296,7 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule {
       override def moduleDeps: Seq[JavaModule] =
         Seq(
           tracing,
+          storage,
           hotstuff.service,
           checkpointing.models,
           checkpointing.interpreter
diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala
index 932da9c4..b563bd0b 100644
--- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala
@@ -22,6 +22,7 @@ sealed abstract case class Block private (
 }

 object Block {
+  type Hash = Block.Header.Hash

   /** Create a block from a header and body we received from the network.
 *
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala
new file mode 100644
index 00000000..91835355
--- /dev/null
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala
@@ -0,0 +1,190 @@
+package io.iohk.metronome.hotstuff.service.storage
+
+import cats.implicits._
+import io.iohk.metronome.storage.{KVStore, KVStoreRead, KVCollection}
+import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, Block}
+import scala.collection.immutable.Queue
+
+/** Storage for blocks that maintains parent-child relationships as well,
+  * to facilitate tree traversal and pruning.
+  *
+  * It is assumed that the application maintains some pointers into the tree
+  * where it can start traversing from, e.g. the last Commit Quorum Certificate
+  * would point at a block hash which would serve as the entry point.
+  */
+class BlockStorage[N, A <: Agreement: Block](
+    blockColl: KVCollection[N, A#Hash, A#Block],
+    childToParentColl: KVCollection[N, A#Hash, A#Hash],
+    parentToChildrenColl: KVCollection[N, A#Hash, Set[A#Hash]]
+) {
+  private implicit val kvn  = KVStore.instance[N]
+  private implicit val kvrn = KVStoreRead.instance[N]
+
+  /** Insert a block into the store, and if the parent still exists,
+    * then add this block to its children.
+    */
+  def put(block: A#Block): KVStore[N, Unit] = {
+    val blockHash  = implicitly[Block[A]].blockHash(block)
+    val parentHash = implicitly[Block[A]].parentBlockHash(block)
+
+    blockColl.put(blockHash, block) >>
+      childToParentColl.put(blockHash, parentHash) >>
+      parentToChildrenColl.put(blockHash, Set.empty) >>
+      parentToChildrenColl.update(parentHash, _ + blockHash)
+  }
+
+  /** Retrieve a block by hash, if it exists. */
+  def get(blockHash: A#Hash): KVStoreRead[N, Option[A#Block]] =
+    blockColl.read(blockHash)
+
+  /** Check whether a block is present in the tree. */
+  def contains(blockHash: A#Hash): KVStoreRead[N, Boolean] =
+    childToParentColl.read(blockHash).map(_.isDefined)
+
+  /** Check how many children the block has in the tree. */
+  private def childCount(blockHash: A#Hash): KVStoreRead[N, Int] =
+    parentToChildrenColl.read(blockHash).map(_.fold(0)(_.size))
+
+  /** Check whether the parent of the block is present in the tree. */
+  private def hasParent(blockHash: A#Hash): KVStoreRead[N, Boolean] =
+    childToParentColl.read(blockHash).flatMap {
+      case None             => KVStoreRead[N].pure(false)
+      case Some(parentHash) => contains(parentHash)
+    }
+
+  /** Check whether it's safe to delete a block.
+    *
+    * A block is safe to delete if doing so doesn't break up the tree
+    * into a forest, in which case we may have blocks we cannot reach
+    * by traversal, leaking space.
+    *
+    * This is true if the block has no children,
+    * or it has no parent and at most one child.
+    */
+  private def canDelete(blockHash: A#Hash): KVStoreRead[N, Boolean] =
+    (hasParent(blockHash), childCount(blockHash)).mapN {
+      case (_, 0)     => true
+      case (false, 1) => true
+      case _          => false
+    }
+
+  /** Delete a block by hash, if doing so wouldn't break the tree;
+    * otherwise do nothing.
+    *
+    * Return `true` if the block has been deleted, `false` if not.
+    *
+    * If this is not efficient enough, then move the deletion traversal
+    * logic into this class so it can make sure all the invariants
+    * are maintained, e.g. collect all hashes that can be safely deleted
+    * and then do so without checks.
+ */ + def delete(blockHash: A#Hash): KVStore[N, Boolean] = + canDelete(blockHash).lift.flatMap { ok => + deleteUnsafe(blockHash).whenA(ok).as(ok) + } + + /** Delete a block and remove it from any parent-to-child mapping, + * without any checking for the tree structure invariants. + */ + private def deleteUnsafe(blockHash: A#Hash): KVStore[N, Unit] = + childToParentColl.get(blockHash).flatMap { + case None => + KVStore[N].unit + case Some(parentHash) => + parentToChildrenColl.update(parentHash, _ - blockHash) + } >> + blockColl.delete(blockHash) >> + childToParentColl.delete(blockHash) >> + parentToChildrenColl.delete(blockHash) + + /** Get the ancestor chain of a block from the root, + * including the block itself. + * + * If the block is not in the tree, the result will be empty, + * otherwise `head` will be the root of the block tree, + * and `last` will be the block itself. + */ + def getPathFromRoot(blockHash: A#Hash): KVStoreRead[N, List[A#Hash]] = { + def loop( + blockHash: A#Hash, + acc: List[A#Hash] + ): KVStoreRead[N, List[A#Hash]] = { + childToParentColl.read(blockHash).flatMap { + case None => + // This block doesn't exist in the tree, so our ancestry is whatever we collected so far. + KVStoreRead[N].pure(acc) + + case Some(parentHash) => + // So at least `blockHash` exists in the tree. + loop(parentHash, blockHash :: acc) + } + } + loop(blockHash, Nil) + } + + /** Collect all descendants of a block, + * including the block itself. + * + * The result will start with the blocks furthest away, + * so it should be safe to delete them in the same order; + * `last` will be the block itself. + * + * The `skip` parameter can be used to avoid traversing + * branches that we want to keep during deletion. + */ + def getDescendants( + blockHash: A#Hash, + skip: Set[A#Hash] = Set.empty + ): KVStoreRead[N, List[A#Hash]] = { + // BFS traversal. + def loop( + queue: Queue[A#Hash], + acc: List[A#Hash] + ): KVStoreRead[N, List[A#Hash]] = { + queue.dequeueOption match { + case None => + KVStoreRead[N].pure(acc) + + case Some((blockHash, queue)) if skip(blockHash) => + loop(queue, acc) + + case Some((blockHash, queue)) => + parentToChildrenColl.read(blockHash).flatMap { + case None => + loop(queue, acc) + case Some(children) => + loop(queue ++ children, blockHash :: acc) + } + } + } + loop(Queue(blockHash), Nil) + } + + /** Delete all blocks which are not descendants of a given block, + * making it the new root. + * + * Return the list of deleted block hashes. + */ + def pruneNonDescendants(blockHash: A#Hash): KVStore[N, List[A#Hash]] = + getPathFromRoot(blockHash).lift.flatMap { + case Nil => + KVStore[N].pure(Nil) + + case path @ (rootHash :: _) => + // The safe order to delete blocks would be to go down the main chain + // from the root, delete each non-mainchain child, then the parent, + // then descend on the main chain until we hit `blockHash`. + + // A similar effect can be achieved by collecting all descendants + // of the root, then deleting everything that isn't on the main chain, + // from the children towards the root, and finally the main chain itself, + // going from the root towards the children. 
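+
+        // As a hypothetical illustration: with main chain R -> A -> B,
+        // where B is the new root, and side branches R -> X and A -> Y,
+        // the descendants of R (skipping B) are X, Y, A and R; X and Y
+        // are deleted first as non-main-chain blocks, then R and A
+        // (`path.init`) follow, leaving only B behind.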
+ val isMainChain = path.toSet + + for { + deleteables <- getDescendants(rootHash, skip = Set(blockHash)).lift + _ <- deleteables.filterNot(isMainChain).traverse(deleteUnsafe(_)) + _ <- path.init.traverse(deleteUnsafe(_)) + } yield deleteables + } +} diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala new file mode 100644 index 00000000..9f51f048 --- /dev/null +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala @@ -0,0 +1,250 @@ +package io.iohk.metronome.hotstuff.service.storage + +import cats.implicits._ +import io.iohk.metronome.storage.{KVCollection, KVStoreState} +import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, Block => BlockOps} +import java.util.UUID +import org.scalacheck._ +import org.scalacheck.Prop.{all, forAll, propBoolean} +import scodec.codecs.implicits._ +import scodec.Codec + +object BlockStorageProps extends Properties("BlockStorage") { + + case class TestBlock(id: String, parentId: String) { + def isGenesis = parentId.isEmpty + } + + object TestAggreement extends Agreement { + type Block = TestBlock + type Hash = String + type PSig = Nothing + type GSig = Nothing + type PKey = Nothing + type SKey = Nothing + + implicit val block = new BlockOps[TestAggreement] { + override def blockHash(b: TestBlock) = b.id + override def parentBlockHash(b: TestBlock) = b.parentId + } + } + type TestAggreement = TestAggreement.type + type Hash = TestAggreement.Hash + + implicit def `Codec[Set[T]]`[T: Codec] = + implicitly[Codec[List[T]]].xmap[Set[T]](_.toSet, _.toList) + + type Namespace = String + object Namespace { + val Blocks = "blocks" + val BlockToParent = "block-to-parent" + val BlockToChildren = "block-to-children" + } + + object TestBlockStorage + extends BlockStorage[Namespace, TestAggreement]( + new KVCollection[Namespace, Hash, TestBlock](Namespace.Blocks), + new KVCollection[Namespace, Hash, Hash](Namespace.BlockToParent), + new KVCollection[Namespace, Hash, Set[Hash]](Namespace.BlockToChildren) + ) + + object TestKVStore extends KVStoreState[Namespace] + + implicit class TestStoreOps(store: TestKVStore.Store) { + def putBlock(block: TestBlock) = + TestKVStore.compile(TestBlockStorage.put(block)).runS(store).value + + def containsBlock(blockHash: Hash) = + TestKVStore + .compile(TestBlockStorage.contains(blockHash)) + .run(store) + + def getBlock(blockHash: Hash) = + TestKVStore + .compile(TestBlockStorage.get(blockHash)) + .run(store) + + def deleteBlock(blockHash: Hash) = + TestKVStore + .compile(TestBlockStorage.delete(blockHash)) + .run(store) + .value + + def getPathFromRoot(blockHash: Hash) = + TestKVStore + .compile(TestBlockStorage.getPathFromRoot(blockHash)) + .run(store) + + def getDescendants(blockHash: Hash) = + TestKVStore + .compile(TestBlockStorage.getDescendants(blockHash)) + .run(store) + + def pruneNonDescendants(blockHash: Hash) = + TestKVStore + .compile(TestBlockStorage.pruneNonDescendants(blockHash)) + .run(store) + .value + } + + def genBlockId: Gen[Hash] = + Gen.delay(UUID.randomUUID().toString) + + /** Generate a block with a given parent, using the next available ID. */ + def genBlock(parentId: Hash): Gen[TestBlock] = + genBlockId.map { uuid => + TestBlock(uuid, parentId) + } + + def genBlock: Gen[TestBlock] = + genBlockId.flatMap(genBlock) + + /** Generate a (possibly empty) block tree. 
*/ + def genBlockTree(parentId: Hash): Gen[List[TestBlock]] = + for { + childCount <- Gen.frequency( + 3 -> 0, + 5 -> 1, + 2 -> 2 + ) + children <- Gen.listOfN( + childCount, { + for { + block <- genBlock(parentId) + tree <- genBlockTree(block.id) + } yield block +: tree + } + ) + } yield children.flatten + + def genBlockTree: Gen[List[TestBlock]] = + genBlockTree(parentId = "") + + def genNonEmptyBlockTree: Gen[List[TestBlock]] = for { + genesis <- genBlock(parentId = "") + tree <- genBlockTree(genesis.id) + } yield genesis +: tree + + case class TestData( + tree: List[TestBlock], + store: TestKVStore.Store + ) + object TestData { + def apply(tree: List[TestBlock]): TestData = { + val insert = tree.map(TestBlockStorage.put).sequence + val store = TestKVStore.compile(insert).runS(Map.empty).value + TestData(tree, store) + } + } + + def genExisting = for { + tree <- genNonEmptyBlockTree + existing <- Gen.oneOf(tree) + data = TestData(tree) + } yield (data, existing) + + def genNonExisting = for { + tree <- genBlockTree + nonExisting <- genBlock + data = TestData(tree) + } yield (data, nonExisting) + + def genSubTree = for { + tree <- genNonEmptyBlockTree + leaf = tree.last + subTree <- genBlockTree(parentId = leaf.id) + data = TestData(tree ++ subTree) + } yield (data, leaf, subTree) + + property("put") = forAll(genNonExisting) { case (data, block) => + val s = data.store.putBlock(block) + s(Namespace.Blocks)(block.id) == block + s(Namespace.BlockToParent)(block.id) == block.parentId + } + + property("contains existing") = forAll(genExisting) { case (data, existing) => + data.store.containsBlock(existing.id) + } + + property("contains non-existing") = forAll(genNonExisting) { + case (data, nonExisting) => + !data.store.containsBlock(nonExisting.id) + } + + property("get existing") = forAll(genExisting) { case (data, existing) => + data.store.getBlock(existing.id).contains(existing) + } + + property("get non-existing") = forAll(genNonExisting) { + case (data, nonExisting) => + data.store.getBlock(nonExisting.id).isEmpty + } + + property("delete existing") = forAll(genExisting) { case (data, existing) => + val childCount = data.tree.count(_.parentId == existing.id) + val noParent = !data.tree.exists(_.id == existing.parentId) + val (s, ok) = data.store.deleteBlock(existing.id) + all( + "deleted" |: s.containsBlock(existing.id) == !ok, + "ok" |: ok && (childCount == 0 || childCount == 1 && noParent) || !ok + ) + } + + property("delete non-existing") = forAll(genNonExisting) { + case (data, nonExisting) => + data.store.deleteBlock(nonExisting.id)._2 == true + } + + property("getPathFromRoot existing") = forAll(genExisting) { + case (data, existing) => + val path = data.store.getPathFromRoot(existing.id) + all( + "nonEmpty" |: path.nonEmpty, + "head" |: path.headOption.contains(data.tree.head.id), + "last" |: path.lastOption.contains(existing.id) + ) + } + + property("getPathFromRoot non-existing") = forAll(genNonExisting) { + case (data, nonExisting) => + data.store.getPathFromRoot(nonExisting.id).isEmpty + } + + property("getDescendants existing") = forAll(genSubTree) { + case (data, block, subTree) => + val ds = data.store.getDescendants(block.id) + val dss = ds.toSet + all( + "nonEmpty" |: ds.nonEmpty, + "last" |: ds.lastOption.contains(block.id), + "size" |: ds.size == subTree.size + 1, + "subtree" |: subTree.forall(block => dss.contains(block.id)) + ) + } + + property("getDescendants non-existing") = forAll(genNonExisting) { + case (data, nonExisting) => + 
data.store.getDescendants(nonExisting.id).isEmpty
+  }
+
+  property("pruneNonDescendants existing") = forAll(genSubTree) {
+    case (data, block, subTree) =>
+      val (s, ps) = data.store.pruneNonDescendants(block.id)
+      val pss = ps.toSet
+      val descendants = subTree.map(_.id).toSet
+      val nonDescendants =
+        data.tree.map(_.id).filterNot(descendants).filterNot(_ == block.id)
+      all(
+        "size" |: ps.size == nonDescendants.size,
+        "pruned" |: nonDescendants.forall(pss),
+        "deleted" |: nonDescendants.forall(!s.containsBlock(_)),
+        "kept-block" |: s.containsBlock(block.id),
+        "kept-descendants" |: descendants.forall(s.containsBlock(_))
+      )
+  }
+
+  property("pruneNonDescendants non-existing") = forAll(genNonExisting) {
+    case (data, nonExisting) =>
+      data.store.pruneNonDescendants(nonExisting.id)._2.isEmpty
+  }
+}
diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala
index 0a8afcca..39f923bd 100644
--- a/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala
+++ b/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala
@@ -10,10 +10,10 @@ import scodec.Codec
   */
 object KVStoreRead {
 
-  def unit[N]: KVStore[N, Unit] =
+  def unit[N]: KVStoreRead[N, Unit] =
     pure(())
 
-  def pure[N, A](a: A): KVStore[N, A] =
+  def pure[N, A](a: A): KVStoreRead[N, A] =
     Free.pure(a)
 
   def instance[N]: Ops[N] = new Ops[N] {}
@@ -25,9 +25,9 @@ object KVStoreRead {
     type KVNamespacedOp[A] =
       ({ type L[A] = KVStoreReadOp[N, A] })#L[A]
 
-    def unit: KVStore[N, Unit] = KVStore.unit[N]
+    def unit: KVStoreRead[N, Unit] = KVStoreRead.unit[N]
 
-    def pure[A](a: A) = KVStore.pure[N, A](a)
+    def pure[A](a: A) = KVStoreRead.pure[N, A](a)
 
     def read[K: Codec, V: Codec](
         namespace: N,
diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStoreState.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStoreState.scala
index 29996235..67cee1d9 100644
--- a/metronome/storage/src/io/iohk/metronome/storage/KVStoreState.scala
+++ b/metronome/storage/src/io/iohk/metronome/storage/KVStoreState.scala
@@ -74,6 +74,10 @@ class KVStoreState[N] {
   def compile[A](program: KVStore[N, A]): KVNamespacedState[A] =
     program.foldMap(stateCompiler)
 
+  /** Compile a KVStoreRead program to a Reader monad, which can be executed like:
+    *
+    * `new KVStoreState[String].compile(program).run(Map.empty)`
+    */
   def compile[A](program: KVStoreRead[N, A]): KVNamespacedReader[A] =
     program.foldMap(readerCompiler)
 }

From 96cc907a695e9dba3730fa31f3f08e9ad50e9558 Mon Sep 17 00:00:00 2001
From: Radek Tkaczyk
Date: Fri, 9 Apr 2021 20:49:04 +0200
Subject: [PATCH 22/48] [PM-3102,3103] merkle tree

---
 .gitignore                                    |   2 +
 build.sc                                      |  10 +-
 .../checkpointing/models/Block.scala          |  12 +-
 .../checkpointing/models/MerkleTree.scala     | 164 +++++++++++++++++-
 .../checkpointing/models/Transaction.scala    |   5 +-
 .../RLPCodec[CheckpointCertificate].rlp       |   2 +-
 .../RLPCodec[CheckpointCertificate].txt       |   2 +-
 .../models/ArbitraryInstances.scala           |   8 +-
 .../models/MerkleTreeProps.scala              |  65 +++++++
 .../checkpointing/models/RLPCodecsSpec.scala  |   4 -
 10 files changed, 242 insertions(+), 32 deletions(-)
 create mode 100644 metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/MerkleTreeProps.scala

diff --git a/.gitignore b/.gitignore
index e990eec4..8d68b3db 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,5 @@
 .metals
 .vscode
 out/
+*.iml
+/.idea*
diff --git a/build.sc b/build.sc
index 2b3913a5..03884b36 100644
--- a/build.sc
+++ b/build.sc
@@ -67,7 +67,8 @@ class
MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { "KonradStaniec", "Konrad Staniec", "https://github.com/KonradStaniec" - ) + ), + Developer("rtkaczyk", "Radek Tkaczyk", "https://github.com/rtkaczyk") ) ) } @@ -139,7 +140,12 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { ) def single(args: String*) = T.command { - super.runMain("org.scalatest.run", args: _*) + // ScalaCheck test + if (args.headOption.exists(_.endsWith("Props"))) + super.runMain(args.head, args.tail: _*) + // ScalaTest test + else + super.runMain("org.scalatest.run", args: _*) } } } diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala index b563bd0b..2c4a1c8a 100644 --- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala +++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala @@ -39,12 +39,12 @@ object Block { transactions: IndexedSeq[Transaction] ): Block = { val body = Body(transactions) + val txMerkleTree = + MerkleTree.build(transactions.map(tx => MerkleTree.Hash(tx.hash))) val header = Header( parentHash = parent.hash, postStateHash = postStateHash, - bodyHash = body.hash, - // TODO (PM-3102): Compute Root Hash over the transactions. - contentMerkleRoot = MerkleTree.Hash.empty + contentMerkleRoot = txMerkleTree.hash ) makeUnsafe(header, body) } @@ -55,8 +55,7 @@ object Block { val header = Header( parentHash = Block.Header.Hash(ByteVector.empty), postStateHash = Ledger.empty.hash, - bodyHash = body.hash, - contentMerkleRoot = MerkleTree.Hash.empty + contentMerkleRoot = MerkleTree.empty.hash ) makeUnsafe(header, body) } @@ -65,10 +64,7 @@ object Block { parentHash: Header.Hash, // Hash of the Ledger after executing the block. postStateHash: Ledger.Hash, - // Hash of the transactions in the body. - bodyHash: Body.Hash, // Merkle root of the transactions in the body. - // TODO (PM-3102): Should this just replace the `bodyHash`? contentMerkleRoot: MerkleTree.Hash ) extends RLPHash[Header, Header.Hash] diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/MerkleTree.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/MerkleTree.scala index 7ec3ed85..624c8918 100644 --- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/MerkleTree.scala +++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/MerkleTree.scala @@ -1,14 +1,29 @@ package io.iohk.metronome.checkpointing.models import io.iohk.metronome.core.Tagger +import io.iohk.metronome.crypto.hash.Keccak256 import scodec.bits.ByteVector +import scala.annotation.tailrec + +sealed trait MerkleTree { + def hash: MerkleTree.Hash +} + object MerkleTree { - object Hash extends Tagger[ByteVector] { - val empty = apply(ByteVector.empty) - } + object Hash extends Tagger[ByteVector] type Hash = Hash.Tagged + /** MerkleTree with no elements */ + val empty = Leaf(Hash(Keccak256(ByteVector.empty))) + + private val hashFn: (Hash, Hash) => Hash = + (a, b) => Hash(Keccak256(a ++ b)) + + case class Node(hash: Hash, left: MerkleTree, right: Option[MerkleTree]) + extends MerkleTree + case class Leaf(hash: Hash) extends MerkleTree + /** Merkle proof that some leaf content is part of the tree. 
* * It is expected that the root hash and the leaf itself is available to @@ -17,7 +32,6 @@ object MerkleTree { * among its siblings leaves. Based on that it is possible to use the sibling * hash path to check whether they add up to the root hash. * - * `leafCount` gives the height of the binary tree: `leafCount = 2^h` * `leafIndex` can be interpreted as a binary number, which represents * the path from the root of the tree down to the leaf, with the bits * indicating whether to go left or right in each fork, while descending @@ -51,16 +65,148 @@ object MerkleTree { * The right/left decisions we gleaned from the `leafIndex` tell us the order * we have to pass the arguments to the hash function. * - * Note that if `leafCount` would be higher, the binary representation of 2 - * would conceptually be longer, e.g. `0010` for a tree with 16 leaves. + * Note that the length of binary representation of `leafIndex` corresponds + * to the height of the tree, e.g. `0010` for a tree of height 4 (9 to 16 leaves). */ case class Proof( // Position of the leaf in the lowest level. leafIndex: Int, - // Number of leaves in the lowest level. - leafCount: Int, // Hashes of the "other" side of the tree, level by level, // starting from the lowest up to the highest. - siblingPath: IndexedSeq[MerkleTree.Hash] + siblingPath: IndexedSeq[Hash] ) + + def build(elems: Iterable[Hash]): MerkleTree = { + @tailrec + def buildTree(nodes: Seq[MerkleTree]): MerkleTree = { + if (nodes.size == 1) + nodes.head + else { + val paired = nodes.grouped(2).toSeq.map { + case Seq(a, b) => + Node(hashFn(a.hash, b.hash), a, Some(b)) + case Seq(a) => + // if the element has no pair we hash it with itself + Node(hashFn(a.hash, a.hash), a, None) + } + buildTree(paired) + } + } + + if (elems.isEmpty) + empty + else + buildTree(elems.toSeq.map(Leaf(_))) + } + + def verifyProof( + proof: Proof, + root: Hash, + leaf: Hash + ): Boolean = { + def verify(currentHash: Hash, height: Int, siblings: Seq[Hash]): Hash = { + if (siblings.isEmpty) + currentHash + else { + val goLeft = shouldTraverseLeft(height, proof.leafIndex) + val nextHash = + if (goLeft) hashFn(currentHash, siblings.head) + else hashFn(siblings.head, currentHash) + + verify(nextHash, height + 1, siblings.tail) + } + } + + verify(leaf, 1, proof.siblingPath) == root + } + + def generateProofFromIndex(root: MerkleTree, index: Int): Option[Proof] = { + if (index < 0 || index >= findSize(root)) + None + else { + val siblings = findSiblings(root, findHeight(root), index) + Some(Proof(index, siblings)) + } + } + + def generateProofFromHash(root: MerkleTree, elem: Hash): Option[Proof] = { + if (root == empty) + None + else + findElem(root, elem).map { index => + val siblings = findSiblings(root, findHeight(root), index) + Proof(index, siblings) + } + } + + @tailrec + /** Finds tree height based on leftmost branch traversal */ + private def findHeight(tree: MerkleTree, height: Int = 0): Int = tree match { + case Leaf(_) => height + case Node(_, left, _) => findHeight(left, height + 1) + } + + @tailrec + /** Finds the tree size (number of leaves), by traversing the rightmost branch */ + private def findSize(tree: MerkleTree, maxIndex: Int = 0): Int = tree match { + case `empty` => + 0 + case Leaf(_) => + maxIndex + 1 + case Node(_, left, None) => + findSize(left, maxIndex << 1) + case Node(_, _, Some(right)) => + findSize(right, maxIndex << 1 | 1) + } + + /** Looks up an element hash in the tree returning its index if it exists */ + private def findElem( + tree: MerkleTree, + elem: Hash, + 
index: Int = 0
+  ): Option[Int] = tree match {
+    case Leaf(`elem`) =>
+      Some(index)
+    case Leaf(_) =>
+      None
+    case Node(_, left, None) =>
+      findElem(left, elem, index << 1)
+    case Node(_, left, Some(right)) =>
+      findElem(left, elem, index << 1) orElse
+        findElem(right, elem, index << 1 | 1)
+  }
+
+  /** Traverses the tree from the root towards the leaf, collecting the hashes of sibling nodes.
+    * If a node has only one child then that child's hash is collected. These hashes constitute
+    * the Merkle proof; they are returned ordered from lowest to highest (with regard to
+    * the height of the tree).
+    */
+  private def findSiblings(
+      tree: MerkleTree,
+      height: Int,
+      leafIndex: Int
+  ): IndexedSeq[Hash] = tree match {
+    case Leaf(_) =>
+      Vector.empty
+
+    case Node(_, left, None) =>
+      if (!shouldTraverseLeft(height, leafIndex))
+        Vector.empty
+      else
+        findSiblings(left, height - 1, leafIndex) :+ left.hash
+
+    case Node(_, left, Some(right)) =>
+      val goLeft = shouldTraverseLeft(height, leafIndex)
+      val (traverse, sibling) = if (goLeft) (left, right) else (right, left)
+      findSiblings(traverse, height - 1, leafIndex) :+ sibling.hash
+  }
+
+  /** Determines tree traversal direction from a given height towards the leaf indicated
+    * by the index:
+    *
+    *   true - traverse left child (take right hash)
+    *   false - traverse right
+    */
+  private def shouldTraverseLeft(height: Int, leafIndex: Int): Boolean =
+    (leafIndex >> (height - 1) & 1) == 0
+}
diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Transaction.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Transaction.scala
index 683b589a..1d6ee6dc 100644
--- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Transaction.scala
+++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Transaction.scala
@@ -13,9 +13,10 @@ import scodec.bits.BitVector
  * we support, which is to register proposer blocks in the ledger, required by Advocate,
  * and to register checkpoint candidates.
*/ -sealed trait Transaction +sealed trait Transaction extends RLPHash[Transaction, Transaction.Hash] -object Transaction { +object Transaction + extends RLPHashCompanion[Transaction]()(RLPCodecs.rlpTransaction) { /** In PoW chains that support Advocate checkpointing, the Checkpoint Certificate * can enforce the inclusion of proposed blocks on the chain via references; think diff --git a/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp index cfb94fc7..220860ec 100644 --- a/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp +++ b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp @@ -1 +1 @@ -f9020bf9010cf884a0004b6b79006d007e8100018001b28018017f00530d7f3a009e7f44ff34920100a0001dbe01ee39800001a180ff35e8007fff015a0f4f80c74d39755a01a79dff7fa011bf30c5de133d08efadc7586c8490171de86fa429031d44441f2e84827e5b79a001d1046600ff80ff01c4008d038043ff805975f901806a0101ff73ff80807f80f884a0b101ff927401d400ff7fbc007fdb282db1ffb37f940099ab7f801bf9017c7fe8a0d2737f3100007f00328680ccc16001ff0037ac417a5720ff007f01a63101be0ea0d9b7f9d47a106a6c178873aff3e71990274315b2edc595384824fbaca2e86932a00098007765ffc41a8001a700cd7f018e007f2750325c800000c91d2080008083c48301e400f8460204f842a0005180ff80806f009880f670b58025b98fd6ff800151ff8149baf70115ff0800a024f9f25480007f87bc6c3c677f1c808080eb416a473b010101447c0b807f01def8ad030aa068cc81000e7fa4a2017f39ff01ff007f65507f004eff5a83ff403293a8d47f00f888f886b841000000000000000000000000000000000000000000000000000000000000005e00000000000000000000000000000000000000000000000000000000000000411cb841000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000003f1c \ No newline at end of file +f901f9f8caf863a068017f7a80015c7f01ffc10001000087017fa3b8d2807faa8000ff8005018080a07f08b76d73018044cdff7f1b6780ffcc04258b74ff807fff3b5c01867fff493da06fd07f2e8687007f807f4380a9eeff647f454f1600a2801280570048568ff300f863a066001b8000ff518000f79723000100264c01d17f01797adb3600e4cf41ff769aa09b1ee3dfc2000180be00809f7f77017bff00d6e001e6ed408a7f0091095700b7a0803100737cff804f7f7cf2ccd5ff7fc2809ad67a7f51b2ff007f266c29fff080f6b53e807fffc5ff7f7f7f01430101a5017fff80001f40ce800100000770ff020077007f007fc601354501ff0000017f12ff0b7f7f7201f84502f842a00063ff8000e56b01bb7f7a047fc40180cd00ff8013827f7f607fffdba4ffba88a043ff01ff7f80847fb97f6bceae0001fff6c0ffde80a1b2010081017f7f70807ff8ad030aa01f3e7f7f2f337fb680a1808bd622ff977f9bff7bffbb80007f23ffa3ff8053cff888f886b8410000000000000000000000000000000000000000000000000000000000000047000000000000000000000000000000000000000000000000000000000000005b1cb841000000000000000000000000000000000000000000000000000000000000005e00000000000000000000000000000000000000000000000000000000000000611b \ No newline at end of file diff --git a/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt index bbca5a93..a2667d42 100644 --- a/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt +++ b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt @@ -1 +1 @@ -CheckpointCertificate(NonEmptyList(Header(ByteVector(32 bytes, 0x004b6b79006d007e8100018001b28018017f00530d7f3a009e7f44ff34920100),ByteVector(32 bytes, 
0x001dbe01ee39800001a180ff35e8007fff015a0f4f80c74d39755a01a79dff7f),ByteVector(32 bytes, 0x11bf30c5de133d08efadc7586c8490171de86fa429031d44441f2e84827e5b79),ByteVector(32 bytes, 0x01d1046600ff80ff01c4008d038043ff805975f901806a0101ff73ff80807f80)), Header(ByteVector(32 bytes, 0xb101ff927401d400ff7fbc007fdb282db1ffb37f940099ab7f801bf9017c7fe8),ByteVector(32 bytes, 0xd2737f3100007f00328680ccc16001ff0037ac417a5720ff007f01a63101be0e),ByteVector(32 bytes, 0xd9b7f9d47a106a6c178873aff3e71990274315b2edc595384824fbaca2e86932),ByteVector(32 bytes, 0x0098007765ffc41a8001a700cd7f018e007f2750325c800000c91d2080008083))),CheckpointCandidate(BitVector(24 bits, 0x01e400)),Proof(2,4,Vector(ByteVector(32 bytes, 0x005180ff80806f009880f670b58025b98fd6ff800151ff8149baf70115ff0800), ByteVector(32 bytes, 0x24f9f25480007f87bc6c3c677f1c808080eb416a473b010101447c0b807f01de))),QuorumCertificate(Commit,10,ByteVector(32 bytes, 0x68cc81000e7fa4a2017f39ff01ff007f65507f004eff5a83ff403293a8d47f00),GroupSignature(List(ECDSASignature(94,65,28), ECDSASignature(11,63,28))))) \ No newline at end of file +CheckpointCertificate(NonEmptyList(Header(ByteVector(32 bytes, 0x68017f7a80015c7f01ffc10001000087017fa3b8d2807faa8000ff8005018080),ByteVector(32 bytes, 0x7f08b76d73018044cdff7f1b6780ffcc04258b74ff807fff3b5c01867fff493d),ByteVector(32 bytes, 0x6fd07f2e8687007f807f4380a9eeff647f454f1600a2801280570048568ff300)), Header(ByteVector(32 bytes, 0x66001b8000ff518000f79723000100264c01d17f01797adb3600e4cf41ff769a),ByteVector(32 bytes, 0x9b1ee3dfc2000180be00809f7f77017bff00d6e001e6ed408a7f0091095700b7),ByteVector(32 bytes, 0x803100737cff804f7f7cf2ccd5ff7fc2809ad67a7f51b2ff007f266c29fff080))),CheckpointCandidate(BitVector(424 bits, 0x3e807fffc5ff7f7f7f01430101a5017fff80001f40ce800100000770ff020077007f007fc601354501ff0000017f12ff0b7f7f7201)),Proof(2,Vector(ByteVector(32 bytes, 0x0063ff8000e56b01bb7f7a047fc40180cd00ff8013827f7f607fffdba4ffba88), ByteVector(32 bytes, 0x43ff01ff7f80847fb97f6bceae0001fff6c0ffde80a1b2010081017f7f70807f))),QuorumCertificate(Commit,10,ByteVector(32 bytes, 0x1f3e7f7f2f337fb680a1808bd622ff977f9bff7bffbb80007f23ffa3ff8053cf),GroupSignature(List(ECDSASignature(71,91,28), ECDSASignature(94,97,27))))) \ No newline at end of file diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala index dda779c6..ff6c144b 100644 --- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala @@ -61,7 +61,7 @@ object ArbitraryInstances { ) } - implicit val arbLeger: Arbitrary[Ledger] = + implicit val arbLedger: Arbitrary[Ledger] = Arbitrary { for { mcp <- arbitrary[Option[Transaction.CheckpointCandidate]] @@ -80,7 +80,6 @@ object ArbitraryInstances { header = Block.Header( parentHash, postStateHash, - body.hash, contentMerkleRoot ) } yield Block.makeUnsafe(header, body) @@ -111,10 +110,9 @@ object ArbitraryInstances { checkpoint <- arbitrary[Transaction.CheckpointCandidate] - leafCount <- Gen.choose(1, 10) - leafIndex <- Gen.choose(0, leafCount - 1) + leafIndex <- Gen.choose(0, 10) siblings <- arbitrary[Vector[MerkleTree.Hash]] - proof = MerkleTree.Proof(leafIndex, leafCount, siblings) + proof = MerkleTree.Proof(leafIndex, siblings) viewNumber <- Gen.posNum[Long].map(x => ViewNumber(x + n)) signature <- 
arbitrary[CheckpointingAgreement.GSig] diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/MerkleTreeProps.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/MerkleTreeProps.scala new file mode 100644 index 00000000..55c83e64 --- /dev/null +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/MerkleTreeProps.scala @@ -0,0 +1,65 @@ +package io.iohk.metronome.checkpointing.models + +import org.scalacheck.{Gen, Properties} +import org.scalacheck.Arbitrary.arbitrary +import ArbitraryInstances.arbMerkleHash +import org.scalacheck.Prop.forAll + +object MerkleTreeProps extends Properties("MerkleTree") { + + def genElements(max: Int = 256): Gen[List[MerkleTree.Hash]] = + Gen.choose(0, max).flatMap { n => + Gen.listOfN(n, arbitrary(arbMerkleHash)) + } + + property("inclusionProof") = forAll(genElements()) { elements => + val merkleTree = MerkleTree.build(elements) + elements.zipWithIndex.forall { case (elem, idx) => + val fromHash = MerkleTree.generateProofFromHash(merkleTree, elem) + val fromIndex = MerkleTree.generateProofFromIndex(merkleTree, idx) + fromHash == fromIndex && fromHash.isDefined + } + } + + property("proofVerification") = forAll(genElements()) { elements => + val merkleTree = MerkleTree.build(elements) + elements.forall { elem => + val maybeProof = MerkleTree.generateProofFromHash(merkleTree, elem) + maybeProof.exists(MerkleTree.verifyProof(_, merkleTree.hash, elem)) + } + } + + property("noFalseInclusion") = forAll(genElements(128), genElements(32)) { + (elements, other) => + val nonElements = other.diff(elements) + val merkleTree = MerkleTree.build(elements) + + val noFalseProof = nonElements.forall { nonElem => + MerkleTree.generateProofFromHash(merkleTree, nonElem).isEmpty + } + + val noFalseVerification = elements.forall { elem => + val proof = MerkleTree.generateProofFromHash(merkleTree, elem).get + !nonElements.exists(MerkleTree.verifyProof(proof, merkleTree.hash, _)) + } + + noFalseProof && noFalseVerification + } + + property("emptyTree") = { + val empty = MerkleTree.build(Nil) + + MerkleTree.generateProofFromHash(empty, MerkleTree.empty.hash).isEmpty && + empty.hash == MerkleTree.empty.hash + } + + property("singleElementTree") = forAll(arbMerkleHash.arbitrary) { elem => + val tree = MerkleTree.build(elem :: Nil) + + tree.hash == elem && + MerkleTree + .generateProofFromHash(tree, elem) + .map(MerkleTree.verifyProof(_, tree.hash, elem)) + .contains(true) + } +} diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala index d2e4273d..b5e8b0f4 100644 --- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala @@ -170,7 +170,6 @@ class RLPCodecsSpec extends AnyFlatSpec with Matchers { checkpoint = sample[Transaction.CheckpointCandidate], proof = MerkleTree.Proof( leafIndex = 2, - leafCount = 4, siblingPath = Vector(sample[MerkleTree.Hash], sample[MerkleTree.Hash]) ), commitQC = QuorumCertificate[CheckpointingAgreement]( @@ -192,13 +191,11 @@ class RLPCodecsSpec extends AnyFlatSpec with Matchers { RLPList( // BlockHeader RLPValue(decoded.headers.head.parentHash.toArray), RLPValue(decoded.headers.head.postStateHash.toArray), - 
RLPValue(decoded.headers.head.bodyHash.toArray), RLPValue(decoded.headers.head.contentMerkleRoot.toArray) ), RLPList( // BlockHeader RLPValue(decoded.headers.last.parentHash.toArray), RLPValue(decoded.headers.last.postStateHash.toArray), - RLPValue(decoded.headers.last.bodyHash.toArray), RLPValue(decoded.headers.last.contentMerkleRoot.toArray) ) ), @@ -207,7 +204,6 @@ class RLPCodecsSpec extends AnyFlatSpec with Matchers { ), RLPList( // Proof RLPValue(Array(decoded.proof.leafIndex.toByte)), - RLPValue(Array(decoded.proof.leafCount.toByte)), RLPList( // siblingPath RLPValue(decoded.proof.siblingPath.head.toArray), RLPValue(decoded.proof.siblingPath.last.toArray) From 8990ac002677ade92da124eb4d6b60bcc5a1701f Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Sat, 17 Apr 2021 12:49:37 +0100 Subject: [PATCH 23/48] FIX: Ignore line width in developer list. (#30) --- build.sc | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/build.sc b/build.sc index 03884b36..8cc53c6f 100644 --- a/build.sc +++ b/build.sc @@ -56,20 +56,14 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { licenses = Seq(License.`Apache-2.0`), versionControl = VersionControl.github("input-output-hk", "metronome"), // Add yourself if you make a PR! + // format: off developers = Seq( Developer("aakoshh", "Akosh Farkash", "https://github.com/aakoshh"), - Developer( - "lemastero", - "Piotr Paradzinski", - "https://github.com/lemastero" - ), - Developer( - "KonradStaniec", - "Konrad Staniec", - "https://github.com/KonradStaniec" - ), + Developer("lemastero","Piotr Paradzinski","https://github.com/lemastero"), + Developer("KonradStaniec","Konrad Staniec","https://github.com/KonradStaniec"), Developer("rtkaczyk", "Radek Tkaczyk", "https://github.com/rtkaczyk") ) + // format: on ) } From 748a12d4170db0cd1023524c567ce6d21393124e Mon Sep 17 00:00:00 2001 From: Radek Tkaczyk Date: Tue, 20 Apr 2021 09:15:37 +0200 Subject: [PATCH 24/48] [PM-3064,3065] signing --- build.sc | 8 +- .../CheckpointingAgreement.scala | 26 +-- .../checkpointing/CheckpointSigningSpec.scala | 44 +++++ .../models/ArbitraryInstances.scala | 9 +- .../checkpointing/models/RLPCodecsSpec.scala | 4 - .../io/iohk/metronome/crypto/ECKeyPair.scala | 28 +++ .../iohk/metronome/crypto/ECPrivateKey.scala | 27 +++ .../iohk/metronome/crypto/ECPublicKey.scala | 22 +++ .../metronome/crypto/Secp256k1Utils.scala | 28 --- .../hotstuff/consensus/Federation.scala | 2 +- .../consensus/basic/Secp256k1Agreement.scala | 12 ++ .../consensus/basic/Secp256k1Signing.scala | 87 ++++++++++ .../hotstuff/consensus/basic/Signing.scala | 14 +- .../consensus/ArbitraryInstances.scala | 32 ++++ .../consensus/LeaderSelectionProps.scala | 7 +- .../basic/Secp256k1SigningProps.scala | 162 ++++++++++++++++++ .../ScalanetConnectionProvider.scala | 6 +- .../networking/ConnectionHandlerSpec.scala | 11 +- .../MockEncryptedConnectionProvider.scala | 45 ++--- .../RemoteConnectionManagerTestUtils.scala | 34 ++-- ...onnectionManagerWithMockProviderSpec.scala | 25 +-- ...ctionManagerWithScalanetProviderSpec.scala | 54 +++--- 22 files changed, 531 insertions(+), 156 deletions(-) create mode 100644 metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/CheckpointSigningSpec.scala create mode 100644 metronome/crypto/src/io/iohk/metronome/crypto/ECKeyPair.scala create mode 100644 metronome/crypto/src/io/iohk/metronome/crypto/ECPrivateKey.scala create mode 100644 metronome/crypto/src/io/iohk/metronome/crypto/ECPublicKey.scala delete mode 100644 
metronome/crypto/src/io/iohk/metronome/crypto/Secp256k1Utils.scala create mode 100644 metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1Agreement.scala create mode 100644 metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1Signing.scala create mode 100644 metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/ArbitraryInstances.scala create mode 100644 metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1SigningProps.scala diff --git a/build.sc b/build.sc index 8cc53c6f..00132538 100644 --- a/build.sc +++ b/build.sc @@ -188,7 +188,8 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { override def ivyDeps = super.ivyDeps() ++ Agg( ivy"io.iohk::mantis-crypto:${VersionOf.mantis}", - ivy"org.scodec::scodec-bits:${VersionOf.`scodec-bits`}" + ivy"org.scodec::scodec-bits:${VersionOf.`scodec-bits`}", + ivy"org.scodec::scodec-core:${VersionOf.`scodec-core`}" ) object test extends TestModule @@ -265,7 +266,10 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { override def moduleDeps: Seq[PublishModule] = Seq(core, crypto, hotstuff.consensus) - object test extends TestModule + object test extends TestModule { + override def moduleDeps: Seq[JavaModule] = + super.moduleDeps ++ Seq(hotstuff.consensus.test) + } } /** Library to be included on the PoW side to talk to the checkpointing service. diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala index d1335553..2a9b408b 100644 --- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala +++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala @@ -1,26 +1,30 @@ package io.iohk.metronome.checkpointing -import io.iohk.ethereum.crypto.ECDSASignature import io.iohk.metronome.crypto import io.iohk.metronome.hotstuff.consensus.ViewNumber -import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, VotingPhase} -import org.bouncycastle.crypto.params.{ - ECPublicKeyParameters, - ECPrivateKeyParameters +import io.iohk.metronome.hotstuff.consensus.basic.{ + Secp256k1Agreement, + Signing, + VotingPhase } +import scodec.bits.ByteVector +import io.iohk.ethereum.rlp +import io.iohk.metronome.checkpointing.models.RLPCodecs._ -object CheckpointingAgreement extends Agreement { +object CheckpointingAgreement extends Secp256k1Agreement { override type Block = models.Block override type Hash = models.Block.Header.Hash - override type PSig = ECDSASignature - // TODO (PM-2935): Replace list with theshold signatures. 
- override type GSig = List[ECDSASignature] - override type PKey = ECPublicKeyParameters - override type SKey = ECPrivateKeyParameters type GroupSignature = crypto.GroupSignature[ PKey, (VotingPhase, ViewNumber, Hash), GSig ] + + implicit val signing: Signing[CheckpointingAgreement] = + Signing.secp256k1((phase, viewNumber, hash) => + ByteVector( + rlp.encode(phase) ++ rlp.encode(viewNumber) ++ rlp.encode(hash) + ) + ) } diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/CheckpointSigningSpec.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/CheckpointSigningSpec.scala new file mode 100644 index 00000000..0dd90099 --- /dev/null +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/CheckpointSigningSpec.scala @@ -0,0 +1,44 @@ +package io.iohk.metronome.checkpointing + +import io.iohk.metronome.crypto.ECKeyPair +import io.iohk.metronome.hotstuff.consensus.basic.{Signing, VotingPhase} +import io.iohk.metronome.hotstuff.consensus.{ + Federation, + LeaderSelection, + ViewNumber +} +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +import java.security.SecureRandom + +/** A single positive case spec to test type interoperability. + * See [[io.iohk.metronome.hotstuff.consensus.basic.Secp256k1SigningProps]] for a more in-depth test + */ +class CheckpointSigningSpec extends AnyFlatSpec with Matchers { + import models.ArbitraryInstances._ + + "Checkpoint signing" should "work :)" in { + val keyPairs = IndexedSeq.fill(2)(ECKeyPair.generate(new SecureRandom)) + val federation = Federation(keyPairs.map(_.pub))(LeaderSelection.RoundRobin) + .getOrElse(throw new Exception("Could not build federation")) + + val signing = implicitly[Signing[CheckpointingAgreement]] + + val phase = sample[VotingPhase] + val viewNumber = sample[ViewNumber] + val hash = sample[CheckpointingAgreement.Hash] + + val partialSigs = + keyPairs.map(kp => signing.sign(kp.prv, phase, viewNumber, hash)) + val groupSig = signing.combine(partialSigs) + + signing.validate( + federation, + groupSig, + phase, + viewNumber, + hash + ) shouldBe true + } +} diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala index ff6c144b..09452d50 100644 --- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala @@ -10,10 +10,10 @@ import io.iohk.metronome.hotstuff.consensus.ViewNumber import org.scalacheck._ import org.scalacheck.Arbitrary.arbitrary import scodec.bits.BitVector -import scodec.bits.ByteVector import io.iohk.metronome.crypto.GroupSignature -object ArbitraryInstances { +object ArbitraryInstances + extends io.iohk.metronome.hotstuff.consensus.ArbitraryInstances { implicit val arbBitVector: Arbitrary[BitVector] = Arbitrary { for { @@ -25,11 +25,6 @@ object ArbitraryInstances { } yield BitVector(bs.toArray) } - implicit val arbHash: Arbitrary[Hash] = - Arbitrary { - Gen.listOfN(32, arbitrary[Byte]).map(ByteVector(_)).map(Hash(_)) - } - implicit val arbHeaderHash: Arbitrary[Block.Header.Hash] = Arbitrary(arbitrary[Hash].map(Block.Header.Hash(_))) diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala 
b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala index b5e8b0f4..01328a29 100644 --- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala @@ -9,8 +9,6 @@ import io.iohk.metronome.checkpointing.CheckpointingAgreement import io.iohk.metronome.hotstuff.consensus.basic.{Phase, QuorumCertificate} import io.iohk.metronome.hotstuff.consensus.ViewNumber import java.nio.file.{Files, Path, StandardOpenOption} -import org.scalacheck.Arbitrary -import org.scalacheck.Arbitrary.arbitrary import org.scalactic.Equality import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers @@ -25,8 +23,6 @@ class RLPCodecsSpec extends AnyFlatSpec with Matchers { import ArbitraryInstances._ import RLPCodecs._ - def sample[T: Arbitrary] = arbitrary[T].sample.get - // Structrual equality checker for RLPEncodeable. // It has different wrappers for items based on whether it was hand crafted or generated // by codecs, and the RLPValue has mutable arrays inside. diff --git a/metronome/crypto/src/io/iohk/metronome/crypto/ECKeyPair.scala b/metronome/crypto/src/io/iohk/metronome/crypto/ECKeyPair.scala new file mode 100644 index 00000000..0aaff420 --- /dev/null +++ b/metronome/crypto/src/io/iohk/metronome/crypto/ECKeyPair.scala @@ -0,0 +1,28 @@ +package io.iohk.metronome.crypto + +import org.bouncycastle.crypto.AsymmetricCipherKeyPair + +import java.security.SecureRandom + +/** The pair of EC private and public keys for Secp256k1 elliptic curve */ +case class ECKeyPair(prv: ECPrivateKey, pub: ECPublicKey) { + + /** The bouncycastle's underlying type for efficient use with + * `io.iohk.ethereum.crypto.ECDSASignature` + */ + def underlying: AsymmetricCipherKeyPair = prv.underlying +} + +object ECKeyPair { + + def apply(keyPair: AsymmetricCipherKeyPair): ECKeyPair = { + val (prv, pub) = io.iohk.ethereum.crypto.keyPairToByteArrays(keyPair) + ECKeyPair(ECPrivateKey(prv), ECPublicKey(pub)) + } + + /** Generates a new keypair on the Secp256k1 elliptic curve */ + def generate(secureRandom: SecureRandom): ECKeyPair = { + val kp = io.iohk.ethereum.crypto.generateKeyPair(secureRandom) + ECKeyPair(kp) + } +} diff --git a/metronome/crypto/src/io/iohk/metronome/crypto/ECPrivateKey.scala b/metronome/crypto/src/io/iohk/metronome/crypto/ECPrivateKey.scala new file mode 100644 index 00000000..3e0102f7 --- /dev/null +++ b/metronome/crypto/src/io/iohk/metronome/crypto/ECPrivateKey.scala @@ -0,0 +1,27 @@ +package io.iohk.metronome.crypto + +import org.bouncycastle.crypto.AsymmetricCipherKeyPair +import scodec.bits.ByteVector +import io.iohk.ethereum.crypto.keyPairFromPrvKey + +/** Wraps the bytes representing an EC private key */ +case class ECPrivateKey(bytes: ByteVector) { + require( + bytes.length == ECPrivateKey.Length, + s"Key must be ${ECPrivateKey.Length} bytes long" + ) + + /** Converts the byte representation to bouncycastle's `AsymmetricCipherKeyPair` for efficient use with + * `io.iohk.ethereum.crypto.ECDSASignature` + */ + val underlying: AsymmetricCipherKeyPair = keyPairFromPrvKey( + bytes.toArray + ) +} + +object ECPrivateKey { + val Length = 32 + + def apply(bytes: Array[Byte]): ECPrivateKey = + ECPrivateKey(ByteVector(bytes)) +} diff --git a/metronome/crypto/src/io/iohk/metronome/crypto/ECPublicKey.scala b/metronome/crypto/src/io/iohk/metronome/crypto/ECPublicKey.scala new file mode 100644 index 
00000000..5036fb1d
--- /dev/null
+++ b/metronome/crypto/src/io/iohk/metronome/crypto/ECPublicKey.scala
@@ -0,0 +1,22 @@
+package io.iohk.metronome.crypto
+
+import scodec.Codec
+import scodec.bits.ByteVector
+import scodec.codecs.bytes
+
+/** Wraps the bytes representing an EC public key in uncompressed format and without the compression indicator */
+case class ECPublicKey(bytes: ByteVector) {
+  require(
+    bytes.length == ECPublicKey.Length,
+    s"Key must be ${ECPublicKey.Length} bytes long"
+  )
+}
+
+object ECPublicKey {
+  val Length = 64
+
+  def apply(bytes: Array[Byte]): ECPublicKey =
+    ECPublicKey(ByteVector(bytes))
+
+  implicit val codec: Codec[ECPublicKey] = bytes.as[ECPublicKey]
+}
diff --git a/metronome/crypto/src/io/iohk/metronome/crypto/Secp256k1Utils.scala b/metronome/crypto/src/io/iohk/metronome/crypto/Secp256k1Utils.scala
deleted file mode 100644
index 0223b2a1..00000000
--- a/metronome/crypto/src/io/iohk/metronome/crypto/Secp256k1Utils.scala
+++ /dev/null
@@ -1,28 +0,0 @@
-package io.iohk.metronome.crypto
-
-import java.security.SecureRandom
-import org.bouncycastle.crypto.AsymmetricCipherKeyPair
-import org.bouncycastle.crypto.params.ECPublicKeyParameters
-import scodec.bits.BitVector
-
-object Secp256k1Utils {
-
-  def generateKeyPair(
-      secureRandom: SecureRandom
-  ): AsymmetricCipherKeyPair = {
-    io.iohk.ethereum.crypto.generateKeyPair(secureRandom)
-  }
-
-  /** Returns secp256k1 public key bytes in uncompressed form, with compression indicator stripped
-   */
-  def keyPairToUncompressed(keyPair: AsymmetricCipherKeyPair): BitVector = {
-    BitVector(
-      keyPair.getPublic
-        .asInstanceOf[ECPublicKeyParameters]
-        .getQ
-        .getEncoded(false)
-        .drop(1)
-    )
-  }
-
-}
diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala
index 08a6fded..c0563268 100644
--- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala
@@ -75,5 +75,5 @@ object Federation {
   }
 
   /** Maximum number of Byzantine nodes in a federation of size `n` */
-  private def maxByzantine(n: Int): Int = (n - 1) / 3
+  def maxByzantine(n: Int): Int = (n - 1) / 3
 }
diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1Agreement.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1Agreement.scala
new file mode 100644
index 00000000..0aa5103e
--- /dev/null
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1Agreement.scala
@@ -0,0 +1,12 @@
+package io.iohk.metronome.hotstuff.consensus.basic
+
+import io.iohk.ethereum.crypto.ECDSASignature
+import io.iohk.metronome.crypto.{ECPrivateKey, ECPublicKey}
+
+trait Secp256k1Agreement extends Agreement {
+  override final type SKey = ECPrivateKey
+  override final type PKey = ECPublicKey
+  override final type PSig = ECDSASignature
+  // TODO (PM-2935): Replace list with threshold signatures.
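+  // For now the group signature is the plain list of partial signatures;
+  // Secp256k1Signing.validate checks that the recovered signers are
+  // distinct federation members whose count matches the quorum size.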
+  override final type GSig = List[ECDSASignature]
+}
diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1Signing.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1Signing.scala
new file mode 100644
index 00000000..60e44628
--- /dev/null
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1Signing.scala
@@ -0,0 +1,87 @@
+package io.iohk.metronome.hotstuff.consensus.basic
+
+import io.iohk.ethereum.crypto.ECDSASignature
+import io.iohk.metronome.crypto.hash.Keccak256
+import io.iohk.metronome.crypto.{
+  ECPrivateKey,
+  ECPublicKey,
+  GroupSignature,
+  PartialSignature
+}
+import io.iohk.metronome.hotstuff.consensus.basic.Signing.{GroupSig, PartialSig}
+import io.iohk.metronome.hotstuff.consensus.{Federation, ViewNumber}
+import scodec.bits.ByteVector
+
+/** Facilitates a Secp256k1 elliptic curve signing scheme using
+  * `io.iohk.ethereum.crypto.ECDSASignature`.
+  * A group signature is simply a concatenation (sequence) of partial signatures.
+  */
+class Secp256k1Signing[A <: Secp256k1Agreement](
+    contentSerializer: (VotingPhase, ViewNumber, A#Hash) => ByteVector
+) extends Signing[A] {
+
+  override def sign(
+      signingKey: ECPrivateKey,
+      phase: VotingPhase,
+      viewNumber: ViewNumber,
+      blockHash: A#Hash
+  ): PartialSig[A] = {
+    val msgHash = contentHash(phase, viewNumber, blockHash)
+    PartialSignature(ECDSASignature.sign(msgHash, signingKey.underlying))
+  }
+
+  override def combine(
+      signatures: Seq[PartialSig[A]]
+  ): GroupSig[A] =
+    GroupSignature(signatures.map(_.sig).toList)
+
+  /** Validate that a partial signature was created by a given public key,
+    * by recovering the signer's public key from the signature and comparing
+    * it to the expected one.
+    */
+  override def validate(
+      publicKey: ECPublicKey,
+      signature: PartialSig[A],
+      phase: VotingPhase,
+      viewNumber: ViewNumber,
+      blockHash: A#Hash
+  ): Boolean = {
+    val msgHash = contentHash(phase, viewNumber, blockHash)
+    signature.sig
+      .publicKey(msgHash)
+      .map(ECPublicKey(_))
+      .contains(publicKey)
+  }
+
+  /** Validate a group signature.
+    *
+    * Check that enough members of the federation signed,
+    * and only the members.
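+    *
+    * Concretely: the recovered signers must be pairwise distinct, each
+    * of them must be a federation member, and their number must match
+    * the federation's quorum size.
+    *
+    * A usage sketch, where `signing` stands for an instance of this class
+    * and `federation`, `partialSigs` and the vote data are hypothetical
+    * values:
+    *
+    * {{{
+    * val groupSig = signing.combine(partialSigs)
+    * val isValid =
+    *   signing.validate(federation, groupSig, phase, viewNumber, blockHash)
+    * }}}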
+ */ + override def validate( + federation: Federation[ECPublicKey], + signature: GroupSig[A], + phase: VotingPhase, + viewNumber: ViewNumber, + blockHash: A#Hash + ): Boolean = { + val msgHash = contentHash(phase, viewNumber, blockHash) + val signers = + signature.sig + .flatMap(s => s.publicKey(msgHash).map(ECPublicKey(_))) + .toSet + + val areUniqueSigners = signers.size == signature.sig.size + val areFederationMembers = (signers -- federation.publicKeys).isEmpty + val isQuorumReached = signers.size == federation.quorumSize + + areUniqueSigners && areFederationMembers && isQuorumReached + } + + private def contentHash( + phase: VotingPhase, + viewNumber: ViewNumber, + blockHash: A#Hash + ): Array[Byte] = + Keccak256(contentSerializer(phase, viewNumber, blockHash)).toArray +} diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Signing.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Signing.scala index 57c8c0d6..f5cf31b7 100644 --- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Signing.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Signing.scala @@ -1,7 +1,8 @@ package io.iohk.metronome.hotstuff.consensus.basic -import io.iohk.metronome.crypto.{PartialSignature, GroupSignature} -import io.iohk.metronome.hotstuff.consensus.{ViewNumber, Federation} +import io.iohk.metronome.crypto.{GroupSignature, PartialSignature} +import io.iohk.metronome.hotstuff.consensus.{Federation, ViewNumber} +import scodec.bits.ByteVector trait Signing[A <: Agreement] { @@ -16,10 +17,7 @@ trait Signing[A <: Agreement] { signatures: Seq[Signing.PartialSig[A]] ): Signing.GroupSig[A] - /** Validate that partial signature was created by a given public key. - * - * Check that the signer is part of the federation. - */ + /** Validate that partial signature was created by a given public key. 
*/ def validate( publicKey: A#PKey, signature: Signing.PartialSig[A], @@ -66,6 +64,10 @@ trait Signing[A <: Agreement] { object Signing { def apply[A <: Agreement: Signing]: Signing[A] = implicitly[Signing[A]] + def secp256k1[A <: Secp256k1Agreement]( + contentSerializer: (VotingPhase, ViewNumber, A#Hash) => ByteVector + ): Signing[A] = new Secp256k1Signing[A](contentSerializer) + type PartialSig[A <: Agreement] = PartialSignature[A#PKey, (VotingPhase, ViewNumber, A#Hash), A#PSig] diff --git a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/ArbitraryInstances.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/ArbitraryInstances.scala new file mode 100644 index 00000000..16084920 --- /dev/null +++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/ArbitraryInstances.scala @@ -0,0 +1,32 @@ +package io.iohk.metronome.hotstuff.consensus + +import io.iohk.metronome.crypto.hash.Hash +import io.iohk.metronome.hotstuff.consensus.basic.Phase.{ + Commit, + PreCommit, + Prepare +} +import io.iohk.metronome.hotstuff.consensus.basic.VotingPhase +import org.scalacheck.Arbitrary.arbitrary +import org.scalacheck.{Arbitrary, Gen} +import scodec.bits.ByteVector + +trait ArbitraryInstances { + + def sample[T: Arbitrary]: T = arbitrary[T].sample.get + + implicit val arbViewNumber: Arbitrary[ViewNumber] = Arbitrary { + Gen.posNum[Long].map(ViewNumber(_)) + } + + implicit val arbVotingPhase: Arbitrary[VotingPhase] = Arbitrary { + Gen.oneOf(Prepare, PreCommit, Commit) + } + + implicit val arbHash: Arbitrary[Hash] = + Arbitrary { + Gen.listOfN(32, arbitrary[Byte]).map(ByteVector(_)).map(Hash(_)) + } +} + +object ArbitraryInstances extends ArbitraryInstances diff --git a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/LeaderSelectionProps.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/LeaderSelectionProps.scala index 7880cf1e..ff8e4c1e 100644 --- a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/LeaderSelectionProps.scala +++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/LeaderSelectionProps.scala @@ -1,8 +1,9 @@ package io.iohk.metronome.hotstuff.consensus import io.iohk.metronome.core.Tagger -import org.scalacheck._ +import io.iohk.metronome.hotstuff.consensus.ArbitraryInstances._ import org.scalacheck.Prop.forAll +import org.scalacheck._ abstract class LeaderSelectionProps(name: String, val selector: LeaderSelection) extends Properties(name) { @@ -10,10 +11,6 @@ abstract class LeaderSelectionProps(name: String, val selector: LeaderSelection) object Size extends Tagger[Int] type Size = Size.Tagged - implicit val arbViewNumber: Arbitrary[ViewNumber] = Arbitrary { - Gen.posNum[Long].map(ViewNumber(_)) - } - implicit val arbFederationSize: Arbitrary[Size] = Arbitrary { Gen.posNum[Int].map(Size(_)) } diff --git a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1SigningProps.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1SigningProps.scala new file mode 100644 index 00000000..25a67e25 --- /dev/null +++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/Secp256k1SigningProps.scala @@ -0,0 +1,162 @@ +package io.iohk.metronome.hotstuff.consensus.basic + +import cats.implicits._ +import io.iohk.metronome.crypto +import io.iohk.metronome.crypto.hash.Hash +import io.iohk.metronome.crypto.{ECKeyPair, 
ECPublicKey} +import io.iohk.metronome.hotstuff.consensus.ArbitraryInstances._ +import io.iohk.metronome.hotstuff.consensus.{ + Federation, + LeaderSelection, + ViewNumber +} +import org.scalacheck.Arbitrary.arbitrary +import org.scalacheck.Prop.forAll +import org.scalacheck.{Gen, Properties, Test} +import scodec.bits.ByteVector + +import java.security.SecureRandom + +object Secp256k1SigningProps extends Properties("Secp256k1Signing") { + + override def overrideParameters(p: Test.Parameters): Test.Parameters = + p.withMinSuccessfulTests(10) + + object TestAgreement extends Secp256k1Agreement { + type Block = Nothing + type Hash = crypto.hash.Hash + } + type TestAgreement = TestAgreement.type + + def serializer( + phase: VotingPhase, + viewNumber: ViewNumber, + hash: crypto.hash.Hash + ): ByteVector = + ByteVector(phase.toString.getBytes) ++ + ByteVector.fromLong(viewNumber) ++ + hash + + def atLeast[A](n: Int, xs: Iterable[A]): Gen[Seq[A]] = { + require( + xs.size >= n, + s"There have to be at least $n elements to choose from" + ) + Gen.choose(n, xs.size).flatMap(Gen.pick(_, xs)).map(_.toSeq) + } + + val signing = Signing.secp256k1[TestAgreement](serializer) + + val keyPairs = List.fill(20)(ECKeyPair.generate(new SecureRandom)) + + def buildFederation(kps: Iterable[ECKeyPair]): Federation[ECPublicKey] = + Federation(kps.map(_.pub).toIndexedSeq)( + LeaderSelection.RoundRobin + ).valueOr(e => throw new Exception(s"Could not build Federation: $e")) + + property("partialSignatureCreation") = forAll( + Gen.oneOf(keyPairs), + arbitrary[ViewNumber], + arbitrary[VotingPhase], + arbitrary[Hash] + ) { (keyPair, viewNumber, votingPhase, hash) => + val partialSig = signing.sign(keyPair.prv, votingPhase, viewNumber, hash) + signing.validate(keyPair.pub, partialSig, votingPhase, viewNumber, hash) + } + + property("noFalseValidation") = forAll( + Gen.pick(2, keyPairs), + arbitrary[ViewNumber], + arbitrary[VotingPhase], + arbitrary[Hash] + ) { case (kps, viewNumber, votingPhase, hash) => + val Seq(signingKp, validationKp) = kps.toSeq + + val partialSig = signing.sign(signingKp.prv, votingPhase, viewNumber, hash) + + !signing.validate( + validationKp.pub, + partialSig, + votingPhase, + viewNumber, + hash + ) + } + + property("groupSignatureCreation") = forAll( + for { + kps <- Gen.atLeastOne(keyPairs) + fed = buildFederation(kps) + signers <- Gen.pick(fed.quorumSize, kps) + } yield (fed, signers.map(_.prv)), + arbitrary[ViewNumber], + arbitrary[VotingPhase], + arbitrary[Hash] + ) { case ((federation, prvKeys), viewNumber, votingPhase, hash) => + val partialSigs = + prvKeys.map(k => signing.sign(k, votingPhase, viewNumber, hash)) + val groupSig = signing.combine(partialSigs.toList) + + signing.validate(federation, groupSig, votingPhase, viewNumber, hash) + } + + property("groupSignatureNonUniqueSigners") = forAll( + for { + kps <- atLeast(2, keyPairs) + fed = buildFederation(kps) + signers <- Gen.pick(fed.quorumSize - 1, kps) + repeated <- Gen.oneOf(signers) + } yield (kps, signers.map(_.prv), repeated.prv), + arbitrary[ViewNumber], + arbitrary[VotingPhase], + arbitrary[Hash] + ) { case ((kps, prvKeys, repeated), viewNumber, votingPhase, hash) => + val federation = buildFederation(kps) + + val partialSigs = + (repeated +: prvKeys).map(k => + signing.sign(k, votingPhase, viewNumber, hash) + ) + val groupSig = signing.combine(partialSigs.toList) + + !signing.validate(federation, groupSig, votingPhase, viewNumber, hash) + } + + property("groupSignatureForeignSigners") = forAll( + for { + kps <- 
Gen.atLeastOne(keyPairs) if kps.size < keyPairs.size + fed = buildFederation(kps) + signers <- Gen.pick(fed.quorumSize - 1, kps) + foreign <- Gen.oneOf(keyPairs.diff(kps)) + } yield (fed, signers.map(_.prv), foreign.prv), + arbitrary[ViewNumber], + arbitrary[VotingPhase], + arbitrary[Hash] + ) { case ((federation, prvKeys, foreign), viewNumber, votingPhase, hash) => + val partialSigs = + (foreign +: prvKeys).map(k => + signing.sign(k, votingPhase, viewNumber, hash) + ) + val groupSig = signing.combine(partialSigs.toList) + + !signing.validate(federation, groupSig, votingPhase, viewNumber, hash) + } + + property("groupSignatureNoQuorum") = forAll( + for { + kps <- Gen.atLeastOne(keyPairs) + fed = buildFederation(kps) + n <- Gen.choose(0, kps.size) if n != fed.quorumSize + signers <- Gen.pick(n, kps) + } yield (signers.map(_.prv), fed), + arbitrary[ViewNumber], + arbitrary[VotingPhase], + arbitrary[Hash] + ) { case ((prvKeys, federation), viewNumber, votingPhase, hash) => + val partialSigs = + prvKeys.map(k => signing.sign(k, votingPhase, viewNumber, hash)) + val groupSig = signing.combine(partialSigs.toList) + + !signing.validate(federation, groupSig, votingPhase, viewNumber, hash) + } +} diff --git a/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala b/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala index 02003afc..ef44b620 100644 --- a/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala +++ b/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala @@ -1,6 +1,7 @@ package io.iohk.metronome.networking import cats.effect.{Resource, Sync} +import io.iohk.metronome.crypto.ECKeyPair import io.iohk.metronome.networking.EncryptedConnectionProvider.{ ConnectionAlreadyClosed, ConnectionError, @@ -21,7 +22,6 @@ import io.iohk.scalanet.peergroup.dynamictls.{DynamicTLSPeerGroup, Secp256k1} import io.iohk.scalanet.peergroup.{Channel, InetMultiAddress} import monix.eval.{Task, TaskLift} import monix.execution.Scheduler -import org.bouncycastle.crypto.AsymmetricCipherKeyPair import scodec.Codec import java.net.InetSocketAddress @@ -91,7 +91,7 @@ object ScalanetConnectionProvider { // Codec constraint for K is necessary as scalanet requires the peer key to be in BitVector format def scalanetProvider[F[_]: Sync: TaskLift, K: Codec, M: Codec]( bindAddress: InetSocketAddress, - nodeKeyPair: AsymmetricCipherKeyPair, + nodeKeyPair: ECKeyPair, secureRandom: SecureRandom, useNativeTlsImplementation: Boolean, framingConfig: FramingConfig, @@ -106,7 +106,7 @@ object ScalanetConnectionProvider { .Config( bindAddress, Secp256k1, - nodeKeyPair, + nodeKeyPair.underlying, secureRandom, useNativeTlsImplementation, framingConfig, diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala index 44e1a1f6..46bbce7f 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala @@ -2,6 +2,7 @@ package io.iohk.metronome.networking import cats.effect.Resource import cats.effect.concurrent.{Deferred, Ref} +import io.iohk.metronome.crypto.ECPublicKey import io.iohk.metronome.networking.ConnectionHandler.{ ConnectionAlreadyClosedException, FinishedConnection @@ -308,19 +309,19 @@ object ConnectionHandlerSpec { } } - implicit val tracers: 
NetworkTracers[Task, Secp256k1Key, TestMessage] = + implicit val tracers: NetworkTracers[Task, ECPublicKey, TestMessage] = NetworkTracers(Tracer.noOpTracer) def buildHandlerResource( - cb: FinishedConnection[Secp256k1Key] => Task[Unit] = _ => Task(()) - ): Resource[Task, ConnectionHandler[Task, Secp256k1Key, TestMessage]] = { + cb: FinishedConnection[ECPublicKey] => Task[Unit] = _ => Task(()) + ): Resource[Task, ConnectionHandler[Task, ECPublicKey, TestMessage]] = { ConnectionHandler - .apply[Task, Secp256k1Key, TestMessage](cb) + .apply[Task, ECPublicKey, TestMessage](cb) } def buildHandlerResourceWithCallbackCounter: Resource[ Task, - (ConnectionHandler[Task, Secp256k1Key, TestMessage], Ref[Task, Long]) + (ConnectionHandler[Task, ECPublicKey, TestMessage], Ref[Task, Long]) ] = { for { counter <- Resource.liftF(Ref.of[Task, Long](0L)) diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala b/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala index e4183784..06c8b0c0 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala @@ -2,11 +2,12 @@ package io.iohk.metronome.networking import cats.effect.concurrent.{Deferred, Ref, TryableDeferred} import cats.implicits.toFlatMapOps +import io.iohk.metronome.crypto.ECPublicKey import io.iohk.metronome.networking.EncryptedConnectionProvider.ConnectionAlreadyClosed import io.iohk.metronome.networking.MockEncryptedConnectionProvider._ import io.iohk.metronome.networking.RemoteConnectionManagerTestUtils.{ - Secp256k1Key, - TestMessage + TestMessage, + getFakeRandomKey } import io.iohk.metronome.networking.RemoteConnectionManagerWithMockProviderSpec.fakeLocalAddress import monix.catnap.ConcurrentQueue @@ -18,14 +19,14 @@ class MockEncryptedConnectionProvider( private val incomingConnections: ConcurrentQueue[Task, IncomingServerEvent], private val onlineConnections: Ref[ Task, - Map[Secp256k1Key, MockEncryptedConnection] + Map[ECPublicKey, MockEncryptedConnection] ], private val connectionStatistics: ConnectionStatisticsHolder, - val localPeerInfo: (Secp256k1Key, InetSocketAddress) = - (Secp256k1Key.getFakeRandomKey, fakeLocalAddress) -) extends EncryptedConnectionProvider[Task, Secp256k1Key, TestMessage] { + val localPeerInfo: (ECPublicKey, InetSocketAddress) = + (getFakeRandomKey(), fakeLocalAddress) +) extends EncryptedConnectionProvider[Task, ECPublicKey, TestMessage] { - private def connect(k: Secp256k1Key) = { + private def connect(k: ECPublicKey) = { onlineConnections.get.flatMap { state => state.get(k) match { case Some(value) => Task.now(value) @@ -36,7 +37,7 @@ class MockEncryptedConnectionProvider( } override def connectTo( - k: Secp256k1Key, + k: ECPublicKey, address: InetSocketAddress ): Task[MockEncryptedConnection] = { (for { @@ -53,7 +54,7 @@ object MockEncryptedConnectionProvider { def apply(): Task[MockEncryptedConnectionProvider] = { for { queue <- ConcurrentQueue.unbounded[Task, IncomingServerEvent]() - connections <- Ref.of[Task, Map[Secp256k1Key, MockEncryptedConnection]]( + connections <- Ref.of[Task, Map[ECPublicKey, MockEncryptedConnection]]( Map.empty ) connectionsStatistics <- Ref.of[Task, ConnectionStatistics]( @@ -72,7 +73,7 @@ object MockEncryptedConnectionProvider { private def disconnect( withFailure: Boolean, - chosenPeer: Option[Secp256k1Key] = None + chosenPeer: 
Option[ECPublicKey] = None ): Task[MockEncryptedConnection] = { provider.onlineConnections .modify { current => @@ -98,7 +99,7 @@ object MockEncryptedConnectionProvider { } def specificPeerDisconnect( - key: Secp256k1Key + key: ECPublicKey ): Task[MockEncryptedConnection] = { disconnect(withFailure = false, Some(key)) } @@ -107,7 +108,7 @@ object MockEncryptedConnectionProvider { disconnect(withFailure = true) } - def registerOnlinePeer(key: Secp256k1Key): Task[MockEncryptedConnection] = { + def registerOnlinePeer(key: ECPublicKey): Task[MockEncryptedConnection] = { for { connection <- MockEncryptedConnection((key, fakeLocalAddress)) _ <- provider.onlineConnections.update { connections => @@ -125,7 +126,7 @@ object MockEncryptedConnectionProvider { ) } - def newIncomingPeer(key: Secp256k1Key): Task[MockEncryptedConnection] = { + def newIncomingPeer(key: ECPublicKey): Task[MockEncryptedConnection] = { registerOnlinePeer(key).flatMap { connection => provider.incomingConnections .offer(Some(Right(connection))) @@ -134,7 +135,7 @@ object MockEncryptedConnectionProvider { } def getReceivedMessagesPerPeer - : Task[Set[(Secp256k1Key, List[TestMessage])]] = { + : Task[Set[(ECPublicKey, List[TestMessage])]] = { provider.onlineConnections.get.flatMap { connections => Task.traverse(connections.toSet) { case (key, connection) => connection.getReceivedMessages.map(received => (key, received)) @@ -150,11 +151,11 @@ object MockEncryptedConnectionProvider { case class ConnectionStatistics( inFlightConnections: Long, maxInFlightConnections: Long, - connectionCounts: Map[Secp256k1Key, Long] + connectionCounts: Map[ECPublicKey, Long] ) class ConnectionStatisticsHolder(val stats: Ref[Task, ConnectionStatistics]) { - def incrementInFlight(connectionTo: Secp256k1Key): Task[Unit] = { + def incrementInFlight(connectionTo: ECPublicKey): Task[Unit] = { stats.update { current => val newInFlight = current.inFlightConnections + 1 val newMax = @@ -181,7 +182,7 @@ object MockEncryptedConnectionProvider { type IncomingServerEvent = Option[Either[ EncryptedConnectionProvider.HandshakeFailed, - EncryptedConnection[Task, Secp256k1Key, TestMessage] + EncryptedConnection[Task, ECPublicKey, TestMessage] ]] type IncomingConnectionEvent = @@ -194,9 +195,9 @@ object MockEncryptedConnectionProvider { ], private val closeToken: TryableDeferred[Task, Unit], private val sentMessages: Ref[Task, List[TestMessage]], - val remotePeerInfo: (Secp256k1Key, InetSocketAddress) = - (Secp256k1Key.getFakeRandomKey, fakeLocalAddress) - ) extends EncryptedConnection[Task, Secp256k1Key, TestMessage] { + val remotePeerInfo: (ECPublicKey, InetSocketAddress) = + (getFakeRandomKey(), fakeLocalAddress) + ) extends EncryptedConnection[Task, ECPublicKey, TestMessage] { override def close: Task[Unit] = { Task @@ -224,8 +225,8 @@ object MockEncryptedConnectionProvider { object MockEncryptedConnection { def apply( - remotePeerInfo: (Secp256k1Key, InetSocketAddress) = - (Secp256k1Key.getFakeRandomKey, fakeLocalAddress) + remotePeerInfo: (ECPublicKey, InetSocketAddress) = + (getFakeRandomKey(), fakeLocalAddress) ): Task[MockEncryptedConnection] = { for { incomingEvents <- ConcurrentQueue diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala index 18bcc686..258464e8 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala +++ 
b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerTestUtils.scala @@ -1,16 +1,17 @@ package io.iohk.metronome.networking import cats.effect.Resource -import io.iohk.metronome.crypto.Secp256k1Utils +import io.iohk.metronome.crypto.{ECKeyPair, ECPublicKey} + import java.net.{InetSocketAddress, ServerSocket} import java.security.SecureRandom import monix.eval.Task import monix.execution.Scheduler -import org.bouncycastle.crypto.AsymmetricCipherKeyPair import org.scalatest.Assertion + import scala.concurrent.Future import scala.util.Random -import scodec.bits.BitVector +import scodec.bits.ByteVector import scodec.Codec object RemoteConnectionManagerTestUtils { @@ -48,31 +49,18 @@ object RemoteConnectionManagerTestUtils { .typecase(2, utf8.as[MessageB]) } - case class Secp256k1Key(key: BitVector) - - object Secp256k1Key { - implicit val codec: Codec[Secp256k1Key] = bits.as[Secp256k1Key] - - def getFakeRandomKey: Secp256k1Key = { - val array = new Array[Byte](64) - Random.nextBytes(array) - Secp256k1Key(BitVector(array)) - } - + def getFakeRandomKey(): ECPublicKey = { + val array = new Array[Byte](ECPublicKey.Length) + Random.nextBytes(array) + ECPublicKey(ByteVector(array)) } - case class NodeInfo(keyPair: AsymmetricCipherKeyPair, publicKey: Secp256k1Key) + case class NodeInfo(keyPair: ECKeyPair) object NodeInfo { def generateRandom(secureRandom: SecureRandom): NodeInfo = { - val keyPair = - Secp256k1Utils.generateKeyPair(secureRandom) - NodeInfo( - keyPair, - Secp256k1Key( - Secp256k1Utils.keyPairToUncompressed(keyPair) - ) - ) + val keyPair = ECKeyPair.generate(secureRandom) + NodeInfo(keyPair) } } } diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala index 73062941..f02d9862 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala @@ -1,6 +1,7 @@ package io.iohk.metronome.networking import cats.effect.Resource +import io.iohk.metronome.crypto.ECPublicKey import io.iohk.metronome.networking.ConnectionHandler.ConnectionAlreadyClosedException import io.iohk.metronome.networking.EncryptedConnectionProvider.DecodingError import io.iohk.metronome.networking.MockEncryptedConnectionProvider._ @@ -59,7 +60,7 @@ class RemoteConnectionManagerWithMockProviderSpec it should "continue to make connections to unresponsive peers one connection at a time" in customTestCaseT { val connectionToMake = - (0 to 3).map(_ => (Secp256k1Key.getFakeRandomKey, fakeLocalAddress)).toSet + (0 to 3).map(_ => (getFakeRandomKey(), fakeLocalAddress)).toSet MockEncryptedConnectionProvider().flatMap(provider => buildConnectionsManagerWithMockProvider( provider, @@ -168,7 +169,7 @@ it should "fail sending message to unknown peer" in customTestCaseResourceT( buildTestCaseWithNPeers(2) ) { case (provider, manager, _) => - val randomKey = Secp256k1Key.getFakeRandomKey + val randomKey = getFakeRandomKey() for { sendResult <- manager.sendMessage(randomKey, MessageA(1)) } yield { @@ -186,7 +187,7 @@ ) { case (provider, manager, _) => for { incomingPeerConnection <- provider.newIncomingPeer( - Secp256k1Key.getFakeRandomKey + getFakeRandomKey() ) _ 
<- Task.sleep(100.milliseconds) notContainsNotAllowedIncoming <- manager.notContainsConnection( @@ -269,7 +270,7 @@ class RemoteConnectionManagerWithMockProviderSpec object RemoteConnectionManagerWithMockProviderSpec { implicit class RemoteConnectionManagerOps( - manager: RemoteConnectionManager[Task, Secp256k1Key, TestMessage] + manager: RemoteConnectionManager[Task, ECPublicKey, TestMessage] ) { def waitForNConnections( n: Int @@ -309,11 +310,11 @@ object RemoteConnectionManagerWithMockProviderSpec { Task, ( MockEncryptedConnectionProvider, - RemoteConnectionManager[Task, Secp256k1Key, TestMessage], - Set[Secp256k1Key] + RemoteConnectionManager[Task, ECPublicKey, TestMessage], + Set[ECPublicKey] ) ] = { - val keys = (0 until n).map(_ => (Secp256k1Key.getFakeRandomKey)).toSet + val keys = (0 until n).map(_ => getFakeRandomKey()).toSet for { provider <- Resource.liftF(MockEncryptedConnectionProvider()) @@ -341,21 +342,21 @@ object RemoteConnectionManagerWithMockProviderSpec { val fakeLocalAddress = new InetSocketAddress("localhost", 127) - val defalutAllowed = Secp256k1Key.getFakeRandomKey - val defaultToMake = Secp256k1Key.getFakeRandomKey + val defalutAllowed = getFakeRandomKey() + val defaultToMake = getFakeRandomKey() - implicit val tracers: NetworkTracers[Task, Secp256k1Key, TestMessage] = + implicit val tracers: NetworkTracers[Task, ECPublicKey, TestMessage] = NetworkTracers(Tracer.noOpTracer) def buildConnectionsManagerWithMockProvider( ec: MockEncryptedConnectionProvider, retryConfig: RetryConfig = quickRetryConfig, - nodesInCluster: Set[(Secp256k1Key, InetSocketAddress)] = Set( + nodesInCluster: Set[(ECPublicKey, InetSocketAddress)] = Set( (defaultToMake, fakeLocalAddress) ) ): Resource[ Task, - RemoteConnectionManager[Task, Secp256k1Key, TestMessage] + RemoteConnectionManager[Task, ECPublicKey, TestMessage] ] = { val clusterConfig = ClusterConfig(nodesInCluster) diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala index 427050b8..161a14eb 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala @@ -2,9 +2,9 @@ package io.iohk.metronome.networking import cats.data.NonEmptyList import cats.effect.concurrent.Ref -import cats.effect.{Concurrent, ContextShift, Resource, Timer, Sync} -import io.circe.{Json, JsonObject, Encoder} -import io.iohk.metronome.crypto.Secp256k1Utils +import cats.effect.{Concurrent, ContextShift, Resource, Sync, Timer} +import io.circe.{Encoder, Json, JsonObject} +import io.iohk.metronome.crypto.{ECKeyPair, ECPublicKey} import io.iohk.metronome.networking.ConnectionHandler.MessageReceived import io.iohk.metronome.networking.RemoteConnectionManager.{ ClusterConfig, @@ -15,25 +15,26 @@ import io.iohk.metronome.networking.RemoteConnectionManagerWithScalanetProviderS Cluster, buildTestConnectionManager } -import io.iohk.metronome.logging.{HybridLogObject, HybridLog, LogTracer} +import io.iohk.metronome.logging.{HybridLog, HybridLogObject, LogTracer} import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSPeerGroup.FramingConfig import io.iohk.scalanet.peergroup.PeerGroup + import java.net.InetSocketAddress import java.security.SecureRandom import monix.eval.{Task, TaskLift, TaskLike} import 
monix.execution.Scheduler import monix.execution.UncaughtExceptionReporter -import org.bouncycastle.crypto.AsymmetricCipherKeyPair import org.scalatest.flatspec.AsyncFlatSpecLike import org.scalatest.Inspectors import org.scalatest.matchers.should.Matchers + import scala.concurrent.duration._ import scodec.Codec class RemoteConnectionManagerWithScalanetProviderSpec extends AsyncFlatSpecLike with Matchers { - import RemoteConnectionManagerWithScalanetProviderSpec.secp256k1Encoder + import RemoteConnectionManagerWithScalanetProviderSpec.ecPublicKeyEncoder implicit val testScheduler = Scheduler.fixedPool( @@ -54,7 +55,7 @@ class RemoteConnectionManagerWithScalanetProviderSpec behavior of "RemoteConnectionManagerWithScalanetProvider" it should "start connectionManager without any connections" in customTestCaseResourceT( - buildTestConnectionManager[Task, Secp256k1Key, TestMessage]() + buildTestConnectionManager[Task, ECPublicKey, TestMessage]() ) { connectionManager => for { connections <- connectionManager.getAcquiredConnections @@ -134,8 +135,8 @@ object RemoteConnectionManagerWithScalanetProviderSpec { FramingConfig.buildStandardFrameConfig(1000000, 4).getOrElse(null) val testIncomingQueueSize = 20 - implicit val secp256k1Encoder: Encoder[Secp256k1Key] = - Encoder.instance(key => Json.fromString(key.key.toHex)) + implicit val ecPublicKeyEncoder: Encoder[ECPublicKey] = + Encoder.instance(key => Json.fromString(key.bytes.toHex)) // Just an example of setting up logging. implicit def tracers[F[_]: Sync, K: io.circe.Encoder, M] @@ -176,8 +177,7 @@ object RemoteConnectionManagerWithScalanetProviderSpec { M: Codec ]( bindAddress: InetSocketAddress = randomAddress(), - nodeKeyPair: AsymmetricCipherKeyPair = - Secp256k1Utils.generateKeyPair(secureRandom), + nodeKeyPair: ECKeyPair = ECKeyPair.generate(secureRandom), secureRandom: SecureRandom = secureRandom, useNativeTlsImplementation: Boolean = false, framingConfig: FramingConfig = standardFraming, @@ -205,11 +205,11 @@ object RemoteConnectionManagerWithScalanetProviderSpec { } type ClusterNodes = Map[ - Secp256k1Key, + ECPublicKey, ( - RemoteConnectionManager[Task, Secp256k1Key, TestMessage], - AsymmetricCipherKeyPair, - ClusterConfig[Secp256k1Key], + RemoteConnectionManager[Task, ECPublicKey, TestMessage], + ECKeyPair, + ClusterConfig[ECPublicKey], Task[Unit] ) ] @@ -227,11 +227,11 @@ object RemoteConnectionManagerWithScalanetProviderSpec { _ <- Task.traverse(keyWithAddress) { case (info, address) => val clusterConfig = ClusterConfig(clusterNodes = keyWithAddress.map(keyWithAddress => - (keyWithAddress._1.publicKey, keyWithAddress._2) + (keyWithAddress._1.keyPair.pub, keyWithAddress._2) ) ) - buildTestConnectionManager[Task, Secp256k1Key, TestMessage]( + buildTestConnectionManager[Task, ECPublicKey, TestMessage]( bindAddress = address, nodeKeyPair = info.keyPair, clusterConfig = clusterConfig @@ -248,7 +248,7 @@ object RemoteConnectionManagerWithScalanetProviderSpec { class Cluster(nodes: Ref[Task, ClusterNodes]) { private def broadcastToAllConnections( - manager: RemoteConnectionManager[Task, Secp256k1Key, TestMessage], + manager: RemoteConnectionManager[Task, ECPublicKey, TestMessage], message: TestMessage ) = { manager.getAcquiredConnections.flatMap { connections => @@ -300,7 +300,7 @@ object RemoteConnectionManagerWithScalanetProviderSpec { def sendMessageFromRandomNodeToAllOthers( message: TestMessage - ): Task[(Secp256k1Key, Set[Secp256k1Key])] = { + ): Task[(ECPublicKey, Set[ECPublicKey])] = { for { runningNodes <- nodes.get (key, (node, 
_, _, _)) = runningNodes.head @@ -310,7 +310,7 @@ object RemoteConnectionManagerWithScalanetProviderSpec { def sendMessageFromAllClusterNodesToTheirConnections( message: TestMessage - ): Task[List[(Secp256k1Key, Set[Secp256k1Key])]] = { + ): Task[List[(ECPublicKey, Set[ECPublicKey])]] = { nodes.get.flatMap { current => Task.parTraverseUnordered(current.values) { case (manager, _, _, _) => broadcastToAllConnections(manager, message).map { receivers => @@ -320,14 +320,14 @@ object RemoteConnectionManagerWithScalanetProviderSpec { } } - def getMessageFromNode(key: Secp256k1Key) = { + def getMessageFromNode(key: ECPublicKey) = { nodes.get.flatMap { runningNodes => runningNodes(key)._1.incomingMessages.take(1).toListL.map(_.head) } } def shutdownRandomNode: Task[ - (InetSocketAddress, AsymmetricCipherKeyPair, ClusterConfig[Secp256k1Key]) + (InetSocketAddress, ECKeyPair, ClusterConfig[ECPublicKey]) ] = { for { current <- nodes.get @@ -342,16 +342,16 @@ object RemoteConnectionManagerWithScalanetProviderSpec { def startNode( bindAddress: InetSocketAddress, - key: AsymmetricCipherKeyPair, - clusterConfig: ClusterConfig[Secp256k1Key] + keyPair: ECKeyPair, + clusterConfig: ClusterConfig[ECPublicKey] )(implicit s: Scheduler): Task[Unit] = { - buildTestConnectionManager[Task, Secp256k1Key, TestMessage]( + buildTestConnectionManager[Task, ECPublicKey, TestMessage]( bindAddress = bindAddress, - nodeKeyPair = key, + nodeKeyPair = keyPair, clusterConfig = clusterConfig ).allocated.flatMap { case (manager, release) => nodes.update { current => - current + (manager.getLocalPeerInfo._1 -> (manager, key, clusterConfig, release)) + current + (manager.getLocalPeerInfo._1 -> (manager, keyPair, clusterConfig, release)) } } } From ce40d17b5e3c25946c34dbfaa26eda5c24707981 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Tue, 11 May 2021 23:14:20 +0100 Subject: [PATCH 25/48] PM-3104: Insert blocks in any order (#29) * PM-3104: Add KVStore.alter method. * PM-3104: Separate the update function argument. * PM-3104: Insert in any order. * PM-3104: Keep parent-child relationships until children are removed. * PM-3104: Test descendants can safely be deleted in the opposite order * PM-3104: Update is a special case of alter. * PM-3104: Deterministic random. Simplified delete. * PM-3104: A bit nicer check at the end of getDescendants. --- .../service/storage/BlockStorage.scala | 31 ++++++++--- .../service/storage/BlockStorageProps.scala | 55 ++++++++++++++++++- .../iohk/metronome/storage/KVCollection.scala | 8 ++- .../io/iohk/metronome/storage/KVStore.scala | 24 ++++++-- 4 files changed, 100 insertions(+), 18 deletions(-) diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala index 91835355..087b0e4d 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala @@ -29,8 +29,10 @@ class BlockStorage[N, A <: Agreement: Block]( blockColl.put(blockHash, block) >> childToParentColl.put(blockHash, parentHash) >> - parentToChildrenColl.put(blockHash, Set.empty) >> - parentToChildrenColl.update(parentHash, _ + blockHash) + parentToChildrenColl.alter(parentHash) { maybeChildren => + maybeChildren orElse Set.empty.some map (_ + blockHash) + } + } /** Retrieve a block by hash, if it exists. 
*/ @@ -86,16 +88,23 @@ class BlockStorage[N, A <: Agreement: Block]( /** Delete a block and remove it from any parent-to-child mapping, * without any checking for the tree structure invariants. */ - private def deleteUnsafe(blockHash: A#Hash): KVStore[N, Unit] = + private def deleteUnsafe(blockHash: A#Hash): KVStore[N, Unit] = { + def deleteIfEmpty(maybeChildren: Option[Set[A#Hash]]) = + maybeChildren.filter(_.nonEmpty) + childToParentColl.get(blockHash).flatMap { case None => KVStore[N].unit case Some(parentHash) => - parentToChildrenColl.update(parentHash, _ - blockHash) + parentToChildrenColl.alter(parentHash) { maybeChildren => + deleteIfEmpty(maybeChildren.map(_ - blockHash)) + } } >> blockColl.delete(blockHash) >> childToParentColl.delete(blockHash) >> - parentToChildrenColl.delete(blockHash) + // Keep the association from existing children, until the last one is deleted. + parentToChildrenColl.alter(blockHash)(deleteIfEmpty) + } /** Get the ancestor chain of a block from the root, * including the block itself. @@ -151,13 +160,21 @@ class BlockStorage[N, A <: Agreement: Block]( case Some((blockHash, queue)) => parentToChildrenColl.read(blockHash).flatMap { case None => - loop(queue, acc) + // Since we're not inserting an empty child set, + // we can't tell here if the block exists or not. + loop(queue, blockHash :: acc) case Some(children) => loop(queue ++ children, blockHash :: acc) } } } - loop(Queue(blockHash), Nil) + + loop(Queue(blockHash), Nil).flatMap { + case result @ List(`blockHash`) => + result.filterA(contains) + case result => + KVStoreRead[N].pure(result) + } } /** Delete all blocks which are not descendants of a given block, diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala index 9f51f048..33473dbd 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala @@ -5,9 +5,11 @@ import io.iohk.metronome.storage.{KVCollection, KVStoreState} import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, Block => BlockOps} import java.util.UUID import org.scalacheck._ +import org.scalacheck.Arbitrary.arbitrary import org.scalacheck.Prop.{all, forAll, propBoolean} import scodec.codecs.implicits._ import scodec.Codec +import scala.util.Random object BlockStorageProps extends Properties("BlockStorage") { @@ -48,7 +50,12 @@ object BlockStorageProps extends Properties("BlockStorage") { new KVCollection[Namespace, Hash, Set[Hash]](Namespace.BlockToChildren) ) - object TestKVStore extends KVStoreState[Namespace] + object TestKVStore extends KVStoreState[Namespace] { + def build(tree: List[TestBlock]): Store = { + val insert = tree.map(TestBlockStorage.put).sequence + compile(insert).runS(Map.empty).value + } + } implicit class TestStoreOps(store: TestKVStore.Store) { def putBlock(block: TestBlock) = @@ -131,8 +138,7 @@ ) object TestData { def apply(tree: List[TestBlock]): TestData = { - val insert = tree.map(TestBlockStorage.put).sequence - val store = TestKVStore.compile(insert).runS(Map.empty).value + val store = TestKVStore.build(tree) TestData(tree, store) } } @@ -162,6 +168,18 @@ s(Namespace.BlockToParent)(block.id) == 
block.parentId } + property("put unordered") = forAll { + for { + ordered <- genNonEmptyBlockTree + seed <- arbitrary[Int] + unordered = new Random(seed).shuffle(ordered) + } yield (ordered, unordered) + } { case (ordered, unordered) => + val orderedStore = TestKVStore.build(ordered) + val unorderedStore = TestKVStore.build(unordered) + orderedStore == unorderedStore + } + property("contains existing") = forAll(genExisting) { case (data, existing) => data.store.containsBlock(existing.id) } @@ -195,6 +213,13 @@ object BlockStorageProps extends Properties("BlockStorage") { data.store.deleteBlock(nonExisting.id)._2 == true } + property("reinsert one") = forAll(genExisting) { case (data, existing) => + val (deleted, _) = data.store.deleteBlock(existing.id) + val inserted = deleted.putBlock(existing) + // The existing child relationships should not be lost. + inserted == data.store + } + property("getPathFromRoot existing") = forAll(genExisting) { case (data, existing) => val path = data.store.getPathFromRoot(existing.id) @@ -227,6 +252,30 @@ object BlockStorageProps extends Properties("BlockStorage") { data.store.getDescendants(nonExisting.id).isEmpty } + property("getDescendants delete") = forAll(genSubTree) { + case (data, block, subTree) => + val ds = data.store.getDescendants(block.id) + + val (deleted, ok) = ds.foldLeft((data.store, true)) { + case ((store, oks), blockHash) => + val (deleted, ok) = store.deleteBlock(blockHash) + (deleted, oks && ok) + } + + val prefixTree = data.tree.takeWhile(_ != block) + val prefixStore = TestKVStore.build(prefixTree) + + all( + "ok" |: ok, + "not contains deleted" |: + ds.forall(!deleted.containsBlock(_)), + "contains non deleted" |: + prefixTree.map(_.id).forall(deleted.containsBlock(_)), + "same as a rebuild" |: + prefixStore == deleted + ) + } + property("pruneNonDescendants existing") = forAll(genSubTree) { case (data, block, subTree) => val (s, ps) = data.store.pruneNonDescendants(block.id) diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVCollection.scala b/metronome/storage/src/io/iohk/metronome/storage/KVCollection.scala index f6f026ac..927ff55a 100644 --- a/metronome/storage/src/io/iohk/metronome/storage/KVCollection.scala +++ b/metronome/storage/src/io/iohk/metronome/storage/KVCollection.scala @@ -32,6 +32,10 @@ class KVCollection[N, K: Codec, V: Codec](namespace: N) { KVStore[N].delete(namespace, key) /** Update a key by getting the value and applying a function on it, if the value exists. */ - def update(key: K, f: V => V): KVStore[N, Unit] = - KVStore[N].update(namespace, key, f) + def update(key: K)(f: V => V): KVStore[N, Unit] = + KVStore[N].update(namespace, key)(f) + + /** Insert, update or delete a value, depending on whether it exists. */ + def alter(key: K)(f: Option[V] => Option[V]): KVStore[N, Unit] = + KVStore[N].alter(namespace, key)(f) } diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala index 650dd780..ab66c80a 100644 --- a/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala +++ b/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala @@ -31,6 +31,7 @@ object KVStore { def pure[A](a: A) = KVStore.pure[N, A](a) + /** Insert or replace a value under a key. */ def put[K: Codec, V: Codec]( namespace: N, key: K, @@ -40,24 +41,35 @@ object KVStore { Put[N, K, V](namespace, key, value) ) + /** Get a value under a key, if it exists. 
*/ def get[K: Codec, V: Codec](namespace: N, key: K): KVStore[N, Option[V]] = liftF[KVNamespacedOp, Option[V]]( Get[N, K, V](namespace, key) ) + /** Delete a value under a key. */ def delete[K: Codec](namespace: N, key: K): KVStore[N, Unit] = liftF[KVNamespacedOp, Unit]( Delete[N, K](namespace, key) ) - def update[K: Codec, V: Codec]( - namespace: N, - key: K, + /** Apply a function on a value, if it exists. */ + def update[K: Codec, V: Codec](namespace: N, key: K)( f: V => V ): KVStore[N, Unit] = - get[K, V](namespace, key).flatMap { - case None => unit - case Some(value) => put(namespace, key, f(value)) + alter[K, V](namespace, key)(_ map f) + + /** Insert, update or delete a value, depending on whether it exists. */ + def alter[K: Codec, V: Codec](namespace: N, key: K)( + f: Option[V] => Option[V] + ): KVStore[N, Unit] = + get[K, V](namespace, key).flatMap { current => + (current, f(current)) match { + case ((None, None)) => unit + case ((Some(existing), Some(value))) if existing == value => unit + case (_, Some(value)) => put(namespace, key, value) + case (Some(_), None) => delete(namespace, key) + } } /** Lift a read-only operation into a read-write one, so that we can chain them together. */ From 0fce067fe79943254445621f73664f02ca61c4c5 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Tue, 11 May 2021 23:19:08 +0100 Subject: [PATCH 26/48] PM-2936: Remote messaging protocol (#23) * PM-2936: Remote messages in checkpointing, Protocol message in hotstuff. * PM-2936: Status message contents. * PM-2936: Reorg messages to be rooted in the HotStuff module. * PM-2936: Moved RPCMessage back to hotstuff from core. * PM-2936: Restrict self-type to be RPCMessage, rather than inherit it. * PM-2936: Moved RPCMessage back to core so we can use it in checkpointing.interpreter as well. 
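For illustration, here is a minimal sketch of the request/response pairing convention this introduces (the `PingMessage` family and `PingExample` are made up for the example; only `RPCMessage` and `RPCMessageCompanion` come from this patch). A message family extends `RPCMessageCompanion`, requests carry a fresh `RequestId`, and responses echo it back so the caller can correlate them with its outstanding requests:

    import io.iohk.metronome.core.messages.{RPCMessage, RPCMessageCompanion}

    // A hypothetical message family, used only to show the pairing convention.
    sealed trait PingMessage { self: RPCMessage => }

    object PingMessage extends RPCMessageCompanion {
      case class Ping(requestId: RequestId) extends PingMessage with Request
      case class Pong(requestId: RequestId) extends PingMessage with Response
    }

    object PingExample extends App {
      // The requester generates a fresh id and remembers the request.
      val ping = PingMessage.Ping(PingMessage.RequestId())
      // The responder echoes the id back, so the requester can match the
      // response against its map of pending requests.
      val pong = PingMessage.Pong(ping.requestId)
      assert(pong.requestId == ping.requestId)
    }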
--- .../messages/CheckpointingMessage.scala | 31 ++++++++++++++++ .../metronome/core/messages/RPCMessage.scala | 23 ++++++++++++ .../metronome/hotstuff/service/Status.scala | 16 +++++++++ .../service/messages/DuplexMessage.scala | 24 +++++++++++++ .../service/messages/HotStuffMessage.scala | 23 ++++++++++++ .../service/messages/SyncMessage.scala | 35 +++++++++++++++++++ 6 files changed, 152 insertions(+) create mode 100644 metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/messages/CheckpointingMessage.scala create mode 100644 metronome/core/src/io/iohk/metronome/core/messages/RPCMessage.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Status.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/DuplexMessage.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/HotStuffMessage.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/messages/CheckpointingMessage.scala b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/messages/CheckpointingMessage.scala new file mode 100644 index 00000000..de7bb493 --- /dev/null +++ b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/messages/CheckpointingMessage.scala @@ -0,0 +1,31 @@ +package io.iohk.metronome.checkpointing.service.messages + +import io.iohk.metronome.checkpointing.models.Ledger +import io.iohk.metronome.core.messages.{RPCMessage, RPCMessageCompanion} + +/** Checkpointing specific messages that the HotStuff service doesn't handle, + * which is the synchronisation of committed ledger state. + * + * These will be wrapped in an `ApplicationMessage`. + */ +sealed trait CheckpointingMessage { self: RPCMessage => } + +object CheckpointingMessage extends RPCMessageCompanion { + + /** Request the ledger state given by a specific hash. + * + * The hash is something coming from a block that was + * pointed at by a Commit Q.C. + */ + case class GetStateRequest( + requestId: RequestId, + stateHash: Ledger.Hash + ) extends CheckpointingMessage + with Request + + case class GetStateResponse( + requestId: RequestId, + state: Ledger + ) extends CheckpointingMessage + with Response +} diff --git a/metronome/core/src/io/iohk/metronome/core/messages/RPCMessage.scala b/metronome/core/src/io/iohk/metronome/core/messages/RPCMessage.scala new file mode 100644 index 00000000..7b691f03 --- /dev/null +++ b/metronome/core/src/io/iohk/metronome/core/messages/RPCMessage.scala @@ -0,0 +1,23 @@ +package io.iohk.metronome.core.messages + +import java.util.UUID + +/** Messages that go in request/response pairs. */ +trait RPCMessage { + + /** Unique identifier for request, which is expected to be + * included in the response message that comes back. 
+ */ + def requestId: UUID +} + +abstract class RPCMessageCompanion { + type RequestId = UUID + object RequestId { + def apply(): RequestId = + UUID.randomUUID() + } + + trait Request extends RPCMessage + trait Response extends RPCMessage +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Status.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Status.scala new file mode 100644 index 00000000..faf1dfc0 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Status.scala @@ -0,0 +1,16 @@ +package io.iohk.metronome.hotstuff.service + +import io.iohk.metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.hotstuff.consensus.basic.Agreement +import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate + +/** Status has all the fields necessary for nodes to sync with each other. + * + * This is to facilitate nodes rejoining the network, + * or re-syncing their views after some network glitch. + */ +case class Status[A <: Agreement]( + viewNumber: ViewNumber, + prepareQC: QuorumCertificate[A], + commitQC: QuorumCertificate[A] +) diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/DuplexMessage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/DuplexMessage.scala new file mode 100644 index 00000000..f80ea1de --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/DuplexMessage.scala @@ -0,0 +1,24 @@ +package io.iohk.metronome.hotstuff.service.messages + +import io.iohk.metronome.hotstuff +import io.iohk.metronome.hotstuff.consensus.basic.Agreement + +/** The message type to use in the networking layer if the use case has + * application specific message types, e.g. ledger synchronisation, + * not just the general BFT agreement (which could be enough if + * we need to execute all blocks to synchronise state). + */ +sealed trait DuplexMessage[A <: Agreement, M] + +object DuplexMessage { + + /** General BFT agreement message. */ + case class AgreementMessage[A <: Agreement]( + message: hotstuff.service.messages.HotStuffMessage[A] + ) extends DuplexMessage[A, Nothing] + + /** Application specific message. */ + case class ApplicationMessage[M]( + message: M + ) extends DuplexMessage[Nothing, M] +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/HotStuffMessage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/HotStuffMessage.scala new file mode 100644 index 00000000..90809bf9 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/HotStuffMessage.scala @@ -0,0 +1,23 @@ +package io.iohk.metronome.hotstuff.service.messages + +import io.iohk.metronome.hotstuff +import io.iohk.metronome.hotstuff.consensus.basic.Agreement + +/** Messages which are generic to any HotStuff BFT agreement. */ +sealed trait HotStuffMessage[A <: Agreement] + +object HotStuffMessage { + + /** Messages which are part of the basic HotStuff BFT algorithm itself. */ + case class ConsensusMessage[A <: Agreement]( + message: hotstuff.consensus.basic.Message[A] + ) extends HotStuffMessage[A] + + /** Messages that support the HotStuff BFT agreement but aren't part of + * the core algorithm, e.g. block and view number synchronisation. 
+ */ + case class SyncMessage[A <: Agreement]( + message: hotstuff.service.messages.SyncMessage[A] + ) extends HotStuffMessage[A] + +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala new file mode 100644 index 00000000..3dcc2643 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala @@ -0,0 +1,35 @@ +package io.iohk.metronome.hotstuff.service.messages + +import io.iohk.metronome.core.messages.{RPCMessage, RPCMessageCompanion} +import io.iohk.metronome.hotstuff.consensus.basic.Agreement +import io.iohk.metronome.hotstuff.service.Status + +/** Messages needed to fully realise the HotStuff protocol, + * without catering for any application specific concerns. + */ +sealed trait SyncMessage[A <: Agreement] { self: RPCMessage => } + +object SyncMessage extends RPCMessageCompanion { + case class GetStatusRequest( + requestId: RequestId + ) extends SyncMessage[Nothing] + with Request + + case class GetStatusResponse[A <: Agreement]( + requestId: RequestId, + status: Status[A] + ) extends SyncMessage[A] + with Response + + case class GetBlockRequest[A <: Agreement]( + requestId: RequestId, + blockHash: A#Hash + ) extends SyncMessage[Nothing] + with Request + + case class GetBlockResponse[A <: Agreement]( + requestId: RequestId, + block: A#Block + ) extends SyncMessage[A] + with Response +} From a33267d33763784c9625636c967bf5a142e36bbe Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Tue, 11 May 2021 23:32:19 +0100 Subject: [PATCH 27/48] PM-3146: HotStuff Service (#25) * PM-3146: Fix ProtocolState to update the lastExecutedBlockHash. * PM-3146: Skeleton for HotStuffService. * PM-3146: Add Network.splitter * PM-3146: Rename HotStuffService to ConsensusService. * PM-3146: Schedule initial effects. * PM-3146: Add better-monadic-for plugin. * PM-3146: Add commitQC to ProtocolState. * PM-3146: Created SyncService. * PM-3146: Added FiberPool. * PM-3146: Use the FiberPool in SyncService. * PM-3146: Create SyncService in HotStuffService. * PM-3146: Moved FiberPool to core. * PM-3146: Support setting a capacity in FiberPool. * PM-3146: Validation and sync method skeletons. * PM-3146: Introduced a Pipe between the two services. * PM-3146: Processing local effects first. * PM-3146: Apply local effects recursively. * PM-3146: Stashing and unstashing too early messages. * PM-3146: Return Effect.SaveBlock when a block has a Prepare Q.C. * PM-3146: Moved SyncPipe into a separate package. * PM-3146: Execute the save block effect. * PM-3146: Use BlockStorage directly. Add KVStoreRunner. * PM-3146: Rename processing methods. * PM-3146: Pass getState rather than the ConsensusService to the SyncService. * PM-3146: Renamed SyncPipe to BlockSyncPipe. * PM-3146: Extracted fibers from ConsensusService into a new FiberSet. Renamed FiberPool to FiberMap. * PM-3146: Extracted MessageStash and fixed due logic. * PM-3146: Ignore Prepare if High Q.C. is older than Commit Q.C. * PM-3146: Fix FiberMap to cancel the running task. * PM-3146: Factor out DeferredTask. Support shutdown notification in FiberSet. 
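To make the new building blocks concrete, here is a small, self-contained sketch of how `Pipe` and `FiberMap` fit together (`PipeAndFiberMapDemo`, `Request` and `Response` are made-up names for the example; only `Pipe` and `FiberMap` come from this patch, and monix `Task` is assumed as the effect type). The pipe connects two components with typed messages flowing in both directions, while the fiber map executes tasks one by one per key, so a single busy peer cannot starve the others:

    import cats.effect.ExitCode
    import io.iohk.metronome.core.Pipe
    import io.iohk.metronome.core.fibers.FiberMap
    import monix.eval.{Task, TaskApp}

    object PipeAndFiberMapDemo extends TaskApp {
      case class Request(n: Int)  // flows left to right
      case class Response(n: Int) // flows right to left

      override def run(args: List[String]): Task[ExitCode] =
        FiberMap[Task, String]().use { fiberMap =>
          for {
            pipe <- Pipe[Task, Request, Response]
            // The right side answers the first request it receives.
            _ <- pipe.right.receive
              .take(1)
              .mapEval(req => pipe.right.send(Response(req.n + 1)))
              .completedL
              .start
            _    <- pipe.left.send(Request(41))
            resp <- pipe.left.receive.headOptionL
            // Tasks submitted under the same key run sequentially on that
            // key's fiber; tasks for different keys run concurrently.
            join <- fiberMap.submit("peer-1")(Task(println(s"answer: $resp")))
            _    <- join
          } yield ExitCode.Success
        }
    }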
--- build.sc | 49 +- .../src/io/iohk/metronome/core/Pipe.scala | 50 ++ .../src/io/iohk/metronome/core/Tagger.scala | 1 + .../metronome/core/fibers/DeferredTask.scala | 35 ++ .../iohk/metronome/core/fibers/FiberMap.scala | 155 ++++++ .../iohk/metronome/core/fibers/FiberSet.scala | 68 +++ .../hotstuff/consensus/basic/Effect.scala | 16 + .../hotstuff/consensus/basic/Event.scala | 2 +- .../hotstuff/consensus/basic/Phase.scala | 18 + .../consensus/basic/ProtocolState.scala | 45 +- .../basic/HotStuffProtocolProps.scala | 30 +- .../hotstuff/service/ConsensusService.scala | 508 ++++++++++++++++++ .../hotstuff/service/HotStuffService.scala | 62 +++ .../metronome/hotstuff/service/Network.scala | 73 +++ .../hotstuff/service/SyncService.scala | 155 ++++++ .../service/messages/SyncMessage.scala | 4 +- .../service/pipes/BlockSyncPipe.scala | 39 ++ .../hotstuff/service/pipes/package.scala | 11 + .../metronome/storage/KVStoreRunner.scala | 7 + 19 files changed, 1263 insertions(+), 65 deletions(-) create mode 100644 metronome/core/src/io/iohk/metronome/core/Pipe.scala create mode 100644 metronome/core/src/io/iohk/metronome/core/fibers/DeferredTask.scala create mode 100644 metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala create mode 100644 metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Network.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/BlockSyncPipe.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/package.scala create mode 100644 metronome/storage/src/io/iohk/metronome/storage/KVStoreRunner.scala diff --git a/build.sc b/build.sc index 00132538..bf5dc5a2 100644 --- a/build.sc +++ b/build.sc @@ -11,22 +11,23 @@ import mill.contrib.versionfile.VersionFileModule object versionFile extends VersionFileModule object VersionOf { - val cats = "2.3.1" - val circe = "0.12.3" - val config = "1.4.1" - val `kind-projector` = "0.11.3" - val logback = "1.2.3" - val mantis = "3.2.1-SNAPSHOT" - val monix = "3.3.0" - val prometheus = "0.10.0" - val rocksdb = "6.15.2" - val scalacheck = "1.15.2" - val scalatest = "3.2.5" - val scalanet = "0.7.0" - val shapeless = "2.3.3" - val slf4j = "1.7.30" - val `scodec-core` = "1.11.7" - val `scodec-bits` = "1.1.12" + val `better-monadic-for` = "0.3.1" + val cats = "2.3.1" + val circe = "0.12.3" + val config = "1.4.1" + val `kind-projector` = "0.11.3" + val logback = "1.2.3" + val mantis = "3.2.1-SNAPSHOT" + val monix = "3.3.0" + val prometheus = "0.10.0" + val rocksdb = "6.15.2" + val scalacheck = "1.15.2" + val scalatest = "3.2.5" + val scalanet = "0.7.0" + val shapeless = "2.3.3" + val slf4j = "1.7.30" + val `scodec-core` = "1.11.7" + val `scodec-bits` = "1.1.12" } // Using 2.12.13 instead of 2.12.10 to access @nowarn, to disable certain deprecation @@ -77,6 +78,10 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { ivy"org.typelevel::cats-effect:${VersionOf.cats}" ) + override def scalacPluginIvyDeps = Agg( + ivy"com.olegpy::better-monadic-for:${VersionOf.`better-monadic-for`}" + ) + override def repositories = 
super.repositories ++ Seq( MavenRepository("https://oss.sonatype.org/content/repositories/snapshots") ) @@ -144,13 +149,14 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { } } - /** Data models shared between all modules. */ + /** Abstractions shared between all modules. */ object core extends SubModule with Publishing { override def description: String = - "Common data models." + "Common abstractions." - override def ivyDeps = Agg( - ivy"com.chuusai::shapeless:${VersionOf.shapeless}" + override def ivyDeps = super.ivyDeps() ++ Agg( + ivy"com.chuusai::shapeless:${VersionOf.shapeless}", + ivy"io.monix::monix:${VersionOf.monix}" ) } @@ -319,8 +325,7 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { override def ivyDeps = super.ivyDeps() ++ Agg( ivy"com.typesafe:config:${VersionOf.config}", ivy"ch.qos.logback:logback-classic:${VersionOf.logback}", - ivy"io.iohk::scalanet-discovery:${VersionOf.scalanet}", - ivy"io.monix::monix:${VersionOf.monix}" + ivy"io.iohk::scalanet-discovery:${VersionOf.scalanet}" ) object test extends TestModule diff --git a/metronome/core/src/io/iohk/metronome/core/Pipe.scala b/metronome/core/src/io/iohk/metronome/core/Pipe.scala new file mode 100644 index 00000000..56e21cc0 --- /dev/null +++ b/metronome/core/src/io/iohk/metronome/core/Pipe.scala @@ -0,0 +1,50 @@ +package io.iohk.metronome.core + +import cats.implicits._ +import cats.effect.{Concurrent, ContextShift, Sync} +import monix.tail.Iterant +import monix.catnap.ConcurrentQueue + +/** A `Pipe` is a connection between two components where + * messages of type `L` are going from left to right, and + * messages of type `R` are going from right to left. + */ +trait Pipe[F[_], L, R] { + type Left = Pipe.Side[F, L, R] + type Right = Pipe.Side[F, R, L] + + def left: Left + def right: Right +} +object Pipe { + + /** One side of a `Pipe` with + * messages of type `I` going in and + * messages of type `O` coming out. 
+ */ + trait Side[F[_], I, O] { + def send(in: I): F[Unit] + def receive: Iterant[F, O] + } + object Side { + def apply[F[_]: Sync, I, O]( + iq: ConcurrentQueue[F, I], + oq: ConcurrentQueue[F, O] + ): Side[F, I, O] = new Side[F, I, O] { + override def send(in: I): F[Unit] = + iq.offer(in) + + override def receive: Iterant[F, O] = + Iterant.repeatEvalF(oq.poll) + } + } + + def apply[F[_]: Concurrent: ContextShift, L, R]: F[Pipe[F, L, R]] = + for { + lq <- ConcurrentQueue.unbounded[F, L](None) + rq <- ConcurrentQueue.unbounded[F, R](None) + } yield new Pipe[F, L, R] { + override val left = Side[F, L, R](lq, rq) + override val right = Side[F, R, L](rq, lq) + } +} diff --git a/metronome/core/src/io/iohk/metronome/core/Tagger.scala b/metronome/core/src/io/iohk/metronome/core/Tagger.scala index 7b3a5a76..da4a1f68 100644 --- a/metronome/core/src/io/iohk/metronome/core/Tagger.scala +++ b/metronome/core/src/io/iohk/metronome/core/Tagger.scala @@ -16,6 +16,7 @@ import shapeless.tag, tag.@@ trait Tagger[U] { trait Tag type Tagged = U @@ Tag + def apply(underlying: U): Tagged = tag[Tag][U](underlying) } diff --git a/metronome/core/src/io/iohk/metronome/core/fibers/DeferredTask.scala b/metronome/core/src/io/iohk/metronome/core/fibers/DeferredTask.scala new file mode 100644 index 00000000..85a05fa6 --- /dev/null +++ b/metronome/core/src/io/iohk/metronome/core/fibers/DeferredTask.scala @@ -0,0 +1,35 @@ +package io.iohk.metronome.core.fibers + +import cats.implicits._ +import cats.effect.Sync +import cats.effect.concurrent.Deferred +import cats.effect.Concurrent + +/** A task that can be executed on a fiber pool, or canceled if the pool is shut down. */ +protected[fibers] class DeferredTask[F[_]: Sync, A]( + deferred: Deferred[F, Either[Throwable, A]], + task: F[A] +) { + + /** Execute the task and set the success/failure result on the deferred. */ + def execute: F[Unit] = + task.attempt.flatMap(deferred.complete) + + /** Get the result of the execution, raising an error if it failed. */ + def join: F[A] = + deferred.get.rethrow + + /** Signal to the submitter that the pool has been shut down. */ + def shutdown: F[Unit] = + deferred + .complete(Left(new RuntimeException("The pool has been shut down."))) + .attempt + .void +} + +object DeferredTask { + def apply[F[_]: Concurrent, A](task: F[A]): F[DeferredTask[F, A]] = + Deferred[F, Either[Throwable, A]].map { d => + new DeferredTask[F, A](d, task) + } +} diff --git a/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala b/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala new file mode 100644 index 00000000..450c088e --- /dev/null +++ b/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala @@ -0,0 +1,155 @@ +package io.iohk.metronome.core.fibers + +import cats.implicits._ +import cats.effect.{Sync, Concurrent, ContextShift, Fiber, Resource} +import cats.effect.concurrent.{Ref, Semaphore} +import monix.catnap.ConcurrentQueue +import monix.execution.BufferCapacity +import monix.execution.ChannelType +import scala.util.control.NoStackTrace + +/** Execute tasks on a separate fiber per source key, + * facilitating separate rate limiting and fair concurrency. + * + * Each fiber executes tasks one by one. + */ +class FiberMap[F[_]: Concurrent: ContextShift, K]( + isShutdownRef: Ref[F, Boolean], + actorMapRef: Ref[F, Map[K, FiberMap.Actor[F]]], + semaphore: Semaphore[F], + capacity: BufferCapacity +) { + + /** Submit a task to be processed in the background. + * + * Create a new fiber if the given key hasn't got one yet. 
+ * + * The result can be waited upon or discarded, the processing + * will happen in the background regardless. + */ + def submit[A](key: K)(task: F[A]): F[F[A]] = { + isShutdownRef.get.flatMap { + case true => + Sync[F].raiseError( + new IllegalStateException("The pool is already shut down.") + ) + case false => + actorMapRef.get.map(_.get(key)).flatMap { + case Some(actor) => + actor.submit(task) + case None => + semaphore.withPermit { + actorMapRef.get.map(_.get(key)).flatMap { + case Some(actor) => + actor.submit(task) + case None => + for { + actor <- FiberMap.Actor[F](capacity) + _ <- actorMapRef.update( + _.updated(key, actor) + ) + join <- actor.submit(task) + } yield join + } + } + } + } + } + + /** Cancel all existing background processors. */ + private def shutdown: F[Unit] = { + semaphore.withPermit { + for { + _ <- isShutdownRef.set(true) + actorMap <- actorMapRef.get + _ <- actorMap.values.toList.traverse(_.shutdown) + } yield () + } + } +} + +object FiberMap { + + /** The queue of a key is at capacity and didn't accept the task. */ + class QueueFullException + extends RuntimeException("The fiber task queue is full.") + with NoStackTrace + + private class Actor[F[_]: Concurrent]( + queue: ConcurrentQueue[F, DeferredTask[F, _]], + runningRef: Ref[F, Option[DeferredTask[F, _]]], + fiber: Fiber[F, Unit] + ) { + + private val reject = Sync[F].raiseError[Unit](new QueueFullException) + + /** Submit a task to the queue, to be processed by the fiber. + * + * If the queue is full, a `QueueFullException` is raised so the submitting + * process knows that this key is producing too much data. + */ + def submit[A](task: F[A]): F[F[A]] = + for { + wrapper <- DeferredTask[F, A](task) + enqueued <- queue.tryOffer(wrapper) + _ <- reject.whenA(!enqueued) + } yield wrapper.join + + /** Cancel the processing and signal to all enqueued tasks that they will not be executed. */ + def shutdown: F[Unit] = + for { + _ <- fiber.cancel + maybeRunning <- runningRef.get + _ <- maybeRunning.fold(().pure[F])(_.shutdown) + tasks <- queue.drain(0, Int.MaxValue) + _ <- tasks.toList.traverse(_.shutdown) + } yield () + } + private object Actor { + + /** Execute all tasks in the queue. */ + def process[F[_]: Sync]( + queue: ConcurrentQueue[F, DeferredTask[F, _]], + runningRef: Ref[F, Option[DeferredTask[F, _]]] + ): F[Unit] = + queue.poll.flatMap { task => + for { + _ <- runningRef.set(task.some) + _ <- task.execute + _ <- runningRef.set(none) + } yield () + } >> process(queue, runningRef) + + /** Create an actor and start executing tasks in the background. */ + def apply[F[_]: Concurrent: ContextShift]( + capacity: BufferCapacity + ): F[Actor[F]] = + for { + queue <- ConcurrentQueue + .withConfig[F, DeferredTask[F, _]](capacity, ChannelType.MPSC) + runningRef <- Ref[F].of(none[DeferredTask[F, _]]) + fiber <- Concurrent[F].start(process(queue, runningRef)) + } yield new Actor[F](queue, runningRef, fiber) + } + + /** Create an empty fiber pool. Cancel all fibers when it's released. 
+    */
+  def apply[F[_]: Concurrent: ContextShift, K](
+      capacity: BufferCapacity = BufferCapacity.Unbounded(None)
+  ): Resource[F, FiberMap[F, K]] =
+    Resource.make(build[F, K](capacity))(_.shutdown)
+
+  private def build[F[_]: Concurrent: ContextShift, K](
+      capacity: BufferCapacity
+  ): F[FiberMap[F, K]] =
+    for {
+      isShutdownRef <- Ref[F].of(false)
+      actorMapRef <- Ref[F].of(Map.empty[K, Actor[F]])
+      semaphore <- Semaphore[F](1)
+      pool = new FiberMap[F, K](
+        isShutdownRef,
+        actorMapRef,
+        semaphore,
+        capacity
+      )
+    } yield pool
+}
diff --git a/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala b/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala
new file mode 100644
index 00000000..39d46699
--- /dev/null
+++ b/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala
@@ -0,0 +1,68 @@
+package io.iohk.metronome.core.fibers
+
+import cats.implicits._
+import cats.effect.{Concurrent, Fiber, Resource}
+import cats.effect.concurrent.{Ref, Deferred}
+
+/** Execute tasks in the background, canceling all fibers if the resource is released.
+  *
+  * Facilitates structured concurrency where the release of the component that submitted
+  * these fibers causes the cancelation of all of its scheduled tasks.
+  */
+class FiberSet[F[_]: Concurrent](
+    isShutdownRef: Ref[F, Boolean],
+    fibersRef: Ref[F, Set[Fiber[F, Unit]]],
+    tasksRef: Ref[F, Set[DeferredTask[F, _]]]
+) {
+  private def raiseIfShutdown: F[Unit] =
+    isShutdownRef.get.ifM(
+      Concurrent[F].raiseError(
+        new IllegalStateException("The pool is already shut down.")
+      ),
+      ().pure[F]
+    )
+
+  def submit[A](task: F[A]): F[F[A]] = for {
+    _ <- raiseIfShutdown
+    deferredFiber <- Deferred[F, Fiber[F, Unit]]
+
+    // Run the task, then remove the fiber from the tracker.
+    background: F[A] = for {
+      exec <- task.attempt
+      fiber <- deferredFiber.get
+      _ <- fibersRef.update(_ - fiber)
+      result <- Concurrent[F].delay(exec).rethrow
+    } yield result
+
+    wrapper <- DeferredTask[F, A](background)
+    _ <- tasksRef.update(_ + wrapper)
+
+    // Start running in the background. Only now do we know the identity of the fiber.
+    fiber <- Concurrent[F].start(wrapper.execute)
+
+    // Add the fiber to the collection first, so that if the effect has
+    // already finished, it can still be removed and we're not leaking memory.
+    _ <- fibersRef.update(_ + fiber)
+    _ <- deferredFiber.complete(fiber)
+
+  } yield wrapper.join
+
+  def shutdown: F[Unit] = for {
+    _ <- isShutdownRef.set(true)
+    fibers <- fibersRef.get
+    _ <- fibers.toList.traverse(_.cancel)
+    tasks <- tasksRef.get
+    _ <- tasks.toList.traverse(_.shutdown)
+  } yield ()
+}
+
+object FiberSet {
+  def apply[F[_]: Concurrent]: Resource[F, FiberSet[F]] =
+    Resource.make[F, FiberSet[F]] {
+      for {
+        isShutdownRef <- Ref[F].of(false)
+        fibersRef <- Ref[F].of(Set.empty[Fiber[F, Unit]])
+        tasksRef <- Ref[F].of(Set.empty[DeferredTask[F, _]])
+      } yield new FiberSet[F](isShutdownRef, fibersRef, tasksRef)
+    }(_.shutdown)
+}
diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Effect.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Effect.scala
index 2e467b0e..a2737e8c 100644
--- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Effect.scala
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Effect.scala
@@ -46,6 +46,22 @@ object Effect {
       highQC: QuorumCertificate[A]
   ) extends Effect[A]
 
+  /** Once the Prepare Q.C.
has been established for a block,
+    * we know that it's not spam, so it's safe to persist.
+    *
+    * This prevents a rogue leader from sending us many `Prepare`
+    * messages in the same view with the intention of eating up
+    * space using the included block.
+    *
+    * It's also a way for us to delay saving a block we created
+    * as a leader to the time when it's been voted on. Since it's
+    * part of the `Prepare` message, replicas shouldn't be asking
+    * for it anyway, so it's not a problem if it's not yet persisted.
+    */
+  case class SaveBlock[A <: Agreement](
+      preparedBlock: A#Block
+  ) extends Effect[A]
+
   /** Execute blocks after a decision, from the last executed hash
     * up to the block included in the Quorum Certificate.
     */
diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Event.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Event.scala
index 130d7a3a..5a03c359 100644
--- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Event.scala
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Event.scala
@@ -3,7 +3,7 @@ package io.iohk.metronome.hotstuff.consensus.basic
 import io.iohk.metronome.hotstuff.consensus.ViewNumber
 
 /** Input events for the protocol model. */
-sealed trait Event[A <: Agreement]
+sealed trait Event[+A <: Agreement]
 
 object Event {
 
diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Phase.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Phase.scala
index 2506fb0d..f7a83534 100644
--- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Phase.scala
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Phase.scala
@@ -18,6 +18,24 @@ sealed trait Phase {
     case Commit    => PreCommit
     case Decide    => Commit
   }
+
+  /** Check that *within the same view* this phase precedes the other. */
+  def isBefore(other: Phase): Boolean =
+    (this, other) match {
+      case (Prepare, PreCommit | Commit | Decide) => true
+      case (PreCommit, Commit | Decide)           => true
+      case (Commit, Decide)                       => true
+      case _                                      => false
+    }
+
+  /** Check that *within the same view* this phase follows the other. */
+  def isAfter(other: Phase): Boolean =
+    (this, other) match {
+      case (PreCommit, Prepare)                   => true
+      case (Commit, Prepare | PreCommit)          => true
+      case (Decide, Prepare | PreCommit | Commit) => true
+      case _                                      => false
+    }
 }
 
 /** Subset of phases over which there can be a vote and a Quorum Certificate. */
diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala
index de667699..b69138fd 100644
--- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala
@@ -45,10 +45,10 @@ case class ProtocolState[A <: Agreement: Block: Signing](
     prepareQC: QuorumCertificate[A],
     // Locked QC, for which a replica voted Commit, because it received a Pre-Commit Q.C. from the leader.
     lockedQC: QuorumCertificate[A],
-    // Hash of the block that was last decided upon.
-    lastExecutedBlockHash: A#Hash,
-    // Hash of the block the federation is currently voting on.
-    preparedBlockHash: A#Hash,
+    // Commit QC which the replica received in the Decide phase and whose block it then executed.
+    commitQC: QuorumCertificate[A],
+    // The block the federation is currently voting on.
+    preparedBlock: A#Block,
     // Timeout for the view, so that it can be adjusted next time if necessary.
     timeout: FiniteDuration,
     // Votes gathered by the leader in this phase. They are guaranteed to be over the same content.
@@ -71,6 +71,12 @@ case class ProtocolState[A <: Agreement: Block: Signing](
     */
   def quorumSize = federation.quorumSize
 
+  /** Hash of the block that was last decided upon. */
+  def lastExecutedBlockHash: A#Hash = commitQC.blockHash
+
+  /** Hash of the block currently being voted on. */
+  def preparedBlockHash: A#Hash = Block[A].blockHash(preparedBlock)
+
   /** No state transition. */
   private def stay: Transition[A] =
     this -> Nil
@@ -191,7 +197,7 @@ case class ProtocolState[A <: Agreement: Block: Signing](
           sendVote(Phase.Prepare, blockHash)
         )
         val next = moveTo(Phase.PreCommit).copy(
-          preparedBlockHash = blockHash
+          preparedBlock = m.block
         )
         Right(next -> effects)
       } else {
@@ -206,7 +212,8 @@ case class ProtocolState[A <: Agreement: Block: Signing](
       handleVotes(e, Phase.Prepare) orElse
         handleQuorum(e, Phase.Prepare) { m =>
           val effects = Seq(
-            sendVote(Phase.PreCommit, m.quorumCertificate.blockHash)
+            sendVote(Phase.PreCommit, m.quorumCertificate.blockHash),
+            SaveBlock(preparedBlock)
           )
           val next = moveTo(Phase.Commit).copy(
             prepareQC = m.quorumCertificate
@@ -244,7 +251,9 @@ case class ProtocolState[A <: Agreement: Block: Signing](
               m.quorumCertificate
             ) +: effects
 
-          next -> withExec
+          val withLast = next.copy(commitQC = m.quorumCertificate)
+
+          withLast -> withExec
         }
       }
     }
@@ -470,26 +479,4 @@ object ProtocolState {
   /** Return an initial set of effects; at the minimum the timeout for the first round. */
   def init[A <: Agreement](state: ProtocolState[A]): Seq[Effect[A]] =
     List(Effect.ScheduleNextView(state.viewNumber, state.timeout))
-
-  private implicit class PhaseOps(val a: Phase) extends AnyVal {
-    import Phase._
-
-    /** Check that *within the same view* phase `a` precedes phase `b`. */
-    def isBefore(b: Phase): Boolean =
-      (a, b) match {
-        case (Prepare, PreCommit | Commit | Decide) => true
-        case (PreCommit, Commit | Decide)           => true
-        case (Commit, Decide)                       => true
-        case _                                      => false
-      }
-
-    /** Check that *within the same view* phase `a` follows phase `b`.
*/ - def isAfter(b: Phase): Boolean = - (a, b) match { - case (PreCommit, Prepare) => true - case (Commit, Prepare | PreCommit) => true - case (Decide, Prepare | PreCommit | Commit) => true - case _ => false - } - } } diff --git a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala index 88a2ef0f..13e84d62 100644 --- a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala +++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala @@ -40,10 +40,12 @@ object HotStuffProtocolCommands extends Commands { } type TestAgreement = TestAgreement.type + val genesis = TestBlock(blockHash = 0, parentBlockHash = -1, command = "") + val genesisQC = QuorumCertificate[TestAgreement]( phase = Phase.Prepare, viewNumber = ViewNumber(0), - blockHash = 0, + blockHash = genesis.blockHash, signature = GroupSignature(Nil) ) @@ -192,8 +194,8 @@ object HotStuffProtocolCommands extends Commands { .getOrElse(sys.error("Invalid federation!")), prepareQC = genesisQC, lockedQC = genesisQC, - lastExecutedBlockHash = genesisQC.blockHash, - preparedBlockHash = genesisQC.blockHash, + commitQC = genesisQC, + preparedBlock = genesis, timeout = 10.seconds, votes = Set.empty, newViews = Map.empty @@ -866,7 +868,7 @@ object HotStuffProtocolCommands extends Commands { val nextS = nextState(state) all( "moves to the next state" |: next.phase == nextS.phase, - if (state.phase != Phase.Decide) { + "votes for the next phase" |: (state.phase == Phase.Decide || effects .collectFirst { case Effect @@ -874,14 +876,20 @@ object HotStuffProtocolCommands extends Commands { recipient, Message.Vote(_, phase, _, _) ) => - "votes for the next phase" |: phase == state.phase + phase == state.phase } - .getOrElse(fail("expected to vote")) - } else { - "executes the block" |: effects.collectFirst { - case _: Effect.ExecuteBlocks[_] => - }.isDefined - } + .getOrElse(false)), + "makes a decision" |: (state.phase != Phase.Decide || + all( + "executes the block" |: effects.collectFirst { + case _: Effect.ExecuteBlocks[_] => + }.isDefined, + "remembers the executed block" |: + next.lastExecutedBlockHash == message.quorumCertificate.blockHash + )), + "saves the prepared block" |: (state.phase != Phase.PreCommit || + effects.collectFirst { case _: Effect.SaveBlock[_] => + }.isDefined) ) case other => diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala new file mode 100644 index 00000000..e3a651c1 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala @@ -0,0 +1,508 @@ +package io.iohk.metronome.hotstuff.service + +import cats.implicits._ +import cats.effect.{Concurrent, Timer, Fiber, Resource, ContextShift} +import cats.effect.concurrent.Ref +import io.iohk.metronome.core.Validated +import io.iohk.metronome.core.fibers.FiberSet +import io.iohk.metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.hotstuff.consensus.basic.{ + Agreement, + Effect, + Event, + ProtocolState, + ProtocolError, + Phase, + Message, + Block, + QuorumCertificate +} +import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe +import io.iohk.metronome.hotstuff.service.storage.BlockStorage +import 
io.iohk.metronome.networking.ConnectionHandler
+import io.iohk.metronome.storage.KVStoreRunner
+import monix.catnap.ConcurrentQueue
+import scala.annotation.tailrec
+import scala.collection.immutable.Queue
+
+/** An effectful executor wrapping the pure HotStuff ProtocolState.
+  *
+  * It handles the `consensus.basic.Message` events coming from the network.
+  */
+class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block](
+    publicKey: A#PKey,
+    network: Network[F, A, Message[A]],
+    storeRunner: KVStoreRunner[F, N],
+    blockStorage: BlockStorage[N, A],
+    stateRef: Ref[F, ProtocolState[A]],
+    stashRef: Ref[F, ConsensusService.MessageStash[A]],
+    blockSyncPipe: BlockSyncPipe[F, A]#Left,
+    eventQueue: ConcurrentQueue[F, Event[A]],
+    blockExecutionQueue: ConcurrentQueue[F, Effect.ExecuteBlocks[A]],
+    fiberSet: FiberSet[F],
+    maxEarlyViewNumberDiff: Int
+) {
+
+  /** Get the current protocol state, perhaps to respond to status requests. */
+  def getState: F[ProtocolState[A]] =
+    stateRef.get
+
+  /** Process incoming network messages. */
+  private def processNetworkMessages: F[Unit] =
+    network.incomingMessages
+      .mapEval[Unit] { case ConnectionHandler.MessageReceived(from, message) =>
+        validateMessage(Event.MessageReceived(from, message)).flatMap {
+          case None =>
+            ().pure[F]
+          case Some(valid) =>
+            syncDependencies(valid)
+        }
+      }
+      .completedL
+
+  /** First round of validation of a message, to decide if we should process it at all. */
+  private def validateMessage(
+      event: Event.MessageReceived[A]
+  ): F[Option[Validated[Event.MessageReceived[A]]]] =
+    stateRef.get.flatMap { state =>
+      state
+        .validateMessage(event)
+        .map(m => m: Event.MessageReceived[A]) match {
+        case Left(error) =>
+          protocolError(error).as(none)
+
+        case Right(
+              Event.MessageReceived(
+                sender,
+                message @ Message.Prepare(_, _, highQC)
+              )
+            ) if state.commitQC.viewNumber > highQC.viewNumber =>
+          // The sender is building on a block that is older than the committed one.
+          // This could be an attack, forcing us to re-download blocks we already pruned.
+          protocolError(ProtocolError.UnsafeExtension[A](sender, message))
+            .as(none)
+
+        case Right(valid) if valid.message.viewNumber < state.viewNumber =>
+          // TODO: Trace that obsolete message was received.
+          // TODO: Also collect these for the round so we can realise if we're out of sync.
+          none.pure[F]
+
+        case Right(valid)
+            if valid.message.viewNumber > state.viewNumber + maxEarlyViewNumberDiff =>
+          // TODO: Trace that a message from view far ahead in the future was received.
+          // TODO: Also collect these for the round so we can realise if we're out of sync.
+          none.pure[F]
+
+        case Right(valid) =>
+          // We know that the message is to/from the leader and it's properly signed,
+          // although it may not match our current state, which we'll see later.
+          validated(valid).some.pure[F]
+      }
+    }
+
+  /** Synchronize any missing block dependencies, then enqueue the event for final processing. */
+  private def syncDependencies(
+      message: Validated[Event.MessageReceived[A]]
+  ): F[Unit] = {
+    import Message._
+    // Only syncing Prepare messages. They have the `highQC` as block parent,
+    // so we know it's something that is safe to sync; it's not a DoS attack.
+    // Other messages may be bogus:
+    // - a Vote can point at a non-existing block to force some download;
+    //   we'd reject it anyway if it doesn't match the state we prepared
+    // - a Quorum could be a replay of some earlier one, maybe a block we have pruned
+    // - a NewView is similar; it's best to first wait and select the highest we know
+    message.message match {
+      case prepare @ Prepare(_, block, highQC)
+          if Block[A].parentBlockHash(block) != highQC.blockHash =>
+        // The High Q.C. may be valid, but the block is not built on it.
+        protocolError(ProtocolError.UnsafeExtension(message.sender, prepare))
+
+      case prepare: Prepare[_] =>
+        // Carry out syncing and validation asynchronously.
+        syncAndValidatePrepare(message.sender, prepare)
+
+      case _: Vote[_] =>
+        // Let the ProtocolState reject it if it's not about the prepared block.
+        enqueueEvent(message)
+
+      case _: Quorum[_] =>
+        // Let the ProtocolState reject it if it's not about the prepared block.
+        enqueueEvent(message)
+
+      case _: NewView[_] =>
+        // Let's assume that we'll have the highest prepare Q.C. available,
+        // although some can be replays of old data we may no longer have.
+        // If it turns out we don't have the block after all, we'll figure it
+        // out in the `CreateBlock` effect, at which point we can time out
+        // and sync with the `Prepare` message from the next leader.
+        enqueueEvent(message)
+    }
+  }
+
+  /** Report an invalid message. */
+  private def protocolError(
+      error: ProtocolError[A]
+  ): F[Unit] =
+    // TODO: Trace
+    ().pure[F]
+
+  /** Add a Prepare message to the synchronisation and validation queue.
+    *
+    * The High Q.C. in the message proves that the parent block is valid
+    * according to the federation members.
+    *
+    * Any missing dependencies should be downloaded and the application asked
+    * to validate each block in succession as the downloads are finished.
+    */
+  private def syncAndValidatePrepare(
+      sender: A#PKey,
+      prepare: Message.Prepare[A]
+  ): F[Unit] =
+    blockSyncPipe.send(
+      BlockSyncPipe.Request(sender, prepare)
+    )
+
+  /** Process the synchronisation result queue. */
+  private def processBlockSyncPipe: F[Unit] =
+    blockSyncPipe.receive
+      .mapEval[Unit] { case BlockSyncPipe.Response(request, isValid) =>
+        if (isValid) {
+          enqueueEvent(
+            validated(Event.MessageReceived(request.sender, request.prepare))
+          )
+        } else {
+          protocolError(
+            ProtocolError.UnsafeExtension(request.sender, request.prepare)
+          )
+        }
+      }
+      .completedL
+
+  /** Add a validated event to the queue for processing against the protocol state. */
+  private def enqueueEvent(event: Validated[Event[A]]): F[Unit] =
+    eventQueue.offer(event)
+
+  /** Take a single event from the queue, apply it to the state,
+    * kick off the resulting effects, then recurse.
+    *
+    * The effects will communicate their results back to the state
+    * through the event queue.
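+    *
+    * Since transitions are only applied from this single loop,
+    * updates to `stateRef` are effectively serialised.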
+ */ + private def processEvents: F[Unit] = { + eventQueue.poll.flatMap { event => + stateRef.get.flatMap { state => + val handle: F[Unit] = event match { + case e @ Event.NextView(_) => + // TODO (PM-3063): Check whether we have timed out because we are out of sync + handleTransition(state.handleNextView(e)) + + case e @ Event.MessageReceived(_, _) => + handleTransitionAttempt( + state.handleMessage(Validated[Event.MessageReceived[A]](e)) + ) + + case e @ Event.BlockCreated(_, _, _) => + handleTransition(state.handleBlockCreated(e)) + } + + handle >> processEvents + } + } + } + + /** Handle successful state transition: + * - apply local effects on the state + * - schedule other effects to execute in the background + * - if there was a phase or view transition, unstash delayed events + */ + private def handleTransition( + transition: ProtocolState.Transition[A] + ): F[Unit] = { + val (state, effects) = transition + + // Apply local messages to the state before anything else. + val (nextState, nextEffects) = + applySyncEffects(state, effects) + + // Unstash messages before we change state. + unstash(nextState) >> + stateRef.set(nextState) >> + scheduleEffects(nextEffects) + } + + /** Requeue messages which arrived too early, but are now due becuase + * the state caught up with them. + */ + private def unstash(nextState: ProtocolState[A]): F[Unit] = + stateRef.get.flatMap { state => + val requeue = for { + dueEvents <- stashRef.modify { + _.unstash(nextState.viewNumber, nextState.phase) + } + _ <- dueEvents.traverse(e => enqueueEvent(validated(e))) + } yield () + + requeue.whenA( + nextState.viewNumber != state.viewNumber || nextState.phase != state.phase + ) + } + + /** Carry out local effects before anything else, + * to eliminate race conditions when a vote sent + * to self would have caused a state transition. + * + * Return the updated state and the effects to be + * carried out asynchornously. + */ + private def applySyncEffects( + state: ProtocolState[A], + effects: Seq[Effect[A]] + ): ProtocolState.Transition[A] = { + @tailrec + def loop( + state: ProtocolState[A], + effectQueue: Queue[Effect[A]], + asyncEffects: List[Effect[A]] + ): ProtocolState.Transition[A] = + effectQueue.dequeueOption match { + case None => + (state, asyncEffects.reverse) + + case (Some((effect, effectQueue))) => + effect match { + case Effect.SendMessage(recipient, message) + if recipient == publicKey => + val event = + Validated(Event.MessageReceived(recipient, message)) + + state.handleMessage(event) match { + case Left(error) => + // This shouldn't happen, but let's just skip this event here and redeliver it later. + loop(state, effectQueue, effect :: asyncEffects) + + case Right((state, effects)) => + loop(state, effectQueue ++ effects, asyncEffects) + } + + case _ => + loop(state, effectQueue, effect :: asyncEffects) + } + } + + loop(state, Queue(effects: _*), Nil) + } + + /** Try to apply a transition: + * - if it's `TooEarly`, add it to the delayed stash + * - if it's another error, ignore the event + * - otherwise carry out the transition + */ + private def handleTransitionAttempt( + transitionAttempt: ProtocolState.TransitionAttempt[A] + ): F[Unit] = transitionAttempt match { + case Left(error @ ProtocolError.TooEarly(_, _, _)) => + // TODO: Trace too early message. + stashRef.update { _.stash(error) } + + case Left(error) => + protocolError(error) + + case Right(transition) => + handleTransition(transition) + } + + /** Effects can be processed independently of each other in the background. 
+  private def scheduleEffects(effects: Seq[Effect[A]]): F[Unit] =
+    effects.toList.traverse(scheduleEffect).void
+
+  /** Start processing an effect in the background. Add the background fiber
+    * to the scheduled items so they can be canceled if the service is released.
+    */
+  private def scheduleEffect(effect: Effect[A]): F[Unit] = {
+    fiberSet.submit(processEffect(effect)).void
+  }
+
+  /** Process a single effect. This will always be wrapped in a Fiber. */
+  private def processEffect(effect: Effect[A]): F[Unit] = {
+    import Event._
+    import Effect._
+
+    // TODO: Trace errors.
+    effect match {
+      case ScheduleNextView(viewNumber, timeout) =>
+        val event = validated(NextView(viewNumber))
+        Timer[F].sleep(timeout) >> enqueueEvent(event)
+
+      case CreateBlock(viewNumber, highQC) =>
+        // Ask the application to create a block for us.
+        // TODO (PM-3109): Create block.
+        ???
+
+      case SaveBlock(preparedBlock) =>
+        storeRunner.runReadWrite {
+          blockStorage.put(preparedBlock)
+        }
+
+      case effect @ ExecuteBlocks(_, commitQC) =>
+        // Each node may be at a different point in the chain, so how
+        // long the executions take can vary. We could execute it in
+        // the foreground here, but that may cause the node to lose its
+        // sync with the other federation members, so the execution
+        // should be offloaded to another queue.
+        //
+        // Save the Commit Quorum Certificate to the view state.
+        saveCommitQC(commitQC) >>
+          blockExecutionQueue.offer(effect)
+
+      case SendMessage(recipient, message) =>
+        network.sendMessage(recipient, message)
+    }
+  }
+
+  /** Update the view state with the last Commit Quorum Certificate. */
+  private def saveCommitQC(qc: QuorumCertificate[A]): F[Unit] = {
+    assert(qc.phase == Phase.Commit)
+    // TODO (PM-3112): Persist View State.
+    ???
+  }
+
+  /** Execute blocks in order, updating persistent storage along the way. */
+  private def executeBlocks: F[Unit] = {
+    blockExecutionQueue.poll.flatMap {
+      case Effect.ExecuteBlocks(lastExecutedBlockHash, commitQC) =>
+        // Retrieve the blocks from storage, from the last executed one
+        // to the one in the Quorum Certificate, and tell the application
+        // to execute them one by one. Update the persistent view state
+        // after each execution to remember which blocks we have actually
+        // executed.
+
+        // TODO (PM-3133): Execute block
+        ???
+    } >> executeBlocks
+  }
+
+  private def validated(event: Event[A]): Validated[Event[A]] =
+    Validated[Event[A]](event)
+
+  private def validated(
+      event: Event.MessageReceived[A]
+  ): Validated[Event.MessageReceived[A]] =
+    Validated[Event.MessageReceived[A]](event)
+}
+
+object ConsensusService {
+
+  /** Stash to keep too-early messages to be re-queued later.
+    *
+    * Every slot has just one place per federation member to avoid DoS attacks.
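+    *
+    * For example, if a message that is only due in view 12 arrives while
+    * we are still in view 11, it is stashed under its (view, phase) slot
+    * and re-queued by `unstash` once the state reaches that point.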
+ */ + case class MessageStash[A <: Agreement]( + slots: Map[(ViewNumber, Phase), Map[A#PKey, Message[A]]] + ) { + def stash(error: ProtocolError.TooEarly[A]): MessageStash[A] = { + val slotKey = (error.expectedInViewNumber, error.expectedInPhase) + val slot = slots.getOrElse(slotKey, Map.empty) + copy(slots = + slots.updated( + slotKey, + slot.updated(error.event.sender, error.event.message) + ) + ) + } + + def unstash( + dueViewNumber: ViewNumber, + duePhase: Phase + ): (MessageStash[A], List[Event.MessageReceived[A]]) = { + val dueKeys = slots.keySet.filter { case (viewNumber, phase) => + viewNumber < dueViewNumber || + viewNumber == dueViewNumber && + !phase.isAfter(duePhase) + } + + val dueEvents = dueKeys.toList.map(slots).flatten.map { + case (sender, message) => Event.MessageReceived(sender, message) + } + + copy(slots = slots -- dueKeys) -> dueEvents + } + } + object MessageStash { + def empty[A <: Agreement] = MessageStash[A](Map.empty) + } + + /** Create a `ConsensusService` instance and start processing events + * in the background, shutting processing down when the resource is + * released. + * + * `initState` is expected to be restored from persistent storage + * instances upon restart. + */ + def apply[F[_]: Timer: Concurrent: ContextShift, N, A <: Agreement: Block]( + publicKey: A#PKey, + network: Network[F, A, Message[A]], + storeRunner: KVStoreRunner[F, N], + blockStorage: BlockStorage[N, A], + blockSyncPipe: BlockSyncPipe[F, A]#Left, + initState: ProtocolState[A], + maxEarlyViewNumberDiff: Int = 1 + ): Resource[F, ConsensusService[F, N, A]] = + // TODO (PM-3187): Add Tracing + for { + fiberSet <- FiberSet[F] + service <- Resource.liftF( + build[F, N, A]( + publicKey, + network, + storeRunner, + blockStorage, + blockSyncPipe, + initState, + maxEarlyViewNumberDiff, + fiberSet + ) + ) + _ <- Concurrent[F].background(service.processNetworkMessages) + _ <- Concurrent[F].background(service.processBlockSyncPipe) + _ <- Concurrent[F].background(service.processEvents) + _ <- Concurrent[F].background(service.executeBlocks) + initEffects = ProtocolState.init(initState) + _ <- Resource.liftF(service.scheduleEffects(initEffects)) + } yield service + + private def build[F[ + _ + ]: Timer: Concurrent: ContextShift, N, A <: Agreement: Block]( + publicKey: A#PKey, + network: Network[F, A, Message[A]], + storeRunner: KVStoreRunner[F, N], + blockStorage: BlockStorage[N, A], + blockSyncPipe: BlockSyncPipe[F, A]#Left, + initState: ProtocolState[A], + maxEarlyViewNumberDiff: Int, + fiberSet: FiberSet[F] + ): F[ConsensusService[F, N, A]] = + for { + stateRef <- Ref[F].of(initState) + stashRef <- Ref[F].of(MessageStash.empty[A]) + fibersRef <- Ref[F].of(Set.empty[Fiber[F, Unit]]) + eventQueue <- ConcurrentQueue[F].unbounded[Event[A]](None) + blockExecutionQueue <- ConcurrentQueue[F] + .unbounded[Effect.ExecuteBlocks[A]](None) + + service = new ConsensusService( + publicKey, + network, + storeRunner, + blockStorage, + stateRef, + stashRef, + blockSyncPipe, + eventQueue, + blockExecutionQueue, + fiberSet, + maxEarlyViewNumberDiff + ) + } yield service +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala new file mode 100644 index 00000000..04ab3fe1 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala @@ -0,0 +1,62 @@ +package io.iohk.metronome.hotstuff.service + +import cats.effect.{Concurrent, 
ContextShift, Resource, Timer}
+import io.iohk.metronome.hotstuff.consensus.basic.{
+  Agreement,
+  ProtocolState,
+  Message,
+  Block
+}
+import io.iohk.metronome.hotstuff.service.messages.{
+  HotStuffMessage,
+  SyncMessage
+}
+import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe
+import io.iohk.metronome.hotstuff.service.storage.BlockStorage
+import io.iohk.metronome.storage.KVStoreRunner
+
+object HotStuffService {
+
+  /** Start up the HotStuff service stack. */
+  def apply[F[_]: Concurrent: ContextShift: Timer, N, A <: Agreement: Block](
+      publicKey: A#PKey,
+      network: Network[F, A, HotStuffMessage[A]],
+      storeRunner: KVStoreRunner[F, N],
+      blockStorage: BlockStorage[N, A],
+      initState: ProtocolState[A]
+  ): Resource[F, Unit] =
+    for {
+      (consensusNetwork, syncNetwork) <- Network
+        .splitter[F, A, HotStuffMessage[A], Message[A], SyncMessage[A]](
+          network
+        )(
+          split = {
+            case HotStuffMessage.ConsensusMessage(message) => Left(message)
+            case HotStuffMessage.SyncMessage(message)      => Right(message)
+          },
+          merge = {
+            case Left(message)  => HotStuffMessage.ConsensusMessage(message)
+            case Right(message) => HotStuffMessage.SyncMessage(message)
+          }
+        )
+
+      blockSyncPipe <- Resource.liftF { BlockSyncPipe[F, A] }
+
+      consensusService <- ConsensusService(
+        publicKey,
+        consensusNetwork,
+        storeRunner,
+        blockStorage,
+        blockSyncPipe.left,
+        initState
+      )
+
+      syncService <- SyncService(
+        syncNetwork,
+        storeRunner,
+        blockStorage,
+        blockSyncPipe.right,
+        consensusService.getState
+      )
+    } yield ()
+}
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Network.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Network.scala
new file mode 100644
index 00000000..9c6f3892
--- /dev/null
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Network.scala
@@ -0,0 +1,73 @@
+package io.iohk.metronome.hotstuff.service
+
+import cats.effect.{Sync, Resource, Concurrent, ContextShift}
+import io.iohk.metronome.hotstuff.consensus.basic.Agreement
+import io.iohk.metronome.networking.ConnectionHandler.MessageReceived
+import monix.tail.Iterant
+import monix.catnap.ConcurrentQueue
+
+/** Network adapter for specialising messages. */
+trait Network[F[_], A <: Agreement, M] {
+
+  /** Receive incoming messages from the network. */
+  def incomingMessages: Iterant[F, MessageReceived[A#PKey, M]]
+
+  /** Try sending a message to a federation member, if we are connected. */
+  def sendMessage(recipient: A#PKey, message: M): F[Unit]
+}
+
+object Network {
+
+  /** Consume messages from a network and dispatch them either left or right,
+    * based on a splitter function; combine outgoing messages in the opposite direction.
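+    *
+    * For instance, `HotStuffService` splits a `HotStuffMessage` network into
+    * consensus and sync halves, roughly like this:
+    * {{{
+    * Network.splitter[F, A, HotStuffMessage[A], Message[A], SyncMessage[A]](network)(
+    *   split = {
+    *     case HotStuffMessage.ConsensusMessage(m) => Left(m)
+    *     case HotStuffMessage.SyncMessage(m)      => Right(m)
+    *   },
+    *   merge = {
+    *     case Left(m)  => HotStuffMessage.ConsensusMessage(m)
+    *     case Right(m) => HotStuffMessage.SyncMessage(m)
+    *   }
+    * )
+    * }}}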
+ */ + def splitter[F[_]: Concurrent: ContextShift, A <: Agreement, M, L, R]( + network: Network[F, A, M] + )( + split: M => Either[L, R], + merge: Either[L, R] => M + ): Resource[F, (Network[F, A, L], Network[F, A, R])] = + for { + leftQueue <- makeQueue[F, A, L] + rightQueue <- makeQueue[F, A, R] + + _ <- Concurrent[F].background { + network.incomingMessages.mapEval { + case MessageReceived(from, message) => + split(message) match { + case Left(leftMessage) => + leftQueue.offer(MessageReceived(from, leftMessage)) + case Right(rightMessage) => + rightQueue.offer(MessageReceived(from, rightMessage)) + } + }.completedL + } + + leftNetwork = new SplitNetwork[F, A, L]( + leftQueue.poll, + (r, m) => network.sendMessage(r, merge(Left(m))) + ) + + rightNetwork = new SplitNetwork[F, A, R]( + rightQueue.poll, + (r, m) => network.sendMessage(r, merge(Right(m))) + ) + + } yield (leftNetwork, rightNetwork) + + private def makeQueue[F[_]: Concurrent: ContextShift, A <: Agreement, M] = + Resource.liftF { + ConcurrentQueue.unbounded[F, MessageReceived[A#PKey, M]](None) + } + + private class SplitNetwork[F[_]: Sync, A <: Agreement, M]( + poll: F[MessageReceived[A#PKey, M]], + send: (A#PKey, M) => F[Unit] + ) extends Network[F, A, M] { + override def incomingMessages: Iterant[F, MessageReceived[A#PKey, M]] = + Iterant.repeatEvalF(poll) + + def sendMessage(recipient: A#PKey, message: M) = + send(recipient, message) + } +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala new file mode 100644 index 00000000..e5a827f7 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala @@ -0,0 +1,155 @@ +package io.iohk.metronome.hotstuff.service + +import cats.implicits._ +import cats.effect.{Sync, Resource, Concurrent} +import io.iohk.metronome.core.fibers.FiberMap +import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, ProtocolState} +import io.iohk.metronome.hotstuff.service.messages.SyncMessage +import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe +import io.iohk.metronome.hotstuff.service.storage.BlockStorage +import io.iohk.metronome.networking.ConnectionHandler +import io.iohk.metronome.storage.KVStoreRunner +import cats.effect.ContextShift + +/** The `SyncService` handles the `SyncMessage`s coming from the network, + * i.e. serving block and status requests, as well as receive responses + * for outgoing requests for missing dependencies. + * + * It will match up the `requestId`s in the responses and discard any + * unsolicited message. + * + * The block and view synchronisation components will use this service + * to send requests to the network. + */ +class SyncService[F[_]: Sync, N, A <: Agreement]( + network: Network[F, A, SyncMessage[A]], + storeRunner: KVStoreRunner[F, N], + blockStorage: BlockStorage[N, A], + blockSyncPipe: BlockSyncPipe[F, A]#Right, + getState: F[ProtocolState[A]], + fiberMap: FiberMap[F, A#PKey] +) { + + /** Request a block from a peer. + * + * Returns `None` if we're not connected or the request times out. + */ + def getBlock(from: A#PKey, blockHash: A#Hash): F[Option[A#Block]] = ??? + + /** Request the status of a peer. + * + * Returns `None` if we're not connected or the request times out. + */ + def getStatus(from: A#PKey): F[Option[Status[A]]] = ??? 
+
+  /** Process incoming network messages. */
+  private def processNetworkMessages: F[Unit] = {
+    import SyncMessage._
+    // TODO (PM-3186): Rate limiting per source.
+    network.incomingMessages
+      .mapEval[Unit] { case ConnectionHandler.MessageReceived(from, message) =>
+        val handler: F[Unit] =
+          message match {
+            case GetStatusRequest(requestId) =>
+              getState.flatMap { state =>
+                val status =
+                  Status(state.viewNumber, state.prepareQC, state.commitQC)
+
+                network.sendMessage(
+                  from,
+                  GetStatusResponse(requestId, status)
+                )
+              }
+
+            case GetBlockRequest(requestId, blockHash) =>
+              storeRunner
+                .runReadOnly {
+                  blockStorage.get(blockHash)
+                }
+                .flatMap {
+                  case None =>
+                    ().pure[F]
+                  case Some(block) =>
+                    network.sendMessage(
+                      from,
+                      GetBlockResponse(requestId, block)
+                    )
+                }
+
+            case GetStatusResponse(requestId, status) =>
+              // TODO (PM-3063): Hand over to view synchronisation.
+              ???
+
+            case GetBlockResponse(requestId, block) =>
+              // TODO (PM-3134): Hand over to block synchronisation.
+              ???
+          }
+
+        // TODO: Catch and trace errors.
+
+        // Handle on a fiber dedicated to the source.
+        fiberMap
+          .submit(from)(handler)
+          .attemptNarrow[FiberMap.QueueFullException]
+          .flatMap {
+            case Right(_) => ().pure[F]
+            case Left(ex) => ().pure[F] // TODO: Trace submission error.
+          }
+      }
+      .completedL
+  }
+
+  /** Read Requests from the BlockSyncPipe and send Responses. */
+  def processBlockSyncPipe: F[Unit] = {
+    blockSyncPipe.receive
+      .mapEval[Unit] { case request @ BlockSyncPipe.Request(sender, prepare) =>
+        // TODO (PM-3134): Block sync.
+        // TODO (PM-3132, PM-3133): Block validation.
+
+        // We must take care not to insert blocks into storage and risk losing
+        // the pointer to them in a restart. Maybe keep the unfinished tree
+        // in memory until we find a parent we do have in storage, then
+        // insert them in the opposite order, validating against the application side
+        // as we go along, finally responding to the requestor.
+        //
+        // It is enough to respond positively to the last block; that will
+        // indicate that the whole range can be executed later (at that point,
+        // from storage).
+        val isValid: F[Boolean] = ???
+
+        isValid.flatMap { isValid =>
+          blockSyncPipe.send(BlockSyncPipe.Response(request, isValid))
+        }
+      }
+      .completedL
+  }
+}
+
+object SyncService {
+
+  /** Create a `SyncService` instance and start processing messages
+    * in the background, shutting processing down when the resource is
+    * released.
+    */
+  def apply[F[_]: Concurrent: ContextShift, N, A <: Agreement](
+      network: Network[F, A, SyncMessage[A]],
+      storeRunner: KVStoreRunner[F, N],
+      blockStorage: BlockStorage[N, A],
+      blockSyncPipe: BlockSyncPipe[F, A]#Right,
+      getState: F[ProtocolState[A]]
+  ): Resource[F, SyncService[F, N, A]] =
+    // TODO (PM-3187): Add Tracing
+    // TODO (PM-3186): Add capacity as part of rate limiting.
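+    // Until then, the `FiberMap` below runs with its default, unbounded capacity.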
+    for {
+      fiberMap <- FiberMap[F, A#PKey]()
+      service = new SyncService(
+        network,
+        storeRunner,
+        blockStorage,
+        blockSyncPipe,
+        getState,
+        fiberMap
+      )
+      _ <- Concurrent[F].background(service.processNetworkMessages)
+      _ <- Concurrent[F].background(service.processBlockSyncPipe)
+    } yield service
+}
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala
index 3dcc2643..d2fd6312 100644
--- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala
@@ -7,7 +7,7 @@ import io.iohk.metronome.hotstuff.service.Status
 /** Messages needed to fully realise the HotStuff protocol,
   * without catering for any application specific concerns.
   */
-sealed trait SyncMessage[A <: Agreement] { self: RPCMessage => }
+sealed trait SyncMessage[+A <: Agreement] { self: RPCMessage => }
 
 object SyncMessage extends RPCMessageCompanion {
   case class GetStatusRequest(
@@ -24,7 +24,7 @@ object SyncMessage extends RPCMessageCompanion {
   case class GetBlockRequest[A <: Agreement](
       requestId: RequestId,
       blockHash: A#Hash
-  ) extends SyncMessage[Nothing]
+  ) extends SyncMessage[A]
       with Request
 
   case class GetBlockResponse[A <: Agreement](
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/BlockSyncPipe.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/BlockSyncPipe.scala
new file mode 100644
index 00000000..83e57ad0
--- /dev/null
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/BlockSyncPipe.scala
@@ -0,0 +1,39 @@
+package io.iohk.metronome.hotstuff.service.pipes
+
+import cats.effect.{Concurrent, ContextShift}
+import io.iohk.metronome.core.Pipe
+import io.iohk.metronome.hotstuff.consensus.basic.Agreement
+import io.iohk.metronome.hotstuff.consensus.basic.Message
+
+object BlockSyncPipe {
+
+  /** Request the synchronisation component to download
+    * any missing dependencies up to the High Q.C.,
+    * perform any application specific validation,
+    * including the block in the `Prepare` message,
+    * and persist the blocks up to, but not including,
+    * the block in the `Prepare` message.
+    *
+    * This is because the block being prepared is
+    * subject to further validation and voting,
+    * while the one in the High Q.C. has gathered
+    * a quorum from the federation.
+    */
+  case class Request[A <: Agreement](
+      sender: A#PKey,
+      prepare: Message.Prepare[A]
+  )
+
+  /** Respond with the outcome of whether the
+    * block we're being asked to prepare is
+    * valid, according to the application rules.
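+    *
+    * The consensus side sends `Request`s on the left end of the pipe,
+    * while the sync side replies with `Response`s on the right end.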
+ */ + case class Response[A <: Agreement]( + request: Request[A], + isValid: Boolean + ) + + def apply[F[_]: Concurrent: ContextShift, A <: Agreement] + : F[BlockSyncPipe[F, A]] = + Pipe[F, BlockSyncPipe.Request[A], BlockSyncPipe.Response[A]] +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/package.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/package.scala new file mode 100644 index 00000000..12f51399 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/package.scala @@ -0,0 +1,11 @@ +package io.iohk.metronome.hotstuff.service + +import io.iohk.metronome.core.Pipe +import io.iohk.metronome.hotstuff.consensus.basic.Agreement + +package object pipes { + + /** Communication pipe with the block synchronization and validation component. */ + type BlockSyncPipe[F[_], A <: Agreement] = + Pipe[F, BlockSyncPipe.Request[A], BlockSyncPipe.Response[A]] +} diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStoreRunner.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStoreRunner.scala new file mode 100644 index 00000000..7372062b --- /dev/null +++ b/metronome/storage/src/io/iohk/metronome/storage/KVStoreRunner.scala @@ -0,0 +1,7 @@ +package io.iohk.metronome.storage + +/** Convenience interface to turn KVStore queries into effects. */ +trait KVStoreRunner[F[_], N] { + def runReadOnly[A](query: KVStoreRead[N, A]): F[A] + def runReadWrite[A](query: KVStore[N, A]): F[A] +} From 83b4622c0054dfee24523fd6328d652f6a7386f8 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Tue, 11 May 2021 23:34:22 +0100 Subject: [PATCH 28/48] PM-3146: HotStuff Service Tests (#27) * PM-3146: Test FiberMap. * PM-3146: Test FiberSet. * PM-3146: Test Pipe. * PM-3146: Test Network. * PM-3146: Test MessageStash. * PM-3146: Rename to MessageStashSpec. * PM-3146: No need for Task for MessageStashSpec --- build.sc | 2 + .../src/io/iohk/metronome/core/PipeSpec.scala | 27 +++ .../metronome/core/fibers/FiberMapSpec.scala | 160 ++++++++++++++++++ .../metronome/core/fibers/FiberSetSpec.scala | 83 +++++++++ .../hotstuff/service/MessageStashSpec.scala | 111 ++++++++++++ .../hotstuff/service/NetworkSpec.scala | 107 ++++++++++++ 6 files changed, 490 insertions(+) create mode 100644 metronome/core/test/src/io/iohk/metronome/core/PipeSpec.scala create mode 100644 metronome/core/test/src/io/iohk/metronome/core/fibers/FiberMapSpec.scala create mode 100644 metronome/core/test/src/io/iohk/metronome/core/fibers/FiberSetSpec.scala create mode 100644 metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/MessageStashSpec.scala create mode 100644 metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/NetworkSpec.scala diff --git a/build.sc b/build.sc index bf5dc5a2..5628395e 100644 --- a/build.sc +++ b/build.sc @@ -158,6 +158,8 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { ivy"com.chuusai::shapeless:${VersionOf.shapeless}", ivy"io.monix::monix:${VersionOf.monix}" ) + + object test extends TestModule } /** Storage abstractions, e.g. a generic key-value store. 
*/ diff --git a/metronome/core/test/src/io/iohk/metronome/core/PipeSpec.scala b/metronome/core/test/src/io/iohk/metronome/core/PipeSpec.scala new file mode 100644 index 00000000..5f24205e --- /dev/null +++ b/metronome/core/test/src/io/iohk/metronome/core/PipeSpec.scala @@ -0,0 +1,27 @@ +package io.iohk.metronome.core + +import org.scalatest.flatspec.AsyncFlatSpec +import monix.eval.Task +import monix.execution.Scheduler.Implicits.global +import org.scalatest.matchers.should.Matchers + +class PipeSpec extends AsyncFlatSpec with Matchers { + + behavior of "Pipe" + + it should "send messages between the sides" in { + val test = for { + pipe <- Pipe[Task, String, Int] + _ <- pipe.left.send("foo") + _ <- pipe.left.send("bar") + _ <- pipe.right.send(1) + rs <- pipe.right.receive.take(2).toListL + ls <- pipe.left.receive.headOptionL + } yield { + rs shouldBe List("foo", "bar") + ls shouldBe Some(1) + } + + test.runToFuture + } +} diff --git a/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberMapSpec.scala b/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberMapSpec.scala new file mode 100644 index 00000000..575e2eab --- /dev/null +++ b/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberMapSpec.scala @@ -0,0 +1,160 @@ +package io.iohk.metronome.core.fibers + +import cats.effect.concurrent.Ref +import monix.eval.Task +import monix.execution.atomic.AtomicInt +import monix.execution.Scheduler.Implicits.global +import org.scalatest.{Inspectors, Inside} +import org.scalatest.compatible.Assertion +import org.scalatest.flatspec.AsyncFlatSpec +import org.scalatest.matchers.should.Matchers +import scala.util.Random +import scala.concurrent.duration._ +import monix.execution.BufferCapacity + +class FiberMapSpec extends AsyncFlatSpec with Matchers with Inside { + + def test(t: Task[Assertion]) = + t.timeout(10.seconds).runToFuture + + def testMap(f: FiberMap[Task, String] => Task[Assertion]) = test { + FiberMap[Task, String]().use(f) + } + + behavior of "FiberMap" + + it should "process tasks in the order they are submitted" in testMap { + fiberMap => + val stateRef = Ref.unsafe[Task, Map[String, Vector[Int]]](Map.empty) + + val keys = List("a", "b", "c") + + val valueMap = keys.map { + _ -> Random.shuffle(Range(0, 10).toVector) + }.toMap + + val tasks = for { + k <- keys + v <- valueMap(k) + } yield (k, v) + + def append(k: String, v: Int): Task[Unit] = + stateRef.update { state => + state.updated(k, state.getOrElse(k, Vector.empty) :+ v) + } + + for { + handles <- Task.traverse(tasks) { case (k, v) => + // This is a version that wouldn't preserve the order: + // append(k, v).start.map(_.join) + fiberMap.submit(k)(append(k, v)) + } + _ <- Task.parTraverse(handles)(identity) + state <- stateRef.get + } yield { + Inspectors.forAll(keys) { k => + state(k) shouldBe valueMap(k) + } + } + } + + it should "process tasks concurrently across keys" in testMap { fiberMap => + val running = AtomicInt(0) + val maxRunning = AtomicInt(0) + + val keys = List("a", "b") + val tasks = List.fill(10)(keys).flatten + + for { + handles <- Task.traverse(tasks) { k => + val task = for { + r <- Task(running.incrementAndGet()) + _ <- Task(maxRunning.getAndTransform(m => math.max(m, r))) + _ <- Task.sleep(20.millis) // Increase chance for overlap. 
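+            // Decrement so the final assertion can check that every task finished.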
+ _ <- Task(running.decrement()) + } yield () + + fiberMap.submit(k)(task) + } + _ <- Task.parTraverse(handles)(identity) + } yield { + running.get() shouldBe 0 + maxRunning.get() shouldBe keys.size + } + } + + it should "return a value we can wait on" in testMap { fiberMap => + for { + task <- fiberMap.submit("foo")(Task("spam")) + value <- task + } yield { + value shouldBe "spam" + } + } + + it should "reject new submissions after shutdown" in test { + FiberMap[Task, String]().allocated.flatMap { case (fiberMap, release) => + for { + _ <- fiberMap.submit("foo")(Task("alpha")) + _ <- release + r <- fiberMap.submit("foo")(Task(2)).attempt + } yield { + inside(r) { case Left(ex) => + ex shouldBe a[IllegalStateException] + ex.getMessage should include("shut down") + } + } + } + } + + it should "reject new submissions for keys that hit their capacity limit" in test { + FiberMap[Task, String](BufferCapacity.Bounded(capacity = 1)).use { + fiberMap => + def trySubmit(k: String) = + fiberMap.submit(k)(Task.never).attempt + + for { + _ <- trySubmit("foo") + _ <- trySubmit("foo") + r3 <- trySubmit("foo") + r4 <- trySubmit("bar") + } yield { + inside(r3) { case Left(ex) => + ex shouldBe a[FiberMap.QueueFullException] + } + r4.isRight shouldBe true + } + } + } + + it should "cancel and raise errors in already submitted tasks after shutdown" in test { + FiberMap[Task, String]().allocated.flatMap { case (fiberMap, release) => + for { + r <- fiberMap.submit("foo")(Task.never) + _ <- release + r <- r.attempt + } yield { + inside(r) { case Left(ex) => + ex shouldBe a[RuntimeException] + ex.getMessage should include("shut down") + } + } + } + } + + it should "keep processing even if a task fails" in testMap { fiberMap => + for { + t1 <- fiberMap.submit("foo")( + Task.raiseError(new RuntimeException("Boom!")) + ) + t2 <- fiberMap.submit("foo")(Task(2)) + r1 <- t1.attempt + r2 <- t2 + } yield { + inside(r1) { case Left(ex) => + ex.getMessage shouldBe "Boom!" 
+ } + r2 shouldBe 2 + } + } +} diff --git a/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberSetSpec.scala b/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberSetSpec.scala new file mode 100644 index 00000000..422b85c9 --- /dev/null +++ b/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberSetSpec.scala @@ -0,0 +1,83 @@ +package io.iohk.metronome.core.fibers + +import monix.eval.Task +import monix.execution.Scheduler.Implicits.global +import monix.execution.atomic.AtomicInt +import org.scalatest.compatible.Assertion +import org.scalatest.flatspec.AsyncFlatSpec +import org.scalatest.matchers.should.Matchers +import org.scalatest.Inside +import scala.concurrent.duration._ + +class FiberSetSpec extends AsyncFlatSpec with Matchers with Inside { + + def test(t: Task[Assertion]) = + t.timeout(10.seconds).runToFuture + + behavior of "FiberSet" + + it should "reject new submissions after shutdown" in test { + FiberSet[Task].allocated.flatMap { case (fiberSet, release) => + for { + _ <- fiberSet.submit(Task("foo")) + _ <- release + r <- fiberSet.submit(Task("bar")).attempt + } yield { + inside(r) { case Left(ex) => + ex shouldBe a[IllegalStateException] + ex.getMessage should include("shut down") + } + } + } + } + + it should "cancel and raise errors in already submitted tasks after shutdown" in test { + FiberSet[Task].allocated.flatMap { case (fiberSet, release) => + for { + r <- fiberSet.submit(Task.never) + _ <- release + r <- r.attempt + } yield { + inside(r) { case Left(ex) => + ex shouldBe a[RuntimeException] + ex.getMessage should include("shut down") + } + } + } + } + + it should "return a value we can wait on" in test { + FiberSet[Task].use { fiberSet => + for { + task <- fiberSet.submit(Task("spam")) + value <- task + } yield { + value shouldBe "spam" + } + } + } + + it should "process tasks concurrently" in test { + FiberSet[Task].use { fiberSet => + val running = AtomicInt(0) + val maxRunning = AtomicInt(0) + + for { + handles <- Task.traverse(1 to 10) { _ => + val task = for { + r <- Task(running.incrementAndGet()) + _ <- Task(maxRunning.getAndTransform(m => math.max(m, r))) + _ <- Task.sleep(20.millis) // Increase chance for overlap. 
+ _ <- Task(running.decrement()) + } yield () + + fiberSet.submit(task) + } + _ <- Task.parTraverse(handles)(identity) + } yield { + running.get() shouldBe 0 + maxRunning.get() should be > 1 + } + } + } +} diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/MessageStashSpec.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/MessageStashSpec.scala new file mode 100644 index 00000000..1ef30214 --- /dev/null +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/MessageStashSpec.scala @@ -0,0 +1,111 @@ +package io.iohk.metronome.hotstuff.service + +import io.iohk.metronome.hotstuff.consensus.basic.Agreement +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import io.iohk.metronome.hotstuff.consensus.basic.{ + ProtocolError, + Event, + Message, + Phase, + QuorumCertificate +} +import io.iohk.metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.crypto.GroupSignature + +class MessageStashSpec extends AnyFlatSpec with Matchers { + import ConsensusService.MessageStash + + object TestAgreement extends Agreement { + override type Block = Nothing + override type Hash = Int + override type PSig = Nothing + override type GSig = Int + override type PKey = String + override type SKey = Nothing + } + type TestAgreement = TestAgreement.type + + "MessageStash" should behave like { + + val emptyStash = MessageStash.empty[TestAgreement] + + val error = ProtocolError.TooEarly[TestAgreement]( + Event.MessageReceived[TestAgreement]( + "Alice", + Message.NewView( + ViewNumber(10), + QuorumCertificate[TestAgreement]( + Phase.Prepare, + ViewNumber(9), + 123, + GroupSignature(456) + ) + ) + ), + expectedInViewNumber = ViewNumber(11), + expectedInPhase = Phase.Prepare + ) + val errorSlotKey = (error.expectedInViewNumber, error.expectedInPhase) + + it should "stash errors" in { + emptyStash.slots shouldBe empty + + val stash = emptyStash.stash(error) + + stash.slots should contain key errorSlotKey + stash.slots(errorSlotKey) should contain key error.event.sender + stash.slots(errorSlotKey)(error.event.sender) shouldBe error.event.message + } + + it should "stash only the last message from a sender" in { + val error2 = error.copy(event = + error.event.copy(message = + Message.NewView( + ViewNumber(10), + QuorumCertificate[TestAgreement]( + Phase.Prepare, + ViewNumber(8), + 122, + GroupSignature(455) + ) + ) + ) + ) + val stash = emptyStash.stash(error).stash(error2) + + stash.slots(errorSlotKey)( + error.event.sender + ) shouldBe error2.event.message + } + + it should "unstash due errors" in { + val errors = List( + error, + error.copy( + expectedInPhase = Phase.PreCommit + ), + error.copy( + expectedInViewNumber = error.expectedInViewNumber.next + ), + error.copy( + expectedInViewNumber = error.expectedInViewNumber.next, + expectedInPhase = Phase.Commit + ), + error.copy( + expectedInViewNumber = error.expectedInViewNumber.next.next + ) + ) + + val stash0 = errors.foldLeft(emptyStash)(_ stash _) + + val (stash1, unstashed) = stash0.unstash( + errors(2).expectedInViewNumber, + errors(2).expectedInPhase + ) + + stash1.slots.keySet should have size 2 + unstashed should have size 3 + } + } +} diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/NetworkSpec.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/NetworkSpec.scala new file mode 100644 index 00000000..82145acc --- /dev/null +++ 
b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/NetworkSpec.scala @@ -0,0 +1,107 @@ +package io.iohk.metronome.hotstuff.service + +import cats.effect.Resource +import cats.effect.concurrent.Ref +import monix.eval.Task +import monix.execution.Scheduler.Implicits.global +import org.scalatest.flatspec.AsyncFlatSpec +import org.scalatest.matchers.should.Matchers +import io.iohk.metronome.hotstuff.consensus.basic.Agreement +import io.iohk.metronome.networking.ConnectionHandler.MessageReceived +import monix.tail.Iterant + +class NetworkSpec extends AsyncFlatSpec with Matchers { + + sealed trait TestMessage + case class TestFoo(foo: String) extends TestMessage + case class TestBar(bar: Int) extends TestMessage + + object TestAgreement extends Agreement { + override type Block = Nothing + override type Hash = Nothing + override type PSig = Nothing + override type GSig = Nothing + override type PKey = String + override type SKey = Nothing + } + type TestAgreement = TestAgreement.type + + type TestKeyAndMessage = (TestAgreement.PKey, TestMessage) + type TestMessageReceived = MessageReceived[TestAgreement.PKey, TestMessage] + + class TestNetwork( + outbox: Vector[TestKeyAndMessage], + val inbox: Ref[Task, Vector[ + MessageReceived[TestAgreement.PKey, TestMessage] + ]] + ) extends Network[Task, TestAgreement, TestMessage] { + + override def incomingMessages: Iterant[Task, TestMessageReceived] = + Iterant.fromIndexedSeq { + outbox.map { case (sender, message) => + MessageReceived(sender, message) + } + } + + override def sendMessage( + recipient: TestAgreement.PKey, + message: TestMessage + ): Task[Unit] = + inbox.update(_ :+ MessageReceived(recipient, message)) + } + + object TestNetwork { + def apply(outbox: Vector[TestKeyAndMessage]) = + Ref + .of[Task, Vector[TestMessageReceived]](Vector.empty) + .map(new TestNetwork(outbox, _)) + } + + behavior of "splitter" + + it should "split and merge messages" in { + val messages = Vector( + "Alice" -> TestFoo("spam"), + "Bob" -> TestBar(42), + "Charlie" -> TestFoo("eggs") + ) + val resources = for { + network <- Resource.liftF(TestNetwork(messages)) + (fooNetwork, barNetwork) <- Network + .splitter[Task, TestAgreement, TestMessage, String, Int](network)( + split = { + case TestFoo(msg) => Left(msg) + case TestBar(msg) => Right(msg) + }, + merge = { + case Left(msg) => TestFoo(msg) + case Right(msg) => TestBar(msg) + } + ) + } yield (network, fooNetwork, barNetwork) + + val test = resources.use { case (network, fooNetwork, barNetwork) => + for { + fms <- fooNetwork.incomingMessages.take(2).toListL + bms <- barNetwork.incomingMessages.take(1).toListL + _ <- barNetwork.sendMessage("Dave", 123) + _ <- fooNetwork.sendMessage("Eve", "Adam") + _ <- barNetwork.sendMessage("Fred", 456) + nms <- network.inbox.get + } yield { + fms shouldBe List( + MessageReceived("Alice", "spam"), + MessageReceived("Charlie", "eggs") + ) + bms shouldBe List(MessageReceived("Bob", 42)) + nms shouldBe List( + MessageReceived("Dave", TestBar(123)), + MessageReceived("Eve", TestFoo("Adam")), + MessageReceived("Fred", TestBar(456)) + ) + } + } + + test.runToFuture + } +} From 382ead3bfe7c96f96a040f38553db584c6f11645 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Tue, 11 May 2021 23:36:09 +0100 Subject: [PATCH 29/48] PM-3187: HotStuff Service Tracing (#28) * PM-3187: Thread tracing through implicit parameters. * PM-3187: Emit traces from ConsensusService. * PM-3187: SyncService traces. 
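
Tracers are threaded through as implicit class parameters (see `ConsensusTracers`
and `SyncTracers` below), so call sites such as `tracers.rejected(error)` or
`tracers.timeout(viewNumber)` sit right next to the logic they instrument.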
--- .../hotstuff/service/ConsensusService.scala | 57 +++++---- .../hotstuff/service/HotStuffService.scala | 11 +- .../hotstuff/service/SyncService.scala | 113 ++++++++++-------- .../service/tracing/ConsensusEvent.scala | 47 ++++++++ .../service/tracing/ConsensusTracers.scala | 40 +++++++ .../hotstuff/service/tracing/SyncEvent.scala | 14 +++ .../service/tracing/SyncTracers.scala | 22 ++++ 7 files changed, 228 insertions(+), 76 deletions(-) create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala index e3a651c1..8b8ee38f 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala @@ -19,11 +19,13 @@ import io.iohk.metronome.hotstuff.consensus.basic.{ } import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe import io.iohk.metronome.hotstuff.service.storage.BlockStorage +import io.iohk.metronome.hotstuff.service.tracing.ConsensusTracers import io.iohk.metronome.networking.ConnectionHandler import io.iohk.metronome.storage.KVStoreRunner import monix.catnap.ConcurrentQueue import scala.annotation.tailrec import scala.collection.immutable.Queue +import scala.util.control.NonFatal /** An effectful executor wrapping the pure HotStuff ProtocolState. * @@ -32,7 +34,6 @@ import scala.collection.immutable.Queue class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( publicKey: A#PKey, network: Network[F, A, Message[A]], - storeRunner: KVStoreRunner[F, N], blockStorage: BlockStorage[N, A], stateRef: Ref[F, ProtocolState[A]], stashRef: Ref[F, ConsensusService.MessageStash[A]], @@ -41,7 +42,7 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( blockExecutionQueue: ConcurrentQueue[F, Effect.ExecuteBlocks[A]], fiberSet: FiberSet[F], maxEarlyViewNumberDiff: Int -) { +)(implicit tracers: ConsensusTracers[F, A], storeRunner: KVStoreRunner[F, N]) { /** Get the current protocol state, perhaps to respond to status requests. */ def getState: F[ProtocolState[A]] = @@ -83,15 +84,13 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( .as(none) case Right(valid) if valid.message.viewNumber < state.viewNumber => - // TODO: Trace that obsolete message was received. - // TODO: Also collect these for the round so we can realise if we're out of sync. - none.pure[F] + // TODO (PM-3063): Also collect these for the round so we can realise if we're out of sync. + tracers.fromPast(valid).as(none) case Right(valid) if valid.message.viewNumber > state.viewNumber + maxEarlyViewNumberDiff => - // TODO: Trace that a message from view far ahead in the future was received. - // TODO: Also collect these for the round so we can realise if we're out of sync. - none.pure[F] + // TODO (PM-3063): Also collect these for the round so we can realise if we're out of sync. 
+ tracers.fromFuture(valid).as(none) case Right(valid) => // We know that the message is to/from the leader and it's properly signed, @@ -140,12 +139,11 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( } } - /** Report an invalid message. */ + /** Trace an invalid message. Other penalties could also be applied to the sender. */ private def protocolError( error: ProtocolError[A] ): F[Unit] = - // TODO: Trace - ().pure[F] + tracers.rejected(error) /** Add a Prepare message to the synchronisation and validation queue. * @@ -193,9 +191,14 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( eventQueue.poll.flatMap { event => stateRef.get.flatMap { state => val handle: F[Unit] = event match { - case e @ Event.NextView(_) => + case e @ Event.NextView(viewNumber) + if viewNumber < state.viewNumber => + ().pure[F] + + case e @ Event.NextView(viewNumber) => // TODO (PM-3063): Check whether we have timed out because we are out of sync - handleTransition(state.handleNextView(e)) + tracers.timeout(viewNumber) >> + handleTransition(state.handleNextView(e)) case e @ Event.MessageReceived(_, _) => handleTransitionAttempt( @@ -245,7 +248,10 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( requeue.whenA( nextState.viewNumber != state.viewNumber || nextState.phase != state.phase - ) + ) >> + tracers + .newView(nextState.viewNumber) + .whenA(nextState.viewNumber != state.viewNumber) } /** Carry out local effects before anything else, @@ -302,8 +308,8 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( transitionAttempt: ProtocolState.TransitionAttempt[A] ): F[Unit] = transitionAttempt match { case Left(error @ ProtocolError.TooEarly(_, _, _)) => - // TODO: Trace too early message. - stashRef.update { _.stash(error) } + tracers.stashed(error) >> + stashRef.update { _.stash(error) } case Left(error) => protocolError(error) @@ -328,8 +334,7 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( import Event._ import Effect._ - // TODO: Trace errors. - effect match { + val process = effect match { case ScheduleNextView(viewNumber, timeout) => val event = validated(NextView(viewNumber)) Timer[F].sleep(timeout) >> enqueueEvent(event) @@ -358,11 +363,16 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( case SendMessage(recipient, message) => network.sendMessage(recipient, message) } + + process.handleErrorWith { case NonFatal(ex) => tracers.error(ex) } } /** Update the view state with the last Commit Quorum Certificate. */ private def saveCommitQC(qc: QuorumCertificate[A]): F[Unit] = { assert(qc.phase == Phase.Commit) + tracers.quorum(qc) // TODO (PM-3112): Persist View State. ???
} @@ -442,20 +452,20 @@ object ConsensusService { def apply[F[_]: Timer: Concurrent: ContextShift, N, A <: Agreement: Block]( publicKey: A#PKey, network: Network[F, A, Message[A]], - storeRunner: KVStoreRunner[F, N], blockStorage: BlockStorage[N, A], blockSyncPipe: BlockSyncPipe[F, A]#Left, initState: ProtocolState[A], maxEarlyViewNumberDiff: Int = 1 + )(implicit + tracers: ConsensusTracers[F, A], + storeRunner: KVStoreRunner[F, N] ): Resource[F, ConsensusService[F, N, A]] = - // TODO (PM-3187): Add Tracing for { fiberSet <- FiberSet[F] service <- Resource.liftF( build[F, N, A]( publicKey, network, - storeRunner, blockStorage, blockSyncPipe, initState, @@ -476,12 +486,14 @@ object ConsensusService { ]: Timer: Concurrent: ContextShift, N, A <: Agreement: Block]( publicKey: A#PKey, network: Network[F, A, Message[A]], - storeRunner: KVStoreRunner[F, N], blockStorage: BlockStorage[N, A], blockSyncPipe: BlockSyncPipe[F, A]#Left, initState: ProtocolState[A], maxEarlyViewNumberDiff: Int, fiberSet: FiberSet[F] + )(implicit + tracers: ConsensusTracers[F, A], + storeRunner: KVStoreRunner[F, N] ): F[ConsensusService[F, N, A]] = for { stateRef <- Ref[F].of(initState) @@ -494,7 +506,6 @@ object ConsensusService { service = new ConsensusService( publicKey, network, - storeRunner, blockStorage, stateRef, stashRef, diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala index 04ab3fe1..27f7be69 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala @@ -13,6 +13,10 @@ import io.iohk.metronome.hotstuff.service.messages.{ } import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe import io.iohk.metronome.hotstuff.service.storage.BlockStorage +import io.iohk.metronome.hotstuff.service.tracing.{ + ConsensusTracers, + SyncTracers +} import io.iohk.metronome.storage.KVStoreRunner object HotStuffService { @@ -21,9 +25,12 @@ object HotStuffService { def apply[F[_]: Concurrent: ContextShift: Timer, N, A <: Agreement: Block]( publicKey: A#PKey, network: Network[F, A, HotStuffMessage[A]], - storeRunner: KVStoreRunner[F, N], blockStorage: BlockStorage[N, A], initState: ProtocolState[A] + )(implicit + consensusTracers: ConsensusTracers[F, A], + syncTracers: SyncTracers[F, A], + storeRunner: KVStoreRunner[F, N] ): Resource[F, Unit] = for { (consensusNetwork, syncNetwork) <- Network @@ -45,7 +52,6 @@ object HotStuffService { consensusService <- ConsensusService( publicKey, consensusNetwork, - storeRunner, blockStorage, blockSyncPipe.left, initState @@ -53,7 +59,6 @@ object HotStuffService { syncService <- SyncService( syncNetwork, - storeRunner, blockStorage, blockSyncPipe.right, consensusService.getState diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala index e5a827f7..7cc8ab9d 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala @@ -1,15 +1,16 @@ package io.iohk.metronome.hotstuff.service import cats.implicits._ -import cats.effect.{Sync, Resource, Concurrent} +import cats.effect.{Sync, Resource, Concurrent, ContextShift} import io.iohk.metronome.core.fibers.FiberMap import 
io.iohk.metronome.hotstuff.consensus.basic.{Agreement, ProtocolState} import io.iohk.metronome.hotstuff.service.messages.SyncMessage import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe import io.iohk.metronome.hotstuff.service.storage.BlockStorage +import io.iohk.metronome.hotstuff.service.tracing.SyncTracers import io.iohk.metronome.networking.ConnectionHandler import io.iohk.metronome.storage.KVStoreRunner -import cats.effect.ContextShift +import scala.util.control.NonFatal /** The `SyncService` handles the `SyncMessage`s coming from the network, * i.e. serving block and status requests, as well as receiving responses * @@ -23,12 +24,11 @@ */ class SyncService[F[_]: Sync, N, A <: Agreement]( network: Network[F, A, SyncMessage[A]], - storeRunner: KVStoreRunner[F, N], blockStorage: BlockStorage[N, A], blockSyncPipe: BlockSyncPipe[F, A]#Right, getState: F[ProtocolState[A]], fiberMap: FiberMap[F, A#PKey] -) { +)(implicit tracers: SyncTracers[F, A], storeRunner: KVStoreRunner[F, N]) { /** Request a block from a peer. * @@ -42,63 +42,76 @@ */ def getStatus(from: A#PKey): F[Option[Status[A]]] = ??? - /** Process incoming network messages */ + /** Process incoming network messages. */ private def processNetworkMessages: F[Unit] = { - import SyncMessage._ // TODO (PM-3186): Rate limiting per source. network.incomingMessages .mapEval[Unit] { case ConnectionHandler.MessageReceived(from, message) => - val handler: F[Unit] = - message match { - case GetStatusRequest(requestId) => - getState.flatMap { state => - val status = - Status(state.viewNumber, state.prepareQC, state.commitQC) - - network.sendMessage( - from, - GetStatusResponse(requestId, status) - ) - } - - case GetBlockRequest(requestId, blockHash) => - storeRunner - .runReadOnly { - blockStorage.get(blockHash) - } - .flatMap { - case None => - ().pure[F] - case Some(block) => - network.sendMessage( - from, - GetBlockResponse(requestId, block) - ) - } - - case GetStatusResponse(requestId, status) => - // TODO (PM-3063): Hand over to view synchronisation. - ??? - - case GetBlockResponse(requestId, block) => - // TODO (PM-3134): Hand over to block synchronisation. - ??? - } - - // TODO: Catch and trace errors. - // Handle on a fiber dedicated to the source. fiberMap - .submit(from)(handler) + .submit(from) { + processNetworkMessage(from, message) + } .attemptNarrow[FiberMap.QueueFullException] .flatMap { case Right(_) => ().pure[F] - case Left(ex) => ().pure[F] // TODO: Trace submission error. + case Left(_) => tracers.queueFull(from) } } .completedL } + /** Process one incoming network message. + * + * It's going to be executed on a fiber. + */ + private def processNetworkMessage( + from: A#PKey, + message: SyncMessage[A] + ): F[Unit] = { + import SyncMessage._ + + val process = message match { + case GetStatusRequest(requestId) => + getState.flatMap { state => + val status = + Status(state.viewNumber, state.prepareQC, state.commitQC) + + network.sendMessage( + from, + GetStatusResponse(requestId, status) + ) + } + + case GetBlockRequest(requestId, blockHash) => + storeRunner + .runReadOnly { + blockStorage.get(blockHash) + } + .flatMap { + case None => + ().pure[F] + case Some(block) => + network.sendMessage( + from, + GetBlockResponse(requestId, block) + ) + } + + case GetStatusResponse(requestId, status) => + // TODO (PM-3063): Hand over to view synchronisation. + ???
+ + case GetBlockResponse(requestId, block) => + // TODO (PM-3134): Hand over to block synchronisation. + ??? + } + + process.handleErrorWith { case NonFatal(ex) => + tracers.error(ex) + } + } + /** Read Requests from the BlockSyncPipe and send Responses. */ def processBlockSyncPipe: F[Unit] = { blockSyncPipe.receive @@ -132,18 +145,18 @@ object SyncService { */ def apply[F[_]: Concurrent: ContextShift, N, A <: Agreement]( network: Network[F, A, SyncMessage[A]], - storeRunner: KVStoreRunner[F, N], blockStorage: BlockStorage[N, A], blockSyncPipe: BlockSyncPipe[F, A]#Right, getState: F[ProtocolState[A]] + )(implicit + tracers: SyncTracers[F, A], + storeRunner: KVStoreRunner[F, N] ): Resource[F, SyncService[F, N, A]] = - // TODO (PM-3187): Add Tracing // TODO (PM-3186): Add capacity as part of rate limiting. for { fiberMap <- FiberMap[F, A#PKey]() service = new SyncService( network, - storeRunner, blockStorage, blockSyncPipe, getState, diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala new file mode 100644 index 00000000..ceccc78c --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala @@ -0,0 +1,47 @@ +package io.iohk.metronome.hotstuff.service.tracing + +import io.iohk.metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.hotstuff.consensus.basic.{ + Agreement, + Event, + ProtocolError +} +import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate + +sealed trait ConsensusEvent[+A <: Agreement] + +object ConsensusEvent { + + /** The round ended without having reached a decision. */ + case class Timeout(viewNumber: ViewNumber) extends ConsensusEvent[Nothing] + + /** The state advanced to a new view. */ + case class NewView(viewNumber: ViewNumber) extends ConsensusEvent[Nothing] + + /** Quorum over some block. */ + case class Quorum[A <: Agreement](quorumCertificate: QuorumCertificate[A]) + extends ConsensusEvent[A] + + /** A formally valid message was received from an earlier view number. */ + case class FromPast[A <: Agreement](message: Event.MessageReceived[A]) + extends ConsensusEvent[A] + + /** A formally valid message was received from a future view number. */ + case class FromFuture[A <: Agreement](message: Event.MessageReceived[A]) + extends ConsensusEvent[A] + + /** An event that arrived too early but got stashed and will be redelivered. */ + case class Stashed[A <: Agreement]( + error: ProtocolError.TooEarly[A] + ) extends ConsensusEvent[A] + + /** A rejected event. */ + case class Rejected[A <: Agreement]( + error: ProtocolError[A] + ) extends ConsensusEvent[A] + + /** An unexpected error in one of the background tasks.
*/ + case class Error( + error: Throwable + ) extends ConsensusEvent[Nothing] +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala new file mode 100644 index 00000000..dee0e679 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala @@ -0,0 +1,40 @@ +package io.iohk.metronome.hotstuff.service.tracing + +import cats.implicits._ +import io.iohk.metronome.tracer.Tracer +import io.iohk.metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.hotstuff.consensus.basic.{ + Agreement, + Event, + ProtocolError, + QuorumCertificate +} + +case class ConsensusTracers[F[_], A <: Agreement]( + timeout: Tracer[F, ViewNumber], + newView: Tracer[F, ViewNumber], + quorum: Tracer[F, QuorumCertificate[A]], + fromPast: Tracer[F, Event.MessageReceived[A]], + fromFuture: Tracer[F, Event.MessageReceived[A]], + stashed: Tracer[F, ProtocolError.TooEarly[A]], + rejected: Tracer[F, ProtocolError[A]], + error: Tracer[F, Throwable] +) + +object ConsensusTracers { + import ConsensusEvent._ + + def apply[F[_], A <: Agreement]( + tracer: Tracer[F, ConsensusEvent[A]] + ): ConsensusTracers[F, A] = + ConsensusTracers[F, A]( + timeout = tracer.contramap[ViewNumber](Timeout(_)), + newView = tracer.contramap[ViewNumber](NewView(_)), + quorum = tracer.contramap[QuorumCertificate[A]](Quorum(_)), + fromPast = tracer.contramap[Event.MessageReceived[A]](FromPast(_)), + fromFuture = tracer.contramap[Event.MessageReceived[A]](FromFuture(_)), + stashed = tracer.contramap[ProtocolError.TooEarly[A]](Stashed(_)), + rejected = tracer.contramap[ProtocolError[A]](Rejected(_)), + error = tracer.contramap[Throwable](Error(_)) + ) +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala new file mode 100644 index 00000000..b67195a3 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala @@ -0,0 +1,14 @@ +package io.iohk.metronome.hotstuff.service.tracing + +import io.iohk.metronome.hotstuff.consensus.basic.Agreement + +sealed trait SyncEvent[+A <: Agreement] + +object SyncEvent { + + /** A federation member is sending us so many requests that its work queue is full. */ + case class QueueFull[A <: Agreement](publicKey: A#PKey) extends SyncEvent[A] + + /** An unexpected error in one of the background tasks. 
*/ + case class Error(error: Throwable) extends SyncEvent[Nothing] +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala new file mode 100644 index 00000000..a4ce6590 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala @@ -0,0 +1,22 @@ +package io.iohk.metronome.hotstuff.service.tracing + +import cats.implicits._ +import io.iohk.metronome.tracer.Tracer +import io.iohk.metronome.hotstuff.consensus.basic.Agreement + +case class SyncTracers[F[_], A <: Agreement]( + queueFull: Tracer[F, A#PKey], + error: Tracer[F, Throwable] +) + +object SyncTracers { + import SyncEvent._ + + def apply[F[_], A <: Agreement]( + tracer: Tracer[F, SyncEvent[A]] + ): SyncTracers[F, A] = + SyncTracers[F, A]( + queueFull = tracer.contramap[A#PKey](QueueFull(_)), + error = tracer.contramap[Throwable](Error(_)) + ) +} From fc3ecb5d0b44d32c62a791d87173291b0a206e81 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Fri, 14 May 2021 09:01:16 +0100 Subject: [PATCH 30/48] PM-3112: ViewState persistence (#31) * PM-3112: Initializing a ViewStateStorage. * PM-3112: Separate Encoder and Decoder requirements in KVStore. * PM-3112: Set view state components, get bundle. * PM-3112: Testing the ViewStateStorage. * PM-3112: Update view state upon change of state. * PM-3112: Remove obsolete comment. * PM-3112: Use Keys.code for smaller keys. --- .../hotstuff/service/ConsensusService.scala | 59 ++++-- .../hotstuff/service/HotStuffService.scala | 7 +- .../service/storage/ViewStateStorage.scala | 147 +++++++++++++ .../storage/ViewStateStorageProps.scala | 193 ++++++++++++++++++ .../iohk/metronome/rocksdb/RocksDBStore.scala | 16 +- .../io/iohk/metronome/storage/KVStore.scala | 15 +- .../io/iohk/metronome/storage/KVStoreOp.scala | 15 +- .../iohk/metronome/storage/KVStoreRead.scala | 4 +- 8 files changed, 414 insertions(+), 42 deletions(-) create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala create mode 100644 metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala index 8b8ee38f..4ac5b610 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala @@ -18,7 +18,10 @@ import io.iohk.metronome.hotstuff.consensus.basic.{ QuorumCertificate } import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe -import io.iohk.metronome.hotstuff.service.storage.BlockStorage +import io.iohk.metronome.hotstuff.service.storage.{ + BlockStorage, + ViewStateStorage +} import io.iohk.metronome.hotstuff.service.tracing.ConsensusTracers import io.iohk.metronome.networking.ConnectionHandler import io.iohk.metronome.storage.KVStoreRunner @@ -35,6 +38,7 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( publicKey: A#PKey, network: Network[F, A, Message[A]], blockStorage: BlockStorage[N, A], + viewStateStorage: ViewStateStorage[N, A], stateRef: Ref[F, ProtocolState[A]], stashRef: Ref[F, ConsensusService.MessageStash[A]], blockSyncPipe: BlockSyncPipe[F, A]#Left, @@ -229,11 +233,40 @@ class
ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( applySyncEffects(state, effects) // Unstash messages before we change state. - unstash(nextState) >> + captureChanges(nextState) >> + unstash(nextState) >> stateRef.set(nextState) >> scheduleEffects(nextEffects) } + /** Update the view state and trace changes when they happen. */ + private def captureChanges(nextState: ProtocolState[A]): F[Unit] = { + stateRef.get.flatMap { state => + def ifChanged[T](get: ProtocolState[A] => T)(f: T => F[Unit]) = { + val prev = get(state) + val next = get(nextState) + f(next).whenA(prev != next) + } + + ifChanged(_.viewNumber)(updateViewNumber) >> + ifChanged(_.prepareQC)(updateQuorum) >> + ifChanged(_.lockedQC)(updateQuorum) >> + ifChanged(_.commitQC)(updateQuorum) + } + } + + private def updateViewNumber(viewNumber: ViewNumber): F[Unit] = + tracers.newView(viewNumber) >> + storeRunner.runReadWrite { + viewStateStorage.setViewNumber(viewNumber) + } + + private def updateQuorum(quorumCertificate: QuorumCertificate[A]): F[Unit] = + tracers.quorum(quorumCertificate) >> + storeRunner.runReadWrite { + viewStateStorage.setQuorumCertificate(quorumCertificate) + } + /** Requeue messages which arrived too early, but are now due because * the state caught up with them. */ @@ -248,10 +281,7 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( requeue.whenA( nextState.viewNumber != state.viewNumber || nextState.phase != state.phase - ) >> - tracers - .newView(nextState.viewNumber) - .whenA(nextState.viewNumber != state.viewNumber) + ) } /** Carry out local effects before anything else, @@ -355,10 +385,7 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( // the foreground here, but it may cause the node to lose its // sync with the other federation members, so the execution // should be offloaded to another queue. - // - // Save the Commit Quorum Certificate to the view state. - saveCommitQC(commitQC) >> - blockExecutionQueue.offer(effect) + blockExecutionQueue.offer(effect) case SendMessage(recipient, message) => network.sendMessage(recipient, message) @@ -369,14 +396,6 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( } } - /** Update the view state with the last Commit Quorum Certificate. */ - private def saveCommitQC(qc: QuorumCertificate[A]): F[Unit] = { - assert(qc.phase == Phase.Commit) - tracers.quorum(qc) - // TODO (PM-3112): Persist View State. - ??? - } - /** Execute blocks in order, updating persistent storage along the way.
*/ private def executeBlocks: F[Unit] = { blockExecutionQueue.poll.flatMap { @@ -453,6 +472,7 @@ object ConsensusService { publicKey: A#PKey, network: Network[F, A, Message[A]], blockStorage: BlockStorage[N, A], + viewStateStorage: ViewStateStorage[N, A], blockSyncPipe: BlockSyncPipe[F, A]#Left, initState: ProtocolState[A], maxEarlyViewNumberDiff: Int = 1 @@ -467,6 +487,7 @@ object ConsensusService { publicKey, network, blockStorage, + viewStateStorage, blockSyncPipe, initState, maxEarlyViewNumberDiff, @@ -487,6 +508,7 @@ object ConsensusService { publicKey: A#PKey, network: Network[F, A, Message[A]], blockStorage: BlockStorage[N, A], + viewStateStorage: ViewStateStorage[N, A], blockSyncPipe: BlockSyncPipe[F, A]#Left, initState: ProtocolState[A], maxEarlyViewNumberDiff: Int, @@ -507,6 +529,7 @@ object ConsensusService { publicKey, network, blockStorage, + viewStateStorage, stateRef, stashRef, blockSyncPipe, diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala index 27f7be69..23112c96 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala @@ -12,7 +12,10 @@ import io.iohk.metronome.hotstuff.service.messages.{ SyncMessage } import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe -import io.iohk.metronome.hotstuff.service.storage.BlockStorage +import io.iohk.metronome.hotstuff.service.storage.{ + BlockStorage, + ViewStateStorage +} import io.iohk.metronome.hotstuff.service.tracing.{ ConsensusTracers, SyncTracers @@ -26,6 +29,7 @@ object HotStuffService { publicKey: A#PKey, network: Network[F, A, HotStuffMessage[A]], blockStorage: BlockStorage[N, A], + viewStateStorage: ViewStateStorage[N, A], initState: ProtocolState[A] )(implicit consensusTracers: ConsensusTracers[F, A], @@ -53,6 +57,7 @@ object HotStuffService { publicKey, consensusNetwork, blockStorage, + viewStateStorage, blockSyncPipe.left, initState ) diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala new file mode 100644 index 00000000..b0d887fc --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala @@ -0,0 +1,147 @@ +package io.iohk.metronome.hotstuff.service.storage + +import cats.implicits._ +import io.iohk.metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.hotstuff.consensus.basic.{ + Agreement, + QuorumCertificate, + Phase +} +import io.iohk.metronome.storage.{KVStore, KVStoreRead} +import scodec.{Codec, Encoder, Decoder} + +class ViewStateStorage[N, A <: Agreement] private ( + namespace: N +)(implicit + keys: ViewStateStorage.Keys[A], + kvn: KVStore.Ops[N], + kvrn: KVStoreRead.Ops[N], + codecVN: Codec[ViewNumber], + codecQC: Codec[QuorumCertificate[A]], + codecH: Codec[A#Hash] +) { + import keys.Key + + private def put[V: Encoder](key: Key[V], value: V) = + KVStore[N].put[Key[V], V](namespace, key, value) + + private def read[V: Decoder](key: Key[V]): KVStoreRead[N, V] = + KVStoreRead[N].read[Key[V], V](namespace, key).map(_.get) + + def setViewNumber(viewNumber: ViewNumber): KVStore[N, Unit] = + put(Key.ViewNumber, viewNumber) + + def setQuorumCertificate(qc: QuorumCertificate[A]): KVStore[N, Unit] = + qc.phase match { 
+ case Phase.Prepare => + put(Key.PrepareQC, qc) + case Phase.PreCommit => + put(Key.LockedQC, qc) + case Phase.Commit => + put(Key.CommitQC, qc) + } + + def setLastExecutedBlockHash(blockHash: A#Hash): KVStore[N, Unit] = + put(Key.LastExecutedBlockHash, blockHash) + + def getBundle: KVStoreRead[N, ViewStateStorage.Bundle[A]] = + ( + read(Key.ViewNumber), + read(Key.PrepareQC), + read(Key.LockedQC), + read(Key.CommitQC), + read(Key.LastExecutedBlockHash) + ).mapN(ViewStateStorage.Bundle.apply[A] _) + +} + +object ViewStateStorage { + + /** Storing elements of the view state individually under separate keys, + * because they get written independently. + */ + trait Keys[A <: Agreement] { + sealed abstract class Key[V](private val code: Int) + object Key { + case object ViewNumber extends Key[ViewNumber](0) + case object PrepareQC extends Key[QuorumCertificate[A]](1) + case object LockedQC extends Key[QuorumCertificate[A]](2) + case object CommitQC extends Key[QuorumCertificate[A]](3) + case object LastExecutedBlockHash extends Key[A#Hash](4) + + implicit def encoder[V]: Encoder[Key[V]] = + scodec.codecs.uint8.contramap[Key[V]](_.code) + } + } + + /** The state of consensus that needs to be persisted between restarts. + * + * The fields are a subset of the `ProtocolState` but have a slightly + * different life cycle, e.g. `lastExecutedBlockHash` is only updated + * when the blocks are actually executed, which happens asynchronously. + */ + case class Bundle[A <: Agreement]( + viewNumber: ViewNumber, + prepareQC: QuorumCertificate[A], + lockedQC: QuorumCertificate[A], + commitQC: QuorumCertificate[A], + lastExecutedBlockHash: A#Hash + ) { + assert(prepareQC.phase == Phase.Prepare) + assert(lockedQC.phase == Phase.PreCommit) + assert(commitQC.phase == Phase.Commit) + } + object Bundle { + + /** Convenience method reflecting the expectation that the signature + * in the genesis Q.C. will not depend on the phase, just the genesis + * hash. + */ + def fromGenesisQC[A <: Agreement](genesisQC: QuorumCertificate[A]) = + Bundle[A]( + viewNumber = genesisQC.viewNumber, + prepareQC = genesisQC.copy[A](phase = Phase.Prepare), + lockedQC = genesisQC.copy[A](phase = Phase.PreCommit), + commitQC = genesisQC.copy[A](phase = Phase.Commit), + lastExecutedBlockHash = genesisQC.blockHash + ) + } + + /** Create a ViewStateStorage instance by pre-loading it with the genesis, + * unless it already has data.
+ */ + def apply[N, A <: Agreement]( + namespace: N, + genesis: Bundle[A] + )(implicit + codecVN: Codec[ViewNumber], + codecQC: Codec[QuorumCertificate[A]], + codecH: Codec[A#Hash] + ): KVStore[N, ViewStateStorage[N, A]] = { + implicit val kvn = KVStore.instance[N] + implicit val kvrn = KVStoreRead.instance[N] + implicit val keys = new Keys[A] {} + import keys.Key + + def setDefault[V](default: V): Option[V] => Option[V] = + (current: Option[V]) => current orElse Some(default) + + for { + _ <- KVStore[N].alter(namespace, Key.ViewNumber)( + setDefault(genesis.viewNumber) + ) + _ <- KVStore[N].alter(namespace, Key.PrepareQC)( + setDefault(genesis.prepareQC) + ) + _ <- KVStore[N].alter(namespace, Key.LockedQC)( + setDefault(genesis.lockedQC) + ) + _ <- KVStore[N].alter(namespace, Key.CommitQC)( + setDefault(genesis.commitQC) + ) + _ <- KVStore[N].alter(namespace, Key.LastExecutedBlockHash)( + setDefault(genesis.lastExecutedBlockHash) + ) + } yield new ViewStateStorage[N, A](namespace) + } +} diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala new file mode 100644 index 00000000..226cbdb8 --- /dev/null +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala @@ -0,0 +1,193 @@ +package io.iohk.metronome.hotstuff.service.storage + +import io.iohk.metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.hotstuff.consensus.basic.{ + QuorumCertificate, + Phase, + Agreement +} +import scala.annotation.nowarn +import io.iohk.metronome.crypto.GroupSignature +import io.iohk.metronome.storage.{KVStore, KVStoreRead, KVStoreState} +import org.scalacheck.{Gen, Prop, Properties} +import org.scalacheck.commands.Commands +import org.scalacheck.Arbitrary.arbitrary +import scala.util.Try +import scodec.bits.BitVector +import scodec.Codec +import scala.util.Success + +object ViewStateStorageProps extends Properties("ViewStateStorage") { + property("commands") = ViewStateStorageCommands.property() +} + +object ViewStateStorageCommands extends Commands { + object TestAggreement extends Agreement { + type Block = Nothing + type Hash = String + type PSig = Unit + type GSig = List[String] + type PKey = Nothing + type SKey = Nothing + } + type TestAggreement = TestAggreement.type + + type Namespace = String + + object TestKVStoreState extends KVStoreState[Namespace] + + type TestViewStateStorage = ViewStateStorage[Namespace, TestAggreement] + + class StorageWrapper( + viewStateStorage: TestViewStateStorage, + private var store: TestKVStoreState.Store + ) { + def getStore = store + + def write( + f: TestViewStateStorage => KVStore[Namespace, Unit] + ): Unit = { + store = TestKVStoreState.compile(f(viewStateStorage)).runS(store).value + } + + def read[A]( + f: TestViewStateStorage => KVStoreRead[Namespace, A] + ): A = { + TestKVStoreState.compile(f(viewStateStorage)).run(store) + } + } + + type State = ViewStateStorage.Bundle[TestAggreement] + type Sut = StorageWrapper + + val genesisState = ViewStateStorage.Bundle + .fromGenesisQC[TestAggreement] { + QuorumCertificate[TestAggreement]( + Phase.Prepare, + ViewNumber(1), + "", + GroupSignature(Nil) + ) + } + + /** The in-memory KVStoreState doesn't invoke the codecs. 
*/ + implicit def neverUsedCodec[T] = + Codec[T]( + (_: T) => sys.error("Didn't expect to encode."), + (_: BitVector) => sys.error("Didn't expect to decode.") + ) + + @nowarn + override def canCreateNewSut( + newState: State, + initSuts: Traversable[State], + runningSuts: Traversable[Sut] + ): Boolean = true + + override def initialPreCondition(state: State): Boolean = + state == genesisState + + override def newSut(state: State): Sut = { + val init = TestKVStoreState.compile( + ViewStateStorage[Namespace, TestAggreement]("test-namespace", state) + ) + val (store, storage) = init.run(Map.empty).value + new StorageWrapper(storage, store) + } + + override def destroySut(sut: Sut): Unit = () + + override def genInitialState: Gen[State] = Gen.const(genesisState) + + override def genCommand(state: State): Gen[Command] = + Gen.oneOf( + genSetViewNumber(state), + genSetQuorumCertificate(state), + genSetLastExecutedBlockHash(state), + genGetBundle + ) + + def genSetViewNumber(state: State) = + for { + d <- Gen.posNum[Long] + vn = ViewNumber(state.viewNumber + d) + } yield SetViewNumberCommand(vn) + + def genSetQuorumCertificate(state: State) = + for { + p <- Gen.oneOf(Phase.Prepare, Phase.PreCommit, Phase.Commit) + h <- arbitrary[TestAggreement.Hash] + s <- arbitrary[TestAggreement.GSig] + qc = QuorumCertificate[TestAggreement]( + p, + state.viewNumber, + h, + GroupSignature(s) + ) + } yield SetQuorumCertificateCommand(qc) + + def genSetLastExecutedBlockHash(state: State) = + for { + h <- Gen.oneOf( + state.prepareQC.blockHash, + state.lockedQC.blockHash, + state.commitQC.blockHash + ) + } yield SetLastExecutedBlockHashCommand(h) + + val genGetBundle = Gen.const(GetBundleCommand) + + case class SetViewNumberCommand(viewNumber: ViewNumber) extends UnitCommand { + override def run(sut: Sut): Result = + sut.write(_.setViewNumber(viewNumber)) + override def nextState(state: State): State = + state.copy(viewNumber = viewNumber) + override def preCondition(state: State): Boolean = + state.viewNumber < viewNumber + override def postCondition(state: State, success: Boolean): Prop = success + } + + case class SetQuorumCertificateCommand(qc: QuorumCertificate[TestAggreement]) + extends UnitCommand { + override def run(sut: Sut): Result = + sut.write(_.setQuorumCertificate(qc)) + + override def nextState(state: State): State = + qc.phase match { + case Phase.Prepare => state.copy(prepareQC = qc) + case Phase.PreCommit => state.copy(lockedQC = qc) + case Phase.Commit => state.copy(commitQC = qc) + } + + override def preCondition(state: State): Boolean = + state.viewNumber <= qc.viewNumber + + override def postCondition(state: State, success: Boolean): Prop = success + } + + case class SetLastExecutedBlockHashCommand(blockHash: TestAggreement.Hash) + extends UnitCommand { + override def run(sut: Sut): Result = + sut.write(_.setLastExecutedBlockHash(blockHash)) + + override def nextState(state: State): State = + state.copy(lastExecutedBlockHash = blockHash) + + override def preCondition(state: State): Boolean = + Set(state.prepareQC, state.lockedQC, state.commitQC) + .map(_.blockHash) + .contains(blockHash) + + override def postCondition(state: State, success: Boolean): Prop = success + } + + case object GetBundleCommand extends Command { + type Result = ViewStateStorage.Bundle[TestAggreement] + + override def run(sut: Sut): Result = sut.read(_.getBundle) + override def nextState(state: State): State = state + override def preCondition(state: State): Boolean = true + override def postCondition(state: State, 
result: Try[Result]): Prop = + result == Success(state) + } +} diff --git a/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala b/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala index cd8b2f77..005c8306 100644 --- a/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala +++ b/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala @@ -28,7 +28,7 @@ import org.rocksdb.{ CompressionType, ClockCache } -import scodec.Codec +import scodec.{Encoder, Decoder} import scodec.bits.BitVector import scala.collection.mutable import java.nio.file.Path @@ -85,14 +85,14 @@ class RocksDBStore[F[_]: Sync]( /** Execute one `Get` operation. */ private def read[K, V](op: Get[Namespace, K, V]): F[Option[V]] = { for { - kbs <- encode(op.key)(op.keyCodec) + kbs <- encode(op.key)(op.keyEncoder) mvbs <- db.read(handles(op.namespace), kbs) mv <- mvbs match { case None => none.pure[F] case Some(bytes) => - decode(bytes)(op.valueCodec).map(_.some) + decode(bytes)(op.valueDecoder).map(_.some) } } yield mv } @@ -110,8 +110,8 @@ class RocksDBStore[F[_]: Sync]( case op @ Put(n, k, v) => ReaderT { batch => for { - kbs <- encode(k)(op.keyCodec) - vbs <- encode(v)(op.valueCodec) + kbs <- encode(k)(op.keyEncoder) + vbs <- encode(v)(op.valueEncoder) _ = batch.put(handles(n), kbs, vbs) } yield () } @@ -123,7 +123,7 @@ class RocksDBStore[F[_]: Sync]( case op @ Delete(n, k) => ReaderT { batch => for { - kbs <- encode(k)(op.keyCodec) + kbs <- encode(k)(op.keyEncoder) _ = batch.delete(handles(n), kbs) } yield () } @@ -146,10 +146,10 @@ class RocksDBStore[F[_]: Sync]( } } - private def encode[T](value: T)(implicit ev: Codec[T]): F[Array[Byte]] = + private def encode[T](value: T)(implicit ev: Encoder[T]): F[Array[Byte]] = Sync[F].fromTry(ev.encode(value).map(_.toByteArray).toTry) - private def decode[T](bytes: Array[Byte])(implicit ev: Codec[T]): F[T] = + private def decode[T](bytes: Array[Byte])(implicit ev: Decoder[T]): F[T] = Sync[F].fromTry(ev.decodeValue(BitVector(bytes)).toTry) /** Mostly meant for writing batches atomically. diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala index ab66c80a..9a61383b 100644 --- a/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala +++ b/metronome/storage/src/io/iohk/metronome/storage/KVStore.scala @@ -3,7 +3,7 @@ package io.iohk.metronome.storage import cats.{~>} import cats.free.Free import cats.free.Free.liftF -import scodec.Codec +import scodec.{Encoder, Decoder, Codec} /** Helper methods to read/write a key-value store. */ object KVStore { @@ -32,7 +32,7 @@ object KVStore { def pure[A](a: A) = KVStore.pure[N, A](a) /** Insert or replace a value under a key. */ - def put[K: Codec, V: Codec]( + def put[K: Encoder, V: Encoder]( namespace: N, key: K, value: V @@ -42,25 +42,28 @@ object KVStore { ) /** Get a value under a key, if it exists. */ - def get[K: Codec, V: Codec](namespace: N, key: K): KVStore[N, Option[V]] = + def get[K: Encoder, V: Decoder]( + namespace: N, + key: K + ): KVStore[N, Option[V]] = liftF[KVNamespacedOp, Option[V]]( Get[N, K, V](namespace, key) ) /** Delete a value under a key. */ - def delete[K: Codec](namespace: N, key: K): KVStore[N, Unit] = + def delete[K: Encoder](namespace: N, key: K): KVStore[N, Unit] = liftF[KVNamespacedOp, Unit]( Delete[N, K](namespace, key) ) /** Apply a function on a value, if it exists. 
*/ - def update[K: Codec, V: Codec](namespace: N, key: K)( + def update[K: Encoder, V: Codec](namespace: N, key: K)( f: V => V ): KVStore[N, Unit] = alter[K, V](namespace, key)(_ map f) /** Insert, update or delete a value, depending on whether it exists. */ - def alter[K: Codec, V: Codec](namespace: N, key: K)( + def alter[K: Encoder, V: Codec](namespace: N, key: K)( f: Option[V] => Option[V] ): KVStore[N, Unit] = get[K, V](namespace, key).flatMap { current => diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStoreOp.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStoreOp.scala index 44f59287..1e929092 100644 --- a/metronome/storage/src/io/iohk/metronome/storage/KVStoreOp.scala +++ b/metronome/storage/src/io/iohk/metronome/storage/KVStoreOp.scala @@ -1,6 +1,6 @@ package io.iohk.metronome.storage -import scodec.Codec +import scodec.{Encoder, Decoder} /** Representing key-value storage operations as a Free Monad, * so that we can pick an execution strategy that best fits @@ -20,15 +20,16 @@ sealed trait KVStoreWriteOp[N, A] extends KVStoreOp[N, A] object KVStoreOp { case class Put[N, K, V](namespace: N, key: K, value: V)(implicit - val keyCodec: Codec[K], - val valueCodec: Codec[V] + val keyEncoder: Encoder[K], + val valueEncoder: Encoder[V] ) extends KVStoreWriteOp[N, Unit] case class Get[N, K, V](namespace: N, key: K)(implicit - val keyCodec: Codec[K], - val valueCodec: Codec[V] + val keyEncoder: Encoder[K], + val valueDecoder: Decoder[V] ) extends KVStoreReadOp[N, Option[V]] - case class Delete[N, K](namespace: N, key: K)(implicit val keyCodec: Codec[K]) - extends KVStoreWriteOp[N, Unit] + case class Delete[N, K](namespace: N, key: K)(implicit + val keyEncoder: Encoder[K] + ) extends KVStoreWriteOp[N, Unit] } diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala b/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala index 39f923bd..000db508 100644 --- a/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala +++ b/metronome/storage/src/io/iohk/metronome/storage/KVStoreRead.scala @@ -2,7 +2,7 @@ package io.iohk.metronome.storage import cats.free.Free import cats.free.Free.liftF -import scodec.Codec +import scodec.{Encoder, Decoder} /** Helper methods to compose operations that strictly only do reads, no writes. * @@ -29,7 +29,7 @@ object KVStoreRead { def pure[A](a: A) = KVStoreRead.pure[N, A](a) - def read[K: Codec, V: Codec]( + def read[K: Encoder, V: Decoder]( namespace: N, key: K ): KVStoreRead[N, Option[V]] = From 75fc8a73989d30fde4b768d79902997dffcaed67 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Mon, 17 May 2021 11:40:44 +0100 Subject: [PATCH 31/48] PM-3105: Ledger persistence (#34) * PM-3105: Added LedgerStorage. * PM-3105: Testing LedgerStorage. * PM-3105: Generalized to a KVRingBuffer. * PM-3105: Fix ringbuffer to count references. 
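The `KVRingBuffer` introduced below keeps the last N inserted items and reference-counts keys, so a value referenced by several buckets survives until its last reference is overwritten. A minimal mutable, in-memory model of that eviction logic follows; the names are illustrative, and the real implementation in the diff below is expressed against the `KVStore` free monad instead.

```scala
// Sketch only: models the ring-buffer-with-reference-counting scheme.
final class RingBufferModel[K, V](maxSize: Int) {
  require(maxSize > 0)

  private var index    = -1
  private val buckets  = Array.fill[Option[K]](maxSize)(None)
  private var refCount = Map.empty[K, Int]
  private var values   = Map.empty[K, V]

  private def rc(key: K): Int = refCount.getOrElse(key, 0)

  /** Insert a value, returning the key that got evicted, if any. */
  def put(key: K, value: V): Option[K] = {
    index = (index + 1) % maxSize
    val oldest = buckets(index)
    buckets(index) = Some(key)
    oldest match {
      case Some(old) if old == key =>
        None // The bucket already pointed at this key; nothing changes.
      case _ =>
        if (rc(key) == 0) values += key -> value
        refCount += key -> (rc(key) + 1)
        oldest.flatMap { old =>
          if (rc(old) > 1) {
            refCount += old -> (rc(old) - 1) // Still referenced by another bucket.
            None
          } else {
            refCount -= old
            values -= old
            Some(old) // Last reference overwritten: evicted.
          }
        }
    }
  }

  def get(key: K): Option[V] = values.get(key)
}
```

For example, with `maxSize = 2`, putting `a`, `b`, `c` evicts `a`, while putting `a`, `a`, `b` evicts nothing, because the second bucket still references `a`.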
--- build.sc | 5 +- .../service/storage/LedgerStorage.scala | 43 +++++++ .../service/storage/LedgerStorageProps.scala | 76 ++++++++++++ .../iohk/metronome/storage/KVRingBuffer.scala | 116 ++++++++++++++++++ 4 files changed, 239 insertions(+), 1 deletion(-) create mode 100644 metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorage.scala create mode 100644 metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorageProps.scala create mode 100644 metronome/storage/src/io/iohk/metronome/storage/KVRingBuffer.scala diff --git a/build.sc b/build.sc index 5628395e..e7cc1245 100644 --- a/build.sc +++ b/build.sc @@ -314,7 +314,10 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { checkpointing.interpreter ) - object test extends TestModule + object test extends TestModule { + override def moduleDeps: Seq[JavaModule] = + super.moduleDeps ++ Seq(checkpointing.models.test) + } } /** Executable application for running HotStuff and checkpointing as a stand-alone process, diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorage.scala b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorage.scala new file mode 100644 index 00000000..ff11d4ef --- /dev/null +++ b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorage.scala @@ -0,0 +1,43 @@ +package io.iohk.metronome.checkpointing.service.storage + +import cats.implicits._ +import io.iohk.metronome.checkpointing.models.Ledger +import io.iohk.metronome.storage.{KVRingBuffer, KVCollection, KVStore} +import scodec.Codec + +/** Storing the committed and executed checkpoint ledger. + * + * Strictly speaking the application only needs the committed state, + * since it has been signed by the federation and we know it's not + * going to be rolled back. Uncommitted state can be kept in memory. + * + * However we want to support other nodes catching up by: + * 1. requesting the latest Commit Q.C., then + * 2. requesting the block the Commit Q.C. points at, then + * 3. requesting the ledger state the header points at. + * + * We have to allow some time before we get rid of historical state, + * so that it doesn't disappear between step 2 and 3, resulting in + * nodes trying and trying to catch up but always missing the beat. + * + * Therefore we keep a collection of the last N ledgers in a ring buffer. + */ +class LedgerStorage[N]( + ledgerColl: KVCollection[N, Ledger.Hash, Ledger], + ledgerMetaNamespace: N, + maxHistorySize: Int +)(implicit codecH: Codec[Ledger.Hash]) + extends KVRingBuffer[N, Ledger.Hash, Ledger]( + ledgerColl, + ledgerMetaNamespace, + maxHistorySize + ) { + + /** Save a new ledger and remove the oldest one, if we reached + * the maximum history size. Since we only store committed + * state, they form a chain. They will always be retrieved + * by going through a block pointing at them directly. 
+ */ + def put(ledger: Ledger): KVStore[N, Unit] = + put(ledger.hash, ledger).void +} diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorageProps.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorageProps.scala new file mode 100644 index 00000000..7c4e996a --- /dev/null +++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorageProps.scala @@ -0,0 +1,76 @@ +package io.iohk.metronome.checkpointing.service.storage + +import cats.implicits._ +import io.iohk.metronome.core.Tagger +import io.iohk.metronome.checkpointing.models.Ledger +import io.iohk.metronome.checkpointing.models.ArbitraryInstances +import io.iohk.metronome.storage.{KVCollection, KVStoreState} +import org.scalacheck.{Properties, Gen, Arbitrary}, Arbitrary.arbitrary +import org.scalacheck.Prop.{forAll, all, propBoolean} +import scodec.Codec +import scodec.bits.BitVector +import org.scalacheck.Shrink +import scala.annotation.nowarn + +object LedgerStorageProps extends Properties("LedgerStorage") { + import ArbitraryInstances.arbLedger + + type Namespace = String + object Namespace { + val Ledgers = "ledgers" + val LedgerMeta = "ledger-meta" + } + + /** The in-memory KVStoreState doesn't invoke the codecs. */ + implicit def neverUsedCodec[T] = + Codec[T]( + (_: T) => sys.error("Didn't expect to encode."), + (_: BitVector) => sys.error("Didn't expect to decode.") + ) + + object TestKVStore extends KVStoreState[Namespace] + + object HistorySize extends Tagger[Int] { + @nowarn + implicit val shrink: Shrink[HistorySize] = Shrink(s => Stream.empty) + implicit val arb: Arbitrary[HistorySize] = Arbitrary { + Gen.choose(1, 10).map(HistorySize(_)) + } + } + type HistorySize = HistorySize.Tagged + + property("buffer") = forAll( + for { + ledgers <- arbitrary[List[Ledger]] + maxSize <- arbitrary[HistorySize] + } yield (ledgers, maxSize) + ) { case (ledgers, maxSize) => + val ledgerStorage = new LedgerStorage[Namespace]( + new KVCollection[Namespace, Ledger.Hash, Ledger](Namespace.Ledgers), + Namespace.LedgerMeta, + maxHistorySize = maxSize + ) + + val store = + TestKVStore + .compile(ledgers.traverse(ledgerStorage.put)) + .runS(Map.empty) + .value + + def getByHash(ledgerHash: Ledger.Hash) = + TestKVStore.compile(ledgerStorage.get(ledgerHash)).run(store) + + val ledgerMap = store.get(Namespace.Ledgers).getOrElse(Map.empty[Any, Any]) + val (current, old) = ledgers.reverse.splitAt(maxSize) + + all( + "max-history" |: ledgerMap.values.size <= maxSize, + "contains current" |: current.forall { ledger => + getByHash(ledger.hash).contains(ledger) + }, + "not contain old" |: old.forall { ledger => + getByHash(ledger.hash).isEmpty + } + ) + } +} diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVRingBuffer.scala b/metronome/storage/src/io/iohk/metronome/storage/KVRingBuffer.scala new file mode 100644 index 00000000..414506f3 --- /dev/null +++ b/metronome/storage/src/io/iohk/metronome/storage/KVRingBuffer.scala @@ -0,0 +1,116 @@ +package io.iohk.metronome.storage + +import cats.implicits._ +import scodec.{Decoder, Encoder, Codec} + +/** Storing the last N items inserted into a collection. 
*/ +class KVRingBuffer[N, K, V]( + coll: KVCollection[N, K, V], + metaNamespace: N, + maxHistorySize: Int +)(implicit codecK: Codec[K]) { + require(maxHistorySize > 0, "Has to store at least one item in the buffer.") + + import KVRingBuffer._ + import scodec.codecs.implicits.implicitIntCodec + + private implicit val kvn = KVStore.instance[N] + + private implicit val metaKeyEncoder: Encoder[MetaKey[_]] = { + import scodec.codecs._ + import scodec.codecs.implicits._ + + val bucketIndexCodec = provide(BucketIndex) + val bucketCodec: Codec[Bucket[_]] = Codec.deriveLabelledGeneric + val keyRefCountCodec: Codec[KeyRefCount[K]] = Codec.deriveLabelledGeneric + + discriminated[MetaKey[_]] + .by(uint2) + .typecase(0, bucketIndexCodec) + .typecase(1, bucketCodec) + .typecase(2, keyRefCountCodec) + .asEncoder + } + + private def getMetaData[V: Decoder](key: MetaKey[V]) = + KVStore[N].get[MetaKey[V], V](metaNamespace, key) + + private def putMetaData[V: Encoder](key: MetaKey[V], value: V) = + KVStore[N].put(metaNamespace, key, value) + + private def setRefCount(key: K, count: Int) = + if (count > 0) + putMetaData[Int](KeyRefCount(key), count) + else + KVStore[N].delete(metaNamespace, KeyRefCount(key)) + + private def getRefCount(key: K) = + getMetaData[Int](KeyRefCount(key)).map(_ getOrElse 0) + + /** Return the index of the next bucket to write the data into. */ + private def nextIndex(maybeIndex: Option[Int]): Int = + maybeIndex.fold(0)(index => (index + 1) % maxHistorySize) + + private def add(key: K, value: V) = + getRefCount(key).flatMap { cnt => + if (cnt == 0) + setRefCount(key, 1) >> coll.put(key, value) + else + setRefCount(key, cnt + 1) + } + + private def maybeRemove(key: K) = + getRefCount(key).flatMap { cnt => + if (cnt > 1) + setRefCount(key, cnt - 1).as(none[K]) + else + setRefCount(key, 0) >> coll.delete(key).as(key.some) + } + + /** Save a new item and remove the oldest one, if we reached + * the maximum history size. + * + * Returns the key which has been evicted, unless it's still + * referenced by something or the history hasn't reached maximum + * size yet. + */ + def put(key: K, value: V): KVStore[N, Option[K]] = { + for { + index <- getMetaData(BucketIndex).map(nextIndex) + maybeOldestKey <- getMetaData(Bucket[K](index)) + maybeRemoved <- maybeOldestKey match { + case Some(oldestKey) if oldestKey == key => + KVStore[N].pure(none[K]) + + case Some(oldestKey) => + add(key, value) >> maybeRemove(oldestKey) + + case None => + add(key, value).as(none[K]) + } + _ <- putMetaData(Bucket(index), key) + _ <- putMetaData(BucketIndex, index) + } yield maybeRemoved + } + + /** Retrieve an item by hash, if we still have it. */ + def get(key: K): KVStoreRead[N, Option[V]] = + coll.read(key) +} + +object KVRingBuffer { + + /** Keys for different pieces of meta-data stored under a single namespace. */ + sealed trait MetaKey[+V] + + /** Key under which the last written index of the ring buffer is stored. */ + case object BucketIndex extends MetaKey[Int] + + /** Contents of a ring buffer bucket by index. */ + case class Bucket[V](index: Int) extends MetaKey[V] { + assert(index >= 0) + } + + /** Number of buckets currently pointing at a key. */ + case class KeyRefCount[K](key: K) extends MetaKey[Int] +} From b99095c01a83bd5e635b20fde9c333a416de8f17 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Wed, 19 May 2021 09:51:42 +0100 Subject: [PATCH 32/48] PM-3134: Block synchronisation (#32) * PM-3134: Added RPCTracker. * PM-3134: Use RPCTracker in SyncService. 
* PM-3134: Wire together a skeleton BlockSynchronizer with the SyncService. * PM-3134: Simple block sync method. * PM-3134: Use deleteUnsafe in persist. * PM-3134: Added Block.isValid, calling after download. * PM-3134: Perform sync asynchronously. * PM-3134: Use the in-memory block store to restore the path. * PM-3134: Combine read and delete. * PM-3134: Use KVStoreRunner for inMemory. * PM-3134: Extract in-memory store. * PM-3134: Only sending 1 request to a peer at a time. * PM-3134: Move the fiber submission. * PM-3134: Using a combination of path, semaphore and traversal to make sure we clean up and insert safely. * PM-3134: Add cancel by key support to FiberMap. * PM-3134: Canceling any already enqueued sync and validate routine on subsequent prepare signals. * PM-3134: Not using a fiber map in BlockSynchronizer so that we can cancel it from the outside and discard it. * PM-3134: Comment about implementing state sync. * PM-3134: Testing the BlockSynchronizer. * PM-3134: Make sure there's at least one download happening. * PM-3134: Trace timeouts. * PM-3134: Common method to send a request and trace timeouts. * PM-3134: Test not downloading existing records. * PM-3134: Use seed with Random. * PM-3134: Added some comments, fixed tracker to complete with None if wrong type arrives, rather than timeout. * PM-3134: Make getBlock and getStatus referentially transparent. * PM-3134: Differentiate between response ignored because of a timeout vs error. * PM-3134: Rename to persistAndClear. * PM-3134: Ask federation members in a random order if the block cannot be downloaded from the original sender. --- .../CheckpointingAgreement.scala | 11 + .../checkpointing/models/Block.scala | 15 +- .../src/io/iohk/metronome/core/Pipe.scala | 4 +- .../metronome/core/fibers/DeferredTask.scala | 12 +- .../iohk/metronome/core/fibers/FiberMap.scala | 19 +- .../iohk/metronome/core/fibers/FiberSet.scala | 2 +- .../metronome/core/messages/RPCMessage.scala | 25 ++ .../metronome/core/messages/RPCTracker.scala | 117 ++++++++ .../metronome/core/fibers/FiberMapSpec.scala | 17 +- .../metronome/core/fibers/FiberSetSpec.scala | 3 +- .../core/messages/RPCTrackerSpec.scala | 85 ++++++ .../hotstuff/consensus/basic/Block.scala | 3 + .../basic/HotStuffProtocolProps.scala | 1 + .../hotstuff/service/ConsensusService.scala | 2 +- .../hotstuff/service/HotStuffService.scala | 5 +- .../hotstuff/service/SyncService.scala | 165 +++++++++--- .../service/messages/SyncMessage.scala | 10 +- .../service/storage/BlockStorage.scala | 6 +- .../service/sync/BlockSynchronizer.scala | 252 ++++++++++++++++++ .../hotstuff/service/tracing/SyncEvent.scala | 20 +- .../service/tracing/SyncTracers.scala | 13 + .../service/storage/BlockStorageProps.scala | 25 +- .../storage/ViewStateStorageProps.scala | 27 +- .../service/sync/BlockSynchronizerProps.scala | 211 +++++++++++++++ .../metronome/storage/InMemoryKVStore.scala | 24 ++ 25 files changed, 980 insertions(+), 94 deletions(-) create mode 100644 metronome/core/src/io/iohk/metronome/core/messages/RPCTracker.scala create mode 100644 metronome/core/test/src/io/iohk/metronome/core/messages/RPCTrackerSpec.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala create mode 100644 metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala create mode 100644 metronome/storage/src/io/iohk/metronome/storage/InMemoryKVStore.scala diff --git 
a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala index 2a9b408b..5ca27862 100644 --- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala +++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala @@ -1,6 +1,7 @@ package io.iohk.metronome.checkpointing import io.iohk.metronome.crypto +import io.iohk.metronome.hotstuff.consensus import io.iohk.metronome.hotstuff.consensus.ViewNumber import io.iohk.metronome.hotstuff.consensus.basic.{ Secp256k1Agreement, @@ -27,4 +28,14 @@ object CheckpointingAgreement extends Secp256k1Agreement { rlp.encode(phase) ++ rlp.encode(viewNumber) ++ rlp.encode(hash) ) ) + + implicit val block: consensus.basic.Block[CheckpointingAgreement] = + new consensus.basic.Block[CheckpointingAgreement] { + override def blockHash(b: models.Block) = + b.hash + override def parentBlockHash(b: models.Block) = + b.header.parentHash + override def isValid(b: models.Block) = + models.Block.isValid(b) + } } diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala index 2c4a1c8a..bdfbd724 100644 --- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala +++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala @@ -39,16 +39,18 @@ object Block { transactions: IndexedSeq[Transaction] ): Block = { val body = Body(transactions) - val txMerkleTree = - MerkleTree.build(transactions.map(tx => MerkleTree.Hash(tx.hash))) val header = Header( parentHash = parent.hash, postStateHash = postStateHash, - contentMerkleRoot = txMerkleTree.hash + contentMerkleRoot = Body.contentMerkleRoot(body) ) makeUnsafe(header, body) } + /** Check that the block hashes are valid. */ + def isValid(block: Block): Boolean = + block.header.contentMerkleRoot == Body.contentMerkleRoot(block.body) + /** The first, empty block. */ val genesis: Block = { val body = Body(Vector.empty) @@ -74,5 +76,10 @@ object Block { transactions: IndexedSeq[Transaction] ) extends RLPHash[Body, Body.Hash] - object Body extends RLPHashCompanion[Body]()(RLPCodecs.rlpBlockBody) + object Body extends RLPHashCompanion[Body]()(RLPCodecs.rlpBlockBody) { + def contentMerkleRoot(body: Body): MerkleTree.Hash = + MerkleTree + .build(body.transactions.map(tx => MerkleTree.Hash(tx.hash))) + .hash + } } diff --git a/metronome/core/src/io/iohk/metronome/core/Pipe.scala b/metronome/core/src/io/iohk/metronome/core/Pipe.scala index 56e21cc0..53eed942 100644 --- a/metronome/core/src/io/iohk/metronome/core/Pipe.scala +++ b/metronome/core/src/io/iohk/metronome/core/Pipe.scala @@ -6,8 +6,8 @@ import monix.tail.Iterant import monix.catnap.ConcurrentQueue /** A `Pipe` is a connection between two components where - * messages of type `L` are going from left to right, and - * message of type `R` are going from right to left. + * messages of type `L` are going from left to right and + * messages of type `R` are going from right to left. 
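+ *
+ * For example, the `BlockSyncPipe` used elsewhere in this patch connects
+ * the `ConsensusService` (left) with the `SyncService` (right): sync
+ * requests travel left to right and validation results right to left.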
*/
trait Pipe[F[_], L, R] {
  type Left = Pipe.Side[F, L, R]
diff --git a/metronome/core/src/io/iohk/metronome/core/fibers/DeferredTask.scala b/metronome/core/src/io/iohk/metronome/core/fibers/DeferredTask.scala
index 85a05fa6..72cd0e99 100644
--- a/metronome/core/src/io/iohk/metronome/core/fibers/DeferredTask.scala
+++ b/metronome/core/src/io/iohk/metronome/core/fibers/DeferredTask.scala
@@ -4,12 +4,14 @@
 import cats.implicits._
 import cats.effect.Sync
 import cats.effect.concurrent.Deferred
 import cats.effect.Concurrent
+import scala.util.control.NoStackTrace

 /** A task that can be executed on a fiber pool, or canceled if the pool is shut down. */
 protected[fibers] class DeferredTask[F[_]: Sync, A](
     deferred: Deferred[F, Either[Throwable, A]],
     task: F[A]
 ) {
+  import DeferredTask.CanceledException

   /** Execute the task and set the success/failure result on the deferred. */
   def execute: F[Unit] =
@@ -19,15 +21,19 @@
   def join: F[A] =
     deferred.get.rethrow

-  /** Signal to the submitter that the pool has been shut down. */
-  def shutdown: F[Unit] =
+  /** Signal to the submitter that this task is canceled. */
+  def cancel: F[Unit] =
     deferred
-      .complete(Left(new RuntimeException("The pool has been shut down.")))
+      .complete(Left(new CanceledException))
       .attempt
       .void
 }

 object DeferredTask {
+  class CanceledException
+      extends RuntimeException("This task has been canceled.")
+      with NoStackTrace
+
   def apply[F[_]: Concurrent, A](task: F[A]): F[DeferredTask[F, A]] =
     Deferred[F, Either[Throwable, A]].map { d =>
       new DeferredTask[F, A](d, task)
diff --git a/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala b/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala
index 450c088e..06f3f0e2 100644
--- a/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala
+++ b/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala
@@ -56,6 +56,13 @@
 class FiberMap[F[_]: Concurrent: ContextShift, K](
       }
     }

+  /** Cancel all enqueued tasks for a key. */
+  def cancelQueue(key: K): F[Unit] =
+    actorMapRef.get.map(_.get(key)).flatMap {
+      case Some(actor) => actor.cancelQueue
+      case None => ().pure[F]
+    }
+
   /** Cancel all existing background processors. */
   private def shutdown: F[Unit] = {
     semaphore.withPermit {
@@ -95,14 +102,20 @@
 object FiberMap {
         _ <- reject.whenA(!enqueued)
       } yield wrapper.join

+    /** Cancel all enqueued tasks. */
+    def cancelQueue: F[Unit] =
+      for {
+        tasks <- queue.drain(0, Int.MaxValue)
+        _ <- tasks.toList.traverse(_.cancel)
+      } yield ()
+
    /** Cancel the processing and signal to all enqueued tasks that they will not be executed.
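+     * The running task is canceled through its `DeferredTask`, then the
+     * queue is drained via `cancelQueue` so waiting submitters are notified.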
*/ def shutdown: F[Unit] = for { _ <- fiber.cancel maybeRunning <- runningRef.get - _ <- maybeRunning.fold(().pure[F])(_.shutdown) - tasks <- queue.drain(0, Int.MaxValue) - _ <- tasks.toList.traverse(_.shutdown) + _ <- maybeRunning.fold(().pure[F])(_.cancel) + tasks <- cancelQueue } yield () } private object Actor { diff --git a/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala b/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala index 39d46699..befc7e19 100644 --- a/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala +++ b/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala @@ -52,7 +52,7 @@ class FiberSet[F[_]: Concurrent]( fibers <- fibersRef.get _ <- fibers.toList.traverse(_.cancel) tasks <- tasksRef.get - _ <- tasks.toList.traverse(_.shutdown) + _ <- tasks.toList.traverse(_.cancel) } yield () } diff --git a/metronome/core/src/io/iohk/metronome/core/messages/RPCMessage.scala b/metronome/core/src/io/iohk/metronome/core/messages/RPCMessage.scala index 7b691f03..3e7d9ddf 100644 --- a/metronome/core/src/io/iohk/metronome/core/messages/RPCMessage.scala +++ b/metronome/core/src/io/iohk/metronome/core/messages/RPCMessage.scala @@ -1,5 +1,6 @@ package io.iohk.metronome.core.messages +import cats.effect.Sync import java.util.UUID /** Messages that go in request/response pairs. */ @@ -13,11 +14,35 @@ trait RPCMessage { abstract class RPCMessageCompanion { type RequestId = UUID + object RequestId { def apply(): RequestId = UUID.randomUUID() + + def apply[F[_]: Sync]: F[RequestId] = + Sync[F].delay(apply()) } trait Request extends RPCMessage trait Response extends RPCMessage + + /** Establish a relationship between a request and a response + * type so the compiler can infer the return value of methods + * based on the request parameter, or validate that two generic + * parameters belong with each other. + */ + def pair[A <: Request, B <: Response]: RPCPair.Aux[A, B] = + new RPCPair[A] { type Response = B } +} + +/** A request can be associated with at most one response type. + * On the other hand a response type can serve multiple requests. + */ +trait RPCPair[Request] { + type Response +} +object RPCPair { + type Aux[A, B] = RPCPair[A] { + type Response = B + } } diff --git a/metronome/core/src/io/iohk/metronome/core/messages/RPCTracker.scala b/metronome/core/src/io/iohk/metronome/core/messages/RPCTracker.scala new file mode 100644 index 00000000..45200889 --- /dev/null +++ b/metronome/core/src/io/iohk/metronome/core/messages/RPCTracker.scala @@ -0,0 +1,117 @@ +package io.iohk.metronome.core.messages + +import cats.implicits._ +import cats.effect.{Concurrent, Timer, Sync} +import cats.effect.concurrent.{Ref, Deferred} +import java.util.UUID +import scala.concurrent.duration.FiniteDuration +import scala.reflect.ClassTag + +/** `RPCTracker` can be used to register outgoing requests and later + * match them up with incoming responses, thus it facilitates turning + * the two independent messages into a `Kleisli[F, Request, Option[Response]]`, + * by a component that has access to the network, where a `None` result means + * the operation timed out before a response was received. + * + * The workflow is: + * 0. Receive some request parameters in a method. + * 1. Create a request ID. + * 2. Create a request with the ID. + * 3. Register the request with the tracker, hold on to the handle. + * 4. Send the request over the network. + * 5. Wait on the handle, eventually returning the optional result to the caller. + * 6. 
Pass every response received from the network to the tracker (on the network handler fiber).
+  */
+class RPCTracker[F[_]: Timer: Concurrent, M](
+    deferredMapRef: Ref[F, Map[UUID, RPCTracker.Entry[F, _]]],
+    defaultTimeout: FiniteDuration
+) {
+  import RPCTracker.Entry
+
+  def register[
+      Req <: RPCMessageCompanion#Request,
+      Res <: RPCMessageCompanion#Response
+  ](
+      request: Req,
+      timeout: FiniteDuration = defaultTimeout
+  )(implicit
+      ev1: Req <:< M,
+      ev2: RPCPair.Aux[Req, Res],
+      // Used by `RPCTracker.Entry.complete` to make sure only the
+      // expected response type can complete a request.
+      ct: ClassTag[Res]
+  ): F[F[Option[Res]]] = {
+    val requestId = request.requestId
+    for {
+      d <- Deferred[F, Option[Res]]
+      e = RPCTracker.Entry(d)
+      _ <- deferredMapRef.update(_ + (requestId -> e))
+      _ <- Concurrent[F].start {
+        Timer[F].sleep(timeout) >> completeWithTimeout(requestId)
+      }
+    } yield d.get
+  }
+
+  /** Try to complete an outstanding request with a response.
+    *
+    * Returns `true` if the response was expected, `false` if
+    * it wasn't, or already timed out. An error is returned
+    * if the response was expected but there was a type
+    * mismatch.
+    */
+  def complete[Res <: RPCMessageCompanion#Response](
+      response: Res
+  )(implicit ev: Res <:< M): F[Either[Throwable, Boolean]] = {
+    remove(response.requestId).flatMap {
+      case None => false.asRight[Throwable].pure[F]
+      case Some(e) => e.complete(response)
+    }
+  }
+
+  private def completeWithTimeout(requestId: UUID): F[Unit] =
+    remove(requestId).flatMap {
+      case None => ().pure[F]
+      case Some(e) => e.timeout
+    }
+
+  private def remove(requestId: UUID): F[Option[Entry[F, _]]] =
+    deferredMapRef.modify { dm =>
+      (dm - requestId, dm.get(requestId))
+    }
+}
+
+object RPCTracker {
+  case class Entry[F[_]: Sync, Res](
+      deferred: Deferred[F, Option[Res]]
+  )(implicit ct: ClassTag[Res]) {
+    def timeout: F[Unit] =
+      deferred.complete(None).attempt.void
+
+    def complete[M](response: M): F[Either[Throwable, Boolean]] = {
+      response match {
+        case expected: Res =>
+          deferred
+            .complete(Some(expected))
+            .attempt
+            .map(_.isRight.asRight[Throwable])
+        case _ =>
+          // Wrong type, as evidenced by `ct` not matching `Res`.
+          // Returning an error so that this kind of programming error
+          // can be highlighted as soon as possible. Note though that
+          // if the request already timed out we can't tell if this
+          // error would have happened if the response arrived earlier.
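+          // The deferred is still completed (with None) so the caller
+          // is unblocked with a timeout-like result rather than hanging.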
+ val error = new IllegalArgumentException( + s"Invalid response type ${response.getClass.getName}; expected ${ct.runtimeClass.getName}" + ) + deferred.complete(None).attempt.as(error.asLeft[Boolean]) + } + } + } + + def apply[F[_]: Concurrent: Timer, M]( + defaultTimeout: FiniteDuration + ): F[RPCTracker[F, M]] = + Ref[F].of(Map.empty[UUID, Entry[F, _]]).map { + new RPCTracker(_, defaultTimeout) + } +} diff --git a/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberMapSpec.scala b/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberMapSpec.scala index 575e2eab..208dd46a 100644 --- a/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberMapSpec.scala +++ b/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberMapSpec.scala @@ -135,13 +135,26 @@ class FiberMapSpec extends AsyncFlatSpec with Matchers with Inside { r <- r.attempt } yield { inside(r) { case Left(ex) => - ex shouldBe a[RuntimeException] - ex.getMessage should include("shut down") + ex shouldBe a[DeferredTask.CanceledException] } } } } + it should "cancel and raise errors in a canceled task" in testMap { + fiberMap => + for { + _ <- fiberMap.submit("foo")(Task.never) + r <- fiberMap.submit("foo")(Task("easy")) + _ <- fiberMap.cancelQueue("foo") + r <- r.attempt + } yield { + inside(r) { case Left(ex) => + ex shouldBe a[DeferredTask.CanceledException] + } + } + } + it should "keep processing even if a task fails" in testMap { fiberMap => for { t1 <- fiberMap.submit("foo")( diff --git a/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberSetSpec.scala b/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberSetSpec.scala index 422b85c9..ab4205f4 100644 --- a/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberSetSpec.scala +++ b/metronome/core/test/src/io/iohk/metronome/core/fibers/FiberSetSpec.scala @@ -39,8 +39,7 @@ class FiberSetSpec extends AsyncFlatSpec with Matchers with Inside { r <- r.attempt } yield { inside(r) { case Left(ex) => - ex shouldBe a[RuntimeException] - ex.getMessage should include("shut down") + ex shouldBe a[DeferredTask.CanceledException] } } } diff --git a/metronome/core/test/src/io/iohk/metronome/core/messages/RPCTrackerSpec.scala b/metronome/core/test/src/io/iohk/metronome/core/messages/RPCTrackerSpec.scala new file mode 100644 index 00000000..1ac23f20 --- /dev/null +++ b/metronome/core/test/src/io/iohk/metronome/core/messages/RPCTrackerSpec.scala @@ -0,0 +1,85 @@ +package io.iohk.metronome.core.messages + +import monix.eval.Task +import monix.execution.Scheduler.Implicits.global +import org.scalatest.flatspec.AsyncFlatSpec +import org.scalatest.matchers.should.Matchers +import org.scalatest.compatible.Assertion +import org.scalatest.Inside +import scala.concurrent.Future +import scala.concurrent.duration._ + +class RPCTrackerSpec extends AsyncFlatSpec with Matchers with Inside { + + sealed trait TestMessage extends RPCMessage + object TestMessage extends RPCMessageCompanion { + case class FooRequest(requestId: RequestId) extends TestMessage with Request + case class FooResponse(requestId: RequestId, value: Int) + extends TestMessage + with Response + case class BarRequest(requestId: RequestId) extends TestMessage with Request + case class BarResponse(requestId: RequestId, value: String) + extends TestMessage + with Response + + implicit val foo = pair[FooRequest, FooResponse] + implicit val bar = pair[BarRequest, BarResponse] + } + import TestMessage._ + + def test( + f: RPCTracker[Task, TestMessage] => Task[Assertion] + ): Future[Assertion] = + 
RPCTracker[Task, TestMessage](10.seconds) + .flatMap(f) + .timeout(5.seconds) + .runToFuture + + behavior of "RPCTracker" + + it should "complete responses within the timeout" in test { tracker => + val req = FooRequest(RequestId()) + val res = FooResponse(req.requestId, 1) + for { + join <- tracker.register(req) + ok <- tracker.complete(res) + got <- join + } yield { + ok shouldBe Right(true) + got shouldBe Some(res) + } + } + + it should "complete responses with None after the timeout" in test { + tracker => + val req = FooRequest(RequestId()) + val res = FooResponse(req.requestId, 1) + for { + join <- tracker.register(req, timeout = 50.millis) + _ <- Task.sleep(100.millis) + ok <- tracker.complete(res) + got <- join + } yield { + ok shouldBe Right(false) + got shouldBe empty + } + } + + it should "complete responses with None if the wrong type of response arrives" in test { + tracker => + for { + rid <- RequestId[Task] + req = FooRequest(rid) + res = BarResponse(rid, "one") + join <- tracker.register(req) + ok <- tracker.complete(res) + got <- join + } yield { + inside(ok) { case Left(error) => + error.getMessage should include("Invalid response type") + } + got shouldBe empty + } + } + +} diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala index dff0db04..87379b0b 100644 --- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala @@ -12,6 +12,9 @@ package io.iohk.metronome.hotstuff.consensus.basic trait Block[A <: Agreement] { def blockHash(b: A#Block): A#Hash def parentBlockHash(b: A#Block): A#Hash + + /** Perform simple content validation. */ + def isValid(b: A#Block): Boolean } object Block { diff --git a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala index 13e84d62..578c89b4 100644 --- a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala +++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala @@ -52,6 +52,7 @@ object HotStuffProtocolCommands extends Commands { implicit val block: Block[TestAgreement] = new Block[TestAgreement] { override def blockHash(b: TestBlock) = b.blockHash override def parentBlockHash(b: TestBlock) = b.parentBlockHash + override def isValid(b: TestBlock) = true } implicit val leaderSelection = LeaderSelection.Hashing diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala index 4ac5b610..1044ec63 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala @@ -165,7 +165,7 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( BlockSyncPipe.Request(sender, prepare) ) - /** Process the synchronization. result queue. */ + /** Process the synchronization result queue. 
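+   *
+   * Each response tells us whether the block referenced by an earlier
+   * `Prepare` message is now available locally and passed validation.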
*/ private def processBlockSyncPipe: F[Unit] = blockSyncPipe.receive .mapEval[Unit] { case BlockSyncPipe.Response(request, isValid) => diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala index 23112c96..42639bdd 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala @@ -26,7 +26,6 @@ object HotStuffService { /** Start up the HotStuff service stack. */ def apply[F[_]: Concurrent: ContextShift: Timer, N, A <: Agreement: Block]( - publicKey: A#PKey, network: Network[F, A, HotStuffMessage[A]], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], @@ -54,7 +53,7 @@ object HotStuffService { blockSyncPipe <- Resource.liftF { BlockSyncPipe[F, A] } consensusService <- ConsensusService( - publicKey, + initState.publicKey, consensusNetwork, blockStorage, viewStateStorage, @@ -63,6 +62,8 @@ object HotStuffService { ) syncService <- SyncService( + initState.publicKey, + initState.federation, syncNetwork, blockStorage, blockSyncPipe.right, diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala index 7cc8ab9d..1922668d 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala @@ -1,16 +1,29 @@ package io.iohk.metronome.hotstuff.service import cats.implicits._ -import cats.effect.{Sync, Resource, Concurrent, ContextShift} +import cats.effect.{Sync, Resource, Concurrent, ContextShift, Timer} import io.iohk.metronome.core.fibers.FiberMap -import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, ProtocolState} +import io.iohk.metronome.core.messages.{ + RPCMessageCompanion, + RPCPair, + RPCTracker +} +import io.iohk.metronome.hotstuff.consensus.Federation +import io.iohk.metronome.hotstuff.consensus.basic.{ + Agreement, + ProtocolState, + Block +} import io.iohk.metronome.hotstuff.service.messages.SyncMessage import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe import io.iohk.metronome.hotstuff.service.storage.BlockStorage +import io.iohk.metronome.hotstuff.service.sync.BlockSynchronizer import io.iohk.metronome.hotstuff.service.tracing.SyncTracers import io.iohk.metronome.networking.ConnectionHandler import io.iohk.metronome.storage.KVStoreRunner import scala.util.control.NonFatal +import scala.concurrent.duration._ +import scala.reflect.ClassTag /** The `SyncService` handles the `SyncMessage`s coming from the network, * i.e. serving block and status requests, as well as receive responses @@ -27,20 +40,49 @@ class SyncService[F[_]: Sync, N, A <: Agreement]( blockStorage: BlockStorage[N, A], blockSyncPipe: BlockSyncPipe[F, A]#Right, getState: F[ProtocolState[A]], - fiberMap: FiberMap[F, A#PKey] + incomingFiberMap: FiberMap[F, A#PKey], + syncFiberMap: FiberMap[F, A#PKey], + rpcTracker: RPCTracker[F, SyncMessage[A]] )(implicit tracers: SyncTracers[F, A], storeRunner: KVStoreRunner[F, N]) { + import SyncMessage._ - /** Request a block from a peer. - * - * Returns `None` if we're not connected or the request times out. - */ - def getBlock(from: A#PKey, blockHash: A#Hash): F[Option[A#Block]] = ??? + /** Request a block from a peer. 
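+    *
+    * Returns `None` if we're not connected or the request times out;
+    * the tracking and timeout handling are shared with `getStatus`
+    * via `sendRequest`.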
*/ + private def getBlock(from: A#PKey, blockHash: A#Hash): F[Option[A#Block]] = { + for { + requestId <- RequestId[F] + request = GetBlockRequest(requestId, blockHash) + maybeResponse <- sendRequest(from, request) + } yield maybeResponse.map(_.block) + } - /** Request the status of a peer. + /** Request the status of a peer. */ + private def getStatus(from: A#PKey): F[Option[Status[A]]] = { + for { + requestId <- RequestId[F] + request = GetStatusRequest[A](requestId) + maybeResponse <- sendRequest(from, request) + } yield maybeResponse.map(_.status) + } + + /** Send a request to the peer and track the response. * * Returns `None` if we're not connected or the request times out. */ - def getStatus(from: A#PKey): F[Option[Status[A]]] = ??? + private def sendRequest[ + Req <: RPCMessageCompanion#Request, + Res <: RPCMessageCompanion#Response + ](from: A#PKey, request: Req)(implicit + ev1: Req <:< SyncMessage[A] with SyncMessage.Request, + ev2: RPCPair.Aux[Req, Res], + ct: ClassTag[Res] + ): F[Option[Res]] = { + for { + join <- rpcTracker.register[Req, Res](request) + _ <- network.sendMessage(from, request) + res <- join + _ <- tracers.requestTimeout(from -> request).whenA(res.isEmpty) + } yield res + } /** Process incoming network messages. */ private def processNetworkMessages: F[Unit] = { @@ -48,7 +90,7 @@ class SyncService[F[_]: Sync, N, A <: Agreement]( network.incomingMessages .mapEval[Unit] { case ConnectionHandler.MessageReceived(from, message) => // Handle on a fiber dedicated to the source. - fiberMap + incomingFiberMap .submit(from) { processNetworkMessage(from, message) } @@ -69,8 +111,6 @@ class SyncService[F[_]: Sync, N, A <: Agreement]( from: A#PKey, message: SyncMessage[A] ): F[Unit] = { - import SyncMessage._ - val process = message match { case GetStatusRequest(requestId) => getState.flatMap { state => @@ -98,13 +138,13 @@ class SyncService[F[_]: Sync, N, A <: Agreement]( ) } - case GetStatusResponse(requestId, status) => - // TODO (PM-3063): Hand over to view synchronisation. - ??? - - case GetBlockResponse(requestId, block) => - // TODO (PM-3134): Hand over to block synchronisation. - ??? + case response: SyncMessage.Response => + rpcTracker.complete(response).flatMap { + case Right(ok) => + tracers.responseIgnored((from, response, None)).whenA(!ok) + case Left(ex) => + tracers.responseIgnored((from, response, Some(ex))) + } } process.handleErrorWith { case NonFatal(ex) => @@ -112,29 +152,49 @@ class SyncService[F[_]: Sync, N, A <: Agreement]( } } - /** Read Requests from the BlockSyncPipe and send Responses. */ - def processBlockSyncPipe: F[Unit] = { + /** Read Requests from the BlockSyncPipe and send Responses. + * + * These are coming from the `ConsensusService` asking for a + * `Prepare` message to be synchronised with the sender. + */ + private def processBlockSyncPipe( + blockSynchronizer: BlockSynchronizer[F, N, A] + ): F[Unit] = { blockSyncPipe.receive - .mapEval[Unit] { case request @ BlockSyncPipe.Request(sender, prepare) => - // TODO (PM-3134): Block sync. - // TODO (PM-3132, PM-3133): Block validation. - - // We must take care not to insert blocks into storage and risk losing - // the pointer to them in a restart. Maybe keep the unfinished tree - // in memory until we find a parent we do have in storage, then - // insert them in the opposite order, validating against the application side - // as we go along, finally responding to the requestor. 
-          //
-          // It is enough to respond to the last block positively, it will indicate
-          // that the whole range can be executed later (at that point from storage).
-          val isValid: F[Boolean] = ???
-
-          isValid.flatMap { isValid =>
-            blockSyncPipe.send(BlockSyncPipe.Response(request, isValid))
-          }
+      .mapEval[Unit] {
+        // TODO (PM-3063): Change `BlockSyncPipe` to just `SyncPipe` and add
+        // ViewState sync requests which poll the federation for the latest
+        // Commit Q.C. and jump to it. When that signal comes, cancel the
+        // `syncFiberMap`, discard the `blockSynchronizer` and move over to
+        // state syncing, then create a new block synchronizer and resume.
+        // For this, change the input of this method to a `F[BlockSynchronizer[F,N,A]]`
+        // and call some mutually recursive method representing different states:
+
+        case request @ BlockSyncPipe.Request(sender, prepare) =>
+          // It is enough to respond to the last block positively, it will indicate
+          // that the whole range can be executed later (at that point from storage).
+          // If the same leader is sending us newer proposals, we can ignore the
+          // previous prepared blocks - they are either part of the new Q.C.,
+          // in which case they don't need to be validated, or they have not
+          // gathered enough votes, and have been superseded by a new proposal.
+          syncFiberMap.cancelQueue(sender) >>
+            syncFiberMap
+              .submit(sender) {
+                for {
+                  _ <- blockSynchronizer.sync(sender, prepare.highQC)
+                  isValid <- validateBlock(prepare.block)
+                  _ <- blockSyncPipe.send(
+                    BlockSyncPipe.Response(request, isValid)
+                  )
+                } yield ()
+              }
+              .void
       }
       .completedL
   }
+
+  // TODO (PM-3132, PM-3133): Block validation.
+  private def validateBlock(block: A#Block): F[Boolean] = ???
 }

 object SyncService {
@@ -143,26 +203,43 @@
    * in the background, shutting processing down when the resource is
    * released.
    */
-  def apply[F[_]: Concurrent: ContextShift, N, A <: Agreement](
+  def apply[F[_]: Concurrent: ContextShift: Timer, N, A <: Agreement: Block](
+      publicKey: A#PKey,
+      federation: Federation[A#PKey],
       network: Network[F, A, SyncMessage[A]],
       blockStorage: BlockStorage[N, A],
       blockSyncPipe: BlockSyncPipe[F, A]#Right,
-      getState: F[ProtocolState[A]]
+      getState: F[ProtocolState[A]],
+      timeout: FiniteDuration = 10.seconds
   )(implicit
       tracers: SyncTracers[F, A],
       storeRunner: KVStoreRunner[F, N]
   ): Resource[F, SyncService[F, N, A]] =
    // TODO (PM-3186): Add capacity as part of rate limiting.
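+    // Two fiber maps: `incomingFiberMap` serialises message handling per
+    // peer, while `syncFiberMap` runs the per-sender download jobs, so the
+    // latter can be canceled via `cancelQueue` without stopping the former.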
for { - fiberMap <- FiberMap[F, A#PKey]() + incomingFiberMap <- FiberMap[F, A#PKey]() + syncFiberMap <- FiberMap[F, A#PKey]() + rpcTracker <- Resource.liftF { + RPCTracker[F, SyncMessage[A]](timeout) + } service = new SyncService( network, blockStorage, blockSyncPipe, getState, - fiberMap + incomingFiberMap, + syncFiberMap, + rpcTracker ) + blockSync <- Resource.liftF { + BlockSynchronizer[F, N, A]( + publicKey, + federation, + blockStorage, + service.getBlock + ) + } _ <- Concurrent[F].background(service.processNetworkMessages) - _ <- Concurrent[F].background(service.processBlockSyncPipe) + _ <- Concurrent[F].background(service.processBlockSyncPipe(blockSync)) } yield service } diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala index d2fd6312..bfea9c39 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/SyncMessage.scala @@ -10,9 +10,9 @@ import io.iohk.metronome.hotstuff.service.Status sealed trait SyncMessage[+A <: Agreement] { self: RPCMessage => } object SyncMessage extends RPCMessageCompanion { - case class GetStatusRequest( + case class GetStatusRequest[A <: Agreement]( requestId: RequestId - ) extends SyncMessage[Nothing] + ) extends SyncMessage[A] with Request case class GetStatusResponse[A <: Agreement]( @@ -32,4 +32,10 @@ object SyncMessage extends RPCMessageCompanion { block: A#Block ) extends SyncMessage[A] with Response + + implicit def getBlockPair[A <: Agreement] = + pair[GetBlockRequest[A], GetBlockResponse[A]] + + implicit def getStatusPair[A <: Agreement] = + pair[GetStatusRequest[A], GetStatusResponse[A]] } diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala index 087b0e4d..70b486e7 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala @@ -24,8 +24,8 @@ class BlockStorage[N, A <: Agreement: Block]( * then add this block to its children. */ def put(block: A#Block): KVStore[N, Unit] = { - val blockHash = implicitly[Block[A]].blockHash(block) - val parentHash = implicitly[Block[A]].parentBlockHash(block) + val blockHash = Block[A].blockHash(block) + val parentHash = Block[A].parentBlockHash(block) blockColl.put(blockHash, block) >> childToParentColl.put(blockHash, parentHash) >> @@ -88,7 +88,7 @@ class BlockStorage[N, A <: Agreement: Block]( /** Delete a block and remove it from any parent-to-child mapping, * without any checking for the tree structure invariants. 
*/
-  private def deleteUnsafe(blockHash: A#Hash): KVStore[N, Unit] = {
+  def deleteUnsafe(blockHash: A#Hash): KVStore[N, Unit] = {
     def deleteIfEmpty(maybeChildren: Option[Set[A#Hash]]) =
       maybeChildren.filter(_.nonEmpty)
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala
new file mode 100644
index 00000000..43467dad
--- /dev/null
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala
@@ -0,0 +1,252 @@
+package io.iohk.metronome.hotstuff.service.sync
+
+import cats.implicits._
+import cats.effect.{Sync, Timer, Concurrent, ContextShift}
+import cats.effect.concurrent.Semaphore
+import io.iohk.metronome.hotstuff.consensus.Federation
+import io.iohk.metronome.hotstuff.consensus.basic.{
+  Agreement,
+  QuorumCertificate,
+  Block
+}
+import io.iohk.metronome.hotstuff.service.storage.BlockStorage
+import io.iohk.metronome.storage.{InMemoryKVStore, KVStoreRunner}
+import scala.concurrent.duration._
+import scala.util.Random
+
+/** The job of the `BlockSynchronizer` is to procure missing blocks when a `Prepare`
+  * message builds on a High Q.C. that we don't have.
+  *
+  * It will walk backwards, asking for the ancestors until we find one that we already
+  * have in persistent storage, then append blocks to the storage in the opposite order.
+  *
+  * Since the final block has a Quorum Certificate, there's no need to validate the
+  * ancestors, assuming an honest majority in the federation. The only validation we
+  * need to do is hash checks to make sure we're getting the correct blocks.
+  *
+  * The synchronizer keeps the tentative blocks in memory until they can be connected
+  * to the persistent storage. We assume that we never have to download the block history
+  * all the way back to genesis, but rather that the application will always have support
+  * for syncing to any given block and its associated state, to catch up after spending
+  * a long time offline. Once that happens the block history should be pruneable.
+  */
+class BlockSynchronizer[F[_]: Sync: Timer, N, A <: Agreement: Block](
+    publicKey: A#PKey,
+    federation: Federation[A#PKey],
+    blockStorage: BlockStorage[N, A],
+    getBlock: BlockSynchronizer.GetBlock[F, A],
+    inMemoryStore: KVStoreRunner[F, N],
+    semaphore: Semaphore[F],
+    retryTimeout: FiniteDuration = 5.seconds
+)(implicit storeRunner: KVStoreRunner[F, N]) {
+
+  private val otherPublicKeys =
+    federation.publicKeys.filterNot(_ == publicKey)
+
+  // We must take care not to insert blocks into storage and risk losing
+  // the pointer to them in a restart, hence keeping the unfinished tree
+  // in memory until we find a parent we do have in storage, then
+  // insert them in the opposite order.
+
+  /** Download all blocks up to the one included in the Quorum Certificate.
+    *
+    * Only expected to be called once per sender at the same time, otherwise
+    * it may request the same ancestor block multiple times concurrently.
+    *
+    * This could be managed with internal queueing, but not having that should
+    * make it easier to cancel all calling fibers and discard the synchronizer
+    * instance and its in-memory store, do state syncing, then replace it with
+    * a fresh one.
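+    *
+    * A minimal usage sketch, mirroring the call site in `SyncService`
+    * (the `sender` and `prepare` values are assumed to be in scope):
+    * {{{
+    * synchronizer.sync(sender, prepare.highQC)
+    * }}}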
+ */ + def sync( + sender: A#PKey, + quorumCertificate: QuorumCertificate[A] + ): F[Unit] = + for { + path <- download(sender, quorumCertificate.blockHash, Nil) + _ <- persist(quorumCertificate.blockHash, path) + } yield () + + /** Download a block and all of its ancestors into the in-memory block store. + * + * Returns the path from the greatest ancestor that had to be downloaded + * to the originally requested block, so that we can persist them in that order. + * + * The path is maintained separately from the in-memory store in case another + * ongoing download would re-insert something on a path already partially removed + * resulting in a forest that cannot be traversed fully. + */ + private def download( + from: A#PKey, + blockHash: A#Hash, + path: List[A#Hash] + ): F[List[A#Hash]] = { + storeRunner + .runReadOnly { + blockStorage.contains(blockHash) + } + .flatMap { + case true => + path.pure[F] + + case false => + inMemoryStore + .runReadOnly { + blockStorage.get(blockHash) + } + .flatMap { + case Some(block) => + downloadParent(from, block, path) + + case None => + getAndValidateBlock(from, blockHash) + .flatMap { + case Some(block) => + inMemoryStore.runReadWrite { + blockStorage.put(block) + } >> downloadParent(from, block, path) + + case None => + Timer[F].sleep(retryTimeout) >> + download(from, blockHash, path) + } + } + } + } + + private def downloadParent( + from: A#PKey, + block: A#Block, + path: List[A#Hash] + ): F[List[A#Hash]] = { + val blockHash = Block[A].blockHash(block) + val parentBlockHash = Block[A].parentBlockHash(block) + download(from, parentBlockHash, blockHash :: path) + } + + /** Try downloading the block from the source and perform basic content validation. + * + * If the download fails, try random alternative sources in the federation. + */ + private def getAndValidateBlock( + from: A#PKey, + blockHash: A#Hash + ): F[Option[A#Block]] = { + def fetch(from: A#PKey) = + getBlock(from, blockHash) + .map { maybeBlock => + maybeBlock.filter { block => + Block[A].blockHash(block) == blockHash && + Block[A].isValid(block) + } + } + + def loop(sources: List[A#PKey]): F[Option[A#Block]] = + sources match { + case Nil => none.pure[F] + case from :: alternatives => + fetch(from).flatMap { + case None => loop(alternatives) + case block => block.pure[F] + } + } + + loop(List(from)).flatMap { + case None => + loop(Random.shuffle(otherPublicKeys.filterNot(_ == from).toList)) + case block => + block.pure[F] + } + } + + /** See how far we can go in memory from the original block hash we asked for, + * which indicates the blocks that no concurrent download has persisted yet, + * then persist the rest. + * + * Only doing one persist operation at a time to make sure there's no competition + * in the insertion order of the path elements among concurrent downloads. + */ + private def persist( + targetBlockHash: A#Hash, + path: List[A#Hash] + ): F[Unit] = + semaphore.withPermit { + inMemoryStore + .runReadOnly { + blockStorage.getPathFromRoot(targetBlockHash) + } + .flatMap { unpersisted => + persistAndClear(path, unpersisted.toSet) + } + } + + /** Move the blocks on the path from memory to persistent storage. + * + * `path` and `unpersisted` can be different when a concurrent download + * re-inserts some ancestor block into the in-memory store that another + * download has already removed during persistence. 
The `unpersisted`
+    * set only contains blocks that need to be inserted into persistent
+    * storage, but all `path` elements have to be visited to make sure
+    * nothing is left in the in-memory store, leaking memory.
+    */
+  private def persistAndClear(
+      path: List[A#Hash],
+      unpersisted: Set[A#Hash]
+  ): F[Unit] =
+    path match {
+      case Nil =>
+        ().pure[F]
+
+      case blockHash :: rest =>
+        inMemoryStore
+          .runReadWrite {
+            for {
+              maybeBlock <- blockStorage.get(blockHash).lift
+              // There could be other, overlapping paths being downloaded,
+              // but as long as they are on the call stack, it's okay to
+              // create a forest here.
+              _ <- blockStorage.deleteUnsafe(blockHash)
+            } yield maybeBlock
+          }
+          .flatMap {
+            case Some(block) if unpersisted(blockHash) =>
+              storeRunner
+                .runReadWrite {
+                  blockStorage.put(block)
+                }
+            case _ =>
+              // Another download has already persisted it.
+              ().pure[F]
+
+          } >>
+          persistAndClear(rest, unpersisted)
+    }
+}
+
+object BlockSynchronizer {
+
+  /** Send a network request to get a block. */
+  type GetBlock[F[_], A <: Agreement] = (A#PKey, A#Hash) => F[Option[A#Block]]
+
+  /** Create a block synchronizer resource. Stop any background downloads when released. */
+  def apply[F[_]: Concurrent: ContextShift: Timer, N, A <: Agreement: Block](
+      publicKey: A#PKey,
+      federation: Federation[A#PKey],
+      blockStorage: BlockStorage[N, A],
+      getBlock: GetBlock[F, A]
+  )(implicit
+      storeRunner: KVStoreRunner[F, N]
+  ): F[BlockSynchronizer[F, N, A]] =
+    for {
+      semaphore <- Semaphore[F](1)
+      inMemoryStore <- InMemoryKVStore[F, N]
+      synchronizer = new BlockSynchronizer[F, N, A](
+        publicKey,
+        federation,
+        blockStorage,
+        getBlock,
+        inMemoryStore,
+        semaphore
+      )
+    } yield synchronizer
+}
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala
index b67195a3..8df961fa 100644
--- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala
@@ -1,13 +1,31 @@
 package io.iohk.metronome.hotstuff.service.tracing

 import io.iohk.metronome.hotstuff.consensus.basic.Agreement
+import io.iohk.metronome.hotstuff.service.messages.SyncMessage

 sealed trait SyncEvent[+A <: Agreement]

 object SyncEvent {

   /** A federation member is sending us so many requests that its work queue is full. */
-  case class QueueFull[A <: Agreement](publicKey: A#PKey) extends SyncEvent[A]
+  case class QueueFull[A <: Agreement](
+      sender: A#PKey
+  ) extends SyncEvent[A]
+
+  /** A request we sent couldn't be matched with a response in time. */
+  case class RequestTimeout[A <: Agreement](
+      recipient: A#PKey,
+      request: SyncMessage[A] with SyncMessage.Request
+  ) extends SyncEvent[A]
+
+  /** A response was ignored either because the request ID didn't match, or it already timed out,
+    * or the response type didn't match the expected one based on the request.
+    */
+  case class ResponseIgnored[A <: Agreement](
+      sender: A#PKey,
+      response: SyncMessage[A] with SyncMessage.Response,
+      maybeError: Option[Throwable]
+  ) extends SyncEvent[A]

   /** An unexpected error in one of the background tasks.
*/ case class Error(error: Throwable) extends SyncEvent[Nothing] diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala index a4ce6590..681d6be7 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala @@ -3,20 +3,33 @@ package io.iohk.metronome.hotstuff.service.tracing import cats.implicits._ import io.iohk.metronome.tracer.Tracer import io.iohk.metronome.hotstuff.consensus.basic.Agreement +import io.iohk.metronome.hotstuff.service.messages.SyncMessage case class SyncTracers[F[_], A <: Agreement]( queueFull: Tracer[F, A#PKey], + requestTimeout: Tracer[F, SyncTracers.Request[A]], + responseIgnored: Tracer[F, SyncTracers.Response[A]], error: Tracer[F, Throwable] ) object SyncTracers { import SyncEvent._ + type Request[A <: Agreement] = + (A#PKey, SyncMessage[A] with SyncMessage.Request) + + type Response[A <: Agreement] = + (A#PKey, SyncMessage[A] with SyncMessage.Response, Option[Throwable]) + def apply[F[_], A <: Agreement]( tracer: Tracer[F, SyncEvent[A]] ): SyncTracers[F, A] = SyncTracers[F, A]( queueFull = tracer.contramap[A#PKey](QueueFull(_)), + requestTimeout = tracer + .contramap[Request[A]]((RequestTimeout.apply[A] _).tupled), + responseIgnored = tracer + .contramap[Response[A]]((ResponseIgnored.apply[A] _).tupled), error = tracer.contramap[Throwable](Error(_)) ) } diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala index 33473dbd..ea9ca284 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala @@ -3,7 +3,6 @@ package io.iohk.metronome.hotstuff.service.storage import cats.implicits._ import io.iohk.metronome.storage.{KVCollection, KVStoreState} import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, Block => BlockOps} -import java.util.UUID import org.scalacheck._ import org.scalacheck.Arbitrary.arbitrary import org.scalacheck.Prop.{all, forAll, propBoolean} @@ -17,21 +16,22 @@ object BlockStorageProps extends Properties("BlockStorage") { def isGenesis = parentId.isEmpty } - object TestAggreement extends Agreement { + object TestAgreement extends Agreement { type Block = TestBlock type Hash = String type PSig = Nothing - type GSig = Nothing - type PKey = Nothing + type GSig = Unit + type PKey = Int type SKey = Nothing - implicit val block = new BlockOps[TestAggreement] { + implicit val block = new BlockOps[TestAgreement] { override def blockHash(b: TestBlock) = b.id override def parentBlockHash(b: TestBlock) = b.parentId + override def isValid(b: Block) = true } } - type TestAggreement = TestAggreement.type - type Hash = TestAggreement.Hash + type TestAgreement = TestAgreement.type + type Hash = TestAgreement.Hash implicit def `Codec[Set[T]]`[T: Codec] = implicitly[Codec[List[T]]].xmap[Set[T]](_.toSet, _.toList) @@ -44,7 +44,7 @@ object BlockStorageProps extends Properties("BlockStorage") { } object TestBlockStorage - extends BlockStorage[Namespace, TestAggreement]( + extends BlockStorage[Namespace, TestAgreement]( new KVCollection[Namespace, Hash, 
TestBlock](Namespace.Blocks), new KVCollection[Namespace, Hash, Hash](Namespace.BlockToParent), new KVCollection[Namespace, Hash, Set[Hash]](Namespace.BlockToChildren) @@ -95,7 +95,7 @@ object BlockStorageProps extends Properties("BlockStorage") { } def genBlockId: Gen[Hash] = - Gen.delay(UUID.randomUUID().toString) + Gen.uuid.map(_.toString) /** Generate a block with a given parent, using the next available ID. */ def genBlock(parentId: Hash): Gen[TestBlock] = @@ -127,11 +127,14 @@ object BlockStorageProps extends Properties("BlockStorage") { def genBlockTree: Gen[List[TestBlock]] = genBlockTree(parentId = "") - def genNonEmptyBlockTree: Gen[List[TestBlock]] = for { - genesis <- genBlock(parentId = "") + def genNonEmptyBlockTree(parentId: Hash): Gen[List[TestBlock]] = for { + genesis <- genBlock(parentId = parentId) tree <- genBlockTree(genesis.id) } yield genesis +: tree + def genNonEmptyBlockTree: Gen[List[TestBlock]] = + genNonEmptyBlockTree(parentId = "") + case class TestData( tree: List[TestBlock], store: TestKVStore.Store diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala index 226cbdb8..6b490811 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala @@ -22,7 +22,7 @@ object ViewStateStorageProps extends Properties("ViewStateStorage") { } object ViewStateStorageCommands extends Commands { - object TestAggreement extends Agreement { + object TestAgreement extends Agreement { type Block = Nothing type Hash = String type PSig = Unit @@ -30,13 +30,13 @@ object ViewStateStorageCommands extends Commands { type PKey = Nothing type SKey = Nothing } - type TestAggreement = TestAggreement.type + type TestAgreement = TestAgreement.type type Namespace = String object TestKVStoreState extends KVStoreState[Namespace] - type TestViewStateStorage = ViewStateStorage[Namespace, TestAggreement] + type TestViewStateStorage = ViewStateStorage[Namespace, TestAgreement] class StorageWrapper( viewStateStorage: TestViewStateStorage, @@ -53,16 +53,17 @@ object ViewStateStorageCommands extends Commands { def read[A]( f: TestViewStateStorage => KVStoreRead[Namespace, A] ): A = { + val b = scodec.bits.ByteVector.empty TestKVStoreState.compile(f(viewStateStorage)).run(store) } } - type State = ViewStateStorage.Bundle[TestAggreement] + type State = ViewStateStorage.Bundle[TestAgreement] type Sut = StorageWrapper val genesisState = ViewStateStorage.Bundle - .fromGenesisQC[TestAggreement] { - QuorumCertificate[TestAggreement]( + .fromGenesisQC[TestAgreement] { + QuorumCertificate[TestAgreement]( Phase.Prepare, ViewNumber(1), "", @@ -89,7 +90,7 @@ object ViewStateStorageCommands extends Commands { override def newSut(state: State): Sut = { val init = TestKVStoreState.compile( - ViewStateStorage[Namespace, TestAggreement]("test-namespace", state) + ViewStateStorage[Namespace, TestAgreement]("test-namespace", state) ) val (store, storage) = init.run(Map.empty).value new StorageWrapper(storage, store) @@ -116,9 +117,9 @@ object ViewStateStorageCommands extends Commands { def genSetQuorumCertificate(state: State) = for { p <- Gen.oneOf(Phase.Prepare, Phase.PreCommit, Phase.Commit) - h <- arbitrary[TestAggreement.Hash] - s <- arbitrary[TestAggreement.GSig] - qc = 
QuorumCertificate[TestAggreement](
+      h <- arbitrary[TestAgreement.Hash]
+      s <- arbitrary[TestAgreement.GSig]
+      qc = QuorumCertificate[TestAgreement](
         p,
         state.viewNumber,
         h,
         s
       )
     } yield SetQuorumCertificateCommand(qc)
@@ -147,7 +148,7 @@
     override def postCondition(state: State, success: Boolean): Prop = success
   }

-  case class SetQuorumCertificateCommand(qc: QuorumCertificate[TestAggreement])
+  case class SetQuorumCertificateCommand(qc: QuorumCertificate[TestAgreement])
       extends UnitCommand {
     override def run(sut: Sut): Result =
       sut.write(_.setQuorumCertificate(qc))
@@ -165,7 +166,7 @@
     override def postCondition(state: State, success: Boolean): Prop = success
   }

-  case class SetLastExecutedBlockHashCommand(blockHash: TestAggreement.Hash)
+  case class SetLastExecutedBlockHashCommand(blockHash: TestAgreement.Hash)
       extends UnitCommand {
     override def run(sut: Sut): Result =
       sut.write(_.setLastExecutedBlockHash(blockHash))
@@ -182,7 +183,7 @@
   }

   case object GetBundleCommand extends Command {
-    type Result = ViewStateStorage.Bundle[TestAggreement]
+    type Result = ViewStateStorage.Bundle[TestAgreement]

     override def run(sut: Sut): Result = sut.read(_.getBundle)
     override def nextState(state: State): State = state
diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala
new file mode 100644
index 00000000..bb419629
--- /dev/null
+++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala
@@ -0,0 +1,211 @@
+package io.iohk.metronome.hotstuff.service.sync
+
+import cats.effect.concurrent.{Ref, Semaphore}
+import io.iohk.metronome.crypto.GroupSignature
+import io.iohk.metronome.hotstuff.consensus.ViewNumber
+import io.iohk.metronome.hotstuff.consensus.basic.{QuorumCertificate, Phase}
+import io.iohk.metronome.hotstuff.service.storage.BlockStorageProps
+import io.iohk.metronome.storage.InMemoryKVStore
+import org.scalacheck.{Properties, Arbitrary, Gen}, Arbitrary.arbitrary
+import org.scalacheck.Prop.{all, forAll, propBoolean}
+import monix.eval.Task
+import monix.execution.schedulers.TestScheduler
+import scala.util.Random
+import scala.concurrent.duration._
+import io.iohk.metronome.hotstuff.consensus.Federation
+import io.iohk.metronome.hotstuff.consensus.LeaderSelection
+
+object BlockSynchronizerProps extends Properties("BlockSynchronizer") {
+  import BlockStorageProps.{
+    TestAgreement,
+    TestBlock,
+    TestBlockStorage,
+    TestKVStore,
+    Namespace,
+    genNonEmptyBlockTree
+  }
+
+  // Insert the prefix tree into "persistent" storage,
+  // then start multiple concurrent download processes
+  // from random federation members pointing at various
+  // nodes in the subtree.
+  //
+  // In the end all synced subtree elements should be
+  // persisted and the ephemeral storage left empty.
+  // At no point during the process should the persistent
+  // storage contain a forest.
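+  //
+  // Downloads are simulated with random delays plus a 20% chance each
+  // of a lost response or a corrupted block, so the retry and
+  // alternative-source logic gets exercised as well.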
+  case class TestFixture(
+      ancestorTree: List[TestBlock],
+      descendantTree: List[TestBlock],
+      requests: List[(TestAgreement.PKey, QuorumCertificate[TestAgreement])],
+      federation: Federation[TestAgreement.PKey],
+      random: Random
+  ) {
+    val persistentRef = Ref.unsafe[Task, TestKVStore.Store] {
+      TestKVStore.build(ancestorTree)
+    }
+    val ephemeralRef = Ref.unsafe[Task, TestKVStore.Store](Map.empty)
+
+    val persistentStore = InMemoryKVStore[Task, Namespace](persistentRef)
+    val inMemoryStore = InMemoryKVStore[Task, Namespace](ephemeralRef)
+
+    val blockMap = (ancestorTree ++ descendantTree).map { block =>
+      block.id -> block
+    }.toMap
+
+    val downloadedRef = Ref.unsafe[Task, Set[TestAgreement.Hash]](Set.empty)
+
+    def getBlock(
+        from: TestAgreement.PKey,
+        blockHash: TestAgreement.Hash
+    ): Task[Option[TestAgreement.Block]] = {
+      val timeout = 5000
+      val delay = random.nextDouble() * 3000
+      val isLost = random.nextDouble() < 0.2
+      val isCorrupt = random.nextDouble() < 0.2
+
+      if (isLost) {
+        Task.pure(None).delayResult(timeout.millis)
+      } else {
+        val block = blockMap(blockHash)
+        val result = if (isCorrupt) corrupt(block) else block
+        downloadedRef
+          .update(_ + blockHash)
+          .as(Some(result))
+          .delayResult(delay.millis)
+      }
+    }
+
+    implicit val storeRunner = persistentStore
+
+    val synchronizer = new BlockSynchronizer[Task, Namespace, TestAgreement](
+      publicKey = federation.publicKeys.head,
+      federation = federation,
+      blockStorage = TestBlockStorage,
+      getBlock = getBlock,
+      inMemoryStore = inMemoryStore,
+      semaphore = makeSemaphore()
+    )
+
+    private def makeSemaphore() = {
+      import monix.execution.Scheduler.Implicits.global
+      Semaphore[Task](1).runSyncUnsafe()
+    }
+
+    def corrupt(block: TestBlock) = block.copy(id = "corrupt")
+    def isCorrupt(block: TestBlock) = block.id == "corrupt"
+  }
+  object TestFixture {
+
+    implicit val arb: Arbitrary[TestFixture] = Arbitrary {
+      for {
+        ancestorTree <- genNonEmptyBlockTree
+        leaf = ancestorTree.last
+        descendantTree <- genNonEmptyBlockTree(parentId = leaf.id)
+
+        federationSize <- Gen.choose(1, 10)
+        federationKeys = Range(0, federationSize).toVector
+        federation = Federation(federationKeys)(LeaderSelection.RoundRobin)
+          .getOrElse(sys.error("Can't create federation."))
+
+        existingPrepares <- Gen.someOf(ancestorTree)
+        newPrepares <- Gen.atLeastOne(descendantTree)
+
+        prepares = (existingPrepares ++ newPrepares).toList
+        proposerKeys <- Gen.listOfN(prepares.size, Gen.oneOf(federationKeys))
+
+        requests = (prepares zip proposerKeys).zipWithIndex.map {
+          case ((parent, publicKey), idx) =>
+            publicKey -> QuorumCertificate[TestAgreement](
+              phase = Phase.Prepare,
+              viewNumber = ViewNumber(100L + idx),
+              blockHash = parent.id,
+              signature = GroupSignature(())
+            )
+        }
+
+        random <- arbitrary[Int].map(seed => new Random(seed))
+
+      } yield TestFixture(
+        ancestorTree,
+        descendantTree,
+        requests,
+        federation,
+        random
+      )
+    }
+  }
+
+  property("persists") = forAll { (fixture: TestFixture) =>
+    implicit val scheduler = TestScheduler()
+
+    val test = for {
+      fibers <- Task.traverse(fixture.requests) { case (publicKey, qc) =>
+        fixture.synchronizer.sync(publicKey, qc).start
+      }
+      _ <- Task.traverse(fibers)(_.join)
+      downloaded <- fixture.downloadedRef.get
+      persistent <- fixture.persistentRef.get
+      ephemeral <- fixture.ephemeralRef.get
+    } yield {
+      all(
+        "ephemeral empty" |: ephemeral.isEmpty,
+        "persistent contains all" |: fixture.requests.forall { case (_, qc) =>
+          persistent(Namespace.Blocks).contains(qc.blockHash)
+        },
+        "all uncorrupted"
|: persistent(Namespace.Blocks).forall {
+          case (blockHash, block: TestBlock) =>
+            blockHash == block.id && !fixture.isCorrupt(block)
+        },
+        "not download already persisted" |: fixture.ancestorTree.forall {
+          block => !downloaded(block.id)
+        }
+      )
+    }
+
+    // Schedule the execution, using a Future so we can check the value.
+    val testFuture = test.runToFuture
+
+    // Simulate a long time, which should be enough for all downloads to finish.
+    scheduler.tick(1.day)
+
+    testFuture.value.get.get
+  }
+
+  property("no forest") = forAll(
+    for {
+      fixture <- arbitrary[TestFixture]
+      duration <- Gen.choose(1, fixture.requests.size).map(_ * 500.millis)
+    } yield (fixture, duration)
+  ) { case (fixture: TestFixture, duration: FiniteDuration) =>
+    implicit val scheduler = TestScheduler()
+
+    // Schedule the downloads in the background.
+    Task
+      .traverse(fixture.requests) { case (publicKey, qc) =>
+        fixture.synchronizer.sync(publicKey, qc).startAndForget
+      }
+      .runAsyncAndForget
+
+    // Simulate some random time, which may or may not be enough to finish the downloads.
+    scheduler.tick(duration)
+
+    // Check now that the persistent store has just one tree.
+    val test = for {
+      persistent <- fixture.persistentRef.get
+    } yield {
+      persistent(Namespace.Blocks).forall { case (_, block: TestBlock) =>
+        // Either the block is the Genesis block with an empty parent ID,
+        // or it has a parent which has been inserted into the store.
+        block.parentId.isEmpty ||
+        persistent(Namespace.Blocks).contains(block.parentId)
+      }
+    }
+
+    val testFuture = test.runToFuture
+
+    // Just simulate the immediate tasks.
+    scheduler.tick()
+
+    testFuture.value.get.get
+  }
+}
diff --git a/metronome/storage/src/io/iohk/metronome/storage/InMemoryKVStore.scala b/metronome/storage/src/io/iohk/metronome/storage/InMemoryKVStore.scala
new file mode 100644
index 00000000..ee17b901
--- /dev/null
+++ b/metronome/storage/src/io/iohk/metronome/storage/InMemoryKVStore.scala
@@ -0,0 +1,24 @@
+package io.iohk.metronome.storage
+
+import cats.implicits._
+import cats.effect.Sync
+import cats.effect.concurrent.Ref
+
+/** Simple in-memory key-value store based on `KVStoreState` and `KVStoreRunner`. */
+object InMemoryKVStore {
+  def apply[F[_]: Sync, N]: F[KVStoreRunner[F, N]] =
+    Ref.of[F, KVStoreState[N]#Store](Map.empty).map(apply(_))
+
+  def apply[F[_]: Sync, N](
+      storeRef: Ref[F, KVStoreState[N]#Store]
+  ): KVStoreRunner[F, N] =
+    new KVStoreState[N] with KVStoreRunner[F, N] {
+      def runReadOnly[A](query: KVStoreRead[N, A]): F[A] =
+        storeRef.get.map(compile(query).run)
+
+      def runReadWrite[A](query: KVStore[N, A]): F[A] =
+        storeRef.modify { store =>
+          compile(query).run(store).value
+        }
+    }
+}
From 1d1c354718ef621ba1128e12e076deb529d484b1 Mon Sep 17 00:00:00 2001
From: Akosh Farkash
Date: Thu, 20 May 2021 11:44:03 +0100
Subject: [PATCH 33/48] PM-2937: Local messaging protocol (#24)

* PM-2937: Added InterpreterMessage.
* PM-2937: Just return isValid, not an updated ledger.
* PM-2937: Use CreateBlockRequest instead of GetCheckpointCandidateRequest.
* PM-2937: Comment about block history.
* PM-2937: Rename to CreateBlockBodyRequest.
* PM-2937: Update master based message passing diagram.
---
 docs/master-based.png                 | Bin 52422 -> 75677 bytes
 .../messages/InterpreterMessage.scala | 175 ++++++++++++++++++
 2 files changed, 175 insertions(+)
 create mode 100644 metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/messages/InterpreterMessage.scala

diff --git a/docs/master-based.png b/docs/master-based.png
index edd1ecc8765eb2dd0550165e013e358c174a749b..d75c758355d8cc402ffb6f57457ff8387a6b5929 100644
GIT binary patch
literal 75677
[base85-encoded binary image data for docs/master-based.png omitted]
zyLUqf#FS5%$4qp3g2Rl3(FZ%S?QYGw!0ZUZSg~@Tp^o&l(hi?(|8-N5)QMQBmX5Fa zY?!~iVFBq@Pr6?)Ow_mu8CluR_IA?mjkcl}Sj1D>&wQGh{w8~HTI^?Z4f%@Qfe}g^ zCxQ|(Ia&W;RL$qRK9^(3SrJ)%#4ht6_UZu@L<;gXIM@|V?)u7|UcaF~IIKyyC<%Z;-|Qghh|tSZ6>kX@UWr0rR&$Mv}O+7=tf#d9z|eFRek zYc%x0Fi@nM{o&uPyB0{>nTm@NTC{luAsYj^9&7o21do;K$ET$wgaZpe`Bt!jmG(V8 zzj&c3Z*-dyhP|y#fo57y)g&30ItUgMNwtVZljW^E^kRtI9!u7J3u-Oj-n~KaY;^6r z_vRvFpq3&ToNdv~SK^B>*+nqnroPqe`u0eWPipa{7hYL`7a2{@RyJQd2Re#dAsNz6 z;Q!U$n}_wBw}1a1`wT|IkdT_mAQEB7TK0Vnp-`a=X%R`4!#_ zEQ=Vh=KW#q*7*;Yc|FjCzp10p_*G+P1{rySpII1NTSbKI)L3F2fj{=viq=n4Q+ui3 z8J2ga?%hY18V;h$7{(pX0bP5*cP z2hC%rRI8%%q7j2WP)w7Y*4gJIG;X<72kg(Tdb7V{th-&@DzEX^FW?ulHpEhEJnc_X z*(gPqi^`}e<`}%(3Z8&w$5{W^m?jG|ndD7SIg;dCmUjLusq`PBmR;y59S?>vz!5R7 zT`IjZn4;Y^>#bN~7)C2rJ4X+c$4#e~GrrrD)v5)05{t@fzGjXB#5qn$s593qeT2bog@Xf$DZHg2mG8nuGPQ*g_GA0@6A)=}{52FH?zi9X zt|K~()ejS?go*3wHQ>(unLZ`f`tt7Dk7fgT*an|kk$W8l%T~Pj-dv;-9Djs0CrioX z8N~FXEol@GWRU2dAW;_SH?R;%rj@&rI#Gl>P99vltu8ND{X%cPePn_+M9&QpCsJ5R z@c`F326`*TtiRxRw>}J@IT2YfggT}hqdy~kEl*{ zt4ZMALC)_WCO3iE$g`qP4yT(VcryiC{teuCx5TCaKFTjhxOJ;rYIO_d3tAR~I9C@@ zM^R;^`NsH!luN?0>03w#XebHzakR-k`m2w>_<95+9CUmoW@}0J1^X~%SW)cNn!OXn zU}FM}X*&7BNA66&aqZePvh~vwbch1K0q$5<316r3CGi1#!RBj&tnx}!UaSaa?lwIH7L;vK-lj*TaNZ>&= z<>>%{;UFkqo=yfXnp9KE@DL*;o*WvB`H6vQx}`VYMD$Zx6cQfuY=v*M`+%WCI}RQk zGcTqoMErg zg+a}@6Hx50w~-)Rm&{WjtD)lT1f`n942n6Y!MQur%nEIulWam|ILcQm1%5;&v%I*Y z{;i09Hb;)R(}d?J6Bz-MKCM;EJUw$OT#^p%Lxn)Kw5S7|%XMrcOx7R*czW)D{mhJe z_gXG$4jiH2DN2Yeh7V5vG|07T0E32XSdlS^H`@p8Iw@{`M zE73uUKxH1;DRR0A%Jh^>DEqe$vjOv8Qekf-By@^P>b~picU|-gN|@Zd zTYL8Qm`hq-xA%Yh&qz1}a5P||oj;sBSpNBY#@>HQpf9b9>GN`$>Fj)|@Ltm68#)EI z>}u=802+2}V<^EsA$|itGOXkfr$GQiY-}v1l)b{Z!7b!DJZ)5`Zh*j#Z*SeVNhTKy zt!QEovna4|iBaRP$<$KK2&AQcy4U`%Z@1|dOXGHZ+!t70_yp<&IxE<`zP`S=BVyjg z_{jXZ`AmYrEzRYbq$P;N+6^aCWo#upJDCXFI3JAHWTJQ-QC2ghnJD7bo z(l<=U8wFmPfH7+fFg6Pf8sQ;~^m@Wu=tG$W3H+{k^Tpgz3R?X2^ypxaVzY-E8l(`W zmbc3xK7-du-;-iD$g@C{_^`Yz2-yadN|!mBW*)RHG8d`oHqx5t1+GKUC3D1=oE;oh zs)~FGcx)lT9O%fNjQO~7@FBpOd*_$VKw&4oFP`Wp_Nik__Q`E|Q~UABsp>M#VW3lR z-A>wM#mdSV@uV<{gram71v-M|l^HiU0|(HB@=kM!IWglPiA)nGF6b-W*>T>UR0H?| zSuO#}ypw|q8-b|L@y@Q7Kw5eXRZrX_YJbf@0kG*yLFdc5PB$Rx0Q#}(DjVrcWNx5~ z65fj|u^S?rGgVUTaE_bhZJ3wh$wP};cU@J`0ry=JgyNvUt6|MwccMQYQ z(-5-;fau8w2JZulAT@C%O@#P$;nyW%I?u>qLB1oMmTd9T`;FWckf4KkX@ zi-=_zn@#LihaGuM>cWmP76NOZWGZ#$&X!IT;%>+$-nhq|pd?d+W1Z_|QPDS(_!|Dp=$ zFi2_Lzt6AZz5sNH=c?BmH+5=0;{_=BZjt=cwR7wU&S_N{6IoPVUOpuC!I!}ybR@W0 zm@@j`C>2XM2Z?ye;~&KmH>V9f{!GnmQS;TSXV>-~2-ZH) z3VPt_vdD9_5D3zh(F%?vft%nxS>TX1RO!OAPRl#?Gx^6{6y0y41gjVnjHO7<1(GI^ zGnrX>WzgW@(p9#$H_MEs2HQZ~%ctCe3yem+j6KH>>$KuMyKYIQM2@sZJ zn}REs$)DYso<;~A!@mNf`Zzd!#a}z|bUi`jtrQ$6XXk|0uiF&Yu4AmYiiu2PU~edi z1lwrAINtvWQ6G--WO;!JodIODjMPZ*+2(izStW_;o0yb_q;}H%2cZ7KpqM{CsXn-W zUuDP|9a13kILgKSE9pyDny8DQLIm@RGC`Ic4=$NvH|Wqm>MX3R07I{&%(J`Iq>ihN z{$ZQ4_5|+xImiGA#;Jc;Tz(|SFoPPv|HxR(l!G5OHPVIKUE*k{1O$s6Wbj5wataLz z(x}5wH$pYiH}_tA^T$D}UQ1LWm8H=km$G(}^*lM+=WCXn^%qs4PX4=c@Ei}`__Pcj zM&NpLrfIP`aViK7ya=H(rbiw$0R&`lg)B$YYi3z}GqC8=oxWWM9*|-9U)yV4mz0++ z`Zjuk7ooZ2x$HqU*m}S>puDVU)24mUY%L-O?m^Mxi#Gi-)oYLaEra-L9*3?u;8Vr? 
zt66qYkF}#b>(26_MZijQtF3Se(q)7>Ytv6MywSNli~aKDivbf?RZru*NCD7WMmJGe zWY_#65GzWsr}fX#4{-~g-?RWxOjXbMzh-?MibsP-c;&t>0ejQHcn zZFg-$+^bu2yriQKJB|DW017L2CsX}36VUg z7lnl`6U!WctLBY$0GaePUo|_Zxke-RI+VXVWpN>epGr}3VuLNKI3o}HMSK5J`ubdi z_Z0wcj+^u~{WVIa%BLH5R5+bX0OE+yZ+=Y7P;0T z_o*Ei?8Nyfq!q&J^^!n+<6b7E#-d%O6*EX(NA_cM6Tvrc4lQ6{?%cRN>iEwgNyYVP4z2frNP#aV zAnFpTNdOn_eCEgdbYfLKr%IZy=yD&(239PY5}+yj2V~Ktc>MuWLEZ_%{>jVvxZFwI ze#2udu>d0P8UQjvaZW{c@vFMLDT1tlStKX8#P|O(6#tK*_`k$ZbU>ZC6^v@Ju^#0Y z@r8eGe^Qu^(=k!HB=(BFt5OT z@;W`L*gndHbX)Y6HO4n^=SBs0_(LWasYQYwul=ZrT2hi90Bx#*7QGT#axSf4t#Nu! z_+N-bKvqGbu-%$3L*B&sve4RfAI|0Y>9Ps=`_g!l)=uL#9} z|E~g~!Lri^iw$~BK~R)p{beNskr7?_#h|YE~jpT)xnE%A%jph{C z4wJ6ZG%A0(qu6Bm=~DV@NuUl4L?!d_5@sGT+af*jnrT3Re1>^W6D|N)DCs;4j!z5M}Q{Z1G zipHi4S76{F&PwPu?p~Y2)G-!%bwg%#DurvqeK^G z*ri(&lX&U#b0^A2dnPD6ifWzwzf~1RSJ2x&2$i8$EBMOAwo4sDy}_oY-9f9nt@(-y z_(|>#((ez9;15pE(vm$Qw5t>FjY<#&MVmX#3sq#Dm*#?}z*`c_^8$E@j5=Ai99yIZ zvm*PgJ@vk5D$v#dVx`AMHU8-T7rJ9(VFX`k{n_dzE6U!HWy{W%UXd@gjn4lJ@19fx zSHQNH(}ly;_)n%>q_m;R>w0fSNFrAE^gi72xW}8tf77V*t7MPKcD12@A}e@#F%!~C z(kz#!g@J)%H}95aSx>Gmf+8WgZ0$$Y{M(tRG{duX^RHy&FfOmGH3csfT$zS~i=`?I zCmlKfZfgHNe!hR3au}+MFILKKX18Z?k42Ens7kO+h<4J=n=iOW*$CqP4~uT7My>)rc?e3rkWw1=0lbK!{_};ewyn=3p)YW5CS~PYR`k!bZDP&=Pe%WYaDU1w zB8j<*zGF9yh(>^idc#OtBz`L`(;a}APybKx+kS~0qKY-_y=liWW93F>L~|6w=sN8F zVhKxGsoWT3nKSMwskDUy(n?B81p5Kbl65LNxyiAm?<#?&>_h^E=!F-_a)3i z%pGEJ2%O(k?dzdXPk>95l04WMk9$4CLr%8nByKU$y-W5gpI5-|^s&oAkdrFG5t5h4 zNxRr`?oF|&h$g$TxGsyL$FZ{t(YF0HL7cXEWH0 zXMBwWDy=QdbegxMstH^s)cPVFq>~5 zXH@YiB(T8njG<-Lw24;pBOFfOt->Wv%li{QCEQK*I#w-xM$BE5@7cx@BN}K{t`IlIYN)(kehX(rb}%U?N!HhmsNvRyK(- zh-D)nW3%2a{Q1s=ufMse)|rP_S=nu-wj@Dk067Yj&A80@XV3rU!g;b~Tt5%5Fz%j7?666XAl;^wTpu`%X5DYmQGpk#j)?~}f zPL%6xIxtYi*{f!EXIZ0A6w9u$M-Hw0wxTiKhuh14YPmv``}~vhZS~HzZ?CXr#@A2U z^5i!HV#>EwJpQ|7^xCpzz&9!S|G^6wk9I(b#hkZ;@p3o8d*rEd_<_oYbK$lX83r7w zyWR$y%+1XQ{*jq;1dYERNgTXS;_th1`cm-3P%>*(lolwUe75fK%GMrDG9l;s^zGa6 zsgtDD6~flX)L&~d@Z0y(YWBW9t(7iE+IHa91zV5+T3!Vy%j{KAXULhC zmrU2x;({Ia+q+jndJ)dPW z!EL_%NPPd@Tw{~USU&0Ly+>Nz8*-H&(umqR!uI7+`H&?Ys$ceUGvoH^T;_n~hH`)E z;Y#7MK>}~4rk(_84^R^s4DGCailrM!eOjRj_%&_}Vd6P61%%|3bj~cjEsEJ8)gK=y zU{3u2lzj%gvdA3E93G7jlE}5t%jaLPo}{YHW#w*l7q+AWz0h{<^FaX^MO3zQ-Sne}QM4*V{Kz3L5&XdIlA^1n z<$)l^OLl*{o_n+`kvx;LXM0H04fbphnP2g4Z`e@(aLBpy=leqx-GN7TBzs);9dV$e zmb88SKSK`mKtJ3Ry2lr#yYyv^vQv^7?wi_!CN) zcpXI!AZR{@d0E>d3T_^MNSDa!1+<&{>1-$hTA%c8Uwr)Nvw+3$6FPn={GJl}0j2|k z9H+S4or8r`5jz+aDd6C2)GuIXS12>lbf6MLg(zLND0AgE1Aju3E=54)R+*lqkg*53 z&sAhYa5R$SEfBqV%j@Ae$Wfl0sR++Rad;7!qQR{9~R)2VOG(*i^*#mlzdP^<7mDC-)Z=nCWUV z1U>U3s(g{rqxael`$5*GVAP;D&qjt!M%L41&sJC#xqJC~bOd>5fR=0C{fSzTN$^%U=7SoX=jqzPA7lWs3^aa2|F^`HliyO(Yx| zmDhQ2%H&c$Z8_g{1*L${U?qswPW}3|K|8<>k~z3HL@UK(zzn@l#$_1*kN`^l0o^t? 
zjHoeTvc%f)A=s_j12C4``Sx-*NJF9Nu)R`rl+h4O!usx5Yri{vR3~|sb+51({U%jp z{N%|m(QZkLN2L=+Fr4)+^!{6cOaYcxX&%wErV*Bq8_l!sK_ILmwurOt!1pL$8@jo} zw&$KfJKblr=@Z;!8o|*B%f%(y+b=~fGhb$dP39f=bPbwrEG4S*78m*gt4Q8y`H<=8 zjd3At-skC$BetcDQ|Bd#ufrp!)OzQBCr8Jp`sz$~z}Sn;&E-z#>6JZB=-2E6^EYkY zEX@q>kLs-irxxb9(4i=!or16$nKvnI6_it|VPZ;|F-OJC<f9kx@8X3Edtb+xsgxDREARAhdzGC%eOV@0qfQ%ZlL)o!Mw zG-_I~k`vq0EGZ6+t4&J}9|v-MQ1a>B#qThXK1Jb9qN_#Sq5Ge4>iWG`Xf8 zZ-DDO%EF?Hg6yjcM=CW7jS`C`EeKWivjtP+JYwRSI8XA+}v4M;K#b^k2D4@2p`3YWc)T;Xl#v>G=Zdy)_c z^V&PdOQzMq?BgYxf%n2Kng9U3j(gOX8RI^)0p_yTKv;~?TA!-mHA2bKW0HnHsmEkW za2%%#(oh}WNyq>RE+0$Sl@Lkvc@|t@X7BnT7Afr+YNfE_Y}&VA?N**%Kw&KcjeB#% zj6n_~r#}aTEWcc!wjzSZspO=7_0|?02i*lMD!N$E)7|TlW#-tqX(c5kVOj1Fg`8VM zz)T<$O2~Ec=A80*wh!qJdOrR4Kt(=|&pGl^g!tXIO=HlY_Go5Mk+q0F;PCtwwNkz# zw!Ho z?1Oo@t!Uf0fMbW0yu#k(k36@8oZsury9@4Rp8D}ih33&Zoe3S=+Bs9W=>B&=x3n%c zGx)xax|q2V=A0I4PhCH?JHZmT<6Tml^aa=+3YGG=&SZHC$JL{CW-0-VGYRGj3lA@> zr%SJk$LYyP8;D}9iv=2O#z?DryeYcfNrA^=(0)3oJhCE+pYqt5*m|zgwxY;!0d>uT z!L26~+D+(?i4bI^N+y?Fm>;sQR+SdWtEs8V=8^H*YHNF$#JnPJQHmn=?`KF*-}s@9N{FzBGp9Qcpl9H~feJPOizbjN^zK+pck>@tu+9yB z$BLr0Lp=WUFO{(rd(wl@oGfWnHfdD%2K%-ZNn%HkU2XMeNv+1hH~K3G5>EpU4uE=6 z>!Oo}!4*-%95-sl{s}rr5i|2K#=-W~rxcj@y53a8p;HHS=Led4~UvkAEwzMuz zgu61kFu;I*AZATs75cnJEQQTqw^;ZACJS+TXFObpwVgqjHv>B$MrnheY+N8!Qq()Q*(=K9X*)8}SV_l&Gp`%Kmh>2vgGjJAe%oiirG@Nv>UkE`#zUo5+o z{A;h=qeE{#8(HLCSpQ5L2OISv&d#=cKVmolPI*?final~(o&}3)ze(t(?^Q>z+sYIQgui0bhO)c|ZBXu4w=+QJL z@oG>%t!553#YcZG$Tef#c(V(j6B*FoCl7cU?jbo#Mn^mJ}vgF>Ms1WUyrB8`dy zS&ghH0t=C}x?G0Oyu3Uexa)(iUzXDQC2>@X=v6HIq;JwB>^g8DrIY6nNoAdrGl;Uv z)cv0B+DxS}qMsoE=e%$Hp($kIJ-x7^6F(smTe>RxP0;|M2o-jcV#ON3^(eGhLaYo# z6~K?}PqCmRkLMJ;{pg%isO{XNAL9)N8=u`=_I6Nu{WFh_WWhYoEv&$YR6 zzhO(S(;^8x-4Uma7|ZbGNoFaioE6A>QBN=u^nB>RA%4~cmIool>~LF{g%l`-Q`u5A%j zl$9@L+^&ke{QQ;3A3(B4lbKJARy3X1iBl7}a>%qf7w>XjDJXOjDhqw1khT&nmsD8( z3Y%$l6(TRg08sBRP_itRlvsS#ff5;fJG1CmuiASH{(N!t%bb>8cK2*1whZ*4!j7FX zr3KBQ1tfs*1)9x+6WeW`ezfTE;y&V%fOjYe(MNub;t&AY_-()W^XC_KZpyuZu4v%H z77aiEx5}wnSykmsi!}faN=|S9!LEoEX^S$a#egN{oK%S>as5n+wQUV&So?K;|o z_wZ89x6qu=0kv(QHABTday=33rLKK`@5I@Y<2%Py}6IOr+pTS7fcoIpL&&&(D@*h($1jL%xaW-4p$m$%v=N z&PPTs3)5)W3lMkF_kQhBD=-q+7e$6UBn_YH(8k-d}{JSQ~H3@2qwG$0nYxNka**+j5 zSou|fhr_sWTfGy9IJ>Oq=co0)Cz5YLoV>ahom;u2qIm1#rahG1tH-$Nxn^gV{cNc< z{8Uju6H4LB0QoHkPNl7@Y@!!`t6y!Gp##59(9t<-Xm{t(NsG&0%5@(a`f_cGxs17$_H!Q@7fmQzh*%GzYI|Icf}#JaKge+5t^vSOy0=h!iK z2fg`uaLsSZ;fM5Jty`xR98s8luwKpn>IU5}p5gs^@8oi}DZ7&Wt40&OYp=}`c+Z`5 z`*~-4{H@Ov)lLqxvsX`9*52{W(lt*;gi7rBcLaPHyquen8Q7-%+RR+L2L`vsPu7}k z+P|_g_l?^u)3RR8>-Hxa%;y;#@O`-3u;}{Bt&O@zFI}2!IUUUHzf?G)c5b7d5l1H%k(Om$y|)-V`j`740hN1aE5F zmo^ADF?+LVq-l!~ld@49cqgsKhEdZb&O4>RuVsY8$uAF#m6KJn%JqZ+pMj}_33DncN4-Nk<7U;2dfdNC4 z8~_Rgkl9u-=eCTq7aaZ3pJ<;zp|UAj$az~6i6L8u-4~(5I`{NC$?9O3uW*thl=?s~ zqqFeZcQ~T#wPLKQ7R+uH|eAl$8g!vLO z)k>CTeH(>p0i^*>ON|H) z+0ww@g`8N7Vp)v%aJ;w*rGn9rCj!kM2?EKO)a54$9+xO-uwgMx&{VWkS67P1Lm*y6 zNp$0*NW;zkx<1g(07LVN7A^k=bS%y}W{<}WPp>_HZZlJf%5T=XWuV2!5dSSREg?-{ zyy`0wbYvkVa^YcS<418?*OOhhK%0!eie28`64oYr5e&~nB$vfuxuo>2SgBQI-yUfc z5i%0!j+^l$kd;FAmqLs@iz2_W-fjCyOI@E`3bg%Yk5*svHRToa=Vta&Os!Q6IX9`( zlJq4`{d_0IGpf{i%$B+aGuNo-DDgoB0Ks@!v5IM|2mmf`}PM$V%EgaDYLw ztQ^i0NApXNHPLNH9rq-_Deu)QpD(^vMg!Ytx}@~q*2E&+auzGh+V*@gH)i#w`6i=I z*P-R znD+7AvUO`V{c8CvtBjBF^UfG%FVswI5N`D-&RBK-)Uyjl-`;6;{@@UAqa#OjFT1o1 ztdTGH8BbOY>Ns=PgM0SmzIR1&M8Vo_dx`saL;JJcZGU4K50iae%a zp#A%B3Phk?va9Q2{~_a_CpK5i8;ys~V7UX<2Jst)JTJMjh30 z>U=@EuWaLH#e)mdv)U@fTK8L)F)i+_^_POlqs~o@>o)56?XN40pZ_xF9M7BpZ0$GH zljIVJymRgTsE7P@0J~Hbo6U3(rAR%3z*(e!;_zWb+kIi`p(~dd0W8^s?#IM#>(GqSudnb0zU;h;fe$qwp}>&t*NEhUvwQuJ<@? 
zUwem!gSJ0^wjtOfrZT9eDV2G&j7k2o_k?|zE8!yQRv@63Q(=KGz?#JA3IOl#uPWL$ z0?$)u@rAdM;7Lei$!&xqSlGu`43d0r-B)yE+M|1y1qwRNH&RyA*1V|KJmCfSQO}4S zRJ0WzUr*Po{y2kBhI+El2;&|>Pv5Nr50nj|;H6N^i_rd(w~jmh8spnQ_8cQFUkJ;< zFWrc$=ufeBB5Swn_!B-5PMM0ztlGaZVQcK)cWTk^{Oq)+%5#ERyllKPpb6dSlT$j$ zYX|#u;TbcRQT`<$Rd0YfNlKF6fj$=1o}97bFu~^{ni4U!ianpa0a+~k&9DvsvwQ#k zn-CtoeJ4q*Bw&P$Xto{yUHv6_-^ii^g&)r5vDyYJbRhL)(+8rf2FxQ8cLdKs##kze zBT3(CEop&AWVzkfS{sI*T1c3gGiOHc3>7DyWJC$^w4kbIQEY6BfF_9vMLT@d_;5|+ z!*!-kk>C8g%)c-=Yb1q_U_Yi(fy@*_*D&&m7|?y?Pk$L^6%2FYY??+A8#;6No%L6D zZ$UX=62605cVUE4(!0;%r~vG>p1o|&qJ;}3)DSUC4?ZSiSMznk=g_lMu4aZF7QM6E zUAde(q{%S& z^$FY8eEBxVT7k>>!`eT~n)-EU9i%z~RTty4iACJ;z#Y|;$?ucfI`KMBDUg-YLVqvtM zzsJ*eAx}0~xf&^{t6yS!a7ZPy)mx6K<9`mUI27?~FXIij9(i|37@K*1b!lnIIiuxQ zR_uLjmRC06Ld1fLc^B@EU|`0#(6mKQ<<%~>ad!Smvy}dj1*}bFpdIzE18qRsW@R6> z#e3&z_vmzNyLV!rcpYBz$nSd}hfnF~+`eD;_R$gAe#VWGh?l(P$lNq@MNdO5r~Kqs zS8H76^fHMo{%rl@)%BZdLz>K;7}_SW=|rV%W0n@4*;Mf*r(-LFh33714CZ-EZo~pC zgN}87-{HXEB)6%*g5VB#r(06`p_x)tlexyiYUYZrZ~MKdS+8Vkml8LA z>XfTG&6}U`#%~!oH(0M`hs|;$9r%VNGF|`haOv|6gHnul`5nBYbm&~}$0gtA8Ee0D zaZ&E=kt`m2%7^{GarS>>oUIFm+$_+Rx51q5*=`T&uS|6diq@{?D7Y;lL8k*s8e6PG zFv41v;9WjGn>oxd<;d0(K*P)uSK3~0W~wks4rP8LeFC73wY@TT-E-wdJpg=3$Q?**C$lU%G<_*NyF*SnWRuskV zF1!{)T-$Y|q3O_*0Ttdr86xFYG6JD|5h1!u#+E2)B-j@Fk}a}5Ieke}_=E4UZO?h8 zAe7=!Mw857)sE;K31B1zC&Z*La&*QYw4IY)>>L&Qyv+RGNVnNOiDe&^;&X40bS(=A z3m#H_GQJ1cD5uo5z_~$&XALa_54GETg@PyRUKL=84Dpazir+IY_ETWBl_gwdR_VW& zJi0-~!jEolwFaJAMW4g-pG2*Eu+1m&&iIL)Q_JoivI=~srBpLtSI5LrvOv3u_-RTI z+2c_1oA`>Kd~VaawPfJlgN$o+<(^LSdGng82NSp+QQRxmN@Yx7rH74)%e=YEm8$Q( zF0xc}$c%S>8L`GRvdI#*+O3eZv7Cqa)NY9VPr(I}VpXCN zz-LLWywuqRXyO-L-OsZRR9e;#FN`?d_?3UNw(o&iub%7ml?5CSA#!=BiQZ5w2#`=9 zSRf@wvebMXj&E+qUhj!xKEORxSz{~<=V&7vyl2DD^WBzT2Su|3b|2!G8(+&)Zt{v8 z9zUMIj+8SNisWx~5&vBfA}Uh!|^_>`2C1(kzY zBvIMhA=ADsTvJHg)Jc=h+aMhKVI7NreE7F)cGiuQnMLvb2N%paId6#0p+TdI9Gq^e z@Aqhw-{Je@E7K2@S0rh_Tl>lFLT>EpD(jCPai1bbU%SOKyX7Vc%%utM@;|5754ZXh zm$N>4&VZg;I+bIedz^7^;@Ga;@#R@#ZkLVv)3e=;j-Mkg57p)SIVcoehMlhJeveJL zmW?cQZESk%>p9)@Rs+*oKev;H=oKDsNVlD>VeqJqAq|+`dVjEszqU^CfxaC&>1W02 z*V^@ebL-Y04?o66bZq0bY0_>!I$iWfO%*Yk2l*q*#|-sNS#DbMG| zZE(`mn6-Y4u%EZPg%}OI@Lro)viy0++w-d%|ET&XP!9B2+vTmz9LO8b7;5|D4{OV= zg|9xat!U?#nL=SnwK})iO6g#|dgXuob}cilty1yTYB%GMbxMXcet~UV z!Zi#6`L4%BltN|n6( zDkPLlmXLC$p-MBS?l2K$zj}3@4ALZ6KeXqv9!5wam_?Z;Ky*;-WD|T2WYm*EVxt_* zXlUx0_V*KXv(B&0K5o8mVf`Sr`1ZSUFi1v@Yz<_LmAEF9Ua}feGPZbWjGSTfZU}?L z@FUxR+-s2GWV+UNo}jcmB0O=~@4@H7wL^?L-u^*#qSD(J4??D!?fd?lbMM=yG~3ib4-1~f;nTy%Fa(Jr_i6&mUy&tu)rZ}KP?ko1=|o%k2EJO zgFbF=rHUUf<>Y4B+1lLrYS*&+4cj_6+_>MsxbSRn@W?mAKZH0P6aM1Sdbu(a#Ss9Y zDWXH!jfxk^7F7}epy)UeZa4gy?=Y^q(56Lx`^{GX5(re=;BtuSM;7m#uN5opx8@s} zXzmmEra+-aJUnExq7G!nAvF=oPb?sDQ9CsEMLdzO6R%`JRC7a$p`vKi8!&|)o3%@<6r z#~}U7R!+3c0#A;OD~~L^P@=XnVqKTX6Ek}uSvb+C->Boy-U1^d5crivpb}0X{(o6` zRKxUMe^Xx$^(-~f-U;*g`blUf5nH`L_V5+L4sB zQ z&fJxcjr#QMb~E(`0r`5l!wXNuH84eci5n>ONHr2A9n<)Atx8DRTjU9}4mc?Gofg`(aHuym~m4gkAF|Jek#=yz?s zu~uWQ&oa~f$;0Y#gYc(cNl!IBy)|jUCJXNvIjE~=3>m8RPLHay40(K@#gv*Q+P9K? 
zOzrFgZoQwb8CzcxV@;>MkNeLK`gFhZjk`w9JB>_CE^XN8v1z_~hE=%QyidFK zi?~Nfs>#Q@b`G_vnb%|PTvHWOm4^kbX1opE6wsty=h@AYZfY(v4{ffqU0$$#z^Ah_ znj9LlW7`;OtE5+kSHha3-B+WE&GPqnbo=?h@aPaXb)+qYm6tpGVQZSL|8DqWgXN<` zuioDXDOtJ9EH-wH`yYGf%uP;6YdbqUx_+?%DLk+6#s}sU4RY|QJvi<}{K6GaZ1VDR z+p1YBg+;b%=naUnrL}y}oGA}249gPOzk@Ak*Vv^Oi0Csk(T@|NYuddb>=z^jsI^W0@>W~ zg=-c!4BFEoEO+Xn-jx-LT6}k*#3((su-yyKr}~GF#&oCrzmYN2OkH(~hTV&i+tvDS zhf%bfG2<5WV(zuxb`EW<^N*kK+IMiXyLM|W9WwJ2@_ipn%sc?RpEk6ouC}-T!}E5H zEW+dT57r1jc=hmE`e%(tn>fq%=u|e@s}=w1M8Ixnktxq+tskhJqIN=vz2k#KEZHP7 z{+-i-T(a@Ylr)>)POKmPG)p~IAv>C*tGP?_W>?HwUvXQ|`tsBAvQZ*P%P`Q@*Qspd zvtU5OSBgq|8MuyLlB3^1)6F#2U~Tq~`={>mi<~HgqxBX!Q@d>nXjP-zPv^#^O1G$h zVm^4Z3}_LnI<0SK%2Q3FtBjh04tEsdQn!jE6P9!o{9xh)_a(b*NP8RbZtD2)M*wz+ z=6Sn0y>&}3zp2ut&FEWMNgy?D1(>;V^uZFag;EepWdwRM|`*kN1NpO}gSiP`6GI3l(!tVEP@hcR_2 z>(H0n@n?a_HgRx1II*Ke9mn=`-@i;!OZxbB&(UhtGVnfEXVmQ%IXN4iE`J5RM=`n^ z=jNt3YpAtg!2%>sf*%U~Cwqn82mk(R!gY`YISQ) zXR*6p-ibEa9tZaLOgLD0L_gi&>B}FIXD^VuY@4+7d(9|NeCds8LgvhwvvsW*l7QwL zp;r5(Yc6=c>(C)3Q>9~voJ+oc{``5`yN$etFlo8xI_wX=q^y)&_CoIHDMG6pZ;gCe z;yXknjay)-q&~}G}@CeF9TwMA31NE*QT@h)$dc@JQ8Q@c7 zM($*#Wb6WuyFuqij-F(_!c;GGZJTa`Bk!#(nYnD!=I0;FMj!toks|eo=eR~F-0iAM ztkuJB6LYNwyE2x7w;{dgvH&vbZL+^2Q<__UhBgJN(038nyr)MuSh;p>-8ks{wsmw? zVK+C^p343bC|<}P3FSnU5)Wwu4G;%1!x z0HF$2|LDJV=u$h5- zjrJN!y!Tg8`0j2~12<`Fw(S)mmXirCrc(oyH3I75bpl76o3%{%EGNyLK@X3fpT5}J zXiML(f7FG{6*c}n*s9JcIXy2YK$*V2hkfgT8X8CI7Y)lRtMeKLzT!2!cJE%OE}!p7 zq{lv^)=Ia#N9=klD`G}n+e+XUp9xO$?OO zetucdv8uebzum%=xPh%|-JEMZb)WQIIml#c%}*wm2W;yGK|wUAq`EA?HqjMW2{O`{-fsnZ{aE*8D-*3Mdpmb3Ml@Na3A zJ>PBra)0uqre_Ti*1bp9mU;5^qs;UsFxk!{+UFD%c(^-c*TiYhiEpA+oAPj5>{H9X zs(Y;`oI3x_%J>(m^!enN>nXc;?Xpct4G$0ZWwC_lKwp$s*AomA&nEcGaojUZhD%)ZQ z5DT%8+OGRjt$Q=$Y1+*r+l{W*zf*#mU1^`b9pPR5jHmCv+-hE?i}U;UL7P4w_*7Wz z(ZnRGeLw3{CWcX`Kc)G7RTN1U12SUC28TXVaNIBA-zo_!q7cYDf<=hr;HBy=Js7?O zD#HnYc{U7l2~tyebFz}D@Ja|ib1~PDj0>7;_|}nU!)!egGO`L{8bkdSB6BBIb&BDJ! 
z=C1juHf6(K_|xqSy;#=zqh<-HnT!7T@$(y~FpWRjcw|hPRiMkd)XBp1NgiYfb|=64 z2R%KNNGN=PttF3zjOU9@$ZQ7vuEE69f{M|JFahWEGqIvK~B+vdr`)3M0y>(U^2i$uww>ohLHGVa0!Az;a9xJ zHX~UptzfU24kda7b0Xirk4ncMTC|9?*|&NDHbD$l&Igz?OWkv1(@AK9UH- ziUbzKiMOxNDhTa>>>{43SJ4o$5;0bi>MiaY*^b!Dt)dx*D>GW!cj-}{1jR{(K2%$^ zM6dis1E;}an*tiIqW!#zhn4nWFAj83Y|$S~#m`7UL+4n(+NykgY|}rfsEi{zBk;4t zE{h-mcz-7na{&A64<3Xk7Z2{+cMl#R+1P`RLN=M9(On1@z{O}?d~D_qoJ%=-p}Z$x zZj1p>B3}`82D2|K&HyWsxeh*-UtTliDw{W#zr_@qAZXJUm1I z9@!)to|pa0l6J<9ou+)x0AuGKwgf2dFWxO!_#5(3==cVLDdBNkj5to3X{6EK1lhdmM3z@1$90X@-hC{UM}|ijf2X6TRv08;&R7{Kjo!OoI3R+?e;LUrK4cA zY4C37Z10mQ42#IsWIYs#aCAB-=_Epwj6Q|z$H7m6Vj;vRDDBB~{@EYd0oAtM6t z9Fi=Nxv#+EgEl)wvi|w4;yHc-2E-vLX&CYYTn(|{0N9935gVBM<-)Kr`!Vt)(skC8 zpXN&GGj+TKNs@%_tt%4B&C{ZG1~i%d=;^ZwKh4X?>^MnK9uedbNYP1`Pu=R zHPxhc?xxLtGV#Fei|$w$+&@QFqW>TXAuIaUVp*w8GQr*pu1npNIlpbT<{r0`R^VhC zW##K)_`tZvip&Ai%G(zGZKN3>R^1shBy9mDh(wJ@*&%ASfu{4gC8$jm_y@K_rb$#h zlFfDl?T#n(OKNX+n!Ln)y-d3BH1bO&0f;}9HGPzzoPC~Y10LTY3$T;U-6MA%8~*vi zAo1;@M6ajIuj@3Iffo*_g6#WxvR{hJ0Z{6m{ z9dU0s63>xz$sNTrUF^-|WbkZv@rclQ^oc8F!ev+JQlTX&B9sk(eYb;*Lm?0KJvcIa zo{kOl|Cnwt%KsU6wK&Z|4;yV8vsq%BDr^6YkS~#NrOvdil1Avpb19Yx5P&D1F5L3+ zDcl&EMUzC-!0O*~jXF(|fW%xsbBuSh6P3|#a`_5-9Q~73f*nxVJ zowDL;!^;vvJ`D7!I~@dHFs9BhoZg3R|0ol*l43!-(~&e%+3AM;g-o!ndU|R^O^d6Z zD=xesYFAmV^76}+|1hM6I9HUkI@F6V0#Ar1=R@tEpWlnIG>eU|02is81zW7owmtv)$CE-J61batz`ny6+ug7 zB{cI*_|*vU<|gPgKgKCB#@YE2DiPTgzX-wbW;2yBYu8?6o$Zobc*o29(Fcq+?Q!ie zBk#|?>xyM*2NRA8)|d0eenT9?K5(0N@b&c_!$7hF{G=VPfv!Xs+txyr&1U$g@350A z?gN9p{E^bokGo{~@FGfk)C=QN&p(z<5AC7s!6xaHZAI7ZSWY_uM3#S&!y~hfj3*?(ErtmfF6)tpPf-3yc?D zZvd=2q~PI|lhZp@Zf{bU(1{?}kc57QkF&aJkU z(r2VpnX$~6#jQI3G)-mqN;|uE#U@-nE@41{voV+#w*R?v=Mt$?(0<4o+pSrRbS2vb ziF+poO$@j~%85noYm83ktD%Hy9ZR&a=NeNsBQD&q7kqMLqlyY=U8ly@{Vx5kcOvIi zaKGjWz0WHjI@Z5a?&%P}t%>~$)<2#1cBJV)`CG+-L-Zoj8uk3m@+a888gjD@G!pP6GMNqQ`bV{FxmIAK&L!RgzzHryxAe4APE`DCTiei(f{ig&k2f|oQk%u5Eg=#(laGT2 zcGiRI@E`g!K86p};-^txhr0l8Bc|%MXa%KGqRo(j2`S{G!V8zX*NL6RO`WhXt15a< zSXjJHyw0h#&Ld}wYc#ws&mRck0!R=7Nlu7XYI7#CRmziC#SV7-nFZheWY4!>;+}NT_#|zaRm&o zV|Ee>)I>>*&0QY2d)XW&i|QnZg0~TLcb!0rI=ty4>^@x=iZObbJtRJyR9F3X1^mlYm z?N2?p)X4tNsGoI*9#Getf8$MRtD8|qPb$CkSg3qs_Nvd&1^tb}Eq@LH;L<-zz=&{~(`eX5f+?Z29&`IJ2eoy;S#cPrs#FweHP1Hgi>1pdYSERISp~ Y?loR_507Z9;GglMr;Iu`!gRy`01vpkmH+?% diff --git a/metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/messages/InterpreterMessage.scala b/metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/messages/InterpreterMessage.scala new file mode 100644 index 00000000..8226fa93 --- /dev/null +++ b/metronome/checkpointing/interpreter/src/io/iohk/metronome/checkpointing/interpreter/messages/InterpreterMessage.scala @@ -0,0 +1,175 @@ +package io.iohk.metronome.checkpointing.interpreter.messages + +import io.iohk.metronome.core.messages.{RPCMessage, RPCMessageCompanion} +import io.iohk.metronome.checkpointing.models.{ + Transaction, + Ledger, + Block, + CheckpointCertificate +} + +/** Messages exchanged between the Checkpointing Service + * and the local Checkpointing Interpreter. + */ +sealed trait InterpreterMessage { self: RPCMessage => } + +object InterpreterMessage extends RPCMessageCompanion { + + /** Messages from the Service to the Interpreter. */ + sealed trait FromService + + /** Messages from the Interpreter to the Service. */ + sealed trait FromInterpreter + + /** Mark requests that require no response. */ + sealed trait NoResponse { self: Request => } + + /** The Interpreter notifies the Service about a new + * proposer block that should be added to the mempool. 
+ * + * Only used in Advocate. + */ + case class NewProposerBlockRequest( + requestId: RequestId, + proposerBlock: Transaction.ProposerBlock + ) extends InterpreterMessage + with Request + with FromInterpreter + with NoResponse + + /** The Interpreter signals to the Service that it can + * potentially produce a new checkpoint candidate in + * the next view when the replica becomes leader. + * + * In that round, the Service should send a `CreateBlockBodyRequest`. + * + * This is a potential optimization, so we don't send the `Ledger` + * in futile attempts when there's no chance for a block to + * be produced when there have been no events. + */ + case class NewCheckpointCandidateRequest( + requestId: RequestId + ) extends InterpreterMessage + with Request + with FromInterpreter + with NoResponse + + /** When it becomes a leader of a view, the Service asks + * the Interpreter to produce a new block body, populating + * it with transactions in the correct order, based on + * the current ledger and the mempool. + * + * A response is expected even when there are no transactions + * to be put in a block, so that we can move on to the next + * leader after an idle round (agreeing on an empty block), + * without incurring a full timeout. + * + * The reason the mempool has to be sent to the Interpreter + * and not just appended to the block, with a potential + * checkpoint at the end, is because the checkpoint empties + * the Ledger, and the Service has no way of knowing whether + * all proposer blocks have been rightly checkpointed. The + * Interpreter, on the other hand, can put the checkpoint + * in the correct position in the block body, and make sure + * that proposer blocks which cannot be checkpointed yet are + * added in a trailing position. + * + * The mempool will eventually be cleared by the Service as + * blocks are executed, based on what transactions they have. + * + * Another reason the ledger and mempool are sent and not + * handled inside the Interpreter alone is because the Service + * can project the correct values based on what (potentially + * uncommitted) parent block it's currently trying to extend, + * by updating the last stable ledger and filtering the mempool + * based on the blocks in the tentative branch. The Interpreter + * doesn't have access to the block history, so it couldn't do + * the same on its own. + */ + case class CreateBlockBodyRequest( + requestId: RequestId, + ledger: Ledger, + mempool: Seq[Transaction.ProposerBlock] + ) extends InterpreterMessage + with Request + with FromService + + /** The Interpreter may or may not be able to produce a new + * checkpoint candidate, depending on whether the conditions + * are right (e.g. the next checkpointing height has been reached). + * + * The response should contain an empty block body if there is + * nothing to do, so the Service can either propose an empty block + * to keep everyone in sync, or just move to the next leader by + * other means. + */ + case class CreateBlockBodyResponse( + requestId: RequestId, + blockBody: Block.Body + ) extends InterpreterMessage + with Response + with FromInterpreter + + /** The Service asks the Interpreter to validate all transactions + * in a block, given the current ledger state. + * + * This could be done transaction by transaction, but that would + * require sending the ledger every step along the way, which + * would be less efficient.
+ * + * If the Interpreter doesn't have enough data to validate the + * block, it should hold on to it until it does, only responding + * when it has the final conclusion. + * + * If the transactions are valid, the Service will apply them + * on the ledger on its own; the update rules are transparent. + */ + case class ValidateBlockBodyRequest( + requestId: RequestId, + blockBody: Block.Body, + ledger: Ledger + ) extends InterpreterMessage + with Request + with FromService + + /** The Interpreter responds to the block validation request when + * it has all the data available to perform the validation. + * + * The result indicates whether the block contents were valid. + * + * Reasons for being invalid could be that a checkpoint + * was proposed which is inconsistent with the current ledger, + * or that a proposer block was pointing at an invalid block. + * + * If valid, the Service updates its copy of the ledger + * and checks that the `postStateHash` in the block also + * corresponds to its state. + */ + case class ValidateBlockBodyResponse( + requestId: RequestId, + isValid: Boolean + ) extends InterpreterMessage + with Response + with FromInterpreter + + /** The Service notifies the Interpreter about a new Checkpoint Certificate + * having been constructed, after a block had been committed that resulted + * in the commit of a checkpoint candidate. + * + * The certificate is created by the Service because it has access to all the + * block headers and quorum certificates, and thus can construct the Merkle proof. + */ + case class NewCheckpointCertificateRequest( + requestId: RequestId, + checkpointCertificate: CheckpointCertificate + ) extends InterpreterMessage + with Request + with FromService + with NoResponse + + implicit val createBlockBodyPair = + pair[CreateBlockBodyRequest, CreateBlockBodyResponse] + + implicit val validateBlockBodyPair = + pair[ValidateBlockBodyRequest, ValidateBlockBodyResponse] +} From a3af6b18df1d5115392f0d61b1df6bc61281ac29 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Tue, 1 Jun 2021 10:50:01 +0100 Subject: [PATCH 34/48] PM-3063: View synchronisation (Part 1) (#36) * PM-3063: SyncService to short circuit getStatus to self. * PM-3063: Tracing responses for status requests. * PM-3063: Simple ViewSynchronizer component. * PM-3063: Validate the Q.C. signature. * PM-3063: Validate the phases. * PM-3063: Renamed basic protocol test and made it available to service tests. * PM-3063: Error hints. * PM-3063: Testing ViewSynchronizer. * PM-3063: Testing median. * PM-3063: Testing aggregateStatus. * PM-3063: Comments, checking response counter. * PM-3063: Fix QC invalidation to cater for more permissive genesis signature check. * PM-3063: Fix poll test condition. * PM-3063: Fix test and add comments for median. * PM-3063: Refactor genCommitQC and genPrepareQC * PM-3063: Move prepare vs commit comparison into a method. 
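As a rough illustration of the median-based view adoption exercised by the tests above, here is a minimal sketch in plain Scala (illustrative names only, assuming a plain Seq rather than the cats NonEmptySeq used by the actual `ViewSynchronizer.median` added below):

// Sketch only: pick the middle element of the sorted values; for an even
// count this takes the higher (right-hand) of the two middle values rather
// than averaging, so the result is always a value somebody actually reported.
object MedianSketch extends App {
  def median(xs: Seq[Long]): Long = {
    require(xs.nonEmpty, "median of an empty sequence is undefined")
    val sorted = xs.sorted
    sorted(sorted.size / 2)
  }

  assert(median(Seq(3L, 1L, 2L)) == 2L)     // odd count: the true middle
  assert(median(Seq(4L, 1L, 3L, 2L)) == 3L) // even count: right of the middle
}

Preferring the right-hand middle value biases the adopted view number upwards, which suits a progression where picking the lower value would leave the node behind.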
--- build.sc | 5 +- .../hotstuff/consensus/Federation.scala | 2 +- .../hotstuff/consensus/ViewNumber.scala | 4 + ...olProps.scala => ProtocolStateProps.scala} | 15 +- .../metronome/hotstuff/service/Status.scala | 3 +- .../hotstuff/service/SyncService.scala | 29 +- .../service/sync/ViewSynchronizer.scala | 176 +++++++++ .../hotstuff/service/tracing/SyncEvent.scala | 19 +- .../service/tracing/SyncTracers.scala | 24 +- .../service/sync/ViewSynchronizerProps.scala | 363 ++++++++++++++++++ 10 files changed, 615 insertions(+), 25 deletions(-) rename metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/{HotStuffProtocolProps.scala => ProtocolStateProps.scala} (98%) create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizer.scala create mode 100644 metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala diff --git a/build.sc b/build.sc index e7cc1245..01de43f3 100644 --- a/build.sc +++ b/build.sc @@ -252,7 +252,10 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { hotstuff.forensics ) - object test extends TestModule + object test extends TestModule { + override def moduleDeps: Seq[JavaModule] = + super.moduleDeps ++ Seq(hotstuff.consensus.test) + } } } diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala index c0563268..bec35752 100644 --- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/Federation.scala @@ -23,7 +23,7 @@ package io.iohk.metronome.hotstuff.consensus * * Extra: The above two inequalities `(n+f)/2 < q <= n-f`, lead to the constraint: `f < n/3`, or `n >= 3*f+1`. */ -abstract case class Federation[PKey]( +abstract case class Federation[PKey] private ( publicKeys: IndexedSeq[PKey], // Maximum number of Byzantine nodes. 
maxFaulty: Int diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/ViewNumber.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/ViewNumber.scala index 1fc1ab33..941f7394 100644 --- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/ViewNumber.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/ViewNumber.scala @@ -1,6 +1,7 @@ package io.iohk.metronome.hotstuff.consensus import io.iohk.metronome.core.Tagger +import cats.kernel.Order object ViewNumber extends Tagger[Long] { implicit class Ops(val vn: ViewNumber) extends AnyVal { @@ -10,4 +11,7 @@ object ViewNumber extends Tagger[Long] { implicit val ord: Ordering[ViewNumber] = Ordering.by(identity[Long]) + + implicit val order: Order[ViewNumber] = + Order.fromOrdering(ord) } diff --git a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala similarity index 98% rename from metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala rename to metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala index 578c89b4..f7557109 100644 --- a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/HotStuffProtocolProps.scala +++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala @@ -14,9 +14,9 @@ import scala.annotation.nowarn import scala.concurrent.duration._ import scala.util.{Try, Failure, Success} -object HotStuffProtocolProps extends Properties("Basic HotStuff") { +object ProtocolStateProps extends Properties("Basic HotStuff") { - property("protocol") = HotStuffProtocolCommands.property() + property("protocol") = ProtocolStateCommands.property() } @@ -26,7 +26,7 @@ * and invalid commands using `genCommand`. Each `Command` has its individual post-condition * check comparing the model state to the actual protocol results.
*/ -object HotStuffProtocolCommands extends Commands { +object ProtocolStateCommands extends Commands { case class TestBlock(blockHash: Int, parentBlockHash: Int, command: String) @@ -71,12 +71,10 @@ object HotStuffProtocolCommands extends Commands { (phase, viewNumber, blockHash).hashCode private def isGenesis( - phase: VotingPhase, viewNumber: ViewNumber, blockHash: TestAgreement.Hash ): Boolean = - phase == genesisQC.phase && - viewNumber == genesisQC.viewNumber && + viewNumber == genesisQC.viewNumber && blockHash == genesisQC.blockHash private def sign( @@ -125,7 +123,7 @@ object HotStuffProtocolCommands extends Commands { viewNumber: ViewNumber, blockHash: TestAgreement.Hash ): Boolean = { - if (isGenesis(phase, viewNumber, blockHash)) { + if (isGenesis(viewNumber, blockHash)) { signature.sig.isEmpty } else { val h = hash(phase, viewNumber, blockHash) @@ -315,7 +313,8 @@ object HotStuffProtocolCommands extends Commands { genLazy( qc.copy[TestAgreement](blockHash = invalidateHash(qc.blockHash)) ), - genLazy(qc.copy[TestAgreement](phase = nextVoting(qc.phase))), + genLazy(qc.copy[TestAgreement](phase = nextVoting(qc.phase))) + .suchThat(_.blockHash != genesisQC.blockHash), genLazy( qc.copy[TestAgreement](viewNumber = invalidateViewNumber(qc.viewNumber) diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Status.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Status.scala index faf1dfc0..7336ac34 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Status.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Status.scala @@ -1,8 +1,7 @@ package io.iohk.metronome.hotstuff.service import io.iohk.metronome.hotstuff.consensus.ViewNumber -import io.iohk.metronome.hotstuff.consensus.basic.Agreement -import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate +import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, QuorumCertificate} /** Status has all the fields necessary for nodes to sync with each other. * diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala index 1922668d..b621a53f 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala @@ -36,6 +36,7 @@ import scala.reflect.ClassTag * to send requests to the network. */ class SyncService[F[_]: Sync, N, A <: Agreement]( + publicKey: A#PKey, network: Network[F, A, SyncMessage[A]], blockStorage: BlockStorage[N, A], blockSyncPipe: BlockSyncPipe[F, A]#Right, @@ -46,6 +47,11 @@ class SyncService[F[_]: Sync, N, A <: Agreement]( )(implicit tracers: SyncTracers[F, A], storeRunner: KVStoreRunner[F, N]) { import SyncMessage._ + private def protocolStatus: F[Status[A]] = + getState.map { state => + Status(state.viewNumber, state.prepareQC, state.commitQC) + } + /** Request a block from a peer. */ private def getBlock(from: A#PKey, blockHash: A#Hash): F[Option[A#Block]] = { for { @@ -56,13 +62,16 @@ class SyncService[F[_]: Sync, N, A <: Agreement]( } /** Request the status of a peer. 
*/ - private def getStatus(from: A#PKey): F[Option[Status[A]]] = { - for { - requestId <- RequestId[F] - request = GetStatusRequest[A](requestId) - maybeResponse <- sendRequest(from, request) - } yield maybeResponse.map(_.status) - } + private def getStatus(from: A#PKey): F[Option[Status[A]]] = + if (from == publicKey) { + protocolStatus.map(_.some) + } else { + for { + requestId <- RequestId[F] + request = GetStatusRequest[A](requestId) + maybeResponse <- sendRequest(from, request) + } yield maybeResponse.map(_.status) + } /** Send a request to the peer and track the response. * @@ -113,10 +122,7 @@ ): F[Unit] = { val process = message match { case GetStatusRequest(requestId) => - getState.flatMap { state => - val status = - Status(state.viewNumber, state.prepareQC, state.commitQC) - + protocolStatus.flatMap { status => network.sendMessage( from, GetStatusResponse(requestId, status) @@ -223,6 +229,7 @@ object SyncService { RPCTracker[F, SyncMessage[A]](timeout) } service = new SyncService( + publicKey, network, blockStorage, blockSyncPipe, diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizer.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizer.scala new file mode 100644 index 00000000..11f90c26 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizer.scala @@ -0,0 +1,176 @@ +package io.iohk.metronome.hotstuff.service.sync + +import cats._ +import cats.implicits._ +import cats.effect.{Timer, Sync} +import cats.data.NonEmptySeq +import io.iohk.metronome.core.Validated +import io.iohk.metronome.hotstuff.consensus.{Federation, ViewNumber} +import io.iohk.metronome.hotstuff.consensus.basic.{ + Agreement, + Signing, + QuorumCertificate, + Phase +} +import io.iohk.metronome.hotstuff.service.Status +import io.iohk.metronome.hotstuff.service.tracing.SyncTracers +import scala.concurrent.duration._ +import io.iohk.metronome.hotstuff.consensus.basic.ProtocolError + +/** The job of the `ViewSynchronizer` is to ask the other federation members + * what their status is and figure out a view number we should be using. + * This is something we must do after startup, or if we have for some reason + * fallen out of sync with the rest of the federation. + */ +class ViewSynchronizer[F[_]: Sync: Timer: Parallel, A <: Agreement: Signing]( + federation: Federation[A#PKey], + getStatus: ViewSynchronizer.GetStatus[F, A], + retryTimeout: FiniteDuration = 5.seconds +)(implicit tracers: SyncTracers[F, A]) { + import ViewSynchronizer.aggregateStatus + + /** Poll the federation members for the current status until we have gathered + * enough to make a decision, i.e. we have a quorum. + * + * Pick the highest Quorum Certificates from the gathered responses, but be + * more careful with the view numbers, as these can be disingenuous. + * + * Try again until, in one round, we can gather enough statuses to form a quorum. + */ + def sync: F[Status[A]] = { + federation.publicKeys.toVector + .parTraverse(getAndValidateStatus) + .flatMap { maybeStatuses => + tracers + .statusPoll(federation.publicKeys -> maybeStatuses) + .as(maybeStatuses.flatten) + } + .map(NonEmptySeq.fromSeq) + .flatMap { + case Some(statuses) if statuses.size >= federation.quorumSize => + aggregateStatus(statuses).pure[F] + + case _ => + // We traced all responses, so we can detect if we're in an endless loop.
Timer[F].sleep(retryTimeout) >> sync + } + } + + private def getAndValidateStatus( + from: A#PKey + ): F[Option[Validated[Status[A]]]] = + getStatus(from).flatMap { + case None => + none.pure[F] + + case Some(status) => + validate(from, status) match { + case Left((error, hint)) => + tracers.invalidStatus(status, error, hint).as(none) + case Right(valid) => + valid.some.pure[F] + } + } + + private def validate( + from: A#PKey, + status: Status[A] + ): Either[ + (ProtocolError.InvalidQuorumCertificate[A], ViewSynchronizer.Hint), + Validated[Status[A]] + ] = + for { + _ <- validateQC(from, status.prepareQC)( + checkPhase(Phase.Prepare), + checkSignature, + checkVisible(status), + checkPrepareIsAfterCommit(status) + ) + _ <- validateQC(from, status.commitQC)( + checkPhase(Phase.Commit), + checkSignature, + checkVisible(status) + ) + } yield Validated[Status[A]](status) + + private def check(cond: Boolean, hint: => String) = + if (cond) none else hint.some + + private def checkPhase(phase: Phase)(qc: QuorumCertificate[A]) = + check(phase == qc.phase, s"Phase should be $phase.") + + private def checkSignature(qc: QuorumCertificate[A]) = + check(Signing[A].validate(federation, qc), "Invalid signature.") + + private def checkVisible(status: Status[A])(qc: QuorumCertificate[A]) = + check( + status.viewNumber >= qc.viewNumber, + "View number of status earlier than Q.C." + ) + + // This could be checked from either Q.C. perspective. + private def checkPrepareIsAfterCommit(status: Status[A]) = + (_: QuorumCertificate[A]) => + check( + status.prepareQC.viewNumber >= status.commitQC.viewNumber, + "Prepare Q.C. lower than Commit Q.C." + ) + + private def validateQC(from: A#PKey, qc: QuorumCertificate[A])( + checks: (QuorumCertificate[A] => Option[String])* + ) = + checks.toList.traverse { check => + check(qc) + .map { hint => + ProtocolError.InvalidQuorumCertificate(from, qc) -> hint + } + .toLeft(()) + } +} + +object ViewSynchronizer { + + /** Extra textual description for errors. */ + type Hint = String + + /** Send a network request to get the status of a replica. */ + type GetStatus[F[_], A <: Agreement] = A#PKey => F[Option[Status[A]]] + + /** Determines the best values to adopt: it picks the highest Prepare and + * Commit Quorum Certificates, and the median View Number. + * + * The former have signatures to prove their validity, but the latter could be + * gamed by adversarial actors, hence not using the highest value. + * Multiple rounds of peers trying to sync with each other and picking the + * median should make them converge in the end, unless an adversarial group + * actively tries to present different values to every honest node. + * + * Another candidate would be to use the _mode_. + */ + def aggregateStatus[A <: Agreement]( + statuses: NonEmptySeq[Status[A]] + ): Status[A] = { + val prepareQC = statuses.map(_.prepareQC).maximumBy(_.viewNumber) + val commitQC = statuses.map(_.commitQC).maximumBy(_.viewNumber) + val viewNumber = + math.max(median(statuses.map(_.viewNumber)), prepareQC.viewNumber) + Status( + viewNumber = ViewNumber(viewNumber), + prepareQC = prepareQC, + commitQC = commitQC + ) + } + + /** Pick the middle from an ordered sequence of values. + * + * In case of an even number of values, it returns the right + * one from the two values in the middle; it doesn't take the average.
+ * + * The idea is that we want a value that exists, not something made up, + * and we prefer the higher value, in case this is a progression where + * picking the lower one would mean we'd be left behind. + */ + def median[T: Order](xs: NonEmptySeq[T]): T = + xs.sorted.getUnsafe(xs.size.toInt / 2) + +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala index 8df961fa..fc822a30 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncEvent.scala @@ -1,7 +1,10 @@ package io.iohk.metronome.hotstuff.service.tracing -import io.iohk.metronome.hotstuff.consensus.basic.Agreement +import io.iohk.metronome.core.Validated +import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, ProtocolError} import io.iohk.metronome.hotstuff.service.messages.SyncMessage +import io.iohk.metronome.hotstuff.service.Status +import io.iohk.metronome.hotstuff.consensus.basic.ProtocolError sealed trait SyncEvent[+A <: Agreement] @@ -27,6 +30,20 @@ object SyncEvent { maybeError: Option[Throwable] ) extends SyncEvent[A] + /** Performed a poll for `Status` across the federation. + * Only contains results for federation members that responded within the timeout. + */ + case class StatusPoll[A <: Agreement]( + statuses: Map[A#PKey, Validated[Status[A]]] + ) extends SyncEvent[A] + + /** A federation members sent a `Status` with invalid content. */ + case class InvalidStatus[A <: Agreement]( + status: Status[A], + error: ProtocolError.InvalidQuorumCertificate[A], + hint: String + ) extends SyncEvent[A] + /** An unexpected error in one of the background tasks. 
*/ case class Error(error: Throwable) extends SyncEvent[Nothing] } diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala index 681d6be7..584a26eb 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala @@ -1,14 +1,18 @@ package io.iohk.metronome.hotstuff.service.tracing import cats.implicits._ +import io.iohk.metronome.core.Validated import io.iohk.metronome.tracer.Tracer -import io.iohk.metronome.hotstuff.consensus.basic.Agreement +import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, ProtocolError} import io.iohk.metronome.hotstuff.service.messages.SyncMessage +import io.iohk.metronome.hotstuff.service.Status case class SyncTracers[F[_], A <: Agreement]( queueFull: Tracer[F, A#PKey], requestTimeout: Tracer[F, SyncTracers.Request[A]], responseIgnored: Tracer[F, SyncTracers.Response[A]], + statusPoll: Tracer[F, SyncTracers.Statuses[A]], + invalidStatus: Tracer[F, SyncTracers.StatusError[A]], error: Tracer[F, Throwable] ) @@ -21,6 +25,12 @@ object SyncTracers { type Response[A <: Agreement] = (A#PKey, SyncMessage[A] with SyncMessage.Response, Option[Throwable]) + type Statuses[A <: Agreement] = + (IndexedSeq[A#PKey], IndexedSeq[Option[Validated[Status[A]]]]) + + type StatusError[A <: Agreement] = + (Status[A], ProtocolError.InvalidQuorumCertificate[A], String) + def apply[F[_], A <: Agreement]( tracer: Tracer[F, SyncEvent[A]] ): SyncTracers[F, A] = @@ -30,6 +40,18 @@ object SyncTracers { .contramap[Request[A]]((RequestTimeout.apply[A] _).tupled), responseIgnored = tracer .contramap[Response[A]]((ResponseIgnored.apply[A] _).tupled), + statusPoll = tracer + .contramap[Statuses[A]] { case (publicKeys, maybeStatuses) => + StatusPoll[A] { + (publicKeys zip maybeStatuses).toMap.collect { + case (key, Some(status)) => key -> status + } + } + }, + invalidStatus = + tracer.contramap[StatusError[A]] { case (status, error, hint) => + InvalidStatus(status, error, hint) + }, error = tracer.contramap[Throwable](Error(_)) ) } diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala new file mode 100644 index 00000000..d825950d --- /dev/null +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala @@ -0,0 +1,363 @@ +package io.iohk.metronome.hotstuff.service.sync + +import cats.effect.concurrent.Ref +import io.iohk.metronome.hotstuff.consensus.{ + Federation, + LeaderSelection, + ViewNumber +} +import io.iohk.metronome.hotstuff.consensus.basic.{ + ProtocolStateCommands, + QuorumCertificate, + Phase, + VotingPhase, + Signing +} +import io.iohk.metronome.hotstuff.service.Status +import io.iohk.metronome.hotstuff.service.tracing.{SyncTracers, SyncEvent} +import io.iohk.metronome.tracer.Tracer +import monix.eval.Task +import monix.execution.schedulers.TestScheduler +import org.scalacheck.{Arbitrary, Properties, Gen}, Arbitrary.arbitrary +import org.scalacheck.Prop.{forAll, forAllNoShrink, propBoolean, all} +import scala.concurrent.duration._ +import java.util.concurrent.TimeoutException +import cats.data.NonEmptySeq +import scala.util.Random + +object ViewSynchronizerProps extends 
Properties("ViewSynchronizer") { + import ProtocolStateCommands.{ + TestAgreement, + mockSigning, + mockSigningKey, + genInitialState, + genHash + } + + /** Projected responses in each round from every federation member. */ + type Responses = Vector[Map[TestAgreement.PKey, TestResponse]] + + /** Generate N rounds worth of test responses, during which the synchronizer + * should find the first quorum, unless there's none in any of the rounds, + * in which case it will just keep getting timeouts forever. + */ + case class TestFixture( + rounds: Int, + federation: Federation[TestAgreement.PKey], + responses: Responses + ) { + val responseCounterRef = + Ref.unsafe[Task, Map[TestAgreement.PKey, Int]]( + federation.publicKeys.map(_ -> 0).toMap + ) + + val syncEventsRef = + Ref.unsafe[Task, Vector[SyncEvent[TestAgreement]]](Vector.empty) + + private val syncEventTracer = + Tracer.instance[Task, SyncEvent[TestAgreement]] { event => + syncEventsRef.update(_ :+ event) + } + + implicit val syncTracers: SyncTracers[Task, TestAgreement] = + SyncTracers(syncEventTracer) + + def getStatus( + publicKey: TestAgreement.PKey + ): Task[Option[Status[TestAgreement]]] = + for { + round <- responseCounterRef.modify { responseCounter => + val count = responseCounter(publicKey) + responseCounter.updated(publicKey, count + 1) -> count + } + result = + if (round >= responses.size) None + else + responses(round)(publicKey) match { + case TestResponse.Timeout => None + case TestResponse.InvalidStatus(status) => Some(status) + case TestResponse.ValidStatus(status) => Some(status) + } + } yield result + } + + object TestFixture { + implicit val leaderSelection = LeaderSelection.RoundRobin + + implicit val arb: Arbitrary[TestFixture] = Arbitrary { + for { + state <- genInitialState + federation = Federation(state.federation, state.f).getOrElse( + sys.error("Invalid federation.") + ) + byzantineCount <- Gen.choose(0, state.f) + byzantines = federation.publicKeys.take(byzantineCount).toSet + rounds <- Gen.posNum[Int] + genesisQC = state.newViewsHighQC + responses <- genResponses(rounds, federation, byzantines, genesisQC) + } yield TestFixture( + rounds, + federation, + responses + ) + } + } + + sealed trait TestResponse + object TestResponse { + case object Timeout extends TestResponse + case class ValidStatus(status: Status[TestAgreement]) extends TestResponse + case class InvalidStatus(status: Status[TestAgreement]) extends TestResponse + } + + /** Generate a series of hypothetical responses projected from an idealized consensus process. */ + def genResponses( + rounds: Int, + federation: Federation[TestAgreement.PKey], + byzantines: Set[TestAgreement.PKey], + genesisQC: QuorumCertificate[TestAgreement] + ): Gen[Responses] = { + + def genQC( + viewNumber: ViewNumber, + phase: VotingPhase, + blockHash: TestAgreement.Hash + ) = + for { + quorumKeys <- Gen + .pick(federation.quorumSize, federation.publicKeys) + .map(_.toVector) + partialSigs = quorumKeys.map { publicKey => + val signingKey = mockSigningKey(publicKey) + Signing[TestAgreement].sign( + signingKey, + phase, + viewNumber, + blockHash + ) + } + groupSig = mockSigning.combine(partialSigs) + } yield QuorumCertificate[TestAgreement]( + phase, + viewNumber, + blockHash, + groupSig + ) + + /** Extend a Q.C. by building a new block on top of it. */ + def genPrepareQC(qc: QuorumCertificate[TestAgreement]) = + genHash.flatMap { blockHash => + genQC(qc.viewNumber.next, Phase.Prepare, blockHash) + } + + /** Extend a Q.C. by committing the block in it. 
*/ + def genCommitQC(qc: QuorumCertificate[TestAgreement]) = + genQC(qc.viewNumber, Phase.Commit, qc.blockHash) + + def genInvalid(status: Status[TestAgreement]) = { + def delay(invalid: => Status[TestAgreement]) = + Gen.delay(Gen.const(invalid)) + Gen.oneOf( + delay(status.copy(viewNumber = status.prepareQC.viewNumber.prev)), + delay(status.copy(prepareQC = status.commitQC)), + delay(status.copy(commitQC = status.prepareQC)), + delay( + status.copy(commitQC = + status.commitQC.copy[TestAgreement](signature = + status.commitQC.signature + .copy(sig = status.commitQC.signature.sig.map(_ + 1)) + ) + ) + ).filter(_.commitQC.viewNumber > 0) + ) + } + + def loop( + round: Int, + prepareQC: QuorumCertificate[TestAgreement], + commitQC: QuorumCertificate[TestAgreement], + accum: Responses + ): Gen[Responses] = + if (round == rounds) Gen.const(accum) + else { + val keepCommit = Gen.const(commitQC) + + def maybeCommit(qc: QuorumCertificate[TestAgreement]) = + if (qc.blockHash != commitQC.blockHash) genCommitQC(qc) + else keepCommit + + val genRound = for { + nextPrepareQC <- Gen.oneOf( + Gen.const(prepareQC), + genPrepareQC(prepareQC) + ) + nextCommitQC <- Gen.oneOf( + keepCommit, + maybeCommit(prepareQC), + maybeCommit(nextPrepareQC) + ) + status = Status(ViewNumber(round + 1), nextPrepareQC, nextCommitQC) + responses <- Gen.sequence[Vector[TestResponse], TestResponse] { + federation.publicKeys.map { publicKey => + if (byzantines.contains(publicKey)) { + Gen.frequency( + 3 -> Gen.const(TestResponse.Timeout), + 2 -> Gen.const(TestResponse.ValidStatus(status)), + 5 -> genInvalid(status).map(TestResponse.InvalidStatus(_)) + ) + } else { + Gen.frequency( + 1 -> TestResponse.Timeout, + 4 -> TestResponse.ValidStatus(status) + ) + } + } + } + responseMap = (federation.publicKeys zip responses).toMap + } yield (nextPrepareQC, nextCommitQC, responseMap) + + genRound.flatMap { case (prepareQC, commitQC, responseMap) => + loop(round + 1, prepareQC, commitQC, accum :+ responseMap) + } + } + + loop( + 0, + genesisQC, + genesisQC.copy[TestAgreement](phase = Phase.Commit), + Vector.empty + ) + } + + property("sync") = forAll { (fixture: TestFixture) => + implicit val scheduler = TestScheduler() + import fixture.syncTracers + + val retryTimeout = 5.seconds + val syncTimeout = fixture.rounds * retryTimeout * 2 + val synchronizer = new ViewSynchronizer[Task, TestAgreement]( + federation = fixture.federation, + getStatus = fixture.getStatus, + retryTimeout = retryTimeout + ) + + val test = for { + status <- synchronizer.sync.timeout(syncTimeout).attempt + events <- fixture.syncEventsRef.get + + quorumSize = fixture.federation.quorumSize + + indexOfQuorum = fixture.responses.indexWhere { responseMap => + responseMap.values.collect { case TestResponse.ValidStatus(_) => + }.size >= quorumSize + } + hasQuorum = indexOfQuorum >= 0 + + invalidResponseCount = { + val responses = + if (hasQuorum) fixture.responses.take(indexOfQuorum + 1) + else fixture.responses + responses + .flatMap(_.values) + .collect { case TestResponse.InvalidStatus(_) => + } + .size + } + + invalidEventCount = { + events.collect { case x: SyncEvent.InvalidStatus[_] => + }.size + } + + pollSizes = events.collect { case SyncEvent.StatusPoll(statuses) => + statuses.size + } + + responseCounter <- fixture.responseCounterRef.get + } yield { + val statusProps = status match { + case Right(status) => + "status" |: all( + "quorum" |: hasQuorum, + "reports polls each round" |: + pollSizes.size == indexOfQuorum + 1, + "stop at the first quorum" |: + 
pollSizes.last >= quorumSize && + pollSizes.init.forall(_ < quorumSize), + "reports all invalid" |: + invalidEventCount == invalidResponseCount + ) + + case Left(ex: TimeoutException) => + "timeout" |: all( + "no quorum" |: !hasQuorum, + "empty polls" |: pollSizes.forall(_ < quorumSize), + "keeps polling" |: pollSizes.size >= fixture.rounds, + "reports all invalid" |: invalidEventCount == invalidResponseCount + ) + + case Left(ex) => + ex.getMessage |: false + } + + all( + statusProps, + "poll everyone in each round" |: + responseCounter.values.toList.distinct.size <= 2 // Some members can get an extra query, down to timing. + ) + } + + val testFuture = test.runToFuture + + scheduler.tick(syncTimeout) + + testFuture.value.get.get + } + + property("median") = forAllNoShrink( + for { + m <- arbitrary[Int].map(_.toLong) + l <- Gen.posNum[Int] + h <- Gen.oneOf(l, l - 1) + ls <- Gen.listOfN(l, Gen.posNum[Int].map(m - _)) + hs <- Gen.listOfN(h, Gen.posNum[Int].map(m + _)) + rnd <- arbitrary[Int].map(new Random(_)) + } yield (m, rnd.shuffle(ls ++ Seq(m) ++ hs)) + ) { case (m, xs) => + m == ViewSynchronizer.median(NonEmptySeq.fromSeqUnsafe(xs)) + } + + property("aggregateStatus") = forAllNoShrink( + for { + fixture <- arbitrary[TestFixture] + statuses = fixture.responses.flatMap(_.values).collect { + case TestResponse.ValidStatus(status) => status + } + if (statuses.nonEmpty) + rnd <- arbitrary[Int].map(new Random(_)) + } yield NonEmptySeq.fromSeqUnsafe(rnd.shuffle(statuses)) + ) { statuses => + val status = + ViewSynchronizer.aggregateStatus(statuses) + + val medianViewNumber = ViewSynchronizer.median(statuses.map(_.viewNumber)) + + val maxViewNumber = + statuses.map(_.viewNumber).toSeq.max + + val maxPrepareQC = + statuses.find(_.viewNumber == maxViewNumber).get.prepareQC + + val maxCommitQC = + statuses.find(_.viewNumber == maxViewNumber).get.commitQC + + all( + "viewNumber" |: + status.viewNumber == + (if (maxPrepareQC.viewNumber > medianViewNumber) maxPrepareQC.viewNumber + else medianViewNumber), + "prepareQC" |: status.prepareQC == maxPrepareQC, + s"commitQC ${status.commitQC} vs ${maxCommitQC}" |: status.commitQC == maxCommitQC + ) + } +} From 575e3b44add572eae8eb148725c5728ce9ffc7b5 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Wed, 2 Jun 2021 10:08:42 +0100 Subject: [PATCH 35/48] PM-3063: View synchronisation (Part 2) (#37) * PM-3063: Create a ViewSynchronizer in SyncService. * PM-3063: Rename BlockSyncPipe to SyncPipe. * PM-3063: Raise ShutdownException. * PM-3063: Add StatusRequest/StatusResponse to SyncPipe, handle them in SyncService. * PM-3063: Return sources that we can download the committed state from. * PM-3063: BlockSynchronizer.downloadBlockInQC * PM-3063: Added rootBlockHash to ViewStateStorage * PM-3063: Raise error if all sources failed a given number of times. * PM-3063: Prune storage and set new root. * PM-3063: Handle SyncPipe.StatusResponse in ConsensusService. * PM-3063: Trigger view synchronization from the ConsensusService. * PM-3063: Testing block download without persistence. * PM-3063: Test timeout behaviour. * PM-3063: Move counter reset to change capture. * PM-3063: Add ApplicationService to delegate validation and state sync to. * PM-3063: Ignore old prepare messages. * PM-3063: Simplify sync modes by a recursive function. * PM-3063: Component diagram. * PM-3063: ApplicationService.createBlock * PM-3063: Disable timeouts in test that expects success. * PM-3063: Return an Either from getBlockFromQuorumCertificate * PM-3063: Fix merge.
* PM-3063: Fix sync test. * PM-3063: Fix formatting of long F[_] * PM-3105: Fix ledger storage test. * PM-3063: validateBlock to return Option[Boolean] --- README.md | 2 + docs/components.png | Bin 0 -> 88322 bytes .../service/storage/LedgerStorageProps.scala | 9 +- .../iohk/metronome/core/fibers/FiberMap.scala | 8 +- .../iohk/metronome/core/fibers/FiberSet.scala | 7 +- .../hotstuff/service/ApplicationService.scala | 22 ++ .../hotstuff/service/ConsensusService.scala | 162 +++++++--- .../hotstuff/service/HotStuffService.scala | 22 +- .../hotstuff/service/SyncService.scala | 288 ++++++++++++++---- .../service/messages/DuplexMessage.scala | 2 +- .../service/pipes/BlockSyncPipe.scala | 39 --- .../hotstuff/service/pipes/SyncPipe.scala | 58 ++++ .../hotstuff/service/pipes/package.scala | 4 +- .../service/storage/ViewStateStorage.scala | 16 +- .../service/sync/BlockSynchronizer.scala | 69 ++++- .../service/sync/ViewSynchronizer.scala | 31 +- .../service/tracing/ConsensusEvent.scala | 17 +- .../service/tracing/ConsensusTracers.scala | 12 +- .../service/tracing/SyncTracers.scala | 14 +- .../storage/ViewStateStorageProps.scala | 25 ++ .../service/sync/BlockSynchronizerProps.scala | 120 ++++++-- .../service/sync/ViewSynchronizerProps.scala | 6 +- 22 files changed, 728 insertions(+), 205 deletions(-) create mode 100644 docs/components.png create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ApplicationService.scala delete mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/BlockSyncPipe.scala create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/SyncPipe.scala diff --git a/README.md b/README.md index ee0f34a2..933a9e50 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,8 @@ The architecture also enables a convenient forensic monitoring module. By simply ![Architecture diagram](docs/architecture.png) +![Component diagram](docs/components.png) + ### BFT Algorithm The BFT service delegates checkpoint proposal and candidate validation to the Checkpointing Interpreter using 2-way communication to allow asynchronous responses as and when the data becomes available. 
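The `median` and `aggregateStatus` properties above pin down the aggregation rules: the synchronizer's view number is the median of the reported view numbers (bumped up to the highest prepare QC's view if that is greater), and the prepare/commit QCs come from the status with the highest view number. For reference, here is a minimal, self-contained sketch of those rules in plain Scala; the types are simplified stand-ins for the repo's Status[TestAgreement] and QuorumCertificate[TestAgreement], and picking each QC by its own view number is an assumption that coincides with the fixture above, where QCs only ever advance round by round.

object ViewSynchronizerSketch {
  final case class QuorumCert(viewNumber: Long)
  final case class Status(
      viewNumber: Long,
      prepareQC: QuorumCert,
      commitQC: QuorumCert
  )

  // Middle element of the sorted sequence; for an even length this takes
  // the upper middle, which is what the `median` generator above expects:
  // it surrounds `m` with `l` smaller values and `l` or `l - 1` larger
  // ones, so `m` always lands at index `size / 2` after sorting.
  def median(xs: Seq[Long]): Long =
    xs.sorted.apply(xs.size / 2)

  def aggregateStatus(statuses: Seq[Status]): Status = {
    // Assumption: the QC with the highest view number belongs to the
    // status with the highest view number, as in the generator above.
    val prepareQC = statuses.map(_.prepareQC).maxBy(_.viewNumber)
    val commitQC  = statuses.map(_.commitQC).maxBy(_.viewNumber)
    // Use the median view so a Byzantine minority reporting inflated
    // views cannot drag everyone forward, but never fall behind the
    // highest prepare QC we hold evidence for.
    val viewNumber =
      median(statuses.map(_.viewNumber)) max prepareQC.viewNumber
    Status(viewNumber, prepareQC, commitQC)
  }
}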
diff --git a/docs/components.png b/docs/components.png
new file mode 100644
index 0000000000000000000000000000000000000000..17df5ed8750c80224074b7d410249140c4e7499d
GIT binary patch
literal 88322
[base85-encoded binary data omitted: docs/components.png, the component diagram referenced in the README change above]
znp}y7_PWpW;U5Y%rC&i|K2oMH3OKL#E~8=N@!kyS&*MqON53H}Tlca(`|A{-fFBA0!5VZn$+5smeI;<~`xUp@G|#zlTn)-YgX5|zTDb^;o#RQ(!{1*3 zyAVXmpl|;k(9?TkLp>s|Q}V|F5i@F7s8i>??2{V&KS-y(Gc@+zkGQ9Gbs-T8|rm& zmtn*Eo09c{>Ml3 z3j?oBg@j|OXBjzQQWP;f-&=uHay`HYfm=W1YjKwWcEb6ppyc}yg3(oc_P$%@wKAn& zt{nOI{y*AMcz?H4)g_lJ!vLgO9~SXEFH$upD8`k4{6sbYpJvAI&R<{tD|pJ_vxU&M z{xJ>P4VPSE7Xg5>=db?G<_2ZZT>18#9}*Dz2dWuR!k%W8!^)OlWek?>zsr8oS_J9-aV_Kd&1rXw$H_E{q}u zLq2-(hvvm~ODGsY;6Wh}xN*5yVH$blqN|V{eX`r;aEHWU{Xoidax=WE?M6ziDXXDp5!@K5Z(hBB_1Mv&Q%yLuH$QqmMLga?4!!AI3{raz^>XH*`7o%tq-rtR05%{Q zj`ruXfOZMq*Hr%V+32Is_KXr;TR5~;`SPRCgP{k|bq5jHaEBlM{6zlPJWu(A#~A0c z%X1-zBbE699vS%lbq2hGllI++I^|;hj?>y?0Qq+HWm;u5U#c`HSvM{NBd5HB`6rwZ z|Fa+PR%-geU~2~70z5he;FY0NXn&J49;xu^{*V3~8z}b_MaosPwsU%q!jwlWhx5~) z@@kZR`J4?Cj|Wmk1FFs4L-(u2BybQq@d^$ilyQsvYR{h|I|!u*gtL}ArT@-wJJu5ka#>I{8R znl>1vryagMIN>dT;zU4rm@#MB6at5uPK+@gqdvJEI$@#?_c%DV>k zAql~NPH${M;kOl1P}AZmZ4Ld0f&=0SjQA8_GEGN*)7=0{hcjz3%Nt=JTJRz<7#?2+z30VR(N1U9{F5J3QX+f;L(MW64^RY*p!kW#9wTj-<7*dgCsw%*8rT|8T$D9J(g=fj z+$^kyRZuY419RYjGt%4wBxzj|Z@UNWZg_P9=qB9)R{-7D zMt?)|JOYp>|=*G*M`5nQn_QW#3IAWG5|_Tr@7iv?wLxDqCbm8e6xt zhzZkn6PYGap;1|ed}Hh0WXb4G6x~9!;wImDXQuo6{`mas=QrbB&U?;tp65A`S-Gq{ z3kmXo0Pa!Z;*b}iN=J_XaP1tGZ=fp96Y0tFAo!r+ColA|14&Q8F(MYCwJ5a)o8J`` zn{&eNf_HBLkjC1PBH*WeYHl_>0m4-~>c zY~bMQ9hFzRZ-xGap7pqVdNCcM%#-ZJ3RPedQh=1oM;CEI%>%wCxk??-FH%_S-8I?q z^|is56fojm3kSD+N7tF(+5_SfzqDy4_+^ByVjp>~vK*BMixd$6^yg(L6)$J2Q~mbB|2IGDf6aultUaIY@4{PQA4HU9-uGj0_kr z@%#?-xqR8d(ya)b>;$Suecl}a2PYSEdafaVK*!TQSSt7jao&cTSJe}IL;cEf- zqNH5&?*XypAhl2JpaWiuz4^?a)MJY4Dr{w6$K%bWfu?0scJBn|Rnk!tp+nTr#ZD|>qY70eg+V{s!OgMux_)>ftFugrA&;}f^VArwBU3*YoR2m20JNi^|FZ5OH{dk>4WKiFP7oUQx5EsYuy|r$3NhY(=R9P41z-~vRDEgY z<7iOtDuq25`QF<$JE{+c?B6~0gD)JNs;w$2uCE@_8GV5D27F1E7cCMsGc zZfN<|YM$oXegJFs=jM+i9w9k#frUoD$L}qBrKA)Yn_QjzJlob)_sa{ZW?ocxzr=&i z$zG{@Pvua@y35Y0d%RF8e(&@|@kg>F4OdnLC(Hg`MLae287i0{%}4=*mGoABAW31B zz`;x~b8@PAv#z@mV{z}xoZl{=jY%BHv8uWnp(CN9v%>j)v{UiLYRC z$s-a-Y^>R5G!umMxE+*3Rr{OqBqcST5c2a#faQt1W#IS zO>U?nU<*zqKc+^+(+?G~@YZijuU2*%W2$tq2}{U-L`jsklM~7yn5U%e*RW-S{qYCVGgFN_OIVmSlMBm&d|k{=Bv*!NfFPbk|}C9!#dOC zlR2tfz5W;au2LzVIx&t4!YhxxRNl+qwli7aMCYy$e9Jb^p9Nr7{6fESaikDhDD{g8 zCV#mGya^2Fk*|m8X<^CC5U8Kndne{E_`DzqP>@o7`zl zSR0!qeUxF=jJs}nN5h;Mbua~js9~0Lx}5DYGsgtN4!K<;a|QM(Nu9O7y=igFjL&pl z&Px8U>D`2@CN*W}oF1nn>KJqgf1DyAUijYXdhx5|DO4N95ihK_uH))qTQPHo5M*UdBy#}k?1 z3y+z0x$$>XSyH=?uT?ykW~RW}l%14}TD<%NUy{ zlN&xQYNYRvIa^wKPD3`>WrXO;sII&?UR(2Bc%;L?u&ch+IU)Dqxf7;iCKXQ`_~x~L z$7n99O4r8Lt$XQxGQr@P%zl}S2gNl0Y(r7K)prp5R4_<}vrO%LywJw95Z8FRMm<1C zn$g1OdKK-h`bEo2e@sXD(8)IjruNf`C55@YOEHrNx#N2lU?_xML8EqNhBQ(rtQ)_ZjQ4Tmo$ zRB>zX0M2JX|Fzj#$t6tRj51zAx4b`_zXtp5tnSbk@Xs4kB@8I*Z#3k8^8eOT zCMm2pX^X3*T4QrzH2y#F-a@ul)91&={POkrvfGU-! 
diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorageProps.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorageProps.scala
index 7c4e996a..88261834 100644
--- a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorageProps.scala
+++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/storage/LedgerStorageProps.scala
@@ -60,8 +60,13 @@ object LedgerStorageProps extends Properties("LedgerStorage") {
     def getByHash(ledgerHash: Ledger.Hash) =
       TestKVStore.compile(ledgerStorage.get(ledgerHash)).run(store)
 
-    val ledgerMap = store.get(Namespace.Ledgers).getOrElse(Map.empty[Any, Any])
-    val (current, old) = ledgers.reverse.splitAt(maxSize)
+    val ledgerMap = store.get(Namespace.Ledgers).getOrElse(Map.empty[Any, Any])
+
+    val (current, old) = {
+      val (lastN, prev) = ledgers.reverse.splitAt(maxSize)
+      // There can be duplicates due to re-insertions.
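+      // A ledger re-inserted within the last `maxSize` writes is still retained,
+      // so it must not be counted among the evicted ones; hence the filter below.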
+ (lastN, prev.filterNot(lastN.contains)) + } all( "max-history" |: ledgerMap.values.size <= maxSize, diff --git a/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala b/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala index 06f3f0e2..9aedadcc 100644 --- a/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala +++ b/metronome/core/src/io/iohk/metronome/core/fibers/FiberMap.scala @@ -30,9 +30,8 @@ class FiberMap[F[_]: Concurrent: ContextShift, K]( def submit[A](key: K)(task: F[A]): F[F[A]] = { isShutdownRef.get.flatMap { case true => - Sync[F].raiseError( - new IllegalStateException("The pool is already shut down.") - ) + Sync[F].raiseError(new FiberMap.ShutdownException) + case false => actorMapRef.get.map(_.get(key)).flatMap { case Some(actor) => @@ -82,6 +81,9 @@ object FiberMap { extends RuntimeException("The fiber task queue is full.") with NoStackTrace + class ShutdownException + extends IllegalStateException("The pool is already shut down.") + private class Actor[F[_]: Concurrent]( queue: ConcurrentQueue[F, DeferredTask[F, _]], runningRef: Ref[F, Option[DeferredTask[F, _]]], diff --git a/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala b/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala index befc7e19..2598156c 100644 --- a/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala +++ b/metronome/core/src/io/iohk/metronome/core/fibers/FiberSet.scala @@ -16,9 +16,7 @@ class FiberSet[F[_]: Concurrent]( ) { private def raiseIfShutdown: F[Unit] = isShutdownRef.get.ifM( - Concurrent[F].raiseError( - new IllegalStateException("The pool is already shut down.") - ), + Concurrent[F].raiseError(new FiberSet.ShutdownException), ().pure[F] ) @@ -57,6 +55,9 @@ class FiberSet[F[_]: Concurrent]( } object FiberSet { + class ShutdownException + extends IllegalStateException("The pool is already shut down.") + def apply[F[_]: Concurrent]: Resource[F, FiberSet[F]] = Resource.make[F, FiberSet[F]] { for { diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ApplicationService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ApplicationService.scala new file mode 100644 index 00000000..57bfca6f --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ApplicationService.scala @@ -0,0 +1,22 @@ +package io.iohk.metronome.hotstuff.service + +import cats.data.NonEmptyVector +import io.iohk.metronome.hotstuff.consensus.basic.Agreement +import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate + +/** Represents the "application" domain to the HotStuff module, + * performing all delegations that HotStuff can't do on its own. + */ +trait ApplicationService[F[_], A <: Agreement] { + // TODO (PM-3109): Create block. + def createBlock(highQC: QuorumCertificate[A]): F[Option[A#Block]] + + // TODO (PM-3132, PM-3133): Block validation. + // Returns None if validation cannot be carried out due to data availability issues within a given timeout. + def validateBlock(block: A#Block): F[Option[Boolean]] + + // TODO (PM-3135): Tell the application to sync any state of the block, i.e. the Ledger. + // The `sources` are peers who most probably have this state. + // The full `block` is given because it may not be persisted yet. 
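+  // The sync service calls this before it prunes older blocks and makes
+  // `block` the new root, so the synced state must be fully available on return.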
+  def syncState(sources: NonEmptyVector[A#PKey], block: A#Block): F[Unit]
+}
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala
index 1044ec63..a167b7c6 100644
--- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala
@@ -15,9 +15,10 @@ import io.iohk.metronome.hotstuff.consensus.basic.{
   Phase,
   Message,
   Block,
+  Signing,
   QuorumCertificate
 }
-import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe
+import io.iohk.metronome.hotstuff.service.pipes.SyncPipe
 import io.iohk.metronome.hotstuff.service.storage.{
   BlockStorage,
   ViewStateStorage
@@ -34,20 +35,28 @@ import scala.util.control.NonFatal
  *
  * It handles the `consensus.basic.Message` events coming from the network.
  */
-class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block](
+class ConsensusService[
+    F[_]: Timer: Concurrent,
+    N,
+    A <: Agreement: Block: Signing
+](
     publicKey: A#PKey,
     network: Network[F, A, Message[A]],
+    appService: ApplicationService[F, A],
     blockStorage: BlockStorage[N, A],
     viewStateStorage: ViewStateStorage[N, A],
     stateRef: Ref[F, ProtocolState[A]],
     stashRef: Ref[F, ConsensusService.MessageStash[A]],
-    blockSyncPipe: BlockSyncPipe[F, A]#Left,
+    counterRef: Ref[F, ConsensusService.MessageCounter],
+    syncPipe: SyncPipe[F, A]#Left,
     eventQueue: ConcurrentQueue[F, Event[A]],
     blockExecutionQueue: ConcurrentQueue[F, Effect.ExecuteBlocks[A]],
     fiberSet: FiberSet[F],
     maxEarlyViewNumberDiff: Int
 )(implicit tracers: ConsensusTracers[F, A], storeRunner: KVStoreRunner[F, N]) {
+  import ConsensusService.MessageCounter
+
   /** Get the current protocol state, perhaps to respond to status requests. */
   def getState: F[ProtocolState[A]] =
     stateRef.get
@@ -88,18 +97,18 @@
           .as(none)
 
       case Right(valid) if valid.message.viewNumber < state.viewNumber =>
-        // TODO (PM-3063): Also collect these for the round so we can realise if we're out of sync.
-        tracers.fromPast(valid).as(none)
+        tracers.fromPast(valid) >>
+          counterRef.update(_.incPast).as(none)
 
       case Right(valid)
          if valid.message.viewNumber > state.viewNumber + maxEarlyViewNumberDiff =>
-        // TODO (PM-3063): Also collect these for the round so we can realise if we're out of sync.
-        tracers.fromFuture(valid).as(none)
+        tracers.fromFuture(valid) >>
+          counterRef.update(_.incFuture).as(none)
 
       case Right(valid) =>
         // We know that the message is to/from the leader and it's properly signed,
         // although it may not match our current state, which we'll see later.
-        validated(valid).some.pure[F]
+        counterRef.update(_.incPresent).as(validated(valid).some)
     }
   }
 
@@ -161,26 +170,44 @@
       sender: A#PKey,
       prepare: Message.Prepare[A]
   ): F[Unit] =
-    blockSyncPipe.send(
-      BlockSyncPipe.Request(sender, prepare)
-    )
+    syncPipe.send(SyncPipe.PrepareRequest(sender, prepare))
 
   /** Process the synchronization result queue.
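     * Prepare responses feed validated events back into the protocol state,
     * while status responses fast-forward it to what the federation agreed on.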
    */
-  private def processBlockSyncPipe: F[Unit] =
-    blockSyncPipe.receive
-      .mapEval[Unit] { case BlockSyncPipe.Response(request, isValid) =>
-        if (isValid) {
-          enqueueEvent(
-            validated(Event.MessageReceived(request.sender, request.prepare))
-          )
-        } else {
-          protocolError(
-            ProtocolError.UnsafeExtension(request.sender, request.prepare)
-          )
-        }
+  private def processSyncPipe: F[Unit] =
+    syncPipe.receive
+      .mapEval[Unit] {
+        case SyncPipe.PrepareResponse(request, isValid) =>
+          if (isValid) {
+            enqueueEvent(
+              validated(Event.MessageReceived(request.sender, request.prepare))
+            )
+          } else {
+            protocolError(
+              ProtocolError.UnsafeExtension(request.sender, request.prepare)
+            )
+          }
+
+        case SyncPipe.StatusResponse(status) =>
+          fastForwardState(status)
       }
       .completedL
 
+  /** Replace the current protocol state based on what was synced with the federation. */
+  private def fastForwardState(status: Status[A]): F[Unit] = {
+    stateRef.get.flatMap { state =>
+      val forward = state.copy[A](
+        viewNumber = status.viewNumber,
+        prepareQC = status.prepareQC,
+        commitQC = status.commitQC
+      )
+      // Trigger the next view, so we get proper tracing and effect execution.
+      tracers.adoptView(status) >>
+        handleTransition(
+          forward.handleNextView(Event.NextView(status.viewNumber))
+        )
+    }
+  }
+
   /** Add a validated event to the queue for processing against the protocol state. */
   private def enqueueEvent(event: Validated[Event[A]]): F[Unit] =
     eventQueue.offer(event)
@@ -200,9 +227,12 @@
         ().pure[F]
 
       case e @ Event.NextView(viewNumber) =>
-        // TODO (PM-3063): Check whether we have timed out because we are out of sync
-        tracers.timeout(viewNumber) >>
-          handleTransition(state.handleNextView(e))
+        for {
+          counter <- counterRef.get
+          _       <- tracers.timeout(viewNumber -> counter)
+          _       <- maybeRequestStatusSync(viewNumber, counter)
+          _       <- handleTransition(state.handleNextView(e))
+        } yield ()
 
       case e @ Event.MessageReceived(_, _) =>
         handleTransitionAttempt(
@@ -218,6 +248,26 @@
     }
   }
 
+  /** Request view state synchronisation if we timed out and it looks like we're out of sync. */
+  private def maybeRequestStatusSync(
+      viewNumber: ViewNumber,
+      counter: MessageCounter
+  ): F[Unit] = {
+    // Only request a state sync if we haven't received any message that looks to be in sync,
+    // but we have received some from the future. If we have received messages from the past,
+    // then by virtue of timeouts they should catch up with us at some point.
+    val isOutOfSync = counter.present == 0 && counter.future > 0
+
+    // If there were two groups, each in sync within itself but not with the other,
+    // then there should be rounds when neither group holds the leader, so its members
+    // shouldn't receive valid present messages.
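+    // For example, if a federation {A, B, C, D} splits into {A, B} and {C, D},
+    // a member receives no valid present messages in rounds where the leader
+    // is in the other partition, so each side eventually requests a sync.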
+ val requestSync = + tracers.viewSync(viewNumber) >> + syncPipe.send(SyncPipe.StatusRequest(viewNumber)) + + requestSync.whenA(isOutOfSync) + } + /** Handle successful state transition: * - apply local effects on the state * - schedule other effects to execute in the background @@ -248,7 +298,8 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( f(next).whenA(prev != next) } - ifChanged(_.viewNumber)(updateViewNumber) >> + ifChanged(_.viewNumber)(_ => counterRef.set(MessageCounter.empty)) >> + ifChanged(_.viewNumber)(updateViewNumber) >> ifChanged(_.prepareQC)(updateQuorum) >> ifChanged(_.lockedQC)(updateQuorum) >> ifChanged(_.commitQC)(updateQuorum) @@ -371,8 +422,15 @@ class ConsensusService[F[_]: Timer: Concurrent, N, A <: Agreement: Block]( case CreateBlock(viewNumber, highQC) => // Ask the application to create a block for us. - // TODO (PM-3109): Create block. - ??? + appService.createBlock(highQC).flatMap { + case None => + ().pure[F] + + case Some(block) => + enqueueEvent( + validated(Event.BlockCreated(viewNumber, block, highQC)) + ) + } case SaveBlock(preparedBlock) => storeRunner.runReadWrite { @@ -461,6 +519,22 @@ object ConsensusService { def empty[A <: Agreement] = MessageStash[A](Map.empty) } + /** Count the number of messages received from others in a round, + * to determine whether we're out of sync or not in case of a timeout. + */ + case class MessageCounter( + past: Int, + present: Int, + future: Int + ) { + def incPast = copy(past = past + 1) + def incPresent = copy(present = present + 1) + def incFuture = copy(future = future + 1) + } + object MessageCounter { + val empty = MessageCounter(0, 0, 0) + } + /** Create a `ConsensusService` instance and start processing events * in the background, shutting processing down when the resource is * released. @@ -468,12 +542,17 @@ object ConsensusService { * `initState` is expected to be restored from persistent storage * instances upon restart. 
*/ - def apply[F[_]: Timer: Concurrent: ContextShift, N, A <: Agreement: Block]( + def apply[ + F[_]: Timer: Concurrent: ContextShift, + N, + A <: Agreement: Block: Signing + ]( publicKey: A#PKey, network: Network[F, A, Message[A]], + appService: ApplicationService[F, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], - blockSyncPipe: BlockSyncPipe[F, A]#Left, + syncPipe: SyncPipe[F, A]#Left, initState: ProtocolState[A], maxEarlyViewNumberDiff: Int = 1 )(implicit @@ -486,30 +565,34 @@ object ConsensusService { build[F, N, A]( publicKey, network, + appService, blockStorage, viewStateStorage, - blockSyncPipe, + syncPipe, initState, maxEarlyViewNumberDiff, fiberSet ) ) _ <- Concurrent[F].background(service.processNetworkMessages) - _ <- Concurrent[F].background(service.processBlockSyncPipe) + _ <- Concurrent[F].background(service.processSyncPipe) _ <- Concurrent[F].background(service.processEvents) _ <- Concurrent[F].background(service.executeBlocks) initEffects = ProtocolState.init(initState) _ <- Resource.liftF(service.scheduleEffects(initEffects)) } yield service - private def build[F[ - _ - ]: Timer: Concurrent: ContextShift, N, A <: Agreement: Block]( + private def build[ + F[_]: Timer: Concurrent: ContextShift, + N, + A <: Agreement: Block: Signing + ]( publicKey: A#PKey, network: Network[F, A, Message[A]], + appService: ApplicationService[F, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], - blockSyncPipe: BlockSyncPipe[F, A]#Left, + syncPipe: SyncPipe[F, A]#Left, initState: ProtocolState[A], maxEarlyViewNumberDiff: Int, fiberSet: FiberSet[F] @@ -521,6 +604,7 @@ object ConsensusService { stateRef <- Ref[F].of(initState) stashRef <- Ref[F].of(MessageStash.empty[A]) fibersRef <- Ref[F].of(Set.empty[Fiber[F, Unit]]) + counterRef <- Ref[F].of(MessageCounter.empty) eventQueue <- ConcurrentQueue[F].unbounded[Event[A]](None) blockExecutionQueue <- ConcurrentQueue[F] .unbounded[Effect.ExecuteBlocks[A]](None) @@ -528,11 +612,13 @@ object ConsensusService { service = new ConsensusService( publicKey, network, + appService, blockStorage, viewStateStorage, stateRef, stashRef, - blockSyncPipe, + counterRef, + syncPipe, eventQueue, blockExecutionQueue, fiberSet, diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala index 42639bdd..66be53a1 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala @@ -1,17 +1,19 @@ package io.iohk.metronome.hotstuff.service +import cats.Parallel import cats.effect.{Concurrent, ContextShift, Resource, Timer} import io.iohk.metronome.hotstuff.consensus.basic.{ Agreement, ProtocolState, Message, - Block + Block, + Signing } import io.iohk.metronome.hotstuff.service.messages.{ HotStuffMessage, SyncMessage } -import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe +import io.iohk.metronome.hotstuff.service.pipes.SyncPipe import io.iohk.metronome.hotstuff.service.storage.{ BlockStorage, ViewStateStorage @@ -25,8 +27,13 @@ import io.iohk.metronome.storage.KVStoreRunner object HotStuffService { /** Start up the HotStuff service stack. 
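     * It wires the consensus and sync services together over a shared `SyncPipe`.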
*/ - def apply[F[_]: Concurrent: ContextShift: Timer, N, A <: Agreement: Block]( + def apply[ + F[_]: Concurrent: ContextShift: Timer: Parallel, + N, + A <: Agreement: Block: Signing + ]( network: Network[F, A, HotStuffMessage[A]], + appService: ApplicationService[F, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], initState: ProtocolState[A] @@ -50,14 +57,15 @@ object HotStuffService { } ) - blockSyncPipe <- Resource.liftF { BlockSyncPipe[F, A] } + syncPipe <- Resource.liftF { SyncPipe[F, A] } consensusService <- ConsensusService( initState.publicKey, consensusNetwork, + appService, blockStorage, viewStateStorage, - blockSyncPipe.left, + syncPipe.left, initState ) @@ -65,8 +73,10 @@ object HotStuffService { initState.publicKey, initState.federation, syncNetwork, + appService, blockStorage, - blockSyncPipe.right, + viewStateStorage, + syncPipe.right, consensusService.getState ) } yield () diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala index b621a53f..d7780629 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala @@ -1,6 +1,7 @@ package io.iohk.metronome.hotstuff.service import cats.implicits._ +import cats.Parallel import cats.effect.{Sync, Resource, Concurrent, ContextShift, Timer} import io.iohk.metronome.core.fibers.FiberMap import io.iohk.metronome.core.messages.{ @@ -8,19 +9,26 @@ import io.iohk.metronome.core.messages.{ RPCPair, RPCTracker } -import io.iohk.metronome.hotstuff.consensus.Federation +import io.iohk.metronome.hotstuff.consensus.{Federation, ViewNumber} import io.iohk.metronome.hotstuff.consensus.basic.{ Agreement, ProtocolState, - Block + Block, + Signing } import io.iohk.metronome.hotstuff.service.messages.SyncMessage -import io.iohk.metronome.hotstuff.service.pipes.BlockSyncPipe -import io.iohk.metronome.hotstuff.service.storage.BlockStorage -import io.iohk.metronome.hotstuff.service.sync.BlockSynchronizer +import io.iohk.metronome.hotstuff.service.pipes.SyncPipe +import io.iohk.metronome.hotstuff.service.storage.{ + BlockStorage, + ViewStateStorage +} +import io.iohk.metronome.hotstuff.service.sync.{ + BlockSynchronizer, + ViewSynchronizer +} import io.iohk.metronome.hotstuff.service.tracing.SyncTracers import io.iohk.metronome.networking.ConnectionHandler -import io.iohk.metronome.storage.KVStoreRunner +import io.iohk.metronome.storage.{KVStoreRunner, KVStore} import scala.util.control.NonFatal import scala.concurrent.duration._ import scala.reflect.ClassTag @@ -35,18 +43,21 @@ import scala.reflect.ClassTag * The block and view synchronisation components will use this service * to send requests to the network. 
*/ -class SyncService[F[_]: Sync, N, A <: Agreement]( +class SyncService[F[_]: Concurrent: ContextShift, N, A <: Agreement: Block]( publicKey: A#PKey, network: Network[F, A, SyncMessage[A]], + appService: ApplicationService[F, A], blockStorage: BlockStorage[N, A], - blockSyncPipe: BlockSyncPipe[F, A]#Right, + viewStateStorage: ViewStateStorage[N, A], + syncPipe: SyncPipe[F, A]#Right, getState: F[ProtocolState[A]], incomingFiberMap: FiberMap[F, A#PKey], - syncFiberMap: FiberMap[F, A#PKey], rpcTracker: RPCTracker[F, SyncMessage[A]] )(implicit tracers: SyncTracers[F, A], storeRunner: KVStoreRunner[F, N]) { import SyncMessage._ + type BlockSync = SyncService.BlockSynchronizerWithFiberMap[F, N, A] + private def protocolStatus: F[Status[A]] = getState.map { state => Status(state.viewNumber, state.prepareQC, state.commitQC) @@ -158,49 +169,177 @@ class SyncService[F[_]: Sync, N, A <: Agreement]( } } - /** Read Requests from the BlockSyncPipe and send Responses. + /** Read Requests from the SyncPipe and send Responses. * * These are coming from the `ConsensusService` asking for a - * `Prepare` message to be synchronised with the sender. + * `Prepare` message to be synchronized with the sender, or + * for the view to be synchronized with the whole federation. */ - private def processBlockSyncPipe( - blockSynchronizer: BlockSynchronizer[F, N, A] - ): F[Unit] = { - blockSyncPipe.receive - .mapEval[Unit] { - // TODO (PM-3063): Change `BlockSyncPipe` to just `SyncPipe` and add - // ViewState sync requests which poll the fedreation for the latest - // Commit Q.C. and jump to it. When that signal comes, cancel the - // `syncFiberMap`, discard the `blockSynchronizer` and move over to - // state syncing, then create a new new block synchronizer and resume. - // For this, change the input of this method to a `F[BlockSynchronizer[F,N,A]]` - // and call some mutually recursive method representing different states: - - case request @ BlockSyncPipe.Request(sender, prepare) => - // It is enough to respond to the last block positively, it will indicate - // that the whole range can be executed later (at that point from storage). - // If the same leader is sending us newer proposals, we can ignore the - // previous pepared blocks - they are either part of the new Q.C., - // in which case they don't need to be validated, or they have not - // gathered enough votes, and been superseded by a new proposal. - syncFiberMap.cancelQueue(sender) >> - syncFiberMap - .submit(sender) { - for { - _ <- blockSynchronizer.sync(sender, prepare.highQC) - isValid <- validateBlock(prepare.block) - _ <- blockSyncPipe.send( - BlockSyncPipe.Response(request, isValid) - ) - } yield () - } - .void + private def processSyncPipe( + makeBlockSync: F[BlockSync], + viewSynchronizer: ViewSynchronizer[F, A] + ): F[Unit] = + syncPipe.receive.consume.use { consumer => + def loop( + blockSync: BlockSync, + lastSyncedViewNumber: ViewNumber + ): F[Unit] = { + consumer.pull.flatMap { + case Right(SyncPipe.PrepareRequest(_, prepare)) + if prepare.viewNumber < lastSyncedViewNumber => + // We have already synced to a Commit Q.C. higher than this old PrepareRequest. + loop(blockSync, lastSyncedViewNumber) + + case Right(SyncPipe.StatusRequest(viewNumber)) + if viewNumber < lastSyncedViewNumber => + // We have already synced higher than this old StatusRequest. 
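+            // Dropping it avoids tearing down the block synchronizer again for
+            // a view the federation has already moved past.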
+            loop(blockSync, lastSyncedViewNumber)
+
+          case Right(request @ SyncPipe.PrepareRequest(sender, prepare)) =>
+            handlePrepareRequest(blockSync, request) >>
+              loop(blockSync, lastSyncedViewNumber)
+
+          case Right(request @ SyncPipe.StatusRequest(_)) =>
+            handleStatusRequest(
+              makeBlockSync,
+              blockSync,
+              viewSynchronizer,
+              request
+            ).flatMap {
+              (loop _).tupled
+            }
+
+          case Left(maybeError) =>
+            blockSync.fiberMapRelease >>
+              maybeError.fold(().pure[F])(Sync[F].raiseError(_))
+        }
+      }
+
+      makeBlockSync.flatMap { blockSync =>
+        loop(blockSync, ViewNumber(0))
+      }
+    }
+
+  /** Sync with the sender up to the High Q.C. it sent, then validate the prepared block.
+    *
+    * This is done in the background, while further requests are taken from the pipe.
+    */
+  private def handlePrepareRequest(
+      blockSync: BlockSync,
+      request: SyncPipe.PrepareRequest[A]
+  ): F[Unit] = {
+    val sender  = request.sender
+    val prepare = request.prepare
+    // It is enough to respond to the last block positively; it will indicate
+    // that the whole range can be executed later (at that point from storage).
+    // If the same leader is sending us newer proposals, we can ignore the
+    // previous prepared blocks - they are either part of the new Q.C.,
+    // in which case they don't need to be validated, or they have not
+    // gathered enough votes, and been superseded by a new proposal.
+    blockSync.fiberMap.cancelQueue(sender) >>
+      blockSync.fiberMap
+        .submit(sender) {
+          blockSync.synchronizer.sync(sender, prepare.highQC) >>
+            appService.validateBlock(prepare.block) >>= {
+            case Some(isValid) =>
+              syncPipe.send(SyncPipe.PrepareResponse(request, isValid))
+            case None =>
+              // We didn't have data to decide validity in time; not responding.
+              ().pure[F]
+          }
+        }
+        .void
   }
-  // TODO (PM-3132, PM-3133): Block validation.
-  private def validateBlock(block: A#Block): F[Boolean] = ???
+  /** Shut down any outstanding block downloads, sync the view,
+    * then create another block synchronizer instance to resume with.
+    */
+  private def handleStatusRequest(
+      makeBlockSync: F[BlockSync],
+      blockSync: BlockSync,
+      viewSynchronizer: ViewSynchronizer[F, A],
+      request: SyncPipe.StatusRequest
+  ): F[(BlockSync, ViewNumber)] =
+    for {
+      // Cancel all outstanding block syncing.
+      _ <- blockSync.fiberMapRelease
+      // The block synchronizer is still usable.
+      viewNumber <- syncStatus(
+        blockSync.synchronizer,
+        viewSynchronizer
+      ).handleErrorWith { case NonFatal(ex) =>
+        tracers.error(ex).as(request.viewNumber)
+      }
+      // Create a fresh fiber and block synchronizer instance.
+      // When the previous goes out of scope, its ephemeral storage is freed.
+      newBlockSync <- makeBlockSync
+    } yield (newBlockSync, viewNumber)
+
+  /** Get the latest status of federation members, download the corresponding block
+    * and prune all existing block history, making the latest Commit Q.C. the new
+    * root in the block tree.
+    *
+    * This is done in the foreground; no further requests are taken from the pipe.
+    */
+  private def syncStatus(
+      blockSynchronizer: BlockSynchronizer[F, N, A],
+      viewSynchronizer: ViewSynchronizer[F, A]
+  ): F[ViewNumber] =
+    for {
+      // Sync to the latest Commit Q.C.
+      federationStatus <- viewSynchronizer.sync
+      status = federationStatus.status
+
+      // Download the block in the Commit Q.C.
+      block <- blockSynchronizer
+        .getBlockFromQuorumCertificate(
+          federationStatus.sources,
+          status.commitQC
+        )
+        .rethrow
+
+      // Sync any application specific state, e.g. a ledger.
+      // Do this before we prune the existing blocks and set the new root.
+      _ <- appService.syncState(federationStatus.sources, block)
+
+      // Prune the block store from earlier blocks that are no longer traversable.
+      _ <- fastForwardStorage(status, block)
+
+      // Tell the ConsensusService about the new Status.
+      _ <- syncPipe.send(SyncPipe.StatusResponse(status))
+    } yield status.viewNumber
+
+  /** Replace the state we have persisted with what we synced with the federation.
+    *
+    * Prunes old blocks; the Commit Q.C. will be the new root.
+    */
+  private def fastForwardStorage(status: Status[A], block: A#Block): F[Unit] = {
+    val blockHash = Block[A].blockHash(block)
+    assert(blockHash == status.commitQC.blockHash)
+
+    val query: KVStore[N, Unit] =
+      for {
+        viewState <- viewStateStorage.getBundle.lift
+        // Insert the new block.
+        _ <- blockStorage.put(block)
+
+        // Prune old data, but keep the new block.
+        ds <- blockStorage
+          .getDescendants(
+            viewState.rootBlockHash,
+            skip = Set(blockHash)
+          )
+          .lift
+        _ <- ds.traverse(blockStorage.deleteUnsafe(_))
+
+        // Considering the committed block as executed, we have its state already.
+        _ <- viewStateStorage.setLastExecutedBlockHash(blockHash)
+        _ <- viewStateStorage.setRootBlockHash(blockHash)
+        // The rest of the fields will be set by the ConsensusService.
+      } yield ()
+
+    storeRunner.runReadWrite(query)
+  }
 }
 
 object SyncService {
 
   /** Create a `SyncService` instance and start processing events
     * in the background, shutting processing down when the resource is
     * released.
     */
-  def apply[F[_]: Concurrent: ContextShift: Timer, N, A <: Agreement: Block](
+  def apply[
+      F[_]: Concurrent: ContextShift: Timer: Parallel,
+      N,
+      A <: Agreement: Block: Signing
+  ](
       publicKey: A#PKey,
       federation: Federation[A#PKey],
       network: Network[F, A, SyncMessage[A]],
+      appService: ApplicationService[F, A],
       blockStorage: BlockStorage[N, A],
-      blockSyncPipe: BlockSyncPipe[F, A]#Right,
+      viewStateStorage: ViewStateStorage[N, A],
+      syncPipe: SyncPipe[F, A]#Right,
       getState: F[ProtocolState[A]],
       timeout: FiniteDuration = 10.seconds
   )(implicit
       tracers: SyncTracers[F, A],
      storeRunner: KVStoreRunner[F, N]
   ): Resource[F, SyncService[F, N, A]] =
     // TODO (PM-3186): Add capacity as part of rate limiting.
     for {
       incomingFiberMap <- FiberMap[F, A#PKey]()
-      syncFiberMap     <- FiberMap[F, A#PKey]()
       rpcTracker <- Resource.liftF {
         RPCTracker[F, SyncMessage[A]](timeout)
       }
       service = new SyncService(
         publicKey,
         network,
+        appService,
         blockStorage,
-        blockSyncPipe,
+        viewStateStorage,
+        syncPipe,
         getState,
         incomingFiberMap,
-        syncFiberMap,
         rpcTracker
       )
-      blockSync <- Resource.liftF {
-        BlockSynchronizer[F, N, A](
+
+      blockSync = for {
+        (syncFiberMap, syncFiberMapRelease) <- FiberMap[F, A#PKey]().allocated
+        blockSynchronizer <- BlockSynchronizer[F, N, A](
           publicKey,
           federation,
           blockStorage,
           service.getBlock
         )
+      } yield BlockSynchronizerWithFiberMap(
+        blockSynchronizer,
+        syncFiberMap,
+        syncFiberMapRelease
+      )
+
+      viewSynchronizer = new ViewSynchronizer[F, A](
+        federation,
+        service.getStatus
+      )
+
+      _ <- Concurrent[F].background {
+        service.processNetworkMessages
+      }
+      _ <- Concurrent[F].background {
+        service.processSyncPipe(blockSync, viewSynchronizer)
       }
-      _ <- Concurrent[F].background(service.processNetworkMessages)
-      _ <- Concurrent[F].background(service.processBlockSyncPipe(blockSync))
     } yield service
+
+  /** The `SyncService` can be in two modes: either we're in sync with the federation
+    * and downloading the odd missing block every now and then, or we are out of sync,
+    * in which case we need to ask everyone to find out what the current view number
+    * is, and then jump straight to the latest Commit Quorum Certificate.
+    *
+    * Our implementation assumes that this is always supported by the application.
+    *
+    * When we go from block sync to view sync, the block syncs happening in the
+    * background on the fiber map in this class are canceled, and the synchronizer
+    * instance with its ephemeral storage is discarded.
+    */
+  case class BlockSynchronizerWithFiberMap[F[_], N, A <: Agreement](
+      synchronizer: BlockSynchronizer[F, N, A],
+      fiberMap: FiberMap[F, A#PKey],
+      fiberMapRelease: F[Unit]
+  )
 }
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/DuplexMessage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/DuplexMessage.scala
index f80ea1de..92015dc4 100644
--- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/DuplexMessage.scala
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/messages/DuplexMessage.scala
@@ -6,7 +6,7 @@ import io.iohk.metronome.hotstuff.consensus.basic.Agreement
 /** Message type to use in the networking layer if the use case has
   * application specific message types, e.g. ledger synchronisation,
   * not just the general BFT agreement (which could be enough if
-  * we need to execute all blocks to synchronise state).
+  * we need to execute all blocks to synchronize state).
*/ sealed trait DuplexMessage[A <: Agreement, M] diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/BlockSyncPipe.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/BlockSyncPipe.scala deleted file mode 100644 index 83e57ad0..00000000 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/BlockSyncPipe.scala +++ /dev/null @@ -1,39 +0,0 @@ -package io.iohk.metronome.hotstuff.service.pipes - -import cats.effect.{Concurrent, ContextShift} -import io.iohk.metronome.core.Pipe -import io.iohk.metronome.hotstuff.consensus.basic.Agreement -import io.iohk.metronome.hotstuff.consensus.basic.Message - -object BlockSyncPipe { - - /** Request the synchronization component to download - * any missing dependencies up to the High Q.C., - * perform any application specific validation, - * including the block in the `Prepare` message, - * and persist the blocks up to, but not including - * the block in the `Prepare` message. - * - * This is because the block being prepared is - * subject to further validation and voting, - * while the one in the High Q.C. has gathered - * a quorum from the federation. - */ - case class Request[A <: Agreement]( - sender: A#PKey, - prepare: Message.Prepare[A] - ) - - /** Respond with the outcome of whether the - * block we're being asked to prepare is - * valid, according to the application rules. - */ - case class Response[A <: Agreement]( - request: Request[A], - isValid: Boolean - ) - - def apply[F[_]: Concurrent: ContextShift, A <: Agreement] - : F[BlockSyncPipe[F, A]] = - Pipe[F, BlockSyncPipe.Request[A], BlockSyncPipe.Response[A]] -} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/SyncPipe.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/SyncPipe.scala new file mode 100644 index 00000000..61a9f03a --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/SyncPipe.scala @@ -0,0 +1,58 @@ +package io.iohk.metronome.hotstuff.service.pipes + +import cats.effect.{Concurrent, ContextShift} +import io.iohk.metronome.core.Pipe +import io.iohk.metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, Message} +import io.iohk.metronome.hotstuff.service.Status + +object SyncPipe { + + sealed trait Request[+A <: Agreement] + sealed trait Response[+A <: Agreement] + + /** Request the synchronization component to download + * any missing dependencies up to the High Q.C., + * perform any application specific validation, + * including the block in the `Prepare` message, + * and persist the blocks up to, but not including + * the block in the `Prepare` message. + * + * This is because the block being prepared is + * subject to further validation and voting, + * while the one in the High Q.C. has gathered + * a quorum from the federation. + */ + case class PrepareRequest[A <: Agreement]( + sender: A#PKey, + prepare: Message.Prepare[A] + ) extends Request[A] + + /** Respond with the outcome of whether the + * block we're being asked to prepare is + * valid, according to the application rules. + */ + case class PrepareResponse[A <: Agreement]( + request: PrepareRequest[A], + isValid: Boolean + ) extends Response[A] + + /** Request that the view state is synchronized with the whole federation, + * including downloading the block and state corresponding to the latest + * Commit Q.C. 
+ * + * The eventual response should contain the new view status to be applied + * on the protocol state. + */ + case class StatusRequest(viewNumber: ViewNumber) extends Request[Nothing] + + /** Response with the new status to resume the protocol from, after the + * state has been synchronized up to the included Commit Q.C. + */ + case class StatusResponse[A <: Agreement]( + status: Status[A] + ) extends Response[A] + + def apply[F[_]: Concurrent: ContextShift, A <: Agreement]: F[SyncPipe[F, A]] = + Pipe[F, SyncPipe.Request[A], SyncPipe.Response[A]] +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/package.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/package.scala index 12f51399..ea4defe7 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/package.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/pipes/package.scala @@ -6,6 +6,6 @@ import io.iohk.metronome.hotstuff.consensus.basic.Agreement package object pipes { /** Communication pipe with the block synchronization and validation component. */ - type BlockSyncPipe[F[_], A <: Agreement] = - Pipe[F, BlockSyncPipe.Request[A], BlockSyncPipe.Response[A]] + type SyncPipe[F[_], A <: Agreement] = + Pipe[F, SyncPipe.Request[A], SyncPipe.Response[A]] } diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala index b0d887fc..3976c28e 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala @@ -44,13 +44,17 @@ class ViewStateStorage[N, A <: Agreement] private ( def setLastExecutedBlockHash(blockHash: A#Hash): KVStore[N, Unit] = put(Key.LastExecutedBlockHash, blockHash) + def setRootBlockHash(blockHash: A#Hash): KVStore[N, Unit] = + put(Key.RootBlockHash, blockHash) + def getBundle: KVStoreRead[N, ViewStateStorage.Bundle[A]] = ( read(Key.ViewNumber), read(Key.PrepareQC), read(Key.LockedQC), read(Key.CommitQC), - read(Key.LastExecutedBlockHash) + read(Key.LastExecutedBlockHash), + read(Key.RootBlockHash) ).mapN(ViewStateStorage.Bundle.apply[A] _) } @@ -68,6 +72,7 @@ object ViewStateStorage { case object LockedQC extends Key[QuorumCertificate[A]](2) case object CommitQC extends Key[QuorumCertificate[A]](3) case object LastExecutedBlockHash extends Key[A#Hash](4) + case object RootBlockHash extends Key[A#Hash](5) implicit def encoder[V]: Encoder[Key[V]] = scodec.codecs.uint8.contramap[Key[V]](_.code) @@ -85,7 +90,8 @@ object ViewStateStorage { prepareQC: QuorumCertificate[A], lockedQC: QuorumCertificate[A], commitQC: QuorumCertificate[A], - lastExecutedBlockHash: A#Hash + lastExecutedBlockHash: A#Hash, + rootBlockHash: A#Hash ) { assert(prepareQC.phase == Phase.Prepare) assert(lockedQC.phase == Phase.PreCommit) @@ -103,7 +109,8 @@ object ViewStateStorage { prepareQC = genesisQC.copy[A](phase = Phase.Prepare), lockedQC = genesisQC.copy[A](phase = Phase.PreCommit), commitQC = genesisQC.copy[A](phase = Phase.Commit), - lastExecutedBlockHash = genesisQC.blockHash + lastExecutedBlockHash = genesisQC.blockHash, + rootBlockHash = genesisQC.blockHash ) } @@ -142,6 +149,9 @@ object ViewStateStorage { _ <- KVStore[N].alter(namespace, Key.LastExecutedBlockHash)( setDefault(genesis.lastExecutedBlockHash) ) + _ <- 
KVStore[N].alter(namespace, Key.RootBlockHash)(
+        setDefault(genesis.rootBlockHash)
+      )
     } yield new ViewStateStorage[N, A](namespace)
   }
 }
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala
index 43467dad..698458b7 100644
--- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala
@@ -1,6 +1,7 @@
 package io.iohk.metronome.hotstuff.service.sync
 
 import cats.implicits._
+import cats.data.NonEmptyVector
 import cats.effect.{Sync, Timer, Concurrent, ContextShift}
 import cats.effect.concurrent.Semaphore
 import io.iohk.metronome.hotstuff.consensus.Federation
@@ -13,6 +14,7 @@ import io.iohk.metronome.hotstuff.service.storage.BlockStorage
 import io.iohk.metronome.storage.{InMemoryKVStore, KVStoreRunner}
 import scala.concurrent.duration._
 import scala.util.Random
+import scala.util.control.NoStackTrace
 
 /** The job of the `BlockSynchronizer` is to procure missing blocks when a `Prepare`
   * message builds on a High Q.C. that we don't have.
@@ -39,6 +41,7 @@ class BlockSynchronizer[F[_]: Sync: Timer, N, A <: Agreement: Block](
     semaphore: Semaphore[F],
     retryTimeout: FiniteDuration = 5.seconds
 )(implicit storeRunner: KVStoreRunner[F, N]) {
+  import BlockSynchronizer.DownloadFailedException
 
   private val otherPublicKeys = federation.publicKeys.filterNot(_ == publicKey)
 
@@ -67,6 +70,55 @@
       _ <- persist(quorumCertificate.blockHash, path)
     } yield ()
 
+  /** Download the block in the Quorum Certificate without ancestors.
+    *
+    * Return it without being persisted.
+    *
+    * Unlike `sync`, which is expected to be canceled if consensus times out,
+    * or be satisfied by alternative downloads happening concurrently, this
+    * method returns an error if it cannot download the block after a certain
+    * number of attempts, from any of the sources. This is because its primary
+    * use is during state syncing where this is the only operation, and if for
+    * any reason the block would be gone from every honest member's storage,
+    * we have to try something else.
+    */
+  def getBlockFromQuorumCertificate(
+      sources: NonEmptyVector[A#PKey],
+      quorumCertificate: QuorumCertificate[A]
+  ): F[Either[DownloadFailedException[A], A#Block]] = {
+    val otherSources = sources.filterNot(_ == publicKey).toList
+
+    def loop(
+        alternatives: List[A#PKey]
+    ): F[Either[DownloadFailedException[A], A#Block]] = {
+      alternatives match {
+        case Nil =>
+          new DownloadFailedException(
+            quorumCertificate.blockHash,
+            sources.toVector
+          ).asLeft[A#Block].pure[F]
+
+        case source :: alternatives =>
+          getAndValidateBlock(source, quorumCertificate.blockHash, otherSources)
+            .flatMap {
+              case None =>
+                loop(alternatives)
+              case Some(block) =>
+                block.asRight[DownloadFailedException[A]].pure[F]
+            }
+      }
+    }
+
+    storeRunner
+      .runReadOnly {
+        blockStorage.get(quorumCertificate.blockHash)
+      }
+      .flatMap {
+        case None        => loop(Random.shuffle(otherSources))
+        case Some(block) => block.asRight[DownloadFailedException[A]].pure[F]
+      }
+  }
+
   /** Download a block and all of its ancestors into the in-memory block store.
* * Returns the path from the greatest ancestor that had to be downloaded @@ -130,7 +182,8 @@ class BlockSynchronizer[F[_]: Sync: Timer, N, A <: Agreement: Block]( */ private def getAndValidateBlock( from: A#PKey, - blockHash: A#Hash + blockHash: A#Hash, + alternativeSources: Seq[A#PKey] = otherPublicKeys ): F[Option[A#Block]] = { def fetch(from: A#PKey) = getBlock(from, blockHash) @@ -144,16 +197,16 @@ class BlockSynchronizer[F[_]: Sync: Timer, N, A <: Agreement: Block]( def loop(sources: List[A#PKey]): F[Option[A#Block]] = sources match { case Nil => none.pure[F] - case from :: alternatives => + case from :: sources => fetch(from).flatMap { - case None => loop(alternatives) + case None => loop(sources) case block => block.pure[F] } } loop(List(from)).flatMap { case None => - loop(Random.shuffle(otherPublicKeys.filterNot(_ == from).toList)) + loop(Random.shuffle(alternativeSources.filterNot(_ == from).toList)) case block => block.pure[F] } @@ -225,6 +278,14 @@ class BlockSynchronizer[F[_]: Sync: Timer, N, A <: Agreement: Block]( object BlockSynchronizer { + class DownloadFailedException[A <: Agreement]( + blockHash: A#Hash, + sources: Seq[A#PKey] + ) extends RuntimeException( + s"Failed to download block ${blockHash} from ${sources.size} sources." + ) + with NoStackTrace + /** Send a network request to get a block. */ type GetBlock[F[_], A <: Agreement] = (A#PKey, A#Hash) => F[Option[A#Block]] diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizer.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizer.scala index 11f90c26..0e6b8452 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizer.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizer.scala @@ -3,7 +3,7 @@ package io.iohk.metronome.hotstuff.service.sync import cats._ import cats.implicits._ import cats.effect.{Timer, Sync} -import cats.data.NonEmptySeq +import cats.data.{NonEmptySeq, NonEmptyVector} import io.iohk.metronome.core.Validated import io.iohk.metronome.hotstuff.consensus.{Federation, ViewNumber} import io.iohk.metronome.hotstuff.consensus.basic.{ @@ -27,7 +27,7 @@ class ViewSynchronizer[F[_]: Sync: Timer: Parallel, A <: Agreement: Signing]( getStatus: ViewSynchronizer.GetStatus[F, A], retryTimeout: FiniteDuration = 5.seconds )(implicit tracers: SyncTracers[F, A]) { - import ViewSynchronizer.aggregateStatus + import ViewSynchronizer.{aggregateStatus, FederationStatus} /** Poll the federation members for the current status until we have gathered * enough to make a decision, i.e. we have a quorum. @@ -37,18 +37,28 @@ class ViewSynchronizer[F[_]: Sync: Timer: Parallel, A <: Agreement: Signing]( * * Try again until in one round we can gather all statuses from everyone. 
*/ - def sync: F[Status[A]] = { + def sync: F[ViewSynchronizer.FederationStatus[A]] = { federation.publicKeys.toVector .parTraverse(getAndValidateStatus) .flatMap { maybeStatuses => + val statusMap = (federation.publicKeys zip maybeStatuses).collect { + case (k, Some(s)) => k -> s + }.toMap + tracers - .statusPoll(federation.publicKeys -> maybeStatuses) - .as(maybeStatuses.flatten) + .statusPoll(statusMap) + .as(statusMap) } - .map(NonEmptySeq.fromSeq) .flatMap { - case Some(statuses) if statuses.size >= federation.quorumSize => - aggregateStatus(statuses).pure[F] + case statusMap if statusMap.size >= federation.quorumSize => + val statuses = statusMap.values.toList + val status = aggregateStatus(NonEmptySeq.fromSeqUnsafe(statuses)) + + // Returning everyone who responded so we always have a quorum sized set to talk to. + val sources = + NonEmptyVector.fromVectorUnsafe(statusMap.keySet.toVector) + + FederationStatus(status, sources).pure[F] case _ => // We traced all responses, so we can detect if we're in an endless loop. @@ -173,4 +183,9 @@ object ViewSynchronizer { def median[T: Order](xs: NonEmptySeq[T]): T = xs.sorted.getUnsafe(xs.size.toInt / 2) + /** The final status coupled with the federation members that can serve the data. */ + case class FederationStatus[A <: Agreement]( + status: Status[A], + sources: NonEmptyVector[A#PKey] + ) } diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala index ceccc78c..24b29cb7 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala @@ -7,13 +7,28 @@ import io.iohk.metronome.hotstuff.consensus.basic.{ ProtocolError } import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate +import io.iohk.metronome.hotstuff.service.ConsensusService.MessageCounter +import io.iohk.metronome.hotstuff.service.Status sealed trait ConsensusEvent[+A <: Agreement] object ConsensusEvent { /** The round ended without having reached decision. */ - case class Timeout(viewNumber: ViewNumber) extends ConsensusEvent[Nothing] + case class Timeout( + viewNumber: ViewNumber, + messageCounter: MessageCounter + ) extends ConsensusEvent[Nothing] + + /** A full view synchronization was requested after timing out without any in-sync messages. */ + case class ViewSync( + viewNumber: ViewNumber + ) extends ConsensusEvent[Nothing] + + /** Adopting the view of the federation after a sync. */ + case class AdoptView[A <: Agreement]( + status: Status[A] + ) extends ConsensusEvent[A] /** The state advanced to a new view. 
*/ case class NewView(viewNumber: ViewNumber) extends ConsensusEvent[Nothing] diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala index dee0e679..7aa6c061 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala @@ -9,9 +9,13 @@ import io.iohk.metronome.hotstuff.consensus.basic.{ ProtocolError, QuorumCertificate } +import io.iohk.metronome.hotstuff.service.ConsensusService.MessageCounter +import io.iohk.metronome.hotstuff.service.Status case class ConsensusTracers[F[_], A <: Agreement]( - timeout: Tracer[F, ViewNumber], + timeout: Tracer[F, (ViewNumber, MessageCounter)], + viewSync: Tracer[F, ViewNumber], + adoptView: Tracer[F, Status[A]], newView: Tracer[F, ViewNumber], quorum: Tracer[F, QuorumCertificate[A]], fromPast: Tracer[F, Event.MessageReceived[A]], @@ -28,7 +32,11 @@ object ConsensusTracers { tracer: Tracer[F, ConsensusEvent[A]] ): ConsensusTracers[F, A] = ConsensusTracers[F, A]( - timeout = tracer.contramap[ViewNumber](Timeout(_)), + timeout = tracer.contramap[(ViewNumber, MessageCounter)]( + (Timeout.apply _).tupled + ), + viewSync = tracer.contramap[ViewNumber](ViewSync(_)), + adoptView = tracer.contramap[Status[A]](AdoptView(_)), newView = tracer.contramap[ViewNumber](NewView(_)), quorum = tracer.contramap[QuorumCertificate[A]](Quorum(_)), fromPast = tracer.contramap[Event.MessageReceived[A]](FromPast(_)), diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala index 584a26eb..f480a448 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/SyncTracers.scala @@ -26,7 +26,7 @@ object SyncTracers { (A#PKey, SyncMessage[A] with SyncMessage.Response, Option[Throwable]) type Statuses[A <: Agreement] = - (IndexedSeq[A#PKey], IndexedSeq[Option[Validated[Status[A]]]]) + Map[A#PKey, Validated[Status[A]]] type StatusError[A <: Agreement] = (Status[A], ProtocolError.InvalidQuorumCertificate[A], String) @@ -41,17 +41,9 @@ object SyncTracers { responseIgnored = tracer .contramap[Response[A]]((ResponseIgnored.apply[A] _).tupled), statusPoll = tracer - .contramap[Statuses[A]] { case (publicKeys, maybeStatuses) => - StatusPoll[A] { - (publicKeys zip maybeStatuses).toMap.collect { - case (key, Some(status)) => key -> status - } - } - }, + .contramap[Statuses[A]](StatusPoll(_)), invalidStatus = - tracer.contramap[StatusError[A]] { case (status, error, hint) => - InvalidStatus(status, error, hint) - }, + tracer.contramap[StatusError[A]]((InvalidStatus.apply[A] _).tupled), error = tracer.contramap[Throwable](Error(_)) ) } diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala index 6b490811..96b9bd0b 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala @@ -136,6 +136,15 @@ 
object ViewStateStorageCommands extends Commands {
         )
       } yield SetLastExecutedBlockHashCommand(h)
 
+    def genSetRootBlockHash(state: State) =
+      for {
+        h <- Gen.oneOf(
+          state.prepareQC.blockHash,
+          state.lockedQC.blockHash,
+          state.commitQC.blockHash
+        )
+      } yield SetRootBlockHashCommand(h)
+
     val genGetBundle = Gen.const(GetBundleCommand)
 
     case class SetViewNumberCommand(viewNumber: ViewNumber) extends UnitCommand {
@@ -182,6 +191,22 @@ object ViewStateStorageCommands extends Commands {
     override def postCondition(state: State, success: Boolean): Prop = success
   }
 
+  case class SetRootBlockHashCommand(blockHash: TestAgreement.Hash)
+      extends UnitCommand {
+    override def run(sut: Sut): Result =
+      sut.write(_.setRootBlockHash(blockHash))
+
+    override def nextState(state: State): State =
+      state.copy(rootBlockHash = blockHash)
+
+    override def preCondition(state: State): Boolean =
+      Set(state.prepareQC, state.lockedQC, state.commitQC)
+        .map(_.blockHash)
+        .contains(blockHash)
+
+    override def postCondition(state: State, success: Boolean): Prop = success
+  }
+
   case object GetBundleCommand extends Command {
     type Result = ViewStateStorage.Bundle[TestAgreement]
 
diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala
index bb419629..21ee7627 100644
--- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala
+++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala
@@ -1,19 +1,23 @@
 package io.iohk.metronome.hotstuff.service.sync
 
+import cats.implicits._
+import cats.data.NonEmptyVector
 import cats.effect.concurrent.{Ref, Semaphore}
 import io.iohk.metronome.crypto.GroupSignature
-import io.iohk.metronome.hotstuff.consensus.ViewNumber
+import io.iohk.metronome.hotstuff.consensus.{
+  ViewNumber,
+  Federation,
+  LeaderSelection
+}
 import io.iohk.metronome.hotstuff.consensus.basic.{QuorumCertificate, Phase}
 import io.iohk.metronome.hotstuff.service.storage.BlockStorageProps
 import io.iohk.metronome.storage.InMemoryKVStore
-import org.scalacheck.{Properties, Arbitrary, Gen}, Arbitrary.arbitrary
-import org.scalacheck.Prop.{all, forAll, propBoolean}
+import org.scalacheck.{Properties, Arbitrary, Gen, Prop}, Arbitrary.arbitrary
+import org.scalacheck.Prop.{all, forAll, forAllNoShrink, propBoolean}
 import monix.eval.Task
 import monix.execution.schedulers.TestScheduler
 import scala.util.Random
 import scala.concurrent.duration._
-import io.iohk.metronome.hotstuff.consensus.Federation
-import io.iohk.metronome.hotstuff.consensus.LeaderSelection
 
 object BlockSynchronizerProps extends Properties("BlockSynchronizer") {
   import BlockStorageProps.{
@@ -25,6 +29,10 @@ object BlockSynchronizerProps extends Properties("BlockSynchronizer") {
     genNonEmptyBlockTree
   }
 
+  case class Prob(value: Double) {
+    require(value >= 0 && value <= 1)
+  }
+
   // Insert the prefix tree into "persistent" storage,
   // then start multiple concurrent download processes
   // from random federation members pointing at various
@@ -39,7 +47,9 @@ object BlockSynchronizerProps extends Properties("BlockSynchronizer") {
       descendantTree: List[TestBlock],
       requests: List[(TestAgreement.PKey, QuorumCertificate[TestAgreement])],
       federation: Federation[TestAgreement.PKey],
-      random: Random
+      random: Random,
+      timeoutProb: Prob,
+      corruptProb: Prob
   ) {
     val persistentRef = Ref.unsafe[Task,
TestKVStore.Store] { TestKVStore.build(ancestorTree) @@ -60,11 +70,11 @@ object BlockSynchronizerProps extends Properties("BlockSynchronizer") { blockHash: TestAgreement.Hash ): Task[Option[TestAgreement.Block]] = { val timeout = 5000 - val delay = random.nextDouble() * 3000 - val isLost = random.nextDouble() < 0.2 - val isCorrupt = random.nextDouble() < 0.2 + val delay = random.nextDouble() * 2900 + 100 + val isTimeout = random.nextDouble() < timeoutProb.value + val isCorrupt = random.nextDouble() < corruptProb.value - if (isLost) { + if (isTimeout) { Task.pure(None).delayResult(timeout.millis) } else { val block = blockMap(blockHash) @@ -96,13 +106,15 @@ object BlockSynchronizerProps extends Properties("BlockSynchronizer") { } object TestFixture { - implicit val arb: Arbitrary[TestFixture] = Arbitrary { + implicit val arb: Arbitrary[TestFixture] = Arbitrary(gen()) + + def gen(timeoutProb: Prob = Prob(0.2), corruptProb: Prob = Prob(0.2)) = for { ancestorTree <- genNonEmptyBlockTree leaf = ancestorTree.last descendantTree <- genNonEmptyBlockTree(parentId = leaf.id) - federationSize <- Gen.choose(1, 10) + federationSize <- Gen.choose(3, 10) federationKeys = Range(0, federationSize).toVector federation = Federation(federationKeys)(LeaderSelection.RoundRobin) .getOrElse(sys.error("Can't create federation.")) @@ -130,14 +142,23 @@ object BlockSynchronizerProps extends Properties("BlockSynchronizer") { descendantTree, requests, federation, - random + random, + timeoutProb, + corruptProb ) - } } - property("persists") = forAll { (fixture: TestFixture) => + def simulate(duration: FiniteDuration)(test: Task[Prop]): Prop = { implicit val scheduler = TestScheduler() + // Schedule the execution, using a Future so we can check the value. + val testFuture = test.runToFuture + // Simulate a time. + scheduler.tick(duration) + // Get the completed results. + testFuture.value.get.get + } + property("sync - persist") = forAll { (fixture: TestFixture) => val test = for { fibers <- Task.traverse(fixture.requests) { case (publicKey, qc) => fixture.synchronizer.sync(publicKey, qc).start @@ -148,7 +169,7 @@ object BlockSynchronizerProps extends Properties("BlockSynchronizer") { ephemeral <- fixture.ephemeralRef.get } yield { all( - "ephermeral empty" |: ephemeral.isEmpty, + "ephemeral empty" |: ephemeral.isEmpty, "persistent contains all" |: fixture.requests.forall { case (_, qc) => persistent(Namespace.Blocks).contains(qc.blockHash) }, @@ -161,19 +182,13 @@ object BlockSynchronizerProps extends Properties("BlockSynchronizer") { } ) } - - // Schedule the execution, using a Future so we can check the value. - val testFuture = test.runToFuture - // Simulate a long time, which should be enough for all downloads to finish. - scheduler.tick(1.day) - - testFuture.value.get.get + simulate(1.day)(test) } - property("no forest") = forAll( + property("sync - no forest") = forAll( for { - fixture <- arbitrary[TestFixture] + fixture <- TestFixture.gen(timeoutProb = Prob(0)) duration <- Gen.choose(1, fixture.requests.size).map(_ * 500.millis) } yield (fixture, duration) ) { case (fixture: TestFixture, duration: FiniteDuration) => @@ -208,4 +223,59 @@ object BlockSynchronizerProps extends Properties("BlockSynchronizer") { testFuture.value.get.get } + + property("getBlockFromQuorumCertificate") = forAllNoShrink( + for { + fixture <- TestFixture + .gen(timeoutProb = Prob(0), corruptProb = Prob(0)) + sources <- Gen.pick( + fixture.federation.quorumSize, + fixture.federation.publicKeys + ) + // The last request is definitely new. 
+ qc = fixture.requests.last._2 + } yield (fixture, sources, qc) + ) { case (fixture, sources, qc) => + val test = for { + block <- fixture.synchronizer + .getBlockFromQuorumCertificate( + sources = NonEmptyVector.fromVectorUnsafe(sources.toVector), + quorumCertificate = qc + ) + .rethrow + persistent <- fixture.persistentRef.get + ephemeral <- fixture.ephemeralRef.get + } yield { + all( + "downloaded" |: block.id == qc.blockHash, + "not in ephemeral" |: ephemeral.isEmpty, + "not in persistent" |: + !persistent(Namespace.Blocks).contains(qc.blockHash) + ) + } + simulate(1.minute)(test) + } + + property("getBlockFromQuorumCertificate - timeout") = forAllNoShrink( + for { + fixture <- TestFixture.gen(timeoutProb = Prob(1)) + request = fixture.requests.last // Use one that isn't persisted yet. + } yield (fixture, request._1, request._2) + ) { case (fixture, source, qc) => + val test = for { + result <- fixture.synchronizer + .getBlockFromQuorumCertificate( + sources = NonEmptyVector.one(source), + quorumCertificate = qc + ) + } yield "fail with the right exception" |: { + result match { + case Left(ex: BlockSynchronizer.DownloadFailedException[_]) => + true + case _ => + false + } + } + simulate(1.minute)(test) + } } diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala index d825950d..b409d46e 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala @@ -33,6 +33,7 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { genInitialState, genHash } + import ViewSynchronizer.FederationStatus /** Projected responses in each round from every federation member. */ type Responses = Vector[Map[TestAgreement.PKey, TestResponse]] @@ -276,7 +277,7 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { responseCounter <- fixture.responseCounterRef.get } yield { val statusProps = status match { - case Right(status) => + case Right(FederationStatus(status, sources)) => "status" |: all( "quorum" |: hasQuorum, "reports polls each round" |: @@ -285,7 +286,8 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { pollSizes.last >= quorumSize && pollSizes.init.forall(_ < quorumSize), "reports all invalid" |: - invalidEventCount == invalidResponseCount + invalidEventCount == invalidResponseCount, + "returns sources" |: sources.toVector.size >= quorumSize ) case Left(ex: TimeoutException) => From 6099f127063dd000eef5e4b8608036932176428f Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Wed, 2 Jun 2021 10:10:52 +0100 Subject: [PATCH 36/48] PM-3063: Enable additional compiler warnings (#48) * PM-3063: Rename shadowing. * PM-3063: Enable warning for unused local variables. 
* PM-3063: Warn on unused pattern matching
---
 build.sc                                      |  2 ++
 .../checkpointing/models/RLPCodecsSpec.scala  |  2 +-
 .../consensus/basic/ProtocolState.scala       |  4 +--
 .../consensus/basic/ProtocolStateProps.scala  |  6 +---
 .../hotstuff/service/ConsensusService.scala   | 31 +++++++++----------
 .../hotstuff/service/SyncService.scala        |  2 +-
 .../service/sync/BlockSynchronizer.scala      |  4 +--
 .../service/storage/BlockStorageProps.scala   |  2 +-
 .../storage/ViewStateStorageProps.scala       |  1 -
 .../service/sync/BlockSynchronizerProps.scala |  2 +-
 .../service/sync/ViewSynchronizerProps.scala  |  6 ++--
 ...onnectionManagerWithMockProviderSpec.scala |  2 +-
 ...ctionManagerWithScalanetProviderSpec.scala |  4 +--
 .../metronome/rocksdb/RocksDBStoreProps.scala |  4 +--
 14 files changed, 33 insertions(+), 39 deletions(-)

diff --git a/build.sc b/build.sc
index 01de43f3..d6b46071 100644
--- a/build.sc
+++ b/build.sc
@@ -102,6 +102,8 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule {
       "-Xlint:unsound-match",
       "-Ywarn-inaccessible",
       "-Ywarn-unused-import",
+      "-Ywarn-unused:locals",
+      "-Ywarn-unused:patvars",
       "-Ypartial-unification", // Required for the `>>` syntax.
       "-language:higherKinds",
       "-language:postfixOps"
diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala
index 01328a29..40a1d5de 100644
--- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala
+++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala
@@ -36,7 +36,7 @@ class RLPCodecsSpec extends AnyFlatSpec with Matchers {
         }
       case (a: RLPValue, b: RLPValue) =>
         a.bytes.sameElements(b.bytes)
-      case other =>
+      case _ =>
         false
     }
   }
diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala
index b69138fd..cfe75eb7 100644
--- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala
+++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolState.scala
@@ -133,14 +133,14 @@ case class ProtocolState[A <: Agreement: Block: Signing](
       case _ if !federation.contains(e.sender) =>
         Left(NotFromFederation(e))
 
-      case m: LeaderMessage[_] if e.sender != currLeader =>
+      case _: LeaderMessage[_] if e.sender != currLeader =>
         Left(NotFromLeader(e, currLeader))
 
       case m: ReplicaMessage[_]
           if !m.isInstanceOf[NewView[_]] && publicKey != currLeader =>
         Left(NotToLeader(e, currLeader))
 
-      case m: NewView[_] if publicKey != nextLeader =>
+      case _: NewView[_] if publicKey != nextLeader =>
         Left(NotToLeader(e, nextLeader))
 
       case m: Vote[_] if !Signing[A].validate(e.sender, m) =>
diff --git a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala
index f7557109..4594cd58 100644
--- a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala
+++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala
@@ -871,11 +871,7 @@ object ProtocolStateCommands extends Commands {
           "votes for the next phase" |: (state.phase == Phase.Decide || effects
             .collectFirst {
-              case Effect
-                    .SendMessage(
-                      recipient,
-                      Message.Vote(_, phase, _, _)
-                    ) =>
+              case Effect.SendMessage(_, Message.Vote(_, phase, _, _)) =>
                 phase == state.phase
             }
             .getOrElse(false)),
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala
index a167b7c6..c31ba4be 100644
--- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala
@@ -222,8 +222,7 @@
     eventQueue.poll.flatMap { event =>
       stateRef.get.flatMap { state =>
         val handle: F[Unit] = event match {
-          case e @ Event.NextView(viewNumber)
-              if viewNumber < state.viewNumber =>
+          case Event.NextView(viewNumber) if viewNumber < state.viewNumber =>
             ().pure[F]
 
           case e @ Event.NextView(viewNumber) =>
@@ -360,11 +359,10 @@
       effect match {
         case Effect.SendMessage(recipient, message)
             if recipient == publicKey =>
-          val event =
-            Validated(Event.MessageReceived(recipient, message))
+          val event = Event.MessageReceived(recipient, message)
 
-          state.handleMessage(event) match {
-            case Left(error) =>
+          state.handleMessage(validated(event)) match {
+            case Left(_) =>
               // This shouldn't happen, but let's just skip this event here and redeliver it later.
               loop(state, effectQueue, effect :: asyncEffects)
 
@@ -437,7 +435,7 @@
             blockStorage.put(preparedBlock)
           }
 
-      case effect @ ExecuteBlocks(_, commitQC) =>
+      case effect @ ExecuteBlocks(_, _) =>
         // Each node may be at a different point in the chain, so how
         // long the executions take can vary. We could execute it in
         // the foreground here, but it may cause the node to lose its
@@ -456,16 +454,15 @@
 
   /** Execute blocks in order, updating persistent storage along the way. */
   private def executeBlocks: F[Unit] = {
-    blockExecutionQueue.poll.flatMap {
-      case Effect.ExecuteBlocks(lastExecutedBlockHash, commitQC) =>
-        // Retrieve the blocks from the storage from the last executed
-        // to the one in the Quorum Certificate and tell the application
-        // to execute them one by one. Update the persistent view state
-        // after reach execution to remember which blocks we have truly
-        // done.
-
-        // TODO (PM-3133): Execute block
-        ???
+    blockExecutionQueue.poll.flatMap { case Effect.ExecuteBlocks(_, _) =>
+      // Retrieve the blocks from the storage from the last executed
+      // to the one in the Quorum Certificate and tell the application
+      // to execute them one by one. Update the persistent view state
+      // after each execution to remember which blocks we have truly
+      // done.
+
+      // TODO (PM-3133): Execute block
+      ???
     } >> executeBlocks
   }
 
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala
index d7780629..8e2e6dd3 100644
--- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala
@@ -195,7 +195,7 @@ class SyncService[F[_]: Concurrent: ContextShift, N, A <: Agreement: Block](
           // We have already synced higher than this old StatusRequest.
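          // It is safe to drop the request: the loop below simply carries on
          // with the view number we have already synced to.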
loop(blockSync, lastSyncedViewNumber) - case Right(request @ SyncPipe.PrepareRequest(sender, prepare)) => + case Right(request @ SyncPipe.PrepareRequest(_, _)) => handlePrepareRequest(blockSync, request) >> loop(blockSync, lastSyncedViewNumber) diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala index 698458b7..3f5f1ee7 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizer.scala @@ -89,9 +89,9 @@ class BlockSynchronizer[F[_]: Sync: Timer, N, A <: Agreement: Block]( val otherSources = sources.filterNot(_ == publicKey).toList def loop( - alternatives: List[A#PKey] + sources: List[A#PKey] ): F[Either[DownloadFailedException[A], A#Block]] = { - alternatives match { + sources match { case Nil => new DownloadFailedException( quorumCertificate.blockHash, diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala index ea9ca284..b9d3f0da 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala @@ -256,7 +256,7 @@ object BlockStorageProps extends Properties("BlockStorage") { } property("getDescendants delete") = forAll(genSubTree) { - case (data, block, subTree) => + case (data, block, _) => val ds = data.store.getDescendants(block.id) val (deleted, ok) = ds.foldLeft((data.store, true)) { diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala index 96b9bd0b..c95dc5c9 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorageProps.scala @@ -53,7 +53,6 @@ object ViewStateStorageCommands extends Commands { def read[A]( f: TestViewStateStorage => KVStoreRead[Namespace, A] ): A = { - val b = scodec.bits.ByteVector.empty TestKVStoreState.compile(f(viewStateStorage)).run(store) } } diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala index 21ee7627..c019b32e 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala @@ -270,7 +270,7 @@ object BlockSynchronizerProps extends Properties("BlockSynchronizer") { ) } yield "fail with the right exception" |: { result match { - case Left(ex: BlockSynchronizer.DownloadFailedException[_]) => + case Left(_: BlockSynchronizer.DownloadFailedException[_]) => true case _ => false diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala 
b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala index b409d46e..f6c77df9 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala @@ -266,7 +266,7 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { } invalidEventCount = { - events.collect { case x: SyncEvent.InvalidStatus[_] => + events.collect { case _: SyncEvent.InvalidStatus[_] => }.size } @@ -277,7 +277,7 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { responseCounter <- fixture.responseCounterRef.get } yield { val statusProps = status match { - case Right(FederationStatus(status, sources)) => + case Right(FederationStatus(_, sources)) => "status" |: all( "quorum" |: hasQuorum, "reports polls each round" |: @@ -290,7 +290,7 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { "returns sources" |: sources.toVector.size >= quorumSize ) - case Left(ex: TimeoutException) => + case Left(_: TimeoutException) => "timeout" |: all( "no quorum" |: !hasQuorum, "empty polls" |: pollSizes.forall(_ < quorumSize), diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala index f02d9862..1609f436 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala @@ -168,7 +168,7 @@ class RemoteConnectionManagerWithMockProviderSpec it should "fail sending message to unknown peer" in customTestCaseResourceT( buildTestCaseWithNPeers(2) - ) { case (provider, manager, _) => + ) { case (_, manager, _) => val randomKey = getFakeRandomKey() for { sendResult <- manager.sendMessage(randomKey, MessageA(1)) diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala index 161a14eb..eb7bb72f 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithScalanetProviderSpec.scala @@ -43,7 +43,7 @@ class RemoteConnectionManagerWithScalanetProviderSpec reporter = UncaughtExceptionReporter { case ex: IllegalStateException if ex.getMessage.contains("executor not accepting a task") => - case ex: PeerGroup.ChannelBrokenException[_] => + case _: PeerGroup.ChannelBrokenException[_] => // Probably test already closed with some task running in the background. 
case ex => UncaughtExceptionReporter.default.reportFailure(ex) @@ -291,7 +291,7 @@ object RemoteConnectionManagerWithScalanetProviderSpec { def closeAllNodes: Task[Unit] = { nodes.get.flatMap { nodes => Task - .parTraverseUnordered(nodes.values) { case (node, _, _, release) => + .parTraverseUnordered(nodes.values) { case (_, _, _, release) => release } .void diff --git a/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreProps.scala b/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreProps.scala index e1790122..27eb7cd2 100644 --- a/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreProps.scala +++ b/metronome/rocksdb/test/src/io/iohk/metronome/rocksdb/RocksDBStoreProps.scala @@ -74,9 +74,9 @@ object RocksDBStoreProps extends Properties("RocksDBStore") { val prop2 = prog2.postCondition(state, Success(result2)) // The other should run second, on top of the changes from the first. val prop12 = prog12.postCondition(state, Success(result1 ++ result2)) - val prope1 = prog21.postCondition(state, Success(result2 ++ result1)) + val prop21 = prog21.postCondition(state, Success(result2 ++ result1)) - (prop1 && prop12) || (prop2 && prop1) + (prop1 && prop12) || (prop2 && prop21) } finally { destroySut(sut) } From cdb0e59d1497370b048a2588ecc09326e6caaf86 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Thu, 3 Jun 2021 17:32:13 +0100 Subject: [PATCH 37/48] PM-3146: Move Network to networking. (#49) * PM-3146: Move Network to networking. * PM-3146: Add fromRemoteConnectionManager --- .../hotstuff/service/ConsensusService.scala | 8 +- .../hotstuff/service/HotStuffService.scala | 5 +- .../metronome/hotstuff/service/Network.scala | 73 ---------------- .../hotstuff/service/SyncService.scala | 6 +- .../iohk/metronome/networking/Network.scala | 85 +++++++++++++++++++ .../metronome/networking}/NetworkSpec.scala | 27 ++---- 6 files changed, 104 insertions(+), 100 deletions(-) delete mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Network.scala create mode 100644 metronome/networking/src/io/iohk/metronome/networking/Network.scala rename metronome/{hotstuff/service/test/src/io/iohk/metronome/hotstuff/service => networking/test/src/io/iohk/metronome/networking}/NetworkSpec.scala (77%) diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala index c31ba4be..269b25b0 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala @@ -24,7 +24,7 @@ import io.iohk.metronome.hotstuff.service.storage.{ ViewStateStorage } import io.iohk.metronome.hotstuff.service.tracing.ConsensusTracers -import io.iohk.metronome.networking.ConnectionHandler +import io.iohk.metronome.networking.{ConnectionHandler, Network} import io.iohk.metronome.storage.KVStoreRunner import monix.catnap.ConcurrentQueue import scala.annotation.tailrec @@ -41,7 +41,7 @@ class ConsensusService[ A <: Agreement: Block: Signing ]( publicKey: A#PKey, - network: Network[F, A, Message[A]], + network: Network[F, A#PKey, Message[A]], appService: ApplicationService[F, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], @@ -545,7 +545,7 @@ object ConsensusService { A <: Agreement: Block: Signing ]( publicKey: A#PKey, - network: Network[F, A, Message[A]], + network: Network[F, A#PKey, Message[A]], 
appService: ApplicationService[F, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], @@ -585,7 +585,7 @@ object ConsensusService { A <: Agreement: Block: Signing ]( publicKey: A#PKey, - network: Network[F, A, Message[A]], + network: Network[F, A#PKey, Message[A]], appService: ApplicationService[F, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala index 66be53a1..df7c2bf0 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala @@ -22,6 +22,7 @@ import io.iohk.metronome.hotstuff.service.tracing.{ ConsensusTracers, SyncTracers } +import io.iohk.metronome.networking.Network import io.iohk.metronome.storage.KVStoreRunner object HotStuffService { @@ -32,7 +33,7 @@ object HotStuffService { N, A <: Agreement: Block: Signing ]( - network: Network[F, A, HotStuffMessage[A]], + network: Network[F, A#PKey, HotStuffMessage[A]], appService: ApplicationService[F, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], @@ -44,7 +45,7 @@ object HotStuffService { ): Resource[F, Unit] = for { (consensusNetwork, syncNetwork) <- Network - .splitter[F, A, HotStuffMessage[A], Message[A], SyncMessage[A]]( + .splitter[F, A#PKey, HotStuffMessage[A], Message[A], SyncMessage[A]]( network )( split = { diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Network.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Network.scala deleted file mode 100644 index 9c6f3892..00000000 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/Network.scala +++ /dev/null @@ -1,73 +0,0 @@ -package io.iohk.metronome.hotstuff.service - -import cats.effect.{Sync, Resource, Concurrent, ContextShift} -import io.iohk.metronome.hotstuff.consensus.basic.Agreement -import io.iohk.metronome.networking.ConnectionHandler.MessageReceived -import monix.tail.Iterant -import monix.catnap.ConcurrentQueue - -/** Network adapter for specialising messages. */ -trait Network[F[_], A <: Agreement, M] { - - /** Receive incoming messages from the network. */ - def incomingMessages: Iterant[F, MessageReceived[A#PKey, M]] - - /** Try sending a message to a federation member, if we are connected. */ - def sendMessage(recipient: A#PKey, message: M): F[Unit] -} - -object Network { - - /** Consume messges from a network and dispatch them either left or right, - * based on a splitter function. Combine messages the other way. 
- */ - def splitter[F[_]: Concurrent: ContextShift, A <: Agreement, M, L, R]( - network: Network[F, A, M] - )( - split: M => Either[L, R], - merge: Either[L, R] => M - ): Resource[F, (Network[F, A, L], Network[F, A, R])] = - for { - leftQueue <- makeQueue[F, A, L] - rightQueue <- makeQueue[F, A, R] - - _ <- Concurrent[F].background { - network.incomingMessages.mapEval { - case MessageReceived(from, message) => - split(message) match { - case Left(leftMessage) => - leftQueue.offer(MessageReceived(from, leftMessage)) - case Right(rightMessage) => - rightQueue.offer(MessageReceived(from, rightMessage)) - } - }.completedL - } - - leftNetwork = new SplitNetwork[F, A, L]( - leftQueue.poll, - (r, m) => network.sendMessage(r, merge(Left(m))) - ) - - rightNetwork = new SplitNetwork[F, A, R]( - rightQueue.poll, - (r, m) => network.sendMessage(r, merge(Right(m))) - ) - - } yield (leftNetwork, rightNetwork) - - private def makeQueue[F[_]: Concurrent: ContextShift, A <: Agreement, M] = - Resource.liftF { - ConcurrentQueue.unbounded[F, MessageReceived[A#PKey, M]](None) - } - - private class SplitNetwork[F[_]: Sync, A <: Agreement, M]( - poll: F[MessageReceived[A#PKey, M]], - send: (A#PKey, M) => F[Unit] - ) extends Network[F, A, M] { - override def incomingMessages: Iterant[F, MessageReceived[A#PKey, M]] = - Iterant.repeatEvalF(poll) - - def sendMessage(recipient: A#PKey, message: M) = - send(recipient, message) - } -} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala index 8e2e6dd3..d450baef 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala @@ -27,7 +27,7 @@ import io.iohk.metronome.hotstuff.service.sync.{ ViewSynchronizer } import io.iohk.metronome.hotstuff.service.tracing.SyncTracers -import io.iohk.metronome.networking.ConnectionHandler +import io.iohk.metronome.networking.{ConnectionHandler, Network} import io.iohk.metronome.storage.{KVStoreRunner, KVStore} import scala.util.control.NonFatal import scala.concurrent.duration._ @@ -45,7 +45,7 @@ import scala.reflect.ClassTag */ class SyncService[F[_]: Concurrent: ContextShift, N, A <: Agreement: Block]( publicKey: A#PKey, - network: Network[F, A, SyncMessage[A]], + network: Network[F, A#PKey, SyncMessage[A]], appService: ApplicationService[F, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], @@ -355,7 +355,7 @@ object SyncService { ]( publicKey: A#PKey, federation: Federation[A#PKey], - network: Network[F, A, SyncMessage[A]], + network: Network[F, A#PKey, SyncMessage[A]], appService: ApplicationService[F, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], diff --git a/metronome/networking/src/io/iohk/metronome/networking/Network.scala b/metronome/networking/src/io/iohk/metronome/networking/Network.scala new file mode 100644 index 00000000..9220ef3f --- /dev/null +++ b/metronome/networking/src/io/iohk/metronome/networking/Network.scala @@ -0,0 +1,85 @@ +package io.iohk.metronome.networking + +import cats.implicits._ +import cats.effect.{Sync, Resource, Concurrent, ContextShift} +import io.iohk.metronome.networking.ConnectionHandler.MessageReceived +import monix.tail.Iterant +import monix.catnap.ConcurrentQueue + +/** Network adapter for specializing messages. 
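+  *
+  * A minimal usage sketch (the `broadcast` helper and its parameters are
+  * illustrative, not part of this file; cats syntax is assumed in scope):
+  * {{{
+  * def broadcast[F[_]: Applicative, K, M](
+  *     network: Network[F, K, M]
+  * )(recipients: List[K], message: M): F[Unit] =
+  *   recipients.traverse_(network.sendMessage(_, message))
+  * }}}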
 */
+trait Network[F[_], K, M] {
+
+  /** Receive incoming messages from the network. */
+  def incomingMessages: Iterant[F, MessageReceived[K, M]]
+
+  /** Try sending a message to a federation member, if we are connected. */
+  def sendMessage(recipient: K, message: M): F[Unit]
+}
+
+object Network {
+
+  def fromRemoteConnnectionManager[F[_]: Sync, K, M](
+      manager: RemoteConnectionManager[F, K, M]
+  ): Network[F, K, M] = new Network[F, K, M] {
+    override def incomingMessages =
+      manager.incomingMessages
+
+    override def sendMessage(recipient: K, message: M) =
+      // Not returning an error if we are trying to send to someone no longer connected,
+      // this should be handled transparently, delivery is best-effort.
+      manager.sendMessage(recipient, message).void
+  }
+
+  /** Consume messages from a network and dispatch them either left or right,
+    * based on a splitter function. Combine messages the other way.
+    */
+  def splitter[F[_]: Concurrent: ContextShift, K, M, L, R](
+      network: Network[F, K, M]
+  )(
+      split: M => Either[L, R],
+      merge: Either[L, R] => M
+  ): Resource[F, (Network[F, K, L], Network[F, K, R])] =
+    for {
+      leftQueue <- makeQueue[F, K, L]
+      rightQueue <- makeQueue[F, K, R]
+
+      _ <- Concurrent[F].background {
+        network.incomingMessages.mapEval {
+          case MessageReceived(from, message) =>
+            split(message) match {
+              case Left(leftMessage) =>
+                leftQueue.offer(MessageReceived(from, leftMessage))
+              case Right(rightMessage) =>
+                rightQueue.offer(MessageReceived(from, rightMessage))
+            }
+        }.completedL
+      }
+
+      leftNetwork = new SplitNetwork[F, K, L](
+        leftQueue.poll,
+        (r, m) => network.sendMessage(r, merge(Left(m)))
+      )
+
+      rightNetwork = new SplitNetwork[F, K, R](
+        rightQueue.poll,
+        (r, m) => network.sendMessage(r, merge(Right(m)))
+      )
+
+    } yield (leftNetwork, rightNetwork)
+
+  private def makeQueue[F[_]: Concurrent: ContextShift, K, M] =
+    Resource.liftF {
+      ConcurrentQueue.unbounded[F, MessageReceived[K, M]](None)
+    }
+
+  private class SplitNetwork[F[_]: Sync, K, M](
+      poll: F[MessageReceived[K, M]],
+      send: (K, M) => F[Unit]
+  ) extends Network[F, K, M] {
+    override def incomingMessages: Iterant[F, MessageReceived[K, M]] =
+      Iterant.repeatEvalF(poll)
+
+    def sendMessage(recipient: K, message: M) =
+      send(recipient, message)
+  }
+}
diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/NetworkSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/NetworkSpec.scala
similarity index 77%
rename from metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/NetworkSpec.scala
rename to metronome/networking/test/src/io/iohk/metronome/networking/NetworkSpec.scala
index 82145acc..dbf04bdd 100644
--- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/NetworkSpec.scala
+++ b/metronome/networking/test/src/io/iohk/metronome/networking/NetworkSpec.scala
@@ -1,14 +1,13 @@
-package io.iohk.metronome.hotstuff.service
+package io.iohk.metronome.networking
 
 import cats.effect.Resource
 import cats.effect.concurrent.Ref
 import monix.eval.Task
+import monix.tail.Iterant
 import monix.execution.Scheduler.Implicits.global
 import org.scalatest.flatspec.AsyncFlatSpec
 import org.scalatest.matchers.should.Matchers
-import io.iohk.metronome.hotstuff.consensus.basic.Agreement
 import io.iohk.metronome.networking.ConnectionHandler.MessageReceived
-import monix.tail.Iterant
 
 class NetworkSpec extends AsyncFlatSpec with Matchers {
 
   sealed trait TestMessage
   case class TestFoo(foo: String)
extends TestMessage case class TestBar(bar: Int) extends TestMessage - object TestAgreement extends Agreement { - override type Block = Nothing - override type Hash = Nothing - override type PSig = Nothing - override type GSig = Nothing - override type PKey = String - override type SKey = Nothing - } - type TestAgreement = TestAgreement.type + type TestKey = String - type TestKeyAndMessage = (TestAgreement.PKey, TestMessage) - type TestMessageReceived = MessageReceived[TestAgreement.PKey, TestMessage] + type TestKeyAndMessage = (TestKey, TestMessage) + type TestMessageReceived = MessageReceived[TestKey, TestMessage] class TestNetwork( outbox: Vector[TestKeyAndMessage], val inbox: Ref[Task, Vector[ - MessageReceived[TestAgreement.PKey, TestMessage] + MessageReceived[TestKey, TestMessage] ]] - ) extends Network[Task, TestAgreement, TestMessage] { + ) extends Network[Task, TestKey, TestMessage] { override def incomingMessages: Iterant[Task, TestMessageReceived] = Iterant.fromIndexedSeq { @@ -44,7 +35,7 @@ class NetworkSpec extends AsyncFlatSpec with Matchers { } override def sendMessage( - recipient: TestAgreement.PKey, + recipient: TestKey, message: TestMessage ): Task[Unit] = inbox.update(_ :+ MessageReceived(recipient, message)) @@ -68,7 +59,7 @@ class NetworkSpec extends AsyncFlatSpec with Matchers { val resources = for { network <- Resource.liftF(TestNetwork(messages)) (fooNetwork, barNetwork) <- Network - .splitter[Task, TestAgreement, TestMessage, String, Int](network)( + .splitter[Task, TestKey, TestMessage, String, Int](network)( split = { case TestFoo(msg) => Left(msg) case TestBar(msg) => Right(msg) From 415b8ee385c77ddd7c44df013ed9642b7321e421 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Fri, 11 Jun 2021 12:56:38 +0100 Subject: [PATCH 38/48] PM-3241: Fix RocksDB locking to be synchronous. (#43) * PM-3241: Fix RocksDB locking to be synchronous. * PM-3241: Remove debug decoration. * PM-3105: Fix ledger storage test. * FIX: Use withLockUpgrade in nonBatchingCompiler. --- .../iohk/metronome/rocksdb/RocksDBStore.scala | 184 ++++++++++++------ 1 file changed, 124 insertions(+), 60 deletions(-) diff --git a/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala b/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala index 005c8306..8134ea25 100644 --- a/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala +++ b/metronome/rocksdb/src/io/iohk/metronome/rocksdb/RocksDBStore.scala @@ -3,8 +3,7 @@ package io.iohk.metronome.rocksdb import cats._ import cats.implicits._ import cats.data.ReaderT -import cats.effect.{Resource, Sync} -import cats.free.Free.liftF +import cats.effect.{Resource, Sync, ContextShift, Blocker} import io.iohk.metronome.storage.{ KVStore, KVStoreOp, @@ -44,9 +43,10 @@ import scala.annotation.nowarn * locking could be performed in their respective middle-layers, before they forward the * query for execution to this class. */ -class RocksDBStore[F[_]: Sync]( - db: RocksDBStore.DBSupport[F], - lock: RocksDBStore.LockSupport[F], +class RocksDBStore[F[_]: Sync: ContextShift]( + db: RocksDBStore.DBSupport, + lock: RocksDBStore.LockSupport, + blocker: Blocker, handles: Map[RocksDBStore.Namespace, ColumnFamilyHandle] ) { @@ -62,7 +62,7 @@ class RocksDBStore[F[_]: Sync]( // Type aliases to support the `~>` transformation with types that // only have 1 generic type argument `A`. 
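  // For example, `KVNamespacedOp ~> Eval` below only type-checks because the
  // alias fixes the `Namespace` parameter of the two-parameter `KVStoreOp[N, A]`,
  // leaving a single type hole `A`, which is the shape `~>` expects.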
type Batch[A] = - ({ type L[A] = ReaderT[F, BatchEnv, A] })#L[A] + ({ type L[A] = ReaderT[Eval, BatchEnv, A] })#L[A] type KVNamespacedOp[A] = ({ type L[A] = KVStoreOp[Namespace, A] })#L[A] @@ -71,25 +71,23 @@ class RocksDBStore[F[_]: Sync]( ({ type L[A] = KVStoreReadOp[Namespace, A] })#L[A] /** Execute the accumulated write operations in a batch. */ - private val writeBatch: ReaderT[F, BatchEnv, Unit] = + private val writeBatch: ReaderT[Eval, BatchEnv, Unit] = ReaderT { batch => if (batch.hasPut() || batch.hasDelete()) db.write(batch) >> - Sync[F].delay { - batch.clear() - } + Eval.always(batch.clear()) else - ().pure[F] + ().pure[Eval] } /** Execute one `Get` operation. */ - private def read[K, V](op: Get[Namespace, K, V]): F[Option[V]] = { + private def get[K, V](op: Get[Namespace, K, V]): Eval[Option[V]] = { for { kbs <- encode(op.key)(op.keyEncoder) - mvbs <- db.read(handles(op.namespace), kbs) + mvbs <- db.get(handles(op.namespace), kbs) mv <- mvbs match { case None => - none.pure[F] + none.pure[Eval] case Some(bytes) => decode(bytes)(op.valueDecoder).map(_.some) @@ -118,7 +116,7 @@ class RocksDBStore[F[_]: Sync]( case op @ Get(_, _) => // Execute any pending deletes and puts before performing the read. - writeBatch >> ReaderT.liftF(read(op)) + writeBatch >> ReaderT.liftF(get(op)) case op @ Delete(n, k) => ReaderT { batch => @@ -130,27 +128,39 @@ class RocksDBStore[F[_]: Sync]( } } - /** Intended for reads, with fallback to writes. */ - private val nonBatchingCompiler: KVNamespacedOp ~> F = - new (KVNamespacedOp ~> F) { - def apply[A](fa: KVNamespacedOp[A]): F[A] = + /** Intended for reads, with fallback to writes executed individually. */ + private val nonBatchingCompiler: KVNamespacedOp ~> Eval = + new (KVNamespacedOp ~> Eval) { + def apply[A](fa: KVNamespacedOp[A]): Eval[A] = fa match { case op @ Get(_, _) => - read(op) - case op => - lock.withLockUpgrade { - runWithBatchingNoLock { - liftF[KVNamespacedOp, A](op) - } - } + get(op) + + case op @ Put(n, k, v) => + for { + kbs <- encode(k)(op.keyEncoder) + vbs <- encode(v)(op.valueEncoder) + _ <- lock.withLockUpgrade(db.put(handles(n), kbs, vbs)) + } yield () + + case op @ Delete(n, k) => + for { + kbs <- encode(k)(op.keyEncoder) + _ <- lock.withLockUpgrade(db.delete(handles(n), kbs)) + } yield () } } - private def encode[T](value: T)(implicit ev: Encoder[T]): F[Array[Byte]] = - Sync[F].fromTry(ev.encode(value).map(_.toByteArray).toTry) + private def encode[T](value: T)(implicit ev: Encoder[T]): Eval[Array[Byte]] = + Eval.always(ev.encode(value).map(_.toByteArray).require) + + private def decode[T](bytes: Array[Byte])(implicit ev: Decoder[T]): Eval[T] = + Eval.always(ev.decodeValue(BitVector(bytes)).require) - private def decode[T](bytes: Array[Byte])(implicit ev: Decoder[T]): F[T] = - Sync[F].fromTry(ev.decodeValue(BitVector(bytes)).toTry) + private def block[A](evalA: Eval[A]): F[A] = + ContextShift[F].blockOn(blocker) { + Sync[F].delay(evalA.value) + } /** Mostly meant for writing batches atomically. * @@ -164,11 +174,12 @@ class RocksDBStore[F[_]: Sync]( */ def runWithBatchingNoLock[A]( program: KVStore[Namespace, A] - ): DBQuery[F, A] = { - autoCloseableR(new WriteBatch()).use { - (program.foldMap(batchingCompiler) <* writeBatch).run + ): DBQuery[F, A] = + autoCloseableR(new WriteBatch()).use { batch => + block { + (program.foldMap(batchingCompiler) <* writeBatch).run(batch) + } } - } /** Same as `runWithBatchingNoLock`, but write lock is taken out * to make sure concurrent reads are not affected. 
@@ -179,8 +190,12 @@ class RocksDBStore[F[_]: Sync]( * point to is retrieved. */ def runWithBatching[A](program: KVStore[Namespace, A]): DBQuery[F, A] = - lock.withWriteLock { - runWithBatchingNoLock(program) + autoCloseableR(new WriteBatch()).use { batch => + block { + lock.withWriteLock { + (program.foldMap(batchingCompiler) <* writeBatch).run(batch) + } + } } /** Similar to `runWithBatching` in that it can contain both reads @@ -194,8 +209,10 @@ class RocksDBStore[F[_]: Sync]( * threads to get in before the write statement runs. */ def runWithoutBatching[A](program: KVStore[Namespace, A]): DBQuery[F, A] = - lock.withReadLock { - program.foldMap(nonBatchingCompiler) + block { + lock.withReadLock { + program.foldMap(nonBatchingCompiler) + } } /** For strictly read-only operations. @@ -204,7 +221,9 @@ class RocksDBStore[F[_]: Sync]( * where reads don't need isolation from writes. */ def runReadOnlyNoLock[A](program: KVStoreRead[Namespace, A]): DBQuery[F, A] = - kvs.lift(program).foldMap(nonBatchingCompiler) + block { + kvs.lift(program).foldMap(nonBatchingCompiler) + } /** Same as `runReadOnlyNoLock`, but a read lock is taken out * to make sure concurrent writes cannot affect the results. @@ -213,8 +232,10 @@ class RocksDBStore[F[_]: Sync]( * updates are happening. */ def runReadOnly[A](program: KVStoreRead[Namespace, A]): DBQuery[F, A] = - lock.withReadLock { - runReadOnlyNoLock(program) + block { + lock.withReadLock { + kvs.lift(program).foldMap(nonBatchingCompiler) + } } } @@ -263,7 +284,7 @@ object RocksDBStore { ) } - def apply[F[_]: Sync]( + def apply[F[_]: Sync: ContextShift]( config: Config, namespaces: Seq[Namespace] ): Resource[F, RocksDBStore[F]] = { @@ -348,9 +369,13 @@ object RocksDBStore { s" Expected ${allNamespaces.size}; got ${columnFamilyHandleBuffer.size}." ) + // Use a cached thread pool for blocking on locks and IO. + blocker <- Blocker[F] + store = new RocksDBStore[F]( new DBSupport(db, readOpts, writeOptions), new LockSupport(new ReentrantReadWriteLock()), + blocker, columnFamilyHandles ) @@ -391,8 +416,15 @@ object RocksDBStore { ): Resource[F, R] = Resource.fromAutoCloseable[F, R](Sync[F].delay(mk)) - /** Help run reads and writes isolated from each other. */ - private class LockSupport[F[_]: Sync](rwlock: ReentrantReadWriteLock) { + /** Help run reads and writes isolated from each other. + * + * Uses a `ReentrantReadWriteLock` so has to make sure that + * all operations are carried out on the same thread, that's + * why it's working with `Eval` and not `F`. + */ + private class LockSupport( + rwlock: ReentrantReadWriteLock + ) { // Batches can interleave multiple reads (and writes); // to make sure they see a consistent view, writes are @@ -400,16 +432,24 @@ object RocksDBStore { // read an ID, then retrieve the record from a different // collection, we can be sure it hasn't been deleted in // between the two operations. 
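    // For example, a composite read such as `getId(key).flatMap(getRecord)`
    // (hypothetical helpers, shown only for illustration) executed under the
    // read lock cannot observe a concurrent write batch deleting the record
    // between the two lookups.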
-    private val lockRead    = Sync[F].delay(rwlock.readLock().lock())
-    private val unlockRead  = Sync[F].delay(rwlock.readLock().unlock())
-    private val lockWrite   = Sync[F].delay(rwlock.writeLock().lock())
-    private val unlockWrite = Sync[F].delay(rwlock.writeLock().unlock())
+    private val lockRead = Eval.always {
+      rwlock.readLock().lock()
+    }
+    private val unlockRead = Eval.always {
+      rwlock.readLock().unlock()
+    }
+    private val lockWrite = Eval.always {
+      rwlock.writeLock().lock()
+    }
+    private val unlockWrite = Eval.always {
+      rwlock.writeLock().unlock()
+    }
 
-    def withReadLock[A](fa: F[A]): F[A] =
-      Sync[F].bracket(lockRead)(_ => fa)(_ => unlockRead)
+    def withReadLock[A](evalA: Eval[A]): Eval[A] =
+      bracket(lockRead, unlockRead)(evalA)
 
-    def withWriteLock[A](fa: F[A]): F[A] =
-      Sync[F].bracket(lockWrite)(_ => fa)(_ => unlockWrite)
+    def withWriteLock[A](evalA: Eval[A]): Eval[A] =
+      bracket(lockWrite, unlockWrite)(evalA)
 
     /*
      * In case there's a write operation among the reads and we haven't
@@ -425,31 +465,55 @@
      * See here for the rules of (non-)upgrading and downgrading:
      * https://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html
      */
-    def withLockUpgrade[A](fa: F[A]): F[A] =
-      Sync[F].bracket {
-        unlockRead >> lockWrite
-      }(_ => fa) { _ =>
+    def withLockUpgrade[A](fa: Eval[A]): Eval[A] =
+      bracket(
+        unlockRead >> lockWrite,
         lockRead >> unlockWrite
-      }
+      )(fa)
+
+    private def bracket[A](lock: Eval[Unit], unlock: Eval[Unit])(
+        evalA: Eval[A]
+    ): Eval[A] = Eval.always {
+      try {
+        (lock >> evalA).value
+      } finally {
+        unlock.value
      }
+    }
  }
 
  /** Wrap a RocksDB instance. */
-  private class DBSupport[F[_]: Sync](
+  private class DBSupport(
      db: RocksDB,
      readOptions: ReadOptions,
      writeOptions: WriteOptions
  ) {
-    def read(
+    def get(
        handle: ColumnFamilyHandle,
        key: Array[Byte]
-    ): F[Option[Array[Byte]]] = Sync[F].delay {
+    ): Eval[Option[Array[Byte]]] = Eval.always {
      Option(db.get(handle, readOptions, key))
    }
 
    def write(
        batch: WriteBatch
-    ): F[Unit] = Sync[F].delay {
+    ): Eval[Unit] = Eval.always {
      db.write(writeOptions, batch)
    }
+
+    def put(
+        handle: ColumnFamilyHandle,
+        key: Array[Byte],
+        value: Array[Byte]
+    ): Eval[Unit] = Eval.always {
+      db.put(handle, writeOptions, key, value)
+    }
+
+    def delete(
+        handle: ColumnFamilyHandle,
+        key: Array[Byte]
+    ): Eval[Unit] = Eval.always {
+      db.delete(handle, writeOptions, key)
+    }
  }
}

From 21de7f1bb0de82161a19f53130f7e51b69253a76 Mon Sep 17 00:00:00 2001
From: Akosh Farkash
Date: Fri, 11 Jun 2021 13:00:48 +0100
Subject: [PATCH 39/48] PM-3129: Add LocalConnectionManager on top of the remote one.
(#50) --- .../networking/LocalConnectionManager.scala | 57 +++++++++++++++++++ .../networking/RemoteConnectionManager.scala | 3 +- 2 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 metronome/networking/src/io/iohk/metronome/networking/LocalConnectionManager.scala diff --git a/metronome/networking/src/io/iohk/metronome/networking/LocalConnectionManager.scala b/metronome/networking/src/io/iohk/metronome/networking/LocalConnectionManager.scala new file mode 100644 index 00000000..7b117542 --- /dev/null +++ b/metronome/networking/src/io/iohk/metronome/networking/LocalConnectionManager.scala @@ -0,0 +1,57 @@ +package io.iohk.metronome.networking + +import cats.implicits._ +import cats.effect.{Concurrent, Timer, Resource, ContextShift} +import java.net.InetSocketAddress +import monix.eval.{TaskLift, TaskLike} +import monix.tail.Iterant +import scodec.Codec + +trait LocalConnectionManager[F[_], K, M] { + def isConnected: F[Boolean] + def incomingMessages: Iterant[F, M] + def sendMessage( + message: M + ): F[Either[ConnectionHandler.ConnectionAlreadyClosedException[K], Unit]] +} + +/** Connect to a single local process and keep the connection alive. */ +object LocalConnectionManager { + + def apply[ + F[_]: Concurrent: TaskLift: TaskLike: Timer: ContextShift, + K: Codec, + M: Codec + ]( + encryptedConnectionsProvider: EncryptedConnectionProvider[F, K, M], + targetKey: K, + targetAddress: InetSocketAddress, + retryConfig: RemoteConnectionManager.RetryConfig + )(implicit + tracers: NetworkTracers[F, K, M] + ): Resource[F, LocalConnectionManager[F, K, M]] = { + for { + remoteConnectionManager <- RemoteConnectionManager[F, K, M]( + encryptedConnectionsProvider, + RemoteConnectionManager.ClusterConfig[K]( + Set(targetKey -> targetAddress) + ), + retryConfig + ) + localConnectionManager = new LocalConnectionManager[F, K, M] { + override def isConnected = + remoteConnectionManager.getAcquiredConnections.map( + _.contains(targetKey) + ) + + override def incomingMessages = + remoteConnectionManager.incomingMessages.map { + case ConnectionHandler.MessageReceived(_, m) => m + } + + override def sendMessage(message: M) = + remoteConnectionManager.sendMessage(targetKey, message) + } + } yield localConnectionManager + } +} diff --git a/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala b/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala index 75357bc8..edbe930d 100644 --- a/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala +++ b/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala @@ -297,7 +297,7 @@ object RemoteConnectionManager { * @param retryConfig retry configuration for outgoing connections (incoming connections are not retried) */ def apply[ - F[_]: Concurrent: TaskLift: TaskLike: Timer, + F[_]: Concurrent: TaskLift: TaskLike: Timer: ContextShift, K: Codec, M: Codec ]( @@ -305,7 +305,6 @@ object RemoteConnectionManager { clusterConfig: ClusterConfig[K], retryConfig: RetryConfig )(implicit - cs: ContextShift[F], tracers: NetworkTracers[F, K, M] ): Resource[F, RemoteConnectionManager[F, K, M]] = { for { From 806d62182b16c4e354aac927d392d6ea94b79084 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Mon, 14 Jun 2021 12:32:50 +0100 Subject: [PATCH 40/48] PM-3092: Connection conflict resolution based on ephemeral port ordering (#52) * PM-3092: Upgraded scalanet. Using the ephemeral port to resolve conflicts if the connection is less than 1 second old. 
* PM-3092: Handle the edge case of ephemeral ports being equal. * PM-3092: Move connection replacement to a separate method. --- build.sc | 2 +- .../networking/ConnectionHandler.scala | 103 ++++++++++++++---- .../EncryptedConnectionProvider.scala | 1 + .../metronome/networking/NetworkEvent.scala | 19 +++- .../metronome/networking/NetworkTracers.scala | 15 ++- .../networking/RemoteConnectionManager.scala | 24 ++-- .../ScalanetConnectionProvider.scala | 30 +++-- .../networking/ConnectionHandlerSpec.scala | 5 +- .../MockEncryptedConnectionProvider.scala | 3 +- ...onnectionManagerWithMockProviderSpec.scala | 4 +- 10 files changed, 157 insertions(+), 49 deletions(-) diff --git a/build.sc b/build.sc index d6b46071..ae73aaa1 100644 --- a/build.sc +++ b/build.sc @@ -23,7 +23,7 @@ object VersionOf { val rocksdb = "6.15.2" val scalacheck = "1.15.2" val scalatest = "3.2.5" - val scalanet = "0.7.0" + val scalanet = "0.8.0" val shapeless = "2.3.3" val slf4j = "1.7.30" val `scodec-core` = "1.11.7" diff --git a/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala b/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala index 342a7f87..adaa9c06 100644 --- a/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala +++ b/metronome/networking/src/io/iohk/metronome/networking/ConnectionHandler.scala @@ -22,13 +22,16 @@ import monix.tail.Iterant import java.net.InetSocketAddress import scala.util.control.NoStackTrace +import scala.concurrent.duration._ +import java.time.Instant class ConnectionHandler[F[_]: Concurrent, K, M]( connectionQueue: ConcurrentQueue[F, ConnectionWithConflictFlag[F, K, M]], connectionsRegister: ConnectionsRegister[F, K, M], messageQueue: ConcurrentQueue[F, MessageReceived[K, M]], cancelToken: Deferred[F, Unit], - connectionFinishCallback: FinishedConnection[K] => F[Unit] + connectionFinishCallback: FinishedConnection[K] => F[Unit], + oppositeConnectionOverlap: FiniteDuration )(implicit tracers: NetworkTracers[F, K, M]) { private val numberOfRunningConnections = AtomicInt(0) @@ -168,16 +171,14 @@ class ConnectionHandler[F[_]: Concurrent, K, M]( if (conflictHappened) { connectionsRegister.registerIfAbsent(newConnection).flatMap { case Some(oldConnection) => - newConnection.connectionDirection match { - case HandledConnection.IncomingConnection => - // even though we have connection to this peer, they are calling us. One of the reason may be, that they failed and - // we did not notice. Lets try to replace old connection with new one. - replaceConnection(newConnection, oldConnection) - - case HandledConnection.OutgoingConnection => - // for some reason we were calling while we already have connection, most probably we have received incoming - // connection during call. Close this new connection, and keep the old one - tracers.discarded(newConnection) >> newConnection.close.as(none) + val replace = shouldReplaceConnection( + newConnection = newConnection, + oldConnection = oldConnection + ) + if (replace) { + replaceConnection(newConnection, oldConnection) + } else { + tracers.discarded(newConnection) >> newConnection.close.as(none) } case None => // in the meantime between detection of conflict, and processing it old connection has dropped. Register new one @@ -188,6 +189,39 @@ class ConnectionHandler[F[_]: Concurrent, K, M]( } } + /** Decide whether a new connection to/from a peer should replace an old connection from/to the same peer in case of a conflict. 
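+    *
+    * A worked example with made-up ports: if both peers dialed each other within
+    * `oppositeConnectionOverlap`, one socket from ephemeral port 50123 and the
+    * other from 50200, then both ends observe the same two ephemeral addresses,
+    * so both keep the 50123 connection and close the other, instead of each
+    * closing a different one based on direction.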
 */
+  private def shouldReplaceConnection(
+      newConnection: HandledConnection[F, K, M],
+      oldConnection: HandledConnection[F, K, M]
+  ): Boolean = {
+    if (oldConnection.age() < oppositeConnectionOverlap) {
+      // The old connection has just been created recently, yet we have a new connection already.
+      // Most likely the two nodes opened connections to each other around the same time, and if
+      // we close one of the connections based on direction, the node opposite will
+      // likely be doing the same to the _other_ connection, symmetrically.
+      // Instead, let's try to establish some ordering between the two, so the same connection
+      // is chosen as the victim on both sides.
+      val (newPort, oldPort) = (
+        newConnection.ephemeralAddress.getPort,
+        oldConnection.ephemeralAddress.getPort
+      )
+      newPort < oldPort || newPort == oldPort &&
+        newConnection.ephemeralAddress.getHostName < oldConnection.ephemeralAddress.getHostName
+    } else {
+      newConnection.connectionDirection match {
+        case HandledConnection.IncomingConnection =>
+          // Even though we have a connection to this peer, they are calling us. One of the reasons may be
+          // that they failed and we did not notice. Let's try to replace the old connection with the new one.
+          true
+
+        case HandledConnection.OutgoingConnection =>
+          // For some reason we were calling while we already have a connection; most probably we
+          // received an incoming connection during the call. Close this new connection, and keep the old one.
+          false
+      }
+    }
+  }
+
   /** Safely replaces old connection from remote peer with new connection with same remote peer.
    *
    * 1. The callback for old connection will not be called. As from the perspective of outside world connection is never
@@ -335,8 +369,9 @@ object ConnectionHandler {
    *  for incoming messages of that connection
    *
    * @param key, key of remote node
-   * @param serverAddress, address of the server of remote node. In case of incoming connection it will be diffrent that
-   *                       underlyingConnection remoteAddress
+   * @param serverAddress, address of the server of remote node. In case of incoming connection it will be different than
+   *                       the underlyingConnection remoteAddress, because we will look up the remote address based on the
+   *                       `key` in the cluster configuration.
    * @param underlyingConnection, encrypted connection to send and receive messages
    */
  class HandledConnection[F[_]: Concurrent, K, M] private (
      val key: K,
      val serverAddress: InetSocketAddress,
      val connectionDirection: HandledConnectionDirection,
      underlyingConnection: EncryptedConnection[F, K, M],
      closeReason: Deferred[F, HandledConnectionCloseReason]
  ) {
+    private val createdAt = Instant.now()
+
+    def age(): FiniteDuration =
+      (Instant.now().toEpochMilli() - createdAt.toEpochMilli()).millis
+
+    /** For an incoming connection, this is the remote ephemeral address of the socket;
+      * for an outgoing connection, it is the remote server address.
+      */
+    def remoteAddress: InetSocketAddress =
+      underlyingConnection.remotePeerInfo._2
+
+    /** For an incoming connection, this is the local server address;
+      * for an outgoing connection, it is the local ephemeral address of the socket.
+      */
+    def localAddress: InetSocketAddress = underlyingConnection.localAddress
+
+    /** The client side address of the TCP socket.
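+      *
+      * E.g. for a connection dialed from 10.0.0.1:50123 to a server at
+      * 10.0.0.2:9090 (made-up addresses), both ends resolve this to
+      * 10.0.0.1:50123, which is what makes the ordering in
+      * `shouldReplaceConnection` symmetric.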
*/ + def ephemeralAddress: InetSocketAddress = + connectionDirection match { + case IncomingConnection => remoteAddress + case OutgoingConnection => localAddress + } + def sendMessage(m: M): F[Unit] = { underlyingConnection.sendMessage(m) } @@ -368,8 +426,7 @@ object ConnectionHandler { (ConnectionAlreadyDisconnected: ReplaceResult).pure[F] case Right(_) => underlyingConnection.close >> - request.waitForReplaceToFinish >> (ReplaceFinished: ReplaceResult) - .pure[F] + request.waitForReplaceToFinish.as(ReplaceFinished: ReplaceResult) } } } @@ -470,7 +527,8 @@ object ConnectionHandler { } private def buildHandler[F[_]: Concurrent: ContextShift, K, M]( - connectionFinishCallback: FinishedConnection[K] => F[Unit] + connectionFinishCallback: FinishedConnection[K] => F[Unit], + oppositeConnectionOverlap: FiniteDuration )(implicit tracers: NetworkTracers[F, K, M] ): F[ConnectionHandler[F, K, M]] = { @@ -485,7 +543,8 @@ object ConnectionHandler { acquiredConnections, messageQueue, cancelToken, - connectionFinishCallback + connectionFinishCallback, + oppositeConnectionOverlap ) } @@ -499,12 +558,18 @@ object ConnectionHandler { * @param connectionFinishCallback, callback to be called when connection is finished and get de-registered */ def apply[F[_]: Concurrent: ContextShift, K, M]( - connectionFinishCallback: FinishedConnection[K] => F[Unit] + connectionFinishCallback: FinishedConnection[K] => F[Unit], + oppositeConnectionOverlap: FiniteDuration )(implicit tracers: NetworkTracers[F, K, M] ): Resource[F, ConnectionHandler[F, K, M]] = { Resource - .make(buildHandler[F, K, M](connectionFinishCallback)) { handler => + .make( + buildHandler[F, K, M]( + connectionFinishCallback, + oppositeConnectionOverlap + ) + ) { handler => handler.shutdown } .flatMap { handler => diff --git a/metronome/networking/src/io/iohk/metronome/networking/EncryptedConnectionProvider.scala b/metronome/networking/src/io/iohk/metronome/networking/EncryptedConnectionProvider.scala index 3e0adda3..9e0ebbf2 100644 --- a/metronome/networking/src/io/iohk/metronome/networking/EncryptedConnectionProvider.scala +++ b/metronome/networking/src/io/iohk/metronome/networking/EncryptedConnectionProvider.scala @@ -8,6 +8,7 @@ import io.iohk.metronome.networking.EncryptedConnectionProvider.{ import java.net.InetSocketAddress trait EncryptedConnection[F[_], K, M] { + def localAddress: InetSocketAddress def remotePeerInfo: (K, InetSocketAddress) def sendMessage(m: M): F[Unit] def incomingMessage: F[Option[Either[ConnectionError, M]]] diff --git a/metronome/networking/src/io/iohk/metronome/networking/NetworkEvent.scala b/metronome/networking/src/io/iohk/metronome/networking/NetworkEvent.scala index ba8bc477..9b33f609 100644 --- a/metronome/networking/src/io/iohk/metronome/networking/NetworkEvent.scala +++ b/metronome/networking/src/io/iohk/metronome/networking/NetworkEvent.scala @@ -6,20 +6,27 @@ import java.net.InetSocketAddress sealed trait NetworkEvent[K, +M] object NetworkEvent { + import ConnectionHandler.HandledConnection.HandledConnectionDirection case class Peer[K](key: K, address: InetSocketAddress) /** The connection to/from the peer has been added to the register. */ - case class ConnectionRegistered[K](peer: Peer[K]) - extends NetworkEvent[K, Nothing] + case class ConnectionRegistered[K]( + peer: Peer[K], + direction: HandledConnectionDirection + ) extends NetworkEvent[K, Nothing] /** The connection to/from the peer has been closed and removed from the register. 
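With the direction now carried by the events, consumers can distinguish inbound from outbound connections. A toy sketch with stand-in types, not the project's actual `NetworkEvent` hierarchy:

```scala
object EventDirectionSketch extends App {
  // Toy stand-ins for the project's direction and event types.
  sealed trait Direction
  case object Incoming extends Direction
  case object Outgoing extends Direction

  case class Peer[K](key: K, address: String)

  sealed trait NetworkEvent[K]
  case class ConnectionRegistered[K](peer: Peer[K], direction: Direction)
      extends NetworkEvent[K]
  case class ConnectionDiscarded[K](peer: Peer[K], direction: Direction)
      extends NetworkEvent[K]

  // With the direction recorded, a log line can say which duplicate was
  // dropped: the one we dialled, or the one the peer opened towards us.
  def describe[K](event: NetworkEvent[K]): String =
    event match {
      case ConnectionRegistered(p, d) => s"registered $d connection with ${p.key}"
      case ConnectionDiscarded(p, d)  => s"discarded $d connection with ${p.key}"
    }

  println(describe(ConnectionDiscarded(Peer("alice", "10.0.0.2:40000"), Incoming)))
}
```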
*/ - case class ConnectionDeregistered[K](peer: Peer[K]) - extends NetworkEvent[K, Nothing] + case class ConnectionDeregistered[K]( + peer: Peer[K], + direction: HandledConnectionDirection + ) extends NetworkEvent[K, Nothing] /** We had two connections to/from the peer and discarded one of them. */ - case class ConnectionDiscarded[K](peer: Peer[K]) - extends NetworkEvent[K, Nothing] + case class ConnectionDiscarded[K]( + peer: Peer[K], + direction: HandledConnectionDirection + ) extends NetworkEvent[K, Nothing] /** Failed to establish connection to remote peer. */ case class ConnectionFailed[K]( diff --git a/metronome/networking/src/io/iohk/metronome/networking/NetworkTracers.scala b/metronome/networking/src/io/iohk/metronome/networking/NetworkTracers.scala index c463ae9c..0e75bb3f 100644 --- a/metronome/networking/src/io/iohk/metronome/networking/NetworkTracers.scala +++ b/metronome/networking/src/io/iohk/metronome/networking/NetworkTracers.scala @@ -36,13 +36,22 @@ object NetworkTracers { ConnectionUnknown((Peer.apply[K] _).tupled(conn.remotePeerInfo)) }, registered = tracer.contramap[HandledConnection[F, K, M]] { conn => - ConnectionRegistered(Peer(conn.key, conn.serverAddress)) + ConnectionRegistered( + Peer(conn.key, conn.serverAddress), + conn.connectionDirection + ) }, deregistered = tracer.contramap[HandledConnection[F, K, M]] { conn => - ConnectionDeregistered(Peer(conn.key, conn.serverAddress)) + ConnectionDeregistered( + Peer(conn.key, conn.serverAddress), + conn.connectionDirection + ) }, discarded = tracer.contramap[HandledConnection[F, K, M]] { conn => - ConnectionDiscarded(Peer(conn.key, conn.serverAddress)) + ConnectionDiscarded( + Peer(conn.key, conn.serverAddress), + conn.connectionDirection + ) }, failed = tracer.contramap[RemoteConnectionManager.ConnectionFailure[K]] { fail => diff --git a/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala b/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala index edbe930d..21973336 100644 --- a/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala +++ b/metronome/networking/src/io/iohk/metronome/networking/RemoteConnectionManager.scala @@ -70,7 +70,8 @@ object RemoteConnectionManager { initialDelay: FiniteDuration, backOffFactor: Long, maxDelay: FiniteDuration, - randomJitterConfig: RandomJitterConfig + randomJitterConfig: RandomJitterConfig, + oppositeConnectionOverlap: FiniteDuration ) object RetryConfig { @@ -124,10 +125,11 @@ object RemoteConnectionManager { import scala.concurrent.duration._ def default: RetryConfig = { RetryConfig( - 500.milliseconds, - 2, - 30.seconds, - RandomJitterConfig.defaultConfig + initialDelay = 500.milliseconds, + backOffFactor = 2, + maxDelay = 30.seconds, + randomJitterConfig = RandomJitterConfig.defaultConfig, + oppositeConnectionOverlap = 1.second ) } @@ -327,9 +329,15 @@ object RemoteConnectionManager { ) connectionsHandler <- ConnectionHandler.apply[F, K, M]( - // when each connection will finished it the callback will be called, and connection will be put to connections to acquire - // queue - handledConnectionFinisher.finish + // when each connection will finished it the callback will be called, + // and connection will be put to connections to acquire queue + handledConnectionFinisher.finish, + // A duration where we consider the possibilty that both nodes opened + // connections against each other at the same time, and they should try + // to determinstically pick the same one to close. 
After this time, + // we interpret duplicate connections as repairing a failure the other + // side has detected, but we haven't yet. + oppositeConnectionOverlap = retryConfig.oppositeConnectionOverlap ) _ <- acquireConnections( diff --git a/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala b/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala index ef44b620..07a5e5e2 100644 --- a/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala +++ b/metronome/networking/src/io/iohk/metronome/networking/ScalanetConnectionProvider.scala @@ -36,6 +36,10 @@ object ScalanetConnectionProvider { override def close: F[Unit] = underlyingChannelRelease + override val localAddress: InetSocketAddress = ( + underlyingChannel.from.address.inetSocketAddress + ) + override val remotePeerInfo: (K, InetSocketAddress) = ( channelKey, underlyingChannel.to.address.inetSocketAddress @@ -52,16 +56,25 @@ object ScalanetConnectionProvider { }) } - override def incomingMessage: F[Option[Either[ConnectionError, M]]] = { - TaskLift[F].apply(underlyingChannel.nextChannelEvent.map { + override def incomingMessage: F[Option[Either[ConnectionError, M]]] = + TaskLift[F].apply(nextNonIdleMessage) + + private val nextNonIdleMessage: Task[Option[Either[ConnectionError, M]]] = { + underlyingChannel.nextChannelEvent.flatMap { case Some(event) => event match { - case Channel.MessageReceived(m) => Some(Right(m)) - case Channel.UnexpectedError(e) => Some(Left(UnexpectedError(e))) - case Channel.DecodingError => Some(Left(DecodingError)) + case Channel.MessageReceived(m) => + Task.pure(Some(Right(m))) + case Channel.UnexpectedError(e) => + Task.pure(Some(Left(UnexpectedError(e)))) + case Channel.DecodingError => + Task.pure(Some(Left(DecodingError))) + case Channel.ChannelIdle(_, _) => + nextNonIdleMessage } - case None => None - }) + case None => + Task.pure(None) + } } } @@ -111,7 +124,8 @@ object ScalanetConnectionProvider { useNativeTlsImplementation, framingConfig, maxIncomingQueueSizePerPeer, - None + incomingConnectionsThrottling = None, + stalePeerDetectionConfig = None ) ) ) diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala index 46bbce7f..5c8a7588 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/ConnectionHandlerSpec.scala @@ -316,7 +316,10 @@ object ConnectionHandlerSpec { cb: FinishedConnection[ECPublicKey] => Task[Unit] = _ => Task(()) ): Resource[Task, ConnectionHandler[Task, ECPublicKey, TestMessage]] = { ConnectionHandler - .apply[Task, ECPublicKey, TestMessage](cb) + .apply[Task, ECPublicKey, TestMessage]( + cb, + oppositeConnectionOverlap = Duration.Zero + ) } def buildHandlerResourceWithCallbackCounter: Resource[ diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala b/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala index 06c8b0c0..d74f9975 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/MockEncryptedConnectionProvider.scala @@ -196,7 +196,8 @@ object MockEncryptedConnectionProvider { private val closeToken: TryableDeferred[Task, Unit], 
private val sentMessages: Ref[Task, List[TestMessage]], val remotePeerInfo: (ECPublicKey, InetSocketAddress) = - (getFakeRandomKey(), fakeLocalAddress) + (getFakeRandomKey(), fakeLocalAddress), + val localAddress: InetSocketAddress = fakeLocalAddress ) extends EncryptedConnection[Task, ECPublicKey, TestMessage] { override def close: Task[Unit] = { diff --git a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala index 1609f436..00fb2d16 100644 --- a/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala +++ b/metronome/networking/test/src/io/iohk/metronome/networking/RemoteConnectionManagerWithMockProviderSpec.scala @@ -298,9 +298,9 @@ object RemoteConnectionManagerWithMockProviderSpec { val noJitterConfig = RandomJitterConfig.buildJitterConfig(0).get val quickRetryConfig = - RetryConfig(50.milliseconds, 2, 2.seconds, noJitterConfig) + RetryConfig(50.milliseconds, 2, 2.seconds, noJitterConfig, Duration.Zero) val longRetryConfig: RetryConfig = - RetryConfig(5.seconds, 2, 20.seconds, noJitterConfig) + RetryConfig(5.seconds, 2, 20.seconds, noJitterConfig, Duration.Zero) def buildTestCaseWithNPeers( n: Int, From 34def0438f2bf9af5c9044ccb86cc3409c4f7ce6 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Thu, 17 Jun 2021 10:10:04 +0100 Subject: [PATCH 41/48] PM-2941: Config parser (#38) * PM-2941: Parsing HOCON as JSON with env var overrides. * PM-2941: Allow environment overrides. * PM-2941: Parsing into case classes. * PM-2941: Added comments to conf to make sure they are handled. * PM-2941: Test null -> Some. * PM-2941: Not using an accumulator. * PM-2941: Return Nil without another loop. * PM-2941: Change ConfigParser return type. * PM-2941: Strategy pattern. --- build.sc | 25 ++- .../metronome/config/ConfigDecoders.scala | 78 ++++++++ .../iohk/metronome/config/ConfigParser.scala | 185 ++++++++++++++++++ metronome/config/test/resources/complex.conf | 27 +++ metronome/config/test/resources/override.conf | 22 +++ metronome/config/test/resources/simple.conf | 11 ++ .../metronome/config/ConfigParserSpec.scala | 172 ++++++++++++++++ 7 files changed, 518 insertions(+), 2 deletions(-) create mode 100644 metronome/config/src/io/iohk/metronome/config/ConfigDecoders.scala create mode 100644 metronome/config/src/io/iohk/metronome/config/ConfigParser.scala create mode 100644 metronome/config/test/resources/complex.conf create mode 100644 metronome/config/test/resources/override.conf create mode 100644 metronome/config/test/resources/simple.conf create mode 100644 metronome/config/test/src/io/iohk/metronome/config/ConfigParserSpec.scala diff --git a/build.sc b/build.sc index ae73aaa1..1888740a 100644 --- a/build.sc +++ b/build.sc @@ -222,6 +222,21 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { } } + /** General configuration parser, to be used by application modules. */ + object config extends SubModule { + override def ivyDeps = super.ivyDeps() ++ Agg( + ivy"com.typesafe:config:${VersionOf.config}", + ivy"io.circe::circe-core:${VersionOf.circe}", + ivy"io.circe::circe-parser:${VersionOf.circe}" + ) + + object test extends TestModule { + override def ivyDeps = super.ivyDeps() ++ Agg( + ivy"io.circe::circe-generic:${VersionOf.circe}" + ) + } + } + /** Generic HotStuff BFT library. 
*/ object hotstuff extends SubModule { @@ -330,10 +345,16 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { */ object app extends SubModule { override def moduleDeps: Seq[JavaModule] = - Seq(hotstuff.service, checkpointing.service, rocksdb, logging, metrics) + Seq( + hotstuff.service, + checkpointing.service, + rocksdb, + logging, + metrics, + config + ) override def ivyDeps = super.ivyDeps() ++ Agg( - ivy"com.typesafe:config:${VersionOf.config}", ivy"ch.qos.logback:logback-classic:${VersionOf.logback}", ivy"io.iohk::scalanet-discovery:${VersionOf.scalanet}" ) diff --git a/metronome/config/src/io/iohk/metronome/config/ConfigDecoders.scala b/metronome/config/src/io/iohk/metronome/config/ConfigDecoders.scala new file mode 100644 index 00000000..7e12241b --- /dev/null +++ b/metronome/config/src/io/iohk/metronome/config/ConfigDecoders.scala @@ -0,0 +1,78 @@ +package io.iohk.metronome.config + +import io.circe._ +import com.typesafe.config.{ConfigFactory, Config} +import scala.util.Try +import scala.concurrent.duration._ + +object ConfigDecoders { + + /** Parse a string into a TypeSafe config an use one of the accessor methods. */ + private def tryParse[T](value: String, f: (Config, String) => T): Try[T] = + Try { + val key = "dummy" + val conf = ConfigFactory.parseString(s"$key = $value") + f(conf, key) + } + + /** Parse HOCON byte sizes like "128M". */ + val bytesDecoder: Decoder[Long] = + Decoder[String].emapTry { + tryParse(_, _ getBytes _) + } + + /** Parse HOCON durations like "5m". */ + val durationDecoder: Decoder[FiniteDuration] = + Decoder[String].emapTry { + tryParse(_, _.getDuration(_).toMillis.millis) + } + + /** Parse an object where a discriminant tells us which other key value + * to deserialise into the target type. + * + * For example take the following config: + * + * ``` + * virus { + * variant = alpha + * alpha { + * r = 1.1 + * } + * delta { + * r = 1.4 + * } + * } + * + * It should deserialize into a class that matches a sub-key: + * ``` + * case class Virus(r: Double) + * object Virus { + * implicit val decoder: Decoder[Virus] = + * ConfigDecoders.strategyDecoder[Virus]("variant", deriveDecoder) + * } + * ``` + * + * The decoder will deserialise all the other keys as well to make sure + * that all of them are valid, in case the selection changes. + */ + def strategyDecoder[T]( + discriminant: String, + decoder: Decoder[T] + ): Decoder[T] = { + // Not passing the decoder implicitly so the compiler doesn't pass + // the one we are constructing here. + implicit val inner: Decoder[T] = decoder + + Decoder.instance[T] { (c: HCursor) => + for { + obj <- c.value.as[JsonObject] + selected <- c.downField(discriminant).as[String] + value <- c.downField(selected).as[T] + // Making sure that everything else is valid. We could pick the value from the result, + // but it's more difficult to provide the right `DecodingFailure` with a list of operations + // if the selected key is not present in the map. 
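The docstring's virus example can be made concrete. A sketch assuming `circe-generic` on the classpath (as the test module adds); `Virus` and the JSON literal are hypothetical:

```scala
import io.circe.Decoder
import io.circe.generic.semiauto.deriveDecoder
import io.circe.parser.parse
import io.iohk.metronome.config.ConfigDecoders

object StrategySketch extends App {
  case class Virus(r: Double)
  implicit val virusDecoder: Decoder[Virus] =
    ConfigDecoders.strategyDecoder[Virus]("variant", deriveDecoder)

  val json = """{"variant":"alpha","alpha":{"r":1.1},"delta":{"r":1.4}}"""

  // "variant" picks the "alpha" sub-object; "delta" is decoded as well, so
  // an invalid alternative fails fast instead of when the selection changes.
  assert(parse(json).flatMap(_.as[Virus]) == Right(Virus(1.1)))
}
```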
+ _ <- Json.fromJsonObject(obj.remove(discriminant)).as[Map[String, T]] + } yield value + } + } +} diff --git a/metronome/config/src/io/iohk/metronome/config/ConfigParser.scala b/metronome/config/src/io/iohk/metronome/config/ConfigParser.scala new file mode 100644 index 00000000..5df4e42d --- /dev/null +++ b/metronome/config/src/io/iohk/metronome/config/ConfigParser.scala @@ -0,0 +1,185 @@ +package io.iohk.metronome.config + +import cats.implicits._ +import com.typesafe.config.{ConfigObject, ConfigRenderOptions} +import io.circe.{Json, JsonObject, ParsingFailure, Decoder, DecodingFailure} +import io.circe.parser.{parse => parseJson} + +object ConfigParser { + protected[config] type ParsingResult = Either[ParsingFailure, Json] + + type Result[T] = Either[Either[ParsingFailure, DecodingFailure], T] + + /** Parse configuration into a type using a JSON decoder, thus allowing + * validations to be applied to all configuraton values up front, rather + * than fail lazily when something is accessed or instantiated from + * the config factory. + * + * Accept overrides from the environment in PREFIX_PATH_TO_FIELD format. + */ + def parse[T: Decoder]( + conf: ConfigObject, + prefix: String = "", + env: Map[String, String] = sys.env + ): Result[T] = { + // Render the whole config to JSON. Everything needs a default value, + // but it can be `null` and be replaced from the environment. + val orig = toJson(conf) + // Transform fields which use dash for segmenting into camelCase. + val withCamel = withCamelCase(orig) + // Apply overrides from env vars. + val withEnv = withEnvVarOverrides(withCamel, prefix, env) + // Map to the domain config model. + withEnv match { + case Left(error) => Left(Left(error)) + case Right(json) => + Decoder[T].decodeJson(json) match { + case Left(error) => Left(Right(error)) + case Right(value) => Right(value) + } + } + } + + /** Render a TypeSafe Config section into JSON. */ + protected[config] def toJson(conf: ConfigObject): Json = { + val raw = conf.render(ConfigRenderOptions.concise) + parseJson(raw) match { + case Left(error: ParsingFailure) => + // This shouldn't happen with a well formed config file, + // which would have already failed during parsing or projecting + // to a `ConfigObject` passed to this method. + throw new IllegalArgumentException(error.message, error.underlying) + + case Right(json) => + json + } + } + + /** Transform a key in the HOCON config file to camelCase. */ + protected[config] def toCamelCase(key: String): String = { + def loop(cs: List[Char]): List[Char] = + cs match { + case ('_' | '-') :: cs => + cs match { + case c :: cs => c.toUpper :: loop(cs) + case Nil => Nil + } + case c :: cs => c :: loop(cs) + case Nil => Nil + } + + loop(key.toList).mkString + } + + /** Turn `camelCaseKey` into `SNAKE_CASE_KEY`, + * which is what it would look like as an env var. + */ + protected[config] def toSnakeCase(camelCase: String): String = { + def loop(cs: List[Char]): List[Char] = + cs match { + case a :: b :: cs if a.isLower && b.isUpper => + a.toUpper :: '_' :: b :: loop(cs) + case '-' :: cs => + '_' :: loop(cs) + case a :: cs => + a.toUpper :: loop(cs) + case Nil => + Nil + } + + loop(camelCase.toList).mkString + } + + /** Transform all keys into camelCase form, + * so they can be matched to case class fields. 
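Since the key helpers are `protected[config]`, here is a standalone re-implementation of the two rules for illustration, showing how a HOCON key maps to a case class field name and then to an environment variable name:

```scala
object KeyCaseSketch extends App {
  // Same rule as `ConfigParser.toCamelCase`: drop '-'/'_' and upcase the
  // letter that follows.
  def toCamelCase(key: String): String =
    key.foldLeft(("", false)) { case ((acc, upper), c) =>
      if (c == '-' || c == '_') (acc, true)
      else if (upper) (acc + c.toUpper, false)
      else (acc + c, false)
    }._1

  // Same rule as `ConfigParser.toSnakeCase`: split at lower->upper
  // boundaries, turn dashes into underscores, upcase everything.
  def toSnakeCase(camel: String): String =
    camel.replaceAll("([a-z])([A-Z])", "$1_$2").replace('-', '_').toUpperCase

  assert(toCamelCase("max-block-size") == "maxBlockSize")
  assert(toSnakeCase("maxBlockSize") == "MAX_BLOCK_SIZE")
  assert(toSnakeCase("nested-structure") == "NESTED_STRUCTURE")
}
```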
+ */ + protected[config] def withCamelCase(json: Json): Json = { + json + .mapArray { arr => + arr.map(withCamelCase) + } + .mapObject { obj => + JsonObject(obj.toIterable.map { case (key, value) => + toCamelCase(key) -> withCamelCase(value) + }.toList: _*) + } + } + + /** Apply overrides from the environment to a JSON structure. + * + * Only considers env var keys that start with prefix and are + * in a PREFIX_SNAKE_CASE format. + * + * The operation can fail if a value in the environment is + * incompatible with the default in the config files. + * + * Default values in the config file are necessary, because + * the environment variable name in itself doesn't uniquely + * define a data structure (a single underscore matches both + * a '.' or a '-' in the path). + */ + protected[config] def withEnvVarOverrides( + json: Json, + prefix: String, + env: Map[String, String] = sys.env + ): ParsingResult = { + def extend(path: String, key: String) = + if (path.isEmpty) key else s"${path}_${key}" + + def loop(json: Json, path: String): ParsingResult = { + + def tryParse( + default: => Json, + validate: Json => Boolean + ): ParsingResult = + env + .get(path) + .map { value => + val maybeJson = parseJson(value) orElse parseJson(s""""$value"""") + + maybeJson.flatMap { json => + if (validate(json)) { + Right(json) + } else { + val msg = s"Invalid value for $path: $value" + Left(ParsingFailure(value, new IllegalArgumentException(msg))) + } + } + } + .getOrElse(Right(default)) + + json + .fold[ParsingResult]( + jsonNull = tryParse(Json.Null, _ => true), + jsonBoolean = x => tryParse(Json.fromBoolean(x), _.isBoolean), + jsonNumber = x => tryParse(Json.fromJsonNumber(x), _.isNumber), + jsonString = x => tryParse(Json.fromString(x), _.isString), + jsonArray = { arr => + arr.zipWithIndex + .map { case (value, idx) => + loop(value, extend(path, idx.toString)) + } + .sequence + .map { values => + Json.arr(values: _*) + } + }, + jsonObject = { obj => + obj.toIterable + .map { case (key, value) => + val snakeKey = toSnakeCase(key) + loop(value, extend(path, snakeKey)).map(key ->) + } + .toList + .sequence + .map { values => + Json.obj(values: _*) + } + } + ) + } + + loop(json, prefix) + } + +} diff --git a/metronome/config/test/resources/complex.conf b/metronome/config/test/resources/complex.conf new file mode 100644 index 00000000..ee1e5520 --- /dev/null +++ b/metronome/config/test/resources/complex.conf @@ -0,0 +1,27 @@ +metronome { + metrics { + enabled = false + } + network { + bootstrap = [ + "localhost:40001" + ], + timeout = 5s + max-packet-size = 512kB + client-id = null + } + blockchain { + consensus = "development" + default { + max-block-size = 1MB + view-timeout = 15s + } + development = ${metronome.blockchain.default} { + max-block-size = 10MB + } + main = ${metronome.blockchain.default} { + view-timeout = 5s + } + } + chain-id = test-chain +} diff --git a/metronome/config/test/resources/override.conf b/metronome/config/test/resources/override.conf new file mode 100644 index 00000000..ef9aeb70 --- /dev/null +++ b/metronome/config/test/resources/override.conf @@ -0,0 +1,22 @@ +override { + metrics { + enabled = false + } + network { + bootstrap = [ + "localhost:40001", + "localhost:40002" + ] + } + optional = null + numeric = 123 + textual = Hello World + boolean = true +} + +# Other setting that shouldn't be affected. 
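End to end, the pieces line up as follows. A sketch against the public `parse` entry point, with a hypothetical `Root`/`Net` model and an assumed `APP` prefix:

```scala
import com.typesafe.config.ConfigFactory
import io.circe.Decoder
import io.circe.generic.semiauto.deriveDecoder
import io.iohk.metronome.config.ConfigParser

object OverrideSketch extends App {
  case class Net(maxPacketSize: String)
  case class Root(net: Net)
  implicit val netDecoder: Decoder[Net]   = deriveDecoder
  implicit val rootDecoder: Decoder[Root] = deriveDecoder

  val conf =
    ConfigFactory.parseString("""root { net { max-packet-size = "512kB" } }""")

  // "max-packet-size" becomes the camelCase field "maxPacketSize", whose
  // env var spelling under the "APP" prefix is APP_NET_MAX_PACKET_SIZE.
  val parsed = ConfigParser.parse[Root](
    conf.getConfig("root").root(),
    prefix = "APP",
    env = Map("APP_NET_MAX_PACKET_SIZE" -> "1MB")
  )

  assert(parsed == Right(Root(Net("1MB"))))
}
```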
+other { + metrics { + enabled = false + } +} diff --git a/metronome/config/test/resources/simple.conf b/metronome/config/test/resources/simple.conf new file mode 100644 index 00000000..d285e7b8 --- /dev/null +++ b/metronome/config/test/resources/simple.conf @@ -0,0 +1,11 @@ +# The root we are going to start from. +simple { + # Property name with a dash. + nested-structure { + foo = 10 + # Property name with an underscore. + bar_baz { + spam = eggs + } + } +} diff --git a/metronome/config/test/src/io/iohk/metronome/config/ConfigParserSpec.scala b/metronome/config/test/src/io/iohk/metronome/config/ConfigParserSpec.scala new file mode 100644 index 00000000..a1cf8fb0 --- /dev/null +++ b/metronome/config/test/src/io/iohk/metronome/config/ConfigParserSpec.scala @@ -0,0 +1,172 @@ +package io.iohk.metronome.config + +import com.typesafe.config.ConfigFactory +import io.circe.Decoder +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import org.scalatest.prop.TableDrivenPropertyChecks +import org.scalatest.Inside +import scala.concurrent.duration._ + +class ConfigParserSpec + extends AnyFlatSpec + with Matchers + with TableDrivenPropertyChecks + with Inside { + + "toJson" should "parse simple.conf to JSON" in { + val conf = ConfigFactory.load("simple.conf") + val json = ConfigParser.toJson(conf.getConfig("simple").root()) + json.noSpaces shouldBe """{"nested-structure":{"bar_baz":{"spam":"eggs"},"foo":10}}""" + } + + "toCamelCase" should "turn keys into camelCase" in { + val examples = Table( + ("input", "expected"), + ("nested-structure", "nestedStructure"), + ("nested_structure", "nestedStructure"), + ("multiple-dashes_and_underscores", "multipleDashesAndUnderscores"), + ("multiple-dashes_and_underscores", "multipleDashesAndUnderscores"), + ("camelCaseKey", "camelCaseKey") + ) + forAll(examples) { case (input, expected) => + ConfigParser.toCamelCase(input) shouldBe expected + } + } + + "toSnakeCase" should "turn camelCase keys into SNAKE_CASE" in { + val examples = Table( + ("input", "expected"), + ("nestedStructure", "NESTED_STRUCTURE"), + ("nested_structure", "NESTED_STRUCTURE"), + ("nested-structure", "NESTED_STRUCTURE") + ) + forAll(examples) { case (input, expected) => + ConfigParser.toSnakeCase(input) shouldBe expected + } + } + + "withCamelCase" should "turn all keys in a JSON object into camelCase" in { + val conf = ConfigFactory.load("simple.conf") + val orig = ConfigParser.toJson(conf.root()) + val json = (ConfigParser.withCamelCase(orig) \\ "simple").head + json.noSpaces shouldBe """{"nestedStructure":{"barBaz":{"spam":"eggs"},"foo":10}}""" + } + + "withEnvVarOverrides" should "overwrite keys from the environment" in { + val conf = ConfigFactory.load("override.conf") + val orig = ConfigParser.toJson(conf.getConfig("override").root()) + val json = ConfigParser.withCamelCase(orig) + + val env = Map( + "TEST_METRICS_ENABLED" -> "true", + "TEST_NETWORK_BOOTSTRAP_0" -> "localhost:50000", + "TEST_OPTIONAL" -> "test", + "TEST_NUMERIC" -> "456", + "TEST_TEXTUAL" -> "Terra Nostra", + "TEST_BOOLEAN" -> "false" + ) + + val result = ConfigParser.withEnvVarOverrides(json, "TEST", env) + + inside(result) { case Right(json) => + json.noSpaces shouldBe """{"boolean":false,"metrics":{"enabled":true},"network":{"bootstrap":["localhost:50000","localhost:40002"]},"numeric":456,"optional":"test","textual":"Terra Nostra"}""" + } + } + + it should "validate that data types are not altered" in { + val conf = ConfigFactory.load("override.conf") + val orig = 
ConfigParser.toJson(conf.root()) + val json = ConfigParser.withCamelCase(orig) + + val examples = Table( + ("path", "invalid"), + ("OVERRIDE_NUMERIC", "NaN"), + ("OVERRIDE_TEXTUAL", "123"), + ("OVERRIDE_BOOLEAN", "no") + ) + forAll(examples) { case (path, invalid) => + ConfigParser + .withEnvVarOverrides(json, "", Map(path -> invalid)) + .isLeft shouldBe true + } + } + + "parse" should "decode into a configuration model" in { + import ConfigParserSpec.TestConfig + + val config = ConfigParser.parse[TestConfig]( + ConfigFactory.load("complex.conf").getConfig("metronome").root(), + prefix = "TEST", + env = Map("TEST_METRICS_ENABLED" -> "true") + ) + + inside(config) { case Right(config) => + config shouldBe TestConfig( + TestConfig.Metrics(enabled = true), + TestConfig.Network( + bootstrap = List("localhost:40001"), + timeout = 5.seconds, + maxPacketSize = TestConfig.Size(512000), + clientId = None + ), + TestConfig + .Blockchain( + maxBlockSize = TestConfig.Size(10000000), + viewTimeout = 15.seconds + ), + chainId = Some("test-chain") + ) + } + } +} + +object ConfigParserSpec { + import io.circe._, io.circe.generic.semiauto._ + + case class TestConfig( + metrics: TestConfig.Metrics, + network: TestConfig.Network, + blockchain: TestConfig.Blockchain, + chainId: Option[String] + ) + object TestConfig { + implicit val durationDecoder: Decoder[FiniteDuration] = + ConfigDecoders.durationDecoder + + case class Metrics(enabled: Boolean) + object Metrics { + implicit val decoder: Decoder[Metrics] = + deriveDecoder + } + + case class Network( + bootstrap: List[String], + timeout: FiniteDuration, + maxPacketSize: Size, + clientId: Option[String] + ) + object Network { + implicit val decoder: Decoder[Network] = + deriveDecoder + } + + case class Size(bytes: Long) + object Size { + implicit val decoder: Decoder[Size] = + ConfigDecoders.bytesDecoder.map(Size(_)) + } + + case class Blockchain( + maxBlockSize: Size, + viewTimeout: FiniteDuration + ) + object Blockchain { + implicit val decoder: Decoder[Blockchain] = + ConfigDecoders.strategyDecoder[Blockchain]("consensus", deriveDecoder) + } + + implicit val decoder: Decoder[TestConfig] = + deriveDecoder + } +} From df448b84181c7f5779beb75c0aeb21b616e546d3 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Thu, 17 Jun 2021 14:27:30 +0100 Subject: [PATCH 42/48] PM-2941: Use camelCase in strategy. (#57) --- .../src/io/iohk/metronome/config/ConfigDecoders.scala | 10 +++++++--- metronome/config/test/resources/complex.conf | 4 ++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/metronome/config/src/io/iohk/metronome/config/ConfigDecoders.scala b/metronome/config/src/io/iohk/metronome/config/ConfigDecoders.scala index 7e12241b..7fb4dd26 100644 --- a/metronome/config/src/io/iohk/metronome/config/ConfigDecoders.scala +++ b/metronome/config/src/io/iohk/metronome/config/ConfigDecoders.scala @@ -42,6 +42,7 @@ object ConfigDecoders { * r = 1.4 * } * } + * ``` * * It should deserialize into a class that matches a sub-key: * ``` @@ -59,19 +60,22 @@ object ConfigDecoders { discriminant: String, decoder: Decoder[T] ): Decoder[T] = { + // This parser is applied after the fields have been transformed to camelCase. + import ConfigParser.toCamelCase // Not passing the decoder implicitly so the compiler doesn't pass // the one we are constructing here. 
implicit val inner: Decoder[T] = decoder Decoder.instance[T] { (c: HCursor) => for { - obj <- c.value.as[JsonObject] - selected <- c.downField(discriminant).as[String] + obj <- c.value.as[JsonObject] + ccd = toCamelCase(discriminant) + selected <- c.downField(ccd).as[String].map(toCamelCase) value <- c.downField(selected).as[T] // Making sure that everything else is valid. We could pick the value from the result, // but it's more difficult to provide the right `DecodingFailure` with a list of operations // if the selected key is not present in the map. - _ <- Json.fromJsonObject(obj.remove(discriminant)).as[Map[String, T]] + _ <- Json.fromJsonObject(obj.remove(ccd)).as[Map[String, T]] } yield value } } diff --git a/metronome/config/test/resources/complex.conf b/metronome/config/test/resources/complex.conf index ee1e5520..34fe76f9 100644 --- a/metronome/config/test/resources/complex.conf +++ b/metronome/config/test/resources/complex.conf @@ -11,12 +11,12 @@ metronome { client-id = null } blockchain { - consensus = "development" + consensus = "research-and-development" default { max-block-size = 1MB view-timeout = 15s } - development = ${metronome.blockchain.default} { + research-and-development = ${metronome.blockchain.default} { max-block-size = 10MB } main = ${metronome.blockchain.default} { From af2e06716bd72a0244ffe2abfc6731a0824c0ac5 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Fri, 18 Jun 2021 14:19:52 +0100 Subject: [PATCH 43/48] PM-3133: Block executor (#39) * PM-3133: Establish a BlockExecutor. * PM-3133: BlockStorage.getPathFromAncestor * PM-3133: Execute batches of blocks. * PM-3133: Testing BlockExecutor. * PM-3133: Generate multiple requests. * PM-3133: Test executing from the last block. * PM-3133: Test with pruning. * PM-3133: Testing exceptions. * PM-3133: ApplicationService.executeBlocks will need the Commit Q.C. * PM-3133: ViewStateStorage.getLastExecutedBlockHash * PM-3133: More permissive check in waiting for block to be processed. * PM-3133: Avoid race condition in setting the last executed block hash. * PM-3133: Fix mock for optional return value. * PM-3133: Add maxDistance to BlockStorage.getPathFromAncestor * PM-3133: Remove unused patter match. * PM-3063: Fix a rare edge case in the test data generation of the view synchronizer. * PM-3133: Abandon batch if a block isn't found or the hash cannot be updated. * PM-3133: Change ApplicationService.executeBlock to return Boolean. * PM-3133: Test skipped execution. * PM-3133: Comments about dealing with extra block executions after fast forwarding. * PM-3133: Move syncState to the BlockExecutor so we can take out a Semaphore. 
* PM-3133: Test BlockExecutor.syncState

* PM-3133: Add BlockStorage.purgeTree
---
 .../hotstuff/service/ApplicationService.scala |  23 +-
 .../hotstuff/service/ConsensusService.scala   |  33 +-
 .../hotstuff/service/HotStuffService.scala    |   9 +
 .../hotstuff/service/SyncService.scala        |  47 +--
 .../service/execution/BlockExecutor.scala     | 303 ++++++++++++++
 .../service/storage/BlockStorage.scala        |  64 +++
 .../service/storage/ViewStateStorage.scala    |  17 +-
 .../service/tracing/ConsensusEvent.scala      |  11 +
 .../service/tracing/ConsensusTracers.scala    |   8 +-
 .../execution/BlockExecutorProps.scala        | 393 ++++++++++++++++++
 .../service/storage/BlockStorageProps.scala   | 105 +++++
 .../service/sync/ViewSynchronizerProps.scala  |  45 +-
 12 files changed, 976 insertions(+), 82 deletions(-)
 create mode 100644 metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutor.scala
 create mode 100644 metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutorProps.scala

diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ApplicationService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ApplicationService.scala
index 57bfca6f..deeda9d3 100644
--- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ApplicationService.scala
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ApplicationService.scala
@@ -1,8 +1,7 @@
 package io.iohk.metronome.hotstuff.service
 
-import cats.data.NonEmptyVector
-import io.iohk.metronome.hotstuff.consensus.basic.Agreement
-import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate
+import cats.data.{NonEmptyVector, NonEmptyList}
+import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, QuorumCertificate}
 
 /** Represents the "application" domain to the HotStuff module,
  * performing all delegations that HotStuff can't do on its own.
@@ -15,8 +14,24 @@ trait ApplicationService[F[_], A <: Agreement] {
   // Returns None if validation cannot be carried out due to data availability issues within a given timeout.
   def validateBlock(block: A#Block): F[Option[Boolean]]
 
+  // TODO (PM-3108, PM-3107, PM-3137, PM-3110): Tell the application to execute a block.
+  // We cannot be sure that all the blocks committed together fit into memory,
+  // so we pass them one by one, but all of them are accompanied by the final Commit Q.C.
+  // and the path of block hashes from the block being executed to the one committed.
+  // Perhaps the application service can cache the headers if it needs to produce a
+  // proof of the BFT agreement at the end.
+  // Returns a flag indicating whether the block execution results have been persisted,
+  // i.e. whether the block and any corresponding state can be used as a starting point after a restart.
+  def executeBlock(
+      block: A#Block,
+      commitQC: QuorumCertificate[A],
+      commitPath: NonEmptyList[A#Hash]
+  ): F[Boolean]
+
   // TODO (PM-3135): Tell the application to sync any state of the block, i.e. the Ledger.
   // The `sources` are peers who most probably have this state.
   // The full `block` is given because it may not be persisted yet.
-  def syncState(sources: NonEmptyVector[A#PKey], block: A#Block): F[Unit]
+  // Return `true` if the block storage can be pruned of earlier blocks after this operation,
+  // which may not be the case if the application syncs by downloading all the blocks.
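As a toy model of the revised `syncState` contract declared just below (stand-in types and cats-effect `IO`, purely illustrative):

```scala
import cats.data.NonEmptyVector
import cats.effect.IO

object SyncStateSketch extends App {
  // Toy stand-ins for the agreement's key and block types.
  type PKey  = String
  type Block = String

  // Sync application state for `block` from the given peers, and report
  // whether earlier blocks can now be pruned from block storage.
  def syncState(sources: NonEmptyVector[PKey], block: Block): IO[Boolean] =
    IO {
      println(s"syncing state of $block from ${sources.toVector.mkString(", ")}")
      true // e.g. the state was downloaded directly, old blocks not needed
    }

  val canPrune =
    syncState(NonEmptyVector.of("alice", "bob"), "block-123").unsafeRunSync()
  assert(canPrune)
}
```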
+ def syncState(sources: NonEmptyVector[A#PKey], block: A#Block): F[Boolean] } diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala index 269b25b0..cd92ed66 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/ConsensusService.scala @@ -18,6 +18,7 @@ import io.iohk.metronome.hotstuff.consensus.basic.{ Signing, QuorumCertificate } +import io.iohk.metronome.hotstuff.service.execution.BlockExecutor import io.iohk.metronome.hotstuff.service.pipes.SyncPipe import io.iohk.metronome.hotstuff.service.storage.{ BlockStorage, @@ -30,6 +31,7 @@ import monix.catnap.ConcurrentQueue import scala.annotation.tailrec import scala.collection.immutable.Queue import scala.util.control.NonFatal +import io.iohk.metronome.hotstuff.service.execution.BlockExecutor /** An effectful executor wrapping the pure HotStuff ProtocolState. * @@ -43,6 +45,7 @@ class ConsensusService[ publicKey: A#PKey, network: Network[F, A#PKey, Message[A]], appService: ApplicationService[F, A], + blockExecutor: BlockExecutor[F, N, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], stateRef: Ref[F, ProtocolState[A]], @@ -50,7 +53,6 @@ class ConsensusService[ counterRef: Ref[F, ConsensusService.MessageCounter], syncPipe: SyncPipe[F, A]#Left, eventQueue: ConcurrentQueue[F, Event[A]], - blockExecutionQueue: ConcurrentQueue[F, Effect.ExecuteBlocks[A]], fiberSet: FiberSet[F], maxEarlyViewNumberDiff: Int )(implicit tracers: ConsensusTracers[F, A], storeRunner: KVStoreRunner[F, N]) { @@ -441,31 +443,17 @@ class ConsensusService[ // the forground here, but it may cause the node to lose its // sync with the other federation members, so the execution // should be offloaded to another queue. - blockExecutionQueue.offer(effect) + blockExecutor.enqueue(effect) case SendMessage(recipient, message) => network.sendMessage(recipient, message) } process.handleErrorWith { case NonFatal(ex) => - tracers.error(ex) + tracers.error(s"Error processing effect $effect", ex) } } - /** Execute blocks in order, updating pesistent storage along the way. */ - private def executeBlocks: F[Unit] = { - blockExecutionQueue.poll.flatMap { case Effect.ExecuteBlocks(_, _) => - // Retrieve the blocks from the storage from the last executed - // to the one in the Quorum Certificate and tell the application - // to execute them one by one. Update the persistent view state - // after reach execution to remember which blocks we have truly - // done. - - // TODO (PM-3133): Execute block - ??? 
- } >> executeBlocks - } - private def validated(event: Event[A]): Validated[Event[A]] = Validated[Event[A]](event) @@ -547,6 +535,7 @@ object ConsensusService { publicKey: A#PKey, network: Network[F, A#PKey, Message[A]], appService: ApplicationService[F, A], + blockExecutor: BlockExecutor[F, N, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], syncPipe: SyncPipe[F, A]#Left, @@ -558,11 +547,13 @@ object ConsensusService { ): Resource[F, ConsensusService[F, N, A]] = for { fiberSet <- FiberSet[F] + service <- Resource.liftF( build[F, N, A]( publicKey, network, appService, + blockExecutor, blockStorage, viewStateStorage, syncPipe, @@ -571,10 +562,11 @@ object ConsensusService { fiberSet ) ) + _ <- Concurrent[F].background(service.processNetworkMessages) _ <- Concurrent[F].background(service.processSyncPipe) _ <- Concurrent[F].background(service.processEvents) - _ <- Concurrent[F].background(service.executeBlocks) + initEffects = ProtocolState.init(initState) _ <- Resource.liftF(service.scheduleEffects(initEffects)) } yield service @@ -587,6 +579,7 @@ object ConsensusService { publicKey: A#PKey, network: Network[F, A#PKey, Message[A]], appService: ApplicationService[F, A], + blockExecutor: BlockExecutor[F, N, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], syncPipe: SyncPipe[F, A]#Left, @@ -603,13 +596,12 @@ object ConsensusService { fibersRef <- Ref[F].of(Set.empty[Fiber[F, Unit]]) counterRef <- Ref[F].of(MessageCounter.empty) eventQueue <- ConcurrentQueue[F].unbounded[Event[A]](None) - blockExecutionQueue <- ConcurrentQueue[F] - .unbounded[Effect.ExecuteBlocks[A]](None) service = new ConsensusService( publicKey, network, appService, + blockExecutor, blockStorage, viewStateStorage, stateRef, @@ -617,7 +609,6 @@ object ConsensusService { counterRef, syncPipe, eventQueue, - blockExecutionQueue, fiberSet, maxEarlyViewNumberDiff ) diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala index df7c2bf0..1aa875e2 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/HotStuffService.scala @@ -9,6 +9,7 @@ import io.iohk.metronome.hotstuff.consensus.basic.{ Block, Signing } +import io.iohk.metronome.hotstuff.service.execution.BlockExecutor import io.iohk.metronome.hotstuff.service.messages.{ HotStuffMessage, SyncMessage @@ -60,10 +61,17 @@ object HotStuffService { syncPipe <- Resource.liftF { SyncPipe[F, A] } + blockExecutor <- BlockExecutor[F, N, A]( + appService, + blockStorage, + viewStateStorage + ) + consensusService <- ConsensusService( initState.publicKey, consensusNetwork, appService, + blockExecutor, blockStorage, viewStateStorage, syncPipe.left, @@ -75,6 +83,7 @@ object HotStuffService { initState.federation, syncNetwork, appService, + blockExecutor, blockStorage, viewStateStorage, syncPipe.right, diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala index d450baef..9be8e304 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala @@ -16,6 +16,7 @@ import io.iohk.metronome.hotstuff.consensus.basic.{ Block, Signing } +import 
io.iohk.metronome.hotstuff.service.execution.BlockExecutor import io.iohk.metronome.hotstuff.service.messages.SyncMessage import io.iohk.metronome.hotstuff.service.pipes.SyncPipe import io.iohk.metronome.hotstuff.service.storage.{ @@ -28,7 +29,7 @@ import io.iohk.metronome.hotstuff.service.sync.{ } import io.iohk.metronome.hotstuff.service.tracing.SyncTracers import io.iohk.metronome.networking.{ConnectionHandler, Network} -import io.iohk.metronome.storage.{KVStoreRunner, KVStore} +import io.iohk.metronome.storage.KVStoreRunner import scala.util.control.NonFatal import scala.concurrent.duration._ import scala.reflect.ClassTag @@ -47,6 +48,7 @@ class SyncService[F[_]: Concurrent: ContextShift, N, A <: Agreement: Block]( publicKey: A#PKey, network: Network[F, A#PKey, SyncMessage[A]], appService: ApplicationService[F, A], + blockExecutor: BlockExecutor[F, N, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], syncPipe: SyncPipe[F, A]#Right, @@ -298,48 +300,13 @@ class SyncService[F[_]: Concurrent: ContextShift, N, A <: Agreement: Block]( ) .rethrow - // Sync any application specific state, e.g. a ledger. - // Do this before we prune the existing blocks and set the new root. - _ <- appService.syncState(federationStatus.sources, block) - - // Prune the block store from earlier blocks that are no longer traversable. - _ <- fastForwardStorage(status, block) + // Sync any application specific state, e.g. a ledger, + // then potentially prune old blocks from the storage. + _ <- blockExecutor.syncState(federationStatus.sources, block) // Tell the ConsensusService about the new Status. _ <- syncPipe.send(SyncPipe.StatusResponse(status)) } yield status.viewNumber - - /** Replace the state we have persisted with what we synced with the federation. - * - * Prunes old blocks, the Commit Q.C. will be the new root. - */ - private def fastForwardStorage(status: Status[A], block: A#Block): F[Unit] = { - val blockHash = Block[A].blockHash(block) - assert(blockHash == status.commitQC.blockHash) - - val query: KVStore[N, Unit] = - for { - viewState <- viewStateStorage.getBundle.lift - // Insert the new block. - _ <- blockStorage.put(block) - - // Prune old data, but keep the new block. - ds <- blockStorage - .getDescendants( - viewState.rootBlockHash, - skip = Set(blockHash) - ) - .lift - _ <- ds.traverse(blockStorage.deleteUnsafe(_)) - - // Considering the committed block as executed, we have its state already. - _ <- viewStateStorage.setLastExecutedBlockHash(blockHash) - _ <- viewStateStorage.setRootBlockHash(blockHash) - // The rest of the fields will be set by the ConsensusService. 
- } yield () - - storeRunner.runReadWrite(query) - } } object SyncService { @@ -357,6 +324,7 @@ object SyncService { federation: Federation[A#PKey], network: Network[F, A#PKey, SyncMessage[A]], appService: ApplicationService[F, A], + blockExecutor: BlockExecutor[F, N, A], blockStorage: BlockStorage[N, A], viewStateStorage: ViewStateStorage[N, A], syncPipe: SyncPipe[F, A]#Right, @@ -376,6 +344,7 @@ object SyncService { publicKey, network, appService, + blockExecutor, blockStorage, viewStateStorage, syncPipe, diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutor.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutor.scala new file mode 100644 index 00000000..63cbe831 --- /dev/null +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutor.scala @@ -0,0 +1,303 @@ +package io.iohk.metronome.hotstuff.service.execution + +import cats.implicits._ +import cats.data.{NonEmptyList, NonEmptyVector} +import cats.effect.{Sync, Concurrent, ContextShift, Resource} +import cats.effect.concurrent.Semaphore +import io.iohk.metronome.hotstuff.service.ApplicationService +import io.iohk.metronome.hotstuff.service.storage.{ + BlockStorage, + ViewStateStorage +} +import io.iohk.metronome.hotstuff.consensus.basic.{ + Agreement, + Block, + Effect, + QuorumCertificate +} +import io.iohk.metronome.hotstuff.service.tracing.ConsensusTracers +import io.iohk.metronome.storage.KVStoreRunner +import monix.catnap.ConcurrentQueue + +/** The `BlockExecutor` receives ranges of committed blocks from the + * `ConsensusService` and carries out their effects, marking the last + * executed block in the `ViewStateStorage`, so that we can resume + * from where we left off last time after a restart. + * + * It delegates other state updates to the `ApplicationService`. + * + * The `BlockExecutor` is prepared for gaps to appear in the ranges, + * which happens if the node is out of sync with the federation and + * needs to jump ahead. + */ +class BlockExecutor[F[_]: Sync, N, A <: Agreement: Block]( + appService: ApplicationService[F, A], + blockStorage: BlockStorage[N, A], + viewStateStorage: ViewStateStorage[N, A], + executionQueue: ConcurrentQueue[F, Effect.ExecuteBlocks[A]], + executionSemaphore: Semaphore[F] +)(implicit tracers: ConsensusTracers[F, A], storeRunner: KVStoreRunner[F, N]) { + + /** Add a newly committed batch of blocks to the execution queue. */ + def enqueue(effect: Effect.ExecuteBlocks[A]): F[Unit] = + executionQueue.offer(effect) + + /** Fast forward state to a given block. + * + * This operation is delegated to the `BlockExecutor` so that it can make sure + * that it's not executing other blocks at the same time. + */ + def syncState( + sources: NonEmptyVector[A#PKey], + block: A#Block + ): F[Unit] = + executionSemaphore.withPermit { + for { + // Sync any application specific state, e.g. a ledger. + // Do this before we prune the existing blocks and set the new root. + canPrune <- appService.syncState(sources, block) + // Prune the block store from earlier blocks that are no longer traversable. + _ <- fastForwardStorage(block, canPrune) + } yield () + } + + /** Execute blocks in order, updating pesistent storage along the way. 
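The consume loop that follows is a single background fiber draining a queue, so a slow execution never blocks the producer. The pattern reduced to a sketch with Monix (names and payloads are illustrative):

```scala
import cats.implicits._
import monix.catnap.ConcurrentQueue
import monix.eval.Task
import scala.concurrent.duration._

object QueueConsumerSketch {
  // A single consumer drains the queue forever; producers only pay for
  // `offer`, never for the execution of the item itself.
  def consumer(queue: ConcurrentQueue[Task, String]): Task[Unit] =
    queue.poll.flatMap(item => Task(println(s"executing $item"))) >> consumer(queue)

  // Run with an implicit Monix Scheduler in scope, e.g.
  // `program.runSyncUnsafe()` under `monix.execution.Scheduler.Implicits.global`.
  val program: Task[Unit] =
    for {
      queue <- ConcurrentQueue[Task].unbounded[String](None)
      fiber <- consumer(queue).start
      _     <- queue.offer("batch-1") >> queue.offer("batch-2")
      _     <- Task.sleep(100.millis) // give the consumer time; demo only
      _     <- fiber.cancel
    } yield ()
}
```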
*/ + private def executeBlocks: F[Unit] = { + executionQueue.poll + .flatMap { case Effect.ExecuteBlocks(lastCommittedBlockHash, commitQC) => + // Retrieve the blocks from the storage from the last executed + // to the one in the Quorum Certificate and tell the application + // to execute them one by one. Update the persistent view state + // after reach execution to remember which blocks we have truly + // done. + // Protect the whole thing with a semaphore from `syncState` being + // carried out at the same time. + executionSemaphore.withPermit { + for { + lastExecutedBlockHash <- getLastExecutedBlockHash + blockHashes <- getBlockPath( + lastExecutedBlockHash, + lastCommittedBlockHash, + commitQC + ) + _ <- blockHashes match { + case _ :: newBlockHashes => + tryExecuteBatch(newBlockHashes, commitQC, lastExecutedBlockHash) + case Nil => + ().pure[F] + } + } yield () + } + } >> executeBlocks + } + + /** Read whatever was the last executed block that we peristed, + * either by doing individual execution or state sync. + */ + private def getLastExecutedBlockHash: F[A#Hash] = + storeRunner.runReadOnly { + viewStateStorage.getLastExecutedBlockHash + } + + /** Update the last executed block hash, unless something else updated it + * while we were executing blocks. This shouldn't happen if we used the + * executor to carry out the state sync within the semaphore. + */ + private def setLastExecutedBlockHash( + blockHash: A#Hash, + lastExecutedBlockHash: A#Hash + ): F[Boolean] = + storeRunner.runReadWrite { + viewStateStorage + .compareAndSetLastExecutedBlockHash( + blockHash, + lastExecutedBlockHash + ) + } + + /** Get the more complete path. We may not have the last executed block any more. + * + * The first hash in the return value is a block that has already been executed. + */ + private def getBlockPath( + lastExecutedBlockHash: A#Hash, + lastCommittedBlockHash: A#Hash, + commitQC: QuorumCertificate[A] + ): F[List[A#Hash]] = { + def readPath(ancestorBlockHash: A#Hash) = + storeRunner + .runReadOnly { + blockStorage.getPathFromAncestor( + ancestorBlockHash, + commitQC.blockHash + ) + } + + readPath(lastExecutedBlockHash) + .flatMap { + case Nil => + readPath(lastCommittedBlockHash) + case path => + path.pure[F] + } + } + + /** Try to execute a batch of newly committed blocks. + * + * The last executed block hash is used to track that it hasn't + * been modified by the jump-ahead state sync mechanism while + * we were executing blocks. + * + * In general we cannot expect to be able to cancel an ongoing execution, + * it may be in the middle of carrying out some real-world effects that + * don't support cancellation. We use the semaphore to protect against + * race conditions between executing blocks here and the fast-forward + * synchroniser making changes to state. + */ + private def tryExecuteBatch( + newBlockHashes: List[A#Hash], + commitQC: QuorumCertificate[A], + lastExecutedBlockHash: A#Hash + ): F[Unit] = { + def loop( + newBlockHashes: List[A#Hash], + lastExecutedBlockHash: A#Hash + ): F[Unit] = + newBlockHashes match { + case Nil => + ().pure[F] + + case blockHash :: nextBlockHashes => + executeBlock( + blockHash, + commitQC, + NonEmptyList(blockHash, nextBlockHashes), + lastExecutedBlockHash + ).attempt.flatMap { + case Left(ex) => + // If a block fails, return what we managed to do so far, + // so we can re-attempt it next time if the block is still + // available in the storage. 
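The batch loop's failure policy, stop at the first failing block but keep earlier progress so the batch can resume later, can be shown in miniature with `IO` and integers standing in for blocks:

```scala
import cats.effect.IO

object BatchFailureSketch extends App {
  def execute(block: Int): IO[Unit] =
    if (block == 3) IO.raiseError(new Exception(s"block $block failed"))
    else IO(println(s"executed block $block"))

  // Stop at the first failure but report what already succeeded,
  // so the remainder of the batch can be re-attempted next time.
  def loop(blocks: List[Int]): IO[List[Int]] =
    blocks match {
      case Nil => IO.pure(Nil)
      case b :: rest =>
        execute(b).attempt.flatMap {
          case Left(_)  => IO.pure(Nil)            // abandon the tail
          case Right(_) => loop(rest).map(b :: _)  // record and continue
        }
    }

  assert(loop(List(1, 2, 3, 4)).unsafeRunSync() == List(1, 2))
}
```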
+ tracers + .error(s"Error executing block $blockHash", ex) + + case Right(None) => + // Either the block couldn't be found, or the last executed + // hash changed, but something suggests that we should not + // try to execute more of this batch. + nextBlockHashes.traverse(tracers.executionSkipped(_)).void + + case Right(Some(nextLastExecutedBlockHash)) => + loop(nextBlockHashes, nextLastExecutedBlockHash) + } + } + + loop(newBlockHashes, lastExecutedBlockHash) + } + + /** Execute the next block in line and update the view state. + * + * The last executed block hash is only updated if the application + * indicates that it has persisted the results, and if no other + * changes have been made to it outside this loop. The execution + * result carries the new last executed block hash to use in the + * next iteration, or `None` if we should abandon the execution. + */ + private def executeBlock( + blockHash: A#Hash, + commitQC: QuorumCertificate[A], + commitPath: NonEmptyList[A#Hash], + lastExecutedBlockHash: A#Hash + ): F[Option[A#Hash]] = { + assert(commitPath.head == blockHash) + assert(commitPath.last == commitQC.blockHash) + + storeRunner.runReadOnly { + blockStorage.get(blockHash) + } flatMap { + case None => + tracers.executionSkipped(blockHash).as(none) + + case Some(block) => + for { + isPersisted <- appService.executeBlock(block, commitQC, commitPath) + _ <- tracers.blockExecuted(blockHash) + + maybeLastExecutedBlockHash <- + if (!isPersisted) { + // Keep the last for the next compare and set below. + lastExecutedBlockHash.some.pure[F] + } else { + // Check that nothing else changed the view state, + // which should be true as long as we use the semaphore. + // Otherwise it would be up to the `ApplicationService` to + // take care of isolation, and check that the block being + // executed is the one we expected. + setLastExecutedBlockHash(blockHash, lastExecutedBlockHash).map { + case true => blockHash.some + case false => none + } + } + } yield maybeLastExecutedBlockHash + } + } + + /** Replace the state we have persisted with what we synced with the federation. + * + * Prunes old blocks, the Commit Q.C. will be the new root. + */ + private def fastForwardStorage( + block: A#Block, + canPrune: Boolean + ): F[Unit] = { + val blockHash = Block[A].blockHash(block) + + val prune = for { + viewState <- viewStateStorage.getBundle.lift + // Prune old data, but keep the new block. + // Traversing from the old root, because the + // new block is probably not connected to it. + _ <- blockStorage.purgeTree( + viewState.rootBlockHash, + keep = blockHash.some + ) + _ <- viewStateStorage.setRootBlockHash(blockHash) + } yield () + + val query = for { + // Insert the new block. + _ <- blockStorage.put(block) + _ <- prune.whenA(canPrune) + // Considering the committed block as executed, we have its state already. 
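Both the execution loop and the fast-forward sync guard their critical sections with the same single-permit semaphore. The isolation pattern on its own, using the cats-effect 2 `Semaphore` this file relies on:

```scala
import cats.effect.{ContextShift, IO}
import cats.effect.concurrent.Semaphore
import scala.concurrent.ExecutionContext

object SemaphoreSketch extends App {
  implicit val cs: ContextShift[IO] =
    IO.contextShift(ExecutionContext.global)

  val program: IO[Unit] =
    for {
      semaphore <- Semaphore[IO](1)
      // Block execution and fast-forward sync both run under the permit,
      // so they can never interleave their storage updates.
      _ <- semaphore.withPermit(IO(println("executing a committed batch")))
      _ <- semaphore.withPermit(IO(println("fast-forwarding to synced state")))
    } yield ()

  program.unsafeRunSync()
}
```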
+ _ <- viewStateStorage.setLastExecutedBlockHash(blockHash) + } yield () + + storeRunner.runReadWrite(query) + } +} + +object BlockExecutor { + def apply[F[_]: Concurrent: ContextShift, N, A <: Agreement: Block]( + appService: ApplicationService[F, A], + blockStorage: BlockStorage[N, A], + viewStateStorage: ViewStateStorage[N, A] + )(implicit + tracers: ConsensusTracers[F, A], + storeRunner: KVStoreRunner[F, N] + ): Resource[F, BlockExecutor[F, N, A]] = for { + executionQueue <- Resource.liftF { + ConcurrentQueue[F].unbounded[Effect.ExecuteBlocks[A]](None) + } + executionSemaphore <- Resource.liftF(Semaphore[F](1)) + executor = new BlockExecutor[F, N, A]( + appService, + blockStorage, + viewStateStorage, + executionQueue, + executionSemaphore + ) + _ <- Concurrent[F].background { + executor.executeBlocks + } + } yield executor +} diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala index 70b486e7..fdb91e6b 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala @@ -131,6 +131,48 @@ class BlockStorage[N, A <: Agreement: Block]( loop(blockHash, Nil) } + /** Get the ancestor chain between two hashes in the chain, if there is one. + * + * If either of the blocks are not in the tree, or there's no path between them, + * return an empty list. This can happen if we have already pruned away the ancestry as well. + * + * The `maxDistance` parameter can be used to limit the maximum traversal depth; + * it's useful with blocks that have a `height` field, where we know up front that + * if we have ascended more than N blocks from the descendant and haven't encountered + * the ancestor, then we must be on a different branch. + */ + def getPathFromAncestor( + ancestorBlockHash: A#Hash, + descendantBlockHash: A#Hash, + maxDistance: Int = Int.MaxValue + ): KVStoreRead[N, List[A#Hash]] = { + def loop( + blockHash: A#Hash, + acc: List[A#Hash], + maxDistance: Int + ): KVStoreRead[N, List[A#Hash]] = { + if (blockHash == ancestorBlockHash) { + KVStoreRead[N].pure(blockHash :: acc) + } else if (maxDistance == 0) { + KVStoreRead[N].pure(Nil) + } else { + childToParentColl.read(blockHash).flatMap { + case None => + KVStoreRead[N].pure(Nil) + case Some(parentBlockHash) => + loop(parentBlockHash, blockHash :: acc, maxDistance - 1) + } + } + } + + (contains(ancestorBlockHash), contains(descendantBlockHash)) + .mapN((_, _)) + .flatMap { + case (true, true) => loop(descendantBlockHash, Nil, maxDistance) + case _ => KVStoreRead[N].pure(Nil) + } + } + /** Collect all descendants of a block, * including the block itself. * @@ -204,4 +246,26 @@ class BlockStorage[N, A <: Agreement: Block]( _ <- path.init.traverse(deleteUnsafe(_)) } yield deleteables } + + /** Remove all blocks in a tree, given by a `blockHash` that's in the tree, + * except perhaps a new root (and its descendants) we want to keep. + * + * This is used to delete an old tree when starting a new that's most likely + * not connected to it, and would otherwise result in a forest. 
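What `purgeTree` deletes and what it keeps can be pictured on a toy child-to-parent map; the hashes are made up, and deletion runs leaves-first as in the code below:

```scala
object PurgeSketch extends App {
  // Toy child -> parent map: "a" is the old root of the chain a-b-c-d,
  // and "d" is the newly committed block we want to keep.
  val parentOf = Map("b" -> "a", "c" -> "b", "d" -> "c")
  val children: Map[String, List[String]] =
    parentOf.toList.groupBy(_._2).map { case (p, cs) => p -> cs.map(_._1) }

  // Like `getDescendants` with a `skip` set: stop at the subtree we keep.
  def descendants(root: String, skip: Set[String]): List[String] =
    if (skip(root)) Nil
    else root :: children.getOrElse(root, Nil).flatMap(descendants(_, skip))

  // Delete leaves-first, as `purgeTree` does with `ds.reverse`.
  val toDelete = descendants("a", skip = Set("d")).reverse
  assert(toDelete == List("c", "b", "a"))
}
```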
+ */ + def purgeTree( + blockHash: A#Hash, + keep: Option[A#Hash] + ): KVStore[N, List[A#Hash]] = + getPathFromRoot(blockHash).lift.flatMap { + case Nil => + KVStore[N].pure(Nil) + + case rootHash :: _ => + for { + ds <- getDescendants(rootHash, skip = keep.toSet).lift + // Going from the leaves towards the root. + _ <- ds.reverse.traverse(deleteUnsafe(_)) + } yield ds + } } diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala index 3976c28e..4cb166eb 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/ViewStateStorage.scala @@ -44,10 +44,23 @@ class ViewStateStorage[N, A <: Agreement] private ( def setLastExecutedBlockHash(blockHash: A#Hash): KVStore[N, Unit] = put(Key.LastExecutedBlockHash, blockHash) + /** Set `LastExecutedBlockHash` to `blockHash` if it's still what it was before. */ + def compareAndSetLastExecutedBlockHash( + blockHash: A#Hash, + lastExecutedBlockHash: A#Hash + ): KVStore[N, Boolean] = + read(Key.LastExecutedBlockHash).lift.flatMap { current => + if (current == lastExecutedBlockHash) { + setLastExecutedBlockHash(blockHash).as(true) + } else { + KVStore[N].pure(false) + } + } + def setRootBlockHash(blockHash: A#Hash): KVStore[N, Unit] = put(Key.RootBlockHash, blockHash) - def getBundle: KVStoreRead[N, ViewStateStorage.Bundle[A]] = + val getBundle: KVStoreRead[N, ViewStateStorage.Bundle[A]] = ( read(Key.ViewNumber), read(Key.PrepareQC), @@ -57,6 +70,8 @@ class ViewStateStorage[N, A <: Agreement] private ( read(Key.RootBlockHash) ).mapN(ViewStateStorage.Bundle.apply[A] _) + val getLastExecutedBlockHash: KVStoreRead[N, A#Hash] = + read(Key.LastExecutedBlockHash) } object ViewStateStorage { diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala index 24b29cb7..0d97af44 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusEvent.scala @@ -55,8 +55,19 @@ object ConsensusEvent { error: ProtocolError[A] ) extends ConsensusEvent[A] + /** A block has been removed from storage by the time it was to be executed. */ + case class ExecutionSkipped[A <: Agreement]( + blockHash: A#Hash + ) extends ConsensusEvent[A] + + /** A block has been executed. */ + case class BlockExecuted[A <: Agreement]( + blockHash: A#Hash + ) extends ConsensusEvent[A] + /** An unexpected error in one of the background tasks. 
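+    * The attached message records what the task was doing when it failed,
+    * e.g. which block was being executed.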
*/ case class Error( + message: String, error: Throwable ) extends ConsensusEvent[Nothing] } diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala index 7aa6c061..2aaf1faa 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/tracing/ConsensusTracers.scala @@ -22,7 +22,9 @@ case class ConsensusTracers[F[_], A <: Agreement]( fromFuture: Tracer[F, Event.MessageReceived[A]], stashed: Tracer[F, ProtocolError.TooEarly[A]], rejected: Tracer[F, ProtocolError[A]], - error: Tracer[F, Throwable] + executionSkipped: Tracer[F, A#Hash], + blockExecuted: Tracer[F, A#Hash], + error: Tracer[F, (String, Throwable)] ) object ConsensusTracers { @@ -43,6 +45,8 @@ object ConsensusTracers { fromFuture = tracer.contramap[Event.MessageReceived[A]](FromFuture(_)), stashed = tracer.contramap[ProtocolError.TooEarly[A]](Stashed(_)), rejected = tracer.contramap[ProtocolError[A]](Rejected(_)), - error = tracer.contramap[Throwable](Error(_)) + executionSkipped = tracer.contramap[A#Hash](ExecutionSkipped(_)), + blockExecuted = tracer.contramap[A#Hash](BlockExecuted(_)), + error = tracer.contramap[(String, Throwable)]((Error.apply _).tupled) ) } diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutorProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutorProps.scala new file mode 100644 index 00000000..98fbb577 --- /dev/null +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutorProps.scala @@ -0,0 +1,393 @@ +package io.iohk.metronome.hotstuff.service.execution + +import cats.implicits._ +import cats.effect.Resource +import cats.effect.concurrent.{Ref, Semaphore} +import cats.data.{NonEmptyVector, NonEmptyList} +import io.iohk.metronome.hotstuff.consensus.ViewNumber +import io.iohk.metronome.hotstuff.consensus.basic.{ + Effect, + QuorumCertificate, + Phase +} +import io.iohk.metronome.crypto.GroupSignature +import io.iohk.metronome.hotstuff.service.ApplicationService +import io.iohk.metronome.hotstuff.service.storage.ViewStateStorage +import io.iohk.metronome.hotstuff.service.storage.{ + BlockStorageProps, + ViewStateStorageCommands +} +import io.iohk.metronome.hotstuff.service.tracing.{ + ConsensusEvent, + ConsensusTracers +} +import io.iohk.metronome.storage.InMemoryKVStore +import io.iohk.metronome.tracer.Tracer +import monix.eval.Task +import monix.execution.Scheduler +import org.scalacheck.{Properties, Arbitrary, Gen} +import org.scalacheck.Prop, Prop.{forAll, propBoolean, all} +import scala.concurrent.duration._ + +object BlockExecutorProps extends Properties("BlockExecutor") { + import BlockStorageProps.{ + TestAgreement, + TestBlock, + TestBlockStorage, + TestKVStore, + Namespace, + genNonEmptyBlockTree + } + import ViewStateStorageCommands.neverUsedCodec + + case class TestResources( + blockExecutor: BlockExecutor[Task, Namespace, TestAgreement], + viewStateStorage: ViewStateStorage[Namespace, TestAgreement], + executionSemaphore: Semaphore[Task] + ) + + case class TestFixture( + blocks: List[TestBlock], + batches: Vector[Effect.ExecuteBlocks[TestAgreement]] + ) { + val storeRef = Ref.unsafe[Task, TestKVStore.Store] { + TestKVStore.build(blocks) + } + val eventsRef = + Ref.unsafe[Task, 
Vector[ConsensusEvent[TestAgreement]]](Vector.empty) + + val store = InMemoryKVStore[Task, Namespace](storeRef) + + implicit val storeRunner = store + + val eventTracer = + Tracer.instance[Task, ConsensusEvent[TestAgreement]] { event => + eventsRef.update(_ :+ event) + } + + implicit val consensusTracers = ConsensusTracers(eventTracer) + + val failNextRef = Ref.unsafe[Task, Boolean](false) + val isExecutingRef = Ref.unsafe[Task, Boolean](false) + + private def appService(semaphore: Semaphore[Task]) = + new ApplicationService[Task, TestAgreement] { + def createBlock( + highQC: QuorumCertificate[TestAgreement] + ): Task[Option[TestBlock]] = ??? + + def validateBlock(block: TestBlock): Task[Option[Boolean]] = ??? + + def syncState( + sources: NonEmptyVector[Int], + block: TestBlock + ): Task[Boolean] = + Task.pure(true) + + def executeBlock( + block: TestBlock, + commitQC: QuorumCertificate[TestAgreement], + commitPath: NonEmptyList[TestAgreement.Hash] + ): Task[Boolean] = + isExecutingRef + .set(true) + .bracket(_ => + semaphore.withPermit { + for { + fail <- failNextRef.modify(failNext => (false, failNext)) + _ <- Task + .raiseError(new RuntimeException("The application failed!")) + .whenA(fail) + } yield true + } + )(_ => isExecutingRef.set(false)) + + } + + val resources: Resource[Task, TestResources] = + for { + viewStateStorage <- Resource.liftF { + storeRunner.runReadWrite { + val genesisQC = QuorumCertificate[TestAgreement]( + phase = Phase.Commit, + viewNumber = ViewNumber(0), + blockHash = blocks.head.id, + signature = GroupSignature(()) + ) + val genesisBundle = ViewStateStorage.Bundle.fromGenesisQC(genesisQC) + + ViewStateStorage[Namespace, TestAgreement]( + "view-state", + genesisBundle + ) + } + } + semaphore <- Resource.liftF(Semaphore[Task](1)) + blockExecutor <- BlockExecutor[Task, Namespace, TestAgreement]( + appService(semaphore), + TestBlockStorage, + viewStateStorage + ) + } yield TestResources(blockExecutor, viewStateStorage, semaphore) + + val executedBlockHashes = + eventsRef.get + .map { events => + events.collect { case ConsensusEvent.BlockExecuted(blockHash) => + blockHash + } + } + + val lastBatchCommitedBlockHash = + batches.last.quorumCertificate.blockHash + + def awaitBlockExecution( + blockHash: TestAgreement.Hash + ): Task[Vector[TestAgreement.Hash]] = { + executedBlockHashes + .restartUntil { blockHashes => + blockHashes.contains(blockHash) + } + } + } + + object TestFixture { + implicit val arb: Arbitrary[TestFixture] = Arbitrary(gen()) + + /** Create a random number of tree extensions, with each extension + * covered by a batch that goes from its root to one of its leaves. 
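+      * Each batch's `lastExecutedBlockHash` is the previous leaf (the
+      * extension's ancestor) and its commit Q.C. points at the new leaf.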
+ */ + def gen(minBatches: Int = 1, maxBatches: Int = 5): Gen[TestFixture] = { + def loop( + i: Int, + tree: List[TestBlock], + effects: Vector[Effect.ExecuteBlocks[TestAgreement]] + ): Gen[TestFixture] = { + if (i == 0) { + Gen.const(TestFixture(tree, effects)) + } else { + val extension = for { + viewNumber <- Gen.posNum[Int].map(ViewNumber(_)) + ancestor = tree.last + descendantTree <- genNonEmptyBlockTree(parentId = ancestor.id) + descendant = descendantTree.last + commitQC = QuorumCertificate[TestAgreement]( + phase = Phase.Commit, + viewNumber = viewNumber, + blockHash = descendant.id, + signature = GroupSignature(()) + ) + effect = Effect.ExecuteBlocks[TestAgreement]( + lastExecutedBlockHash = ancestor.id, + quorumCertificate = commitQC + ) + } yield (tree ++ descendantTree, effects :+ effect) + + extension.flatMap { case (tree, effects) => + loop(i - 1, tree, effects) + } + } + } + + for { + prefixTree <- genNonEmptyBlockTree + i <- Gen.choose(minBatches, maxBatches) + fixture <- loop(i, prefixTree, Vector.empty) + } yield fixture + } + } + + def run(test: Task[Prop]): Prop = { + import Scheduler.Implicits.global + test.runSyncUnsafe(timeout = 5.seconds) + } + + property("executeBlocks - from root") = forAll { (fixture: TestFixture) => + run { + fixture.resources.use { res => + for { + _ <- fixture.batches.traverse(res.blockExecutor.enqueue) + + executedBlockHashes <- fixture.awaitBlockExecution( + fixture.lastBatchCommitedBlockHash + ) + + // The genesis was the only block we marked as executed. + pathFromRoot <- fixture.storeRunner.runReadOnly { + TestBlockStorage.getPathFromRoot(fixture.lastBatchCommitedBlockHash) + } + + } yield { + "executes from the root" |: executedBlockHashes == pathFromRoot.tail + } + } + } + } + + property("executeBlocks - from last") = forAll { (fixture: TestFixture) => + run { + fixture.resources.use { res => + val lastBatch = fixture.batches.last + val lastExecutedBlockHash = lastBatch.lastExecutedBlockHash + for { + _ <- fixture.storeRunner.runReadWrite { + res.viewStateStorage.setLastExecutedBlockHash(lastExecutedBlockHash) + } + _ <- res.blockExecutor.enqueue(lastBatch) + + executedBlockHashes <- fixture.awaitBlockExecution( + fixture.lastBatchCommitedBlockHash + ) + + pathFromLast <- fixture.storeRunner.runReadOnly { + TestBlockStorage.getPathFromAncestor( + lastExecutedBlockHash, + fixture.lastBatchCommitedBlockHash + ) + } + + } yield { + "executes from the last" |: executedBlockHashes == pathFromLast.tail + } + } + } + } + + property("executeBlocks - from pruned") = forAll { (fixture: TestFixture) => + run { + fixture.resources.use { res => + val lastBatch = fixture.batches.last + val lastExecutedBlockHash = lastBatch.lastExecutedBlockHash + for { + _ <- fixture.storeRunner.runReadWrite { + TestBlockStorage.pruneNonDescendants(lastExecutedBlockHash) + } + _ <- res.blockExecutor.enqueue(lastBatch) + + executedBlockHashes <- fixture.awaitBlockExecution( + fixture.lastBatchCommitedBlockHash + ) + + // The last executed block should be the new root. + pathFromRoot <- fixture.storeRunner.runReadOnly { + TestBlockStorage.getPathFromRoot(fixture.lastBatchCommitedBlockHash) + } + } yield { + all( + "new root" |: pathFromRoot.head == lastExecutedBlockHash, + "executes from the last" |: executedBlockHashes == pathFromRoot.tail + ) + } + } + } + } + + property("executeBlocks - from failed") = + // Only the next commit batch triggers re-execution, so we need at least 2. 
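+    // A batch whose execution fails is abandoned; the executor resumes from
+    // the last executed block only when a later batch is enqueued.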
+    forAll(TestFixture.gen(minBatches = 2)) { (fixture: TestFixture) =>
+      run {
+        fixture.resources.use { res =>
+          for {
+            _ <- fixture.failNextRef.set(true)
+            _ <- fixture.batches.traverse(res.blockExecutor.enqueue)
+            _ <- fixture.awaitBlockExecution(fixture.lastBatchCommitedBlockHash)
+            events <- fixture.eventsRef.get
+          } yield {
+            1 === events.count {
+              case _: ConsensusEvent.Error => true
+              case _                       => false
+            }
+          }
+        }
+      }
+    }
+
+  property("executeBlocks - skipped") =
+    // Using 4 batches so the 2nd batch definitely doesn't start with the last executed block,
+    // which will be the root initially, and it's distinct from the last batch as well.
+    forAll(TestFixture.gen(minBatches = 4)) { (fixture: TestFixture) =>
+      run {
+        fixture.resources.use { res =>
+          val execBatch = fixture.batches.tail.head
+          val lastBatch = fixture.batches.last
+          for {
+            // Make the execution wait until we update the view state.
+            _ <- res.executionSemaphore.acquire
+            _ <- res.blockExecutor.enqueue(execBatch)
+
+            // Wait until the execution has started before updating the view state
+            // so that all the blocks are definitely enqueued already.
+            _ <- fixture.isExecutingRef.get.restartUntil(identity)
+
+            // Now skip ahead, as if we had done a fast-forward sync.
+            _ <- fixture.storeRunner.runReadWrite {
+              res.viewStateStorage.setLastExecutedBlockHash(
+                lastBatch.lastExecutedBlockHash
+              )
+            }
+            _ <- res.executionSemaphore.release
+
+            // The easiest indicator that everything has finished is to execute the last batch.
+            _ <- res.blockExecutor.enqueue(lastBatch)
+            _ <- fixture.awaitBlockExecution(
+              lastBatch.quorumCertificate.blockHash
+            )
+
+            events <- fixture.eventsRef.get
+            executedBlockHashes = events.collect {
+              case ConsensusEvent.BlockExecuted(blockHash) => blockHash
+            }
+            skippedBlockHashes = events.collect {
+              case ConsensusEvent.ExecutionSkipped(blockHash) => blockHash
+            }
+
+            path <- fixture.storeRunner.runReadOnly {
+              TestBlockStorage.getPathFromRoot(
+                execBatch.quorumCertificate.blockHash
+              )
+            }
+          } yield {
+            all(
+              // The first block after the root will be executed, only then do we skip the rest.
+              "executes the first block" |: executedBlockHashes.head == path.tail.head,
+              "skips rest of the blocks" |: skippedBlockHashes == path.drop(2)
+            )
+          }
+        }
+      }
+    }
+
+  property("syncState") = forAll { (fixture: TestFixture) =>
+    run {
+      fixture.resources.use { res =>
+        val lastBatch = fixture.batches.last
+        for {
+          block <- fixture.storeRunner.runReadOnly {
+            TestBlockStorage.get(lastBatch.lastExecutedBlockHash).map(_.get)
+          }
+          _ <- res.blockExecutor.syncState(
+            sources = NonEmptyVector.one(0),
+            block = block
+          )
+          _ <- fixture.batches.traverse(res.blockExecutor.enqueue)
+
+          executedBlockHashes <- fixture.awaitBlockExecution(
+            fixture.lastBatchCommitedBlockHash
+          )
+
+          // The last executed block should be the new root after pruning away old blocks.
+          pathFromRoot <- fixture.storeRunner.runReadOnly {
+            TestBlockStorage.getPathFromRoot(
+              fixture.lastBatchCommitedBlockHash
+            )
+          }
+        } yield {
+          all(
+            "prunes to the fast forwarded block" |: pathFromRoot.head == lastBatch.lastExecutedBlockHash,
+            "executes from the fast forwarded block" |: executedBlockHashes == pathFromRoot.tail
+          )
+        }
+      }
+    }
+  }
+}
diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala
index b9d3f0da..a7075293 100644
--- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala
+++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala
@@ -82,6 +82,22 @@ object BlockStorageProps extends Properties("BlockStorage") {
         .compile(TestBlockStorage.getPathFromRoot(blockHash))
         .run(store)
 
+    def getPathFromAncestor(
+        ancestorBlockHash: Hash,
+        descendantBlockHash: Hash,
+        maxDistance: Int = Int.MaxValue
+    ) =
+      TestKVStore
+        .compile(
+          TestBlockStorage
+            .getPathFromAncestor(
+              ancestorBlockHash,
+              descendantBlockHash,
+              maxDistance
+            )
+        )
+        .run(store)
+
     def getDescendants(blockHash: Hash) =
       TestKVStore
         .compile(TestBlockStorage.getDescendants(blockHash))
@@ -92,6 +108,12 @@ object BlockStorageProps extends Properties("BlockStorage") {
         .compile(TestBlockStorage.pruneNonDescendants(blockHash))
         .run(store)
         .value
+
+    def purgeTree(blockHash: Hash, keep: Option[Hash]) =
+      TestKVStore
+        .compile(TestBlockStorage.purgeTree(blockHash, keep))
+        .run(store)
+        .value
   }
 
   def genBlockId: Gen[Hash] =
@@ -238,6 +260,53 @@ object BlockStorageProps extends Properties("BlockStorage") {
       data.store.getPathFromRoot(nonExisting.id).isEmpty
   }
 
+  property("getPathFromAncestor") = forAll(
+    for {
+      prefix <- genNonEmptyBlockTree
+      ancestor = prefix.last
+      postfix <- genNonEmptyBlockTree(ancestor.id)
+      descendant <- Gen.oneOf(postfix)
+      data = TestData(prefix ++ postfix)
+      nonExisting <- genBlock
+    } yield (data, ancestor, descendant, nonExisting)
+  ) { case (data, ancestor, descendant, nonExisting) =>
+    def getPath(a: TestBlock, d: TestBlock, maxDistance: Int = Int.MaxValue) =
+      data.store.getPathFromAncestor(a.id, d.id, maxDistance)
+
+    def pathExists(a: TestBlock, d: TestBlock) = {
+      val path = getPath(a, d)
+      path.nonEmpty &&
+      path.distinct.size == path.size &&
+      path.head == a.id &&
+      path.last == d.id &&
+      (path.init zip path.tail).forall { case (parentHash, childHash) =>
+        data.store.getBlock(childHash).get.parentId == parentHash
+      }
+    }
+
+    def pathNotExists(a: TestBlock, d: TestBlock) =
+      getPath(a, d).isEmpty
+
+    all(
+      "fromAtoD" |: pathExists(ancestor, descendant),
+      "fromDtoA" |: pathNotExists(descendant, ancestor),
+      "fromAtoA" |: pathExists(ancestor, ancestor),
+      "fromDtoD" |: pathExists(descendant, descendant),
+      "fromAtoN" |: pathNotExists(ancestor, nonExisting),
+      "fromNtoD" |: pathNotExists(nonExisting, descendant),
+      "maxDistance" |: {
+        val (a, d, n) = (ancestor, descendant, nonExisting)
+        val dist = getPath(ancestor, descendant).length - 1
+        all(
+          "fromAtoD maxDistance=dist" |: getPath(a, d, dist).nonEmpty,
+          "fromAtoD maxDistance=dist-1" |: getPath(a, d, dist - 1).isEmpty,
+          "fromDtoD maxDistance=0" |: getPath(d, d, 0).nonEmpty,
+          "fromNtoN maxDistance=0" |: getPath(n, n, 0).isEmpty
+        )
+      }
+    )
+  }
+
   property("getDescendants existing") = forAll(genSubTree) {
     case (data, block, subTree) =>
       val ds =
data.store.getDescendants(block.id) @@ -299,4 +368,40 @@ object BlockStorageProps extends Properties("BlockStorage") { case (data, nonExisting) => data.store.pruneNonDescendants(nonExisting.id)._2.isEmpty } + + property("purgeTree keep block") = forAll( + for { + (data, keepBlock, subTree) <- genSubTree + refBlock <- Gen.oneOf(data.tree) + } yield (data, refBlock, keepBlock, subTree) + ) { case (data, refBlock, keepBlock, subTree) => + val (s, ps) = data.store.purgeTree( + blockHash = refBlock.id, + keep = Some(keepBlock.id) + ) + val pss = ps.toSet + val descendants = subTree.map(_.id).toSet + val nonDescendants = + data.tree.map(_.id).filterNot(descendants).filterNot(_ == keepBlock.id) + all( + "size" |: ps.size == nonDescendants.size, + "pruned" |: nonDescendants.forall(pss), + "deleted" |: nonDescendants.forall(!s.containsBlock(_)), + "kept-block" |: s.containsBlock(keepBlock.id), + "kept-descendants" |: descendants.forall(s.containsBlock(_)) + ) + } + + property("purgeTree keep nothing") = forAll(genSubTree) { + case (data, block, _) => + val (s, ps) = data.store.purgeTree( + blockHash = block.id, + keep = None + ) + val pss = ps.toSet + all( + "pruned all" |: pss.size == data.tree.size, + "kept nothing" |: s.isEmpty + ) + } } diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala index f6c77df9..c6bd409c 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/ViewSynchronizerProps.scala @@ -75,9 +75,9 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { if (round >= responses.size) None else responses(round)(publicKey) match { - case TestResponse.Timeout => None - case TestResponse.InvalidStatus(status) => Some(status) - case TestResponse.ValidStatus(status) => Some(status) + case TestResponse.Timeout => None + case TestResponse.InvalidStatus(status, _) => Some(status) + case TestResponse.ValidStatus(status) => Some(status) } } yield result } @@ -106,9 +106,10 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { sealed trait TestResponse object TestResponse { - case object Timeout extends TestResponse - case class ValidStatus(status: Status[TestAgreement]) extends TestResponse - case class InvalidStatus(status: Status[TestAgreement]) extends TestResponse + case object Timeout extends TestResponse + case class ValidStatus(status: Status[TestAgreement]) extends TestResponse + case class InvalidStatus(status: Status[TestAgreement], reason: String) + extends TestResponse } /** Generate a series of hypothetical responses projected from an idealized consensus process. 
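+    * Invalid statuses are paired with a short reason describing how the
+    * status was corrupted.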
*/ @@ -156,20 +157,32 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { genQC(qc.viewNumber, Phase.Commit, qc.blockHash) def genInvalid(status: Status[TestAgreement]) = { - def delay(invalid: => Status[TestAgreement]) = + def delay(invalid: => (Status[TestAgreement], String)) = Gen.delay(Gen.const(invalid)) Gen.oneOf( - delay(status.copy(viewNumber = status.prepareQC.viewNumber.prev)), - delay(status.copy(prepareQC = status.commitQC)), - delay(status.copy(commitQC = status.prepareQC)), + delay( + status.copy(viewNumber = + status.prepareQC.viewNumber.prev + ) -> "view number less than prepare" + ), + delay( + status.copy(prepareQC = + status.commitQC + ) -> "commit instead of prepare" + ), + delay( + status.copy(commitQC = + status.prepareQC + ) -> "prepare instead of commit" + ), delay( status.copy(commitQC = status.commitQC.copy[TestAgreement](signature = status.commitQC.signature - .copy(sig = status.commitQC.signature.sig.map(_ + 1)) + .copy(sig = status.commitQC.signature.sig.map(_ * 2)) ) - ) - ).filter(_.commitQC.viewNumber > 0) + ) -> "wrong commit signature" + ).filter(_._1.commitQC.viewNumber > 0) ) } @@ -204,7 +217,9 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { Gen.frequency( 3 -> Gen.const(TestResponse.Timeout), 2 -> Gen.const(TestResponse.ValidStatus(status)), - 5 -> genInvalid(status).map(TestResponse.InvalidStatus(_)) + 5 -> genInvalid(status).map( + (TestResponse.InvalidStatus.apply _).tupled + ) ) } else { Gen.frequency( @@ -260,7 +275,7 @@ object ViewSynchronizerProps extends Properties("ViewSynchronizer") { else fixture.responses responses .flatMap(_.values) - .collect { case TestResponse.InvalidStatus(_) => + .collect { case _: TestResponse.InvalidStatus => } .size } From 3ce9ec727639d2160e2d9aa9b61723ede6ba9096 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Fri, 18 Jun 2021 18:50:04 +0100 Subject: [PATCH 44/48] PM-3418: Add height to the Block type class. Validate it in SyncService. (#55) * PM-3418: Add height to the Block type class. Validate it in SyncService. * PM-3418: Quit traversing if descendant height is lower than ancestor height. * PM-3418: Try to make validation look less tied up. * PM-3418: Validate a list of checks so there's no chance to miss anything. 
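
For orientation, here is a minimal sketch of the height-bounded ancestor walk the second bullet refers to. It is illustrative only: `Hash`, `Meta` and `metaOf` are hypothetical stand-ins for the stored block metadata and its lookup, not the types in this diff. The idea is to ascend from the descendant at most `descendant.height - ancestor.height` steps; if the ancestor has not been reached by then, the two blocks must be on different branches.

    object HeightBoundedWalk {
      type Hash = String
      final case class Meta(hash: Hash, parentHash: Hash, height: Long)

      /** Walk up the parent links from `descendant` towards `ancestor`,
        * spending at most the height difference as a step budget.
        * An empty result means different branches or pruned ancestry.
        */
      def pathFromAncestor(
          ancestor: Meta,
          descendant: Meta,
          metaOf: Hash => Option[Meta] // hypothetical lookup by hash
      ): List[Hash] = {
        def loop(current: Meta, acc: List[Hash], remaining: Long): List[Hash] =
          if (current.hash == ancestor.hash) current.hash :: acc
          else if (remaining <= 0) Nil // budget spent: must be a different branch
          else
            metaOf(current.parentHash) match {
              case None         => Nil
              case Some(parent) => loop(parent, current.hash :: acc, remaining - 1)
            }

        loop(descendant, Nil, descendant.height - ancestor.height)
      }
    }

The `getPathFromAncestor` in the diff below performs the same walk inside `KVStoreRead`, deriving the budget from the height stored in the block metadata.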
--- .../CheckpointingAgreement.scala | 2 + .../checkpointing/models/Block.scala | 3 + .../RLPCodec[CheckpointCertificate].rlp | 2 +- .../RLPCodec[CheckpointCertificate].txt | 2 +- .../models/ArbitraryInstances.scala | 2 + .../checkpointing/models/RLPCodecsSpec.scala | 10 +++ .../hotstuff/consensus/basic/Block.scala | 11 ++- .../consensus/basic/ProtocolStateProps.scala | 10 ++- .../hotstuff/service/SyncService.scala | 24 +++++- .../service/storage/BlockStorage.scala | 74 ++++++++++++------- .../execution/BlockExecutorProps.scala | 2 +- .../service/storage/BlockStorageProps.scala | 71 +++++++++--------- .../service/sync/BlockSynchronizerProps.scala | 2 +- 13 files changed, 144 insertions(+), 71 deletions(-) diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala index 5ca27862..944a8b47 100644 --- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala +++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/CheckpointingAgreement.scala @@ -35,6 +35,8 @@ object CheckpointingAgreement extends Secp256k1Agreement { b.hash override def parentBlockHash(b: models.Block) = b.header.parentHash + override def height(b: Block): Long = + b.header.height override def isValid(b: models.Block) = models.Block.isValid(b) } diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala index bdfbd724..45614769 100644 --- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala +++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Block.scala @@ -41,6 +41,7 @@ object Block { val body = Body(transactions) val header = Header( parentHash = parent.hash, + height = parent.header.height + 1, postStateHash = postStateHash, contentMerkleRoot = Body.contentMerkleRoot(body) ) @@ -56,6 +57,7 @@ object Block { val body = Body(Vector.empty) val header = Header( parentHash = Block.Header.Hash(ByteVector.empty), + height = 0, postStateHash = Ledger.empty.hash, contentMerkleRoot = MerkleTree.empty.hash ) @@ -64,6 +66,7 @@ object Block { case class Header( parentHash: Header.Hash, + height: Long, // Hash of the Ledger after executing the block. postStateHash: Ledger.Hash, // Merkle root of the transactions in the body. 
diff --git a/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp index 220860ec..25e01a48 100644 --- a/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp +++ b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].rlp @@ -1 +1 @@ -f901f9f8caf863a068017f7a80015c7f01ffc10001000087017fa3b8d2807faa8000ff8005018080a07f08b76d73018044cdff7f1b6780ffcc04258b74ff807fff3b5c01867fff493da06fd07f2e8687007f807f4380a9eeff647f454f1600a2801280570048568ff300f863a066001b8000ff518000f79723000100264c01d17f01797adb3600e4cf41ff769aa09b1ee3dfc2000180be00809f7f77017bff00d6e001e6ed408a7f0091095700b7a0803100737cff804f7f7cf2ccd5ff7fc2809ad67a7f51b2ff007f266c29fff080f6b53e807fffc5ff7f7f7f01430101a5017fff80001f40ce800100000770ff020077007f007fc601354501ff0000017f12ff0b7f7f7201f84502f842a00063ff8000e56b01bb7f7a047fc40180cd00ff8013827f7f607fffdba4ffba88a043ff01ff7f80847fb97f6bceae0001fff6c0ffde80a1b2010081017f7f70807ff8ad030aa01f3e7f7f2f337fb680a1808bd622ff977f9bff7bffbb80007f23ffa3ff8053cff888f886b8410000000000000000000000000000000000000000000000000000000000000047000000000000000000000000000000000000000000000000000000000000005b1cb841000000000000000000000000000000000000000000000000000000000000005e00000000000000000000000000000000000000000000000000000000000000611b \ No newline at end of file +f901d7f8ccf864a07f80dddfff7f3a0b809480dd53010196127f7f7f01270027ff00803651c4fc7f64a0ffff597f000000007fd4486a017f80a57f5cd17f0000ffd41032f77f0080d77fa0ff51807f7f7fc33d7f00d4ff017f7fd5ff008e01007f94017ffb57800174d9fff864a080ff63ff9a798000177f9ab900419d80345eff00007f000144d7e67fffef800020a0147fa3550080ff6c007fd59028ba1b7f4313eed26a7f52c4ad4dd28d804a289ba001a90100ff0096fe4c7f800199747f80ff01ff8085176c00146d8064fff58001d2917f707f808000f7ff5380617d5201005b7ff84502f842a0800380b0c46000ff2c805bfba901c90104011f99241801477e1fa70b8000a101a080ff1315ea810000ee00000db0ed007fff00a79d0000ffb400ffff274f012531f8ad030aa001ffcdffffff0d157f347f018dd1ec012180ea011900a001379f7f01800e3715f888f886b841000000000000000000000000000000000000000000000000000000000000005e00000000000000000000000000000000000000000000000000000000000000111cb841000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000461c \ No newline at end of file diff --git a/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt index a2667d42..16bb10ac 100644 --- a/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt +++ b/metronome/checkpointing/models/test/resources/golden/RLPCodec[CheckpointCertificate].txt @@ -1 +1 @@ -CheckpointCertificate(NonEmptyList(Header(ByteVector(32 bytes, 0x68017f7a80015c7f01ffc10001000087017fa3b8d2807faa8000ff8005018080),ByteVector(32 bytes, 0x7f08b76d73018044cdff7f1b6780ffcc04258b74ff807fff3b5c01867fff493d),ByteVector(32 bytes, 0x6fd07f2e8687007f807f4380a9eeff647f454f1600a2801280570048568ff300)), Header(ByteVector(32 bytes, 0x66001b8000ff518000f79723000100264c01d17f01797adb3600e4cf41ff769a),ByteVector(32 bytes, 0x9b1ee3dfc2000180be00809f7f77017bff00d6e001e6ed408a7f0091095700b7),ByteVector(32 bytes, 0x803100737cff804f7f7cf2ccd5ff7fc2809ad67a7f51b2ff007f266c29fff080))),CheckpointCandidate(BitVector(424 bits, 
0x3e807fffc5ff7f7f7f01430101a5017fff80001f40ce800100000770ff020077007f007fc601354501ff0000017f12ff0b7f7f7201)),Proof(2,Vector(ByteVector(32 bytes, 0x0063ff8000e56b01bb7f7a047fc40180cd00ff8013827f7f607fffdba4ffba88), ByteVector(32 bytes, 0x43ff01ff7f80847fb97f6bceae0001fff6c0ffde80a1b2010081017f7f70807f))),QuorumCertificate(Commit,10,ByteVector(32 bytes, 0x1f3e7f7f2f337fb680a1808bd622ff977f9bff7bffbb80007f23ffa3ff8053cf),GroupSignature(List(ECDSASignature(71,91,28), ECDSASignature(94,97,27))))) \ No newline at end of file +CheckpointCertificate(NonEmptyList(Header(ByteVector(32 bytes, 0x7f80dddfff7f3a0b809480dd53010196127f7f7f01270027ff00803651c4fc7f),100,ByteVector(32 bytes, 0xffff597f000000007fd4486a017f80a57f5cd17f0000ffd41032f77f0080d77f),ByteVector(32 bytes, 0xff51807f7f7fc33d7f00d4ff017f7fd5ff008e01007f94017ffb57800174d9ff)), Header(ByteVector(32 bytes, 0x80ff63ff9a798000177f9ab900419d80345eff00007f000144d7e67fffef8000),32,ByteVector(32 bytes, 0x147fa3550080ff6c007fd59028ba1b7f4313eed26a7f52c4ad4dd28d804a289b),ByteVector(32 bytes, 0x01a90100ff0096fe4c7f800199747f80ff01ff8085176c00146d8064fff58001))),CheckpointCandidate(BitVector(136 bits, 0x7f707f808000f7ff5380617d5201005b7f)),Proof(2,Vector(ByteVector(32 bytes, 0x800380b0c46000ff2c805bfba901c90104011f99241801477e1fa70b8000a101), ByteVector(32 bytes, 0x80ff1315ea810000ee00000db0ed007fff00a79d0000ffb400ffff274f012531))),QuorumCertificate(Commit,10,ByteVector(32 bytes, 0x01ffcdffffff0d157f347f018dd1ec012180ea011900a001379f7f01800e3715),GroupSignature(List(ECDSASignature(94,17,28), ECDSASignature(39,70,28))))) \ No newline at end of file diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala index 09452d50..03058f0b 100644 --- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala @@ -68,12 +68,14 @@ object ArbitraryInstances Arbitrary { for { parentHash <- arbitrary[Block.Header.Hash] + height <- Gen.posNum[Long] postStateHash <- arbitrary[Ledger.Hash] transactions <- arbitrary[Vector[Transaction]] contentMerkleRoot <- arbitrary[MerkleTree.Hash] body = Block.Body(transactions) header = Block.Header( parentHash, + height, postStateHash, contentMerkleRoot ) diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala index 40a1d5de..ca444359 100644 --- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/RLPCodecsSpec.scala @@ -186,11 +186,21 @@ class RLPCodecsSpec extends AnyFlatSpec with Matchers { RLPList( // NonEmptyList RLPList( // BlockHeader RLPValue(decoded.headers.head.parentHash.toArray), + RLPValue( + rlp.RLPImplicits.longEncDec + .encode(decoded.headers.head.height) + .bytes + ), RLPValue(decoded.headers.head.postStateHash.toArray), RLPValue(decoded.headers.head.contentMerkleRoot.toArray) ), RLPList( // BlockHeader RLPValue(decoded.headers.last.parentHash.toArray), + RLPValue( + rlp.RLPImplicits.longEncDec + .encode(decoded.headers.last.height) + .bytes + ), RLPValue(decoded.headers.last.postStateHash.toArray), 
RLPValue(decoded.headers.last.contentMerkleRoot.toArray) ) diff --git a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala index 87379b0b..70780d05 100644 --- a/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala +++ b/metronome/hotstuff/consensus/src/io/iohk/metronome/hotstuff/consensus/basic/Block.scala @@ -12,9 +12,18 @@ package io.iohk.metronome.hotstuff.consensus.basic trait Block[A <: Agreement] { def blockHash(b: A#Block): A#Hash def parentBlockHash(b: A#Block): A#Hash + def height(b: A#Block): Long - /** Perform simple content validation. */ + /** Perform simple content validation, e.g. + * whether the block hash matches the header + * and the header content matches the body. + */ def isValid(b: A#Block): Boolean + + def isParentOf(parent: A#Block, child: A#Block): Boolean = { + parentBlockHash(child) == blockHash(parent) && + height(child) == height(parent) + 1 + } } object Block { diff --git a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala index 4594cd58..a363a36a 100644 --- a/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala +++ b/metronome/hotstuff/consensus/test/src/io/iohk/metronome/hotstuff/consensus/basic/ProtocolStateProps.scala @@ -28,7 +28,11 @@ object ProtocolStateProps extends Properties("Basic HotStuff") { */ object ProtocolStateCommands extends Commands { - case class TestBlock(blockHash: Int, parentBlockHash: Int, command: String) + case class TestBlock( + blockHash: Int, + parentBlockHash: Int, + command: String + ) object TestAgreement extends Agreement { type Block = TestBlock @@ -40,7 +44,8 @@ object ProtocolStateCommands extends Commands { } type TestAgreement = TestAgreement.type - val genesis = TestBlock(blockHash = 0, parentBlockHash = -1, command = "") + val genesis = + TestBlock(blockHash = 0, parentBlockHash = -1, command = "") val genesisQC = QuorumCertificate[TestAgreement]( phase = Phase.Prepare, @@ -52,6 +57,7 @@ object ProtocolStateCommands extends Commands { implicit val block: Block[TestAgreement] = new Block[TestAgreement] { override def blockHash(b: TestBlock) = b.blockHash override def parentBlockHash(b: TestBlock) = b.parentBlockHash + override def height(b: TestBlock): Long = 0 // Not used by this model. 
     override def isValid(b: TestBlock) = true
   }
 
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala
index 9be8e304..fd675782 100644
--- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/SyncService.scala
@@ -242,7 +242,7 @@ class SyncService[F[_]: Concurrent: ContextShift, N, A <: Agreement: Block](
     blockSync.fiberMap
       .submit(sender) {
         blockSync.synchronizer.sync(sender, prepare.highQC) >>
-          appService.validateBlock(prepare.block) >>= {
+          validateBlock(prepare.block) >>= {
             case Some(isValid) =>
               syncPipe.send(SyncPipe.PrepareResponse(request, isValid))
             case None =>
@@ -253,6 +253,28 @@ class SyncService[F[_]: Concurrent: ContextShift, N, A <: Agreement: Block](
       .void
   }
 
+  /** Validate the prepared block after the parent has been downloaded. */
+  private def validateBlock(block: A#Block): F[Option[Boolean]] = {
+    // Short-circuiting validation.
+    def runChecks(checks: F[Option[Boolean]]*) =
+      checks.reduce[F[Option[Boolean]]] { case (a, b) =>
+        a.flatMap {
+          case Some(true) => b
+          case other      => other.pure[F]
+        }
+      }
+
+    runChecks(
+      storeRunner.runReadOnly {
+        blockStorage
+          .get(Block[A].parentBlockHash(block))
+          .map(_.map(Block[A].isParentOf(_, block)))
+      },
+      Block[A].isValid(block).some.pure[F],
+      appService.validateBlock(block)
+    )
+  }
+
   /** Shut down any outstanding block downloads, sync the view,
     * then create another block synchronizer instance to resume with.
     */
diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala
index fdb91e6b..a2c278a9 100644
--- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala
+++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala
@@ -14,9 +14,11 @@ import scala.collection.immutable.Queue
   */
 class BlockStorage[N, A <: Agreement: Block](
     blockColl: KVCollection[N, A#Hash, A#Block],
-    childToParentColl: KVCollection[N, A#Hash, A#Hash],
+    blockMetaColl: KVCollection[N, A#Hash, BlockStorage.BlockMeta[A]],
     parentToChildrenColl: KVCollection[N, A#Hash, Set[A#Hash]]
 ) {
+  import BlockStorage.BlockMeta
+
   private implicit val kvn  = KVStore.instance[N]
   private implicit val kvrn = KVStoreRead.instance[N]
 
@@ -24,12 +26,13 @@ class BlockStorage[N, A <: Agreement: Block](
     * then add this block to its children.
     */
   def put(block: A#Block): KVStore[N, Unit] = {
-    val blockHash  = Block[A].blockHash(block)
-    val parentHash = Block[A].parentBlockHash(block)
+    val blockHash = Block[A].blockHash(block)
+    val meta =
+      BlockMeta(Block[A].parentBlockHash(block), Block[A].height(block))
 
     blockColl.put(blockHash, block) >>
-      childToParentColl.put(blockHash, parentHash) >>
-      parentToChildrenColl.alter(parentHash) { maybeChildren =>
+      blockMetaColl.put(blockHash, meta) >>
+      parentToChildrenColl.alter(meta.parentBlockHash) { maybeChildren =>
         maybeChildren orElse Set.empty.some map (_ + blockHash)
       }
 
@@ -41,7 +44,7 @@ class BlockStorage[N, A <: Agreement: Block](
 
   /** Check whether a block is present in the tree. */
   def contains(blockHash: A#Hash): KVStoreRead[N, Boolean] =
-    childToParentColl.read(blockHash).map(_.isDefined)
+    blockMetaColl.read(blockHash).map(_.isDefined)
 
   /** Check how many children the block has in the tree.
    */
   private def childCount(blockHash: A#Hash): KVStoreRead[N, Int] =
     parentToChildrenColl.read(blockHash).map(_.fold(0)(_.size))
 
   /** Check whether the parent of the block is present in the tree. */
   private def hasParent(blockHash: A#Hash): KVStoreRead[N, Boolean] =
-    childToParentColl.read(blockHash).flatMap {
-      case None             => KVStoreRead[N].pure(false)
-      case Some(parentHash) => contains(parentHash)
+    blockMetaColl.read(blockHash).flatMap {
+      case None       => KVStoreRead[N].pure(false)
+      case Some(meta) => contains(meta.parentBlockHash)
     }
 
+  private def getParentBlockHash(
+      blockHash: A#Hash
+  ): KVStoreRead[N, Option[A#Hash]] =
+    blockMetaColl.read(blockHash).map(_.map(_.parentBlockHash))
+
   /** Check whether it's safe to delete a block.
     *
     * A block is safe to delete if doing so doesn't break up the tree
@@ -92,7 +100,7 @@ class BlockStorage[N, A <: Agreement: Block](
     def deleteIfEmpty(maybeChildren: Option[Set[A#Hash]]) =
       maybeChildren.filter(_.nonEmpty)
 
-    childToParentColl.get(blockHash).flatMap {
+    getParentBlockHash(blockHash).lift.flatMap {
       case None =>
         KVStore[N].unit
       case Some(parentHash) =>
@@ -101,7 +109,7 @@ class BlockStorage[N, A <: Agreement: Block](
         }
     } >>
       blockColl.delete(blockHash) >>
-      childToParentColl.delete(blockHash) >>
+      blockMetaColl.delete(blockHash) >>
       // Keep the association from existing children, until the last one is deleted.
       parentToChildrenColl.alter(blockHash)(deleteIfEmpty)
   }
@@ -118,7 +126,7 @@ class BlockStorage[N, A <: Agreement: Block](
         blockHash: A#Hash,
         acc: List[A#Hash]
     ): KVStoreRead[N, List[A#Hash]] = {
-      childToParentColl.read(blockHash).flatMap {
+      getParentBlockHash(blockHash).flatMap {
         case None =>
           // This block doesn't exist in the tree, so our ancestry is whatever we collected so far.
           KVStoreRead[N].pure(acc)
@@ -135,41 +143,42 @@ class BlockStorage[N, A <: Agreement: Block](
     *
     * If either of the blocks is not in the tree, or there's no path between them,
     * return an empty list. This can happen if we have already pruned away the ancestry as well.
-    *
-    * The `maxDistance` parameter can be used to limit the maximum traversal depth;
-    * it's useful with blocks that have a `height` field, where we know up front that
-    * if we have ascended more than N blocks from the descendant and haven't encountered
-    * the ancestor, then we must be on a different branch.
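+    * The traversal is bounded by the height difference between the two
+    * blocks: if the ancestor isn't reached within that many steps, the
+    * blocks are on different branches and the result is empty.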
*/ def getPathFromAncestor( ancestorBlockHash: A#Hash, - descendantBlockHash: A#Hash, - maxDistance: Int = Int.MaxValue + descendantBlockHash: A#Hash ): KVStoreRead[N, List[A#Hash]] = { def loop( blockHash: A#Hash, acc: List[A#Hash], - maxDistance: Int + maxDistance: Long ): KVStoreRead[N, List[A#Hash]] = { if (blockHash == ancestorBlockHash) { KVStoreRead[N].pure(blockHash :: acc) - } else if (maxDistance == 0) { + } else if (maxDistance <= 0) { KVStoreRead[N].pure(Nil) } else { - childToParentColl.read(blockHash).flatMap { + blockMetaColl.read(blockHash).flatMap { case None => KVStoreRead[N].pure(Nil) - case Some(parentBlockHash) => - loop(parentBlockHash, blockHash :: acc, maxDistance - 1) + case Some(meta) => + loop(meta.parentBlockHash, blockHash :: acc, maxDistance - 1) } } } - (contains(ancestorBlockHash), contains(descendantBlockHash)) - .mapN((_, _)) + ( + blockMetaColl.read(ancestorBlockHash), + blockMetaColl.read(descendantBlockHash) + ).mapN((_, _)) .flatMap { - case (true, true) => loop(descendantBlockHash, Nil, maxDistance) - case _ => KVStoreRead[N].pure(Nil) + case (Some(ameta), Some(dmeta)) => + loop( + descendantBlockHash, + Nil, + maxDistance = dmeta.height - ameta.height + ) + case _ => KVStoreRead[N].pure(Nil) } } @@ -269,3 +278,12 @@ class BlockStorage[N, A <: Agreement: Block]( } yield ds } } + +object BlockStorage { + + /** Properties about the block that we frequently need for traversal. */ + case class BlockMeta[A <: Agreement]( + parentBlockHash: A#Hash, + height: Long + ) +} diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutorProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutorProps.scala index 98fbb577..18d1af0e 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutorProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/execution/BlockExecutorProps.scala @@ -169,7 +169,7 @@ object BlockExecutorProps extends Properties("BlockExecutor") { val extension = for { viewNumber <- Gen.posNum[Int].map(ViewNumber(_)) ancestor = tree.last - descendantTree <- genNonEmptyBlockTree(parentId = ancestor.id) + descendantTree <- genNonEmptyBlockTree(parent = ancestor) descendant = descendantTree.last commitQC = QuorumCertificate[TestAgreement]( phase = Phase.Commit, diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala index a7075293..c67477ef 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala @@ -12,7 +12,7 @@ import scala.util.Random object BlockStorageProps extends Properties("BlockStorage") { - case class TestBlock(id: String, parentId: String) { + case class TestBlock(id: String, parentId: String, height: Long) { def isGenesis = parentId.isEmpty } @@ -27,6 +27,7 @@ object BlockStorageProps extends Properties("BlockStorage") { implicit val block = new BlockOps[TestAgreement] { override def blockHash(b: TestBlock) = b.id override def parentBlockHash(b: TestBlock) = b.parentId + override def height(b: Block): Long = b.height override def isValid(b: Block) = true } } @@ -39,14 +40,16 @@ object BlockStorageProps extends Properties("BlockStorage") { 
type Namespace = String object Namespace { val Blocks = "blocks" - val BlockToParent = "block-to-parent" + val BlockMetas = "block-metas" val BlockToChildren = "block-to-children" } object TestBlockStorage extends BlockStorage[Namespace, TestAgreement]( new KVCollection[Namespace, Hash, TestBlock](Namespace.Blocks), - new KVCollection[Namespace, Hash, Hash](Namespace.BlockToParent), + new KVCollection[Namespace, Hash, BlockStorage.BlockMeta[ + TestAgreement + ]](Namespace.BlockMetas), new KVCollection[Namespace, Hash, Set[Hash]](Namespace.BlockToChildren) ) @@ -84,16 +87,14 @@ object BlockStorageProps extends Properties("BlockStorage") { def getPathFromAncestor( ancestorBlockHash: Hash, - descendantBlockHash: Hash, - maxDistance: Int = Int.MaxValue + descendantBlockHash: Hash ) = TestKVStore .compile( TestBlockStorage .getPathFromAncestor( ancestorBlockHash, - descendantBlockHash, - maxDistance + descendantBlockHash ) ) .run(store) @@ -120,16 +121,24 @@ object BlockStorageProps extends Properties("BlockStorage") { Gen.uuid.map(_.toString) /** Generate a block with a given parent, using the next available ID. */ - def genBlock(parentId: Hash): Gen[TestBlock] = + def genBlock(parent: TestBlock): Gen[TestBlock] = genBlockId.map { uuid => - TestBlock(uuid, parentId) + TestBlock(uuid, parentId = parent.id, height = parent.height + 1) } def genBlock: Gen[TestBlock] = - genBlockId.flatMap(genBlock) + for { + id <- genBlockId + parentId <- genBlockId + height <- Gen.posNum[Long] + } yield TestBlock(id, parentId, height) + + // A block we can pass as parent to tree generators so the first block is a + // genesis block with height = 0 and parentId = "". + val preGenesisParent = TestBlock(id = "", parentId = "", height = -1) /** Generate a (possibly empty) block tree. 
*/ - def genBlockTree(parentId: Hash): Gen[List[TestBlock]] = + def genBlockTree(parent: TestBlock): Gen[List[TestBlock]] = for { childCount <- Gen.frequency( 3 -> 0, @@ -139,23 +148,23 @@ object BlockStorageProps extends Properties("BlockStorage") { children <- Gen.listOfN( childCount, { for { - block <- genBlock(parentId) - tree <- genBlockTree(block.id) + block <- genBlock(parent) + tree <- genBlockTree(block) } yield block +: tree } ) } yield children.flatten def genBlockTree: Gen[List[TestBlock]] = - genBlockTree(parentId = "") + genBlockTree(preGenesisParent) - def genNonEmptyBlockTree(parentId: Hash): Gen[List[TestBlock]] = for { - genesis <- genBlock(parentId = parentId) - tree <- genBlockTree(genesis.id) - } yield genesis +: tree + def genNonEmptyBlockTree(parent: TestBlock): Gen[List[TestBlock]] = for { + child <- genBlock(parent) + tree <- genBlockTree(child) + } yield child +: tree def genNonEmptyBlockTree: Gen[List[TestBlock]] = - genNonEmptyBlockTree(parentId = "") + genNonEmptyBlockTree(preGenesisParent) case class TestData( tree: List[TestBlock], @@ -183,14 +192,16 @@ object BlockStorageProps extends Properties("BlockStorage") { def genSubTree = for { tree <- genNonEmptyBlockTree leaf = tree.last - subTree <- genBlockTree(parentId = leaf.id) + subTree <- genBlockTree(parent = leaf) data = TestData(tree ++ subTree) } yield (data, leaf, subTree) property("put") = forAll(genNonExisting) { case (data, block) => val s = data.store.putBlock(block) s(Namespace.Blocks)(block.id) == block - s(Namespace.BlockToParent)(block.id) == block.parentId + s(Namespace.BlockMetas)(block.id) + .asInstanceOf[BlockStorage.BlockMeta[TestAgreement]] + .parentBlockHash == block.parentId } property("put unordered") = forAll { @@ -264,14 +275,14 @@ object BlockStorageProps extends Properties("BlockStorage") { for { prefix <- genNonEmptyBlockTree ancestor = prefix.last - postfix <- genNonEmptyBlockTree(ancestor.id) + postfix <- genNonEmptyBlockTree(ancestor) descendant <- Gen.oneOf(postfix) data = TestData(prefix ++ postfix) nonExisting <- genBlock } yield (data, ancestor, descendant, nonExisting) ) { case (data, ancestor, descendant, nonExisting) => - def getPath(a: TestBlock, d: TestBlock, maxDistance: Int = Int.MaxValue) = - data.store.getPathFromAncestor(a.id, d.id, maxDistance) + def getPath(a: TestBlock, d: TestBlock) = + data.store.getPathFromAncestor(a.id, d.id) def pathExists(a: TestBlock, d: TestBlock) = { val path = getPath(a, d) @@ -293,17 +304,7 @@ object BlockStorageProps extends Properties("BlockStorage") { "fromAtoA" |: pathExists(ancestor, ancestor), "fromDtoD" |: pathExists(descendant, descendant), "fromAtoN" |: pathNotExists(ancestor, nonExisting), - "fromNtoD" |: pathNotExists(nonExisting, descendant), - "maxDistance" |: { - val (a, d, n) = (ancestor, descendant, nonExisting) - val dist = getPath(ancestor, descendant).length - 1 - all( - "fromAtoD maxDistance=dist" |: getPath(a, d, dist).nonEmpty, - "fromAtoD maxDistance=dist-1" |: getPath(a, d, dist - 1).isEmpty, - "fromDtoD maxDistance=0" |: getPath(d, d, 0).nonEmpty, - "fromNtoN maxDistance=0" |: getPath(n, n, 0).isEmpty - ) - } + "fromNtoD" |: pathNotExists(nonExisting, descendant) ) } diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala index c019b32e..dd6596c2 100644 --- 
a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/sync/BlockSynchronizerProps.scala @@ -112,7 +112,7 @@ object BlockSynchronizerProps extends Properties("BlockSynchronizer") { for { ancestorTree <- genNonEmptyBlockTree leaf = ancestorTree.last - descendantTree <- genNonEmptyBlockTree(parentId = leaf.id) + descendantTree <- genNonEmptyBlockTree(parent = leaf) federationSize <- Gen.choose(3, 10) federationKeys = Range(0, federationSize).toVector From a97a1b98e6175b03b1202c2150a8e6ae23141fe9 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Wed, 23 Jun 2021 16:43:04 +0100 Subject: [PATCH 45/48] PM-3419: Generalise BlockStorage into KVTree. (#56) --- .../service/storage/BlockStorage.scala | 288 +---------------- .../service/storage/BlockStorageProps.scala | 12 +- .../iohk/metronome/storage/KVRingBuffer.scala | 5 +- .../io/iohk/metronome/storage/KVTree.scala | 293 ++++++++++++++++++ 4 files changed, 319 insertions(+), 279 deletions(-) create mode 100644 metronome/storage/src/io/iohk/metronome/storage/KVTree.scala diff --git a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala index a2c278a9..4d658e6f 100644 --- a/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala +++ b/metronome/hotstuff/service/src/io/iohk/metronome/hotstuff/service/storage/BlockStorage.scala @@ -1,9 +1,7 @@ package io.iohk.metronome.hotstuff.service.storage -import cats.implicits._ -import io.iohk.metronome.storage.{KVStore, KVStoreRead, KVCollection} +import io.iohk.metronome.storage.{KVCollection, KVTree} import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, Block} -import scala.collection.immutable.Queue /** Storage for blocks that maintains parent-child relationships as well, * to facilitate tree traversal and pruning. @@ -14,276 +12,22 @@ import scala.collection.immutable.Queue */ class BlockStorage[N, A <: Agreement: Block]( blockColl: KVCollection[N, A#Hash, A#Block], - blockMetaColl: KVCollection[N, A#Hash, BlockStorage.BlockMeta[A]], + blockMetaColl: KVCollection[N, A#Hash, KVTree.NodeMeta[A#Hash]], parentToChildrenColl: KVCollection[N, A#Hash, Set[A#Hash]] -) { - import BlockStorage.BlockMeta - - private implicit val kvn = KVStore.instance[N] - private implicit val kvrn = KVStoreRead.instance[N] - - /** Insert a block into the store, and if the parent still exists, - * then add this block to its children. - */ - def put(block: A#Block): KVStore[N, Unit] = { - val blockHash = Block[A].blockHash(block) - val meta = - BlockMeta(Block[A].parentBlockHash(block), Block[A].height(block)) - - blockColl.put(blockHash, block) >> - blockMetaColl.put(blockHash, meta) >> - parentToChildrenColl.alter(meta.parentBlockHash) { maybeChildren => - maybeChildren orElse Set.empty.some map (_ + blockHash) - } - - } - - /** Retrieve a block by hash, if it exists. */ - def get(blockHash: A#Hash): KVStoreRead[N, Option[A#Block]] = - blockColl.read(blockHash) - - /** Check whether a block is present in the tree. */ - def contains(blockHash: A#Hash): KVStoreRead[N, Boolean] = - blockMetaColl.read(blockHash).map(_.isDefined) - - /** Check how many children the block has in the tree. 
    */
-  private def childCount(blockHash: A#Hash): KVStoreRead[N, Int] =
-    parentToChildrenColl.read(blockHash).map(_.fold(0)(_.size))
-
-  /** Check whether the parent of the block is present in the tree. */
-  private def hasParent(blockHash: A#Hash): KVStoreRead[N, Boolean] =
-    blockMetaColl.read(blockHash).flatMap {
-      case None       => KVStoreRead[N].pure(false)
-      case Some(meta) => contains(meta.parentBlockHash)
-    }
-
-  private def getParentBlockHash(
-      blockHash: A#Hash
-  ): KVStoreRead[N, Option[A#Hash]] =
-    blockMetaColl.read(blockHash).map(_.map(_.parentBlockHash))
-
-  /** Check whether it's safe to delete a block.
-    *
-    * A block is safe to delete if doing so doesn't break up the tree
-    * into a forest, in which case we may have blocks we cannot reach
-    * by traversal, leaking space.
-    *
-    * This is true if the block has no children,
-    * or it has no parent and at most one child.
-    */
-  private def canDelete(blockHash: A#Hash): KVStoreRead[N, Boolean] =
-    (hasParent(blockHash), childCount(blockHash)).mapN {
-      case (_, 0)     => true
-      case (false, 1) => true
-      case _          => false
-    }
-
-  /** Delete a block by hash, if doing so wouldn't break the tree;
-    * otherwise do nothing.
-    *
-    * Return `true` if block has been deleted, `false` if not.
-    *
-    * If this is not efficient enough, then move the deletion traversal
-    * logic into this class so it can make sure all the invariants
-    * are maintained, e.g. collect all hashes that can be safely deleted
-    * and then do so without checks.
-    */
-  def delete(blockHash: A#Hash): KVStore[N, Boolean] =
-    canDelete(blockHash).lift.flatMap { ok =>
-      deleteUnsafe(blockHash).whenA(ok).as(ok)
-    }
-
-  /** Delete a block and remove it from any parent-to-child mapping,
-    * without any checking for the tree structure invariants.
-    */
-  def deleteUnsafe(blockHash: A#Hash): KVStore[N, Unit] = {
-    def deleteIfEmpty(maybeChildren: Option[Set[A#Hash]]) =
-      maybeChildren.filter(_.nonEmpty)
-
-    getParentBlockHash(blockHash).lift.flatMap {
-      case None =>
-        KVStore[N].unit
-      case Some(parentHash) =>
-        parentToChildrenColl.alter(parentHash) { maybeChildren =>
-          deleteIfEmpty(maybeChildren.map(_ - blockHash))
-        }
-    } >>
-      blockColl.delete(blockHash) >>
-      blockMetaColl.delete(blockHash) >>
-      // Keep the association from existing children, until the last one is deleted.
-      parentToChildrenColl.alter(blockHash)(deleteIfEmpty)
-  }
-
-  /** Get the ancestor chain of a block from the root,
-    * including the block itself.
-    *
-    * If the block is not in the tree, the result will be empty,
-    * otherwise `head` will be the root of the block tree,
-    * and `last` will be the block itself.
-    */
-  def getPathFromRoot(blockHash: A#Hash): KVStoreRead[N, List[A#Hash]] = {
-    def loop(
-        blockHash: A#Hash,
-        acc: List[A#Hash]
-    ): KVStoreRead[N, List[A#Hash]] = {
-      getParentBlockHash(blockHash).flatMap {
-        case None =>
-          // This block doesn't exist in the tree, so our ancestry is whatever we collected so far.
-          KVStoreRead[N].pure(acc)
-
-        case Some(parentHash) =>
-          // So at least `blockHash` exists in the tree.
-          loop(parentHash, blockHash :: acc)
-      }
-    }
-    loop(blockHash, Nil)
-  }
-
-  /** Get the ancestor chain between two hashes in the chain, if there is one.
-    *
-    * If either of the blocks is not in the tree, or there's no path between them,
-    * return an empty list. This can happen if we have already pruned away the ancestry as well.
- */ - def getPathFromAncestor( - ancestorBlockHash: A#Hash, - descendantBlockHash: A#Hash - ): KVStoreRead[N, List[A#Hash]] = { - def loop( - blockHash: A#Hash, - acc: List[A#Hash], - maxDistance: Long - ): KVStoreRead[N, List[A#Hash]] = { - if (blockHash == ancestorBlockHash) { - KVStoreRead[N].pure(blockHash :: acc) - } else if (maxDistance <= 0) { - KVStoreRead[N].pure(Nil) - } else { - blockMetaColl.read(blockHash).flatMap { - case None => - KVStoreRead[N].pure(Nil) - case Some(meta) => - loop(meta.parentBlockHash, blockHash :: acc, maxDistance - 1) - } - } - } - - ( - blockMetaColl.read(ancestorBlockHash), - blockMetaColl.read(descendantBlockHash) - ).mapN((_, _)) - .flatMap { - case (Some(ameta), Some(dmeta)) => - loop( - descendantBlockHash, - Nil, - maxDistance = dmeta.height - ameta.height - ) - case _ => KVStoreRead[N].pure(Nil) - } - } - - /** Collect all descendants of a block, - * including the block itself. - * - * The result will start with the blocks furthest away, - * so it should be safe to delete them in the same order; - * `last` will be the block itself. - * - * The `skip` parameter can be used to avoid traversing - * branches that we want to keep during deletion. - */ - def getDescendants( - blockHash: A#Hash, - skip: Set[A#Hash] = Set.empty - ): KVStoreRead[N, List[A#Hash]] = { - // BFS traversal. - def loop( - queue: Queue[A#Hash], - acc: List[A#Hash] - ): KVStoreRead[N, List[A#Hash]] = { - queue.dequeueOption match { - case None => - KVStoreRead[N].pure(acc) - - case Some((blockHash, queue)) if skip(blockHash) => - loop(queue, acc) - - case Some((blockHash, queue)) => - parentToChildrenColl.read(blockHash).flatMap { - case None => - // Since we're not inserting an empty child set, - // we can't tell here if the block exists or not. - loop(queue, blockHash :: acc) - case Some(children) => - loop(queue ++ children, blockHash :: acc) - } - } - } - - loop(Queue(blockHash), Nil).flatMap { - case result @ List(`blockHash`) => - result.filterA(contains) - case result => - KVStoreRead[N].pure(result) - } - } - - /** Delete all blocks which are not descendants of a given block, - * making it the new root. - * - * Return the list of deleted block hashes. - */ - def pruneNonDescendants(blockHash: A#Hash): KVStore[N, List[A#Hash]] = - getPathFromRoot(blockHash).lift.flatMap { - case Nil => - KVStore[N].pure(Nil) - - case path @ (rootHash :: _) => - // The safe order to delete blocks would be to go down the main chain - // from the root, delete each non-mainchain child, then the parent, - // then descend on the main chain until we hit `blockHash`. - - // A similar effect can be achieved by collecting all descendants - // of the root, then deleting everything that isn't on the main chain, - // from the children towards the root, and finally the main chain itself, - // going from the root towards the children. - val isMainChain = path.toSet - - for { - deleteables <- getDescendants(rootHash, skip = Set(blockHash)).lift - _ <- deleteables.filterNot(isMainChain).traverse(deleteUnsafe(_)) - _ <- path.init.traverse(deleteUnsafe(_)) - } yield deleteables - } - - /** Remove all blocks in a tree, given by a `blockHash` that's in the tree, - * except perhaps a new root (and its descendants) we want to keep. - * - * This is used to delete an old tree when starting a new that's most likely - * not connected to it, and would otherwise result in a forest. 
- */ - def purgeTree( - blockHash: A#Hash, - keep: Option[A#Hash] - ): KVStore[N, List[A#Hash]] = - getPathFromRoot(blockHash).lift.flatMap { - case Nil => - KVStore[N].pure(Nil) - - case rootHash :: _ => - for { - ds <- getDescendants(rootHash, skip = keep.toSet).lift - // Going from the leaves towards the root. - _ <- ds.reverse.traverse(deleteUnsafe(_)) - } yield ds - } -} +) extends KVTree[N, A#Hash, A#Block]( + blockColl, + blockMetaColl, + parentToChildrenColl + )(BlockStorage.node[A]) object BlockStorage { - - /** Properties about the block that we frequently need for traversal. */ - case class BlockMeta[A <: Agreement]( - parentBlockHash: A#Hash, - height: Long - ) + implicit def node[A <: Agreement: Block]: KVTree.Node[A#Hash, A#Block] = + new KVTree.Node[A#Hash, A#Block] { + override def key(value: A#Block): A#Hash = + Block[A].blockHash(value) + override def parentKey(value: A#Block): A#Hash = + Block[A].parentBlockHash(value) + override def height(value: A#Block): Long = + Block[A].height(value) + } } diff --git a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala index c67477ef..993755d3 100644 --- a/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala +++ b/metronome/hotstuff/service/test/src/io/iohk/metronome/hotstuff/service/storage/BlockStorageProps.scala @@ -1,7 +1,7 @@ package io.iohk.metronome.hotstuff.service.storage import cats.implicits._ -import io.iohk.metronome.storage.{KVCollection, KVStoreState} +import io.iohk.metronome.storage.{KVCollection, KVStoreState, KVTree} import io.iohk.metronome.hotstuff.consensus.basic.{Agreement, Block => BlockOps} import org.scalacheck._ import org.scalacheck.Arbitrary.arbitrary @@ -47,9 +47,9 @@ object BlockStorageProps extends Properties("BlockStorage") { object TestBlockStorage extends BlockStorage[Namespace, TestAgreement]( new KVCollection[Namespace, Hash, TestBlock](Namespace.Blocks), - new KVCollection[Namespace, Hash, BlockStorage.BlockMeta[ - TestAgreement - ]](Namespace.BlockMetas), + new KVCollection[Namespace, Hash, KVTree.NodeMeta[Hash]]( + Namespace.BlockMetas + ), new KVCollection[Namespace, Hash, Set[Hash]](Namespace.BlockToChildren) ) @@ -200,8 +200,8 @@ object BlockStorageProps extends Properties("BlockStorage") { val s = data.store.putBlock(block) s(Namespace.Blocks)(block.id) == block s(Namespace.BlockMetas)(block.id) - .asInstanceOf[BlockStorage.BlockMeta[TestAgreement]] - .parentBlockHash == block.parentId + .asInstanceOf[KVTree.NodeMeta[Hash]] + .parentKey == block.parentId } property("put unordered") = forAll { diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVRingBuffer.scala b/metronome/storage/src/io/iohk/metronome/storage/KVRingBuffer.scala index 414506f3..30ed83ab 100644 --- a/metronome/storage/src/io/iohk/metronome/storage/KVRingBuffer.scala +++ b/metronome/storage/src/io/iohk/metronome/storage/KVRingBuffer.scala @@ -3,7 +3,10 @@ package io.iohk.metronome.storage import cats.implicits._ import scodec.{Decoder, Encoder, Codec} -/** Storing the last N items inserted into a collection. */ +/** Storing the last N items inserted into a collection. + * + * This component is currently tested through `LedgerStorage`. 
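+  *
+  * A sketch of how a concrete store builds on it, mirroring the
+  * `LedgerStorage` construction used by the tests later in this series
+  * (the namespace values and history size are illustrative only):
+  *
+  * {{{
+  * val ledgerStorage = new LedgerStorage[Namespace](
+  *   new KVCollection[Namespace, Ledger.Hash, Ledger](Namespace.Ledgers),
+  *   Namespace.LedgerMeta,
+  *   maxHistorySize = 10
+  * )
+  * }}}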
+ */
 class KVRingBuffer[N, K, V](
     coll: KVCollection[N, K, V],
     metaNamespace: N,
diff --git a/metronome/storage/src/io/iohk/metronome/storage/KVTree.scala b/metronome/storage/src/io/iohk/metronome/storage/KVTree.scala
new file mode 100644
index 00000000..a3a9d91b
--- /dev/null
+++ b/metronome/storage/src/io/iohk/metronome/storage/KVTree.scala
@@ -0,0 +1,293 @@
+package io.iohk.metronome.storage
+
+import cats.implicits._
+import scala.collection.immutable.Queue
+
+/** Storage for nodes that maintains parent-child relationships as well,
+  * to facilitate tree traversal and pruning.
+  *
+  * It is assumed that the application maintains some pointers into the tree
+  * where it can start traversing from, e.g. the last Commit Quorum Certificate
+  * would point at a block hash which would serve as the entry point.
+  *
+  * This component is currently tested through `BlockStorage`.
+  */
+class KVTree[N, K, V](
+    nodeColl: KVCollection[N, K, V],
+    nodeMetaColl: KVCollection[N, K, KVTree.NodeMeta[K]],
+    parentToChildrenColl: KVCollection[N, K, Set[K]]
+)(implicit ev: KVTree.Node[K, V]) {
+  import KVTree.NodeMeta
+
+  private implicit val kvn  = KVStore.instance[N]
+  private implicit val kvrn = KVStoreRead.instance[N]
+
+  /** Insert a node into the store, and if the parent still exists,
+    * then add this node to its children.
+    */
+  def put(value: V): KVStore[N, Unit] = {
+    val nodeKey = ev.key(value)
+    val meta =
+      NodeMeta(ev.parentKey(value), ev.height(value))
+
+    nodeColl.put(nodeKey, value) >>
+      nodeMetaColl.put(nodeKey, meta) >>
+      parentToChildrenColl.alter(meta.parentKey) { maybeChildren =>
+        maybeChildren orElse Set.empty.some map (_ + nodeKey)
+      }
+
+  }
+
+  /** Retrieve a node by key, if it exists. */
+  def get(key: K): KVStoreRead[N, Option[V]] =
+    nodeColl.read(key)
+
+  /** Check whether a node is present in the tree. */
+  def contains(key: K): KVStoreRead[N, Boolean] =
+    nodeMetaColl.read(key).map(_.isDefined)
+
+  /** Check how many children the node has in the tree. */
+  private def childCount(key: K): KVStoreRead[N, Int] =
+    parentToChildrenColl.read(key).map(_.fold(0)(_.size))
+
+  /** Check whether the parent of the node is present in the tree. */
+  private def hasParent(key: K): KVStoreRead[N, Boolean] =
+    nodeMetaColl.read(key).flatMap {
+      case None       => KVStoreRead[N].pure(false)
+      case Some(meta) => contains(meta.parentKey)
+    }
+
+  private def getParentKey(
+      key: K
+  ): KVStoreRead[N, Option[K]] =
+    nodeMetaColl.read(key).map(_.map(_.parentKey))
+
+  /** Check whether it's safe to delete a node.
+    *
+    * A node is safe to delete if doing so doesn't break up the tree
+    * into a forest, in which case we may have nodes we cannot reach
+    * by traversal, leaking space.
+    *
+    * This is true if the node has no children,
+    * or it has no parent and at most one child.
+    */
+  private def canDelete(key: K): KVStoreRead[N, Boolean] =
+    (hasParent(key), childCount(key)).mapN {
+      case (_, 0)     => true
+      case (false, 1) => true
+      case _          => false
+    }
+
+  /** Delete a node by key, if doing so wouldn't break the tree;
+    * otherwise do nothing.
+    *
+    * Return `true` if the node has been deleted, `false` if not.
+    *
+    * If this is not efficient enough, then move the deletion traversal
+    * logic into this class so it can make sure all the invariants
+    * are maintained, e.g. collect all keys that can be safely deleted
+    * and then do so without checks.
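+    *
+    * As a worked illustration of the safety rule above (not part of the
+    * API): in a tree `a -> b -> c`, deleting `c` is safe (no children),
+    * and deleting `a` is safe (no parent, one child, `b` becomes the new
+    * root), but deleting `b` would orphan `c`, so it is refused:
+    *
+    * {{{
+    * // assuming aKey, bKey, cKey are the keys of the inserted nodes
+    * tree.put(a) >> tree.put(b) >> tree.put(c) >>
+    *   tree.delete(bKey) // yields false when run; nothing is removed
+    * }}}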
+ */
+  def delete(key: K): KVStore[N, Boolean] =
+    canDelete(key).lift.flatMap { ok =>
+      deleteUnsafe(key).whenA(ok).as(ok)
+    }
+
+  /** Delete a node and remove it from any parent-to-child mapping,
+    * without any checking for the tree structure invariants.
+    */
+  def deleteUnsafe(key: K): KVStore[N, Unit] = {
+    def deleteIfEmpty(maybeChildren: Option[Set[K]]) =
+      maybeChildren.filter(_.nonEmpty)
+
+    getParentKey(key).lift.flatMap {
+      case None =>
+        KVStore[N].unit
+      case Some(parentKey) =>
+        parentToChildrenColl.alter(parentKey) { maybeChildren =>
+          deleteIfEmpty(maybeChildren.map(_ - key))
+        }
+    } >>
+      nodeColl.delete(key) >>
+      nodeMetaColl.delete(key) >>
+      // Keep the association from existing children, until the last one is deleted.
+      parentToChildrenColl.alter(key)(deleteIfEmpty)
+  }
+
+  /** Get the ancestor chain of a node from the root, including the node itself.
+    *
+    * If the node is not in the tree, the result will be empty,
+    * otherwise `head` will be the root of the node tree,
+    * and `last` will be the node itself.
+    */
+  def getPathFromRoot(key: K): KVStoreRead[N, List[K]] = {
+    def loop(
+        key: K,
+        acc: List[K]
+    ): KVStoreRead[N, List[K]] = {
+      getParentKey(key).flatMap {
+        case None =>
+          // This node doesn't exist in the tree, so our ancestry is whatever we collected so far.
+          KVStoreRead[N].pure(acc)
+
+        case Some(parentKey) =>
+          // So at least `key` exists in the tree.
+          loop(parentKey, key :: acc)
+      }
+    }
+    loop(key, Nil)
+  }
+
+  /** Get the ancestor chain between two keys in the chain, if there is one.
+    *
+    * If either of the nodes is not in the tree, or there's no path between them,
+    * return an empty list. This can happen if we have already pruned away the ancestry as well.
+    */
+  def getPathFromAncestor(
+      ancestorKey: K,
+      descendantKey: K
+  ): KVStoreRead[N, List[K]] = {
+    def loop(
+        key: K,
+        acc: List[K],
+        maxDistance: Long
+    ): KVStoreRead[N, List[K]] = {
+      if (key == ancestorKey) {
+        KVStoreRead[N].pure(key :: acc)
+      } else if (maxDistance <= 0) {
+        KVStoreRead[N].pure(Nil)
+      } else {
+        nodeMetaColl.read(key).flatMap {
+          case None =>
+            KVStoreRead[N].pure(Nil)
+          case Some(meta) =>
+            loop(meta.parentKey, key :: acc, maxDistance - 1)
+        }
+      }
+    }
+
+    (
+      nodeMetaColl.read(ancestorKey),
+      nodeMetaColl.read(descendantKey)
+    ).mapN((_, _))
+      .flatMap {
+        case (Some(ameta), Some(dmeta)) =>
+          loop(
+            descendantKey,
+            Nil,
+            maxDistance = dmeta.height - ameta.height
+          )
+        case _ => KVStoreRead[N].pure(Nil)
+      }
+  }
+
+  /** Collect all descendants of a node, including the node itself.
+    *
+    * The result will start with the nodes furthest away,
+    * so it should be safe to delete them in the same order;
+    * `last` will be the node itself.
+    *
+    * The `skip` parameter can be used to avoid traversing
+    * branches that we want to keep during deletion.
+    */
+  def getDescendants(
+      key: K,
+      skip: Set[K] = Set.empty
+  ): KVStoreRead[N, List[K]] = {
+    // BFS traversal.
+    def loop(
+        queue: Queue[K],
+        acc: List[K]
+    ): KVStoreRead[N, List[K]] = {
+      queue.dequeueOption match {
+        case None =>
+          KVStoreRead[N].pure(acc)
+
+        case Some((key, queue)) if skip(key) =>
+          loop(queue, acc)
+
+        case Some((key, queue)) =>
+          parentToChildrenColl.read(key).flatMap {
+            case None =>
+              // Since we're not inserting an empty child set,
+              // we can't tell here if the node exists or not.
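+              // For example, after put(a) and put(b) where b's parent
+              // is a, the child set of b reads as None (it is a leaf),
+              // exactly as it would for a key that was never inserted.
+              // The filterA(contains) check below resolves this
+              // ambiguity for the single-node result.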
loop(queue, key :: acc)
+            case Some(children) =>
+              loop(queue ++ children, key :: acc)
+          }
+      }
+    }
+
+    loop(Queue(key), Nil).flatMap {
+      case result @ List(`key`) =>
+        result.filterA(contains)
+      case result =>
+        KVStoreRead[N].pure(result)
+    }
+  }
+
+  /** Delete all nodes which are not descendants of a given node, making it the new root.
+    *
+    * Return the list of deleted node keys.
+    */
+  def pruneNonDescendants(key: K): KVStore[N, List[K]] =
+    getPathFromRoot(key).lift.flatMap {
+      case Nil =>
+        KVStore[N].pure(Nil)
+
+      case path @ (rootHash :: _) =>
+        // The safe order to delete nodes would be to go down the main chain
+        // from the root, delete each non-mainchain child, then the parent,
+        // then descend on the main chain until we hit `key`.
+
+        // A similar effect can be achieved by collecting all descendants
+        // of the root, then deleting everything that isn't on the main chain,
+        // from the children towards the root, and finally the main chain itself,
+        // going from the root towards the children.
+        val isMainChain = path.toSet
+
+        for {
+          deleteables <- getDescendants(rootHash, skip = Set(key)).lift
+          _ <- deleteables.filterNot(isMainChain).traverse(deleteUnsafe(_))
+          _ <- path.init.traverse(deleteUnsafe(_))
+        } yield deleteables
+    }
+
+  /** Remove all nodes in a tree, given by a key that's in the tree,
+    * except perhaps a new root (and its descendants) we want to keep.
+    *
+    * This is used to delete an old tree when starting a new one that's most likely
+    * not connected to it, and would otherwise result in a forest.
+    */
+  def purgeTree(
+      key: K,
+      keep: Option[K]
+  ): KVStore[N, List[K]] =
+    getPathFromRoot(key).lift.flatMap {
+      case Nil =>
+        KVStore[N].pure(Nil)
+
+      case rootHash :: _ =>
+        for {
+          ds <- getDescendants(rootHash, skip = keep.toSet).lift
+          // Going from the leaves towards the root.
+          _ <- ds.reverse.traverse(deleteUnsafe(_))
+        } yield ds
+    }
+}
+
+object KVTree {
+
+  /** Type class for the node-like values stored in the tree. */
+  trait Node[K, V] {
+    def key(value: V): K
+    def parentKey(value: V): K
+    def height(value: V): Long
+  }
+
+  /** Properties about the nodes that we frequently need for traversal.
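+    *
+    * They are derived from the stored value via the `Node` type class;
+    * a minimal hypothetical instance (purely illustrative, the real one
+    * for blocks lives in `BlockStorage`) could look like:
+    *
+    * {{{
+    * case class Item(id: String, parent: String, height: Long)
+    *
+    * implicit val itemNode: KVTree.Node[String, Item] =
+    *   new KVTree.Node[String, Item] {
+    *     def key(value: Item): String       = value.id
+    *     def parentKey(value: Item): String = value.parent
+    *     def height(value: Item): Long      = value.height
+    *   }
+    * }}}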
*/ + case class NodeMeta[K]( + parentKey: K, + height: Long + ) +} From 9bab23f13ffd7ecdf0739666131904d38ac5810d Mon Sep 17 00:00:00 2001 From: Radek Tkaczyk Date: Fri, 14 May 2021 10:28:34 +0200 Subject: [PATCH 46/48] [PM-3108] update ledger [PM-3108] use parent ledger for validation; keep descendants in ledgerTree; minor improvements --- build.sc | 5 +- .../checkpointing/models/Ledger.scala | 9 +- .../models/ArbitraryInstances.scala | 23 +- .../checkpointing/models/LedgerProps.scala | 5 +- .../service/CheckpointingService.scala | 239 +++++++++++ .../service/CheckpointingServiceProps.scala | 405 ++++++++++++++++++ 6 files changed, 676 insertions(+), 10 deletions(-) create mode 100644 metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/CheckpointingService.scala create mode 100644 metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/CheckpointingServiceProps.scala diff --git a/build.sc b/build.sc index 1888740a..02ccaf6f 100644 --- a/build.sc +++ b/build.sc @@ -336,7 +336,10 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { object test extends TestModule { override def moduleDeps: Seq[JavaModule] = - super.moduleDeps ++ Seq(checkpointing.models.test) + super.moduleDeps ++ Seq( + checkpointing.models.test, + hotstuff.service.test + ) } } diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Ledger.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Ledger.scala index f73611c3..3c053010 100644 --- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Ledger.scala +++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/Ledger.scala @@ -1,7 +1,5 @@ package io.iohk.metronome.checkpointing.models -import io.iohk.metronome.core.Validated - /** Current state of the ledger after applying all previous blocks. * * Basically it's the last checkpoint, plus any accumulated proposer blocks @@ -21,8 +19,8 @@ case class Ledger( * by this point, so we know for example that the new checkpoint is * a valid extension of the previous one. 
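     *
     * A sketch of the intended semantics, with illustrative values:
     *
     * {{{
     * val l1 = ledger.update(proposerBlockA)   // appends A, unless already present
     * val l2 = l1.update(proposerBlockA)       // unchanged, duplicates are ignored
     * val l3 = l2.update(checkpointCandidateC) // candidate C becomes the last
     *                                          // checkpoint, proposer blocks clear
     * }}}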
*/ - def update(transaction: Validated[Transaction]): Ledger = - (transaction: Transaction) match { + def update(transaction: Transaction): Ledger = + transaction match { case t @ Transaction.ProposerBlock(_) => if (proposerBlocks.contains(t)) this @@ -32,6 +30,9 @@ case class Ledger( case t @ Transaction.CheckpointCandidate(_) => Ledger(Some(t), Vector.empty) } + + def update(transactions: Iterable[Transaction]): Ledger = + transactions.foldLeft(this)(_ update _) } object Ledger extends RLPHashCompanion[Ledger]()(RLPCodecs.rlpLedger) { diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala index 03058f0b..f4e092ea 100644 --- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/ArbitraryInstances.scala @@ -4,8 +4,11 @@ import cats.data.NonEmptyList import io.iohk.ethereum.crypto.ECDSASignature import io.iohk.metronome.checkpointing.CheckpointingAgreement import io.iohk.metronome.crypto.hash.Hash -import io.iohk.metronome.hotstuff.consensus.basic.Phase -import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate +import io.iohk.metronome.hotstuff.consensus.basic.{ + Phase, + QuorumCertificate, + VotingPhase +} import io.iohk.metronome.hotstuff.consensus.ViewNumber import org.scalacheck._ import org.scalacheck.Arbitrary.arbitrary @@ -97,6 +100,22 @@ object ArbitraryInstances } yield ECDSASignature(r, s, v) } + implicit val arbQuorumCertificate + : Arbitrary[QuorumCertificate[CheckpointingAgreement]] = + Arbitrary { + for { + phase <- arbitrary[VotingPhase] + viewNumber <- arbitrary[ViewNumber] + blockHash <- arbitrary[Block.Header.Hash] + signature <- arbitrary[CheckpointingAgreement.GSig] + } yield QuorumCertificate[CheckpointingAgreement]( + phase, + viewNumber, + blockHash, + GroupSignature(signature) + ) + } + implicit val arbCheckpointCertificate: Arbitrary[CheckpointCertificate] = Arbitrary { for { diff --git a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/LedgerProps.scala b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/LedgerProps.scala index eca124c0..f32ef0a5 100644 --- a/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/LedgerProps.scala +++ b/metronome/checkpointing/models/test/src/io/iohk/metronome/checkpointing/models/LedgerProps.scala @@ -1,6 +1,5 @@ package io.iohk.metronome.checkpointing.models -import io.iohk.metronome.core.Validated import org.scalacheck._ import org.scalacheck.Prop.forAll @@ -8,7 +7,7 @@ object LedgerProps extends Properties("Ledger") { import ArbitraryInstances._ property("update") = forAll { (ledger: Ledger, transaction: Transaction) => - val updated = ledger.update(Validated[Transaction](transaction)) + val updated = ledger.update(transaction) transaction match { case _: Transaction.ProposerBlock @@ -21,7 +20,7 @@ object LedgerProps extends Properties("Ledger") { updated.maybeLastCheckpoint == ledger.maybeLastCheckpoint case _: Transaction.CheckpointCandidate => - updated.maybeLastCheckpoint == Some(transaction) && + updated.maybeLastCheckpoint.contains(transaction) && updated.proposerBlocks.isEmpty } } diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/CheckpointingService.scala 
b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/CheckpointingService.scala new file mode 100644 index 00000000..0e0b8e7e --- /dev/null +++ b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/CheckpointingService.scala @@ -0,0 +1,239 @@ +package io.iohk.metronome.checkpointing.service + +import cats.data.{NonEmptyList, NonEmptyVector, OptionT} +import cats.effect.concurrent.Ref +import cats.effect.{Concurrent, Resource, Sync} +import cats.implicits._ +import io.iohk.metronome.checkpointing.CheckpointingAgreement +import io.iohk.metronome.checkpointing.models.{ + Block, + CheckpointCertificate, + Ledger +} +import io.iohk.metronome.checkpointing.service.CheckpointingService.LedgerTree +import io.iohk.metronome.checkpointing.service.storage.LedgerStorage +import io.iohk.metronome.crypto.ECPublicKey +import io.iohk.metronome.hotstuff.consensus.basic.{Phase, QuorumCertificate} +import io.iohk.metronome.hotstuff.service.ApplicationService +import io.iohk.metronome.hotstuff.service.storage.{ + BlockStorage, + ViewStateStorage +} +import io.iohk.metronome.storage.KVStoreRunner + +import scala.annotation.tailrec + +class CheckpointingService[F[_]: Sync, N]( + ledgerTree: Ref[F, LedgerTree], + lastExecutedHeader: Ref[F, Block.Header], + ledgerStorage: LedgerStorage[N], + blockStorage: BlockStorage[N, CheckpointingAgreement] +)(implicit storeRunner: KVStoreRunner[F, N]) + extends ApplicationService[F, CheckpointingAgreement] { + + override def createBlock( + highQC: QuorumCertificate[CheckpointingAgreement] + ): F[Option[Block]] = ??? + + override def validateBlock(block: Block): F[Option[Boolean]] = { + val ledgers = for { + nextLedger <- OptionT(projectLedger(block)) + tree <- OptionT.liftF(ledgerTree.get) + prevLedger <- tree.get(block.header.parentHash).map(_._1).toOptionT[F] + } yield (prevLedger, nextLedger) + + ledgers.value.flatMap { + case Some((prevLedger, nextLedger)) + if nextLedger.hash == block.header.postStateHash => + validateTransactions(block.body, prevLedger) + + case _ => false.some.pure[F] + } + } + + private def validateTransactions( + body: Block.Body, + ledger: Ledger + ): F[Option[Boolean]] = { + //TODO: Validate transactions PM-3131/3132 + true.some.pure[F] + } + + override def executeBlock( + block: Block, + commitQC: QuorumCertificate[CheckpointingAgreement], + commitPath: NonEmptyList[Block.Hash] + ): F[Boolean] = { + require(commitQC.phase == Phase.Commit, "Commit QC required") + projectLedger(block).flatMap { + case Some(ledger) => + if (block.hash != commitQC.blockHash) + saveLedger(block.header, ledger).as(true) + else + for { + //TODO: PM-3110: + // chkpOpt <- constructCheckpoint(ledger, commitQC) + _ <- saveLedger(block.header, ledger) + //TODO: PM-3110: + // _ <- chkpOpt.map(pushCheckpoint).getOrElse(().pure[F]) + } yield true + + case None => + Sync[F].raiseError( + new IllegalStateException(s"Could not execute block: ${block.hash}") + ) + } + } + + private def projectLedger(block: Block): F[Option[Ledger]] = { + (for { + ledgers <- ledgerTree.get + execHeight <- lastExecutedHeader.get.map(_.height) + } yield { + def loop(block: Block): OptionT[F, Ledger] = { + def doUpdate(ledger: Ledger) = + OptionT.liftF(updateLedgerByBlock(ledger, block)) + + ledgers.get(block.header.parentHash) match { + case Some((oldLedger, _)) => + doUpdate(oldLedger) + + case None if block.header.height <= execHeight => + OptionT.none + + case None => + for { + parent <- OptionT(getBlock(block.header.parentHash)) + oldLedger <- 
loop(parent) + newLedger <- doUpdate(oldLedger) + } yield newLedger + } + } + + ledgers + .get(block.hash) + .map(_._1) + .toOptionT[F] + .orElse(loop(block)) + .value + }).flatten + } + + private def updateLedgerByBlock( + oldLedger: Ledger, + block: Block + ): F[Ledger] = { + val newLedger = oldLedger.update(block.body.transactions) + ledgerTree + .update { tree => + if (tree.contains(block.header.parentHash)) + tree + (block.hash -> (newLedger, block.header)) + else + tree + } + .as(newLedger) + } + + private def getBlock(hash: Block.Hash): F[Option[Block]] = + storeRunner.runReadOnly(blockStorage.get(hash)) + + private def saveLedger(header: Block.Header, ledger: Ledger): F[Unit] = { + storeRunner.runReadWrite { + ledgerStorage.put(ledger) + } >> + ledgerTree.update(clearLedgerTree(header, ledger)) >> + lastExecutedHeader.set(header) + } + + /** Makes the `commitHeader` and the associated 'ledger' the root of the tree, + * while retaining any descendants of the `commitHeader` + */ + private def clearLedgerTree(commitHeader: Block.Header, ledger: Ledger)( + ledgerTree: LedgerTree + ): LedgerTree = { + + @tailrec + def loop( + oldTree: LedgerTree, + newTree: LedgerTree, + height: Long + ): LedgerTree = + if (oldTree.isEmpty) newTree + else { + val (higherLevels, currentLevel) = oldTree.partition { + case (_, (_, hd)) => hd.height > height + } + val children = currentLevel.filter { case (_, (_, hd)) => + newTree.contains(hd.parentHash) + } + loop(higherLevels, newTree ++ children, height + 1) + } + + loop( + ledgerTree.filter { case (_, (_, hd)) => + hd.height > commitHeader.height + }, + Map(commitHeader.hash -> (ledger, commitHeader)), + commitHeader.height + 1 + ) + } + + private def constructCheckpoint( + ledger: Ledger, + commitQC: QuorumCertificate[CheckpointingAgreement] + ): F[Option[CheckpointCertificate]] = + ??? //TODO: PM-3110 + + private def pushCheckpoint(checkpoint: CheckpointCertificate): F[Unit] = + ??? //TODO: PM-3137 + + override def syncState( + sources: NonEmptyVector[ECPublicKey], + block: Block + ): F[Boolean] = ??? 
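+
+  // A worked illustration of `clearLedgerTree` above (illustrative values):
+  // given a tree A(h=1) -> B(h=2) -> C(h=3) with a fork A -> B'(h=2),
+  // committing B re-roots the tree at B. Starting from Map(B -> ...), the
+  // loop sweeps level by level above the commit height and keeps only
+  // entries whose parent already survived, so C is retained while A and
+  // B' are dropped.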
+} + +object CheckpointingService { + type LedgerTree = Map[Block.Hash, (Ledger, Block.Header)] + + def apply[F[_]: Concurrent, N]( + ledgerStorage: LedgerStorage[N], + blockStorage: BlockStorage[N, CheckpointingAgreement], + viewStateStorage: ViewStateStorage[N, CheckpointingAgreement] + )(implicit + storeRunner: KVStoreRunner[F, N] + ): Resource[F, CheckpointingService[F, N]] = { + val lastExecuted: F[(Block, Ledger)] = + storeRunner.runReadOnly { + val query = for { + blockHash <- OptionT.liftF( + viewStateStorage.getLastExecutedBlockHash + ) + block <- OptionT(blockStorage.get(blockHash)) + //a genesis (empty) state should be present in LedgerStorage on first run + ledger <- OptionT(ledgerStorage.get(block.header.postStateHash)) + } yield (block, ledger) + query.value + } >>= { + _.toOptionT[F].getOrElseF { + Sync[F].raiseError( + new IllegalStateException("Last executed block/state not found") + ) + } + } + + val service = for { + (block, ledger) <- lastExecuted + ledgerTree <- Ref.of(Map(block.hash -> (ledger, block.header))) + lastExec <- Ref.of(block.header) + } yield new CheckpointingService[F, N]( + ledgerTree, + lastExec, + ledgerStorage, + blockStorage + ) + + Resource.liftF(service) + } + +} diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/CheckpointingServiceProps.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/CheckpointingServiceProps.scala new file mode 100644 index 00000000..7c81ff4c --- /dev/null +++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/CheckpointingServiceProps.scala @@ -0,0 +1,405 @@ +package io.iohk.metronome.checkpointing.service + +import cats.data.NonEmptyList +import cats.effect.Resource +import cats.effect.concurrent.Ref +import cats.implicits._ +import io.iohk.metronome.checkpointing.CheckpointingAgreement +import io.iohk.metronome.checkpointing.models.Block.{Hash, Header} +import io.iohk.metronome.checkpointing.models.{ + ArbitraryInstances, + Block, + Ledger +} +import io.iohk.metronome.checkpointing.service.CheckpointingService.LedgerTree +import io.iohk.metronome.checkpointing.service.storage.LedgerStorage +import io.iohk.metronome.checkpointing.service.storage.LedgerStorageProps.{ + neverUsedCodec, + Namespace => LedgerNamespace +} +import io.iohk.metronome.hotstuff.consensus.basic.Phase.Commit +import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate +import io.iohk.metronome.hotstuff.service.storage.BlockStorage +import io.iohk.metronome.hotstuff.service.storage.BlockStorageProps.{ + Namespace => BlockNamespace +} +import io.iohk.metronome.storage.{ + InMemoryKVStore, + KVCollection, + KVStoreRunner, + KVStoreState, + KVTree +} +import monix.eval.Task +import monix.execution.Scheduler +import org.scalacheck.Arbitrary.arbitrary +import org.scalacheck.Prop.{all, classify, forAll, forAllNoShrink, propBoolean} +import org.scalacheck.{Gen, Prop, Properties} + +import scala.concurrent.duration._ +import scala.util.Random + +class CheckpointingServiceProps extends Properties("CheckpointingService") { + + type Namespace = String + + case class TestResources( + checkpointingService: CheckpointingService[Task, Namespace], + ledgerStorage: LedgerStorage[Namespace], + blockStorage: BlockStorage[Namespace, CheckpointingAgreement], + store: KVStoreRunner[Task, Namespace], + ledgerTreeRef: Ref[Task, LedgerTree] + ) + + case class TestFixture( + initialBlock: Block, + initialLedger: Ledger, + batch: List[Block], + commitQC: 
QuorumCertificate[CheckpointingAgreement] + ) { + val resources: Resource[Task, TestResources] = { + val ledgerStorage = + new LedgerStorage[Namespace]( + new KVCollection[Namespace, Ledger.Hash, Ledger]( + LedgerNamespace.Ledgers + ), + LedgerNamespace.LedgerMeta, + maxHistorySize = 10 + ) + + val blockStorage = new BlockStorage[Namespace, CheckpointingAgreement]( + new KVCollection[Namespace, Block.Hash, Block](BlockNamespace.Blocks), + new KVCollection[Namespace, Block.Hash, KVTree.NodeMeta[Hash]]( + BlockNamespace.BlockMetas + ), + new KVCollection[Namespace, Block.Hash, Set[Block.Hash]]( + BlockNamespace.BlockToChildren + ) + ) + + implicit val store = InMemoryKVStore[Task, Namespace]( + Ref.unsafe[Task, KVStoreState[Namespace]#Store](Map.empty) + ) + + Resource.liftF { + for { + _ <- store.runReadWrite { + ledgerStorage.put(initialLedger.hash, initialLedger) >> + blockStorage.put(initialBlock) + } + + ledgerTree <- Ref.of[Task, LedgerTree]( + Map(initialBlock.hash -> (initialLedger, initialBlock.header)) + ) + lastExec <- Ref.of[Task, Header](initialBlock.header) + + service = new CheckpointingService[Task, Namespace]( + ledgerTree, + lastExec, + ledgerStorage, + blockStorage + ) + + } yield TestResources( + service, + ledgerStorage, + blockStorage, + store, + ledgerTree + ) + } + } + + // not used in the impl so a senseless value + val commitPath = NonEmptyList.one(initialBlock.header.parentHash) + + val allTransactions = batch.flatMap(_.body.transactions) + val finalLedger = initialLedger.update(allTransactions) + } + + object TestFixture { + import ArbitraryInstances._ + + def gen(minChain: Int = 1): Gen[TestFixture] = { + for { + block <- arbitrary[Block] + ledger = Ledger.empty.update(block.body.transactions) + batch <- genBlockChain(block, ledger, min = minChain) + commitQC <- genCommitQC(batch.last) + } yield TestFixture(block, ledger, batch, commitQC) + } + + def genBlockChain( + parent: Block, + initialLedger: Ledger, + min: Int = 1, + max: Int = 6 + ): Gen[List[Block]] = { + for { + n <- Gen.choose(min, max) + blocks <- Gen.listOfN(n, arbitrary[Block]) + } yield { + def link( + parent: Block, + prevLedger: Ledger, + chain: List[Block] + ): List[Block] = chain match { + case b :: bs => + val nextLedger = prevLedger.update(b.body.transactions) + val header = b.header.copy( + parentHash = parent.hash, + height = parent.header.height + 1, + postStateHash = nextLedger.hash + ) + val linked = Block.makeUnsafe(header, b.body) + linked :: link(linked, nextLedger, bs) + case Nil => + Nil + } + + link(parent, initialLedger, blocks) + } + } + + def genCommitQC( + block: Block + ): Gen[QuorumCertificate[CheckpointingAgreement]] = + arbitrary[QuorumCertificate[CheckpointingAgreement]].map { + _.copy[CheckpointingAgreement](phase = Commit, blockHash = block.hash) + } + } + + def run(fixture: TestFixture)(test: TestResources => Task[Prop]): Prop = { + import Scheduler.Implicits.global + + fixture.resources.use(test).runSyncUnsafe(timeout = 5.seconds) + } + + property("normal execution") = forAll(TestFixture.gen()) { fixture => + run(fixture) { res => + import fixture._ + import res._ + + val execution = batch + .map(checkpointingService.executeBlock(_, commitQC, commitPath)) + .sequence + + val ledgerStorageCheck = store.runReadOnly { + ledgerStorage.get(finalLedger.hash) + } + + for { + results <- execution + persistedLedger <- ledgerStorageCheck + ledgerTree <- ledgerTreeRef.get + } yield { + val ledgerTreeUpdated = ledgerTree == Map( + batch.last.hash -> (finalLedger, 
batch.last.header) + ) + + all( + "execution successful" |: results.reduce(_ && _), + "ledger persisted" |: persistedLedger.contains(finalLedger), + "ledgerTree updated" |: ledgerTreeUpdated + ) + } + } + } + + property("failed execution - no parent") = + forAll(TestFixture.gen(minChain = 2)) { fixture => + run(fixture) { res => + import fixture._ + import res._ + + // parent block or its state is not saved so this must fail + val execution = batch.tail + .map(checkpointingService.executeBlock(_, commitQC, commitPath)) + .sequence + + execution.attempt.map { + case Left(ex: IllegalStateException) => + ex.getMessage.contains("Could not execute block") + case _ => false + } + } + } + + property("failed execution - height below last executed") = + forAll(TestFixture.gen(minChain = 2)) { fixture => + run(fixture) { res => + import fixture._ + import res._ + + val execution = batch + .map(checkpointingService.executeBlock(_, commitQC, commitPath)) + .sequence + + // repeated execution must fail because we're trying to execute a block of lower height + // than the last executed block + execution >> + execution.attempt.map { + case Left(ex: IllegalStateException) => + ex.getMessage.contains("Could not execute block") + case _ => false + } + } + } + + //TODO: Validate transactions PM-3131/3132 + // use a mocked interpreter client that always evaluates blocks as valid + property("parallel validation") = forAll(TestFixture.gen(minChain = 4)) { + fixture => + run(fixture) { res => + import fixture._ + import res._ + + // validation in random order so blocks need to be persisted first + val persistBlocks = store.runReadWrite { + batch.map(blockStorage.put).sequence + } + + def validation( + validating: Ref[Task, Boolean], + achievedPar: Ref[Task, Boolean] + ) = + Task.parSequence { + Random + .shuffle(batch) + .map(b => + for { + v <- validating.getAndSet(true) + _ <- achievedPar.update(_ || v) + r <- checkpointingService.validateBlock(b) + _ <- validating.set(false) + } yield r.getOrElse(false) + ) + } + + for { + _ <- persistBlocks + + // used to make sure that parallelism was achieved + validating <- Ref[Task].of(false) + achievedPar <- Ref[Task].of(false) + + result <- validation(validating, achievedPar) + par <- achievedPar.get + ledgerTree <- ledgerTreeRef.get + } yield { + val ledgerTreeUpdated = batch.forall(b => ledgerTree.contains(b.hash)) + + classify(par, "parallelism achieved") { + all( + "validation successful" |: result.forall(identity), + "ledgerTree updated" |: ledgerTreeUpdated + ) + } + } + } + } + + //TODO: Validate transactions PM-3131/3132 + // use a mocked interpreter client that always evaluates blocks as valid + property("execution parallel to validation") = forAllNoShrink { + for { + f <- TestFixture.gen(minChain = 4) + ext <- TestFixture.genBlockChain(f.batch.last, f.finalLedger) + } yield (f, f.batch ++ ext) + } { case (fixture, validationBatch) => + run(fixture) { res => + import fixture._ + import res._ + + // validation in random order so blocks need to be persisted first + val persistBlocks = store.runReadWrite { + validationBatch.map(blockStorage.put).sequence + } + + def validation( + validating: Ref[Task, Boolean], + executing: Ref[Task, Boolean], + achievedPar: Ref[Task, Boolean] + ) = + Random + .shuffle(validationBatch) + .map(b => + for { + _ <- validating.set(true) + e <- executing.get + _ <- achievedPar.update(_ || e) + r <- checkpointingService.validateBlock(b) + _ <- validating.set(false) + } yield (r.getOrElse(false), b.header.height) + ) + .sequence + + def 
execution( + validating: Ref[Task, Boolean], + executing: Ref[Task, Boolean], + achievedPar: Ref[Task, Boolean] + ) = + batch + .map(b => + for { + _ <- executing.set(true) + v <- validating.get + _ <- achievedPar.update(_ || v) + r <- checkpointingService.executeBlock(b, commitQC, commitPath) + _ <- executing.set(false) + } yield r + ) + .sequence + + val ledgerStorageCheck = store.runReadOnly { + ledgerStorage.get(finalLedger.hash) + } + + for { + _ <- persistBlocks + + // used to make sure that parallelism was achieved + validating <- Ref[Task].of(false) + executing <- Ref[Task].of(false) + achievedPar <- Ref[Task].of(false) + + (validationRes, executionRes) <- Task.parZip2( + validation(validating, executing, achievedPar), + execution(validating, executing, achievedPar) + ) + + par <- achievedPar.get + persistedLedger <- ledgerStorageCheck + ledgerTree <- ledgerTreeRef.get + } yield { + val validationsAfterExec = validationRes.collect { + case (r, h) if h > batch.last.header.height => r + } + + val ledgerTreeReset = batch.reverse match { + case committed :: rest => + ledgerTree + .get(committed.hash) + .contains((finalLedger, committed.header)) && + rest.forall(b => !ledgerTree.contains(b.hash)) + + case _ => false + } + + val validationsSaved = + validationBatch.diff(batch).forall(b => ledgerTree.contains(b.hash)) + + classify(par, "parallelism achieved") { + all( + "validation successful" |: validationsAfterExec.forall(identity), + "execution successful" |: executionRes.forall(identity), + "ledger persisted" |: persistedLedger.contains(finalLedger), + "ledgerTree reset" |: ledgerTreeReset, + "ledgerTree contains validations" |: validationsSaved + ) + } + } + } + } + +} From abce650ff8bb2c2932732ad31ae4631571d3f6f3 Mon Sep 17 00:00:00 2001 From: Radek Tkaczyk Date: Fri, 11 Jun 2021 17:56:19 +0200 Subject: [PATCH 47/48] [PM-3110] create checkpoint [PM-3110] improved state accumulation; more docstrings [PM-3110] single updateCheckpointData call; naming: *Ref postfixes --- .../models/CheckpointCertificate.scala | 32 +++- .../service/CheckpointingService.scala | 174 +++++++++++++----- .../service/CheckpointingServiceProps.scala | 120 ++++++++++-- 3 files changed, 258 insertions(+), 68 deletions(-) diff --git a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/CheckpointCertificate.scala b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/CheckpointCertificate.scala index 040e7c56..dc0167cd 100644 --- a/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/CheckpointCertificate.scala +++ b/metronome/checkpointing/models/src/io/iohk/metronome/checkpointing/models/CheckpointCertificate.scala @@ -3,6 +3,7 @@ package io.iohk.metronome.checkpointing.models import cats.data.NonEmptyList import io.iohk.metronome.hotstuff.consensus.basic.QuorumCertificate import io.iohk.metronome.checkpointing.CheckpointingAgreement +import io.iohk.metronome.checkpointing.models.Transaction.CheckpointCandidate /** The Checkpoint Certificate is a proof of the BFT agreement * over a given Checkpoint Candidate. @@ -18,13 +19,36 @@ import io.iohk.metronome.checkpointing.CheckpointingAgreement * whatever rules apply on the checkpointed PoW chain. */ case class CheckpointCertificate( - // `head` is the `Block.Header` that has the Commit Q.C.; - // `last` is the `Block.Header` that had the `CheckpointCandiate` in its `Body`. + // `head` is the `Block.Header` that had the `CheckpointCandidate` in its `Body`. 
+ // `last` is the `Block.Header` that has the Commit Q.C.; headers: NonEmptyList[Block.Header], // The opaque contents of the checkpoint that has been agreed upon. checkpoint: Transaction.CheckpointCandidate, - // Proof that `checkpoint` is part of `headers.last.contentMerkleRoot`. + // Proof that `checkpoint` is part of `headers.head.contentMerkleRoot`. proof: MerkleTree.Proof, - // Commit Q.C. over `headers.head`. + // Commit Q.C. over `headers.last`. commitQC: QuorumCertificate[CheckpointingAgreement] ) + +object CheckpointCertificate { + def construct( + block: Block, + headers: NonEmptyList[Block.Header], + commitQC: QuorumCertificate[CheckpointingAgreement] + ): Option[CheckpointCertificate] = + constructProof(block).map { case (proof, cp) => + CheckpointCertificate(headers, cp, proof, commitQC) + } + + private def constructProof( + block: Block + ): Option[(MerkleTree.Proof, CheckpointCandidate)] = + block.body.transactions.reverseIterator.collectFirst { + case cp: CheckpointCandidate => + val txHashes = + block.body.transactions.map(tx => MerkleTree.Hash(tx.hash)) + val tree = MerkleTree.build(txHashes) + val cpHash = MerkleTree.Hash(cp.hash) + MerkleTree.generateProofFromHash(tree, cpHash).map(_ -> cp) + }.flatten +} diff --git a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/CheckpointingService.scala b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/CheckpointingService.scala index 0e0b8e7e..42bc2a92 100644 --- a/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/CheckpointingService.scala +++ b/metronome/checkpointing/service/src/io/iohk/metronome/checkpointing/service/CheckpointingService.scala @@ -5,12 +5,17 @@ import cats.effect.concurrent.Ref import cats.effect.{Concurrent, Resource, Sync} import cats.implicits._ import io.iohk.metronome.checkpointing.CheckpointingAgreement +import io.iohk.metronome.checkpointing.models.Transaction.CheckpointCandidate import io.iohk.metronome.checkpointing.models.{ Block, CheckpointCertificate, Ledger } -import io.iohk.metronome.checkpointing.service.CheckpointingService.LedgerTree +import io.iohk.metronome.checkpointing.service.CheckpointingService.{ + CheckpointData, + LedgerNode, + LedgerTree +} import io.iohk.metronome.checkpointing.service.storage.LedgerStorage import io.iohk.metronome.crypto.ECPublicKey import io.iohk.metronome.hotstuff.consensus.basic.{Phase, QuorumCertificate} @@ -24,8 +29,12 @@ import io.iohk.metronome.storage.KVStoreRunner import scala.annotation.tailrec class CheckpointingService[F[_]: Sync, N]( - ledgerTree: Ref[F, LedgerTree], - lastExecutedHeader: Ref[F, Block.Header], + ledgerTreeRef: Ref[F, LedgerTree], + lastCommittedHeaderRef: Ref[F, Block.Header], + checkpointDataRef: Ref[F, Option[CheckpointData]], + //TODO: PM-3137, this is used for testing that a certificate was created correctly + // replace with proper means of pushing the certificate to the Interpreter + pushCheckpointFn: CheckpointCertificate => F[Unit], ledgerStorage: LedgerStorage[N], blockStorage: BlockStorage[N, CheckpointingAgreement] )(implicit storeRunner: KVStoreRunner[F, N]) @@ -38,8 +47,8 @@ class CheckpointingService[F[_]: Sync, N]( override def validateBlock(block: Block): F[Option[Boolean]] = { val ledgers = for { nextLedger <- OptionT(projectLedger(block)) - tree <- OptionT.liftF(ledgerTree.get) - prevLedger <- tree.get(block.header.parentHash).map(_._1).toOptionT[F] + tree <- OptionT.liftF(ledgerTreeRef.get) + prevLedger <- 
tree.get(block.header.parentHash).map(_.ledger).toOptionT[F] } yield (prevLedger, nextLedger) ledgers.value.flatMap { @@ -67,16 +76,22 @@ class CheckpointingService[F[_]: Sync, N]( require(commitQC.phase == Phase.Commit, "Commit QC required") projectLedger(block).flatMap { case Some(ledger) => - if (block.hash != commitQC.blockHash) - saveLedger(block.header, ledger).as(true) - else - for { - //TODO: PM-3110: - // chkpOpt <- constructCheckpoint(ledger, commitQC) - _ <- saveLedger(block.header, ledger) - //TODO: PM-3110: - // _ <- chkpOpt.map(pushCheckpoint).getOrElse(().pure[F]) - } yield true + updateCheckpointData(block).flatMap { checkpointDataOpt => + if (block.hash != commitQC.blockHash) + false.pure[F] + else { + val certificateOpt = checkpointDataOpt + .flatMap { cd => + CheckpointCertificate + .construct(cd.block, cd.headers.toNonEmptyList, commitQC) + } + .toOptionT[F] + + saveLedger(block.header, ledger) >> + certificateOpt.cataF(().pure[F], pushCheckpoint) >> + true.pure[F] + } + } case None => Sync[F].raiseError( @@ -85,20 +100,27 @@ class CheckpointingService[F[_]: Sync, N]( } } + /** Computes and saves the intermediate ledgers leading up to and including + * the one resulting from the `block` transactions, either by looking up + * already computed ledgers in the `ledgerTree` or fetching ancestor blocks + * from `blockStorage`. + * Only descendants of the root of the `ledgerTree` (last committed ledger) + * will be evaluated + */ private def projectLedger(block: Block): F[Option[Ledger]] = { (for { - ledgers <- ledgerTree.get - execHeight <- lastExecutedHeader.get.map(_.height) + ledgerTree <- ledgerTreeRef.get + commitHeight <- lastCommittedHeaderRef.get.map(_.height) } yield { def loop(block: Block): OptionT[F, Ledger] = { def doUpdate(ledger: Ledger) = OptionT.liftF(updateLedgerByBlock(ledger, block)) - ledgers.get(block.header.parentHash) match { - case Some((oldLedger, _)) => - doUpdate(oldLedger) + ledgerTree.get(block.header.parentHash) match { + case Some(oldLedger) => + doUpdate(oldLedger.ledger) - case None if block.header.height <= execHeight => + case None if block.header.height <= commitHeight => OptionT.none case None => @@ -110,30 +132,52 @@ class CheckpointingService[F[_]: Sync, N]( } } - ledgers + ledgerTree .get(block.hash) - .map(_._1) + .map(_.ledger) .toOptionT[F] .orElse(loop(block)) .value }).flatten } + /** Computes a new ledger from the `block` and saves it in the ledger tree only if + * a parent state exists. 
+ * + * Because we're only adding to the tree no locking around it is necessary + */ private def updateLedgerByBlock( oldLedger: Ledger, block: Block ): F[Ledger] = { val newLedger = oldLedger.update(block.body.transactions) - ledgerTree + + ledgerTreeRef .update { tree => if (tree.contains(block.header.parentHash)) - tree + (block.hash -> (newLedger, block.header)) + tree + (block.hash -> LedgerNode(newLedger, block.header)) else tree } .as(newLedger) } + private def updateCheckpointData( + block: Block + ): F[Option[CheckpointData]] = { + val containsCheckpoint = block.body.transactions.exists { + case _: CheckpointCandidate => true + case _ => false + } + + checkpointDataRef.updateAndGet { cd => + if (containsCheckpoint) + CheckpointData(block).some + else + cd.map(_.extend(block.header)) + } + } + private def getBlock(hash: Block.Hash): F[Option[Block]] = storeRunner.runReadOnly(blockStorage.get(hash)) @@ -141,8 +185,9 @@ class CheckpointingService[F[_]: Sync, N]( storeRunner.runReadWrite { ledgerStorage.put(ledger) } >> - ledgerTree.update(clearLedgerTree(header, ledger)) >> - lastExecutedHeader.set(header) + ledgerTreeRef.update(clearLedgerTree(header, ledger)) >> + lastCommittedHeaderRef.set(header) >> + checkpointDataRef.set(None) } /** Makes the `commitHeader` and the associated 'ledger' the root of the tree, @@ -160,32 +205,24 @@ class CheckpointingService[F[_]: Sync, N]( ): LedgerTree = if (oldTree.isEmpty) newTree else { - val (higherLevels, currentLevel) = oldTree.partition { - case (_, (_, hd)) => hd.height > height + val (higherLevels, currentLevel) = oldTree.partition { case (_, ln) => + ln.height > height } - val children = currentLevel.filter { case (_, (_, hd)) => - newTree.contains(hd.parentHash) + val children = currentLevel.filter { case (_, ln) => + newTree.contains(ln.parentHash) } loop(higherLevels, newTree ++ children, height + 1) } loop( - ledgerTree.filter { case (_, (_, hd)) => - hd.height > commitHeader.height - }, - Map(commitHeader.hash -> (ledger, commitHeader)), + ledgerTree.filter { case (_, ln) => ln.height > commitHeader.height }, + LedgerTree.root(ledger, commitHeader), commitHeader.height + 1 ) } - private def constructCheckpoint( - ledger: Ledger, - commitQC: QuorumCertificate[CheckpointingAgreement] - ): F[Option[CheckpointCertificate]] = - ??? //TODO: PM-3110 - private def pushCheckpoint(checkpoint: CheckpointCertificate): F[Unit] = - ??? 
//TODO: PM-3137 + pushCheckpointFn(checkpoint) //TODO: PM-3137 override def syncState( sources: NonEmptyVector[ECPublicKey], @@ -194,12 +231,56 @@ class CheckpointingService[F[_]: Sync, N]( } object CheckpointingService { - type LedgerTree = Map[Block.Hash, (Ledger, Block.Header)] + + /** A node in LedgerTree + * `parentHash` and `height` are helpful when resetting the tree + */ + case class LedgerNode( + ledger: Ledger, + parentHash: Block.Hash, + height: Long + ) + + object LedgerNode { + def apply(ledger: Ledger, header: Block.Header): LedgerNode = + LedgerNode(ledger, header.parentHash, header.height) + } + + /** The internal structure used to represent intermediate ledgers resulting + * from execution and validation + */ + type LedgerTree = Map[Block.Hash, LedgerNode] + + object LedgerTree { + def root(ledger: Ledger, header: Block.Header): LedgerTree = + Map(header.hash -> LedgerNode(ledger, header)) + } + + /** Used to track the most recent checkpoint candidate + * `block` - last containing a checkpoint candidate + * `headers` - path from the `block` to the last executed one + * + * These values along with Commit QC can be used to construct + * a `CheckpointCertificate` + */ + case class CheckpointData( + block: Block, + headers: NonEmptyVector[Block.Header] + ) { + def extend(header: Block.Header): CheckpointData = + copy(headers = headers :+ header) + } + + object CheckpointData { + def apply(block: Block): CheckpointData = + CheckpointData(block, NonEmptyVector.of(block.header)) + } def apply[F[_]: Concurrent, N]( ledgerStorage: LedgerStorage[N], blockStorage: BlockStorage[N, CheckpointingAgreement], - viewStateStorage: ViewStateStorage[N, CheckpointingAgreement] + viewStateStorage: ViewStateStorage[N, CheckpointingAgreement], + pushCheckpointFn: CheckpointCertificate => F[Unit] )(implicit storeRunner: KVStoreRunner[F, N] ): Resource[F, CheckpointingService[F, N]] = { @@ -224,11 +305,14 @@ object CheckpointingService { val service = for { (block, ledger) <- lastExecuted - ledgerTree <- Ref.of(Map(block.hash -> (ledger, block.header))) + ledgerTree <- Ref.of(LedgerTree.root(ledger, block.header)) lastExec <- Ref.of(block.header) + checkpointData <- Ref.of(None: Option[CheckpointData]) } yield new CheckpointingService[F, N]( ledgerTree, lastExec, + checkpointData, + pushCheckpointFn, ledgerStorage, blockStorage ) diff --git a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/CheckpointingServiceProps.scala b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/CheckpointingServiceProps.scala index 7c81ff4c..f079c6c5 100644 --- a/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/CheckpointingServiceProps.scala +++ b/metronome/checkpointing/service/test/src/io/iohk/metronome/checkpointing/service/CheckpointingServiceProps.scala @@ -6,12 +6,18 @@ import cats.effect.concurrent.Ref import cats.implicits._ import io.iohk.metronome.checkpointing.CheckpointingAgreement import io.iohk.metronome.checkpointing.models.Block.{Hash, Header} +import io.iohk.metronome.checkpointing.models.Transaction.CheckpointCandidate import io.iohk.metronome.checkpointing.models.{ ArbitraryInstances, Block, + CheckpointCertificate, Ledger } -import io.iohk.metronome.checkpointing.service.CheckpointingService.LedgerTree +import io.iohk.metronome.checkpointing.service.CheckpointingService.{ + CheckpointData, + LedgerNode, + LedgerTree +} import io.iohk.metronome.checkpointing.service.storage.LedgerStorage import 
io.iohk.metronome.checkpointing.service.storage.LedgerStorageProps.{ neverUsedCodec, @@ -39,6 +45,13 @@ import org.scalacheck.{Gen, Prop, Properties} import scala.concurrent.duration._ import scala.util.Random +/** Props for Checkpointing service + * + * Do take note of tests that use `classify` to report whether parallelism + * was achieved. This is not a hard requirement because it may fail on CI, + * but one should make sure to achieve 100% parallelism locally when making + * changes to this tests or the service + */ class CheckpointingServiceProps extends Properties("CheckpointingService") { type Namespace = String @@ -48,14 +61,17 @@ class CheckpointingServiceProps extends Properties("CheckpointingService") { ledgerStorage: LedgerStorage[Namespace], blockStorage: BlockStorage[Namespace, CheckpointingAgreement], store: KVStoreRunner[Task, Namespace], - ledgerTreeRef: Ref[Task, LedgerTree] + ledgerTreeRef: Ref[Task, LedgerTree], + checkpointDataRef: Ref[Task, Option[CheckpointData]], + lastCheckpointCertRef: Ref[Task, Option[CheckpointCertificate]] ) case class TestFixture( initialBlock: Block, initialLedger: Ledger, batch: List[Block], - commitQC: QuorumCertificate[CheckpointingAgreement] + commitQC: QuorumCertificate[CheckpointingAgreement], + randomSeed: Long ) { val resources: Resource[Task, TestResources] = { val ledgerStorage = @@ -89,13 +105,17 @@ class CheckpointingServiceProps extends Properties("CheckpointingService") { } ledgerTree <- Ref.of[Task, LedgerTree]( - Map(initialBlock.hash -> (initialLedger, initialBlock.header)) + LedgerTree.root(initialLedger, initialBlock.header) ) lastExec <- Ref.of[Task, Header](initialBlock.header) + chkpData <- Ref.of[Task, Option[CheckpointData]](None) + lastCert <- Ref.of[Task, Option[CheckpointCertificate]](None) service = new CheckpointingService[Task, Namespace]( ledgerTree, lastExec, + chkpData, + cc => lastCert.set(cc.some), ledgerStorage, blockStorage ) @@ -105,7 +125,9 @@ class CheckpointingServiceProps extends Properties("CheckpointingService") { ledgerStorage, blockStorage, store, - ledgerTree + ledgerTree, + chkpData, + lastCert ) } } @@ -113,8 +135,20 @@ class CheckpointingServiceProps extends Properties("CheckpointingService") { // not used in the impl so a senseless value val commitPath = NonEmptyList.one(initialBlock.header.parentHash) - val allTransactions = batch.flatMap(_.body.transactions) - val finalLedger = initialLedger.update(allTransactions) + lazy val allTransactions = batch.flatMap(_.body.transactions) + lazy val finalLedger = + initialLedger.update(batch.flatMap(_.body.transactions)) + + lazy val expectedCheckpointCert = allTransactions.reverse.collectFirst { + case candidate: CheckpointCandidate => + //apparently identical checkpoints can be generated in different blocks + val blockPath = batch.drop( + batch.lastIndexWhere(_.body.transactions.contains(candidate)) + ) + val headerPath = NonEmptyList.fromListUnsafe(blockPath.map(_.header)) + + CheckpointCertificate.construct(blockPath.head, headerPath, commitQC) + }.flatten } object TestFixture { @@ -126,7 +160,8 @@ class CheckpointingServiceProps extends Properties("CheckpointingService") { ledger = Ledger.empty.update(block.body.transactions) batch <- genBlockChain(block, ledger, min = minChain) commitQC <- genCommitQC(batch.last) - } yield TestFixture(block, ledger, batch, commitQC) + seed <- Gen.posNum[Long] + } yield TestFixture(block, ledger, batch, commitQC, seed) } def genBlockChain( @@ -192,20 +227,57 @@ class CheckpointingServiceProps extends 
Properties("CheckpointingService") { results <- execution persistedLedger <- ledgerStorageCheck ledgerTree <- ledgerTreeRef.get + lastCheckpoint <- lastCheckpointCertRef.get + checkpointData <- checkpointDataRef.get } yield { - val ledgerTreeUpdated = ledgerTree == Map( - batch.last.hash -> (finalLedger, batch.last.header) - ) + val ledgerTreeUpdated = + ledgerTree == LedgerTree.root(finalLedger, batch.last.header) + + val executionSuccessful = results.reverse match { + case true :: rest => !rest.exists(identity) + case _ => false + } all( - "execution successful" |: results.reduce(_ && _), + "execution successful" |: executionSuccessful, "ledger persisted" |: persistedLedger.contains(finalLedger), - "ledgerTree updated" |: ledgerTreeUpdated + "ledgerTree updated" |: ledgerTreeUpdated, + "checkpoint constructed correctly" |: lastCheckpoint == expectedCheckpointCert, + "checkpoint data cleared" |: checkpointData.isEmpty ) } } } + property("interrupted execution") = forAll(TestFixture.gen(minChain = 2)) { + fixture => + run(fixture) { res => + import fixture._ + import res._ + + // not executing the committed block + val execution = batch.init + .map(checkpointingService.executeBlock(_, commitQC, commitPath)) + .sequence + + for { + results <- execution + ledgerTree <- ledgerTreeRef.get + lastCheckpoint <- lastCheckpointCertRef.get + } yield { + val ledgerTreeUpdated = + batch.init.map(_.hash).forall(ledgerTree.contains) + val executionSuccessful = !results.exists(identity) + + all( + "executed correctly" |: executionSuccessful, + "ledgerTree updated" |: ledgerTreeUpdated, + "checkpoint constructed correctly" |: lastCheckpoint.isEmpty + ) + } + } + } + property("failed execution - no parent") = forAll(TestFixture.gen(minChain = 2)) { fixture => run(fixture) { res => @@ -264,7 +336,7 @@ class CheckpointingServiceProps extends Properties("CheckpointingService") { achievedPar: Ref[Task, Boolean] ) = Task.parSequence { - Random + new Random(randomSeed) .shuffle(batch) .map(b => for { @@ -320,8 +392,8 @@ class CheckpointingServiceProps extends Properties("CheckpointingService") { validating: Ref[Task, Boolean], executing: Ref[Task, Boolean], achievedPar: Ref[Task, Boolean] - ) = - Random + ) = { + new Random(randomSeed) .shuffle(validationBatch) .map(b => for { @@ -333,6 +405,7 @@ class CheckpointingServiceProps extends Properties("CheckpointingService") { } yield (r.getOrElse(false), b.header.height) ) .sequence + } def execution( validating: Ref[Task, Boolean], @@ -371,16 +444,23 @@ class CheckpointingServiceProps extends Properties("CheckpointingService") { par <- achievedPar.get persistedLedger <- ledgerStorageCheck ledgerTree <- ledgerTreeRef.get + lastCheckpoint <- lastCheckpointCertRef.get + checkpointData <- checkpointDataRef.get } yield { val validationsAfterExec = validationRes.collect { case (r, h) if h > batch.last.header.height => r } + val executionSuccessful = executionRes.reverse match { + case true :: rest => !rest.exists(identity) + case _ => false + } + val ledgerTreeReset = batch.reverse match { case committed :: rest => ledgerTree .get(committed.hash) - .contains((finalLedger, committed.header)) && + .contains(LedgerNode(finalLedger, committed.header)) && rest.forall(b => !ledgerTree.contains(b.hash)) case _ => false @@ -392,10 +472,12 @@ class CheckpointingServiceProps extends Properties("CheckpointingService") { classify(par, "parallelism achieved") { all( "validation successful" |: validationsAfterExec.forall(identity), - "execution successful" |: 
executionRes.forall(identity), + "execution successful" |: executionSuccessful, "ledger persisted" |: persistedLedger.contains(finalLedger), "ledgerTree reset" |: ledgerTreeReset, - "ledgerTree contains validations" |: validationsSaved + "ledgerTree contains validations" |: validationsSaved, + "checkpoint constructed correctly" |: lastCheckpoint == expectedCheckpointCert, + "checkpoint data cleared" |: checkpointData.isEmpty ) } } From 768d2cd69050efa0e9ef04c799c413b47c03a544 Mon Sep 17 00:00:00 2001 From: biandratti <72261652+biandratti@users.noreply.github.com> Date: Tue, 13 Jul 2021 09:26:44 -0300 Subject: [PATCH 48/48] adding config publishing (#58) --- build.sc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/build.sc b/build.sc index 02ccaf6f..f478b64c 100644 --- a/build.sc +++ b/build.sc @@ -62,7 +62,8 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { Developer("aakoshh", "Akosh Farkash", "https://github.com/aakoshh"), Developer("lemastero","Piotr Paradzinski","https://github.com/lemastero"), Developer("KonradStaniec","Konrad Staniec","https://github.com/KonradStaniec"), - Developer("rtkaczyk", "Radek Tkaczyk", "https://github.com/rtkaczyk") + Developer("rtkaczyk", "Radek Tkaczyk", "https://github.com/rtkaczyk"), + Developer("biandratti", "Maxi Biandratti", "https://github.com/biandratti") ) // format: on ) @@ -223,7 +224,7 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { } /** General configuration parser, to be used by application modules. */ - object config extends SubModule { + object config extends SubModule with Publishing { override def ivyDeps = super.ivyDeps() ++ Agg( ivy"com.typesafe:config:${VersionOf.config}", ivy"io.circe::circe-core:${VersionOf.circe}", @@ -235,6 +236,8 @@ class MetronomeModule(val crossScalaVersion: String) extends CrossScalaModule { ivy"io.circe::circe-generic:${VersionOf.circe}" ) } + + override def description = "Typesafe config wrapper powered by circe" } /** Generic HotStuff BFT library. */