From 4ac807443d7ebc74b9e9c301754be6e1a04536e3 Mon Sep 17 00:00:00 2001 From: danhalligan Date: Tue, 27 Aug 2024 11:18:09 +0000 Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20@=20danhalli?= =?UTF-8?q?gan/ISLRv2-solutions@f58cca931c4cae50c6197e83e1fcafbe30766453?= =?UTF-8?q?=20=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 03-linear-regression.md | 10 +-- 05-resampling-methods.md | 2 +- 08-tree-based-methods.md | 4 +- 10-deep-learning.md | 68 +++++++------- .../figure-html/unnamed-chunk-12-1.png | Bin 172911 -> 165230 bytes .../figure-html/unnamed-chunk-21-1.png | Bin 83368 -> 86607 bytes .../figure-html/unnamed-chunk-7-1.png | Bin 51339 -> 50925 bytes deep-learning.html | 84 +++++++++--------- linear-regression.html | 10 +-- resampling-methods.html | 2 +- search_index.json | 2 +- tree-based-methods.html | 4 +- 12 files changed, 93 insertions(+), 93 deletions(-) diff --git a/03-linear-regression.md b/03-linear-regression.md index 13314b1..1abb545 100644 --- a/03-linear-regression.md +++ b/03-linear-regression.md @@ -50,13 +50,13 @@ the $K$ nearest neighbors. The model is: -$y = \beta_0 + \beta_1 \text{GPA} + \beta_2 \text{IQ} + \beta_3 \text{Level} + \beta_4 \text{GPA} \text{IQ} + \beta_5 \text{GPA} \text{Level}$ +$y = \beta_0 + \beta_1 \cdot \text{GPA} + \beta_2 \cdot \text{IQ} + \beta_3 \cdot \text{Level} + \beta_4 \cdot \text{GPA} \cdot \text{IQ} + \beta_5 \cdot \text{GPA} \cdot \text{Level}$ Fixing IQ and GPA, changing Level from 0 to 1 will change the outcome by: -$\Delta y = \beta_3 + \beta_5 \text{GPA}$ +$\Delta y = \beta_3 + \beta_5 \cdot \text{GPA}$ -$\Delta y > 0 \Rightarrow \beta_3 + \beta_5 \text{GPA} > 0 \Rightarrow \text{GPA} > \dfrac{-\beta3}{\beta_5} = - \dfrac{35}{-10} = 3.5$ +$\Delta y > 0 \Rightarrow \beta_3 + \beta_5 \cdot \text{GPA} > 0 \Rightarrow \text{GPA} < \dfrac{-\beta_3}{\beta_5} = \dfrac{-35}{-10} = 3.5$ From a graphical standpoint: @@ -96,8 +96,8 @@ plot_ly(x = x, y = y) |> ``` ```{=html} -
+ ``` Option iii correct. diff --git a/05-resampling-methods.md b/05-resampling-methods.md index 646a231..738db63 100644 --- a/05-resampling-methods.md +++ b/05-resampling-methods.md @@ -170,7 +170,7 @@ mean(store) ``` ``` -## [1] 0.6308 +## [1] 0.6355 ``` The probability of including $4$ when resampling numbers $1...100$ is close to diff --git a/08-tree-based-methods.md b/08-tree-based-methods.md index 7408dec..952b924 100644 --- a/08-tree-based-methods.md +++ b/08-tree-based-methods.md @@ -509,7 +509,7 @@ bartfit <- gbart(Carseats[train, 2:11], Carseats[train, 1], ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) -## time: 2s +## time: 3s ## trcnt,tecnt: 1000,1000 ``` @@ -1150,7 +1150,7 @@ bart <- gbart(College[train, pred], College[train, "Outstate"], ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) -## time: 4s +## time: 3s ## trcnt,tecnt: 1000,1000 ``` diff --git a/10-deep-learning.md b/10-deep-learning.md index d681516..ed77b16 100644 --- a/10-deep-learning.md +++ b/10-deep-learning.md @@ -393,7 +393,7 @@ npred <- predict(nn, x[testid, ]) ``` ``` -## 6/6 - 0s - 61ms/epoch - 10ms/step +## 6/6 - 0s - 55ms/epoch - 9ms/step ``` ``` r @@ -401,7 +401,7 @@ mean(abs(y[testid] - npred)) ``` ``` -## [1] 2.219039 +## [1] 2.269432 ``` In this case, the neural network outperforms logistic regression having a lower @@ -433,7 +433,7 @@ model <- application_resnet50(weights = "imagenet") ``` ## Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5 -## 8192/102967424 [..............................] - ETA: 0s 3956736/102967424 [>.............................] - ETA: 1s 4202496/102967424 [>.............................] - ETA: 2s 8396800/102967424 [=>............................] - ETA: 1s 16785408/102967424 [===>..........................] - ETA: 1s 25174016/102967424 [======>.......................] - ETA: 1s 33562624/102967424 [========>.....................] - ETA: 0s 41951232/102967424 [===========>..................] - ETA: 0s 50905088/102967424 [=============>................] - ETA: 0s 58728448/102967424 [================>.............] - ETA: 0s 67117056/102967424 [==================>...........] - ETA: 0s 83894272/102967424 [=======================>......] - ETA: 0s 101908480/102967424 [============================>.] - ETA: 0s 102967424/102967424 [==============================] - 1s 0us/step +## 8192/102967424 [..............................] - ETA: 0s 4202496/102967424 [>.............................] - ETA: 3s 14876672/102967424 [===>..........................] - ETA: 1s 16785408/102967424 [===>..........................] - ETA: 1s 25174016/102967424 [======>.......................] - ETA: 1s 33562624/102967424 [========>.....................] - ETA: 1s 41951232/102967424 [===========>..................] - ETA: 1s 53223424/102967424 [==============>...............] - ETA: 0s 58728448/102967424 [================>.............] - ETA: 0s 68878336/102967424 [===================>..........] - ETA: 0s 75505664/102967424 [====================>.........] - ETA: 0s 86802432/102967424 [========================>.....] - ETA: 0s 92282880/102967424 [=========================>....] 
- ETA: 0s 102967424/102967424 [==============================] - 1s 0us/step ``` ``` r @@ -721,7 +721,7 @@ kpred <- predict(model, xrnn[!istrain,, ]) ``` ``` -## 56/56 - 0s - 58ms/epoch - 1ms/step +## 56/56 - 0s - 59ms/epoch - 1ms/step ``` ``` r @@ -729,7 +729,7 @@ kpred <- predict(model, xrnn[!istrain,, ]) ``` ``` -## [1] 0.412886 +## [1] 0.4129694 ``` Both models estimate the same number of coefficients/weights (16): @@ -763,24 +763,24 @@ model$get_weights() ``` ## [[1]] ## [,1] -## [1,] -0.031145222 -## [2,] 0.101065643 -## [3,] 0.141815767 -## [4,] -0.004181504 -## [5,] 0.116010934 -## [6,] -0.003764492 -## [7,] 0.038601257 -## [8,] 0.078083567 -## [9,] 0.137415737 -## [10,] -0.029184511 -## [11,] 0.036070298 -## [12,] -0.821708620 -## [13,] 0.095548652 -## [14,] 0.511229098 -## [15,] 0.521453559 +## [1,] -0.032474127 +## [2,] 0.097779043 +## [3,] 0.178456694 +## [4,] -0.005626136 +## [5,] 0.121273242 +## [6,] -0.076247886 +## [7,] 0.035232600 +## [8,] 0.077857092 +## [9,] 0.163645267 +## [10,] -0.026966114 +## [11,] 0.032263778 +## [12,] -0.807968795 +## [13,] 0.095888853 +## [14,] 0.513532162 +## [15,] 0.496699780 ## ## [[2]] -## [1] -0.006889343 +## [1] -0.004996791 ``` The flattened RNN has a lower $R^2$ on the test data than our `lm` model @@ -833,11 +833,11 @@ xfun::cache_rds({ ``` ``` -## 56/56 - 0s - 66ms/epoch - 1ms/step +## 56/56 - 0s - 64ms/epoch - 1ms/step ``` ``` -## [1] 0.4271516 +## [1] 0.4262716 ``` This approach improves our $R^2$ over the linear model above. @@ -906,11 +906,11 @@ xfun::cache_rds({ ``` ``` -## 56/56 - 0s - 133ms/epoch - 2ms/step +## 56/56 - 0s - 134ms/epoch - 2ms/step ``` ``` -## [1] 0.4405331 +## [1] 0.4429825 ``` ### Question 13 @@ -966,21 +966,21 @@ xfun::cache_rds({ ``` ## Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz -## 8192/17464789 [..............................] - ETA: 0s 3784704/17464789 [=====>........................] - ETA: 0s 4202496/17464789 [======>.......................] - ETA: 0s 8396800/17464789 [=============>................] - ETA: 0s 17464789/17464789 [==============================] - 0s 0us/step -## 782/782 - 16s - 16s/epoch - 20ms/step -## 782/782 - 16s - 16s/epoch - 20ms/step -## 782/782 - 16s - 16s/epoch - 20ms/step -## 782/782 - 16s - 16s/epoch - 20ms/step +## 8192/17464789 [..............................] - ETA: 0s 2924544/17464789 [====>.........................] - ETA: 0s 4202496/17464789 [======>.......................] - ETA: 0s 8396800/17464789 [=============>................] - ETA: 0s 17464789/17464789 [==============================] - 0s 0us/step +## 782/782 - 15s - 15s/epoch - 19ms/step +## 782/782 - 15s - 15s/epoch - 20ms/step +## 782/782 - 15s - 15s/epoch - 20ms/step +## 782/782 - 15s - 15s/epoch - 20ms/step ``` | Max Features| Accuracy| |------------:|--------:| -| 1000| 0.86084| -| 3000| 0.87224| -| 5000| 0.87460| -| 10000| 0.86180| +| 1000| 0.85324| +| 3000| 0.87808| +| 5000| 0.88076| +| 10000| 0.86936| Varying the dictionary size does not make a substantial impact on our estimates of accuracy. 
However, the models do take a substantial amount of time to fit and
diff --git a/10-deep-learning_files/figure-html/unnamed-chunk-12-1.png b/10-deep-learning_files/figure-html/unnamed-chunk-12-1.png
index bc6d686eb7dfab274aee9174eddacf0522359e51..d55c77603de49918b00b6d8a8c1d41c8606647bb 100644
GIT binary patch literal 165230
[base85-encoded binary image data omitted]
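The fix to `03-linear-regression.md` above corrects the direction of the inequality: since $\beta_5 = -10$ is negative, dividing $\beta_3 + \beta_5 \cdot \text{GPA} > 0$ through by $\beta_5$ flips the sign, giving $\text{GPA} < -\beta_3/\beta_5 = 3.5$. A minimal R sketch of that check, using the coefficient values from the exercise (the helper name and the sample GPA values are illustrative):

``` r
# Sanity check for the corrected inequality in 03-linear-regression.md:
# Delta_y = beta_3 + beta_5 * GPA, with beta_3 = 35 and beta_5 = -10.
beta_3 <- 35
beta_5 <- -10
delta_y <- function(gpa) beta_3 + beta_5 * gpa

-beta_3 / beta_5  # threshold: 3.5
delta_y(3.0)      #  5 > 0: switching Level from 0 to 1 raises y when GPA < 3.5
delta_y(4.0)      # -5 < 0: switching Level from 0 to 1 lowers y when GPA > 3.5
```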
z-hE|ftCsGgmaNyV8_I-`ZqTFts|ndyJ!w0>TVGe|D5)uos1|=4@W>rAM!i?`mh-y?01V~BHe|`7q`yHjwt+I06)khr+ z@1$?t9Q9r-Nl$mcV3ju?DOQ5X3JzT|h!`$oWZj))Q2nF(D5L=u)iqA>8Z*6-#!)3N z%L7>q^>8C1QwH2R=0 z!Krz4db((@isk^t*d)ELuXR;2da#mhaz$o;uai8eS2Z_TV!? zRyMP+a4)Z8ZbkiBnh4uJ?CfrD0y~aG<%tly#GkJ7p%}fI5_v+xvYCp28>^xs&(^4e zExb|pUT$5!+Xvk(EdJWgPGNhOdM#V8m>2$92<;rLIQsxG*=;LtHtF9my)Brg*M#pz z>oH*M9>a&0zLb@g&Ub2Ts-l&M-ZvHuuHA43iQ6>dLdTJ~u8WI{<>hYaL0}ylM6^S=+l?=mSCFf*yt^WW^cZvJxVG`t3r7lR!C+U57Wp}FgB2{H{-XRoc#SZ3A6f$E)35`wpr@kuC z)-*SfZa#9&<5#0!@nk1usb3_2Rwl8X=H7!GW80-=p{xEE_~)%sBV%Zs{vIbwLo}er z!z&MiUJ%*81-u#CvNK#^C_gTerK1#hk7@>W^O2emuB2`CU+%u}&yYW55lO>Rz5cTu z_&S6~LciN~fd&$_`Srse04xw0l9ga>pZ;Gguio zl)>;7%rjK!Vf#lLTBu$L85lA}-S023`idRHroxeWLa7(1ZES<^aC#_`U-NqlUd>Egf0%@xrdg!QXk98mCppPb&=}zj2YS+QVazD zA9**(y86fnegcM0XGjR983lzH_qvszn@`EwPfw?mP2WdQh+mk}!f$Y&F0f(ifrx%4sfvKm;8Q=l{+q&)!W(9k~g-aEqA912g^5COK8QA;efkZW_(Nb zV`@e@$jSTk_0jl(SDQhfkPi>h$w2Y+fEP+ujKhFt1qU+-xir=jCF~55y{zfTGr7ZH z|F#p9Pf=dtXu~edN-RZMUVHVYfl**WRH&xFkl zTBAR_$<7^H1_o;^mw0!Qt<`2Rz4NdPHY>KwexhPwQL^DbU~O4deraudLoG;Sf5)+i zIx@y{tQsA<@iz*rI@7cL8EBa&01+DY{y07-L;bRcIfd7FOd*20^7-6_6dIwqNSSb{ z80sKr<1wp}9P_#aeZBo(T;KNeSRE1}LRY{Z-=wSEPD?Au$vKGB5zpQC`;e9R4r22I zLtt^VI$Bx?X7Hp$FN1+=uRPqRAiD-KK~ZB4Cx`vXLav1tlO405;{4ADg)SIHk7%;%YEn2Xp+EL!0YU{+}IL4D4d77KR;mp3_OCkR?l&Hzbs=?RV-6s0Up(h|YF~MX7hsSY*$$C4CbiPj z(OJ6TUW^DPK_|G-s_@-&54&-OFDP5V{|Lrt%5?!aiBC^V9KM9V)4u}HrKdb0z1c5Pto(DY6Kv2Vwga9uuFId!Kjfl1mj}Cum+r+y40nb6|$P2_b zOH8qH_6mpi6y;4dCN`=_CEkES9+pV+xZ41~kP>bjDk;^{CgoO(u&~8!KU`lYr#@vb zSOdy?ey-(pZ6vMDOBx0qsCz(q(=la`I9%7g&32HgI&J|~9!%DWthh++@SiMW_VoPQ zw9oL*l-h`c+b|BOC9{yW?0McnRtwSkA1<5ff{icB%fA7+F*Ti4+no5IBoDJ_r{SBUH;J?ByXw?dLGttm=qFaZi{aM?YL(BXS}^+Xok|tn>d}e!$E7Q zAH5W&uiUJ4r(zE97+)4+S$D8vV_vy_j|j{bwI1BAV+kosQdgE$GC>ROr^l46PJ+=6JM5h{z7{|qWQ&0{^eM3a7`in zMPbnPh%qDx&HejQ-CcygwGqBFn<#!Idv_0I`Io*V)DrUGARQ=)gK?vSW1ld3bo}?^yZlf5t@!b~4Omf@zfa+NKpFW404&HWF&?YeO1$qPqf^TbWo7ks_=mTCUZ0J_Q^&4}Lmx>Nf z)G3ikZJj<}VxCgd!tAbDE2FE`BBygW}c*fI$+5Wpt z3@^h48$pO6&no!T@QoW=*0i(IEoIws-#k7ifdZOHw@&2J0YnGXjY?A}`TT_oXktj! z5IrE#k3(1dZFe^!>}9h(E{!pWsi>J%`1rD1nux7@Sj#QW&4EZY_?7d;`GxzpZx7?t z;EY@00i(Xgbf4OW1_33dWwp+s5@#J$2WUg5-@3)l!&3taBaEW}&{hA|>!$keZr=3@ z>56VG!vT)%MKe|ngLS>15FBuDc@(0Y8=@XLRB?pC9Sd0tCPJn;ex90wp#v*?mw6!L ze$7Sz$KIwor~(6)F{=p`!a01l^73-{YOsU>PWZ}s_{oVYckdn{a&W!oP%(nNl$Dc1 z^j*i?_hsd;Xi^gOTJcRlLcwB-Pe>s0{~>WgziM=$E$Hz zDY!q5)#G>dd(w$BgF0oqI7U>O?B9?0e7>$8%6-@}OdQC93HOO z7_5Fb|DSMW9@`BbW~7C((zMHX0auo@|3_xf@sEORwpvTRO54x!6oMn4&ux7VZF^8W zV7DWy0{aB1GJchF@b9vvwI_&x2;D;-3%y$$AO1JciOJ|E>^y~G$=@tWI=|FT*$!$u z7g?+}U$MKi%)=6^iz}zwB_J~6Dsg5OT06Kez(OUiIeco+Mg}?vt<+PdtgE@85br30 zk7rN79?xiR(z#^Wso zo+;LFH_MFlPwU4k=)$losB=kIsXm_@9C)tpgJ=% zG?_@&h~dll^{SY4Q6(M-emoZQu&r$hc#il^!F6ZyU0jNw4)p=7H0%r{Q2Q z2KO;1clSaIA{fXg00BI4C6tlD$UtU*8hG4xt1)JloYXU? zPv1cv3Y;XHE{g|qdhG3qYY4#P!-FpXyYU~(=Og*SU}cQe#V`PMbqBHzmQbA3krCBh zyK+T-8{gRCQ(<4eeEsIlyho8bahzxfMrxa{KTtgtg@SVLBm4+ZvOS^?isLLo@tA+4 z@VT$czMP9(S7>mTm*$A(+4#x3DKYhweC(;&9$lD)01O?6E&4OEx5R7Avdpea*rDO6 zxNBDv#x2K>SLus#XlLBH^KEMCq?;SD<qF3`=-zyx&o zQ6i&&QWuU7*jsO#FQ@+d1?q9IJ6Ks;+kh&ZB6UroP%6_0RcXzllil4=pXz7#fTQ9_}DS9$fz?H8pF&w?mP22sKgtNwlyZ&h3rE zK~FUhoL!gv5FT}V_vFtSV>5MReCsLZd?F3a|HB1fiVZkG^RZ9la{oQv4ReXh{kjyP z$5GVrJt{;^yYX&uSJQ(Gw4xppD`KQkc^0{C!|7iqCl8_Hhs$7n7d!e#l=K|euU_>+ zj)XIN;Fj!Xoo1tdx$V7I!s)f6uyjYzM7#Wa*)2LO9A1&_!_VCP@g1;7C{%MtNfSv6 zlAFR4J#znL^SbxJmR|lD_2n;3wVK@gGh8kXx8yth@BN+YHX*+zAM}wr=x>dUEY1W! 
z$EpCmg2?^f(7m@*VcJk=!vs!VoUwe%Nz)0wzkz^%EkJ#q+`-YY1~rvfBn}X0k+Qd5 z*VmJVfu%Zh7HlNMC!rnjX!Yh@%cbpfM(nv+^)IDNI)PtViOd2oLuaTsKwL+;2|kJA z*&^gEIO|9l!Dd3K;#e{RpF0AwKlOmx|D^kF-hN%I$i&J}w}PBHzAwpO@pGypsSPq{ zFRv1_n@LIIv$G)aIakbv>r(Bhub= z;JpYULszc+JArX|@>+6|X zTjMfoQN7~gmgMElMBX`+ko2Dm>>++qZX3w!;C^(MQ{P{y?Gh=LSK(|Axxz5;1*3C+ ze%haZormo2f@XhAh#v@ULuTX$P2%<9uu~c}Egwjxrvdc#K4OI`A4Y!PDGdHSN4+NU zje{5gyPq)KwJmb_1#9fsIQ1AxvXVYc?k|MR=h9qnjrByUbY zE4%MSUh_aTU^`H{qM(Dbvl$Fney02V#<7l6cfnZAqO9AXvK21#Iyr28`_VQwG z)`qSj#`Ix}!dL_Is373E{TCNs{65^@*Jm}H_3x)*AaJ-83$|@gaPTuOivRp7l{f#% zY5t!z?L964J8118a%~H-FL4St5Dc6+V~cbJ85<1OLJFT>H~b750W4&$Ia)MM#xeX) z0bzrIz0Y+-*#A|XHLH`H9+W?S{I`Zqk%p3qC~d%Jg^05q-~}>bWZ3F~iw48qRmETk z0XN4d{FQwA#~*J_<$pWm8qu*E!4a1cfj~=Z;K!enjs9K@ax4HvFJ8WMSE=>yhp`JD z+VzL9PVRx1Bg#KY>Mu(x z*(@PxB%HS^EsiqNaa`V(gNo?Hn>(weN2lLC;)IO>A=kpTWF4O$=KR28aGMh_!2pE< zQJE<$CoM8WHtLM-F zsd`JvLL2^f zE^%=PIIBQO;5+bXa&pJ~`zkWTntmL2XpOx-wQRe-JGz=ox`|R?UyfEoOLHRfmMV{7 zMGwk<0i@t(x)=j{RQ=KCYA-X5Zz0op)!ZD~%XawrY2cvjEG&MQk#zAhy3v$f4@`Lo z+B1d4#i0;G;o%}e?Ut1Mfuw|D?CV#%#;6U1(0?L#amQ*^JSiceNl*~s!hsVc7GPsO zgpW1=3M>g58ylX(31sz=w=W?1!h=N~^5DR2%uEOZJ_E5GTv4Fb12qipc`T{{$c_PV z!7>6aX<&$}d4G&R?-Xthpi9KB{P;k^yz-VF^i(JH%*`hRjSe$I zK`2Sg*hRGeuKG}}r5Nl{Ddo;y-(7tFuz1wjrIC>l+}@$R()TR#A3PAoiifS&<8Hy~ z#_MLIb|ZqDc4o9D6dQM;6RoA|DJfP^Za{;}X=g679o#>GFc5<1c+~ApO&K%+ z4a3hzWJk^Q*6L8Zix5McJbs*h=bpI-86iw*ataFXA0IcuHix}IIgaB+%J~1I^0qG* z!X!Ms(?7VR!=-Y$G2s;q~uJ__{ zFdv7ypWW1>x2K1uqN76|rWDv!tLv}MP1`y-?WXZno_H&W=mPIkl$uC{^>V(})T9%l z8nv$SUBo2h72h0G#gXR28+8{=q~kn=aP|DcE0*3+@0QbxFX%jlZ{NRv|L$F^iG{hj zi(&*ky$)Ep`Ho|bF#1y#6U{2ypxQfMuo`w}=r(AJMB>$SQ!>zDr_p&Uvu><-Yoy}}zLmHX6SUL60%FE~oT_$$ z8|M#Cp{)05ijUO|X>gXg=&09s&|3%ta8rFQ+puzNX4~n#;q!lEdN1hB!_}1Rh=`CoUE-2@i@&D3}-uHP;FAV_vX^3D41NJ5*sU<7vtxD^Qz@hO6hqS zwW%X$PVT$Bj>bfWn_qanPhep2DPoQmnf&MgQXho;%2dUi(G#pZS)O7{Qjb?+LgRHS zQjKXaZv~zu0B?A3;bYw-Xrp1AQi5m?;q*Y)91JWiJiNQmUVmwY1HZ)(2qnVC0wJOG zf0xd$K!=(wxC-XRzEKh3n6bquG?(eg??Jm9NKBMO7WK`-{8{RaQ#U5B@V#hwakI>w zam|tHzORN1(|Y+H14a2XKvEv|+ECz%!QZgn#aTwF99MWo_}K-O-_f9<|7GMouu1}$ z2Znank(-L0?P2Z*JPb~6<_jLc{HE+NPX2#k7jAluj|22Gbk+l=I;x`Lm-EQD4B0CC z!o*?+E$9ZPl9YY1;Sj;_R;6aGNa<+4W$Qmaj882b927K%i2Gss^2RI9AM3kT>@~c< zWvo^AgvhH>iE|VB2S$5WE=+*JhaspXDwP7n{hLf;a=GDh>Czp3f!+_oJo49%Y$S$& zRP%YO!&dpM`MQULo9U`h3%UIIYJ1^A7knKSjLemv=4|sQBN*TqVHJ>VnxwoMm<96D zNR9q@1B!BXC}XHOg4%=OHC&mqyH!56BbL=B)8cU1*^a293DUL55IpM}c@ zGYb*CRX+PIA`L;6q-z-WE2|wn(FV;r zh3F76d-g1r9{4-6PrD^_xmWjktx-b6T#kM9XsoeB-f!cpG44l(h9K4@z@J3rEOptR zTi^yA9-aeilX-ebVL(S_1V|9577CDy&d#eFkUn9i zaOuQvVh}F4QHbgomqA1-IM#jnG7C};4yl*_GZ2435ydISolVEc_U=bdPYyic!105v z0A`ixqlN%489gaHBce-(hG6%^ZqL2yJ!&N|G8Qw;dxWfzFmU?xv=g-w3Rj}PqtzQY zK{=Y_)wt2zu3n{0LqQ6<0=*uFZ4$5z2&w_W!Sw6br)xcYixv?2KKrdd@?a((c5ZTr z1A8DDlP#v9jhw6%*HBl7mIUlsXBc1MmUOkZhi(bq8D30C22OVLA@c)X5AanNWi%RK zEttL+<50Gqkr94HU*t_5ZdB!G5J?Q*g%-wv1E*JRK(s}h#aEe`k8c^h45E2jVBaaX zt6a$ATXd$Z$`%F5GjG=QdbK`Q6=lPWcnA?*whG=#kbvy!t2}{CX6hbLFsE<@NCUbt zyYKEsPV@OQ2kf}6+Sz41s}yDfegGai+_Va}9Cg&ndD!S8T189gj?tYxa#AyQ*aq(T zum|y>qC5v3-t0gD0u%&mw%{`Qz5nhH5nW+J!(egaHHj{R>!^ABW?7=f0CforVNGo- zy%I&&_|TB|;^my&e+QoVxUWA(Fnk&kud!|CrzX#&KBlW1(0^qKmT@d0i$u~bGp)(8 zw6^|>i9IN}X+}Z@{s_K3aq{HY+a0vP#fS+o_=eIDB%yc(L!KV-@=&gOF`N|^6OP)TO(ms z^uI8gAl)M;d05<%pT7=&;)gS7W5q0g`t<3DqmOh%5){29$4cQ`2@VFP7l($1Vqsid-h()}xY!OX zhJ}YG>~%bYM%=4c4&dS`E{q=ne?;-P5A`rjtE%@L3k8WAa2Af;t0uZp`Q+r4yQMA; zoq3(U3Fiy_5+ZMUc9#0+MNZ$ZxWk3BUgrFh&6vwIpF9sVPhX0qvqzX)p(s#JU7ebg{oJRc9=hP=B5F@`fm}Qb{XVC!lZ?!;=QV8p)sT9Vqd(Sj@!4(mgasUp4rK(P`_W3FTS6-y?qIKi$v$Cm@UNq)$G8T zT(`vMe*vTva&S|>%Fvrt@hvFE_5qki%PWjekr#k2?qEdJ?6SEG)ku+cMk>CNCVhtX 
zL^glKQP8&WQ@G`y1C4J(c~esI?f-lKxQEA08Qn7MM%cIjutTgJ{r)`_e#@hY#Mi_Z z6+l2P#Qy+W8@7PY%#vW}0$|JEzuNvdCdU!xEw~5J6vK!@ya-U7%uLK?rb4f2)7XMn zvACF8GJ&KW4cqyai%)IrVxuEVmrZ7VmHY)pSO5$k$WZ11$=$o1P{2U&`9T~?;?Jlj z@pVvehTs(aW~6+e099t9D8r2aEWZ7*-_+!vtjlk%WNw?z8uIAry78;z53;Nx-1PvU zM65Bn48xpevC?UMH!ql6d~*G(v2nRyRt%y$H#__Dv(=30cN2C}pUuXQ;LnlTSbms8 z<;qH$v^}--`Xe?tZdld$)ENmddU(b&Z@{F(pidf&n6qC*UKILz9@d#-h}(CzZjFxP zKBymnGf4jy{P(lg35=uFfszfanO0Pi(rrSes+yWiH1#40HT<_<7n;iJuKgmbMK%3nTVtADnR`lUm0yF^9N_j+*W&RO_^rl_%p7OL5y zE5B06!6NP1v+G~iDPcmn>MZQdF!V+9v6#AgORQcY*M|?;r%NEfdvk{I*H+5Q&E1lY zvOLCr8|d1Kb@cSj1`nr2aOu$WHA%I!y-iJ7DIq0wQW)9*%&N?cN|)O6v8LvjhQ^|j zISX}z@`fr1F2a6qg{Wa3zxYP}QQYi|KBeY=H>+Cqws*RYtxVI;j#n*8fm;3zIn!Iq zm)0>CcNjcZ_@oV}8?)&gZ8u;R0kwM&4^L-im(i_X-)PbNC>fs$ZJcen6N(ACaf7@( zTcmz`U7fDe4gfJygqxfD)TvW9{rVaY$%tS5B^s~j(2|BbWA3Q=Kiz#JuFhX@C^!Vv zmI%f0{=~VD0UI)1TL8ZbDtRF>z(V&avOmkX4{-F3%38Xame0=-eG;=fs?`U9F0q_~S z<#KorBb##^ZV~3-Ogg`IukPUsj89khe`dC@hLILAR+n$M)Pw2Yu8;s8Qb#T;J^DiW z`s_+DTOd#}qzJfXQNjX%dbUy+l2N^4 z$qWvE#L)~iZZdRtZYp$#u7db!^shM5HysApoFJKu3jK4i=tG|>6BKjeXG}%@1mWQY z%+LwcV$=md8pOA6uWW40MU*74Iox$}y8qB-j{34`51^tT8WPB?SJMNt!fjoMsuqaD zhdBAaS*aG(eZ=fJDIHe<{ka=L{Oj`2kPu*P;5!LLsset7hT#q%7K(8%(PYNg@p{C; zyWdR$ZDVZUI!F_Rsxc{y@Mb~lN=|Mzzg{5|Umr!S087mgI?7!FB9BCA>T)dFTXlVxM*Y4UgM|A@X*jW{Jopo zUYbpt?oiCMuSj87GoF}J(ZUV?zqXnpk$Cws*D+q2wU}LIZJm2Ra~Bh3eM{LxQ+3h6 z26`G%V*zdlue{5-S0h_|(6Io@4)4P4cWQ9k0`OH-VZc*{@q&3{xM_in7H~4ELESRy zHF!rgwYB{Lbc-T>o}<^3_Rm;%HyFuS)(zOW6+SDFnWKonK!uWL{nXD67nn=1=8aZ(`XK*B zDkB|&O71!R+~Uy`uEly?Ayyn}KjW~r3w9aYdz1}-qMHvcE8F6l_iML3N!Wpy=W#G- z7lP};4SC^jo4{y^2uNT$xU}|=DBD!@x|@pIDeSOy|H+O&H!_uO3|;zIVB7p1Cy^j@ zLOGtfrU|LBi2>ioI*rr3GsmF{$Klflpn&A2tThl(l7=pOkGN=g)V+JDf1UtXfuBnn zYZ#YI2GCrG*135|sPS;P>=}v4f2dYRMkX@4+S`uLj@Y6ug6dyz6A6SM8{?hzfPgdl`QLKO9a>idkzbjcVaDBHn#Thk04$&*zV0972lmU=naL#iKV9 z{9b12*bKg+f555fDoG83X+qL-tGe3STmWU_!G(o}ZUO3vdCI`s5K-3kfT4hf1>}3I zVXbeqC;Ik&JdG{EiV3j>3+WO&IMo#G7v@LK5U@EXuu@u+AO#g=BJtHQ%Y8u&zeGY`*5Z|@2-VE*`bdhW#%CE{o|rx?3cTrfAE zNa|aV_Mob?-8}j@nwnrI7n`BXul)J+xuummPC(qN9xZ*WLz0?pMe4L0&LGxJV=x+E zC%%$xApC(mDiaA4V5tki39ENq-Lkekp|HL?sD1mlY}Dj=7W@c{5y$08D?VsllEdfjiyX_Fctj3L}96 z*tfv^8wc^ClM~&B4U3RBqYi-{3&$5^G;C{ZC+U~j?#GgV8Uqp?9nBh5B0~S*@<^w( z(n;%%wc|VQa8}R+F>nSkRPEDh?5k+g(%Sh^;a7F(k*p5J*ur0})rSki+KbbBx`gx` zZv^;|UNF60w~gs`?B7Ek!Wy*B6~}y*$4-|1aMtem-WPkRn?^GwC#qt~ZNI5$H&PH= zldH1I=H}+`n*>lNU>b-;pOfwOxP(>#HX?|kFf`xEN(Tc=oP2Hx9U96{+699HYrU{% z$OUNE03q4)W03<5D+sVh;hK8^JgNKlZO)!;Mp6K3N2H4%5A_$74x2e|DuHbR?my%Y zgtq`L;;G-iEie5!15O0e7-AC(&$X$A-7E zi*Krri;8Qv{E(hr6a9p zjoR6_T3hrHaxj-Mt?f~bI69EQZOo!qx0@* zJ82}{n32b{?p}ezAu_mCw`vDKqeZdT0dNERq;_c&`fnZGR%1zOT~=dcewM}tW@%?X z?eqoM#>QR~hDP0os$n_B%)#*)&BQDdcs27g-Pg37Uh3L& zWqwmohzb~raqjqrF-uej2)iM*x1_tuta~B?4r_g~PAKnNt}T1;Lu^w-Nnv4^ zi862fVgB1S7O?g;(nTWqyrAHb;-BVt(_@BxTC*{-gB`2iNOXHlY51O|E|HRM#jw~k zU~*(4$YQ!6_yFM@6=FXCb$BQvPkX+IKH+DEjX~>IT}PcpI7T6`Da78oym#BNlfQX<>^?i|ijhws!_I2FbE2BJGA#}kHAe3YM0G1^$H98G_8STu#eno7bP zEo5bKhl6(X9|^eG9@BK-!(HhwW!0ZP{YA_D>XG1ZqTXplgB*xgDxpyZs~o*^K0Z6! 
z7sgNFQKy;${mf=rH31S!AmV6%5PaCvb-4ENJTwE3Nsvs{GL8c66rT72FQ)P8<`#7fvZ5IPor>* zi`yx3nX*DY8NRI{Tt9B=u0Yf$>fNxCT=7SFoV#N7r0eD%%?eX>&z&ANmCSC!O?o7h zOZU$@kV8?rne8y5zJSQe=Wlu!Tb`bt2SWqRT=|Gt57Mk{O$MYrH+_9=-v}4>2I2UM+U(51KSlPp*OCcyLVZ|qJ9nxv;w6fK4}r3*V7f15dVB2c+j(e6VX74BD@aibjp`&qjIvDN+JLqX;=TQ?w4Tlf_wO@s z?uHl2wyXAN9YP=Q8~e%69M=>eINkPxD~WdE9yH}ZU15t_=ldEgmR-g#8ZZE7|Dk`* zWGut23dqJ_TB+lz=Hh3L9^y+3xj=hY4Be&0ibP{fb9YCjhw8ja0QV_wO?(%zQI6}l z$Pg3Lbwnm71()wxSRJ=b52F9!I`Y{OwQz2x$$d zc~ym!wuG3;O9PQeeA0UZpL_c8`3Ng4W5&n9{-<{CSj*_YX=>7$+|-PG8RT{RGd)>< z!mV-tmAoMH!5F!g-@)ZORl*nMte7GZ z9f0MiHZmKZ&=G0HM>t>i;rh=jgKf{89Wzc<&Vk?!F0_%ZL-#$`v@CDqBRfb}7w4R3 zRp=?NsyUPsU7>ho@@4?Kl6o%rNNhK0!B>-8@}L|FWZV}|+ejCLx!voI<+ zHR-X~|GhTgT~kxu0%hQt-jjiKe!)8-r+vmJY=E7!`Ax&9>>HO#0no{m@nSSIVdJ^RP(Cg_6z$~Oso zIdRF!*W@wM{9)%2laqS6AK zt;%O{>wl)9>0-}>90s_TJm~9gJDt#q21}o02hNw1Q=XN-6r=wo1P~O-dsb%Jv~Ih$ zBN+fwIEQ2(5E7#S4C5k%lK%ej8-{}+?)L;~2LZmezTOq?$Z&E-iyAr;h28wni>@x$ zAA&p)?>?rhLc9JY8~-@&lmC&AZoTHO>R&(Up%E<&Y6@|!Eie65M6b1`D==*)(ZSn$ z8Hq858w3p?%whfmx;Hn`)$#X7+wy(@I!tK&w}H8RO-a+G8=Wn1A#O)XP&0QCzGqM` zW#CGOMh=Y-Hcn35=KT=BgYpcHultoNu%jY|Gko?g)9#;eL#hJGP*l=N>hGs%Bk)wkq9(s!B?>%-u zf)feLd>fQBPz3;oRRF6Nk0GjY36~7ww~iD;A!FjRYZm4QIXN%yo<#nPEG9f81STY6 z?kaWn=#0fa?Nwy@vw2MNRsRG7ci=?N#l16h!g>q7*XkeL|1S0FaT0w(;2JirT?RIn z&69-J({HG#Hk;=t+L(`mp5Rq#-!zH_nBd*%S84}i=4ixUM4W=-cZV91DiP;!ojaF+ zu?4`{YT#SLOW&5<=I+gQGK&tI7MD(;WrSXj-|3B%HBH=C!B=Xat)uhiPGip zL1tih|5qn$|6Q<^_6Q0-jFG*dU;QPdz9cGsqP*iTxE9@b8=Op-3EMe;0Z^l-$F}nxJU{akpd7MRL5=9UlH&fAxzC=#CIlgHz)LHVn3Ad~#81K)Ghn7TCpiTL z794MTjx=(uzouknZhnxAbO~f;qJF}018R1zO8bKiJJ%r;PXFWl0Ij5)9~QTJ5)2QW ze-tI>?mztn<45wr-?NSw_>!$`oRuZ*yW(S5rJ}8b^uo7@n~w-z5nwY$rEmgKCAWVm zSrer;DzDww53V+5F7HnL?o>p6q?Z{wyizi;Z-*gfZ!2ut;AcL*SzCQ-*P)rG!Uv~r z?oilpdPZIPKS#&FhDKq{r-l_ug*h;+Nv&;_tlHe2R^XLnmkE++R zIzRYcpR~aq1R6|M2$iy;Zno$OvFQpw)OE{X7D?5gUV4;T8g)LjZ*qY&;Jy9$58)sc zMov3ub2}U{SlUBj8LE{!MUPvIe4=8mPEPHx;kr;xIqXWmui^VMad7NK!vS@<&*^!8 znaT6mrg!U065@+~e_VVlSA>fM90akfF%$=|eRf3m7~l$E6Uc7~s`tp3lb7C#Qof~< z3|`P6@A6O+&JrPRJNuzCRMIuLw&e0q9>P$b>>c*tDqN1}Ku8n6F^X??av-i zHw(uEQ81%nY=LVZG!?=t5a1hlE4zz{8UokiYE^;X+)6Re8ywc!Oz92FCMu=JB6C8H zYe>&a-6N}c{LBsFosjc_Hw@Li#kNWbk;tI>AD8yV{*xCEq=`hU3pN*jqdQ}7U)y`J z#jcP{GB+=FiinFlfI183kFch;!$aQ7EcW>TU01I25t@plMf)69@(6#@RNXo#@ zbi*m{md@g1-5+wE6`)p34GrD!AAsgoT&<)NQshf#OmPQjr%MxM6o5Q)4LgU$k!m|!!a0%MDGOiI@@uqiN0YV`F@<$q{ogSI;ZH%-O-nR&(q zl&jxA#$_I}1SX7jqgI|CKZ_(oJ`y5GrE_>VIOr-RKy`p&x}9;YLb7*uNcvgRx{uQz?N<0Kf=9 z$r~3|LaGH{MmKl&%+_I`)OnoT9PdGm46reWs9gD)UTf6vpU+$}h_UDZ-0XBqJ=fJ~ zKl0X$*vyPU@%LPFs4u>z(vH>}6gd){Swnz8*q*7pE`V8An9cPHB$SrTHv>Zks ziyf~zuX@#8(|FE)@kZIzV`Nb62G|6sTP!K}e^0OPl*Mky;IF%9D+CCXW;S&GeZ9mo!#P|8lcPd(S4tL zVdJ8!&?wzQ#qh?b5?pd2e}3Ul@ps>+es{P2Q)-5w8aSk^95DF2+Y2bTf$e#eelHxl z6-dMFg*)9NuJxjveFN3*-y8ml#(`oYj^c}2R&7yhE2o0*f|rH6vw0MGQYC~C7%1J` zyo2>woZiw=^NyioPMFt0+P)nG8iSYXsB27D-FP4|vk&b8jNU0IkXA@g1aA^a_&zx~ zTi!@(4nOnz&|HRM+$<>qAR{z-*$9bGs`MH`A}|)SHYb5U!RY@`FH?YJJ$-XzFem`1 zn|}QG@#$HC&Dij{^Ji|~df<%fBgh=sQ~@r_YdxcDelt`KmYpIr9r*WPjUvbr6P}^` z7qEvsjRK)WM~xojXF&nar1ifY?eYWdiS3){e1Sxe`J-wrcRxs~l%zv91Ai; z-dF|CeL_M3brm99EHinSAwrZx=#7dkT`$PbkC8Bcjb8Q(<6mGBA*_31E;6bBw&0bt zv>-n;XXpz1S1hA}dj<6@$kk9Zo0~V{I)>W8jxA)E;eSiZ@2M6y=0iA)pM06Za3b7U!;-W?&w)pAmlcs~T%VN;S4;BvJ@##U0Hp&A4We24aw5(}^^paL^?_u}K z7O6?|%HGX#w5L#K*f*85_1dTZEF*G*$gmYIv;U7-eLG8V6=x?q;lfSGzJ-{o@*?jRsYp_N-XN+6kAIQx7=P$mpzA1*Bo#@ z1NSiGiO@}0{*0yun)m)IepTq7c(HPM1vnOs;6=;YXJ#In57y97)9cjmo*K&bURU=4 zVGtcoRXtpY02eSvx$&Lkbi%7;4$1F=bq{RCm>@Ysn<~avO+xvFWHP<1it6qR?(HhH zu2_K--&Ers#+PiO(#1IqcP(`2H~gwNlu`*?6VCy>k+m2R6c6n)ace?LLK4I7X_xGONjtgm{&h*9#tl@eqI{; 
z2N8%|o4I}g^~xYKbLo|-w++{Pu3{&Yg>el9yi2_+wXW(6>GjzJ z$~tNe>5K@vpdAdUbfXA##o`ZxRa2Vy&07C#wiKeoN=Hr>hf}#1&X2^XlbjqK8%2;D zk;xAp%&o0^w-qs+>~y60%KF<#Mi@k+I)k4iZ2=@rZeEt?Qiz@vCle&wLWG<1c=2y&R!GoD{u|_M`jRqy{mlxSs7-LJ=c&B zMx&QBp@`{iWU_$TboHAbu-=f~yZ43i*_HjDP^Us%W8yw;Hk0tU&8w}so-1w{N66C9E0H2>K(-QBqSuJ@r>b7Li}lWE zrL5U7G$rhJ}Xa6Tnvf* zAG<6p;)d$uGh-F@oboQBG0l<0xC@|(1qB5#;9I+PEpR%F27g%_+<<#sd{n6f*1h&7 zO@2ZB!^2m>3j@9iL!)I2r=ZjZ$O8ycYIQg}S}N|db8`f%XJKZ-F?XZTpM&5b#}V`0 zFp=w{${_V*4Ft8W@&d+q?zsL4ET*A8WMZC}^+19Ej2dweFzYsH=@w|~M#`Z5I3u7U z6lox80-G#A0shWCQjMm>ta`j1N+G4!ir$#m9=h ze^Oh!E3*`Y(drY?5Q>9Y#2YS)xPjq&*OcIngSpdgDCO`X$WjBV`y(4q->JwpF4A^= zeyOhCyn-=Fa|Rulm-_5tQ5`s&IP7=-So9<8Al0$7vO;NcAW74}>lGCiysN0U$qkwN zUprGVhgvjbqs{MQNms32ZL5lwzTk;?%)8G*^E@>rC34Z4R)cn|ukX72bKM$PR>Nkv z7>;1}OSV>4ZRoxNJMS4+rYxR>(efmZ)8=s^-)JhnVsB?RJTjtA2E=7K4TcR%AZ*~* zd~uIV(#zZdHX&gv9L{&=$Z5Mz*A`B_my1PLyO3aiEzbKfGEsMKL*HI(qt_xV-ajncv5!W{H5&2&X3sa{%KvZX7Bi)4nA} zl$I|q$+tc^OkeiU&&xq=%Cca2}^lP*%=Vg2#!

)74AY0h09q%+4>?ihln74xPoi zQ{5g4@aI*%^nF)&HMfF?8(ww#Pdgw#J-vH+Q0ALqpV#)2mVCe4di$AQP_~g6cYFTrMDYavfU12rJ_O2K*nvLYTmL*KX9k%}Ok^Yw z`xNYAPeiH#?rkr#0ng)d$J$w1SfEM1dey2B4$0(#^YmR}u_>L~w9*6C(NnW}jvZ$| z=puPhC4&-zGHSDA8QNfI{zC$N2|*3{YGgzO;6?oWyoX-JY*UIF$b47b7pI#~(D^E7 zBIyx9!~E=PuOp~_C=ioTWUo=R_5|?)<}@U4N9P9&qg8*#3%u=x5Lt>)uTuXcG@?Y=**3EBe?*xCUnV7jTTvfH=c2q09BF{LZzQPW{A`MV!Z;eSRO8(R;bG^kc> zdFhh%k@x8}d3JW}Qz2*Ar6U z5mQD=$g3zvSqy}%UY>AQNq~7$b-fM5_Li2BF);*@7<|^KC&iBaYnX>Tl}#!?d>Db- zOnpra1aUUlH#Hf!YbXVL^^tXaf|RP?Js{zP;~$eJH!^Dz{kqD(2E#m};p`jp!*D8K=rHv?)F^9@q$ClQYi>fY zpq`}3uw`Gt%*7YzD}g~y++PMcN~rZ7JV4s&XU+kdDNI+-T{TzIPQM#)PF-WVEl5Kn zFfF7r5;DnR&v_Tp``TLQyjwf3D2R)XLF|lPdQHvx`6dcl=p$(+Bh@&v_;&X-r5qrc z!0E8gWo{Fs9XqN)S;O6pgiU;&$&J)E;fD7|YoV8zK?<}mCgvvw1_2(!{fpW*f0N>% zZoZWUNwqF=I7_3ZPJje3(R)iZW)5cZ!;ue!O+3lgkqq?uVJTz%F?MKVWFhw;wMXG` z)6UU8qPidR6Y&$(IRa+$)!I{p#^o@5sjit2xL8OsIu^Hjr6puv82z<&g4}{u$=@x6 ze>Og}6^GxOGftOwq-f6#HUfcz9vws{=-X?Z)=?iq&kdd6RlFJT_yySdnTCa%UK|?! zoAB)W(y}9so!{&)r|BwdTihRjF`{Y*5D)<-_Ds!U9vu2-mLWuqiV8ELNKRuudY=oa zE|X_=_nhUCKYa7(tb5>Nx;6fr8Gh2@!xm-aB}2d6zfwI_N&Gcr-F{r{)BZ zMp$Wd-@dW1ymEVr;`A%cS>B6OjA-0ML}FX7XF*W#hKw9Isz@+Q>8=!SR2<8-53y}J z+N_fKwXQBSV6bsJAUuzQMRFL;Hoz%Fji>V!TMnn$Aub&w|E`jmZo3+2&(w$*S@<3) z2`Vvl;S)n6oCi8zKidT5UFUmQD(}(9)%w0`kFH270t7l=@JYfCl1os1fzNcq4ihrt z@mV)1K80ATex554OR{?>BM+0&1WP?7gLz@-Xr$|l3fPJ{Zxk%^2@q0KKDB)0@rkw! zU3)C;W)4}($5VR1a7zSfwFQz8=z>KJXHdN0%n=4b$g9EcJ&W7t%SMKbH5fxqD!x1C zajsdVxG*aIzz!0wh0Y9|Iilr>-+y12Y4fxG+pvnS@AUNg^j0F=!Y=`1kq@ukNO?C+ zu4kKO@W+@zsWH`!ECJL>%$!Eo1Gy_eQAA7p7e&)PkC}_8sSLt4WcZ<<7+lr2ef=@K z=f)@fI(fso@#m_3$v^u!(JX*k6~0e0c=9S0t`ZzDb)Ove*ECXd;6D$ahp zgI*1uASO2>Rj%m>3^2cs1$$ytnDMQs8jBGNPyp4z&V>xh8YQ-$1uk9gRSN%EI$yNp z?VD3W^|CPB0~rhk8zGlTfDsYg%h7et<>b}j#D#O~C_g^xB{6bc5tF3=tLkRDx)qMN z)j>8`{=|$t7sbznBB$R20Fa2Ffz=P6CER|#QIg?++pVQfzad0~%gjwbKfTYbln+6} zlp9--aKQau0DNeYVlZl>v-mdzC;%HS!_$N9(2Wc$tZzs{>#EUn|~hZ zTRYl%i4P^R?$vK;Ht*W8;|IR4PDccnY~#t!;tgLuV-S2+T3W#G2I^{anadEc3Mm;i zSY)Sqb468^Z|HrRB@N8bt(4GdzZYaICbt*FTs%LQeo7tYGwj>CY6-%n?a6qwyPalm zAi)TU3&7&OF|nXY$!|z?Ve6KFt&&&UtPiY7|d>p zc>LHJ8iI=#CBm!E{v}cO(!STF7t&0?03qgoy)7K^UY-ozr|0lI?xfU*Xzu4fP6u!y zn*_}m%HXat+G~m4U>nT;wU#uY{Jzg~5MQ4%AmHGFW2C2t!8mGZKgP8gsi2I4C2mN! 
z+F!Yt-Fl1+=s0y=c1!CUad~-nRGWMfdB$ShFf&1d4xevO`>+?ML2>J|aHG7o(2+Gq zd!W#R3Xzx~+jko`5N6oppS#2R3V9H4PZWseiV|s)(6hk)s`%3-S4d;Bwsd|D4*Y?wWWCR4^dtpYh@Z4cVhllT^E-$by z&T)G($3A#4o3Om_ct-7?w2vsLOyT>BEkovKr311e3Y-4#4IP@{3+jNya9~;+Z$u3r`kgT_?gC^Un3(ta23RKC1K1*Vxp9Z1cm?49}^rC zpnL%~=*gjL*?KK1ik%9wnpGMT{iB%gV-vC4CS;j5q1C z!Wkz&lH=n|+E4DhZ|rpkL|89Y*?dUBMd_7pGC-mJ$6vq3r zXrUhvL@#sVlYLnG8X;5U65pq0&mNDBbCnbk*bxb#pDaG{ z?Br@Azv;i&Uc5JsXWY_V{|8RM@C8j%9UVAk9TgIFRsaPJ3WLqAhbdoOQv*-FpQ&+sF%;BHH4o0H<#y>Few7IeTNczgE0FBq%6a+V(b*&Z5G?;9xbR zbs&pYOQ`$gZ$FVw->E2*Zs{PvO8D?S&j42QNV6fKdin$Fr90a3gm!4}-n;XM}HgFO!_K2#*kzkz|ir~~o{7*>^x6HII`V!Ak7v!T6Z=i~E6RRvxo zAmjT1>a#DKitn$VS-pBSL#0h53;`b0|7c#`(>!!%IqikJKbz+fGEvctjZ?m9;=aCo zIU?aeti`EXgq&`@uX5czGvpktU$4P16`}CJrSIzI=G*)$54Nv7=XCipfAXqh&$^5P zU-oE}eN8#OCG6wk73>U*LjdxAsFeTip9NK~(17~KM<%8v!U70B|K!+7>OtZQXO}Bjmr8x!tE6uI< z^*^3fLSD9-_8?~+mLBTdNqr-u51G?ej539G-esPqbrL9Ui@Hu=E@Cbg3w)9y7=nIJ zaV1Sc+X$!xp;%^FR73=$xgh!b`0?YJ+?oFDpEo9E9m;>gjI!0PaK&~>-u=AcwF7;p zRBp^KuO-o~qV*8u!XE{U+p<t)kcvj5;<&I-hw?u%=7@o zi?6gqHr+B?*doBCD>%k&c>js5FI>msLD}cr4$lB-F|p(5y1Kam0=4r>fs5NBq$}{w zPm|jaU)d{d-*ygkF~4FdP-LX9mDXBoYuGeNMXFi?n5FfvZ1mV_SIAX<#U9|TSX3}y zMin61tJS`#(G~wo!Na<&v@%T;#6*3_21DZV-~4wOy6~?8AbKTzZH2@K+`^!_*AEcK z+e^FI4N{kng3M~a85cLS54WC2q!FD zgAAs*84$LR&s(k2B z?(%*PI{!*esK0J)I|TP4q}+l`>rj#a;+rymIQltvI{-h0G37g{3lEc>X?7W@-gyft zqX)#}#oYbWly~q9`g84v70pDMz+JSTxS5OBx%3^m4P<#?|3LaKz}Roh6s;h&JUgZJs~ z=C*Ftb>wHrddE7RpGen%4KBa|m{A7|e5UF0)lLWfx5udn)mQW`1JxuI3Q0fO_5vh5h3b=Hf$WoEa1ksy}+pW z6c!*d&*S7(`T}kcKtpthh3f>RVQhkwB2MzGUuNbKYh3LB%R@oGZKWG)r0nLF&)ZU$ zX=!t2cmpw&o&2q*=OXI7+h5!<25oqYoYR|EudW3=bLjqv?AVfi<3^ZW?{gbZ%uvFd z&8$>L8T!dbppMytS&V-4Z1^DhDGd$ynt zWSpcadZGJ!j9y*cY6sK(%h%F>So9~WR*#FL(T1!4RAq~W za{h_S_K)XfPc42Lg+scZc>&{1;F61QzMK0qIzKXbxY#K$Hul|{yYY{;4z5V3U-oKD z{un2p@61fyXG1_x7547E9{}f^RPA(Cdr)bRss;*4NZcxYN23bwtV|qGzdtUtPvghf z)siY|pQ*sq1D>;4zsJY-0_;Em-`V+9XjkZMmR++8u*HA(?j7`$n-|P$nE&2 zpB|}sZT-t%=Uo7v0*w_fUvNSz5^Viw1<$MK{iB`%MFnjyAlz8x$XmCR$nk~{LdGOaD{(#5=n+rF{`Lwhm&{jk(fTSKH^MX%B!%8hC zhJE=Gpd5e$Z`d{K#cRqp8=eCCbb0^&rLInVabipX3T@HT+bBz-vnICb0{6Yj2FtH` z3$k#C*|csIlF^;jZVYOWDGWHV3FZ|R4qV)8S7oKQzSg8*4e?1_K~Rf6eOQ zvNh%3hC}(Dk~=W};>wgt=KJPmLSKOQLK}21$~h49&CQcA&Hzn8xNsp!X5G)5WuUQv zYpK)U+@`jEx+5OTVA~}i>`ICNH}WR2$#G8|R@WN(f7c+;(udx2GPKEH`CsG}%tVPZ zUrFO;`b0r_`4<+OVGC_VH}OD$Ak`^&AFh%(8$I)2+>LzK^pUaq)Su+EpJ)&vIEC7^ z*V%O@XXS5Cs4ZHYtIPQL{Nn*H9u^kBs5ekhv2VmfBdm+)>FIry4{ldk-0r@E#OSXQ znq;p9Z3?h06o1yUs+0p(VWVqCTaI!yh?~bp10-A2xt-W=Wpb`pj*MEl`SN4cDUTn2$~2eC4)s2w&n~q>QQ=VD zP+MnrA4@=hLhJLB+oROZE2i9i3=n`!OD%+}L?o^yNSG^EZD6d(g9sa}%LQB=!0n4- z=VAk(opFcj2{-pVQcRhI!kr(xWcHG;Imfzmg-;c3N|-;vhdB!xMdc4~ls~NWwXQcE zsCk#_w^U;n5GaJhaYmflr4-qG*5jrLfF^ zk+Pq+{>zu5d%i^prSa@(jEbuK$pWJttkz>y9_OmECoTAE__?_cNLgAiLGgTk)|86x z>KHwVU%4CxXbnI%z%ac-YiSexHHABO zv9(3PFf~4&*3d(F_K8k_o%AzXA(@1w@L1CeAOV}VY^x|V0{ zZVN}1zrQ1AU}U3PWOQn_JqX_=(ZNbnbBEn#CDKk+wUhwGA;;@*Jpsb=xtPinNaI0i zh;BeeO&5g!z_khEKagKGH3l$m2Xe2Sj`qTL6bEoc#P~Uc4@9}1N=!@B+W`kl+e7LK}zV#wnLfBpJ+ ze1RI#4N;v@5HTQ5-pED>eK_P%gn>8GY{lh;OQbU#R_L#xX2b`v0`Y|M{vbfwLH7f5 zu;GHSh9AG4o{!z)yZ={~{?HMY+jP2+CY3;mY4S`)`Q`9t*DDcGpT@==i?rp=H$6h! 
zfjbIjDDObq`PHjl5H-%@gVjs`N{xf%_(PQqxdtERswm6v!;Iy}_$vNnPYDm!#M4_ph9V-7MfM@42DJAnt(EeyeVrM&U!O+$E@ez`eRmnjH%K_BdLZ=+gC( z5zslMtQLOQe7pL%J?^Wq`s~6BhMLBk{DxXuN=g;L!k~Z!WCF>LVwNN{nCNZ+s*A*Y zMPUpt;+1py1^~Fo`0Q7A7RmyOZ~>P#GU8&sa`h@(6pkkoRCUUZSo9$Th*b3lc_=~w zBIj`OR;=a5;1sm}0nA~LH{`e&S_iNJ!|4OxVH68=U5$Z(jrFE>TwrHu@JLw!$4VJ( zxew!CBy>m@S^U*_ozqn|T%8hBP*}py;;@jAuZ8wenrYtM?v#&{_8PkhT}w2bYFO%m z%g|UAGaUT!;|Lic!(3{QPdLDrm6V^t_n_=@z}OoX5oF-d1;CmD2IrXIwcC#ie+Q@| zy>W;1B}O^<@?D|cM{6lFC822oRGpoeNqIk-hgD!0!?zcC59Y%d_kllZXGb*uVP))b zQfunz822u>qHVA%qU-A#P zEB}5W&j)RwItM_jMT%7IK3+dS3>{((!Wdn4S@doNlA(zJ`P(OAuVddTX07g}<#$~& zb^$}dzeFN4zWt@@A9Xe%69ZRCAo=FaRo>pqc*OQ}SbQ}y>dRv%PV7j=Bgc5{n*lFh z7My5Zlg3CPZRpgW^e(jaB+wso@vW3kB*BPRg1Cxn@5QamaIb@UCOrBz(%uQTmms&5 z6crK5in=~At%6#ADusTr3OS3$q(I;w2K;r`plDVUKF zUKrS&kX1<7wWI;^IM?wkbfAJJgp9->Fu-d$bkefjd1&bJ!ij52<47m+`8X(_hJ!7$ zq#^2&Gp!45c>LIg0;bKu@FS3&gTm=6DlnF^3vhKnz`AJ3$g=pu&Ob}DUR;XtGCH33 zGNS)&>(kR6iIP;reFcZ;HE(eQbX0$SVYEI1!1o9XE$G+|Z}*RQxT%$?Ip)*d_~T?m zR3uFjmwhOo!Hfoyx3y`nF=~_WCPU!?bPB3$Y|Q|2GC=JmSl(j6Lc+rzv)30HU+7QO zfW&(<3rq4B#Xem5ggFd4YM_x}DPm(|89b@bqh@Y~8Z7~^{GWWp&+YAHYTdBN#a4&j z6to&tP0#TtfocMI-@HXi=L&?~MCJ*%=pZ!+1MrlPoSNC%{(p46c{rAB+dX`mLJ}E@ zOqmlQ86s1PNTkdpDr8QElp&Oop@a}JWfqB0Aw!hRb3&nHCZa-#-@5Lecl-YO{PAwv z``m8#b)DyN?8DmET00^ErYuLt$Hw4GkMD|fTgKY#|I_(R!cvkWIy8hUydano)yH8+P! z8b270cmWQ;SmDlOS}1m(d@;84%hcOl>HH?;WnYoZew~&k`Ph3UK&cSEAfN9U7)ZE{ ztbv~}pUlim_-OqF)7o19#=x~>r4A=GHo5Pe-uO__{z9lwh8Q`)Bo*-VP(O$1_0V0n zAMt&=xjEF+DyetRld_SF_gGI*_3b3X9Nedv@KP<=ypeGcTjmw_uYDt&*)Y zbrTz(*S*`XHZ1Xd{(7|?)R-#ZX?lA1utR-NiCK+?H;7R!ht+ePQkkEB6d_iC$^IXg z8(hibLQ6Qf$gco(k$O5N{&az#4BsXgMXo|-m77PB0$)bUj%5M}5ZHkfU!P3wDFt&Z zl7g2%@s+1uwT+)s!evkY`HvT;TPq^;_51KLF~90$2F@ND8fvw`xNwHnU?8~bjrQk- zz&uA|WZGqnf<uV1*V+83c!RdynW(Cywn@an2rlkU{CcdC}McMB+4uu zor=2)^o#JzPtpZhvotSn4}q`;Krl)(1IMhmSc+DOV%me8oP4O-f8O4vsM7S;R$@ZnoA0M`p1odUd>L;n;k4Hv4^DzlE z644WJ$$EQQTn--iv!;^gSZ;6S;AOP>@gkyO7v2cIwL(&xO3(Y&)`GOOLw1)Wlv6BE zpWZ^?fu+dHiHgz@DF1_+Ktcein6i|{??zM<6Cr6}z-aE6pFu-hw>;VTOm&HwuFcWj zPjM6JEz|J!OnIzRR-r>y6a4w}C;ByONj5w%P~cMJm%;2HcCjfAKTOR)t*p+kuEGc% z1d(I&tk*h2-I0A(gSZD3)SeB!*XKg2cZcK>|4~`2Fx1Y#Ln6Oqpd7?XO3RE95G{!wY78gn0h|h!9V-QFteS&%7A3i)? 
zF(6a(_N@>$eV#p2%Xukjfx&HwtfsT`{#DE0+OvoGGhWF^&ymSWt36HIcAcJHZ8&wQJ@DcayqdBy zqo+QXZEVK;FQ>`L%9006T5dlieBton!!to43e}`daW&&PD__jA^m^YI7p89>PI&MD z8wltO?{F)bojkeiTkz}V9j}dBS_5Z{ce6ABpEKL*@V}>F{=3L~1mzqJA+~)6i4z%~ z!~_CFqx|e*8NQ>TA*;4^eR1miUww%IMAN3x??IcVJjzGj9CCCVU_BxA+;IcU%1F(x z->>*8R~MXLeFyF}>_PCCro3cbUq#o^99FWiA4qJ~~%`L)NWSYqLAYQS~bO5ye~G!l_SOG7Af_mt$gM3dAj|5iOrT@g=-`ku=03 zo-2Z-{p#;1;dQb*a`tfd=?VusN1w zZEbjL7Qq7$fb;I%lly5ai;LY14Gm#WqW3cOh?bVttSP3S+x9}{1B>6`Z9n5qojl3Q zNTbO8-r(G4UlDAt)LMex^j3X>)VB_q}#l*yClJ!lW z(*60KJ$r(hs4Ptl3=GVqO@5rmBo&oK3Th{ePhjX-Yb_-sE&Y3WUIqM5af@=_@p}A& zyAa(24FMGgGK$5;MZl-1%3@=AhcoM6-p~;+kh8mauQTEeayKe>$95@6 zNx+Ub?BpXF4qjVZ0rH3FNF^w}6P^x!nDt?+^)QF%|AN1c=c4*7FyF6ZV>2@|R3cvP zu<=;1)OumnIgiBx<2SzG*YQw9Seh^tH|Mnus@j&=iaP8!D!k2Mp*0`LA6nE5r6sGb zz?}Zyl(O?Z8Q;JAP^I0VIfCV2*~jA->4lkSg79G|@OP-enWDPsB>M;X6~mOYPCG!W z4hjbZ1im8MBW>#lBK^zB$Q0z~lSnXDxyfLB;ep7=%5G(oRP{)72{P0Bj|3i!XPc0a z@bcv<;5-0PD(-6#>H_dmJ;8G~<<{Y&waXG8#;M~9>u#UKsapE+!}?i26CL;eg5nn% z=VAprycS#AasMH+dp`czZ{KRbeeuvQae&9d!p25MZ%4o97)&GABt+ancYNVlKX{#3 z^y9ckOk5m-Ll6sM;Zh5;^|m(KXZ`pzsEO9{=FY?)&zf}jRgh-y%-fm!M@6W1KyAyW zbc%w#^|sHjIMNlOFXCQo)Tx-rYYq91BW`3?SpA4{oGF2UoIn~yY<6OzZF@*v>VG0_ zVySk*>vqH-xL)XD65emm%gcLRdXv)CIc*&OAB+{9tIG5a@|x5?!Hl(l!d?QQuWK7e zg3(R-JG-6i5*&@`>BY|lD{yMH2Vy@6-k~>u_&Cmwd=fj}SudgyfT-;+NWnHD7$h0jj21Z7gJv<&t*@Rk&UI{Y00f!*u^)0NdYo|AD z+20Yv$224Vai(zp293WPWUp|w0ONo*2^LYzvWfKRZ)q;f6RNvvtQl@c;B5XWixm5J z^wWN+KBC30mNrx#f*aRBT_KqYVCIyfG7B#Axn@KX40%6pzQij4yCu@qD{pEFI^jOQ z=g()QZ91VpqBHGJc*->Fk3)wzHZ_%t^Rt$ZK8hgzp2(lFvik9z?SG-VL7?GqBEGrjj;$?Y>fnS9pfnzV>KN`M@RB-9>+tIEDp zk z#l~(YDz90Jl>IWiN<}~~NfV>L`#xroWD(S&8v%p09VO>aJ4lHwU#~n3hY?}nY>eF6 z0B=J5j(Z8rzZ#?A-p~`*|5ZN@C!;vjPNeBgdU&JkK?wo?Lg2DxkU}g-b)4X6=zXDM z2y;2L@h$km)Bfx;SxhvP*uC7zOp^VaK8@9%4TCS~f6bBEu;!ds^OPZwT``rxP9uoR zDyCL&d!Rh^@%DD z$gXG>tOluNFW#u87^!DBdGVky^$PxH_=syxfByHa)rRkO@c9(@^KQDa&#EQ>a+ZS! z4}y>00e66N&t2S)3q`*FNwf#%5NIi5&O+!k9&wy5Fig+$iv+wWqp zZfdG(>~ZeY$C-!XS_|?*#C)5_-OyNGg2(CAtE(H8nuiZl#2jN?TSo2xz+~0MV)BB9 zCMbeBCa(6&O+7OvHnz#x*^kE}y4OL)g*ar>jn4OhC3IjIEYQ6@SM=bxt$A>_c!!;| zLJ~$FL-=PqK6KDb-KN)hj9|_zt%^*|*(O|ebhXy%Uv77Gk(hQBDBtXN|HojiF73)D!? 
z4*F*6S9O9(PZ#Rej!e3i4XPkz$2^CpBgQMu%ZMT>ZsZaFJypHnUATo9aO-Tej=8~Z z3Lzr8oil<~@%LG@6`0_ihb9>;_brvlp*3!Pxc;MRtC2){Jkm4{3bVVj}S_3JL#Q@cbJ_-gvEYv69*I& zjwy)!`>hUR2YlokwOHiz8pP@W1;!u+3U>B*Far;BF|RLDjaPfQ9zPxs-H@4)LDI5l zbpGFwzJDaR!#@p0H^4lMU4ZaBFF1cFXTJ%|#ejP`bnSnzkM&jrL2(!>dh0JRa#8sX zrEth4#K)INUFpe=0ly0x- zUUV1?LE4ACj3gtg*w}T*X@Fzlm~cZhx_>`D$H#V+4KY#C!CPnf|9!uNXghKYyumpj zjYuTqKY#*XI(jH)Nkm*6rGN*TFs!e@mU5&AjAW)qMp9E#T@gN@KS7&}J~37!9o%@h z&8DcO!TtvoOm$w~WH26V*EAPKv84{vpuJXUult%S6?HXVNs{RVNhojAJ zYH9+KM_2)b1G}^CGipyo|MiEu+1N$Y1b8Q^h_{FK^eb;qu&Qukbp*kSB$0xoY??qC z-^~&VxEwHu)fH{j;lXvlm{R23g&tRN3zgq!Fklorni4}R95Hs}DAJzdwKbD(`sN+~ zdr<m;;hJhgha|kjbrG0sE5tC0VHMQ#%6%_*K59h$O zlkN}fWJ8GNKOfedY|dt6=+)*`Cnh~#*K$%-B?48k^EYME6x3PUA`{6A+{aT^i+bL% z$vWSt_NC(v20sbG(cCQ}@ZW*`O&Rh;4FpzN0@D^&>v=*FRA1tDXeiT~C3)ZomMTMX zG%*nu6~)*1fpNM60&`qu_>D~Mt1H_&eIf6WfV#lt$e8aXQW1*}^R==qE3fS1gHJGttTNGa9^68BK^7^tOp z#)@0)F`3aNJ^P}s*o(Dk6Sv}(1r+EPhbpzOp1!%6Bj@p)i;$v?#SQU_fzGW2jCA%G zpz>|7eOCLCJfoieZD$8zSPFww$SxHa{_nePX5vzl5EX58L6%RoMgfG_(C$Yi}MzJ)+ zy*SZzTTf3tvph5^J_`S8;~s8Jg+TcA9{_&^%mJzyGkBrHMHa^uv_Dd->G~iwDj*;5Pbe^%DR6O#AC@+$q(6?7tMWI1wwYL$*sb*L1QU zJltNU$2QzC&L^foxXjdni zQg?KhL~7wKiL%Si)yn+jPl6{{AMhycZ>xW2V*1^WYvzpZ(9CiSk;*Ued?>fDL&Uln$#0mvg7M6!eNhC!W zDUDxpYQsS>xr=6A`#T4oKJ+CX04GAfgKh=iXb(R>$*3CcK1gUH*UkxEWiMti3^mZ# zry$@Qem#kc3LJmP$AEm~f4`Y^=uejI)3bCd&JuJlOA#?KvMc+c|G$S)A3e!;IR$|5 zfC6QZLI7%4j9)EvecV;#EOYkcmdt8)X@fmBJ)M`4A&oE#KTLFgXbMp~5xaBF-hMTH z(AGjJShS4)FG#Yge6@TMjPY&^blb_|v4~Cwv5cJI*8h$+xpQ>Q#MoGYMa6q~#bA#Y zbU}^Q#K`w6V9q;NX)CJb$SWjOft}A?Q~$Wuk>_Z3Y;|G&#Vh(JOZN(0n|$-w^A3q5 zi)K3QnfLNHo%BvM_r>OElSh4uaZy`0oHlg=TQbzrqMe)$j4J-1SCe6mPz4yKS&`#^ z0ld285IOb`8)0Q5lBx=}tfNu1mpQ1Mg8IM~t3F_Wa7=kmkU|jD8DPk4%g9*!lKfNo z>Y?+)Cm)YayjKY+nD`D>`6u7RWg$o~L3y-#+OL0cC^!G2-5J(|=;tDP?j9E+Fs@^Hz*n|o7Vk7x$)5Ypb(2m8`je|YlZB-{-P^I2T9Sw25;t$5P* zNnXnr*ZGOB!_94D6evx>gg-LL0_0`$&DY}M(WT?<7Ro zk*6dli%(hra!`V18cr_+<9hrVSZ#qd0kI?eSqeZ)!-zlvO?lK7hP1nC2-dJzZH~z9 zoE*?&z%IK8zH>~Au4zwctq;R3k9LgO;~C;IO?*H!@OHU zq8jiy4)yu->A<>hXdzfNr~WS%3fwxvyaH1H!m2M7)lF1q(=#(++^Kna-k`=~sGgCX z4L#fpjPFSwMfW&5=HcOCd>Ry-P(VXAid_>Ry|8Q^)6W)z@vNdE=jC}ys;f`umWsYy zWpWxMC!o&}#b-d}hf!%SMkeki#rw`{) zko>C)3nhqvn}Q~0WMsSMSOuynN#V%hTp7G0b`e@f4jJGo05KTv9-*at4S*Ga6-cAY z)WM(ucGyMCpM8$C(%hfYUikQNbj{p=3VGlz1e%VH zfC|>ldbtK@LV``UGz2;;mbB@6~Ukcvm zBcBdpV%yQ7r;s!h8qxU(eFMr9JG+(n!~O&|S!$${sh&U2pV;a$<7#%&+WISK1?BIW zJ|;@lo*p~@sO^hu)~8DMgQ8GczQiM^!V+poUeVMIQVmvF9p{6~uYRal=(CvptSgM-?b-?x{a|L`tG$nnJ)#37L-spFkw%M!?u z;;zpdTCP~Q5BkNpCV{RIv=xR#`# zmI4y@2|mWRi917#M6|mf{wy&SGr~#zN^zK!dw#Qo2rWQ9M*w&3@SQopi@7{W;7f#a z%AazJjv?#gKzznh_L=%K5{Aiz0(10+QPpPRy>6VqA@^HhLfOrOnWpuFz6o(4r z`X3qg($6kbs_lsxz!0;M`oBc=Lj=7uSt>$2WB}+0uYbyhmW6T*W)`Hn1zW~bKqdf| zLyO=4?FQm#M+fo!(z$6inT0PevDyaEZT+a)q$p{TYQS>cyN7xHdkdPz-TYtuNzF^E zZEdCBAxHW)dKG+~hLcp~uu25e1?uZzFjjH5@bBY4gj9}#69p(Y+}MT9{khZHE`ZpA zv2U=SPt6BPgI;AL*PXQ&se5`~`gm`mq&!{aD=#nqva0HgwzeaP(3oM@W19jj zla)NMX7oGY90AvgUlE!DdqhQ%nm&FystWexu}pGcD80+G-}d8{qS^z-c-$j6Syol( zu#wQ|>dpP8cr~b;APXW0sp4Itbo0%D4e5Hj@w+72p!wuUvvB-N5I{+?Idf@N;06JN zCkuFU&hvLtyF6zbiPsB_F^27xAF<^Gkmz-Fb0R<^y>kllv&FL8MM|%4Y0zNCWtNvV zb^L#^0I0At2cc&9nBIG2n_5y$+Tmcp`?|@BjR!r+M~>)|R+q7F(ZHP4lDWKl& z2KNe%Axwn;gKQx$b88j4*H>cnbAA{__&@FZK&A(0Cq0+5@-c;u$^@wk_i=tZhSNsnyITW;WRn zUkO7GsJ%zyAEKc^i$)sB07jWVZ}uNl%i)Jak&<{?!er@e^Yi~e;*P$1b_{DM8_7^I zh?_jMf-DyQxg(D9`c? 
zefwbC1!scRI2oM%0sMRDM6ocSYLu5z&6yvI%;DPv5Py76Q}o_)`_8E`vG;ev^Rw;k zQ8HmwI~MRb%&=`!i>mqT3avJH%!V166F;7obWa}J#h6^RSN=O6iCzq;Gn&GCQLmMr zGX554`b#r4If>?1Ln^F@Kd7n$RA|a>^Kuc`7_k7z+2J>mnrgscf@sm$}pWN_WY681=)GBthq!Yg9#Mv~@6c)Vq2gRSLj z_xa!x@cidwW=8waU3TeO1a{3MGJ&cT7Z>+9FK+-rgICIB^5>Dv zU6pBY4d;PF?!}+tk;&YTmOf>p^!e#Ijv=AKWSJB^-7%v9mcim{`baBsTK+V#i;)I= z@5P@#S&uI^+|qbw?930fk^;Wsg9mJBZou^c^W)8w8r_fT#r*3cX;~lw)Ps0^Z%;$! zJnu7R=1d}I^n=I1wH6a;-1AVJ*+lAj|~)=7%V36*Nxc_=Nw6bxsHrrj1XqDxbG zthe($w}Qv+hz5#?l=|ZlJo~qL|7d{|6U5-6iZ&POKDz=GE`{en)ABs^%+#7g_jzwG zcZ;hp;p@e~AuFb+h=JY_m3wf!B^I7avR0UVIuUD2K$^d|Kh=z6Pl#BUdL)yZZTxhl z^|jtm%dohXVU-rskYiv8)!?5GD%5N(eDcHvtWhL$rShC4_JfLlQ&;NsdZwKQg*}`Gv$uCINIm<lH(i@1=*yjKZ4A}bsmkQq=R z%M%(#NnUow##nm9zH{dn zPEB5n;!$51Fin*Yn(zzW%yMH2Tu6MSvn~CX-j^_v6YR=L$=Q384ZLjB=7tj2Ca+LE z<|bHJSnA)GxdRf!s^{ttzgYhA+o$CzKEL3#v66uuK`80Uw5}u_2F^`3QX_d`x?1=| zWJoN1ZXxN3(VhU{Jljg zehb0LeZpxjFldL9xL3fF_d3g$x;FCJ9UtXS+S*EecUZXbkdt6L@AULiYI-VK=de+$1GM(14F|LxPb7Isr5KU zNnjd*>HGO``h*MfI@>>(a2@l-Hrczhic#TczKNQ-{D=KYV(N|;FN_hYKsyAvjI;rv z5>S;5oO7g}v|DWues%4yS@9uH5{aj{IO@1i^MCLLilqIlHWcdn{9C1`JZ8l6wBEb{ zOau9Nq;JloV`FnO5RppwCK_J7Iq6tfvdmgIZ0y^vTuQ@BhNpr!NQq`}7F3OI-=1V< z-t=>%Ddj~KDC$V-x$yAIvsy@)|3pq_j=tZY)hRp-lnCg;ZSYjNT0S>Q$}o2V7NH2xLl{ zQ>UyxG2f3#ee_6C#s)|lwC|uVnNiQxJ6=KwPetgn3Ko^VJr;WE$T@)W);FT0g`!PQ zol?s@g_=^JdYae41)w0v5xnv8aoxkmrT+9SZr`W=W<&cJ*b|pLJ)IvfR0Ug2nX(tJ zTs&56?bf|7g$XJO4AbG1v?s>F*;xtvP9AwrjBS>eml^2jn4R|Ir={(Kl{ul8dP>!!0F}>s~WUma?;Wr8Jb28d7Ru5 z=^0VAAtSl=jFBFcwEgGAm&BOcf$(lS?4?>|keq`RkI0jo;r>gJ<%jx`=y* zrgO~el*D~I>7olZ1_mOXIp+=ro~YFPOmXGl50~Uu6anNhOp@118YA^fXEhcEx27$p zrdaW;Fws;j^ZN9?4=gyCdUY}P3EAM9RoMkiO-;;wGBY3SD_6m4fpRicgU{wgrIAz@ zR|z<3N6{ZsL*Wcg35u+Wrltpuxqa?0+O>z1{_|@l+4XUGJiIK7x!M90AK278+tb{m zFQK2Z3hwmul^d&+T z`q+fCt^Bqf0R#s!5iRtW&CTeD(N4@ziu^_Uk5cuCYUV7@_^gX9AKTxPro#&p(GSwK z8lgoAyL*-B``+<>BPufup3`_9?5>D&ptxD48o_jtrg%eN>}{^ut&C)~_^ZjK_rH%lPk!+m=mt2e7;Zii zzHEd!FQj_Z!3W!?I$z~J4a;w-{oE$s+soiiH~y)LN3p+jHGe8Rw8^wKjaYzhLCK-| z$%7D$a4C7?c4s0jViHeS+1X*lXXi^5p{EUA4hpCFkr76WX|U`B%112_Skl_DqUZ>C z7wFXyRpTu6fYV`kc-q1OtW>O z3Dy7b`T|!sSl1~2b`1GFFg;mX^VaN!c=Om&%GVtQS2<%$7N3AM3_Ju@m#xd+q~zo_ zzzC}Uwf!>7bs0U07k3?o^s|qof8$WdiT!kiSB69(Be6gRsdj7Nb3`oZ0zWtl02zcP z#~D?#ErF^B5Xc|wJ1?GH{W2v{<+Y||n65-#!K=g=bJO8eu;O9e(b?Ont*woo`LW>9 zVYwdDoZ+g7tl!hu{ETiLO+M)50{EKA_kNiHh)`Ty1$&RC*z6VZKe*2!=W|;0MMp=j zHp=6ZwqH{-4-vOT#b3rI_t%W+!rK7#Z&U=kp9=nV%ZF_af|!89C?paO3u{J)KpSjm zpf=`K%Spn*<9?h=L@W*t4lRKwD^IQS=SNB3p~7#YtYO8aiRWnZgn?mSalfLnzyIqc zdWxWBC*f_hgkE=zQSA2pYgNTnWTYDDpLD4@;sWjZPS-c>Gq>`O6FH9`-&og`bfWYy z3{G&k?fZJm%hGdiLecJ{G57CV@{+!8%#D_Nm^<;QlI)?#V+9j#q}~Tp+K8;Iad0nh zQ@&>Ovp@xbX#FU4@~T-vaBu?fUG5YvtGzI- zudC|7wd_zUV7=q?n;r(bw&=#MP)+4qR+BbZ z{HGs-u$x4)4b`iLZUt* zdq+>1-AV;hW24F=7cMT>92nI!Zv!fY8UrK|5+VlPtUU5w02zg{$_p3t0!(4_yp$VG zV!|WS(>no4V-L5#Ld~a^`a{}Dn!6PJ%)LeCJT3TxkMOO&QPO{Oa>w8=fsO8F4;8r$ z=e^9tH`@taR(AFFv8BWAi$o194|1CHI+oY>usZ#rX{aAPR0Cykp^aqc-}5h>dDtem zfNl*D6i&m=bI-G8Dur|(+FiKdg>fq615o*uKYVz2`5QdCK((yiSg*KS`7tIW-N)mk zqr&?1Op82k8dy_quC9}}&wIgklq~hAu}o3vr~5HzDuCTIZW7eM3Bnj3tBx>Pm(B|e zJ;ff)ie1qdBH-(yLI1MWz7;Tq4d5KmK(V_!KQ9lfmP(3>R$FR9Ns@-iJ)-<&MmNOi z2*kdNnRbJ`;&F7uxo2KvOo0Sx1s{4Gbi%Mz#92a|Jn)l?H>e)-Efw3A2U#09=Rf_w zC5O@~syW|Q-Gy~6?Z6j7?L>rrd0+a_%aW2G7qw%}%+0~j@$XIFy%99=w?sPU)G3MK zn9TcrD@Ntwe731i%3?WRtx`RIQ(v8%GmYgIpp~PXf&ao=YblH&VHgMBc35%R4wZ^Z zdW+@ctc%{geN<1MUdel1)Mv+}4F&T@lRz@y9Tt^WE{Xo-9N3Ge(5VuI?wK+m>t_xqZ$wis1P*h1Ps3%xAKUc&s8;>zHqm$X2{O?r zDyt|Kx@wtR0qq3M1r}8dC}{lHi%VGLx+&Bu{YA0D_rYdbA@;v|VP2Ed(_kcjEp{s^ zWzGMsrcnUQxwv={3ZhF%=P@5R@gI-uw?9u1ql28LK-c0}GcFg&I0j~BFgFI%W_DI= 
z0k1L;TAB0p@3USN<~V+Rz|N*AEwl~3 zH4uClJUTgl!n1pQVvaRZ)wQ$l>Oyqlr>QU$^X5;j0*i{UBJUSC?dV5fmMbJE*m;Ys zR2I>XJdiOe{@JIODu&j1BR|8%2+T(v!#W0T0eD1!Mm>l?9&c^SXE3v4IlOL^9!3*` z;-Er9@N^%V2TRMs++2C=DX2S9kO>ZH(#<2)a*}LYw1&3w_V7`fJBP)>t%j145^Wg9 zQ}Cm}(0zJxlBfa1fT{~2apTHL#_D%KuNVbelsT~n*Inx9VsmuL(M)IQ={e*3Qvt59 zunfnVPGEG@^TVpTTUlaKi^#jJLDp*`j(}|#uEJD)IK5+tiC6@c$QyzfMFx(Ur+?ObIEe+p` zq`a}UB@YM*X+)^|`nA50FGN69U*8@31@Kd?bbEnE;dYOc)w`vxV*Hn&$-?ZznJw+> z>dMOTWW}qsNz_hTmxZ?%K zYY#*u)FPRHs)*Y6dj#-OK+1l<*Brkn+*JFHwa|#YP@h8CWhxenJMiq;5}rG*@C(ki zz_V@cKG!}Tw~I;t^tysUA|nf?2>+vLqA0N0@l`+9;13xE;5Dq7p_zZfP#sh(MFKTK z!^55S;2gb2PJ%nZ^xT}z+VY-`q(mS~M9-xIDGCAoA2fxc58B0HlgWS+Ps{UcKRm#S8-g?^1%-Dwe2%3TLgUu z$hp7@tJ<(VUK|J$4Jp}*5_=Q7>$;Aszbvl$d`)+~F;3(s=eK!8{q%x1C3u~IJ-LR* z=I#<)5OVulzj{GnvyPZZfr}MX9mxuwpiRv<7Q%$>JBt2ov_VBew{HCOV9D+IvCQh9 zodv5oQ78bRb_h<$4q*J9#<-FauYcQYD^^(+)wkbvH2wc05hJKLb7@!X5*7xtl?;S{ z(50#4g)%f%7ymjrU$PDg(=+6c=$oluZE4B_r}V%62V@f~s&qsTz*aZ3=&CsSpAt2_#d&jQdjcG?OLk`@$rq?k;U8F zn7AquPyHJD)pHX-2+4MK{|*RYuBRx1=#;#@j5>HL*TB=W6miXl8K+teVjTNtOBOea zrvQ26#u{wzu%*s_l;ri0=b9TUu=Q<+D!k5bmgcZu=Rk_f$Ct^G_JFo8Q%>am{XZ_i zM_27$hEF#TqchJ&AktvB!DS%NY>lSJ1uA_)n*6J)vlXE=iLu# ze`_k&7O`6-@y#)Y={Te4-Fm}EzD~)+0+9F?=PAt{gNe=)m$9L~DGAm3g zD%vM39NcP4mv#U7+r3ryKHS1w+!nDCZO3(+Is7sb1;B$<`Wr*fo;|~L(NsJ$64ZH^ z-XpQevH#Bg8}m1<$B)NLk&^k|AJTi%yQQV~lJI6s=r;Tq)#kK^y_*s&2e{0%7)fNh`tJ%;$%0qKih;?5W;Z!JrwxQLl7)=V9lrNWekjgvE9 z@Tg^fp9`P}&YKT^RekAr7AH4st&^;{igAXm?K$O)$;WIQ9KpA4fp3TQq2V8i#mLAA z+M0sp8#nAZDv4k10oL%~^Kqh#i`VabX zR6NK!2>2O#yU{%2GjDQ!HDEep`rO3=ZQb1uL3|o>#mcYpdko|ihoY}PqK~`;F^uy4(AXNQB084 z(iyJ{SQRgpqVO!0(|GHA_ZrFi7l-WHRkk@jkRc#6_x3Kga^8v=KMa^Q6fv8cMn=II zEG;bsOgV+8je}o(p_B&61(6UA4}MFmReEY_uYuBo8ac(XsA}P?aC0rMk`LZsFb1<{ zVM=Umd-u*tk`4LK`46?8uI{%#)XfM8TDa_cmBfeJd3aQya-XjRfv5Da z$}#d3xaOlY#-T>>ixw1XL9lN{!d?vuI}hC2B170(t=}>=r}_JoGHKfIC;cuL>aop6 zO42h|E`GyF0_Rtkx35Y!xNk~l%z!C3u=!@y-E+6zQv{s%Ul`_MWOT*V7=MFaA6fDn zOz!$cyuQjxNs+YNn5DI6XQiCb5oQh{U89qE7y|%$+ZOIPHwiL%R+g>#QQnr=Yi~yp6cQFDDlQI}CY)@%{gj=K zt^Y|5=~-BRJJ{cTuP`%Z*Vu_N+Q`!KM0#g}G2DEKnykRT3B}7ok2&Ysva_%+nk3P^ zQRd<#OL{bb1DUsUZ70#l)6++RASf^C;nJC41mWDB7f)b<)YscfBDHnwRB{riK>$5M;qcT%4DSlidSSvN(mi|65Lt6n zL*pAP+&dn60ULo+2v$;oNL*KU9$sxJD$g@Aj_~c{>sf_`9c-Y6THEIO4RuO>xm@NZ zZg2H!Z*H!*_P2bK#jiIXrzEtSxp%O{NX^{OF*vC6^ZL%Wi+>^x9s2l(AV!vmfJW=< zT5@bE+qTdLexlK8zhYcSk|crE3^l6#XJIEEY#M+&6#|4iir2L@U(ma-6$3#BHW8lp z-;i!0t^x2EEv*osS~&M`h6?^*WQAWxA-%6X#Y^RKo36e^n7DaMzR&Nnpt@1AfDwOn znZ+vv{!JJW^xwB3Q$_uOLjf(sF7IQw)+z0Mf9tPoCpu!{M9=la#>CM6{Rk!xVfCS! 
znww5@TZs7Ya$tWMSy^Qpl*)Ve&H;MB(|}iFO^p>?W=U4QsPK>yV(d$(e#ugJlw>xN z+Aq^5Bv9d9c>}Vajyo&qV|u9^AnTrz^VXA;V)Q<4T5*~$<99cRan@0A-bD;V#iJt z8>~LBfR9HWxhP;1CO{QJ>y51h`@cCis&VYwt9#_oHUep_DXLM$ zG$CP+Xm2q`o!g>iMn*V~$~tgJ{6$s;^?Nl2z5Z^_x`$#W@5KVakc!;>C!|CtM1v{- zlN1TxA=#7W<}-71e&B_UKVE>EZSehjSqM6e&wQ&jc%GH3@GD_@e?iNIp>i)Jen)_2 z7}_wEn_ar}9ml}_R0fC3=%y)qy3Niq<+`%-WuoGjT1Rs*3;;V=v*g)tBsci5WsAjE zUec>(|I}(|lQFYXp4U{KRGe2_zN3-8`sdH{f%cJfupNn&x-%`Jn>c=o8;In3Z@rKe z^G0`v@Txyk-$bTO@^g4A`*PD%m8_5DN;#S|| z85EM15Psaq(9pVe5VGH!!NHJ~G`)LQqTT~$rDt9a!?;ShbNc3tiP&v#au0?46^fuI{h6X- zX5<7i2DZHLNLqpDhaYoLtEjiIEv&K*oOIOI(t=GO{O#E6|GPI6?pZfaxLfx2=_Y6b ztseyE(wur_7zP4ZyA+E_Fb4-oTLnHejFZ8C21E7n)WxV{H;3hVpZ0?fP7#oF>-Xc; z5C?QdZZ| zDTr+FF&7;dBi%GS)V7^d7#Fy>p$?%Sktqez1N;pF~*2 zH4|w!Wp@j7rEL>mWNE^0sz1N+(rs*W*c&su?sO5Ki;Ib`?!3zKHZw~YX=?55LpfNT zd}P;0SDuQ>h&!n~Cbz_78e6_L{?OX#(GNFcFf1S4*Yad&e_kB)kagELv-nLhUe z{9ArTgnL9@d_VW`*<&-z>`bLr(T0Lhg`wv|8OPX)ia#XNKctJTMaRpnRx;+J*=jF3 zx%(@Jh*(JyR#`FXP1ORsrqjn>!JR*2ZL zQ(ANPK1MNn{+z!ovWXTaPeOpVTRNDL_lGk$a3uF21#z##)=nwiJML^vaeL{C2}VLx z@gg_?aIraZVw>-8ps}5MJj?T+CCgSy1kag&PPr7t6GTZCNB7G6M)*R)S%UEULms&T zvIExbNj?vtbs1403l9fzAPci?)8O}BG($r}$A#Q2{Qu0joNq{AOUT{5+}pL%-N<|H zt$@wWxsJ5M*SC$F14k4^4s7Po{I6!GT!Yb_g|Ba=z>SM{Rd=zmvty?^1RPKgAzOfu zqQQOf_(*G5ZXw48yEc!$f#TQKy?MVk$!{KOs_(S0+34N3@~rgDsfsfoMk7-^>RVn| z7+G8m`=(^=zVUO)ZUkh_9c|PrF!4G3wP`^W;U zj9@{R(}p7*}@%!$toBc2N3Y*Y&6dJRDz*lvdtgdR)xMFwI)dg~R%d(pww zvWFxRBgRhftt84JF#{q)f<^q>NO`qCBFuhcW5qEx%Wt?;+g?z>k&oA)OnZ{=PZiWQ zm*xJr*$jKp3i8EtW!)Dji97bEN=K&k@-=_XsQAyKJNjN#UtRAVU5am)PjOF>pIBP2 z9G>qxC@LD(KEc7pwuMLeBk?WlVsOWoZvS@>fI?gc^)w-t?npkA{Me=} zudQdLewtnB`5_+rXEX)4IIY)S(_#Th$%V7F;{nZ8qO;|Lo*jHNd{16@40qS}u^M7X z0O~Izw+#Lykix4Pg>sYX6f;-9#)+2k-FK7WT#X)wR*t zk1t`d^BV^!k?*{m`rEQH600P><)%{k=8NVzn%X*Lle0+=NF6i=JnQfJ*IP(B4AiYy z#&;-vO7vadQ!@-@@dJ}&@zXDWBbkZlVNc%1t_?p5k{bfwV54VpQPKAcZ=-xlKukgf z>kWZ}scAgw4G2E5_E+ID%PVt+U9g-LVMzk10IaAFLyi7a)>bMv)wUg_zjsgW2voWc z7gf7js5yr-s8uZexAmt!I=K1 zV43WvQkfUN*PPBa1_otdG>RS-|JSHU{Q9neKhnDz*8f&~9vhRv@q~vmToSDf`?{t) zApS?!fQ)w8fa%qnj+3U5L;h!v<_fO5B1T8`pxp6gfctM@Cz6x|MY?Xw6IqCr$os-ncJz+#a&j1 zm4pxbwfivO-6h?>|HE?wHg4o9D%JmpvbYq8>3+hksh}gnaTwb>h2`br-<$(=7g~Cd z?_`C9-X1qp|H?=wV@N;-pmOcbk<3$rKck`c$E`xKcyY19Hc4;ya4)m0z&N)QCb@rTXQ%Gtka2 zI^AWrEg4>~T@fDK2>QoyMZsQ>EZP;eGLhnsd)4vtZPH$f-^Yt*^94j2zbtZBHkxyA zTQoVL=^Tef3l`*B+kN7qqi3+S1^iev)*Q*gkIFV3FqaMgsk zikyN1(>B-#h_D<(8H-FkgC%zHI-xkU$QEMY(Oh!*HoB%(99e7Cg4rUvW6Nvt{u}C~ zxd%}4OD|8G($YG5mfDKT3CnrmZ=jU)#|{^EzwYfq(OQf_u4IZ)%3LLDQtoT}-9NTc zQOC;3s#osCiYgF5o(Jrk8W|axpZDL-Z~h?(Ct;A?lmL;G>dAev%w*HOlkvihIZwzy z$Jp)b563v50q}C_d3@vjYi^4InI4nVXTA=;rl8dVrfY9!x3IjtV|^IS4ZP$G(M)}= zFM8WF|I7QW=eLsl3v$N82SYN)4qaRn8ov6}UC;lpIu#)%KEKv{|KorHaUx8+;qvzT z6P5AFZ^plG|24ViA-QDd>mE6!{gMaaIc8*{^nIzPw|B2a`42#-&UGB{hXgz08ZP-| zu4%d{l&lE617(=26IM&=4aQ{)S5|&7ga+-p4N)}caH#d^=)x}kVOx8h0(V0%qkNO^ z;BdU(Zf`#Y&ZEV{N>D(yU9k6mVS%S7h$3H z`_$1m`rC4mcJpTNk*7cdt^P7%Npqu?v5$(8KkL<6_5KlaGczO%K(MV7OVD>H^Dt}g zTmTnHJ65cyv^2AO>dqY!bGI5{4*3RNr|bI@de7CbjpE=yP8GFuBoo2!5^$W{#Wln}cVws?;bn;HWY2L(bvz5Bnv#Xw(2|3DE-rpQxMp~IbhG<0 zv(d}#VX}>WN6|L-{*A=`iRVvq#CJN3J99ogE_*@4N|QOb!ngDIS>pv10l1I8zu(LR z{w`0~QK1~$R+E+UA)`1Hk#utyIpB9V6`$5>H0jRHzJ71@(%PyvfiK3K{1vnofEm|H zrBwF$O3uCN?A6itE6(g6dHfvah!B-yr}Qq#X`sABWiY)%>Co^eM*CcCm$vuW;(^D0 zwzs^Dj_*EcdeTUZuOh1~q%O15j+k#3{@%t#Pj#K@`p{Q0w*E%Vo;&rL=hJIuKcdYq zZ4Q96(%scnQ&-n0E;8B4-Zt-1Ps`ZNu~+oo|Bt5Y4##@$|F@BZNcI+G3#F3mP@!Zd znMKJcR8qDQWt5SMGRl^{_l$__omCW>Q6lU2y3g-=`r};Bb1kAt^cK1mU=oAT zbu^2y1DT(2S7`02CwRQ^`FH=&-ro^VM5rgt*l9H6jkI@%AcubctwTW}=gGv7TLM35zw;E$)Yv zVp*>dii5};VjU|R8zPb8TiJ>E0T7`F&##IcJJtsS;?oe9 
z;zo(u#`iY1M`fHJY472Vyz2FPLG7QQ=Br@07y<6HT9Kj8gF`~LV(G5o8b?%7fB)*= z%!P1#)%yhm=I^p+$E}|lEwyE~t3EI~8#M@rT$ty2?8BQ&Z*jT+i2KWRxbx2ylH&@> zL4T=fT0ReBQh|USB|}GJs+1PdNx_`#RaUd z_b!cESX(2PwzKqRXC#I{gi$k)OAU7SC+eLF{(EPPK|=!;HLNB$yB?lueSI9(?rYwa zpRF%k3;z3Whq&Cx!no9_SV5>EQ1wSy%#8oBn}#NVlFzO(x%}AxPuO~I&)*Pm=v7r! zN-v3Pb^l%3}0Ijdl@hvqE=aL;@mubQzjm5aIhH}`c*_$ z!l+?lVuCxfYQx+7l)e2d`cP;XnJy=nP*WbLXL&(e>a1s`BX#(&UU8TSL`cxyK}ewT z?)?23o0;ZM26;wOejm!{u0>HMk9ESu9IqDxz`9Ns`T^Y2%Rg$mIg4O2@DBMfI z*VNLIk3PEx#b1w>&(Uh`%5(ZM=hmkA3E01pSmcukeXk1IW?kKjM+%(O4!HX|y11Yk z-nOiK0P{KW4uk$9+wz>Y3M~8b8gN5(EEt^ZIQTIvhru zVBxT@5D*kMEXu6T!{W zrBVJHLw!(YA8#ShHJ^4ep1@)`?C)sfu_vD!b6xUX4L z<>54h;536KSSvLpW~g;g%b=vvk1Oo2i{0<8FN3Ayb^a%0WH$7l{;t}7+XPp#w)R4N z;<(8E?Q@Cex2F0;B~SFBSQY|s(h9)xXHTDgxjfcU_xQ@~2}M=@3rCN%QPNP1O-wLC z&Biy~^Elyx6$@KJHq2+TZw@-wf)vH`xsF;B;sAH(mhlj>9pP`iY-P3Ixf`A0>Ida`(*N$D=8Utu|{IO=fgv4druHf zos_>WOH5KGefROtgUYnr(=%LzdpsxwLBH4FmoF`sbBu>Sz*K^|s?)A3EnA(t@339p zaYo(g(ay-I`LrY%sS&8^l8x-1AXgmUbCWOmqrT5YhKEG6m`a?8HI@lfUx13CfX8U zvwxAo1QZ5^;-y1wpHr|#uu|7B^MjE15i z=s^^Fj?*R67U3<7?{1-}Ax_ z(TN_`W12KJ)NGF~UmqxmJHImfMc735VbHU^7iqbwGk%M)`{njjK6UK7S>aF-djLon zqEuI!trlI&XE0=8uEa)y3|bA1Gzge5ux^$b=^+URG7vGN>@lsSnZr#yJx#Zqoc4vQ zZe77WPdXJZE^AYjo+SG~ouNci!Ls%tJ^?@k+}E&M{HS>6_5ykLwzG7TZgk;)zm&f zedl)Xo@udT&346rcA_qvv-m30l9L7H5-KaXy_*G(M%yqxc^hG+X8NYOQw0ZIjfMV^ zJXL{e8H@NlS<9@#cy%)}(nj`if_)A-Ow`fYs zQ1*0sni)MiTTJ8%wBso3%B~oniS=#akG2*6%CRnkfm%J9ec>*9d*QI|*+28H$D$8k zfF-aJN_q*es%KBTy3=e!D{#RRR6WB#1kf+HEsY);UUv}G+IA2Hf3C77)x=o*me;$) zL?%{N5Ti@@bR7b>&NG~Y_8Q51jIXAL<4^8{mVaVnBcfTv=gGxj&T~?E9r|h)twJTw zuxCU^s=InBTUuI{k)H+hk91v?Okz-XPhw)=s}`u07Z;IM)P|MA>qB}CVl@vQoCkn` z5mI2XEF&WW13q?TLR?A706-X2=f=f(<6<;u{3t!U5;G;}!+3l>Lh}{rdEvJ+<+`ce z*za!)70v;)+4(iLsIbc>A74EF&RM{GXV1>TPvo$GFZ%$p|54DJBEbXvB@eV6=JX2J zU%nqML-mDs+rXg1dT7xzFLjC``fRJo4FkQrsM6Bvs^=>45L?0!(+K!hkT&AT(GM&y zunM`0be@8&1i7qTT?|l2GO)>x^!K}Bbhoi z^E~x@&54h9LNk2>Xm${yau0<`v$KgUlRb-!tb|<+eInqn06&-+nP+8W48w4OS&~X< zq+l=D#2?Ga#;>SvX6+%@8E$#2D^%22l3%e6IQy=r6PNScO4o#ym)AD2iy*uSoZF|v ztbBi!xxarWGZGW|Vydm|8Ik&{C*n5}RwcBH*kl~zkf5w>+fb??UofGtq{2`g|8PG6 zqU^a!3aQKW?$>}ux!c&-z%B<3KJtYJkDLd*i^J3E%}?m-^5>}N=yooW>6U>0@6vE{ zc#Z=MfhPriAELTi@9R9xOcH4}QBcf{v(d9MsZA$FE#fgte5$f2L>{I~@Ime?6bA1H zjrUi12e->+#Sv&IWS^wTuZeL72emA_DPJI z=q+Op$D@f76$m#15qN>Hs)R#pMN1PKnjO8phynxaXAB_f;#^Zh!&Ss?#5T$qX_ zPNI-2KV})>5^Zf`Yt1yW)Az~4S=%1pe=5w-gMl?tJ2{uNkIZexhHQ*pRjD_6GM*k2 zG6G%AaxSZt(Vm7tBo`S`#MIz?Bsbusu5fS)$B)1MU%@AKB3(7L+a>Q~IT0yC$ZA$& zx!>|W)6(f4b8Pl$Hkt-RKm!gT9>GXlsQ&oT1{pUnvyVDd#-7Ca2#N@=C+MQswjN+s za05LN5fZVrC=A{fECFaUzO44If~W)p0Ham|M@d6Lf#fCE@)A1kf<@OnfwTX@a^o@{ zkD@{h6D?j3Gj4XL!}50dsX2wO?afR#gY#QnOg_h!jep#&t!fzfkdYh=fDv5;H_QQEdajiKTu;)D^~Ah!VbuH6 zjn8ikMy<`d^9Itn)gDc&~Z3jrLHTPS$ zZgn!9j6bg)!b$80h9xo+mFt+@5#~iir+gngK*&Fov%lXRR0(>yEt9Y?b?_iU0^Dfa(tH?IU(jSuUKf$D7QxQ`$2YAcFO^c+tXB3 zJLI@8zg_I}L4TMXWZF?tF$nbBx6h|;ynXsN1}n#KZkAAI+cP*B}8Fqc$R^aAq&R3cM=>&4)p(e)H3*NuQ8KQryU z=r6@Y19iswfykjYDDwOeh7#`U`?B_{r;1BQX8*O-hfmx@Wo2b+*_ITKCmh}Y4988P zOgw;|8)=G(n{zR%#h<_079%d#4x<)|^AHKpSdt#-;P7w8B~B+*qPZE}q~ItKz@_u`f3!22jPo-0X7&*}*pv&qbo5-cmoA8L&r45yq^IvCD5S*9WZa z-SW7(>4KoKkF6h}rw-WgE-YLDZ}qVX1-ys`O+Fjl%dUwKs@3n`uYyKU+*&<}Od`C# zZ{PZ2V7|A8Zro>USBd|wb&L!DDCF*%sG?#9OMxydNJL5d*YCXpXpu8ibtQ*5Vq3#d4Cn{?%iAn&+0P`B;a3yJco<+c6p5* zl?A_84u{h>sMlq#ACPk+)~}-l!z28b$4xYkgK@`ng(zZHng;mJQ2!%cv#`}P{@~J| z(w_sf;yO+jbUUH@+rG21_97c5HY^6k_4NmnCA4!6ExgTa`0B(z!aSnU_I+^;`XdIm zVifEW^M9c!1QPS!Z9_+(iW^~wZ(BHMGx zM7BX+m6!G6zN`ujDKN!Q)j>|F=rU`hWhqtioFAk$0;}=tK1-A*&ZK$2cqIPIB7sP> z%{?xIWE-~7XB6d1e0rE;Ai6`C;Bn3njH8IzgQ(E$h`-lMstSk({8ZrBvKq-^6?y*e 
zKxpu`*RvNd1k>@g_io9F{3bwZ<^|_rCi~Auf4Un?>gR-nqyLWSXjnf7I0LLkjrRAS zKc>hVU0V-(+x77PZIf!?=O3o3-LLowJ!FmhCnrF^lH?y5Bf*)AlNYyi;oK&;EI6W( zsllqSH{FXx?ydicCxLq$w;wznX>Phw?6M>9h2{GIzH2WhAHTM@vAeP+(mz=7DO20i zhs-TB_L%G97|00R)P?+bG=K@7i|n(#apTu916^wqk`{pqi;a(>oBTTHv;FbO7ys%t#+EW)Vu0d0Ww{@HT#cy$& zzQ9LKR1 zLU5Nu#bL>{`}^I6%SmaS_2ZAeR}8O9=alDMhh-N};CAwl)idBSuy!?D=e$immuV;^&u@Q&AU95C1MT2g8p0n~Z|SyFKu` z-X8Mt`FU>S3B7+`T$we@OC~QYD_@Y?ygopktLmE=Wjhh^E0^a)1J5K=@t4KE?oMJ> zN$E3AR1~~SWrvw*+se`vGimbxr3@RX%buQF-zYZP9`t)}C-31{B;ClT za0l{*2&UV&rM-deAVGP@pWXNF{2`#>6)WC>GyD7nz3~8`hft65a-!+qi$@LbQ!R-d zl-PgFYwcY??foRBU)hCIoC>WI%T1JAp)+ZKIj}C{<3r3La2Tli@g)Dj(T7~|Z^Prq z8LW;9E03L#KDteEkmc05YL#}k2f;J+Og>ixEd}&7O3OLJJ$-^`KAU(qH2!*3P_Vec zbf-Q3pp03AorZto`zzjq*&xFi&>s&C)OeTA1n-0(DRFV7yn`p{8=#D+_@CQ;Mfuku?TJtV0(iSbeu1|x0^4=dEedyhY? z8*hm^GJ}|B#yJ1NBp+Y7@VdvsVpm}m`S}xhf_NA4kpddwyTFg%i|z+w$jHl<*ay{+ zFp7g20tQ&7W@l$B&+JIea_xYz62V&GX%8`Y;%NRy$CwO?CkAT0YL+mSc(`9aK<8W0 zF|5c6AOQyt^a=}3`8<6k3=h=5F{n-qy|v!=v@6|?ReDgb?J?=vk6_NshMI4=d-t*} z%RNsdA3d*d{Z-ez0@_yisSP~iN$8;OMi84)@xM}Jxkp7AgM|dDCLglmar?x1Z;GSq zI01q~bAmZP<5@x$!}8x7b#yBB)q(WOcUPbKw+GVme;@S?3TlCL1VD`|Xb+gzjuqTt zr1}6QKho~-I`BDniMp*H9?E@t@~YvTjE}brjvl#i z>=?%#Mlq#fW(dNFi=?y}E@))&6jUk{vk(rWCTYBtBODM3DNmoas8*Wdh%4FkxD7WCfJ{XA zf&Izx+TJRB6q%SfiFm-%7j6LrfCd?(wh*HRNO%9;90DfMb7i?0{BW4QIH!J1oz@|k zez@&N7NE=3fj|sG>S}5pbp=~Uj|FXugNN8vdsIB!ezrGb2*IMgKz^s@2J(L3xf2l4cwzG9(iP^P#{KDaZx5Dhyg6j8ar^e~Y|A<* ziEtsa(maC9=jgvD6k-?$s4@Q|_77YEcT{ zEk&n>x_U!Pi#^WB$Ve5shS;a5hk?!>Bai#PiZMUBI%#WbyNmDH4XM4)y^%%fdf6Jq z>$_}&A_M1JFSi!6)ba=TP?Q2r_SO+-xGi#I-@f>Y!=d5fWe`2!l*Y*qbO;_vkh&Pp z!NYvKnnzp`VSJV~t4I`TIv)`sW_c82wd|WpU>{zAe-5e#y{vlNPqzJFkK5R`a6Pvx ziKj7=5pgR4`vdT*xDcjs?wpIW^UsWToBbyxW#4G;k@eT^Da!sPCrDL)lC=NVe$Pw4 z^`H=hcMWeRTIW#SfLQPcAg9LjBJ}L+<5STn%|AsHUHBsgSkljO6@7 zt`qTV3Oq5qjuM2Oe>+n3;W4LT8ZM0w-&HKPUvQsbv_|6nd-tHyhc>fvt}5nSGO?{? 
zq#SuaVXapI7C*RCzExg+G=Wpb$KPcV3cO+bG zM9VzGT(eG$LMb}gZ$;0P_R>CN#az&!y9XP-_?LBu)U2$cyu9?(OobnZx;HnkvNf5v znZ2hC{0YI%?c4d@bFEf~povJ8yU|HQNA}@qt|p~LOKTZ~9kkqXQ{&^mDt-`^ZmtkU z*dHCzGO*5$k0E^!YO3cZaiZwer-HmZJmKF**9-&ArAaqL&7}E|9d+F4t19MY1T7+r z1IP~I%ZYAyY_>20696XxwkZ%7-*q7IEayS*fun~0{!d1zy8kGcwoQg!KW{$ovn_|p zY~VoPkTvn8-nfB5u&}7e9Q~hFmtQ0>I`L0-o-C zK}I-sv)U;;EDd9J;YCLIx0<+Og?J!!1WldjnC;u(`m)^*u#+ zJ@eoN4eOrYZZ$ov@9{_TN?*Lmz`%edGFk1wb%UPTf_Iq-3Aa|=4^^zVF|e{40gkr3 zE?`kXW^~z(AtIb*<@;HE{fKwpPO*t1EK`j66P(4>_jf9hME;p&{w*K@AMj4rMH~ir zhks-btW(e#wZF5Qd7AdJ>Gt0;JTu!+l@S40H|>)xZQ4X>^q-lX-mO=Qg2|RpfDHcu zIxv9685{_iYl+z0oUyqj85(xMv>R+_YARn5IQer#J}82b zB*DPoW}xT&6Z~$#Jn&Q1P8JNNlbt(>|{ z_J}+;FS?Eg2ediFp)fM?6=}w$Q3$6-Jz2s)zmuGt7!eQ1xM)g3a*o3e^WYBg3k1Z~iIrJ3-n5J@;1t+X3!k5AIFX6ZCQ5h4sM5=;M!N z=#Ym*JG$8&mE+F?gFOrky?WN9>&vnWSH84W!mXK~<`D*qyDnFIlZ>|0!eojEWfV$W zO#55kOqwJ_&N$xKT6p>nSqV^$D&BgC{Q}+;aO0?k!exn#BJ1$#w{PD_g671G4>-NP zn4G=giy#*gvY76d*^JNa2;MEqHU@?3jG!!lPU(yEw6`y&`xQXT2o+&foLirtyh55) zukyM1??x=?dM}{KDFXqtt;AgV@}{37a~l%51+-qG1WL!m1O>OlIaf~6`gdU_THNqu zHw<)S^iVRSebH7_P_U_^#|&l7HcLH4IVi@hjAx#owGWhD>+2@P3ZI#tPTl0e5w`yM z+*EHybXb_KtLtjB+3;N%ccHshnQ6{ciSDbVj3B;jz!UjL{vR zx1=H>&!VV&=?ElStn1X|Vmoz>(%-7o`xzLJ?$K$cRXQWptYJBL@u3B`chMSQah-zF zHQvBrTwzz)*2DLlMYrV138|TFxHaFzMXU z!Rql(*^t>f@k_uW@DHr6aLt7Kk25?}n)`}5l0BBdGe0%QF1#LkO$6<^+|+okdG;}8_Q ze)EP0Y=l))7_IRVA>{$_NkDUO z$7^T)5G3Bkoc}vk0SbpkM3hJWNqRVtA}%O&z(a2O00C7|5Ghn4W?oUz9QkLM(edfS z@fA~JG!37wIl!35RZiU+SvZgOP|u??i8)!!7&-lCiOS;3)a_+*Ti5J36h2MuL6 zc~}JLh~2kcO6(i=@AH*LMLk=m;JV#DpryLdB||XEpWvV2@_r;hL09uCe9D$AYnbOM zpRixfxA?aj_5&X$e@S}LH^v8q&ZjN6sZ1S4!2PbNo!(3qMr?#Q(lN`(B??y2R1i>dJnJiCzklV$hUobPmeWG>isDLOb^u3VxZB4cn0Yj= zv;X)2K-%%~ao9uwL=MARhr4h_4e_Sp&|jN$Dzd;0XEU2x*8USGaIARBTbFg%wrw@8- zVqw_t-!#^hFcT5v zgSe$A$EF*Hq%-Q7UXZTp&(-1c!r~L!_QUQ|)1Xb-E!U0u&Rl#RxDxT4-v|9=u5gcl zdTUpnzx#q7=Q__^tA7TW@Mc!k79XXrM!b;5p=M1@GAKYvNd>fH%He{{64hT}f`}Mz zLDhN&2>}Vg_`0rNz3PIuH>WNSJ^V3*Udj_g7dgFlZo+ zgH76)&Y@#hFtC-bw6wh3Tm(~tJLzaxr;ar5rd%u6pW-4B0XWM?goB5!G zTvhck`M?^B+YtH4%QNZ8Ti8-LIOyy1%I}toLBE}z{F#$ zy^Ts)$o;v3fBnd$>-0E$?VFQKj>jj!jr+udM|&^FeJdNP{WkvDVMS*FDl%`C4?zPj zCFv9TWnKijk-UEAj|uu)EtbPJTOBNQq!e)p8oc#I~dpA58SCi5X4K@56kKTCqxJk}eQeU6-=GC<1 z^Gm!xT5%m#l$Wb|v8h_Z=Lc;7PCx#j-F|KmSCR3~uoNcZPXuUz&UxuFRp6z9vNKUK zUlgx=CBaon#Upv>&=SJBo+WkBo}i1d8hE7fp1qAm7I zpUqcCRS^kk=l+aXda^P2y)e=0MuvTvZ@*h!l1loZqkf#is$y-^={G$3piJ=5{vu04 z%qV1M>`YA72?@n%7w)>ra|z3CTHl*AZwuq=>H=oNv3Kv%X7g^(eW0Ybwzi;24+#h` zS`*4GcvE8mUwv*|oCWMed-Z<`F?^dJWG7da6BgUm>Q)Be6j0m|yIvEZd-xn6O#Ck` zpn70n0I?f$Nii6&>M8jRr9i5JUi9YR{!my`Y!d#@y?@4l7lAnuVrKD z1b3#`RsV<#pk8)73{$vCMy zlD#dbng14->ZjR9uHE>}a^n2rTCwAfK^4uFGgWcor_R|p_kkIaw(vfjyea;jW6@}X^PBYG5yt}8P`{A`+DOq~BC=3kH7X8SVkK%;# zDBfg&Y;s${hiZ2cP+<8*SjfcBua(?pI_H^$-vf)0~BOfyG?t?4&}I%J?;u` z&H+9Kx%yFcFPN^HA7t&%o~K?ZIF0-%1&={MVu=67FRWd_Ndajl*j7XTmStMDg3@rz z!3Zw_B>VFxrl|J8sHktBLjbmC7Ux$cs;L<_n`tMPmRO&4jfju8bah1oo+FB5?%XNx z^;JProO&?(9UKs7$HlFI2_-$5*L?VkSffR|P=vfT!?!?9099U4!9RJX`wczb8C46oOZ78Qo5C*C$RV7r{)B{TjZMDQ05kKCLb-1TNIE=QI3AQKNB3Sv~zrSN{# z&mgm;DZma_Kj1uKifC^1vcy$^o3=A%5eCvuGF%gr_wTkxhQ|G^m|8B+mpgQ*6I0cq z=92Zs3o~irJWzV)Tizu;uQ%;#-`8zPFh&{+3a}6(0 zzmhXdZEbr-U?%r_$$!n{-8!u zN{S6()hypP0u81<1gxD1*uBd0kVu`ZN@S#0?%I|j<0eG6DqwcUVsWV=iU4jO`W6A_ z<7VdOH!oj)%P$yxyiM=)IF}afspYX<+{&z?qM6w7sx^;;X4)bd#4y=8n>fKXi;3=D^$PK?^z{H!4w_fCSRJ}KT>1xyOH>p)9zD>n zL2QSU946BcoQx?N9|7RG{}Ky0j$FUaz?3NH_rXCsK~y4HfPeGtds0TKFX~rXWLQ~! 
zZy5;fpI^`3oA|=hGDf(*tN0_;!C=5UY_u9iMzJTNaoPGSu4di4*Fntj{EED6Y>)Ir zh)ONoOXSG9vq6NhBa;!O_su1@$>I(K-MljVv!;=}_U&yJND(~u?k$2-3>JlA6oXvA zv3HjG!ZF64vt@8J#*&5wqf&7)WxAoa`;eG_d4azfb<27nR4S3}P-4&4} zRWkw~?WPR4b(UX6mmg%s^*x1_uoP}XH$-siV3D+F=p~Qh|F$C2^agWX*pW0gZO?jL zKXZnYQ$Uh!kFM?1kIefd-52McawPd1ZGZicdZT2!{<0wZnVDmUK8vzd8%4BL?o{G; z3s9$8Hn0i}T{$a%IgCtsft^#D>8`tAjNl@r$7N0JYrw#W1Bb8Gd9F&hA!!gIaACHw1T+Lpe^&7#bctqn?<16A(#Ye+z$f7x+9P zHei4b@aF8fbEdwV=v9!OkbaE{atcWs0wT$DjM)b~-YtsS1b3P^JmGLX9UUsbq67vcD`VhHvO?j#5r zr%N1;qr%1O@5Tm94`ORDu^_60FwUTQxr@N;=b2E?aURcF$Hn4(Kmq{->wTPq>2ybH z-qY_U2$RQSgySwj+%SV1g*^rc;&CxCXJGpYpw)4VxP9Oc5Nl+}SFc{7fu!4?H|L-{uZ0DtypfKn!A`=R9b57~dvgY+|J?R}RB|2s9v9b!xRVq8TV8)Q zH$sjR+Oy7~G!?~cCdH~NBtt^EJv}3-aKks`rg9zaYaJs$SH#(*16PmoKJB*;y514u z3zvt?m*PL#{9HE&P*T~Ri^^#1$fkK-#_Z1Tvl+f^1Ka7v#7din<3Yl){_0atIp>@h15=PJ!iFS|Z z0T9G5!qNOcmqiS#;j+eaDz?ViYP&5Ktqhe=4zppOe^y2?8Vq zh8cR`iAau2*zqb@4nT%v@94GRHckyF10DFVY?Fhjii@WjK9kZtibJ-#I& z&PveM?!I>Xk=Ci;GKM={Pv>d_7=sBt51cD650VC+vg*2~;7v`?Ny_^f;X|sdb6knA z?=T^}ONT$2oixyFkY{>N)V=sHc1bLP%~|$lB4-xX@k^}{zoqWfPCed+^7D!1f-hbD z56Rhk;yF6^2dR(63w?MVi{*vz+*Ql$+7MF0&>>xpWDpTsHkLTTEHIxxw_T%lh zrwi65|J^Wu*JviO27TEx6fYnh#l3T2B1fnMfAcU@BlmWJ(upo75?dfpPT>%HT1uHpScLbQ0pN0Td+R{GtzoT<;Bt`VU~tFtYx5T!$O(C( zS9pg?&qYrt&kX!tlqEP@XO9Hz-#9r9yj85z6QgucZ$g|81a#-f;nYRnmH3^{_9pz?~)m@)L zCbz9P9;W;Q3aH#-LA~4dBLU!dj*gE^IzCzY0#%H*FzRCBqWE_UAE2nk6VESWPfi?r zVCVoe1OPBxRfe!Cx}wtG*?AHH1gHgt3jQC!k8504ms{VB&BK@m#U<1jU;6vYoxAfL zem+9y1OX%3P}Id$m6f%dl^;Pg5#F;WIWaM=t!0bj-ib%3>%o>ma2G2V6!13G9=?U+ zY7H3038SDE`KJyeP#*z_;bmeX)af+OpoFnTvxHI=t36Z82O0bS>NOKqS4Cn;U0hwU z;;<=d=*(x!=PN@O7F5r7O#Mx8YEsl2d}4i?=e4kww({AVN%6BeIpKbRfVB)K3XDhs zpJ>NQwiOQe|DM~`~!K){{q9%=t@LkXR34x*@3p14xYVuN8WlxF;Bb#wJ(Eco-qk%AOfo#LL6}$wO?pWP zrlweJ?`+sBK5S6*SYMNh|GM&xW3#*_&s$1PS>aeuPtQ$rZN9wkxA7ZQnaK5}g}FKX z&9M8fr@wEl*KHx*zC#;+@h|t#W0CDGPuEc~Vy|oe^YcKMNYk-5 z2ZbbZw83XTa$ER3W3~^p@VL-@z%MxdhhfYsBh!Hu)Cl5`n$GmGb^D<;!c6a&uP+hE zN0T)jcXV%Utfdr6fEDy|aW)yCkHEc%Xa=jkQ~%<{X$W}ngY6t=jue{>poaik2TXv; zxZK@4cbjDF-Sb4XNEH7KHlAp;zdC$h%;@ScE&j#WC0&%rBEV3Hi;D|bZzMkXr<_PF zAyOtD9%6|A$Od(-x~^`Ke%3Ki$0i{8LCo~S?5#e7l6OjAFXJ~mEnU{z%_w3XPlcq# z$DimjG??vXa&XKTBHT%S?7b?=%Ir#(^~Nqe%(*8b?dmMy^(p18eLjN}mh}VXP6OoP z{>FY^HfIHIdet|P`yVytryU*7*y4EmKCq18tY6g4$cR2e!(vx1(vM^)=ze_E{7b1} zWgzNtvH9*LCiULYFo0GM^~9R@?67mD-6G4k6WKnoD5L#hxX$K-4!=C%s;4cYi(PvAxwK(>xLGJ?8J%`5r30dh)y5^70B(lVbh38fhA6(PA6;B;THc+NPElaUdyXK7 zNAY%exZcZHSP>(-!JQA~xn7;1;u2nQe+1Ex8d+D<%4{r#=2#IYR{c~?GSV0P}vR|Sz#%zl6Mm;VOWm?)i6W}2<5 z9=^S|)(p%stbD=zI2nkbZv)U`n4vwx;UZ)koX1if%*2}Qi(vrb`c=i(cLOmT40Yq$ zq=JGeL~RgHlQ7RyQ0TfEY(^&|qji$)mOAh@o}(TTvO&fZ|GJ12>7btN!n0xt=y&OgiWldmMb_fMj+-}?rx{SV@q;NlXuUx zxlAM^<`LdCr{*_nq$lqzE=`RbMdpb2%*2C8dXHI`Br}5tt@JUv8 z$`xwbt9EQ%DCR7b9M&Uy`4Sl8n<#V|I3Ae6!}wKyo+=Bjpyp46mwSnB) zFrmJ{mehuwb>;rQ6Jkw4#m#o*U^~p=+3G4~rKJhc4<9OEBZkuEikehB2fr};Z5*4g zk4|#v>bAkhQ%mUa40SQ5zP{%JY(g6Z$ul4zfSOd%Rm076VsfW2cdqWvqy1Rkv|J0(fojQvD`>dcPE#fftLSPOa`0}tt+0A>*2KTT2WN|II(Dm5v(j`AA^{Gjn zKL>|WbauLI@r}-dl?&(+YKVrOp7NEMVyFo)y8vAmTsxx68P?Neo)P2hI_j!L0aZjX4*-|k>%fZ4VaareF2r!tL z?+=+dJ$IGry`kjs@Js%^m0YP=(kmys3w3y&T{wF7BT(9Wt8=<%HW^o{`Gp^R=~I@{ zOlymdiInWW?ZGdSr^}d~dnPL_$!fWpV|er`sALqkjS7|{d2ddL_$pBHskQh{MfSdA z7*q`H(e8$mEd3-MVMI9d%GUVIJ}!32rk^p=2g8O=#hPg_625$y_$!Ly&%$EzCu3mj zcHhI)ZK)Y&C9yQcx197P@A_tNTamq_J|=Mpzi|BfNx!g#qvy znVR-|{c5eN%UBO$Jsy>BQ&Y))YeJ_ucAp1~jc`rFz%cmCj_h?LQiO)v?_E1xmiCRh zsxWP@t!wyPX!N13N!^l`04~4Ti|tu&Ue%{Rp7Lhe#!m_s$+vQ@##--GzH{ex9ik>| z&cDe`%VXa#Eo|HxTAMZ4x1Xf`y|#|&we9=%w@>dn@Wsahz>$9GgQV#l^$+`*$h8x) z(X&Rg6O@LDzz-lk 
z$cV_KWyp}Pw+cMR;q~_OXCl5oHa7c(6KoprTD@JXinOd}pC4}~K?LXFIElgbb{?Jr zFyTi>;YfG#mU<_qVK^)Bs}lOz;%NEO(sv$$S~+p?R%~+pB%Gx|Cwrdu7Wwk8Zv$MT zV=pT!kHPnWS1~+hk6sa9z7a6DmYE5t2R+a{p6mbXvLukCGSLD>8=llfNXTM?U&h)0dNr=w5Typ%xzQFPRj=0zdl<8gXSM5VX zJvXRLK0BNIB-7HlQCpjIKZFn)6L_(^JDTn#3E^S)C9Z_o?SamYD!a!DNp!krggNi<m-dGHaGIN;5Pl>mkPU6E$iI8v));q^fVB4dj-eG@8CquP!&W6d4Y0kTLSD(}km|GxKAt$)H)16(I59F8@PDmjqcuexKHz}ytlcrb= zk3Q*Jn&z6V`(4O=lb}MduD)z4MI|71bazmAi!G@x)?2A33Ek5I%@#Dp({X=5DZ-3% z0YMGIK~c0`_)PeUjwh50^E^H>FFgFi#;4RcQD4Z0)R#g4Fnw_59f?Q)~T|om7#E-?|qq}u7gy}H7TDlaFN}i_DCS^L~HK%a?sBNh)Yz#!Zq~` z7MS??6ILj1V|Up+!m_?Z0yAYMzMHM_F4H0(Q&dNH_*o62_e7(1yy)+Zl|#L~^-bGXS1UZ&ls;$nr{p~b^TVi{cAD$W2Yno35V@PdREqO@`1>v zBQ_~V1lezoB_>sGQ{oJUcOWd1DAkKcs#;o+jNhWMTja%YwO{_i9 zO_oU;)&Dk*_42M<(Sc*9D9Z1N-^RZ;T^eoqF6SzS%Q>u^_5B8~({|csq9!&?iJzap z2;dcxSexQHfYxbXfi?ku-62$KnZrS~!tQIh9TWSd%7MW_rL~DH$TLX;Pb$b>B$t#_ z|N1J@R%E-{O?r@hMXEBAaD(7^h!Pw8G|Mib@Ywa1!xY8;;{wdWcnvCtnWDPQ;Je$_ zwkcOd**S%b3h{37Xhu2R3IVB^EffI|NTsftedfNmz{_T5T_z?o-A|jD3+|yZEfSDX zfzC-Rj=g0J8GOx`B-uIo#8m|*o*wao!u9E|I1juq3J(pPO`>%?PI~Fm^9xblOaBy1 zAJxQWP(uND!qn~gbF>6#OkWEeG(CNKJ>JHDZgzGhekv6M3x!8o~drWo8M3EOIg?n};C4x_I#k3_a0OaM<{Q^L+iD zR<@qVDS)uBOGYwMVFG6dhP@eR1<*-KdeuvsZ)urXNb7XMsfnN_7!KHc9+S9QeKQS1IWU<3 z_NAqyfO_460UeCT2-4Eh6TQVFP-}qSy;Zn%utrNm%Ut~9iUIR4${Lpc(>Sg8#6`6H z@#&sI`-HzMK_i7T8cY^RyG_{QvmJuXj{4dQljlCfX`s#iDL)rVup8wY-W;n=BWO5Z0oId$3(!T7C*L5}*xL z^*T*m%`-Wh`dIHP!`GPmo(E5SS2ko1xD(q{rO*ER7H;>}Qy9=86F>-8Twxh=^ra<4 zO)lUcqWX#(cNae#wB?EqgqQ42=%i*86A}_CaNZy`LEA!#^M(ms+CIk#LZzim`~T`3^=z&y@%2!U5k*y?>q)R2 zPEK~dt$y9`lUugIE%g%6FHi>8H6I?*AWch6#lT~p{Z(unC{rNpQL7&I&EGk~?TIoU zz8uTP0CnS3!M9!^S=LkfvUQx3l0c>N%yguS%##w%- zg^7go!(dAwG5@TfA{0fB&i+wlT)HR?#&jg$p`0Fvu|h%pGH!sy8CKy z&<{8;0Yp4(Tn8I7q6)BF<@MjS*nR@WK4?mgBj7HgZuSPsj8G1sH9I$#|ER|kv_0TO z?XA;8%ZZj4pd!9z1dAtN(a;1CzrHVV+QuJbiy{aCH#fntSjDbXJmuh9GSJX?ttT5u zCDgP2_e>lFDPz*N-6**eNT@mR)gaJpVHC-dEW!yfHIdv!n%58jH9pO>KR;D@Tk@BQdI>g zQhDX{Kx7G|BWWX!@n}phVGP; z``nTq@iar_i#5s0+dhK~JtEpNpeD`&{2A~evL}0a$jzG|UeWU_RzuU!!d`49fAzaM zw899p=NuhtMlS{n5Eqzqbm*_XfmXW$lPUs^-sXo~aE*lGP7qfQV2$2AE%J0&Z@^VG zpsL6+_5Z$N{IOLmwb{bST1@F39~7Z5A(nFYCBeKzwTe1!HxCHXsMBwmh7t#VAmBW> zm}g!p9Ee*Lupl}%zm7%C59H6nO&xP?uXpFggIg*XHxT;qttJqu!w=&BV(d%6sa)H( zQBo=*DpEcF0u86p}fiOd<2|pO4l4 z-tYU5@A!{@9s79qev7r9^*r}|-Pd(q=XqX*pMK-80gzD$M{O9;fWr}qLI=uf5C!db z5CSx?`W3hWD?xI-WMOga)BZHloC)Jv1$i1wD^MMYV@=UYd;HbT;}{d#1o&YPLfF_) zN>!n3(2h<=0!R|7-uh$thXyKPNRQF6$5R!#<%?tZ?%mRw&SCQliAr67i*ZkJpc+if zKyiReL*tWb-+==Ru(85xu)^&qvLdJQ{}i8CcF7x3C~s^4U;f`yrxuO{ytH4LGB95JHE#cwAi3Ffb$9{{nqd; z4O|=IzYUS729%PgkDLx#C)pIaGcqtBxI)|ZTOekVza8yzbmh_b!RQ2a+47XL>0c2o zDty^)px#5+1HGcKH-Kobd3r{_%+4k%LL^PI6HUL&?^++o&3fZnRyaqn&vFpkWb%ilq_6Y|ZrZ5{Tc3ItWDN!|ExTcgvUcHFz9X#Xwwm zwTT2uD(K_Q3!XNS?!ccyqwZdU9b`{WyefZ&e$m`%8lIKg{@j8chlE zPJ}1;^RjQ>Uh?A<=4eylx$2mt=xJ~c{D>2bL}(2lmkk_n3e z)xiQ-allG&_3XFyOF`d;!wM^^4^bDm2)+ZBk@o3Jpox=~9rT08#+??a1FGOOrw@Pu z@JfpAYvJ37YoPg~dU>DujjkApomjX9f)&YWH_{dD7<%=z906VZ9P4gaa(}nUQ%q3i6^K!x1xac3(SVvh_LMbh z)LvvR3_>R)RtwH573#aUZQC|5FhGS)_vgkhJ6Qd|r@A04GA~kO6C}@FUMp;%6Py%; zu0T3S*X5ntUVXb3tA=J3Err0W8MlTV@YlyRM& z`=cN^43z~pciG2}>BdfG7&{BHS@GS!5o@3wRK%aa2uezpaq}Up03?Ux3fUf=O2r@g zT@EyzIR3Kd-|zKl!LBRJ_S6HV2?QcIZ#;^L$&A{thI9Y^34};yukXG7a;|(wDa*Z1 z`Yfp0aHv%|_1jc-fy*v)6c82?vwV0gT~|UM8+QVd9h`_l!Bte>`}?t6&yhl~(J~gM z&r`yDyyCA_ZmX70;!r~Z1!CJ-8wEn-UQ}2JGAI-qK-vD_d4s}=o$B(ZNt=B`2qhkn zwimm*(Qe-!i7fEuVEqZCFeoWe1|m$sP~*^az@C**89%Pc*7+7DDRY40OxhL~7n^ok zo0=ZD^zNQL2g}N_2Wr*W#uZ2l8^h@~1>GY4Ggj<$TxJxcfB1ZyD!6;Nx27;=8l+r< z=fK8&?#p9X%*z<>hWZs=gQ~AQ$?;g~LYPwb>C@?#S${vm+>mD39mUuX4o79>4XN8= 
zUFgDK0hF}xvv1F-hk(%gKo11TDB@ut<=yrD)-BLk)jw4Li^;5g&CB9btaszjrm)DS zX)q2=V4x)|uFicZS+i0Aky&^6w766f>WQC?do^E{7kbpbB#2pI;;br>Kwo!C+bQ3m zW?s3Px?i}M-AGS3sBC{RSXws89z9aqK?QGZgN{pouQ$>NKjK3WGo+pVwA)Qlfvi?7 zx^hdACMzj-$r&T(phCO6SXyLsR+MrTnpvXm%K@xfVI@q@i=}6zs32Uj@%?+ZI#A^@ z>LBUGB(3Xn-0!SXrpgbyFYf{xh#W3rtsmv*IyT_P!sFt)ffO)?Z)kYdHQgByK~JTl zbI@^HZw04-ITAczs9$WuC?7g`^&8c~DoKFVXeU}*kG}g;d~S5TkWi&@bp~=ZR}Aop z*y#WFrf$ZYcfx3)z;>{wzuz8?d?2YJ#Nf0g$lOSuA{hQ&i2l~iGm8IUa1PF^h=+In zUfs^-zqvP45#7H@<;s6*Fy&py7~Dkpu>p^;x7v1K-~kY|v9p6(Dmyn9T;el;kD&uf z9(=R%a|5LqEaiA~6I=x27kZ&}Uwzb}&q~K_;r9d1wLnAuY}ZqYQzZ&<&O1 z-(BaNNSJ(LjRCn3;n}nKzyVsRe1Z{=0ucTYu)!4Hv18>i7O@+Y6!ET9lLQatid4Ys z3esdqGm`x!XfBQ#?1Gy@aK`64k7Mqh`=0B7T1||vBTnf}t5qH-tlZyJ6zh`c6}T-g zVGwiP1*^-Kn}LKOi9ztuYlvGE04jj202lO!?pB_g|2k(Lo)E>Yg6F7?t{zP^`}QGjwxz%amz4EuLqgoc*QO^oR~@Y z;|PYaVWbc+SKcz(+y}EG_%+w`xAQ&wtgXI`VN8aGT<2doMi*^uXe$5QO~n8^j3sx< zLRSLvzH!?=!oY0i%__SEhUHZ=?Yrg~E>*sW`t}Wf`rzKZ!e86a7Uo%dtTk@{wHC6@ zJDjU&Y4_^F3#S7)H4Z9BV&M6TF9=540MBK8lu@lG1_@#=e0>Ckgnl)qOn?W0DmHa- zs3sFa7`d!-8#oUi*4wli;;yZFBUoNwC0Krrb%*Q%4jd?EmVqt?(V>)4+C)_BdJs?H z8G&^K8A?^vV4HP`%9%56v+47|-n7+`($d|Y`_p7EX|a}*L`voO8{V{04#N+GSIY&2SO;3jqAX4r3QV z(FYr-zM%oLGBBm<4BX+G3j>bD!H9VhffGV3XzBx_ql-~e0Z4dq*y(IvrM7;}|6K_; z>R$)T6@@I4=zg4=5CnoY2b>OF5a*?ZTOoT&OH1Je1yS!8Fi>!9ooE)Jdx-P*QI&D# z>~IGSc3o=nXO9_LbN-w%DjW>|#vRAp_}A45?_-CehC2-2))Op zGLPKlpwhp!PWZfya!fp0aG|66GI zU*?pmE-x?7c=M)RIw9%=!7Kwd=nO^+`af`_`CL~w2%hP0Q5eErD0H}f?AV=gf8bmI zCTfs#R8~4XQQ|i*(N0S;%{tP`$Fe+rz8dsY8z5O|L4^pzgM#YB{M4d<_CuFLy)1Lp zN-!RW4ivd!_RHK85{&F@k@q&QTXhgd_!yRiFbc1TkRl*nPjHG^S&abmM^WLPZa$ zq^PLcAvaghL(7YBG737>I2O+Z!us-7A(~v;-+Y<*}m>G=_ zdfp(~0<3?myRBMh(k2fK_C?%u?2KQKid?sbp*B_D{jqXX?ZETLBl=(G=EAvD;mjHB zR&m1M2bj8W>-cD9Vk1(1Dm;75EAWhBy$vLwq^^}%bdy)nCg@YG2wF);^fj<@vG)nA z33_~-SYRSY10izQoNI%1MZ`RmhKP2Lp2q{0pt%TSab+L>M16fMjw;~s_Bc znT!erDeF5|Vnid2+=2F5di;RW}_R`?(;Ay0!Po8B!w@w9% zsislsQw&|~hav!i-MDJN0vH5}ScoJZ(}AKZjfVEr+WdLpaueV4D3L)$iugu1??ErXPGc6fXf1^f}dZ~VfFz~pac-2WoNM- zCWZ@HH@H1mCzC=Lu_IeT=uVPzTJwPYMGla%$*HLkG>8zw|IO5{L<5{gYl*rMUlFZz zo&peNZo-=%j|#Xd)-CF`i->E4(iIvKurvu7t<*#t?a_0nZUDl`E>3C~8TI09M?H;) zW(sh~+CCkbvedzYfc?lPPNWp|phyEoDTO2d)Ts){!vI=h9G0-Scn+4O^)3G;jXO5# z)J5i4763;2uI)y>e00-yn_Iy3bz{C`djOE6x3u(S_t_b9^wd_WopSHLO1yE-!rjMw zyX3XCG5QbWxsfx%l-)5wg;VY#(xU2Rdo&BJHJf@@!IoPvv<69t_zrmw&OmwG4l2To z0`7rQQS?Xg9N)g>L>mG%GRtz+k%`^iL{m&fhr>)URdtmLo{hwIs@vZCi#6{R9L+3~ z9DDj66lGR=T9AiD=2#&m8X9`>cDs-+=C(s6qmuBq=+Tl=?owz%5e$neKdGCb=QYU& zuH93-=GwsL-5bf6x{H^l7vu|CfLW-z)X%usKgr)@zIDg(mA@kQdEjvdRT)!(PE)MA zDVC=v=)lVQi?>1dNfR+;)sdXJR9|p=W@ia7wl-j8Q+ayg_aNRZPEQxI>LAYuNwk*? 
zul|fhG0p(yFkZR=r}NZ@j~|ICy&q}iDHq7* zC9#7_i;RZ=RXjq(?Pl>7?Nfdcm9@tm4%%d6HA03;m{kpKqkxvAtq3}QT+t>4+<_9i zr|$UbsES-STlz%wL{J1qp^cN2KuIT4ZC867iyPcV@<2&Go)_@&Ap@1iF*Q}yv6!fR zs6!7NIEy9>pm)sec%uhpec`SByu2r|K`?a!NvmAmH@DN9Rs*#OQS7tHOEOB?RA!j< z`05#G_~SdL&8a4ZdEojt6H<*0~E-a_x2 z8cS*2?W+Hj>_(@Xr5Bw#x`?2IpEoUTcV1<18X9>NAT`~@e9AkV$WM^`+1q+SdI0+y z+B>)J`>DMaHWvGq?=?S-2MV4PW~ZHMc5#;fruZ)xKpMFJhYx}R0zTHYk6`B)!Wh}m z&>*qYQg8>z{NtKFM3D%Fr^j&KqZpyW=K+2c-*MU_7DMMRUR1r_5ny)V&<63*tqkJ0 z#uZzIm7b~nC)Uf(DQoTT>U%)~`jHS2qtw_ZQTpygvi9<=irJUmC)NY~ZAU4ElIM!> z5z6{y+`QSraX)0oG`I8Q9#?lzJAb1d92^g+A=()bdis^E?2bv-ZYOKUJvdYAobRSQ#ORGjB?lD zMi$FGq02okcgM%cT!Cb+y|4dn7t%U}{WTb?PGptR99P#5zpvQ6Hw#rqb!XlxWo=#$|pFbEHKaD$N=e$dZ6{?Ge19-JiSmVqA^liI|_9I zxSq%!L9^HeEjN|i<^}8a%ye2+EzOn9CgF$lM%b)VtiG`LnvE@C6Q29D$15%v?P$&fyRuPCtZ5v}Zx%DDA$+BB6HV2-Wqx$-7RpE}edf z4Sa;q=iIr<{s~rzJygW&NITxTw)+~E28es`RTFs6>w%L5#8>soexZ?$s*nGCQPmSl zi|taj9SYL@2hLzy*RBRktX)ZNZEb~3rST<&i1VzrX~_Y(DoYJ~IO$)$c%fS9yYl{2 zH|Z06T41}t9QgXRJ=|Y{C%)+50LG5&$!hIC#Sq^5zD`xo&of6$^S#8)wd{v4XmgH1 z=hQg8cU)khtk=b6wf`wHQhoYx+zGs5GcM)aw#SHssVSn=LW>%hXw~<|n}nDh?XP|H z9Z7nNjwbnnxw%QXSJWS5u>H7h@}O{n&l&XLDv|z#!#H zf3ly{jw^AV1)B8-Q3L^^J?DxB+={{CMPBNDM74VcIE^S<=9HJOFL%7vMrDDaVNA_n+7}y`=%pIQ z%dF??5*!tp2@=$=U%Dr1C156qSLgyb{Kbn?Z7*Kz%YGI2d!+GQ;Ec1kPhxO|Mut9E z#lTa8($g)qxG5iyyyKC^TTFr@l2m*)C`ovs_66GV{(TbS3M!lOgmHS~ix;m!sEcON z4(QzmeE4teiTsG>CteFA9DG^Oh|q{USdnrB62dQ5jnvUmvRK68!D%{9{;X zp;R+{N4a;6WWjLd_hXMhnE)|zp>f0ngndLFn4#<|O-VhkuOCg*?38&gI(jD@A{7$> z`6RYM8U=^Ku*Z*+pgjTpS|3`$*45L)%)z1DX0O)EYckKx%q|fX90H&irnhMQ8=v+ruIca}BK&i~Qp`Iu>Rm9RM}5Xs-_)ds0ta}J$FWNaneyD@zklmw8J++m2Yx;% zQCZ)Vvhi!Xy2_@cGW%#(T2Dlc z-eW^yR3WT`;LsHQVT~409#2{0hG!!jO3JY%cIk90?{b@7^!!_VqbHsO?gUupjNc%- z=a_iI1TZut&sen_;6Dh6khMUM;(5=Hcj(!_5NZ^?;>DaP%^|(IFXQ02>xl@?g z3mq8v0&Uo^fhC%!Y*JFodfHw+h59<#}MR zXbWuGu|u!^UGHzCpt%;UVjy~XRIGe?s^shEHppEa86B0g9Z;Q(0i_nf4yNIr1NcpR zB_zhc`AHJQE9B<1#FEmtLiD@AP9~JLbtkhdD%`#^?esdGoF+4h=#^QnnBb@ zQ`nK=1i%Oa{(Uz-Cc-c(KK>XP*TC#WZoQsLi>f6>Gh!melHu1j*%*;!KcG&a{Zv2H z5L&yMCQg-zDnp!2@Fg?*xi-qI`mI-zOq7{o+F3q!0OEaXsAkloPz8z|iou3n@oqYY zt#?lFw6Zdwa)PCd{&KmYC=-lm5JRUSF~2Ovx(WuxVTT?dO$1>)T>0$mG%Y|b92T~p zn-9ypk@G1hwVMcPUd}q#tY4y~p+T5VG`;7+_-SH{`FbAa^Dn}ZK0e*@u<^~hn*;Kn zgjv|a=Z3Ce2qJoiC3Amt1RtlSUIpp`>vcj zCUhxVF!y-DpaVL9QkLdvJ^a+lJzgHZ%Ob4t%N^C*&^ zBjmZ%RMsi>nT zVR(v+CIcWM-;e8A5kGn2pMLX>k#-D?R8l_7@+c_i7}@wjNsm?U92vt_KOAAET*gn| zJlz36!eACiA@v8BLiE61XGQP>JuBs;UcHE?n>tlI89DyY zZGy|!?l?3_EpCq}{ z>K80f4iL_^hF2-yo&8?ji6H>tMMXuB`y4`-7UcRWk;=Q-uyY2<{t_llB08+M>ezdw zxw#okBR?FwWzH)b48Nl|l_p`B?6 zQKE6b2-g()sJ{s?5zqd=@>B*fNlC`FYl&f?2$XO=x_9gnz5=F@AoT^G)V{Bpb!X(^ z*f@{7#nJLydB1sg!%7;oPCPw!8HX1%Mh+NMllKiEW&LBn#I`?w{-DQ;j#dr8>>XHc z-K7tJq@k~e#z6#@Iw(slVSl4;gi)2T*bm+`vzp(E{QKIFx1oU>85TzT5&%GK{D;Op8hubwu z*Bq#?dn{S)IVI;JI$W(1?X3t8a>R~1Ty6k(6JWoI!DRhEenfejwjh~-{M;I2%49Drg zpHDo`k>$j#Ibxo?Ja7;*y)Pqg^M3Lg{Z%w|NDTws-PcFH9ZI!6rjexHHdWTErZ23Q zccHNPGCCpX6{06tWN5UqaH%xU_?~UVP!{FWd|P+y81C<{=)Q&x{1wXOo&KSrq0?~v z>d3os84wGaGjQe(;Sd(-LZ{pqu60RDOT#w96X@t5rWK#vsHi>4!<%E4ec&J@(E;w< zxW|WXjdQbYtDk!EG+{nVOHcl|Ue%X}NoREM?6AmXt;m!tNi|J0pT0=_LXo2uZ!iIY z1HeIo34P1pN<(=_k{9QN9TOjvKTx z4@%+9;()Xio&eh-A9+?=pBLWZ1F=Uaf(Que)zk5zopR7lErMu(xzF%dAqpN$AIr}_ zucM=N$t+P!Lc$WX3QTgDI(6pRYb&*UffD)beFMTwq7$sjI3K(`dwQ!1kJ7eVcWkQb zx28mCD%Awhe7|onxD=&u_(Meus_p$Rwjh^9mWj0M!i5V%0|N&E@l?NlNXTAL4u5z1 zVUrgT5iu@w;~>W;fD_2TNHRYQMvL({^upgm_uz|2z~tuTrZ#T8w~!BhF;+J7ViVM)WV&pZEjqN z+Kzf15&Y;~@9KH=bAl)3`kSJT$?neeT{!&Dc6FLvLE~d3f%RkS_Av$C8T|ZHx1mEFz)6CvOrV|X$wE?wJH%pK7JGu7Uqq|!k1fKc!^c%N~_P$ z0w?(*U@;ejB}GI=(TT=kK-lPDydo$v)w@Elqu`*Cl=b38K=<;|h0sCtac{tZaI|>- 
z6QAHWKEbFOQ>CZsx3sZ5;l!miTfgXy`Jq>>t#>g^c^*iA2G_v8%#gM9>Q`n(9tDb; z4xIzRv-Z~Bh0Z*gLOzqOje8kK6|Oi)R+l=dvv>H7k3~Onmwbx9qB8KV6Y<3?4`njm zTNgYGP0~#p8^CWtB4he1c!t)A4T18uK(f%XfyX}3eeLms(DVU5CUFi4>0%UO_Q##P6U#PjfUn#xp0?6-j7wYcaiJOKK1i|W%%-8$ebGf-Ex2<$5C!F}4bw zH#RdP*jKeO+?eXk&FnHrHh|2`zuZtEQGK?3DfcZBG<*R^z(XJazMFyM2Yi8E)Jg>5 z3Tc{u%x2jb`t@F(<%YMQX>fF`mb11vpsTGd$;a2<8{}{^`acD7NMYj5C=gRMNgu1K z)a(MTy4UvC?7EJ8`hUrrPYg@fEu2DSj2!Y7{&hC}w0KCT-+I-JqK@kiht>?(k{<;J zZ{E2xBQ+I%rRS0el@yeHB5o-QH@2~{F{wcxrOaD8ZKs8?X^uFqzoHg{JqdbK1eH(H z`cwY|tnbi%RAGXNzidSX!hhrWf|PGw-l5cK5NU-&1z70Q9Jx^H zpv(>01?NzFuri6}YyK0#bj3A+HJh$}iE@p2VN$aN5E8n0sSImYKK1Q}A3IIY(nE!a zmS)zu1so`1gpD!T_{x>p6e9#&41LYQA_4duI+%ZJf|sZkNK)2*Oq)0FN7;+cJ)Ee} zph2TgSicYgpYSXy;x6&?#~u}S!?7eNc;#~d2qjn*`yW%zoY+(?ADZHy(f)>6=kSPm z;!%EK6wt8_*aZ^fxvr$>yfXyX$$5|YsfHVX3FKCH!eFR#NmZu z+}`2)C^vt@#kn}!JZQ!+A)_DL1`@*B%E|!6M6qyXQV33NJc(j(D(ItN{h@}v?2OF{ zj{0*{9C2}&z(xvDge(4EiAGQ$R<($rAcv&Yg{mLdAbi{8MTU3|d0G3q%7gn3H?o zCb5nh97(-X@^8vbulR-_+p}X{333tIHEVzXDJ*}zc!4(ua3B0`lyE$z3HHW zLi_;i%HqI0{CiNC;SqpIa2ZIRfZXHKZeMWatAcpOCC0K}z64;4>Jd{B>r%A@XzxEi zienS2G$-y<|Lm}C4KNuoRgLy91#RStB!CpaBTYEpS3GFjyqPre5XU)@4pQ8zBiFtpc zTBs{DLL_^pskx<$TToEBTO*W-nOV&2GiUZydD(f-(%Dl6*F{zAo@t9B?hm$P?XjEQ zyQ*OR(Svpu6vPGdnwud$-C(gtd(CNalPH0@A4GB#L_+qIKDOvg3Z6{;gJF`uEFfgjVJ|Dm) zRF<>U$lyU(x{5>?e!EymP#=f;(k-XHc@yT@64N2$Uus$vxfq&Pi8Qj`TWeMqj1g5_40b+XE~A{uyRALfFcin>Qr0Y^+N%xSoo&}Hz= zx;PdNDprfd)I{SXeN4fOs_M+t@rT&_si~w@m%x|7OtB@{@Q#&n<=ccm+?ga0ddsj* zveOx2x0@)|&}srKWc#yG0i%Bb%0oe#4>!A2|4_D6DouiR7vMJ)@^bz{q@Ul$kfF_M zn!Moj4BY^xNV+YUO!Cu3iWTWI?%cX%Yt8hq(S6unqecaXXcj37*tUSM0FzhS)1e!o z15}2X=(2np!Km(xtm84|8eHN$a@RuU_%S6I>KoL2mAQo}|2w zJ0B$jjYn0$yj7`Pp2)i*6-z8uccknNO0ze@>-Ch{WYI?&C&>sDyN+id`47P+y+&Fx3f zKcQj!F1^E70o6Z>#BAW}a${J6 zd8J+Ea>U{(-^lqezR@(M)=W(SDMFK!kUoJf1uqIkM-wrfBS+rC21iv>)A;Juxsv5Y z|0;{I6VSW3;dy`@g7I7jB7r9F-n917j-LrI=i!Rq@{rYD`W1IP_`Pd3Q$B^C3abO| zN(ceAwzjCSPzoa|Dp|b-b0q=lses+b!HhpA_ywNR_=&I&YYdKp>xxqT)2EC%%~9fz z^z@WvRp6X)`U|6c{Kt&?3>h+F>}F6|dM9Sd7=g}dja~(^65e=h!wyWXMuD?|?(TWG zn1p%q?=J2cx#z$I{)FsZFUrDYxm~9_&<9WzqOt>M;(~kpzN`!g1?kYCR!9!Me0hZ! z3wYw3+s-8)umMmTw}#YAwjWg25NI17<=a*2OgUC=FCSY+TNnkq7+65b-WVhB7eSq% z@Nk{Pz$3ds>jJ{$0Cecz{z@x^Pe79Niw{e$0x<(>2gkX|E9V#X-x5v;y2W!M$BD0< z@l{R^hnTVarAys7Jn&4PrKS>;M8s9m%`0u5`?@^c|8fDSx_idB^Uvv3-=O(*XwKK*| zibG!rsuA*X3`amryui!t^Cfi#@_@X*rBroB=0cC`WqUMvkA{;?tDrwhYaZ32;h~ zX3P523mA+$Zjajx)|JA2971jmS?4zZKsInws-hWn{;s2!UO|Qd3Y+!zwvp{E&y3kt z(Yds*`s6aHNn0Z82pd!>Yin!McgbmKDE%Bk_y!7zq)KLICU7hqtpGVU(9?6J-On+U zXj(SkdOYRP!zJOV_TG^ZbU9syZd2Ar)24>B&&P!@-lq0a))%{9nyyUmOrA(5|9(T{ zJtUH$SP)7VB(><2R&e5s*osS*D5XT{j@&7 ze$*@3b*Q(r9%!nm?dBr!C%-9(z3qva#%WT2 zGf(ZeLX)xEFlicVNQaj8ls@_K=#v}6X8{a5ZSLJ}RXVSugL`VRB`q(`ee`XI&y%kJ zhLCP|~43>WkvrP5Y(|dXew7V{

TV6?u+`)XoN zkY$tg`^Nmb^|wtyA{A(0@X3OWY^P9#-IpxRu1Hw3=Mm%?eC{3i=hwCk)D`uLx8)0n9Is4jm3baNlO<1!sjcixa?V;Pjhtk zxS?3KXEHd8AGkS04atZLa7RP_4(M`qL4jU0FY@Ah1(w_rHvfEdaadc1R(PODef`~KWkmY9H}5$1 zpr$8$*oqfXhrk9EobH-;X=< z+{DC^Y@IY6e@MD+`m?ri^)=pFQ95gzb0kCzA=sPo>-lMaqvB zuo#Ndymc!V94lPP>TvNGKm+2=Bm9d}hrW_=_}27I)33%QBFOR3h-IMWPXL4?Bl=a{ zV&Ui?4IzHwsQ}smlRGCnTiu}&*&=KLA|~W)>}Cs^8hsz9bJcpRv$w(|ipw}n>0`F8 zJe95+vA5~&`|dpa6?B+}hKcLsvR;rbh~6E+G7lH?DBW97bioU03E={acjXZh+>(Zh z8uwGR%V+|uIQS~W&hd(0Q*D*q;`5EK?nxU?xD(WrmVdRn5WRyJ`CZ@H$7QhRLh<6; z=T52z6pc6vnIkw308jVte~C4amYl3RmM2ozpn(MNKnloGe~s8bhqy& z3vj^P#;~S3Bq&G>6ev(PK=C5XCeW?K;RPr6-SMOB+Sl@;D@@&bFCHfY*;kXNLN$&3 zqOQs9aX2-09xho`=oX|0{3VJM(n^(rb{L_b;6cX@Ht zjZ9WLJm^~4nI6P7x5)WK1}Ftag0M(X*6Z6)!%4Oo=x-1^kPF~T5L2giLBisOJ`1qi zQzuVaea)$AZVs(-h-ER%ngPU!AhW&m`R|C4(_h`vuh{c-*B<3tMP^nwd-SLWU3Hs; zI7xW441XgRkLpe5^n1l?M7)mVj=c9JV9I~#`va8=yn!zVt{t}L&y#Q$eq6H5j; z6K)gNvR-|y%Y0y$!a67&9`0-Y;|Mvlx8tlsSK=ETRF$9`pd9vB(Crh(E$#O51!#?iEL~in7q1>Hn>AP-mw)hoR14 z0yB7j;JQb{KqtuJ&-Bv9(@Is$o8IvWI-8rdT-=_Pv}P``;9-$(@tjl#B3@OMLe{M{ z3>?J7X>4pBZtiP&7j$)X<5cCz$YhbZW2?bZ86^)S?4VJDCSx7q%tyHwv75jc07(L< zC#v}nMrbPexCE#5PH}T{Z&EXIDH_+?cr!;TJp7yYnW5`5{;k93r6jY()isE;R=)@@8*^S+Cm8LnGkIus_xP-!tn$PI7KoZ)@5Nfe*eTo9;U;}iWx7akJdYJ zfp-EEWK$H)y5OTA;)4zZ;enL2^#0%%Z{D}JB%rHjk=D*O8 z>Se%zMGqc40Cf(uXYiI#3jt)fVrZydFA!V>E+jE8CqAAKlKVS39<`Lja|A=^`5UJa zpS*Sci~iP+?(}T7E>r;f7Sto8d@r(@i`|xG_w2dL!LH#U(6N16P~NScHc4O}Z+v#H zEUZ+9|Fu2>7~H2v$c3O*LJ@HCPI(#No` zTt)0^T!mL9X55O2p-UP<2tEV_ABkK%Nk$6Ek-Y zi)DL^UfQp+(ULW!d6(?>EnC3M!$1kboQu9Wdm7zqf1JBDmUv4^_*+@-`&RIK1dVo`Ttd#6=#4=UW}>;c^2FscCxXBA2|f6 z&??72(NZ_B0)vpfsR>i`moIO?k}W#*=i8H8?q9tM#BCxg6Vf2aM4VBY1JG;E5yc)=Z1D_o$7}qPDksOj0 z@8u&XG+bSaA;IV?H5O|{(-CGjrw+A@4745ho6d6hBogFsA@BO+2~FHgkqd87FD&B> z2B)3jLBmsp&8T}bmEd`ihdd+r6+^oIsVN3Gdzp0!JaQUVgb)V=yoH-0pY|c|Vr6CJ zs;;hvT>(K9!4Ol0#QLZx4uVZF<6P`4K^G|h2*cQi4mRC6!nujw1FXJk26a6&)coFM zwOYWp4@Epa+2nG{7U2Ahd=d4jOr+mT#WJJXA9?v-Nvcb-EWFO0;TK?Y%zDFt8*1F%F$ldf%rEevtDux7ZJWM(LeBhH+RZk~nqM{G;gj?XdOC&n39PqazeClXikwZ#(D{f|_@l@%Z4S!0_DgA{$=0(h8VV zu#7^bqSxFJ^VQGWn=2lyXA?R#ZsAZE!T0vRf=N(7;Y1t5TQ8Z%emG0X5^ zPjD5X-2s>B7U>YCeiE!MM@QFB8)=nEI`szpEIsSk51|JTTy=Fi^FzOYjt8_jv;xwV zc%EGE_@0t?9IA8Pi$1ygk=P-YCYsaYecfaY_W4Z9yVv8XXXv%&r6;i55cxx{Ja@h| zhAaY21ujgj%Xnv84LybVZ`%zTT3S5u3-R&ede*YWo!)1ji?Bp*lTxkh56PK06!qt@ zRE4oAxSg01ssOv)%&<_5Ed|IhDZEZS#TGu-P-~TStA<)NK0kE*Aj^&rw~09%Gyu2I zs@6$AeVXU8N{Xhpn|4)I=Yy|{0u!@}vb)8?H;j5k73!B4sO{|7oELOvZO5iV=;xPu zC2g2}EpxDr%Go0|U4WZeVOGmc_QsA_r~%I&+@{3Txfli$Czyt4hHZ)oU~wAHAS5G}&Ok6yv%*B8QdqP{WyPh{nnk!`i5b1`na9 zEq*2fI<}eVjDO#8tYojs^RIgfep%Yy^c(=l>#YcI^Eg@D{F7WNQ25kSC4$d(m2~Es zYv+=>D5cE()UPxhjg7V#B*Y_Ey?&-%h{~Yi^XF^8n1R|1xJ>$k!nW(m<%GsJEbktgK? 
z33(vwnA%7sw-duZA|v5u{H3+o>SM8;&qqTA6S28pI%T@Qz2tH&&Mqa#&XBpXU%%Ft zzwbXE8WBF!qse~;SSknC^+iwt7z)Bq~5%24_y2UWbbvGJ6L8`ZVfRlz)%6Qj zv|<7R=5UizvTE10`4vqH6__(;{bpS}3x^~L`lw%#*2U{EZdNGLtmXizXw?f%MO{e^ z_m)3xYE6~`4l9RovR7)|R3jC=IA(W)g$5@%9Bq<3Jc-dsNeKzP@MD9g<i!F&H*wZ;dMU0@)WgVxgQR3TV7eXsjr8*oNZ5ma2*4Ny;Oqr4j;)X4w@K+F=r=o2w9 zbyCRrbLS?1HCsc*&cRU8+1c69upb6Q#xkKbJAfO2+H>z%oZ*t}%GXz=dy(3932*DI>1^!;|IKK*nXTE<*~p6*^yTAvdnTFRA`7cb`j%>2OW5UzXx86f!X zOo6!>ulzDjKYr8od!8kbY-ncY0DqJd!~6J%na%h%n|)L)+tkv&aBun?*=##ZGI4{` zga@S66B330XJ?Xp)`&EkUV`A zt^TA&{^x8yX<%RgTkm4cT&FeB<%Yy858y{=j{?Cp`||uwdLeajguj0+iol=iJ&IR3 z-L$^QYYlK^>$k!QU57G+e#~g#<1gCM?zRc60NX)zb*9hNO^x}(ojZEpHw2v^LNt_6 zxOpsMlsd4{(ZYl-`8MRV3ye66s#R!mmDSYhPypsxOocPuRYXXtXI*PIJt|T8m9%>| zVYZ^(NF1f0Fi+l{CqfotY+{Xq)*sA|Gx||X!SIu5Q;n#XF(QOZ9UXosqwTgNYY`wR z$b+@1M>XzdzgEif{PD{C1Q~%-tUO2seLFjq&U#c1cSfg!hiZ36Gqcq>&75e@}IFbrxtKiC3eQ zuk5O2>?KVi(+Ba05#tJHej9Q9#5_@ryx0uP9w&1zmc8>US7&3oevspY$}@C>1H%<) zX`jZcAGJTtz*M#M`by-Xn$gByLy5NhArM$I0mFvecbBTfGxt0e93(L0681=$jBc!V zL_rmy7-^3s_%|n zuvqd?6b!P?{tdC$K+8iP7)@!mL3>xDT45wqw=51Yl^Yv>%ya!^)l+~??!4r}wVaB4 z8IDX+@VHW$LA4n7I9*=FucxPSl%<^!YF8boE#iNy6mPfB``6KQAlOt5QdMsUzKF3g zh&8+y-W-_!ntosEd@HTTb)8+Fpi=)pws;|f%WW^li>72`xDK_5etnf_Tj1eQj=;z2 zP(azljgGjlR89W?3MD*!hlkP59_Fh`Asbs-9){Ztu>BNtH0^|u8N*`i>;cVKKk?Seo5b>_zN67EelqWd-ZX~?H+Ax+$d6}x z&U-<&h2;R(2Tm?AQJ$NB-aUN^unI3Q0bpZzOy?iU?>pl6U6BAU|7KNvd)yM;NKgoyF z4M+kHW-n6S+ES}qXp)2B*-w3r*pp5ZKQ=N2rt6ir+u5<$PGFes242e3{>kwA^D2IK zO%e~6p-a!!fbylk%{nTkz2@8VkZIz|h+_;OKE%I*TY!6D=s^z5dtvQ-LHNMyyXQZB z`?i3pVUl!za*{OQ%6goT@z!?{N&Wb_CLg+eG7$=73R0W}!%B+}zym-=$svhDr?I($Uo=JdN;Q zXjK5nou3h(-%1!!7Zn{o>~gHeE$)MFiFCAV+^v@>$z~{KU{c|&NZ}UeQYA>H0Z023 zDe3Qt{wr8iIP%GHs^{tjn7yFD1Vj$29QX}%preTccRw3rEHgZmk-Pv!eNtg=WWgO$ zrJrQN0pci_RH&cb@x086dd#SQfHl3 zZtD@1ax?1D;bEabdF+fMM{WlPYlQ2gQ4VPYivCyNd4Lp*&Q1F(%k9Cxsbu?b)}eW! zab2x$TzE&)tE+MEdzsZvU+Vrb@87)U0lhUdiCBl9D+n)EKR+ z1gr}FQiP+*()k8q85w6l#|URahhhAifzhQ0Bqf*q-6-clve!1DE{G<)l5trfjK8qg zK^6FZmwFp-a1}5pcxXlh1b~waSqVI|ui--V+%GDo{phQxz1n6oakqky6jm7vZuuo( zYW%X*WJ?s|x^--#hWE}1@|th|*%)s7XJFBZzL1H9r4j5~PzaE-3HV1p77+Hh6@PjR(?Nc+V=LO|`yriNQVQQ&OHX?I6o zGmsbDawc5tgy5Fod2XBehl<54GfQ;iQ;LjZF-U)b%@mK+Q1G?nylr|O9c5+rJRt%W z*u}f+qN(Y7GVkpY@NtPYB+MK<0{bDI?-O3hL5?~d3CWpJFnWs# z2srXYs$oEMIV_|mk?m#G*B>C4BXiUI+aXZBOA#VPdcq9QKw%?}9v9>>T-dx@Z&Xov zf@bt2AYk9SPjjEb0(&E*at<8$dC_p;#)AXX?;jd79X_KH`l$2e6aEr;=8!-T&Qk(v zM#;9|vU0`CE(#17y-cx>`OA(vqK8FOa`7J;B#QtxU>OIz3dnAK5#VX~sO}IqQ$2c= z+P?yr5PNWI>_f2L35PV^n35uE#t}K|kk|EJu3B0=+RZ#lzh1+$D`*%Lt+*+Kdl zJ%!?*D<0f{k&MgAWQ273aEdPU>L##!B=_$J6yJ&9f%FZWo>#_I%+k`*vp;lkzQYEt z5KO^uL@vPg)o+(l08ITC_9#=c9DF`r!A+A2knha8&~*^0=R# zKM&;Q1180)DF80>^t8D_8u|2b@o!{p&y*zofWp3bJg=+xME9cTae=FpOKdhX0hjy^A*j!lUxRz7(~L;c2j z9|DXYYBe%3NlfO3chg~z%-b1vSDafIe zQJRk@$i!ekbfWO5a3d}4Bkg8wygts?5^OU;8e&!Qp77zOw9*Q3bx-culb>c}j7U|Q z;0*L`ZzKjO;*GY<$0TL6S9Nsna%4*~9-#^T)M=Alw6ybjy6y zN?i@v2ULuOtjqoL-wGX%{Shl#s>|{yF9$aS83X2BI!}-6i2R6f^W(Pz{TV|Gi$XMY zQ0zoV_?>(R6k0*sUp;8>r-@W=g=rQDnrTP|#%@v0&WO@nD|q$D#>k=jOw4)smM6gX zVB;g}9emSVLM|hIy5vdtTUMW{-fG2voD@RLu!GGS&}Y<9 zgI_&reY;i?%_Q{rV*6`%xG?%#|M>jvGiRo}jlk~Tl|J%WPaP0C@XceSrcjikkK^a( zCv-(rTs#WrB={gDBc(&AxO#eQ0M29MqT_`M7pzrR{C_+h5K0JxWiUp-f^~kO%8#CZ zq?}uEQqm9Y50uJIf<}oqSX8DD9Zw7+#?@^R(HFGNL*$1^4EDo!xv9a1e#k>%4!40W zP5u8axP($hejYp!nVyywiPHxjA(?m4cwC+?Std-SCr3T<^XHKC0X1tG@_wxcpaN5! 
z_?U7!~IO(EtoT;3Oo#Cb}Ta` z)UbNPVcqYxJP#138Q{(>nP|}0A)C8tYb$%R5-1BK$*J#qmqzN{jvV(1ntJiQvHMvq zrsqDzd4!I&y5>9FY&n;?M-c}S%1Z*`>MFiGSN{59>Q7Mt(gn1I%Qp(H?2lY#RNV-J zeNECawUQDNA(A^mrv<7+&O!Q&HZSIL6M840{2;PGp&t{UMDT?0_Bj9KoM%+gjzg*j zceARkJD0T)NeMg6b?d4=eX;^y9||Yb>7QVK5VA3qU5Yk$)Jd@ojbltq7Ye2$2p6`Mr&^uxpRT37s3$)zOKLUh@ zTOS(fNHaLt*k&^;K$ZO%BL|f+Cl3$wI7B9c1+SlNyw2(lOP>X%DnDq$s0| zQfX6?q`jquCM_gQB`s7MN;GLG+S>JguAASx{B=L~{iy3Y$M-mn&pP%4>VS!2d0}A% z`=ep42+yT7gpXsfA1|33+6a(<;6PolQUt>Ybha=re*OH}h-WHU728rVm(2MqkrfRb zlE`s>5vZoW!Nb~`mnF>Kw&q*3AxQ+6{QrD15=H2~T#;*lN4>$wg9OQ_r>*>6LM*nw z7AO+~030}=@fb7|eZe3Ol>T*myy@}dpK9-pu<`Qo;TmK5M9%aN%~NgILfqOKQQiL) zvPdZ@10LFxp@AvaZHz({BN4*0pnf3PY%If;hrjCRRpeOJd$W*)zX%R+z;S{!ZOpy6 z$4N4d7LcS0w6d^x!U!7ped4bQM{i7giJ=sC4kU{4UQv5m1_plA+@GW>@@Sucfd8F^ z$I=69V&7|yBe+UahK`aPWE|cnh$_H`d;BzBRJsA?qNBNam%XHf1OQ0L%H80+0S35< zmG%5C#sFk|k&=fK!|c<{uBVi&@Yb)osLOFXqE4aH75H^gy1TvI8S^mSfU=6zKDe5VZrXXnCA=2 z%WV3XwKbXOH*OS2)%f{S3|13apjy|i&3p=mreyc-z&mh32_G~pe@Y73X?)&jrs24> z(K&i0Fy!E8NA*XaB+MTc{l<3Ry@j{-ii(THq^t+{jyXQ+D|vL4&pOGv!I_L6#pr|O zrvHwL$a^j1`1A-okTCEC&x9*k$tx;|OMTlc#N| zme%>M(31npLbIRYv3W7A@yC2XWYG!|-s#k7q`u(TM{3Tq^7|9l-cJCrs2Z@&)cP~x z*)X!7qe$U~hO(0U#DL7vkd0f!i){;Aw8ssdo#|Y5v{u#agvBB3{+|ym)yp!^`>q{O zew6!Usp8Mm5T-3^5?$c}nhg_)h%pk!+91{-`=OI`rANYfQ`ZY!ddalSFHuw`F(~=u z$rRY$xx-7Nv#Ce|mJ9tNANpl%qIo@LH%Bjk6aCRN>itY>iWLdRZEg>We#+>dFiG?;yR zu<2+DS=Tdn`Y|if@c4blO^(6`rMGBCNYG`P<%2}W&K$T@GIQ#)k*i*=5AsgKs$4J_<442U1 z!=%)m7arK5V%Df+jXQ=(Vs)v=M>Ylc2Eg-$1=q0aRJTYmTj#zv|9I)C{CbGGw_=ia z=F!YZT=Zenhs{4e%~9u0gQr9kgv#p$k2PS5u$7Y$j~*XUVb6-wvt%Lw4aMPTK-HZR z7DUPt%F}DdUim!+shVg1`?kzVmo!s#`$))3PibaK3K^mNM^s42<$E0qct;}R>C=sj zjDm)Bxk$YGfIMkPGx3LSPm_^>f#)oXY(O;`0VQeJ^#M0|eJ>^+kc}u}iSnLbK$XKu zfiyStPSBtsB5y14e#E|GS|B&;0%3K>LMC6xsd<8G{r8uhH?;(ThLS?eu~s`wCm>ac zDozg39_S)rS#d=V`u--zlzL7IhovL(OU zn3apCrLq#3L=;wc>_b?FurPhjjrEu^dVBlo@#32l?r1lWKmoO=|22!rMwuh}`e9AO zpCNy9+L=fmtcqTe@OknCn}XY0`km;UWN{bzb{Oz%vr~5JQ*1rhuh`1UDEYxT%Xg@{ z(Vpd}AHS@>1!YVlOl8I1>s~fC{Q1tO60oNR?5_s6?yh!Z3@;REnP0fWHHS5qyua|J zh-B!p&-$i1h9PEx=+pU~9c|9XFiZ65ojTt5jMZ?p{ay_Ec`SHaoV|e1eKMkRj-&U| z=N$y#XoXcjoE3P5Qa(O8!@7WLiTJ<9x;j`c?m_*52?)p&dJ4RNZc_s)TP-pW`%EIn zTz+3Xcq{0Yw$9h}3mfwnPeHMw=1$7-gkkN)>5P%?{cyGr#vEMw3v2w0Zz`HPb8Ua* zxNoa>kWTq+;2hlDp3ts6j!Cgdy5?w(%Z&2d!%ftLRo$u!1L^c)R?4^#KD+6LU<4Ko zav{UMAp7M@X=rJ8?Xtsk2B7Kf_v~DZYk(E?kV_L8$$y+Zr1R2Rwfd{FGEeLjL&e0u z7k`M)9~rrcizKql%MD@ntX9PqHQRt4} zlk4djo?5ft!pAUoa%6UV&rq7thHdiRAa0?CT*$e=@`5KNL(RkGG!KEzF(vCm~4tcG-dhp;Y3=n8n-FZYJ?2L{d zpZ@d5BHwyLu)M$}Ph1x#7(hfc_R{{lCu!hrRtM0#C8! 
zfhOdEtn;3P4+vlQ4Gh~a7~L@L2kvHcR8;%t&rKgbfQOKkm4yt*;VE4iR1Vp;QptuyT^ZjdXYUjFVAlK1MM-^^`5@OgH3xc{ArBMG7g@;l!Cg+ zm@Cqz&a5(y?{G*q^dmVn7fa&l?MgZ(0z#1aI95>1O~;}-PQHZ%DcPTePqc61)q9* z6<}tDbsGQ*9yjDi3%$I#?i5?;5;-yKZU8t0IwB;i?b_`-NYgw@<@{n&lFs5RGQU^h z4ub)Z@FDqq!&y=GAb*uj>}k<=p<5`+D%P1R%ZjL^w4h{^5TJGr%9SgX!{tc@|sozoBIsk#p~0j)4-N~k)a2Y8312~jkdbjGI2ivX11uK6sf`3q zuAPQee17GhP~ypmd-vYDjQu1t_?bp;_OFH7{%rf_O8TPW&BBKcxkEzmbgl<<26{xm z)RzKYF)x*uE^B|k{MLn@vU0e^PE!5$-;2z=M9wN=JwFPPMOK#5ubc&?k|69e2TCj- zktJb))t>s$f4{;qc{|$zZtBb4dKRUmy8pHX@*Y0Com<`~EUjm3@M`~p{z6eD|G)EQ z|AksgFW~3DT(0^xr+2@0&JGBmUJv=DcbtJukn9#2F)2as!F1#*!p?s{D<&(e`u7tx zA9H%QdlvN&k1$xqPAo|}R(rF#sIsMn#MeNbZM{Q*TqxvLysV_OqO%khkax{s%0!*q zzMb#it>TkCLym~CEt@v+BjS`+PAN$kPH@Zw=3B$TQO4 z7BFE-ay^X+{s8_D&TBpqkq^T&SUQJ4NKVjB(Szp^4!=8xj{I%b+cMQ;eTS!95pa(9 zmMfqME>I(UgWLyzkj>Gf8I7>b&7%2iI9o`jsH2hPQxAy)4I?8&p7K|}-@#5MV8qzW z3t2hj(ul~pT1=3vK#1ttu|rI*iNyYGJBZ1TwA04TZIRJk;a}B1i+68k(F5%*j$)67 zH(+*tZD&&I4aez?)zI;(f{s@?#e1{THy?fG+gKK}d}DR64qXBkMX%sWybZIFFj>e# zk`o9hYzkInqL2Ma_T)+B;E}7WlnY4JErh2X^5ji)k8ky`{kyOVtlE9JSUNTnIAP(U z15ymqpWFu~&t^MG(HL~Nh>oeV5=Y9Alx@Q%^>?@RV&46@3Zp(j{zgS6{mQ%B%4rNy zvh_J7w;q1%D~!R0opPiTBD6iQbdt2uYu7HKmSwVAkXi7j>O1kPk}Kaab6)|Fu)}NN z$W6x$|2l2%#le8*+kuP_;`{e&YG{DhJqf!KcA=700($uTc^(Mm)YKFPV~h(y!NCGy zYT@@brihBb8kLuKIWnwV+@0fiRG5iE9XCM^UFmDsTyZ`s#xn|glZDleGf=HeoFgXZ z=N&8ZSpI(hB%M@R4;x31;!eB4%m4EWbW>?pz$g)w z;0!vgPoE~k&KZ-c&9aGW80}bJ{N~@XRmt4+%o#qS?q$K&2YaLFi2ud~DEEk+^6u0G zEA4mo&WqAluKs{=e#U49b+YJPomQZ6W8fRym%fw_sfyPD0 zv$pJ5#v{1oVWF+Pkx0%txuWhSmyu&7&I#|$C63+yo&kDwN#BWn#K%M#fZ}l zZ1v5bVm|^&A%Q3AzYqBZj0O(HAG0y+BHV=(m0=4u1SiNlwGyyV&>}yo@R8YIt;5Aa zH8+QG_+o(T|6GHo6s)99rM;u0yhJYUqJzW7vxRsC{Q^iWun!(Q{xe52u4QFr)I7yJ(`K zCgy(FRF_u}mAE69dj9oDBV%e#TMr^|4!;}^Hri>#up+>Rnz+rdB|I|nIoyFjJvMF( zhR+EtHsOO%CDLa((SP{>iIIf1L7xYcBxWllsp??rezB1Y>GBWa}_r2q~Kw&>yJ%$8-7L#z5BcaLLXnhc46vQ=l=w05C{~P)E-I#X;AQSP!NBQv1Tka zgNh!tQrfaUH90vCflxqkSp!d+m@M|ap|G9`f?@|aKX8ujsa4Gf(8nh=aeQQ0`iKf; zofdlgHr4+@uHnZ%2QPigF?S>F6N`8!=~t)P2+1Df5W| zT?SgH!l(q^eLql=5j2bk53c}xJG}MRFZ5=EX+gtcS$0F^@o}i_fdnHFu<6HDl9LJW z>yZD5y<|L`cfd^TB~8uBf8D>;n;k!PvNv0nR;y2?PmwQGm{IZ<9os@Qy3uDRHcG}; zrimW=L|cl1h;6DiaS|;7kO71)!-Ds=tIG{3{M%U#mfuj8r#Tf*7yt%GTl6i8J?TTD z=(^YKQ^rQ;28S(c2hp;Qj*NJ8Eh?fx zR=@%rf2G9&aar~ii}q9LOBU_wR^(!&Hrd=E+1@d>vItZCwZ!wuZz#E)53-t5^q!g- z-%gN308q?yFv_E;fdwfm>#bv$e{<1y^ut76s-@-P=B6TN&U=fVd*|gtNWeV4#g{|j z7!s&9Z18<4J>EqvyY6sA=DOVb6*8~IQ8X#AekJWcr{f;r`?uHNReMgi%5mZRd25Nd z?b|mfg##C%COoBT#3Y2i;IbPWKYkUX8lEitNR}V`Umid#M^wgJt~d73a>eI)Q4k(y z#!mhHVL!jij-vxm+iPv0DI7KAyXy#vOGyp&%e#UFx+D_U*Y}8-e@k22rX4$&esVR} znrgW)(Kol^#mmjgI&Ns_kR8PPuP05uAm@8*Wcjq69SspEFksYu`$q+t&-nO7sVz%Y z@?|d>x4(M(r0AL4Aed|nbV$@tG4t0qF{vM(>8EHPl9D6{qYZx>T>gErm3(c*NYdaY z{?zifrn1(^^W&MwTJ=zZJ2T)23Dl1#XVxxlW*k>`h=M2`2UhR95(K)rk&Y0ADLi>kb ztKF8L-eZ=J>b@W53y|d+!O9!3I=R|ap%D>4=lx|igs$TnsPD5mN7@&^wVRpyAJD+? 
z86SBRs?ygbB?{VFn`9IYKf{N#)D~f(#Ag`*fMlwh@ z|2-U6ND<#~ed6XfcVC_L+*==RE4bYBQ2^4w9m0XFtOGUl+#%`N*@y5{xrF@wR|+m| zWOMbC-46XK8kg;8i6P{-&$UdCG9r91qk(U5b$t~UzQE9>Y(h(_JYV)O;MFC~ul)Qz z!yc2dQw}>8IP5R)CM+rn;PHbjp|B8Aq~J*br07F$j(!q{`T1<|e^-!FtB>X9Jv&K= zA@N5Se514T3;f`Mf@#2~=&?SY&zF9vS9_Mi2Ps-x-!U^Y=XoqnhlVmQR4634z<2!z z%2M>bioCwTBF5bqCcfJgRwW?_Uj}PeK|sb<|GochFU5V2Wk7aJ`op)@T~$R+aF_3i zweL#%5yoW6PAH<0=$`xunKIP3_~#P0nX^drRowa4BLs8fHmUQQnn&|43C$KRPizpV z$-CbEuU{M-Y`q=!4^AfinnoiJqC+?hKGb>$-`8SHur;3tqzVhV~WBh5umHedY4ICE~t!K*g-f;Lw!%>BDf zmQ)oMd~LQ6%K^)bYaA}ID9`8HBN$`yfsKal{CTFgg7*)2KfeEgr18T-97l5DVX819b z*32}NDALIpG!ha6AsDCC12J~Jec=7C?@6en(gTG1K@cvtFYN+CVT+9hI2+d);OeNk8EkNehp&=91SJ)J{l4_C%e> zhD%}LWnat&B4EAf>@%AiHJ;~Y(uT9*xj}G4!m&I5Jx$H?r1xdkbGMino+a<8fei^C zHcQL9P2)r_hFcfm2iv4C#zJn5BNFw+^iUvXB%-o+IXPed`9vp6#xrgi2N-P@R)7BQ zD~%VToCBBy{xahCFehP$1n4UG*W3V5AP57iXOa}WpEZVFlak^-cd;%x6w6kqe9qZioAJrhFo3@=>#`-+d32PO1wvZwP z!PD0k_QyYWf|(T_uSPaxEkU4(ix>EOD<|hR$kf1*kv2h)5|1&2UA}(ZAw*_D3Q!>8 zpYcnNQGeQv(Zu?X^3kIdK{Y=|F3WC^P5fvj{(s>@2b%Ao!o);JQxO2Pe_TBz`#^{6 zZ)<_L*p7U%owLQVXE%Cax`R64*u-`KtkDZhfU7`o{RyE@acl)<4*l9Q9-*;e$s5mG zN-ma0f9Tn^Hm-Mym3VNOTdL)G$)u}2o$Z{D*T!wXKc?@)XYIt2yT)tSeY=5@5=wjv ztWG<8IPt?-d@)SKXG!66L+`N!iDhJKrIK#m7?XsAUMSO%&*z_+BgmDMoZMyb_hjao z-OayBi6yr`19o_!3tNu7Rq}S+N7qlc?+~3lXaCG1dKZD38eOm0^XX69OS2;la=q)C zludTGU%TGOVJIa%x)EE0BDEZ#U9pw?lYff1I2~W`{-Or zpv?pkYI%a5C~GsQDc6e=eVV<~61;Kg*Kfzni?41VtWsj^{xA}vY?W^%rKIL4eoXy4 zoS%k`Y+Ay3z$%lDc6)qu)ZWTUQSc1=VZSd%P-hXd$JrttpQy7T`d-Ml`_~Rfs6A?0 zI!s#TOXTIV^nUxixxfF>Ls1~&Opt~vIhnsQ>!?Uhw)+XQb6?94 zzO$5W`THBg@liwhUlI~;*RN+a30(lQxtG4dNI^rZ7tUA_dB}+SS?q(7*IEA-M z57Bp3b(iBI?)`MNF4q^W*Ma7pvr_yqm54cmIggl}osH~NIm849-QX1eWgfsA1kS)D zsb7m*Re8fe(Zn=Hy>F^ulaQ38^odO!h&FFkrH5w@rWUi7{)% ziuY3_35dp!viP7jMD{m)mDJSK7$Qnb6i1{!^s$i_B> z)DB)guzkBuy5i&GaMXJ#`CU$C@udi+;R;s8x^188{>jCa`;6%)D*y68bt}tuWGT;- zuF}c{)$p!e^h|oe>niuq=5A-ps1s`-KQoPN=`J;)BcKbMZh(<@F*8#G2le(Xzu=+e zp~h22CkBVitLkSiDDlN=Yd2nc%Kq4%n8Z?&YYTKX0OQ(6oSzx7>t z-#1pqCMIOW*HXqt?@gXZ+_osIG>^{WFmu9vz*3-@#l$_27?smn?o1eLW2E*Lq?AoW zPNKiUMiRY)XBi_L8?6VWXhUegLXND@RAM{4riX$RZ^n ztSo*rf1P2gTyBv-q6V4IgRGxYDg90KK{%b`w1!^Z88O%2hq+H%&Vswplt{*jKQWPu zap%ti%nk@MXQQobQFO_8_-s?$149e$$S9 zX{|lBlljzZCvd(nasX%mt$sjO)>?((UyG`h+Fd7N`n;~Gsi~@J05lDQY7kv}ujE(N zXK8)E<^3?+LUkP2_@hBXKR9aBSmy(yb}7Q@;zca77vK&lLdT8*!M4?Q3n!=6g9BGk z>VYcb?L|MEdZLa)=pemJG_?=@l7o@M6Qh$@M}-!F>bBJdY6hzB6*n`~tO}`tE&%s} z&BV~aKuX~>!~i5$@9)vFxBb86!J2*Zl%@TuszU;_Nsmu=-HU(2#-e;WRhp=Fs>(Q0 zXSU*a#nKpLaf7Nxh=AJkPa4lAoGK(i6x=%)nbV^U2p?Qez;$qc|M>o$utGr~F`m0N zH5{WCg$XLMX?SAW!RKx`Qf}X+Ba3)&QFl>z*KWK&n3mp*87XtzdNXAtgIpQ#*K4rMSXd4S3#uD{ z_hF`y6@tj#(GKtDnY}dDnjVsrlvG+udY}?@9KmiaTKTCBDeQp@<*wF4d;6Y(##07v ziZV~{+^m9>h$}c6omz7vVrK!A6jEBe{ zIQ8|6j~^cx8R;x@cR5{>lw|4Yc~~o9%>q}8pLMS>Ws5EwD=WIDoTpDK_KN@e`j_za zD~g;aXgtwFg~JG%ov%;q{A1#FiEZoK)$-c6ZwZ27tLUFc;!d2U@9zc%Vut*S+vVdk zo*8BIE8hqw91x3OFEh#HgqLbYd@&qrPeN7Ga0#Vg%Eo$^@^TB1F$kVE@0Ycm@}E*P z*hu6FZvKlxJq@!X4`E)C)=H561#`240z-+ynF5f3GtV}j*8efL5l^crh&4Qdeo}GB zCBrFBM#LHLmTsY@>6z$2dok2vho ze0+s?{o1dOp-_>F~!9bl|QM_7GpFEKU>rGO4 zs;WLYbLJ@SYPmq-K3{CpK;%?-R8$T6(T{;jDJ!6haOj~J7dn2u)blty+YGX36O-bv zFPoxMDEym-lgdQm-3A>_U)oM-;p=t1xoNH9GY#eCFH9C^;W_|Fn*@RcM+1--JQ}c) z#!A|M66$z&jK=v6@HI?p*tQj_&VNDMnPP)K?`GlhS5AORpcXAg;**_Q+Zb~eH>igAzhT@d#w1X4 zd3iIx5IQ&Ac***Xj_7(DI!ks;V6a)BYn45CP<=(jD+LF{e_2ClT5TO2LuwAS$e;yw zhH|ST{-;o4?d(bSBDx{%ONsM|)J*#&{%=CQ~UhPn$lPVnu5Xczu!Wf(x}6c?;il zd~B?{hew?us0LF*Xr(^{3E0q*)KqJ2MF~q`YU*(_GvQPBRudy)VuXI*2e%x(TB%#Dv?9_Ax!|t08p^p+=4i)FNI1POzG*Md-vXB21Q0}um4$=Ry#>(bO{*ot@4fk 
ztLf>jTNqR^^rCAvk=$W7Jwvi7oE-h6PVcQ3mLj8&6V|xZUEp!iJ2QgmB-=IP*eLk<)%KoBVvGpY&GsI5g6>;KZaG`Jgt#S9q#$cDL z)7E!2pp`)=|9(s;e_lDD)Ly>J{PQaR66G3fE6RTy?Cik48ygxTL_&lx&a)y5R)t3; zKR+M197u8;{G>=!n~O9l(o--663p`cY%QARA_opRVs1kgA4mW^=MWP>L-;p7$b;yd zK6F6R-Kp1mC&RY4-7gnoEWNfsW>6yGn)nIe*trk!HD?R84I)*ya_!P+y&Bh!_Z!n% zAaH92hPTJ2MiL+itQwx-n0>jazK6_5=kda9p@qkMKhi}GT_azIeh~^yjiWl(TSM1A zehY>qWhPc?{n3;4-kAl2)~OshLh@yh7^`2_NNmAvIPqRv=RinwO1UOo2UEeJ=+Nmi zb%RhB;0zSrz1y>^9(WiisEq`ify%E*9W@SNs91J~{+ffJOz8+JGJO|)m;mB*B^ z-45}~t9=}DbfUMS*S+zVt6e9J$= z+s1t-%Q^aImbLp+$u%J=}e^g>QB{pXRhRlwU+1(mDG*U*T~iSA-mA${ScU)T$8u0H%#V z8ZW3?rAw)pb%@E*JmZUwPN(xHIYnb|7HK~h;^#*NbNI34TWAWwykkf2P|HhpZ|`-S zU~Xk1Mqj!^6WG^UA|uLVyHw&tSHWBLWkpYDBM1tFPTrp7qPy5cOXNoVaP=VDLDbm< zdUY-A_4W1jkoSIE77XHv>x@mG_4!k#rr3wm!IPh&t8Z$$1SlQ-1j2p#oK~9Ts>IwBqU5~ij%uo;#}<`K#kB~u#3+nAz-`v)0nnK0IvXK@FX&_ z+*N8c2FsKo*tvl{F)@vh*z7?5@UhRS7z=^TKqd^8VS({puu>^U1CYAOCiB&d>+lW9 zVAVs3)7Ej9cJK+*44Fp-2QOQ*5|u!np7!V`Was8m6YnavTHFmyS0_^p;OOFg0HHNW zh8VQFp}rmwicZM#IeGrvSN2!tfLft3%!WJK=;1ysOCx<#+3bQnv(9hO|02ck-Tg$DdF+kB1*~rjXFFPK1>mO>aGM{5W{bU{yXYgk}>JnT-#eRpYbGcV+eO; zoxZis(j*zdz$va+HSwdBXp%%=RsGFxDV;x3q#l<09W($++2H03(NAJ%|S0fW&@rg zBN8>j9v`RgEjj2^A1spKLBYym`QW6tI^cc~b2biZKg!Wpyu;jP?$;IfYrZ*eT=#_=ad(?SO| zHF9mwqC-0*BJv9q_@*ni$G$Y-?EyY%3h}a)INEpo5h%Xo3WFxLCUR}vTEwt@p|EV@ zT(1;Y9J@j5Ee8c)?~p2(LgVswU^Mj(_1XDpi>F*W&)P4QeKo2&BhM00O}lZU!4u6U z4w)U7j_hdE?$<^C`gP@yg3<^*R7jo}^oTxOlPYsbz<$V{Px!z{OUuHI_m+x)Iq1J4 zWC(2pGD$1Y@nLNGv~)WrN^6UoKP=5ij2%PC8_H^MIsEkw*rp7d0(Yj?5^e{6;?7;4 z>=+eYj?3KEsmRX%YaM}Z1mBM316l4UIHcs(&4UrkLB2t8# zUj5jY?kxZ;NX6uv;e%xO6n_d?YHSM2!S348Zf7FhjT7vtA7M-EvP*lsuSyS^vOX{4VsZz{BgzL( zRZuC2-byQXk|h~;ATs>jl2(hW4iyiupz~+Xz62Z7Wd888&K>jpFID8lvK^U6>0HCu z=bq%D)Z_B}y3-u`{cDA{sxj|`f2pL(w@Uyjx67hqsRj~B8MIu*XY zLd6-w+mJIep$LsFk>@xo?!Nu$(64tIcZY@U36YU#+Y=v;=5Q=<0Reznh@OGrWJHQs z(XT?6w!B`d?)laZEnQt?zo**B_YjI&tR~j2CAKjssZ|TVZ|~hKEGug&@y5w~6P+!- zB;-~-F5G*Imv_C+dzG5Pla@N2J3PfWENUPvjoh~Hg${?=S``HO=R;^fXbaW2QMC#-7u|RJJPrwJABr>E+HGv(i&}R@@(rqfZSo{_eOR)$TcXQQ^XPrS zC=-2l1TI7-SSU1?|Ii5Sr@Ap%vbE=<8?XhaRCoPJMSp=m$Q*k4(hx+RvG2A`o1UYv z%la48T$#iJW-#I;ic~k}H(#3X08;{!$(XC5o!*MRLmpkax$YU!C+C7?q}y!jh71jQ zUE@x)ceY{h#W3%5(T`weVd2qDD~=1w+|X%u^n>Y%!~N~qpsWrb-uHL`D}BIGAc+tn zV>h{nH*Z!!p9A0C`E%#yH-zPwmq*>L~Q1+pkKdIvi8bRd3NF_U;u#<^2(lrzfN9_ClPcce;TqUlbod?N!qZ%|M@iC z<^aCWsZ?Y^)#}1xpjnKJj2v$@vn<_LU*hfr)BxG);Mf?J_62T6swrd_n8shGr1-ij zQWH>JKfvlsKcFEmgspGwpfmB{))a^!Dv_Pn(LsVRaCMQ3fU(Qv@@3RjL@x3g&)wdb zaAdW&Mrz~6%T|tE-@4IiFw)bHO&(eTGKob}n17QvR(kgDI(V=c*Y9QYOtNErfXqq0 zF9L-rrbvESNEc#ZcOMt zOLxwF^r?U?FcTI^95t&stzKlZiHt<>sb`U|9=B~g^$qqIQnFkte&ztxMzB>P_ z{_zjO3PvqpMnje(P&y$#fi5d6Z$>xt%*s6ZmMz9FHs1}h5EO)nxun{05#A^qV5G&& z_>NFk@7=6vX66n}4*ll6JF{$39J%vzHBRXV1_kOj_Qb*=5?XnYMLOXd(AKAqcKtEz z3Vhq9D(V<+GC!!3=0_c>{wVXXmUqa#j~N-MMbqEnJ9occ>|E7H(?D9?a_N!S`yT&I zkL|~(yPD|BT~PF;b^E|OqK8Dc9#GxV+B$|t=Q6k61=6kzfJYc`F_nM$VhYusySqEB zoch`(m`gMscRVlO$Wxe4`__OS7%ke|kf5Nk+_X7wz5v#!WAUz~J{)n`JN!}f%#Ix! 
z2Fs=i&DmQ2H8vhqXl@( zSVAo*vf9H?1|uUBKm}p~eD93F;>W7*^!2DJAGjQArT z;<9gFYI)K+M@PH4T4Q8*I2W=k>awBxWF*ATes%9Udotz2TUW6|S$36Z!E6=E5^dswx5`VdGGy*M&QWt*OWE=DJQKr&Eah30p;K z^0#h;E96@R>=YBLK!;9BC`)Tv1-*(tU!))A9uthf6cdLh|CFiD-nVwoUI%VBH{Odl z@UK`D3*|U=pO#6r{dRptd%^(}?=u74YwSN*){IEG0*Gnyym9z)>d+nE)SI0aM==71 zYI$PcV-d)6w9f%QUD)EXJo{D7UepgPE_Q>?13C!S6oe(?A%?7WWqX3_0nvJ66BgoX zc9t8tFSr48%5!rnj>9~D@-fOmoiA?38V51|Jo$5M3@9Ae_U)IRX0A2)T@mAGYBZ5p zui_Uvbm;@0y4@$9K(+1BK??+-7p>Ds)xZviua_*wMe-lLUmTd@lJ0J8MTVoncLR*_ z8whdnP2R^TjuMCfU&v8oKF z6TRA@uOHpo3xw0N;+1$!Uer;LS>4MgBevaRJ0VmNY2bZu73oB)Im;*B4&E3wd?~}n zJz8`r^Ro43Etmv$nkPC_{E|Mz5c{P%(%f==ILRCOu%a=qE_BC)4?9oD)*f$qa-S%s zhj|CK;VMSExnlP*k+apNciMN+$KlIIK~#{jceO7!awxDQAAa@vH7S#%__@cU z_4a^Vx>p|Z#3rm<_EVU7z3QWsauF-u$xnMAnIS1U8Xdq3I2q2JJ$v2hz%ST8uxs-@ zuFDCtWm|z6TLJt7F4KPeOC)q-QXMgC-lt1iV&e(#J1!M2F?-3tlK~t%=pK6e z=Q5a;uxMi}AujG% zR>9TZL*)gn9miv$>NB%+L`B#L2<lwH(r{wd)nabYoTTD`SDGsbDuiT*gELwd`veQ89~qs@wLgl%qujO+;x7& zqWYY}*0mZn{!f^3V4mEH&dOuATD(%qiQR7#Hz2+jrk4wd7C?Oj*oN8yU<Au4Vmz#9Qif0u)d=cZ#`LmvB3MIk$F}_|4!_5M53B(J zOM1a+p{pwULcB~etafM9(( zKX~K^v0Ex=)VTrWU{|!B9*hB&aQu5**&U_vqOcHI8f!qM1YWYef!yzG(?d^uRNr$H zXHEZbrgm$`YpCxPJsdTC^TQyS;&Ju3;ON_R*&`=4Tx@In1`3W|Q702CP~Gx#w_2-( z!Dl(I)!Ws>Ob7=<5>*oWJFM2RlQgwb{WHWv@!V9aQGqjoh7d9O;E|8>f6}$ooNt@* zd1g-G-4+kN+qgIMe*WwN+evsPU*}8wJ%mA_&oege{F*QkRsTFMk7PCPEa-WA?^cIK zueyHSWZa3J@OGmpCY5<0vG$UeW8;vy+xwr1x@YQarlCTTU-h#*s;<8KKjYvC@<%Wo zpk@x;R)y^V>=$Nc9Cm;NPt4@TW1K>%MeI7Vv&w9EDSbfuv6>N#<4?Znnnl!d$a$K@ zhQ=MlF%By6Uji+vx3#apqAxo$_b{TUF z5Uy+37(X-E%g#&ZNfVK-7&8A^($L`Mx*Iep`HMN6%;y!b+6XdYEWWMo$Beg+lOc0+ z_ipt{C~C2WnUv|kyKhd}dQS`D5OC3u`cZW3aD_yd%J=I^BV#w;4oHB10}q9Y5;SU} zTmrcwMt`I^4wth)I*B5J+0SMmDuF{VSjO-;|Ca>U(%{p&z{ap|_Y&G~%S9c3NHur! zf7&LzPdMp=K@|1q`G6l4h_ejs{4|2fhXgi^X4nUbuZPY8mS*g~jQZ3Nf0GFXqYNW6 z{vSxU!Zx_#Q_oUT`hfw#Ce!()A_x{Pv_tS{-hL3v<&|m6KIwR=MORrc0%1SD5X&|D zJ8jrsV8(r3Qr|Gk&aN~uV!pPpv*ETXB6jQLe1-IO!Dk2)xrEB2RSb#nLM7|(3c7To zB$hL?dDN|4YWVo4iJ|g%7(mcQW9_p5a}Q{FtVT1iwGk~oD3wde@*q)Wey9vLzO%dA zyX&l1I1Xtiobym$AZ`qxK9Dv@TCsZ7YbA;%<@!<9=LP-R?mMY;d=+s}{2m+A-1Jc3 zp&IjPh|tB1^ZJlWW+QP>K;Vbj9>pqX>vd%9{*Sia&`_iAYT^#8kHv_J?XEck^}XUC z)l&WVBkETw3u97KQ_(hb+h6c+y3XP5(^`{!ESDbt60buye;z&?GKxFe+G4YIX-kYf z6A&^oHgXZ!FBD^<%^OBZPFA%`lOp6(vPdoV>~zk2$-WcQB)ey-vlIcPAJu5Oz4 z>uPfXZjY*r#~)q2g0``M)^4%Ky&8F|aPnO?Cmxw>mR-b=8V}*QL)Tav*;^nFcuSEHb zZ(4^pCy=BSLjCagaKjHC`HalKOxl}z=LF$FMdloSAzocT7yT7qfS$(72qXB9iqKEx3P<_4hhYfVDyP0#5@UeVE!K%+yc- zO(adfXJ%V~O?Ey@M~R%)!`XSxv10Z5;f(ZF=uxN| zn7?UBQ&klMm7^4a%BFaTFpf^Q?DyzUtnR-|Iv{aM&@nyH+l3($+YL3lJJO{0??3ao zJ!<{A@pw{Bm&|XAkFHy`Z38~HE?v6oh)puw^lP}*kJTQ%xHVpyejxqVu(f+QH{2tpr>B{bC_LMlO9AVslsX^U*0 zZEkgO<35!d#I-q09P7nyMy%%0)93kiL8s9I;`(2eSES=sFyiTyJVEAAHOQ7HPu^vg z^jjzNJZaF_c&g`?(V~0mpoYmDqLy&@tPMn5JMds&z#4ZlEv>b`b$lO5XQrIq%vPg= zQs&>~t|`g&p|SA-$QT^G;Xw>Mv+`WQX93#aqJ)-yK66ib-r%msiP***d{mbNXe*hy zxDe>K?_@>yv;L#;MJMXNuWyx-eD;d}bzZC;&&(pZV1yPgi-`_K4a-v3md;KZzGJ>) z{LQTaU%2kLi5~F&4F&vSB4{eMpqT=pfyK#&4i2J$$`C4Fg$nl1pWpmu7T!r& z+1aob1C9YW_QWhUE<39&ziWPG`luzxd*JezSFvpVe03vqJ)|viaJC~vSx0m?j-X&^ zjP+n9ub?HpFH z9YpeT7jjE{8)h1H2@~!lO#uvAcfQh%WCmyka>$3!lIlU2I7uwKAr;n!O?gK&W~Zs;Kip{d}?;% z#u5!h8GXb9zYulrf}-LziF1E~-68+S{&c(UfOZsU;Ax~@4^kq~4%jN=ZZ!PlcqR$X z2g|i=UiIzU;XQlKx2H`mw+aPaeDG3=_qCq^Nm@nw-dtB*9q-PTW^J?BGWvQAeHq44 z1p)B|3<5iJ`k<;n*{ruI@b!K3dDAGrLkIix~P26ZG0ad*8O$*=dhgV zE0;c-YY{3PWe-Fx4q+Dy!s<%A2pUqhPn%7RLR))i1Q`*wxc2+rhrpI;QshDM|(0;K4j>U>;kyEe4<>a;zHW??WqD&WtKKzAtNj#5T*uos2WgjjJ zGAE#S+>s^PMe0Dt(Wk~8$ohZBIq_^quu07W`kg?~d-rlLI za!ZPC^SHp>CHtJtO1fP%yvfPDG-@29W%DncHec`hHc`uYvxGjs&s3r*_q$HX{(}AU 
zU)?`0FD)#1oF{g$u=Icbj>^FIruJM`j$+l|ey(@~mEe6RWv`~3i$pKK)PJ7PA0M*>SNunT^urBekaHZo+(&S{);Bh$AtK~+%732=4_8{3-=+@p zGO~N|T$ySe*~yFqG%#alt*j>9R&fw6^{%hE!2XBk8sa11)oXj}l9Q9sL?p9_H6TVA z=M?Vy4fH4by||N5wWsTkygIEadWGi z{K?V%kzukn4nwR{NJcU{MboE082r9+q0+acw2-2NtiM~J5L zv2#~66Od4$pg`^Urn0gU8~bMG=0-K{=o;vM=VLBNh_{R;MX^r~)CA$+qWTo>t&#I1~P5PkE{$+BQAvI)JRCTv_wuhIp6i-w zYJ?BZr*Q-aysAd79{gUAmJKEujU>%`z1@wo_*!E$W<$ooy$Mj-63L?FE?G;zJf&Y-LpFPsNowWObmotMeFOXF_(k(rl8?9me}i~30lZIl`B_l|%#aZl81 zW6NXrVgfwJ8p2-VQ8LciB7Wdz?I7yqG7kDO&_Zr{ot4^9%z>=fvG3+J)49o;(QjrL zc%_;&D=8Fpb(w0PVh)BU#Yxs&N6#uJNAgy^R_0SQ$Zk$fvuNXx-?7kDhV?WVLGB7j zZbehdM{bpyTd1gMZs=1{T2?J1r0_h$39yHj;I*Q-{N+bes}mj0x6__Iv32m`egW|E z?OWyOy8R*|!K!H=I=Fb=?vAR~IaVz&01M`)VP|RgT)%Y=tK(RXi*>O!~=9cYw!OHt}oKz9@ z&ytv6NQ#1hQcHHVpk~%`t<9c61jI@=F7pJj0zUA z*xbyIXKq>ct$#Hy>{p>ZSN*iDdgli+e>;v1=(cC$YiZWouhOjCs-`Is7y4Q2&-63|kYO+lKT?l4kTetE|HcXd zypybx%R<*Ui+?A`t=+g`h%`bx)vtPs)(_aJ+$s=1Cve&JG|%^mP3-r)RZgAyiU=A+ zor;Sy2hT@FMuLVR@$UkGBT0Vnbr9bp4hq>1OGS_+_a3^o6D1L;J^b6Q)?a;>754f7 zB|5TOSENK*=M2VSb$mujS!+!BIIZle0L^p7Oy&*v$=KE$i{bDmXgDfnsWGkA&lS#IouPwI-*}}*-(RWt2zS8&jPPW=U%#$ot|CGS1o#P%U*4KCWal{7PC1ItwbEWd+-@OX|cns2cg&Zv%M&bRJW3nygQxqO!8j7 zMDmo6k?Kc@!)_;hQ)~9+f6??E&{+5H|F^9OWh9Zk3mF;7C?h+g5JHHO zkUc^n(U6s7g^-;QAuCBLvrtq>Qua!=|LcCf|DW@m^L$Q?N@^-lhqY^Fh|`846iV+1)Az;*8bX{A;sR~YjWoYJen2s;$-ak`me7Sv zXpeF1ijm15*&citGnG71Q$w)%!PUEuG2tigdr>fxpUHd`QtQFn;$$N*2*mtUQ$HKv ze;yfPc6R78a5{bAmw*rwGSrVhj3l!)Q=+}9x2|{}AOyz#;zyEQQBlHfhWo5)9zpT( z_WF#~Wo3J4h7U_@F&F!rQd3itQ&HWHj0B)f6c!sAmIgm@0>>yQScTMb$)r16x2mdS zeJqVo`s%6Re#Ydgll}3~CMkW3fW53t)zZ?1BS)qKI9wO!+&M$te|K?vf0;l!#PGcD zt;m~V`UT{jX`}>_h{-=$d#frFDk_VASG5+QTCk zliNNc8f&DTFV`p!j~!gpQMYdWzU0h2$}Q8|Pt-@fdp7}&yV*A*W1T>F-DE7;V8k{+|!1j6oOf!Ksgnm9VvxE)b9G_MofFi z3Eny_*#E>x9d@hd>Zh5s@W(fJ2ojQ#=ZB=pQ48-xMjpQZK-bNz(!ZiU=f%^^`;QZD z4wO4o7HCe&N=pMU4I90VkRF^MJ(7}5;r!0&_0IyB7Fyr;&ra;eDHH(J_E6Az`_Paz zxRU<<)n?DHSFgFKDs})0M_eQ%=jorCgIl_~Bm@L5An6IcLuPF34w{1u>9Zfb;Og2LbPF#O+CAicjtmFMxlylB-SItF9}-a? 
za&HH1>#J8S66eXgoaT-&2hJSczyFr+*PHHHUpoING_4%J|DkoyH4~nqe?CgaC zB3G{tJdx)$Iq4g?_wKSpm79;rS45@rb8|QSc(Wvf^O6cRm9*M^ zi0m(g#EY9yk0DJ7N}sJe2H)*2NV*y8Ip(fU$`Xh>qg3y0X*~rKl$VF64O8^SubeJx zx=Z0$k#czE*EJ6Z3cbVKbRviHGxLkfbx@rK2a~V;d0t$6Pb2G3+xua;F{#BpbC1qv zUi^F4UeZsZ^O8sc);PctdLr8h9k>9}U&4-$Fc?i|Dk>7XbVn?+ht!||4|r9L^_6G} zPo3(@88#cZ$o0}RZsb|Z|60HY|yC!*~-P+H_jm+U;8+v;B zdo8DWDh+3;zfu=Yjq9Qf23R_4L0reIhEJZ$%6unyah0T z%6W0yQTTk=3X30z0z#X7tyg8_h}M{UbZE7<5+8H%J?!p^ANu<0N<4M2spB(oYiOmmR2jh7W6Dx?@YuD;v)~KvphO7@bd&HkkJDUwz^M;^0Bn$%MLc@R7!UDJg z;Bti4$*Udz2Dv1R9t&}dwk?tm4>QKcXB5!vVdNV9wtno_p^!s^+lJM~hT(stY(G+- zRbP#JwdbhIt_O@!bN8iN)XI||4Pq3{7Li27Xl>Ozb80xD&T2a|iV(9>a6K)fFn%>P z0(Rqys;X_a%FkrnF(=_>2*S``SNPIeqf~7ETW5!J&M7Rm+j?I2T0-tB%w z?b4g1)x>yIwY*#}RG^r2zo)C0m~ssQKX_6p=n&kAo}l@^ezjbJJs}pI4tRlJCvFR# z7~l0}7j!USY}FD>)e}rf2y_wUAkr{Hwq~1pj>L2f-w(fjL%@l)QVF$`xM|$jSn3%) zmiGPk|5;(tyIMFqL27fz?tq{m8Nnb*J4mL>PI*qtb_apzU%&PdR}IF#nG0hVWR4VZ zNNF1wy$sb3FhAoyZFOevT_TuZP{jJ~!v`k5-SQXi&@e>CB{1Ef(E(f{?@hV&Af~*s za>f<+UrSGqLsaxy^QFE_>40WI@i37jm?tLI+tJ1XqQUr-sfMQg{f`N zURqKKi6GK?RYEijc5lMf1!-yTaMFMlLrD6yjV-)NI7dMY?x3cAOr<#Gnivz42XXxF z-JgeF>X)fN)=C+g1pn} z>N^_s#NUa$l)4%gPBcPx&a)g)4nja`N^EQyw_;gCsv1vqO%3$1ceS&j7Eub`!ikfD zgfl0n&jSNU#JW3Lwkxg;|3&=B=;*^(LNLvf;8+HQv*%`{&!!)v!NgW{pr~QGWfExv z?ouJ0ZEb$o77E5HRiE?l`PTp)kxjwM!BHnFB6fcDxY8@CtR;Q6T?EpF+wtdG4oIhW zZTlFhTrrOoV%Scc1no*$wn z{S>+R#TAP^V!go#F|$hpY1yz&)aNJtzIBe#P@si zj0?(e%r7{WT3cFBSmqWNp@CNr6@A~{zKgb%lwNReio1piUwzT&MBP4n`37;mzI}NXeD@fGI8Zj@=g5yeGslUbQI8Cfph8< zH+Y4X?$NvVq$=n2tPFKTZ*T#gAA6K}fbu9T5OBMJ|3c0o_-b5xq74>i@!NmrGc9xDoK$R2Kfc&PTNo4OSxlQF%*>-oFjpYYo|D&B<-cfYz~_Mq^V3(P z`mBF+zj%F4;ac8vnSerzQ>UIGkx+@WsndJrvP{C(^0heKy~lRm4PIY{as@I=d--?& z+s~#XzbbV)A#pPJ@mP=;e^H{oCc|stB|o)`Emx1ZP;fWhoSA9d;3>Jot`LW?i|(c^ zE4U>%8ZSIjP1ofBEV_-PFr{`P!svMi-qeJk^;~+fuL! zsP&S!NkR16wvM`nYfgU|)zw=NlN;Fmjg$}q11HEikT}*?uH&DwXAGP%r@r>|wc54@z z^#{w|gtsmNi4_$i3~Cm~FNZySTm91~zFsL=-s5J$1F8fjo?WpkQI#I?9BY?B#$#&M z(a~AAfLjeSv!{lW8t4g$DO|7j}m*8V6ICE|C zo*Nlus5_Qbvfo%4JiD)LlQDFDH+94Z!TSOJ#SQOaDt&}q=QfUri6ACjK_492guM0i z!*(}Q*vBuHaS~rL921<8I#W5_l}Y!Q$Hyo_-duTCDyx+^bOBcghh^&KQ}~zwBXXoE zxZ5nz5%vTIDlH!RTk@?Mj2%LM%>o;6_9LaZ-2d;T)~;J28?PwNPF%fl!@6J7P|UG;;u4PgqwQM&(Z42dd(hld9^XiTI8Cw%kCNrZA+y>PQd@9W8& zeMsP@-IHu5T8+!6+tXcR;by{`h6C_+&P*y{(J3%as%naNP4p&6$$oD=OOI;lq+VZFcM41wICvc8!D~x-t{v-1){v_@I*tQUWuRu)`1>?V+uK-5GqJDm%>}~n zqt#n!X#gHSfgi((d#B?HZYvRPgO{%$sI_s$_XS5IW?*8#cWP?NU4HXMQeRZM&e3Zm zF3ntY9!xtcs^IM%qmgB+sa;PVBd5Rwi!NS{E?$|wB1E~Q+Mz<4>it)4-3(>=Pr2pQ z)nCqsO|aOVAiJo=7CK*3wziV|1P)@zOwbDfEkRT|wtJ&aff(jIHr2GU8*5#Pi;84S z&#y{Q0g66cpx$5<#{CCkbx3L`P^W^Sd z9jWL>zrhg|&XjPIX;W$l3lh9=x+9+tXBuuFjy2>6V%Z<&t)oY4J3XiszzX5!mr(jA z9=fp9c}J|bx2&7h_RrpjE6dYBno?6z%f$6MAz?uO+g33FblR?E@pJ*;;? z4hO|L$xrOxuE~5<=SCo8isrBVTOB~fRnydrj*X?YzA)(8bK#)i;(MWQEz!rVK_m`T z{J{!SASG`*I>1{&#SM9^S)IR(i0y9-B)FF7ZM(aJO0E6#Sf!s^_3 zo@?l%X2a4EvTx(g-)E@6gu*0-Pa2(=SXEL&uX?|@SghEJgKTu1V0(3;XjL0uIVkow zh3xwBbGTwMPsy!k(TU@Pq`o1+N-LSP0XC7Ual{SoDI5@Wp`Q6F3-o$sW)IQtEt-XHx2L>vrz(|7Tn!6H&tyK3q$|MI zza#e6@|pr8(@zE!qsfU``(KsI%lhd`n`off`?(wLBQ(Lp(im^dr%&uv4xPF}-MVa$T$CGhhM~Z9bbfg=aHG=T&=xF4)Ck-q0oG zmXYzI{<|e^`*>#^6bk6cCKGOd(fR0nw6>zy_x7#Zjt&mH5_2DcNkr@|IfcCIMJr(I3DLOn6?g8{`6O?IXMnC>cP$uipjQLKFpdcki2e+Nl9Z z7@5G$%~=HnG^+Q5+kdk#FgS6?$!iahsOT8b6Q0%f{cRTORho*2{@2H+^Ng`=^mh_a zDiwUn*2w$CmJ4yq0W1&8%=|IZ91%*RgE?qz{bkVR8tfxM`SFk7r@^dxOi}T|S9-b2 zO12r5IjrLJ)w$!gS4OTs^wqXbUoW<@7WU_)PSp?oH{G! z7S68>ZEwNWw0nCdb?f=jQD*xwa&opB;T^rt>pZcPif9PFIVQr&%4!)}KoW>tb^@N? 
zlw$!8lKWf0phnIwEDY58h$m>b@ZMtOboKEu#ik@(U0_|2*!=*n`NnyADaX*8luJu0 z0~WV#lSp578(ddK%{9(Zbrc-^6bZlqSq{!-H4)7tNL0EAL4UF_!x@t!dg@xS=_O@# zyfvBmiTbh<5-N^IBEoTiAkXj5L_WJr`kkNlul@YW%gSy}US<6np`E>lIxO90W`Amh zcK(Rv&xOk3>u+qFoZKWL;c`?USUykr!daK~cE?WN!4F5~7pft>l%Sl07y?#}-&0fU z!u+MDEQ~cmN-~o7+&6ugl5!akHJn@M)kbPqXxOKRJ_GA~T3C2TNVuxoK9%Z8&z~5F z1UYtnYwK-M&pA8Q0)cQp%3YP}e!h=tEMT@U-}%>YYI=HlY6`6-GQgjj01=vix=?BG z@@2FOfKGrXd|}A2dwDcoU-cEF=U@0;xs@TWvGuEE^yxYQuyt%uWat<0zLL)^LpEvF`$XrP& zDGLh=c>UuO67Y#Uz2raLDIFdkhbr>ey<{=#dd09~GI;LyQt#RY73f&g=so?U-*RUS zQW6t8?BAU3d!gPUz|T)K%``QQY;CSrXm0CGr60E#rs^HKSUJlt%d2W4RrD0ekTu2b4at@wG!Vs8ju5V zD_&Uyr{EJ%-vH$Rq@)<{+O34%S$6yxmIy7Sq@Y96@l@{O&XP)6xBEfT2-ajw2q{L$ zSP&JGFi&c3^@)#vlJ-Q++tAR?E-Rjisx8tWyQIVwCPZBGqpkZn9xB!kb$;-A+1>Oo zjB$rBM4l*eYn2T-2oMgxd9y~nMgP?OxS!ix0*b83r+T~PAs-Pd6rm^a|GRLcBgnPR z(;sLbmSr-y9zSp(4croV-^KSw0kWZtvK%xU9ve%*0*$X<3%P~i5&HoS77Y8?_%-7Fw^35gQ= zT_5cxsy+Mn2LQYxI;qg>*-KWV8bbwuZU_2;6P!r+Z%t4-Z(|cENlR&mwntZB@w=i< zLBn6KdFu1Ug>!+cukw60k+MWQkfCS+Nf;Hib5G`W3DM-YSOsWT&iMx)X{O5WSeJ|RD z(I3c>p8%JHVY2*uOBB%$?EV%~JS$5}#K#>_HBk?{NUH%cD1I-Xy~)whW{&ovk|sEg zi3Ha2pW74nI)>id*~7P+gsUWJYFT8)!8{uqJRtLXbro114f=luCe{x;$5l^C)aF&^ z*U+7y2}H|@e@79=(2UPd3_e~;5oK#6dg>t_bodx8>UI12yazC8 z2z?KmYc)l+b7+Q~{3_e#>2Bi$xjuS2B3vVzkeEnDI7QX}XkwZo1m6amUmgpb#`_3c zJgi#QpKSq_0df~tY-7NRo8zjg+|@Y(wRz^&v0jg-lH<_*RjuqK8$(wD-;Oxmy1F{6 zvu6jsehox*Za!Tn&x$|6nA)l>5DYOLp4=KLR?e4bjiKzo0ifi+B-Gmb47rBjDFssN zeKOnLxZAz<^&>ofn%#SBP0vPkzRcl0jzw|`ifSMF=2j^1F&B^h`c)|^awS;zM%1lS zf(d5^zfcgB!}zpQ%pGWqfOP-aHCj_s02x7aNq%ol340!+l_B%qz5iUu zdWd_e_ z@KXPn`d}V5>*#>5i57l;n)*%gat<@v9^DK%#T||kNoXRQ9UP*rU!PxW>LmnW6u`<3 z%s`;q;l=0m`K||^2pkpkMgS2{L}&utz&0vqw;@~LWk;eR@Vddla*yB4GoD+ zCJ&W!_LjG2`t`SMZ98`E^k(^a!hrtSc5!ktTxCddQ}6wQmKT!rY9P5)dKw&Tp6oAi zY>%lLaEh1hlM}lmQZle_-|gm+n?8?qvyEVZnVOy5t|ub^#XvF*k|5-~r_kIsOPr(O zS61G}5euI(hUnuAm>^og7lW4NG?&=ewS$Jn!`;1!1o6`A?}e-!Y%?CDeecTbX?Bmq zZ&6|Ri5&-1npo}`rjfNN<2-S;IoQ5a=C#_a_`}*C^B=8sbCJaS@}@(4aZ;utNyN({mlsJf z-#+UdMva8KzG{XSP}@HmQ!-x&bz!l>D_is81$Ie#z`TIh=tN=qWh@eVFOIAWasoPZ z2lKBIQc@sgk5|!ACTR1=wc)KZTe^3Bp5u775lM*jNoG!Z5_-Z^u|B*PD9D|i^jFS> z?h@m?Y$rL=<7XoQH-IX#c@ABVp$y?VWkS70kNF;T8YdMBM*X`@S}I%{%Ko}_4NwrA@+5KFPnQKYNEgIxHR% zu3M3j=owsD#m51-wsHoa$}{1i%M}pm`PiSBoE(4^c$}P^(5{u#{o3Y-eibmI6e}&~ zVD+NRP|2;G43U_u+V^Eh{^VM9o-00WBfymEeX$6lVB)a`BHt1Kd| zG|VX*LPfyxBDj-C3E{`VWo(~~uf@v=1V~Q#HZ6qYzAB#De3Z&MAFaRr)gfjRembQ8taX$#t;K z^%Wqtg!p*4Lp=}6Y@WZQFmS7N6m6Ct9RHZwD8MhShZO)@hb&;1)kl5c8ag`r{l{&0 z5Mcu%;Gz@RC331lB1i)MMx<%FwU6QWrWJo0WgjHvh@3eANrKx!UMxb`MNjW~?Kjo* z^t1EKLc(MOCgxW~$qf6+2#KWV&|J1Ese=en1<>w^ zgVuOB**K(9R7hwFqEfsn&AvrYBd)Ekw$bc-Q)qcmbR~$y((t6)tQsS){6&jxL6l$! 
z4Wy$b3?n>JpM)m{El7v*QXlX?C|gnJpl~h&P>5!hxU=tcEOzk8DEe>*Jids4XJKK% zGJ!2=ABXJ~#GTUYkdY}xU#N~~*)q6;0(wV3do?VqX7RJzOU}#Z&i%sfjXfd=T8k<6 zUPXEND*&ExTpxfj5HMkIGT@{OA>|S#7f6&Ka4PSwxrgmv^WCQS2(jhSqfrDwE~u+{ z(1cq6`^8q&M-2$L4_i2qY9jIgB1IHk7X;Bh#l*%^dtwNH<{eh>*HWZUy}pA|;o7N6 z@TR8<%LkP>z<>uHad83YY~Ym@uPuW>V~Q*4WIzLo#*3sI*RNRvG$ilM zGXY>(@|P1V4oY!^2O2-ffHI^qumdG&0K9w&IAC!sj~i&n#rcOpi3JRN7?xikdVCId hO9W^t&6-d@;sx~fT!^(e|BwL)JYD@<);T3K0RRw1VuAnw diff --git a/10-deep-learning_files/figure-html/unnamed-chunk-21-1.png b/10-deep-learning_files/figure-html/unnamed-chunk-21-1.png index afc6057f810824c94ac336a2e1db0b24a6e94d11..bc772e6a534a1a16e36b7088809a8ea1ff513c75 100644 GIT binary patch literal 86607 zcmeFZ2{e{%+ctcqfmBGLj7g;;Dx!=M{|E0ptb(~RT?C$LDj~13v7NSMm9_*UFvA<`ItH~r=>jG_E zVA`*|YhGvf;$}#W>5r_*iGSpWl2PNw_Lqh0Nu<*o4vyj%nst;S_;J?lKl9e(z+uwm zbN2T3=gysbNc|!tL{nb=apG%1l`*^AS%jY%L%Sc+r2cd z8H>c*#6+D8Gwtk7epXzMCz?@yT;Z_0{FN(LbacX7j^b~9+eo^B!vAHD_g_9<|FIJi zqa5}1{q?dxfR-5opl~B%4_#onqx4Ymoc4_yHz?S-xVShzF&*6Ydim0UiX+?h^6~M}dq*$V z$#T$p7eCQG8Mo~qe(zrK^HE;1js4KjcASqdtm^)gz^Mqb4kkLAGu-}CQNoe|@|gYk zR(Y?p9hsfJjzej6MX@m+CP`b=e@|^G>Jq>2)!CR~UU0MWRc>zXPI826Tbx3hqCxeW z%gV~ihs1gt%?q3t$4Wf!3Av926s2c)n`a%$6>)YN1rlJBxKZ%u9_@`&X}Cy?=ka`3$31SF{pEZmXuZz%hm^*ve~_SvY`xfMG{Ww_9OGVRxH4q!$|PcELEPuK z{Yb;=%i7RrU;oNZ1x?MkLcyn9>UYF!$qm!i?Wgt1&K8PEjjR!uhzN@F{qjT5c>O!q zJLT#g+HLz=zTE2m7Rc@{pF1+mFp^_Vp;!A(`Ou{eBvK)dLL1YEKR-e|X(Z+tM9ya1 z?sTnNM$4paCntYODn6qik=(SS@UHXxSSEOxiK|4!`WQ)60>s^MfH{KdFS+&vXgHa+1=Ei zjo^~#{>=Cv-*5f@Xf(b` z+maL_-@WVai-`yqwjKT!$TfqxG=FfI`3!1lxysNma>M}4oKX#^d(Bgc|y^0s} zbrH*xF|7+NGl;Py{`&~BtA@`$CmstCb)4=f_IAuL&$;d>$M|!sa0}JIdt~HT#h5qy znQ(GlFS6EWKSBg~-H**|{}dEY`lq=FQKvpZF7|yOLI? z!*s$}$<)-8LT@B*hUdtUrNxPg9^9*S?@zYnQO}$sH$L^&#;H=Ym(B!fRXPvYwn#U@y=}<}u>q6T-YT0K; z+wsqw(V{Jq6n>)9^~l|MjYR^FMqFlcw6m@4o#!65{&2R6JLjsNYB!!Uf@+|%o}B8E zi~zgscW0#0Ma3~}$?QJv~~Jdl{fgDT|y`1$iw$WK*O zRTzA+iSK+!arMJqO-E6II4{4jF51kwsQ)W8SbV-~kSDAm;F`QLaWb8Bl4J6I0Jtw@##GPhG+dp}7 z7KqI>S=A-!76dT;s;SvYN0(;WXcIR!I5amIo&v#jo*A>V?rFHG$2R4hVK0_BNiU%&QN$H+y9IajTv#Hd2o z>s#o#?EnuXDejR|w+vGXHd%baddpyBK5p>!P)z9i#!pM;yLYcVk2=cB%gf2hS$x4f zUOz>RA`%d;r?vQMNq@nWEtHg@wfCR+6ED3d-kX$GeEQ=BUUo4V%NpG)`}WOJg(*lT zcGJ_`rW%a?vrUVZ33i$9i_zSc zsGTFg#bs(JY=0;M$WdDJviIqG7Aj0yjIKlc`sK#=tfZN=xaqA{&)@u!D1N~UKRcb`0Yf@1E?lieE?AS-Q4z}^=$8<)!xbPJYS97mjSm?T9Bv(@*F3|h%E_{-I$ zLyB*139ISo=qM<>&)Ms87oSJb@2ccFd|1J0^2=dXKL&bw;;s&V*fl&yNl9sGWhF_0 z)r3?bYa4oguei9lpkPL7DnB5ApGtzpem2^ROS}{3Alm; znuXi}c=C`KtV@hU9zJ}iC4J)MDrx|T$jueW)5G7Ym`M4DH!jP>Yor-}ey|-`%&0E0 zSy@%PHp{BVniJXLSpf%c-ei?wg!3=p z!j-v*m5a9Y#7OXOTrLQAp8gFeElG7M>RO3!ssC=i8}2llwlONHs%A7_{vL`9aoYIc z=*kJBs;GDG-sye1zkiDp7V&s@nIuI7g2uEcE2S`HTyNBtKz9se;9{okMZ&$oO3 zbz^w?U)q7Nu(08s2$i$SD%tD#>CiE*Lj9X6Dk`$FvJ^lG?*N2}q=bxaWo3nh+TY(_ z6&CIPwY0SK+c!$~TP?X+>FELlw6_xp4-8B*s(wR2`|)W?)cyAvP96C^n$nquN%yoPys}Q%iJ~B5HM@9`*EU9WNET`iUZ>^ z-^sRO_j&t;OV5r*IxkEcecyy5sSb`_mO@ej^dVrPM~Cw#b@8p9EUfQ8d?NS$)amyl zLfmzdRlR0eBFfpknznd$E1;V|(E}iN>W*o1Kg5Z%4|t z%84&ocYXEmD@O}dIDKgJ@>c@!(MV)Y&&$h?7SJ0QxSN<$WM#>T%^6(3eqG}|dWqy~ zlukRwryI>Z|NM}wGiy{=k*A>&bwYbt2;9~s$gxK-W{TS$=ZQoriLNWscbMr;0XH^Zkg-UH;FW3zLgJXbmx3Lcbn_W7W@DrC5VW7W8A zO7{mZj(UoRp0fJmIFhHTsAyHVjy%+&X!FA02f6Im#HevlIw>m2%OAa3=(cYCrYK>I zI7ztd5g4-n8ZPR8Lzw@s5^1NA*rFQ#?|420#Dj?acvBXE2U{a0Jvn?&c6{fn`2PKS zQ^F|>*;Sd#iG>B|PH<$)+?0&8w9C?fuIp^OC%ZfS5e;swOpEp+51_luzBeJDG4dPr z0Tv2Zs{S1|pAAgUo+uXXyN*tL!6j~TX7e6 z&Iao^^nGD6XBN0sk7lEw>OOje7U7XxMR|FPi64RtbaWmx`C~rji;F$txK*dAT2<8~ zojk{J;?lofoV~l!``S+>Ud=xs0Kj#Zcd?Djr-GFwFd(d=4!7n~7sVC^t-i8*?xXbr zyb*9x!G-I?4~$VQ68M;!Eqrml$*MxTz-39+EYrHLibi5)xH*Gy1e6vaNQf4}zaILz 
zZ@6&eJy5zk%@gVh^Fyb`e;Y_j*|v6(QQAHVdcyBoXzqAy^U zCk^UQI%7pe1vNY1u$gTfAgI$S_@$?(pYE?U5j1Ez^!xYkDHg9y{vjbD!xTR9sC?NR z`~r>3^Bn`gzM+a>)Q%qq1n*Wbw*874_D$h@TijU8XO@dCj!AFd)*Fl1k2LoXSnKie z<=!+7aBHgS>c7AEFl8{x`KB7z8s}&(b#aRSp6;8s8EP=CPu8<_Z@P=dqsylQr$d$9 z$(ENwoVl!wiz@7w?>su}Lxf8>cKa|94qmI#dLjCj2aTVY%d+8?5mc>C_%{3~(|#V^}V?Y(ff)j#Nn z=6xDz8JP@5-5eWJib!P0#^!$E*17WZd#KutEv2o^{jDBxv9T8yMoP|e_)B$lcO$RH zzStJMob?$vpv#fV$r;7h>ueFV%X;a25xV@@r9(Dnz%8RhD>lgYZ^`}8kc{OqtnhB# ze%@#a3OtFCcpRmOf9Ljj7GP~{3ccmUIZYp4wd6hS7HxOQ->gN_698*8`dis^=l+Cf zF5j_Onx6s>AuGZvzC7Dm&9Dv0^3A!sD@j1!HuIBx=n-St`p}ri$cFt&)E2(+{aM#U zRbXHs#|7W`#6+>-%uc6abkC$ioh-8}FZhiVl$C3f^*#yN462FSHQ%Xgw(IoyU|8WY zkdV=kW|9G}5wl|M)X5}1U+Gu1i;lS77nWJxl#%=eUsloi&pYJ4mz8P0H@H|9bcB2x zqjd?3?Xxi~L^ma4OWQV1x$wnv-r;r~UPHbk=&e(y72k!JkxAjbLGO+NUM1B znc3|BA$3_b0y68=RE?syw>N+>wa93p3~!$GSVyt2+Df3RPS)W2GKZOAZ7r>)T>A+g z?d(vS20-J>ad_$WfQ$7j)-Ra*VJ<HR zIJ>5lYb9s*rr3>jc9jcA1Q}NbM zsVANYG@V`9$I#GF0M0J&>GWF3ap9jB!oZ!gZ_JRS5E!ikNd@^3ZiQR|LQ;Cnzd% z`jZ(bRwLV{+a&}o2d3b0Ae~0q;l|9{f4<}4%bg}GFMvjS8W9mO>_#H}e0D_r{l2;4 zGvq9(&2&VTY9900!r;p!Xf{3g+na$m5w)nBL_DbEUGr>Km)Z74vz#Mcf?I;M=w%1l z#>XYJwP$-m^ah`)>ezRG<4Oue9?a$=Bb~k$6c7-w*x}9U_5-hq3W8n?x|h-FvhYK& zH8eC#jMdL}q-6=r;xH!DwqT*Tgw>Jz)c%(8o5b`0F{h#aAZi5JIXIe@S3D9umks$j zI3{~{4`&N%IS{dl2MT@dCnl5tY2xKE;og_`P}TSPt7TjF*{G`JIZUYxEn(s8KBj+` zB2qKMETh>*6Hugog_1_%+41Y&WLdK#rzH`MBK1LPJf)b~qa zLMOJBf+Z>a`r`-Y%2|@aJa_Dijp{RT*JTwBYO&d?6zsgbBj7a|frM8>L{afWfF}`Q z(s+cakK}CbG!hx64UScjo=dhYfPopOP;MkCc!BlBm&g1}gIin{hBSS+b@MUH=m&;A zRETLFk={pZol)m+ai4}K zv+b`0G211yGOm5m!RZ>}bLwO0VYJhXYlAugIzep41=tP-B&C^OkNc?ILTv<*=*q%S zhAsuW)A-mlgX^Roh2F~2jNICph3@oQf85C6PJ>)fgF;4#Gjgu;)xDLk<%Ig8APa#S zWVZauu9@ii?NE$6jevQ}V%~`JNQLY21lVCwt3Tht_F4}$OaYjhf$5@XNB8L3?!f>! z`nYKyR7iJu6%~Q&-<~4qmKR!ATGEyt+Z;$Sd4)U|78s~J(ecQlCHd$3+7*o;DR)lj zev^IGoTBXniMCtVW)Q?@g#DlACro}lVinDHLGwOCtUERL+oWw>d5%b> zNF79anjd@g=n<27ZpsBeR`J=7)W96`Gb7@loV&EFn{#Zb*#Ttyg~%y)s{1JczFGZ7unKx2T$e-4^G_LkeQa76aa6D9=^;fl)Hy553q2TB zi^6pbxbwxi3D=pN*09@Ob_2;90T)0L=ognM7HyG(=}qGI0sPr1K^!ua6EjT`X{tKF`$s%ng|EQddm2fP80H=wFsh*L>;!h102 zUzL6#xf=pc)wq9U-x;8hfJTF#RV*=ML@!_Fp>-{gV3n1Wl%56$#|U0>_VajaL7v|S zoa%^DW1ijjCQl=kVr3>_^n115bZdvQvZNe^3UF)0uolYkt;=^gu_$Z?ejRB(gx~A8j}*p8X^1JJjPcERZ9j41gigKL z!QH!e1E^zRx!5pDgP3&ZF&&uWSiil4%BkPLAj;v(-LAA7-5itpac<1qLe^;gJMn<>!-HfS#m*VyWGA;QK?1%U z)*$~xF~+Pu`J-+Jlk^hzBxc!c_lc)n+>>u2%}!5!_$eH>?JM`>wm291`4=x3TikhI2=6bV03x(rie< z&gz!|rTWzDR}Q~;wUoW?M{Ne{!Q=bvI);CvSYXwO1rqA?Rk@%lvWQzFDDKWc97HG1 zNgmxVL%2hCpsLMqlRdbkrG**PfoDhA|LtRhpMjr}VR^{`=f5&?VRSeGd^#LR`69AR*LIABafQO~0jn z_`^S*B>#F@5W73XyE?z`p{MxV<$?8pappfN$EkSGfMWWWL$NB@-NDe*_z`8wJ-Ie? 
z0@UXg_EhuMe|g4Mhzm4DvtymQ4t>#Q_ufW&FPQqJ$?kr4J^8Ps2_$A;HgPfcpHOg= z2zkp*R9$+@;)yz~YG536y0BGr|M>$JA$nDn+BZ@d9267|;Mux7oq+^?Ev0^{PB-G) zEj(;8r;O2E^U7DnNYVt7qmdwLRwQBsY+4zyS|Gr+1LR)JXBXH1`~ZMw?>~k$u%na3 z>iaR!AF~6xD?+Zziya*u?2`AHokl-S$;_af3^(WKT9)o#ziF$~?W4F_eL!?}p(o8l zZ*Sh*(IV3l!fE0(H(t|&!E_VVm1c_sz$4jQW*jawuY>ad(W@BNy zo53?x7UV8y_ZW2S+KB`>@2{%soMzQi-nz0pL!22vdvc`JwK@AvEH%MimIHlJi%$Lo z@Mw@1W`_96O`n4yRZLd4#vSR}Hs68l2^>^a6)h9MF;JHz4v`RaPk-I>W7p1)-|HrB z{w@?wOloaSP1I~AbS4HZi;r}wTK*lK0=Ewx8AZ9x2hAf1S}zwdTKe${jsqA2@xU?M z`n1yuK^6huV0*{3@o&j<`Eawg3>L#xfX=Vg~$L|0vwMFGmbFM zXzBsn$#4o;^1s09Rjdz1jxac&X$p^sVDl>x21fd{P4H=8p!1J}CpXGNiGq&EEe{ih zeXAHKonZ6O4&VaGOy%mIq>S#abOp#sP-=X_tiCNnCN!u{k14SItY+;!)0OpLN#x-w=O!4cN%a4Qu(BC_}IK{?3??BRzj*f<{XLdTHKoqN0Tl~-= zf233=Cw*k0DGosMt?Y0**bdgGxLCl=#O@9;XEjYY=A%Uvpf5Vo+FWkc*<&r-RTy*x zkzymGcFfb57e+ac!9dH?Htk#t)DLrzZQ6&g2m!>F4&ia3lW~VJ+f9v!UZ;5&N z+vUfWbZy2h?V81ooU~qOr4M#jE_dQhP{WpJW&1DCbF0aNdj+npskj{!7K_=9kVfXs31y8HU~Yg^H=H zqmzG6G)8-^#8~x&*nt=o-`#wF%7sVb0c+S6e^c(1+<;3Z$|K&$ivmV7%CJbVW!LLeC<)k@B)K z6KMBA10asm&0AecW~QroqWykm+^p0`#VCEg^~q4)|D?LTco4DJ#ktb6vKXOudFq^} zt)oTZ#Q^f@g0Z)(=j^ixw&iD}WJYK>Q54pLBP!tW&vYIRY2y+BwCUoe5MX>%Qc4|qHFUrA zb<`x46Gi*8Yrg8qS3eTrG&j-1z`#Jg`?x__f$t#t5wt!9T~A~-ZQb>_qPLg#EvJs=ndmNRqDG=NRqrxy6z(EO0FXAY0ur~6sAILM*DD=&0-Q?J z%;1^Y9B?ErD$1bo9G2rn`_QBVwHw6CRoA9{|*^-bn+~0xj5)3Q1~F>*cwg6_;VnndONgj+x)W;m^m) z`%lDu6TEIs$z`pt|h=NHZ2k(`)K8Z+C*>w8<@J_hhD( zkaGE@Rs|Ze1}3Mod$&udA)lIE&eSHT6cJW7Km_=UpEDEuS7O$m7hRverqoltnUni~ z4ge?wQ5uO=-hz|rpdMn)C8(uzn7b}C0+RA(_r4-TtPp`CQ?ZPBs1Fel1?l=$9a~h? z@?!V8{{#e8dIJL7M%~6{8XOfQtQbPWpZy7m04DQyv9Wznk_gw+y%Vl;WxDri&Oh2! zNjS%x=l6-1%onbwUPOUnkvNH(1p+T`l_RehI!$*#|8@U7pyE3tDi6>HD^x&R4@6MJ8@k9 zZIgjAdPLko^aSit0fr~-Cs7v{alzguhos>0Wf!22C1d5Zv>vgg#cDQgCNZa3(~Rbz zj>rBx{7SZL*#g-`Q&Tg9Fle7SL#Q&CqM=$I+w1eziES9JJ^^p;%Fl6C^Fm2vNGF~>*|_+wYJ~0Wv7djRo)VA4v`XC8Nut$=m*q~SsTK+K|U zYI)@aHc?bfnj(uK09&iKFOjk4%K)Tv+Y?3owX+ z6M~B@aMj&vggO!N-9xgbruan(w@_ycF=^1QG+k10*pfEa@W|P4BxW z9Ra`yG)Oe<{F@dO(3S|#j*P2M27|jU<`{Yog;2m(WaJ~bStyD)Wr=ouX4n?!YjC4i-NUdy8#wd;p zpvU6kDFrhnWzM!1AK#+yp53M1mn-YIz#K^rob5DVJ>+0oV zdGG;vnw{a0poRcJoDj_nhG(DqPjqp%BLW-{I!K3bGKAk zTH{O)LZ8a&6RqqZFWmuP>f9tF@sn+P0SCdM_`IQHbKN|2Zyb)v58A6xp8svBaxnMCQu>CImG<0fJOrjtZVO#f`Z{y-}@Z5=+ z9S^O^f8U`L(UgJGp_-_b?_`g4p8Wwg+n9o)CX6(V9Vd-wB&Na36R;C=f|l)Kww1Fs zgoMGb{u~6yblyHQiI=N+UH!dycpQNoLyupVzIgE>K^3uqZ>ZC@)dnuFVnK+^`VrX^ z3BNri!i7txVWrqp3h(%i=)+kqVACM|s43LTe;KYH{EEYGnw~Kpn zzVQ$j-~Q$jbq^I+(%a0*Dw&>&h=LqGEoAT%wOfsbmEc>DYeBhyIY!MzF_6XQ(QEc} zcc+=RE~6HQqw-Nri#Sa7n(9gutArm89jtZ2!Yu}_fPq^rx!Kg#DIVMkJWw7qa0oy& zK{Hj*Yk_(&4O3Pf5;2z3On>bF6rDwMi5Xh$|FiygXtu`rGNlN+JD%%4Bn-!?{@V7Z z>)OX++n2rczKiO8n>^a_C%dVM@FKZ_Z(Fq|Q6PYSP zJ9E4r)GJY}a)duUW|x$pfYOO*(DW^U10AgrBtS)!U{D7-_SccZA^@YXLy8s5Iamj1 zW{ZV)*CQ1Shl$AV^(RJ?pezV&AB71=5KHoQ3Cm1>T#{MZpJ3KFh@9Yn^oYoyW)pM5 z{4|PoV9@FzY?cOa^6KPfUJeoXyX&OFrwf*b%rz6T;MxI%#f5T+-8d+O;^-u1 zG8uM6&Xob1yJT_{1c|7JO+Jxw1)~8G_k)hC@V$7xdp?5JZ0R~hdq%;MDPt$r_$yR7Yj}G8BGyZf$jKd$T1PfSIZy+&?y_@`SD0Du>TP5JKVQ!NKX#kO^wa-cbLVS%mg^m&1kr`O#c@!+~#&2M$X_ZxrNl@~9Jd~2F=;&q=# zci(#OX8*&#vaW1DyY9+qgRd=R!mh5NOtdee4{dM~ceSr@96nWXig{FI#J+stzNaTS zIeD4EdqsFM(8IH_Ht%@Lwb==Y7(NZC(MM5Xk&!_a&P2?3M@2;u`xpQ}cdz-Muviq^ z>Bbs9kGeSz&U&Wa>D`d@B_$;#!>svT!^;Q6t5@N|YI|DYr2D|^*+1a7Cp5Wt?k(Cuga_A}z0HD{|=&$*HMgjqp`0erPNW7&DlZ~l0W?vfk_%&LAWSlmyeqpatCHaO8PkA>}Qhuwb7y$wR zVpvC*JF&R{9THtZU{H{Xygcc2O!d+b4+ugyw65#vje&zu)*`w9ByVM{@sXMRsKOiE z#xM45-0Od{A=QXTpX&V4GD!vU}me*-2Xm`%2q5GBsM4e`T6<#Ux|K$E3%3?WmXlre&8j+;)Y=kl8(OofS1C{~$U;AV{i1gZ2&e|tx{vmvg@wh98-F9Yi_jeBu4g!Awx3oF;sQYQ4@WbJ)K*08njssKk6Fn8;9XW>%9kOfd0P9*bB57$k&gYdWv`bn( 
z-F4z|`j#FGjxW{K9~OtG_7bmmJXqDrhXZGqg$En&J*;i$Dc3L6CxJCid?EF%Gg|A92O_$d3&$_iJnBdOf${+&QQaN$f=~7I92M?w{FGjBrz-< z`|?^kT3Sk~(6zH?CY4CyAej0}{62oAFqb{9)Y)rIA@tB^U>(@Sf)|qeRv)FXwss25 zdFWBSzxpTSQfwKVmi7d9-ryeIM@IB@KoRc_JkiN>P}kIq5Ooy5vV9xj3L>yf6V}kJ zZ3HJdoYiyKWgwzpZ_CRw>q*!YjgnRL*s^;s2M3qS;>-fsOZkJLVPOGJp6KiAJ2^XZ z325ioY%6qa_qs78*QThbxQ?QDf^M^nyWyZ~G+3;H#c>%4iK6+bfeVjz=^;5idGa1K zH@3?l(y`ZWFtRnratHTBKuQ3c>N)ScLc|ex6~T z+vXz@`Iuu&V0`?u{@7pVyoo(1``PPA3b8xgE**&bOI54?!8qUC)ihesV_*_#y;zsL z<@};^nSu?c4uM;Q@EJ2{bf`euUL_*CATRKhtL%J9uSu@_iVso*3rJsRXdgd*Y*4&E zSGu&gc%vd*TWqT%9nHI5NPP4(G|8?jE+!@e*hdwdL21slWk*pkd{d6rlq&uoK77c? zuti`4aTh>_4Ca-Sk!j3xv@JfdvXs9;)L{~}t>peTCZaodSO)Vf#Y@MTVZXFA!^mmQ zQLt@x^$VNdym>yiB_p)`UJNMg3M+m_R`C;CADM0g(gBDVm$~Fq^ZYL)vn~|0f|2dHbj-DPNqt{Yoc^ z{BcOgLWXPX*}cT>e1b}FoW6DKTIcP~FQwh%$WIUxR2jpDQ>H;4`q>YHH7Nau$%U!n>G7mD5(XzdCxa=w2s1rFe1yOVB8? zOJ!vx3AIW@RMf-Q4ILtKu_3Klnx%zBJv^lj4i1)Yfj_#7cU#a$w6A zZz7$8?9_N~OM<5df)1^YVv?FT^;O(D33Z{GV3+b7vT||=r44klImW;ZK-dZGo@>q$ zKngPP#b|}gp(iV0w8B1<#>NFK1$g#c|8hn(0I>EsUyUsAu$Zj7zb>iZdKtZk#4&6m zg%J9*x`=o<}aeNq<^#HZ4^Kz+!TTIn5of^+4F`8xlJ;!D@A?{un7nV6p6nR`?Dg3 zZYj8Iq&bIkvZu0uGoaMH!ye$E!muyuLV$EMtyfpd8`;PUiU)T}w9~B|6+35{rIl2t z`q7~Hjyt4DGoCv_Vbam>hAoV#qZQ^dGZZE%iV}9Ru56l8zp7VEoNyC0>mp28Agx{^ z<`A4B8zM$Wp+(jla|+Jbcyzzf1vh!EGMq z;Tit@p$C;qL_`D=F|lD`p4ff*{dvIS$D^u(6L3qNM_qu6kA;;LV5T174|(#4TJq^# zYzYls#?G|kP@le(lX&q!_WJuI4#gx>gWo#$J z4~mQHK09*yZO+@bh#L|>kn%u|_$sV=&@285Nd{dbsp!)ut|JUO`T#l0y3UnA1iP3Q zK9VO$r1_ZrBHGf_}?GBVqVgLWq~w#km)7=`VK& z>k{{vZ-6eEL@c+C&ztpb9q~DD1@03};1zCqY8Ii?Z+`dJo}vG3gE*zA8+IRl4;NiP zCl<`BYhz{Q?dPW&s|2$(b9T;8vK?@`}-Iv&i<<*OhIB8^YHH;O(ZXWdFFz`kFwa zuC+feo7^%rjT7GR?&9qK=2a{m2<>z)C!)Mfb<``f&X()`&h-6G}#(m zd6R7Ydbx0s=i%XI1_m|wst(W_Cr_SKvejCf&^z0A*g0e(m0S!s(41@k!KoB`#fY!} zfP8m3LQH!ft@5>7e;-PNLmvR>=Ylf;1$y&0C2uZ;s_N6HZHwy4h08>T!oW}gBTx3V zwUzcx^I2iFB>c6U;^J%^7p`?^9(1j5d9{CD#LqScyBY1Vi1PFEYZ*D#!l=3E%;4Mi zz!DJ23ybN@WhIcPH*GsMeIh|62YjOOL*LbKdh4rCCKm|xVBP7GavJf$@!l%BLI~aO z1P-o7(W`9V%_7P{!NKIDpdf1V^!H>a>Y5&FVYQujVmIV2l7XRU?kT<CXQh z-h`WKC}EKRXAweoC|4R4@|&5Vu-xl|!er+}NA2={NX#op1$Q&y> zJ3A?fL; z!-2}Viq9{pEK}#{to*!_2u2is)(8=M?8L3EsYyk8CTQuC7B8jty(^M30Zb=59Vy>y zo!##n9YiC0h?J$Jvur*e9HkJ8Bj&}0)70j4&xO~5u>7X{UitfA(2p?bWhK9bu>Sx^?Ro>1}QQ;^JaIRm^m9T3SL7Q&xj? 
zW*`~9=R*UqHSc46LxYMr>8zjkNMYR-)Y%0XIQH+Yyc@}zU!9#5EwS$O0s0w}-tEU7 z-sX7BfjuI~o@PDT)o1lzw_{caU4tSFsq00VtZi<;t1&Q*T6}qpiG&(gyPa`uNJKY~ zr^LNBYsuZ*_Hj^q54WF>PeSG>w)caOlr%8tLeq;TcyVdzf-kd?zP`f7%C+-vCr%IA zg0$21SVxA!;f?VB9B@*KRYDGG`d}zGM~U-;{%r&-Mz2^V8zv;WV7FEn`-KjS-gz1j zu)~e%R2H?VPigtOwvLXyPEOD+$jHd9ns};~yfPKu71CC2%1j|8zMUWH1nh}Op33T7 zf3MFSt#^}NXWNeO_)FPbX#VPQRZ3ZOv@e>11Rie#4J|D#4GoF>!s3X@R)@{9<;kgM zKhV$K+}`387Zue77WVD^^=oVT-L+>J-s$OS_(}0?6~EiqT(cA{T$jUX({9YCJbt~| zYa1iK9Vkl@q&#ZUflGg--RVwvbZq#Jq)wL}(FHnv)kdr$)I&O)BE&I&AB3&N= zb7NyQ&$Ub!z8{~i?y{UCgoW?VZi73YoV2#Gn!u@mZIrc2%FE$<+uUbl;Zz@LWvjTZ zzOhmDa^c@&OTWYJFQvn#*odQ>X46--pPPGY0n3*Ga}~3)ijv=a=mLSFDg@51=EPS* ze)}gU1>p=zQaYUC$SEPa7#o>P^8WKW^XB(w#{yX!)__T+VhQKyq@b3zHg<%Hnl|t+ zb!?=V{e6_$slzn%q;Jt*xHnJ-|5gQ;Pz75dA4Y|RHKX0zvA1aZb{_7hr3SCR_-g0c zr9f?*y1$l>l^BlRjd6{_i3#=0x=+EtzyR3#!v`yL6(?<2R%882AS%Vm)3bXCk#b?0 zI)vtL@D+aS-}3tU|NVL0Ej4@n%NK{yL+snPZ~y*tJi&sH<#h{O09b2TcCI~3Lh*)h zR!w8$er$u04Hr?rP3*f`b*jKRy4U}0HW|n2?7S<+D0Y@&8`4ROHc-B8e|*=iZ~t@m zy@FP@8trT&+9I^c=rUECkFBljS_u^MF>dUof)Lc(vV+ZK!}IUSH3j54%#WT|<5p@4 zTvxC&Q^n-bTDYXqkf*#Zc?_=wC=@ldtG~!+KG@!kVK{K9s?QoO+N3wVYf)*;hx)rK zQ7h|}N?c{)`bR!&RZXus-G2W3`A?WY1sLqd&dEtmx^;`S<<ShoWqi<&$9O& z2Oj2H^85E>ZcR;|f(#62qB{&M*Y2W%4ESU#0w`EInj+85@jA89^3pyxcu1A*sQn$+ z0O1Z>F^cf~o;IWUmE@ta5R?A7adzCwUy2{r?tcO$flhwQNjouYtijFI6t>c^c`}f{ zc5wz~t2)QL^|bo$&ldwA)ocFyK|3iV#Cp==Du@z##6DUSQZW2xBuEKY zueQNxLjyKfOKY&Ljqq6uwmN+Pds54CU@bI;K5htK4ul^VTjE$xS$-Z>kNxiBi&qQ6 z{vjO~{O*bgm8-{YuVuw#>)OL&>&JYGj6wBM=p&nOvGvZ+x1)XE-@@nxa4DV-d_!sqPi?WG`Hyy&X&uDX4OCSJqW*^vZWzyT0G zLa+WFc4dT4z(D0|a?*xHC)X=gEV*-x^yMYyp8kQG_Z&T%3^@aGC9!AggvZ6zlPfV1 z=N7c=k^Nm&d*=0-8=G1}9?qVeJ@M(){#i4@{1)Ggn8Kz-EIwinAuL)IVZt$W z198F@YtQw^nDwDPhkWBk5{5(- zE+pgH!>X!v5NtrS3mVsat*<|M;J_Q;hoYiaz*DeW?eIEV>!h?c0xK7Z88iZkEHD2P z`+MXT9uD6fpo+r6 zj;ty@L1O8rAaqM*4zZip67tTjKA)QHWPPf%S*$N9EJaB!@9X=qORIgql#`QS4k zM|{F<5sK%#dw(@Q&SPNE`(wS2^HU@FYaI_i>Q{-^ni;#g-^9eQAv=%E40S#Fb7T*F z=i+aC(ZML zHdivQ4XQ=S2HR_lQ%(GID%$I>fL)gG>X|>CRMM{dQvT}qlc_hoX5O5M`IN&~=}MB8 z8ExS=F#PnDA}qVGa&z&yla9D|K9K2z4;8Yw<796;q)A#Y*x%tLw;QZK7A9->;NxpC z9pbMuGLHN-=gr@w7tE4Y6)J(<94HMXbloUhr!Q^ z!5|F|4Z+=W65sgr&eenac^xuF4jzkY!leo|`C9V2^i?@>X) zx`C;?ckhC7J{)*hRCEad8LV%S5d6WC_qRR=pXlLHd@pRHL!Q`)6KPRVpUcb3tE%4X zD?&|CQ&R&Ih|{59n3$Ss_vIW8e5k#Cb@`qks+LfcLLc09>G(o0ZXGu{m5<-IlEx?6 zIvzG@k&-{~#lJv9o^C^WS=V`U&u9JI8L@q|H*3`L$c#_C$@2`>`4ZcD`_N9CX|;^) zHa5G)j(^tY*7XTWNY*=2W1K6nZoSS1w+aEMMK3Z6U+qs${Y%!BC=Zll zZeHH?E;R&KE51VnyWL43stcjY;+xudc`Lx*S5+y*RCjlE^;!#IuU)JX<>}Xz6f19! 
z9}et?umihLX<6BhmW1A_=rYL?;jd+7gjp7=%>5eNqB+=Bs(fFXn9a#YC>Sw+hz$~V z`iTzU*fd>c&icYW#nCCg++7j6SJmX+a`Jv9cUyEaxbf0IhCQOdZo92wW&U~{hux&< zl?^05!%xL@F>+XN&5kdxbCD=SsZ(~{(*@pP-mtt$MuuBkuYUH+os+vr*LG}W^ZI2< z!{lwA(XOKoMeoX$?W9~Gs!#W?^azeX?gxo@XN>g4b{uL zuIMI}{94hQ;pyGBIi0F-w(i)ieNUxtwYa(4>9wQZ+v9(Ad?&i54E?bd{+nr|CiitJ zKIXIXgnsgG#STVTB0nr}%Dz?nTK?G&NwjM_~E9pjWosl zQi*;)r^l+o)ztde*}o2mJ(ivn)bHpdUL%61PQ8}Apr+0)B+cTWhdv#uKvt` ztI$Dc1}|Uk(R(fh0pa-JLx;ADX{f2)BU zxa%@@07Dr~PfrKGJT@`0o@VDxe(V5&7g{B^1zUZ0QhI3dnAg0mglhv^evbqba$hFC znTcKF?xh3J34_bJA-y0vSov;Tf@a5->j00HADg(27^6N6Mok_jvtxlzpMEuX|L`3B zY7&s!h$LYARJt4LJ^b{4p}8{Jn?zPvA^ytw!t9BASsPy^aquz(soZSZ;OYADd3Ep4 z&C&ZwS8wUpf1Z*ejgG7%If(8}DrzD#ZCqD4WJkLByW!{4=bL3@o@tU!tL?HDvK}$| z+LCm?^C~GmA?5ZKEiH=^WV)^!6X<#KwtA2`+C)}N*mYh$>n@paz9;d19z?>Bl+meA zC1sCk{N5i=9kCNnHc!{Dn>l)-xW10YVO_AqvD?SbKO%EWSdlEONPhoC)4cQhp+y(k z9h{xrdO!P;El+*>HaZ`&i;s#lJ^1+ zDPa&LCKwYHwL!N0sKA-U1TZ#I*8waBG`EMwZ>cw z+eOfQ|A4j*B!e`EZH?iC2xWqeoLP{$AYs+^O`-H@XGbQ}v^skuYax;NY&uuG8ZvY zFs3*a{zs@J)}TVES(^9o^YfoLp&Y9OP*qb_Ci8#M^wnWerr+N~H_|QLCEeZ9AR-`L z0*bUCy>yp!HzJLsbcl3{q@)NcNQodKyoc}adgqVbYgwIPW}fFh=X~lI(GnA%g8hnC z@`a{PRsYHsR;j zV@wu;s~)a@NdHb;yG7S#f{5WF&W4`-Hrocu%Erb92;1|;+>qa{4d9^66r%;oM$dCK z_+VXiYzGx=w+`%mO)Q_FCt{GEIQkcYj38K>M|!K0=g5Ik!1Llhby2dklG;c}*J zK3+0LZ0uWK`0SA*GJlD{5xy}OKmD5Dh729Qzxo_uIXpf37uoeUE&^~FA}sZo|71DQ z5T)go=h^@w&E4DBh^p%j&Js80rIQW16Q@=0h?AQO^)W7v&j%BsWZHLtD)iK9c+IBx zc!ykSk=54O{Wo3KwE3RUyf$tAco1VqnalvUh zp}u-wDN@_$f?qpN$sXOLX|R}uAhzLz*w(ht8$54!R1@jhsLlQzU*;fOg=v0#l4E`U z(R*`+gxDgoSy;sRzck@mW$RP);T@kKd|&Cvd9I|KP{8kZ73s{8{`>!003(yFW_~!( zPe|ivV^RJ#oCVc4^{0M2I{(m6V$?;1pb`qR5H#(I2@`8J!Fw#}AT&G{Ud9rU7*aw0 zsanaGYX4xSf^K!yl?lD)E_5fi!{EOk<;iTRDs3-F83~uYluIMwNF3J0PGBZ{~iXa+Zt?>Gcfn zEiL{1I}NKQKpi$b{JKp6PIgd9K5#_F{6CTtu6^u0LJ(B}(sWnYDHZH>!JwDy?d=60 zLeW(bKvke2cH`did(VDTJx9A}?<`k!gJEC`y!n&bo zmNuwUp&-}xV(;gFnGS;=isTj9Nf5e*P7X2M_z1IiNOD`wt8vOqI=D2bSNv+-eChe& z^>xn%T4#SE%MGX?EFIAhl;0xCH@2r8jz%fk{OH9gorJSRLXXfd4%uLoTy$+~qrXR9 zR(9lsyD4J!T7GY4u0FY|qiDfq)DuRDB+FGU#UApFEo{UiCVDdJuPYil7{Ql8QUg+6 zM2MlD=X-{jQpkdW9xG*wSc7DFs7cqQK}yzef`Kk3`8@-_(@fo|&0w+#>9!G-4yNOU$3b_ikLAu1U=9UuqTU z)fUq5aG#eH<(e459hI5C@f!uKo+Bk={&{&}No#7Vryn;Z8>$qN273ospuT?fVP-2) z)`$y2X=%xnL0d9*^;CMBj(J6D|JA4qKRyQ`XHKhq1EUjQ8)gs#w)5jLuj)Ci>#3YR z<793^e7vycL;5tUSzBBxV-P@%g^;ulpxNc#6 zy}FVkCbno{U>+~;9sM4AfId5(p%RY3CM;0dNdJwMRg^si6U49C&RwI(KTI-F0dnLZjSDrSi+6mbaS>YtdX zsjt`NCiC<41@+gzXtwqXE*QG*>*$CI3p+!P+rxc6{o@Q&b5Q94OG#BPi>DR^b6V1o z`S@H~K>;uhCJh=t8bUhVcWx&jH@Ak85*yG`gP!vyKn-HU;9bBal(;AFN9HN<8PHic zp%3==!|)p`K&Q0Y<@cy$2EL^m)yq^3L zN^CUWb0qq4pF4H3YlD+hkXIhc8%& zV>jK(>F&_Y*WoFsY)=sE+`aQv^>vXlQz&x!M2LZxWG6Hl8j5pEg3~|v9Qe4o5gWfa z)a)pv$q>A`ipG@Y6qt#ZhaM?q)wq)0a;yW{XRhbHTZxE`8gDqsuKZE& zhL(}aTw!9fG`?;44!5*4ISl`UV+7I1BkDfFLqGG_Ir^qIBldyE@xSIE!PfM6BG{4VWe+#|%8ElB7bMMPndM=!|GI9moy>yCvM#kG`SbK|A$dwKK75lOTF?8%I)d zwbM8$+Z`0a;KEt&X_0&pGK={{``bg#W;Ij`+0-u8KP0exWs77TKisb5cH-09( zeL_H-2XUQTxZ)fH(N;DkoILHI*96Vq;XRibq5)N_F?(hwSvjg)pP($YfcVs))C2gL?q2ndLBL3gwG-(3~0mCyK(o`S;2TmGXsKj^g{@aiM zRTUttuiFLzcKh(*1JKeiE)5JQ))?ZveYCu|NJ2(7G&l(TOIA)!M@Prl&IyEM=V|Y5 zZq5L}l%<)TmE~w}|LM&`E~_>S&>Mi@5ac$>iy`&LKpbM#fJzLoi|6az|3dWu%Ym4P zZTP}KFAi)aupX^m zJvn?K&|(CwYy)?AFHL>yuhK`SGZ4+*`pTeerPbNk+6qCK8^{7v3;5I2%jUj?;R|ez zfSgZUpnI;E0@N7nGw{vbHXg@^<1hsPY8Z6+jjgC2%ofnizX1#9 z$jG-kUaU0r+v^|56ev+3*Dad+4P~D*4VH{m1G)}V^ghIul-&6}1M+wvbl42cn&D!D zi5;-SF~}Z-IU0iJERGLv)l{0SbKgAS@&oF|-OUZII6Za}z?VRC4%0BKJ9q(<#L>~T zgAjaI*x`&otH5Uqs3HeDJB<0aw9#!>()t{ao`Jy%E-!_QhO#m{=mjhS&gRu+bs2p> zK_max&e@qqiVwAL;fJDv0?5;diHIOt4o( 
z^8)_dT$Y!^oT`FXIew%m^Zi}X{i<*^+dV1*k%uywJx5*)s_fjTh*(+HB;rftuw%5@ z*}{b)J*I(6fEd+E5lMM?@#xQgMjYUL#?FsNDr_QY7k549^CZJ`>~VQ8@cUl40%Kn< zITHOok-GYH$DLK^x`*(hl=c>}5VdCE+L4oHxSz2j@!>BHs1~o|v{A-ZYc@Tz+k4-} zInI5NYxQ@fMWOBk#_7g73Ikg(_9B59xr))&WPGC*iRhAhxX)h~60{#L>^cNuvnMIbTz}$VWEg z7MPDjs~^XIBpFzZspP=yz@@X`Hbo}3i3>pEBt`EE6J=$T!9VQ5i8`7kz@-W6l=2tD zuq&%?i3n#Q+S>dUUdCnVk~l$4K#b>s(Wa@k7&_ZEh&1vWQ^&iK4kp z6K3D@zSTcsLHHy09hRtGt)T~xVD4K{fBCE6Cp%}S6$ukoJs0C6C+Ilcer8bHOf)HTxy#2rYbvUm_AnBkeZsD)qmQ& zs=KvO9nmYTn_uD{eX#sYm6>gILNY)axJmPMDqID{g^roExS9k=Ee&v9_xgHt=j+&rRWj^S8@o15<%ujw!<{;At0z>G6*MWwn3`~Xl+O;0xm zbut|6KBcl(&P#WQCoVf5A@yA|Z(0_=d*DHuye`mCdZpwWxE<5=w@8nSRxL zc4nrJudjr-c=Y=%$kK#L8*?vh5KB6LN2-g@Q;h1RLRr6#?1t?TbSfZwe**fhogF(H zo01s^2Glzj+&yU*`nirVtKzfM)+Ho`y zy7@^+JYQQgXML5Gon2m52F7PnGBRw(aVXlxa5cZa-UDJ@Xalam zQ2;Ox%mtNnrTP#3uK*i`pnQH1oYvR#4M=hmVU;%K{*OGplm03!v# zG&0v0Hpyxr)G3D!1$#%FD7qA?IAF*J3v~tc_E0)Nj0!nsV8*!Q3h%}OZW4k4x*)F& za8z;s3qHUpT?sk^5F5+$vo@*#9d)_D-i6N{6%`e#%F0-InJ_-Px3BWyO%#}cqZar# zv$yu~U0neXu@9HpGiH;PkOqFDtp*nzPft&H%OC+Erlfock-;aibMRd(EG)qOk|JGj zBtpq%fsTTe6&)Sj9-B0XG%KrbsD)2-7rejljq~&KH$ZM0#Cx2dd&9OBc6QJ%I)lFg z-ddOiW2J#F7$w202k0CmdR|^$zH4iPJyy4we~^=;Q!FW zvppZO?)L%m4Vc%O9FQ6%4@|e?m^7=H^Y-&H?Kd)nn>S)Mh)x$W{A6al5CgJ$lOgel zhFWgQT#PWxrKMQwF$Lx3L@NPd!h@decsQ-`5$lJb$kt`&^GD^iP+GK$yX(BD=EMnz zZZe@sQ-7K6)eDMyxM6wHh}HJc^cn`DqNk!)zL-Ed+;^dV&Z?a^7);KProgHAGPz#2 z;N_3RM=!d%+%8Eb^I!vs!*>j7>M(P;3(_VBEDVg&&%$3!6z(YW(S`nz6o(>iZ8Kd& z9M--*=0Z*-G}!(Y%7V^4179)SDI~;mw%%5b(xAX)3%&QV&#%M}7Jek>;c0MXW>RV6 z1fO7c6*CT#Mg+!m%ASj1hX4DAfe7l%rdiqYv9YlY8c)c5f zToK8#Hx_Cko9f;$0=L41B2>3xAWvy=nQ}-8vj-W`l`qKr>g7v5(3W7@K4H2*efEqm zh#^%S-YsIyA4hw#jVNMP|KgnQYA{GTte6p(HiR65X`kpwH4Qbmuw?qj(Wrx%7;^7| z1;agZ6b!2&ZkXi99mT|~|FN>iyMFg(PxU##Ltx%Pv1AX|t#oB0Owf)9o||+1B5Gt} zbtaMdcw-Cg${ishV$?zS^(%!0#aA+;ZtSmQ7lD%Q?tox-N)jVPY9dua&v!S%|6}Lv zk8@<_dcSx~Axx!c5n`H9HfH6LnA5O$BgoBjymYhi1vGO-GnU^yaVaBGtgDUCYse0} z60cGCZ0Wza!^CXOUu%jU4Cr5i?}t`)?!ly@%u8NR2`IcQEEJNo+v~ky#|W652e5SR z52cPkuT(|NOth-qD>=8JN9QXoQXQ@mpY@i1LphbiW}Kab)cS~^QyfdO`~5to(DBgh zj#_7Y)?xn$<>PK8Yh)p+CMV}M&X|~be~u;-crP_8*5hgymqq@o|Lw7l=O)bRxB2xh zUfk?=J@HAMU;cfQlci}t>&HT1qM(3aiYy5~>m*iE`zvGKc&d zxxG?3U(9bwYb&Ks?62v53NzHV{ye{-oqD_eZ=Y=eZ^2*9g; zh2nSFi8m^)1i`NaMMN&g$H69;nMon%S+~C$?)><5mCdz+N@CfYm{1Hp{lRTp)=Y-KgOjlFCD&d5;VGLHvfw*1)_blmeiqgT^O`Ny-`AWOBag<-AT!g} z#{cxm{{kx^VLT^3oMru5j#xBMQfpdqHRr7tMnb}B=j8i!qzL?yM|25yHyvqOTHd$lXR%E?OO5q4zGpwNTx}L&$!-or&j(XKI!1qn_sl-{IJY8*{=nHuLQ8Rn|ba%B& zs|Tt2aiW5}mvDi<+O#4U>RR=_t7 z^nBsmH&+jvl}B3v)U-oIDEf2h!1ZEPkY?eMlY_apbe+%-V^nxcn$M57yL)(N|0bF% z15j#sxOrMH(8^;x6{3+D_ znb~@J33YToM6Im)M$5i@om~z^i*@*%?VWylMTwt)dp99EyF~VZaiu7KS?HZ`G-g(C z<62sWw2M>W{hTm<7owe)Jl6gqoUVJ0lmzoEJsNB7$xOvPvUIqP{u6~LYKw!o)2XfS z7q|#P!MBjI-VGTyH_ypx;%6vsT8^E{ENp~^dSpOPmcpU|92D#!c?X0WkakNeD;_tr zIkKeeqHH;47H^%}r{f=`dK_Rs2`XtaV12a&i2Iv)sQd0+`CwF>{QcoDI!N7*E-wD1 zD@+W*`CZ>WW~JJu-`mtoREX9-Vjg&S*Z{%_3vmw}GnEOzNqxN*T5 zgQ2VFuC8*d#O?^Wl&>Pr=$1|qIn<6*BWf6-qE?0xVzJth%ZR@$ZjN1RQ>(cr*!=#< zn|ofr4(=UYy)pu^UQst^AH77HT!)g18m?NvaZM`ew~uIr@3J3@@(bG>>j!suBg#7? zvc%EQQlB~;{Krh0_F{H&vR5PK4*ed3?_DO<=zN?fQwZ)LjPtKj*vb_))0W3I` z2|2$nH%4H2H3?H$=E$+meXYKZnqd9iNqwW?LWXLiaD~1l!OTpc_h0;0(BVIaL6W`B zhd(G-wpL%i|0o#_b~nN!g;c%AV?$>Kh5}D`|Kg|Q#U~RUc5;1Tlk|p5;*`w)FAAIX z_FVETk*z!boxgYlT+(8gOiVLfCwiEqJHLM1ixWhiOwLzn{rT(~IXkNIGnH z<&NE+D@yLl8k}5GS*2KGzpPnQXj~s9OjoGy_JyFleE%|`-R0zhkW=`umP`W*s=SD1>`fK}aJOD+~FHn4hJcpyMP)F9VhRjZw;(N|eJoy7& z>3AsWIT|-e`{G`I}A$Fgv8|C$l~Ra1#DGCrQ? 
zUH0BE=CpW5<{M93p;yU%uR`NdLc!N&$3Ydy#hi3mx)8q`Abw80v*UR%`Q}`=GHJy4 z8b30j!J2f*SV}{gbzlysorZ~pTJ-dNhJnG+6=jXIn17sk7YBFmhm1r~OPjIEZ2-A8 zM>cJ^ACKg%kdV#RNPY;i+TJZK*L|$v9rWVo*N(0&<%3VD$HZuaBK9;rxtl-C_B;ze zOeUn)Aj&oEt`#)3DyzTb+0lq==B0m~K=Swhb4h(Ap=Z)R`#ubHb7MHuO_oSse|-Y0R%tSf{g4Z#0FkAW%h^Arlg9MeGjGUt zc^eN2P{J+ME&uiea0eRFMmGAAiZ6K{_BEy{{1Nu$?#V5>k>liab+h`QZC;>i$71gM z=b8;2Z>T?Ig1X%`|DA-MOp8yYk4tJ=<1_w8_iP+T7O}SSq`V}0a;=NLrna1qV>XyQMO>dYIve(9TB+rp^s^C5wW-|;9LFC zRkdzm`;$Rxd!+$f!6j^Htz0}0@9)FM^!JXSBX-ZfnDRJz#YMm1H|o{ZixG7F(T{!4@L&z zqEeWkH`&{YhG+airfG!`#;~;+5V#zxt=jRs^AwO)$(G}9Y(_>&xq}r2g);t( zjE=_cmuo9kz!QHEk-64=7W^VW%Z!JN3^)}p(!*MMfm~W?N+G3-DmrncSFCXbYIbNV zJsZZqWlah#8M2_D|Qa6 zk;kj=?sxqDvJ>OqXu&3NnUXvBu18N_;ZVllp__v&OufTja;2G0Hl_U@JRTddL41D4 z^vdH~@e#?hwUkDI6r#Rh#~*mTERw-bA0d^#_#uA>)!Wh<8{MiW?A;G8w9>m%p?-+| z*xp_oT$;E_j@>CDs&~?8TtV2V{oE1VF zguxq;mU}()4N|q%b=+dG=N(C4cIJn}igvF_Ws}t~pPa6^eKEIboP9fbVN=*FQFbeV zn%tXoIx&jEcT{-^m+wB6DlzuEx$v%%X{guN6P!?f6L?3ZUR^fAXG13EDKrmX1qUYw z^L^KfNrcEJ#F^K=RU%SEJObs-JwbNEpKGjKuV|&SL<@=;W3U|$7)s9^r@{laA9Xma zBqY&m=k?CUeD)jkNWZ$*WP*)d-%Q+^m5+~Egy;H$wf8D~ct_!6Zis#~X)m9!1Sx+| z-%P9ZXNbqhEn)XvvN9IjyS#0Q!5)aG zvV*fR80Q>ZHb=Th56T|)I0@T-EtQ5Y*3|UE2c_IT4At!z`8WQ)wA^m($4|3Cg@xaF zb5-khYu-LS-1zx9RDI(2UH?nU z=H{?SIG#nf3YP?2H zbK>Ib;~}ly!La!%ACiz9>VKCgad+=w4nn{E=tGDyG#~b?t??YTCpZ62|#&-Xv;EIi-y)GUy%lleG*{DUJ^UF7-D zOftqfQn|J&uWH5icpW>*v&FD@oD%dD$+;6GbYXpdMZ zciUq(;hCS;%laUqOnYP-tGcES-5)x(jR5(+(T$Bq_fc81OwuKCG25 z0(pX_*3vOvV4m`SvyG%7um63+P*8;P1tpSSI6x1XRSVn*?>x7Mu~xHn%_ldCxP7T@ zYyvM5a-X5&+aCsAp6xDJB#jxj=EvtiN4T2Xn!)tSjY~H!wrTCo&yuFFV`Af;zdPIile{~HiYNNlOFyOEml8uYP-=MbSW4r4mpIqDa?!@P z+2A{}tkQUS34!Ptxj%vs+(uFA@hSuJTt-GGn;$YWU3Z97wH{evff#HHxgZ9WYTcJrIMghVROYS|1@7#LydsP#C_ul8SiFdl|ibY zJ&mdkpEZfMO9`L#nF;<~(odaqE*p90Nn95@r4+F>76Kml56X{Qy=bID{7!GvbHEEO zqyn?njl2-@#n6Q}E@UkME)C=1pWCQqfJ?zjc6O@|20RnM=mJO-1}GZjg14KQ0Lm3< z(suLk_zUY;!MC5Co7)K*c8|1}SaKg9I{Kejf)x#p(mi?Q&9LtH(Ie`IhD{DXWnlds zFpXwizTg#c2Sgrpl%*BKHwMP$YcSyhoeJ_ISas_5i?42KBl3`)t3<~lRXrwFDdNv0 zPr0`BKZ&1fMb3V4annvCBPw+um0xXVc~P)w#r?Q{;BTi$ULeZwsP%h&duT3 zyLiKNuo6uadao5*_A1ZfcQ?_HRo2ijR)RM z%PzR%aY6q{1eZ?UjqvFj3zHc|e^5v4I|&IdI5K*v`=?y4l)ktJUj8?XX=-|W#w4AG z^0#;6=-#(Avz(31`|d=K>JYfp3CV;X5}28NM{~qT%unAR))-e^8N-QP`Nry<)rwVu_zEP|Cy;3 z8jJ>hjHO^{4rwxac7XVP_k#Ic$x#R#$p7^8zdz=T;^Xi87`IAuoE7C7{9h=Vyyf%% z$~AKh-qCu5{-l?rZZ{<{?(7#+BZ+SM^ZekyEOD#E;VBdpG^Q@%>^3VW6@%2?Bcc{M z{*(9Z--Yqde-|rZTvMsOD31PFsJN z&ogJ`fDa|Q!3+a~P2xdhmfzf0SC#H-NZ%(R#+z>Kx#(}&h1Wg#lEGWN!0%1V^Ef#> z#9UR?;+Pz79CKDTFNI#EK5uJVp}R7`YtVJ0cp(y@cT**M0fX=PKwm`vzMOVL$p6kMe4FI zUZLIfAAFWuu97RL@TT8`k5QDwF?-pIb^!8 zd~~Y`brdlMBhU3J&G&>${VLB7`SfEn-Q9`Y8k(FY_S-Fu9?a?q3+IYO=#P-EI3CC= zQBYtzZxs~JOt)9j6DX6X86q5~IJisAFicJRdc(B>@t9alctq<;^c!p`1t^R9N}1(wR=|o{xyu?|%p~gmg`BF?G8r4f)CgYy9^F+Ec3;^6{Y3gyx0i?5$ z1%>*tkGA;qrYC>8ysor5Ua9^QT#6c;_4fT==XLW&fp4P0U4QHA>17jtUUNSNxYH`J z;q8-spWs!~K$$QT5cBZ6VhSZKqRKK$;@srN<!LENF!eLQxQyk*S7Vr|cB z8Dz}yByiwkT#clwOM*)1s4lXh;b@wMk;(B$MP^oP#_5*@#HjFUTS6K1xP8M>Tp6#~ zpR6jO;{z)6rt0{Z6y@`@OnGhkV^O`Tmrwb7w{?lO99I&&7ht$ZR@^(QHhSONhbk>s zVs)hJFA*Z|)%;z0brrX6THbo*b2r-25VEZRHg)EL{$c$O3<7FJGoI^fzZDXVM+B7zYw8$=_&C4JcF90_b3^pL|5lRot z5BeKkh`||d4e3MR_JDbGsz7!fFi}7;BXKFMU0igaJ1`o^++?)F=#Q*!TkPw*)wnZ3 z5+IC0|L03ihIDm6Txn@#Y=&?jWW~K6CYN*gC$VPC`rFx8`kSM{Lr5RPKKb*9prAIR zrANJN7F1%BliKAq+Z|?cbnn^dtgRVoE{aP~{Uy93IEX=Ey_lJ5OE$6Fci+l~M!LDV+FaRzug2tb$e)wTxH%W^$I%&a|8nyo z7RymL9a_Hem596eN{j0+K9QF3z`Pjjbe_$H2SP6LD(}>`QA&o6uGe)dVQ4P6gPfXL zZ_N>8s?58)>SA5NSjI2u%^(~;B?=JC#&=SC&E&7BelI-E@n!D1Irnd?hUkC$9C=&X zZ2P2;0z>GqDY%{UpPQm6=CcWh3-S_@BeGpclO4-=-Xz%O)$i2EoHR0x*D 
zIgp;0+OGP%sWQIdXJNUg$EVJs7@=?WKHCGz9_BBLGRiCcpqDAc^IWww`hRt>1W3O;Fp5&m$uNv$*5KffuOfXY+qtS$(vqH?P2Nmo z@;P=aop*N8$UmBoOgMF1Em)PPOE$iwElS%Ab&65?VdbB#w@*MbauFlcctrRNDe0A# zJd(My1w~Zw-U}qt&S1@_==-&DWPEK~NfhFZA?^4yxPq=b*6>StxpB}S0I-Y;$ zVbuww9U9s)ok=!G-Coz|l!~P$aIzM{Q}mFyaaIF*A#y zhI`|5D#OFWo)fn4e#=a!;D-!-d!D$ooGDH7^AyyvQ)&;@0xqMC^$fx^d4;r$vj&xB z#`Esi^^JYyn15*65tAhj<52Pm3pSG^PEERq^I$4wr@R*4Hrvdb=C(=S2NWYYS|;ib zACi38URL76?=P5tAXPldne2!`8?OIotXJMz#Kic)LzYjE446%Lx_o1au>xzv<*pul zclx&cP?MojC}3EZPqV_8QQrh?&yyoX#mM(ujukNRkU}G44Y`~ynO~^*R(C@DtbgGcRHHKR8tx5vs!bhFt$tn)vIriR3x%K1|ZlXZ$%4j#^oa zH2>IyF$^)QQ)F8#ZcQRU=_}oLho9f-ml&dwFxsF4BTQPQ(%;Hxg>N-y`!g>Wm&S$L z|GHxfm6B*Po2w5I&CO?^zxaOhpkxLN-%srA-^_mp#0Gx$^hkf8?FUk`|ZJwC2Hltw*@FUtDt@THe~V6pF1^+kBz>T-g#>4izW|zpkA>9^y2X$3??BA}j-HfvD&k^d z5Snlsh7P{~$Pian{J_iy!gw%DIB-92ymTx zyF#`U8v3H6l@;u2dq~ZJjYop5td&|+Vr0NvmClsGl!4RXrK*}O^nNCJ)fCw!cs@g?r;sUN3%YoTdXi_NbJe!E-J{lp)K zAsXYn{gD>K#!)f-4mm%>c02j_ZMAElw$9Dff1b?hqtVZGn+0vD<&7Van%Wz9pm%ui zAek(^QG&+=Nh&bRJW*3+mp2UE4}_Cfkh`ej$SOvjo3WJ0K*|x-uF(QeYd$lN4d+y z;>9wm$U5*b)jghqyvUI67XiPao_>sBtOA3RBJ*?YZsp)&O zk4!5O|Foz_C4AaR)Ize)a-#R0EBAZaqOz$>TF4ewT{GXjd0pGzkR{*zt1+WSJGSUyep0kF zHYq2OAI8b)@c7`w!F);STQw$LPb>7dkE6{3e#fysufuQh>{&AQX(5c03t&?l*-**8 z$rj+`beK0`58z~08K9A?C@H1D_&~1RRHsDz$hsJoKS@hTT_0lroK7`B5Kjq6#X1q> z>E13h1?LxV^*2;qH_A;;^G>il34%^;m+5f%tbF|VaeX}qH2eTwI7F>cM?k_nqzA+1 zNYE0HlaP2qvK>I}u#g^Np05>ut?_|Z8zN_57uVkZ`5i%hoeK-cA@Kpa+viJ-Wzc(q zR$9HxX{m7sLa)F@-2|2?*q@-WcNYZNpRfP@1q%a&1r?W+K!jl_FNhAI0|r|ws1hIsWM^tn)7B228 zB=3Ms-PW$mzdenAfRaD6f$7gdlWCP%ySp7kk~lax`1o8vf**uoo`N?G3}GP^ZZP=O z0Wdpz$ZgR-Ub%{o3Ld=wNz8XU%$}L(+{c=;#}w`>KP#1fGHQ@Ilq)%~OO14LY9v$; z?D6HRfFHg_x|N-l%DctacV&k6zlli{ExZl9>p1moL$UQ|X+^_VX#v5bv+WvXgNMh* zk>h#FN@54gB{kcF{AAr6B$E<%8RS3C>zPfw*;&OWnS3eft!ogKoWe3T{o{`V4_gAi zD**_0@NLIf-#n#|OmN4e3y`y7KtV^|U*V!3oW13M{w7a}&W@ts-~`?wNEsGCMacZi zR4u}#ifiES$s@bpzsW?&7gf!@mN_tV=Ft4>sG>X~^sjQh$HIbXkIKUHMM>Ag+TnMt zKYFQ8D_>ilNnW{;5}dsQ8l(T8)wL<|-ndPF2qAyb&5ji49S6K+Y z6oFzZH`M9n&`C2jzsaxh29Nmv2;Yi}BJV7uycz&OL-m&~@2DX4J3+y(K9n_&R*Qd} zKe_GzB&m8iQC8P9t{Lz8`pfR|KkvOWMhl)kTI$FC{=<_*it9o2lf0`P1Ko7VBNP&G z%O)?LDOQmpKZ;Hl5d6?3{>>3R_#@@UiL$4Jt-!5ThP6$Q1LN=T2Ca+Z{j8$Eq7pho z!_^)(5~P%^M=6-mcmZ$3)O`!o7E78j3=P9%$*_JW-0j;4V#pAD&S=6@5U6k;D*^AQ zr5TUnQrA*0QGaq`5OsmIv;vE5EFFDI1-nPv;1Qt@aiNB2T&igYr;{QL4f_J-__rUC z`XhOdLpJzYrqLcP9LL7Rg<41yP!l ziLwO5c5NB%$7~#9JUj&V=k=$uqiNa8wrpBj29=_f8SL_PZbA&XxH$!JS@(%rYW}|# zpnB`!1GI?$YVS=oIMZwERq{cu6C(*h2yGXe%(wjz6(++6)GpZ~Oswi8e>6X`7o%`by^pay%LEx#6_ zneXw7e}AVP5U0LHTDM@t)l2qIbrv@_msHn}l$@Opde+S5Kzj`!!_EBC%>!5*^V$G5 zF1@*lPBXy{hm@WE)m78q>VD#g(%)mVJyiyrkhKGDlJ7OPh1#y>r>(~KT+4A= z8G|l_mVkI*e$4vb^%0W%o%(tu1~!)Ps*IOcn38_(?ibsfq@*wo?N9eq@Uu->g$+EIuKWuW23`fH{W?WCp3l9zD<2zi6jlpJQ z!VKcxc%ULd9`s=-BKKL!D>cpD6>1CFTC>efl#^4pr|KECtN6@fQPc~=YP*g!XlSJs zCG*)iIq>F8JFtDnCmQ%ZjKy}EGAwCn7W&=Ue3O@T-vG4-qfj>~dGd5Ojv~C0a-l9I z8LX?;uf8utKmKn}>G&_&$*IFm->E^$E6|InV*}s1bq*0_GY*6KZDxKF7clJGqGsGn zAxNB!TBSjDv9wn?>|X6sys#-|Y-nN>Mx&;z`8yEq!pP1&U4Xs>-* zHmwoI$3D&PC*ogY>OX)PN6?!Ddz_~&C<9o|=CTKDwWf>`F|^lBzrQTZN$J>aSaS2+ z*I|;te)?4JvUZM`YJ6MO^C05U<3L#*O(Cu9=|<8u=V(kOkqC_*8@u;!4I1B4ieo=` z@ci=^V@~O-2Jhb0+|wub{U(R?D5y&~n55#Gc5x}f-r^wbMkNyC2_NjY`6nNJxOmZD z!?8ovKY)o}9DwoWO?13UQK849)S*1?ZM*!0p@SMv$)%pR7q3VNcpw3v#isz?l|TH7aRkDWaQjhBC^>rXxb zqTfmBhPF?LN$)#g9!TmwQq?3WEBp05w1jwfitr{?JI~Esfpz;Qz}o^2n1-Kn#kH&$ zY(6k$lq`6hi%jWL3gC9-j(HXzFA_sc06d zxR!(0Z$RGADdl_CHy#HBSo&yvvXK#{mD-1dwziZ45Fi*C&6cnUP+}c}3dK3zTT(qn znNEHLyQmq193?8Q&@|K!=Q)P86X%=koOWW@gd@KFeTVIdKW~BKw9h4=GVSP}tH7nv z)WUnjYxP9u=0Aj&*J``6wuoZngWAn=h@}f}lwioubhw2t$eOd02vT0TdRHiGF!sNs 
z0{NoC^})_WsNdj_>n>SB!k{#vnRsw2$yR~2lR5_7vxAERAr1~)%9t$51Z~r+yn0o* zwc62wOy3WNA%*tJxdQ#ZBQXE4v_h}eYTw6Jz!sSQseFDa+i8+B5W7 zGBtf`9q9U~nBI?ZlV(xzEHVk-IX8KNPSK|d1fRhrvD&HaPIRz5GZ=3_(RNpDmDt^5 zZz)Hxf`fz9p5VDw`!$AJ!=9JtkG{0A(u&w~TX~uDLu;%5$KG3oMb*CHqjn%lh#(>0 zkOC6YjW8ktQc}|0-HoUSNQrPtF-mVc*Q7VJs!{}^aLBfwZME_kuXX%1XHZ^^zsQBf??_NLY z4B8vWEu^F$Lk3{!S#|x@$jAuLufSNXyZf=X1Ru&7>FRn2v<#44L?$Fy!pt)8#DYNv z4+v8;ATD<6F6*~;DMf6IZW?$OD5pcU2uR6+;TJ}Oi#Q3WGXUrA>kA}dZ{NMss&dFG z(Q3E`QaxU$m_!M*C&1%D`G=gG+&jGqqiF%aWRL`S)7BcsCM9C0Ew&jAVR&w&VU9-#snPB>#M7)Kv}iE zUZ9W#h|qPfywww@qZJ6#xFFvVr}8NSNVOn~0X}fwfa~nWedWTRlu;fZ zwvDw7cmzkk?uo69{rM(TtfO0Q#d$_;0HLDz30quVcjo)8GzQwte&%Oz@u`%fJNKE@P@M+i{I^O#-iVQ|w zx*NF6@$eeBI!$;ohsL+UIh-m7)0TYrdX zSttZd3kK1H$&gbZeeeY+@p#YZQF;V6~M_l&@T?@7DnWq&M zkMuu%mJ$y&oULv#VA2+q=18;z45GI8Gb373VWLD|ZSU4fc2(6!p0v;7wuZ&C8HhQe z0|2@^^@bA>X=W{J%jW0^RlSIzgOdyQAD8R_yrHFau21%BvHrF)Swi$731{ib?`Kyj z@NcVfP{hRFS^wot$)1t&K!5a>e9%2&3{H-*Q6fOgPpiAjTHF7z&@n;Yr}R#G#)_?e&8tp((n+nMlT_iG+qNtBel4mV&<0%JS!w!c$P|6m2F0PQED)@}c>%1Z zE`^g5T<0!-reaMoiM~EpZmN_@nJ3N9oXii;Nwr!yUO5l3x-oSpZO&;psC{D6AG@@- zHXRktVfk|e2bzYd!rOf~Ed~xtqS6EdoGW7|DY0|kUH81uDLzfFpL9g~$~3<(4+sG! z`%g@>-(8utQzp*|p`tsmCp_+zUZ3=kYFCC-u7N~UjFnAyN+&*^*Yjhqf+E9M?8gZI z(whL~Y4Ca3_o4l8@2XlEJ{7-{zitkY`Z7ujRjaY_N(JzVvKx^Wp;C9SR3Ch$Jx>(fDpOLoR&C^3z(rPb(! zucOn|`R2)$4~ak9(g+B_3%5mo7oE`fmOBJ!up%>FC8Y}P=C;@t8@{8hCa3J|7&hNn zwYrHX)q5)=t=9E$hXBM=VB9o{eDcVRiBZ{h(b>tOu|2N9srG9(P*&HSe%v+&t@Ik0 zl&E)`IoW^f>B%dCfv$WoxwL^!!TiJ4b8QOUKErYj*4CIl=YtpNsF+`Dz!CJ12?_p%p%xY((xIbk9vm!#@qH-e7ZnyBf-=`9 zW=%E92k|$-bD$coQZQ*&5wx_mRl!J&vhpX8cLd!$n3e=FOUw)fID~A)Y+w#-JxK-+ zU{+dHQ!_R^TnT5~t-AwGra6=X=f(}jhYuZ{oYn?%Fj5tHxw&PK_`uw)igG?kI|20p z5GzCRDXi6GB0iTXJA1>beWkL*I#i=8oQ{l7#Ni8AU7$F?aH;~1^_})dn1qaqib_iI zhQbw8$Oah`VFGBvZTVp<0TtXa_z<{bP&0#`#jRVn+-gsO`~;{C?gEts0mO?X5PkO1 z(Oj+&P2Skr0);beAZZ0kI7r|aKo@?pKI!8}1-RPS*zBw*MzpqHCGQL^R(>HN4(k=z?xpan|1`_#4w_Xp^dP$W&8QFWL;#$ zerroE2Ufmv9_*7`CIdUQa=5Ay#3zq2%vNqZHXc_-EyN#r5C6znas{lB}#C zm#R(K>riTnkCz?dPa^v=koQGAd3Y3F9)7R?^<#&^BtX4P@8DW)9$y&k_uQU(+hUQ5fATOl5pS#*>T?@&k}d8(aK`uZ6_c6M~x!MJvvCGQr=WAc=d+)M{!|*%~eK%O6Qqbpq1~v9TogM=4FD5fh#_C^- zVPL{pTXMCw%N~Mn+1c-(Fl~v0zGbe_8T2xlt~w zXYtxi-DWPkqNbIP($6d`ephJ8QKuV0?RfzD#%wb#B+aET1`T!AQKebOr=(GSRFJfW~NVXXNJI2U`Hf6mH%bm&K<3WiJRv$poRnTnL8nlwzgrd25?#M^?EHf_xAF$Fo!89^8z~#%(QK9 zZ2`yc-uk3B+|DBw5Gje{vDE{50^nV71`3JTOo=n&zd^m$}yw^UZ?{YXPo>% zD4#Rn`0oHlB5$PtBcrYjJeTr|{=suwGz@LWyUQ3tZ!q3hE?ojCjfGt|sN%rWfdKOR zcTo_61dAx;76VZ^#K9_akxO5zd?s?6&UqGGtT? zqI~$b`&1ejrskmPp!!l?PGsu&);2s_p~Ju7pvY%vWK?&g+sFyivx;})(U?+Gy|*_! za-f@psZO}?prEhLpGy?^G}k9;Veza2E&ANwzcpJ*y#{hwiUS{KT9e*_i-5s*XxM-! zfX+AAZZ$P=mcSbvT-ZgiKZ^j0H|S&lWf73vKqpv9=?-O7J9w5Fptk}s zT?vZzmKOhz5KOn*!9iM9w%+r&?8p4lE)4iew<-)l88A%&md~^HAHZq}djpnDNNDI} zrM+>?UoBH}Da*XS2By~5!XU`;`SWMErusSwh~$=CT!m1cm<$L=;XrHbgl=+g^7cPO zRPz(6S7;*4KcNBQ=g`{Z!RJovsZJAMAKL@N1rSRB`;ef=!B=wUQj;D%_)P1%!*pnq zYz&<{MI#^u>Pix(08I^L<+(q9dZDNYou)xj1ys2>@B`LHq{1?SX8kHdr3V@&e1OuR z0_1eSdBdNH+-3coYM{5@Et)d`OaxLZ0YEkZRH5SHSANyFZUa@u zGr6&bhUuhOx6_w@=jLGAQCUR=)N(u8+jHvHXd$%tD6FWY1e6%Sjz~vM4PX8N!F%91 zcpme}5O{}s)iY~qP_S}f3jsv~^ss=H6vzU>QNypmP7i!uz)=_@O$*x!7#g~vJJs1) z3n?OKT#?ja^n`$f3&XW~omWa01hY;ZZLLjA7GSFZ{T>8Wp!^GPF2tP-0nZ}`NcR9c z28=1JY;21_pN?U$cphhDu!BQ#{TUaH2vmJ1iJG8S2O%4#qf|NqJb`KH>5@*=kU~P% z*woYn)4&iYMbV$D)hbZV0niv464m&4GN2r0)~o`xM|C@&;Gm!}NHyVgptK9I4Txd^ zr5C4JKRt+x0TpaJx_QF0`Ki+E$wjnGLCh!9n4O91Fi;&Rp(NsS$0DcNT*VnWL7$rI3d{ z%^8pZg8+2U-;1xl_ogAcHPqkF!OdMfY{48=4KaD6AI_)+oQXC}!h#D}b?NX74Gg3c z_)4KRj(e@e;8CT97gzv@uKgOPcXoCFq5^E@rzeL4b{oaZO|h88hZu? 
zn&!755WO0b55^jTXyE@W&C3h!Gy)>dPI&tB!f0OhKZ{VT#{`U4RgHmHFfn) zqhf`uzcxH@7w+A=2lQxyg0(3r2u8;C^?eBG5!pF8z=skvxh^Yr~NB5n+*>cINPO_6@maH zxG``67f+Z)flTd@p#Dx=kv*vrU7&#cyX)3X?9Fk2J9U3||31~mwVLVY6HE&mxXIzcFe>M|fQlUKoy7Qf4R!V7qvZncWltbKI00@RFv~*l zZcNV|5rMU>&Dji(Ow$E4~^5D5pAI=o)sGm%yB1w0$KFhxc`0F zH(Zj_^QVv+(aFSrkWBMFXrzcXWWc`Cm~^^BcgwFBL@0`fQ8)K@k56E6|FPkb71%^7 zD-!~BYoRK%gW^EHeR5scX6l=cU;xD!zytyoLc)kD4CK9P%oOY}{I2ENKzH`<@M3Kg z{>IK$otwL|(z6BRl^|C^5=;(Hk94gX2nwRwm9(_-hnBl1z@3*1@Akg{KYHq8M(!af zDELT7n-Nd@E9cWR$`tELiw9??M16SY{R%i+5g4X=`!aU*=)nT}g z^v|RJNn>)5xb7>e%|mt=YmVji`aVLB`8L+sImGl1SoE6XxrNXqp&g&V$6HreV{C>g zM=S_yK|ILC!C}J_59LOnQcAR!5gG(;1EAE}*HP&{pl;-SK5gP){gf!M2{zfik0WO% zeZ}l|Zrus)25Mj~P2mBat3aZ4MM2{gy zmQSBH^O6E{rBnOEa*clWic7$VheRB{BSr0G2!lUaUw2-eta3>HaE#VuUh}1D26AwC z_eqqBaji6cWLq+`?p;uL7SsH0WA8Od6K#XOyD1y->yCQ6WDQh zI6Xb6ZViEA$Mo97V$Pt)L@)U@zktlT$tsKGF5$Hikhe*6U#m>c$NP6Dri!>w-QSv< znuaa73WnUfoaV-kxwWq*HtFHXgVx(^S%HRW8-IE>w$1$NHM?re6>5QA6zCbcoYsQk zG&Mf2ajGK2JyiQwVDRFKdyWJB0u+2sA2VDzI}uO!2F?aJEa06~&e_#Q9}%&e4zKSEL(P$G|Y&!JutGC5OJ1{t^y4YAhQ4|An0w#e|DNz6kUdRQvnib3gA0ECq3c- zI^Z$xiLoCPpXvX54SH}R=_1bAFRa*<-!7j0_~YCqm;YXI_tm-o{*HSQ7sze@9eU?3 z2%P;Sf-Umu>e-L}=dW#?{dA7)+PdKs-5J5KzzFi2flo)hEu~Jwr@PfHe4;G{qN7@@MkH8Nc^zv+@|^N zxs9t=to~c7Z?2pt?^04#l|YWF^E3ah$}3Iq#{`1^x3}g1XFVL*LpyDSx92%_tH!)1 z+&}n(52;F~zxnSornY>~;Sg=~zmVNLL|qk{bnO0gPpOvnDdn$Ohgz){{A;TN2{J=K z>H|KQyPxyF8=+-$&!yrOd>#hb0+FJoyuIC%&er0%{QA=)Z`2Hhn9|*Ur~hVn<2*UD z1@I0+B&}9yHw#_>4iE@~p;S!0Tv?f`KkdKg7y{4H{6ShdL*11GMy zRfs;u>%E2XY=6$38%xFbvBWj_#!kXSep9Fub0y$7JV|x)ra@lt_T`_K%3o z7catHES#eHcDD*|ci+pe0;(rYl1_-~{i>wT7N+hk`>APla36HiZj-h%Da4Bp1i0e- z@n^nx#|W%N^7b`OZ%~B_Jo?{)!S?34C@cZoqJE9Ue}&tOim!TAEYxEJj}UGiy{gwk zon`uN`nsa?bF=syvmu%I{CVm)fySs|Cd&DJty}B#?Inm)@O>}dnL<#0!@(yHr6UaVTPg?yf>Q|x+kHz53+j|-tpP+9 z2@&zC1Lly`#k1;uZyF+c1Ef`QQLNZdw7BVby)jrJi);!1-F*lhLb~U1;D2sLwKw^= znR+G_C~P>sg}j}fY>DBV9yTPO5G_Me{sB#&)NL$e;USbXxn5s!)wX(^32}b^`$?)E zbCf?5Vc@t`{Q{rZ&#JZFmIwXR&WXP|`imPZpBQ zY;fQo7^vSxqA7$Pzj+}5i61O}6}@d+0hjzJCdePWXZtJ(CVGLczh|9f1Md|X^TQLb zq;Bq#rl|<*A7qyf9z#6*x?zeE-Qy$wY})1^1??s(Dhfyi;=nwSJihwU2|XG_f(-XT zw^b(VMFO#8+?2UA1n#wHin?wetof-;mmU*><8A!+5aHlJ$S21^a_?hz^R$`HHNm|l z8b8@gscTmmzU}+C)@(e?ulR$7aLdY`L!~QM&lIOC2Ot%|J})@_x39HeU#pL~E$3nR z{wZA=F}|hzuAcAw6XZ?8Eky@| znja>Yg+*+8a%P)fJqG8Fj)Za(#0GbRe**9KfrrqW23EZv*O-Z#&tCM6^6|Mh>mnJz z@m-aie`fcl?A|_EIs2C9{@;uR#*&%O=_yd-4@2_+vgWGlYF|-ZaD+g59v%+6@;`5) z3%0hd7>bMhYZaKt@uE63K==QK%VMq{up+Fm`@a8|dc9+Woo~g*$4AJds`Du3=iMFW z5T>pdIW-MaWrHD6*=AZU)0eBqPdnJVu^ofYuDKESf`A9N_fcOGP@jX>(~9Sf?9^i; zrK6>r(1;K|sV#-_>KuHjX7CmNbDG$~sy}Yky#d1bZlPmomPi49&n*!*V*`d+?>K=( zf8~4)UgW^pLcun^wy|*!>WXh0r-@A;h^}3xt?F)XrAz@9-p2(u{(Xsm&w2fuQ88Ya zI8&8QPEa?I9j|hZqfOx1BdTDc{MP9FSs}|mb9A;&$Gm=M`Gzl8uPMUI|K4x&QskB2 z`sb38i0*l7ORM7Ji}U4PHcQxez!r$@rupB6Qow~?eH3E>=u^U^^)gxE{dc0q-1in2 zJuSM4EVzy_$z96Q*^A$?Ylz>fi)$u#Ux9vXQoJdHX5rEx-sQ`i491jSYfER|ytvsQ z@fS*n49v{rOCMEO&p^e))oc1_?Aw zfZh0CVGuO!wDOd39H0Dmxma+y+E7cz1ohD4xmpHk*fK;3EZ&EK(zK-CzyEiox8ZAO zVVYKbNv9llu|s}k&l`Mcx_ z2@?_~0wnI^);M>PUp*xphEWd_P!4PFZQ2=C8JRTiFUDSF5NA%HM^?l zhen~!JypT_lWyqE+N;43z-#t*0AXAL_`-Hj=)rV&fC&4h zY3;vney;GU*ur%0SG7FkmvH~9Wdb$k=z2!;KTlGg(gjuW@l$D0TNLvr{q{D|^qjef zBk)zP={=}8LF=Zzq{Lw%`z-7ACx-7}N>6}=c;sHwQIK);5G|yZhkt#4Y$segE#AK> zKHt4;h3D6B=h$bEg-TDK6Gk7(f*dGNv_l02(q(|}$qk$t{|zd?b2w4rAk2Y843AHk zZXPKjrms5QoHp4}nN5>+D?dUNPoOh<%ouyjK1D_b^_s)75MCLvc5nxP8#ti{bC2!p z>>$961x062id9u@G17m37RG*y0)N;GT>&YehKCuTdr6k+yxuaF+QN9xWu=VN&|XtE zsRLXr{4?W+#fM~FYkOVpTwGiHjP@( zWjr7Y;U+k0O7A|I-sn@2uBx}7#uKcX|8_Nl#NR~pW3s?w<2@H)Gm}Ro!K7RkcU^3< 
z6tf4+zzac91E>H%`f165Z|2XR9k&x)!80rR>M>*%Q9%k>nE6&nra+O(%1A1(Gb~l1 zY~&tcfwf-IV=6+t9y;S#{)GI?!!Zy__Kxj!G)P9uRUC~}hJU;ry0g>eSG}vyIxtL( zEGf<_uAAhep;?@^oki}{M>d;xo=204j=g9o7 z>5r7-HgOrrM*_|VzkmKTLP!RM8$Gz=Nxq(ym}s2j3CPbR8U$t=w?aX1XqhGjs=Cnk z1QqoCW59aSKF2KT{(l|_91ZrBi}v5OvlJ0a|5DR$p>Uf~ly60+84GHPU@D{j9uKNv z@g0v)efE&zq;+SoO4}t5MA3l<(qPNwdyh%FXcr9MopiJwqF7|R3iw=O{ZCzI355Lg z?c7hxi^PAc#J*zDvJp9Q!D8YjudFSBj>aEO+epZ;KC-+KKF*;%<`R147=N zSB%KLvNh&zzLT~pA6@;l<=RBz=+`$4O7CN10&=UQ)-|?Hn}{&~)mUAj&^o{m zc-uS!%0bfcJcZTO`~+CnF;gp|2q@hBv8!~eO>CaI`90*Hp?lR%2-hYD=mqGH9!$7 zH~&3Z;sm^nB$xxV`|5Q*jU%Ku1E2w`_sEX;%>1U+lI zsvP&mU)*G%qu{A~FoJo%ZHE9cGBCJ^ed!b7UsieVrp*!Y97}LzOXhR+tGO*Tde-i~=?D%_G_j>N*~MwIU{~ z!DEU;QQbVGhqE{$VLT zn_m+mg|PaKzhnu?obC`e2VPI=ZHC`WgJFpiG8yJ(X1)FW)CjlZA6XI{M=d|te3uSv zIF3TaA~m?&JHO{(4QwWHm69*4kZ1LI)VTNn0Cl-e?5UeuGc9edxU}R$Aw3RLidxe`Y@Qz_3 zE9e(jj>*v@kq-Oke#~$CR)l@@a}Q%#*8I0Dumw{*hH2jCkOa@sk5oI%)U+SiOP}P z;Ei{l>r}YzOG>I95_j~IhPH-lUy>_u_s6MNvc%t}m_B$Cf3YvEO;&~bD>FwI({nG+ z>eDf}2UC?SbxP$c1-#w28g?xig5Bc1noXQlRPk|0o zRALN~S1z1~xI&S^Q%D=sEa?g0K>>0XtPWrUI!sdbL;J#-8nfk7qBVPi@v|CuLtgb>^2-d~|)xQUNz;|tfH4|F4&$-8cpezbig-{!f zfPipzcE(@|`qBqwu2(-jN57T2VA<dy8@Q3NRC>Fv2h z5&mUr>qMjsnVVrE5tVG@cdqK0&KbIyCn=p$1q1$1LRW-z_FTNx;?ZsKj1atuRqqwP z%ykURPy7@+rc)@iamY;WxBYW~ooLo*9DSH?m0@n7_Z=*08_@6!DObH|Aic4j7|XRO z=e+*Mk6}ZQw|=iw#w_w~zUK}diV@V}lCOMBLBrRpzjNqecfOvd;n)1;;J!Lw zNWAuPei{`PDPdW&_78&u&5(10N?BFa(aQ8ihly*iZ*V~mQz&VD|F|9{X5j?%D%AK; zXanqBmLjZ>;^B^2J1Bd=uOJraR*U5;3i_sR8YU*RxZ)CGVqVJ0eIt_xJatJ4!b~D6%QdZeH62_yxcf#qoy_jlQd9Kf!3yIa!YG#;pl3HY!cW9 zSUj*;Cq1Y~SDj$>1`=Qv(l;H*vhJrUWUbYJOwW2vxPOh-di8o;HFTFj&^@Vp-mWs5 zW+9+uZeLcr6v%HReedf!yU(xe3%i-_?eIShhNZK}J-SbjJ(H16-L!c~!O0=`dq_Jo zL^iTL$f|qMOUkQ;L5ch+vefgWL#!zfY1}e6V`t1TE6VKWU(>)c)UE$1*I0asov4ak zB2D^5M&mPN<)$QJ$*$^km_(s?rvnz*A{TxSYDF5c^-i?;xu>b!Fnz*1e<5?Tn@>~g z=^ym_KS7q%B?mm&IrvhDEXm31`@7fC-4*>QRvO7do_9+e;T`u#SKjK=z`S?r+!qCU z)#{CGm#_ulNY2$;^AL&*<;ON4jvCia2+cg#;ZHKtrBUGnL6E*-h2 zVwhIX?;SF}&f>9aN$yvC6kBQks86{+er#YOh9SyQRe*&>i_>}6t+?6!iP;vTeL;zf z=j{otPTj)9Xuj#SUOX^yR;MoPflXDV6;tm^{z499bLE0_2*c^j0b6s>+>>RlB7ZC9 zF74Q)&u0dyN`%pP2G6rNr%pilYwq}WH z2NEL1oB{ZJtMVMh?4Tg*(B{60i7vguN9^o$($P?{inAUZL0ALHX!whyIg z!zHMDLE&=ctq7$K2g&6NKFT?@732RpWtcvb7XcE{V_81#G-_GSR$n!~23Zp@W4C~9 zPv`=q#=}la=@-?Ft^-$>Z-2p0Kw3Ds)7BLq9lfsMjWMN&H>)^$bF6zb2R{g7$W3|+6Z z54lBPXX0HtL%z!pdi$Fw*dgrO6D$*$Qj&t{DN_XRB{WDFJeHwlm)6nOzkw&nQ5N(> z(R{67z5Em1!o0CDZcp1tUzo|x{!IIgegsRhD2o);Ypegm0?a5|(>v(vPHge^QT3QCzIT48 zHQD<*-Z($HW^X4(+iEo7E}dqnj*RbPpf~BR(p-b|aQy{cddp0{i}OvqaRrSHk;6>K zUE{>x-l{+6(N-42fB*g<6+X@FTO^5!-dJW!vmjhf59M|jOIE1eg8+YgHKBv^a=x@w zYp@D@?=IqN3uzxA1|&VogIl4B?CVotz~9Z087%+3!jw`NaGi{BSwuyahfL;E({o9g z6&s#?XD5C#*hmeA#mC3Tqobn%5^eL(D#!pZhr}mm(atj~8;Q}Ly-=rt;EILyYC82n z2sF}$?yUkZZEdD=Yh~BktGlWQAOGJLPSC0_!4GT_N(zl(hUrAZM*z{-aI2L}``9=g z-8rYj+QMO4IwD8^cmH_SjyJS=MMziVHXcaW+0RRWi2d*SP zCx;^zwzrKn6pJD*+mz^z@lIIZ)8Wzjg@-6Dd=GBr&NQ;usjEQDlf5 zU7gzmI8s4T(H5v%YZa*EDTDLW${BEScDAn^`|;xk3=RPJt4f18O`(+uyQjDJ`?L)N zNq_$QfqVnfUkPexN%=$S#z6wn&?HI?nJQmB-lbvv(f@+aIrh%KA8USLyc!@Jrkf=Y zVPhWKz{u&rzHLs4%j0oi17Q~j3E=X^%dNE?9XajmIB)~%Y*Ezq#jq9|VncWFuO~HX zIc}9m(z{sn{uU0Q*p6Fu&wshyN~+R{I`mH=3LO+UY3eDwa#GU&7aqK}!!DDs6DqOy zV@@&qgrMsYlvw5s(q}Ux9tojEj6<82Uj)3ZWT3%YU0z_8{YCfl*=4dW+ZPnYC0aFd z5=cQSZoCJ+qJ&{Rt48k+?XbN$+EAnc;=C4IeSfdN^OxwDxfPLzi=~LTkGIP+j*Q5X zV|||IbagpL>y>y;us;K~cGrBt3MYP#Qp_R}wz~7xH!8+yId4S~e7^_8kT>j${WJR( z`Sc4TO*lJNUO`CczcA55Bg#yNd&}C=^aa*s=g1J9SZS+n$3xy5MeG3!sqbjX<4#)M zvLQ>VYjeh@<0He|Jaw7zYG1g&_fJd^df7H#`g;ZPs`NHPmhKqlDVxNsZ18l#EDuo2 ze>ZEH_nVDlZz?Q$8)-J^7i;I_TNaH#HLA{bH`j#Y=x;+R8KL>=_nfCo9LIcBg9F^@ 
zt%lXsr~pZ7jT);|MqCuQ!cdu9lfvO7Aywk$`Sl7IyP!RV_yS6rf!jrsvD}Pg0YPe; z`P>9<@PsBq87Ig3j}{UyZX$Y)npC3b79dGp#=ZpAgyYGVDvG-#@H->X#t?Ijo1WdD zlcaUC!eObBSm(KJ{B0L1Eq6cvQ#Q}7@ieM3j`D|E;>=;t*=x z_}hOSAOp~=4&QQ>eIDncFMo={;f`M_j-XjctknA`O&MLZbrDgO%xe8|xMg1^@pP!i zEK=@Z<6z4+;JVbH_sFb+(%l`Lh|ncRSFdV)Sq>7-h$v4-y0(>z9`=3o4ey~vIMmjHIvk@ zd#hY~uC)XK(o)YsIx(rd#1|3f5@zC|AS#Awitf4y@CEQ_-siVkg8GqDwPweRT|m>0 z(2qdmu$rT1eUlmHzvmY8e)4~Pnfrq$t8>2K-yuBzoTPc)T_6$4Mg{IfK$zzRrF3-X z@ySOWh*%XukU!?PFZz5N&byd!Zyvjpr#hh1h`o;k5q8b55$0JShpn$q^8Yh41HOA` z*^!3~qFv^wV~@JEwKW@_+#VB1cv}qswhXy&Y7Y{DS3E2YVB0YrR=}ZW^nnihfEgz{ zR^#$6Ej2X+yVrb01Dg;8Si5eXtamM}t)2N2{wbjcH|YW9n>i|HXgNW~7!X$DRzqf` zU`AvlC70IM=WTf4t3YKnJG;%S)Qlq*Vh=!TIPe9~v$F$>qAJs;(^D^qy%n;8#PvZx z69SKC&w3UeZuz}g6oi;LKs*(Kj$}E2wm}yOSY)H6X`vr1CkFz?{Cs?1j9_*I-U5la zvvauwHQ+(vVgXsJlPCoxbA5e%W8(}FN_se$nXSys?D!Hu+ZyN(LHQX%9Svq~78cn& zWfdhQUa}AX1cD$aK-xJ-AbHc%OA$@Z%+A*PnFxBS?;5AUA|qgE)SfRv+B;t~nf~EJ z!$Q^F-Cbzmb8?nKLJvZk_4V3Jgitxamj^9LN?g{v*pFg5*7w=FpQI@O)*FVgjxfDp zIW;w?h5>H>t*EY*RlX{d6NqzXunP%kQR0?>@_Bjr_8|)98zLp4$<`ER*EJ@>5<--=6o14%RMxm%%VZQr>*69zw9dt)h%H+Ix#^(3F`_j+;sBp-FRF1%PVW zvaqM^T>tV7du<`7KZy_dCt`%%b+rb=waHvhTME-IS6)|W-6{DC_E0;UuMUD9K=eX~ zW-wl;m|FPhkJ*rZ(-7j$r*1*B*ZGVFYW2mp!9EIbjpEX++EwKW)hp0TyoW+3abU13 zvmW`-G!E;<^FIX0@myaw6ekJUBRKB{;NtD)6qq|{zw(bjoX2}vBZTOctP?o2-aPb| zpu@W@AKLyLxrjyF*Kg#EFCRvm=7UDlCO=XpSAQ>Ge@9ND?ligZ5$biLtL})Ng|@?D zqB)~lb@!^GAw}I^-erGDg#7H=8Le;kWM8&7O3$#euo&)}P|B6Je(9F?wpsn_oW2U> zLCf3sPicdA2doce4TB}=$LzOdiz4113r`NrIy`LcerdZaZA=LRful?*&V(H=BjR@d z^eoM!*M;$QrY{qM?Ng~MA6(gFZtGMNGCn%(S_4RB3SOYlY3plwzRt0MWlJG6)Iw(M z)AwC#XbK9jpCY^y1WtXn!k~WXZ2!VMOY3+IHEiqQ)swUS77>OC6s9VQ%0q@c?b2U9 zBs_r*RDM@ibfH-u-CZ!!C!R}h$;;<6D6Bti9

qyJK2^o!TY zlmw^*XJLU?$LBwI@T{If<)?0O!OQJJy1NkB+GuKZsh2tqh!@Ks>Wm$p)kz+`(`8WS^N;^t zvypVEaUEU+F=aBgPGvfxzsEvl1xL8V3lc_njCo(d_+DAJQn6=%6!nhY>w+3wk4ZmeWKh2;gL_b|RcPb%FUcQRKA(Hc*hzvd z2pKi+mmi~Gj2#XOLaDa--94Td&U-UY5ZW9gqoYztyYs?IX%%s3lXqEJfJIiY>WyS6W z(bG5`{`l$qKUbaD^Dg-4=;YlzLPu@D><+}m@PESFhaT%HjRjpVzlecXt<{GkQL%W$2i>;VT*`IlJqYr)&NCQ_cb3(P~hnmS|&x4d7A>hH}9Fk_1n#uP-i6Z@MflE_w*n=j61{Y-TDlgh}*6nX9JWH=yffH)zpGreyShNxmxK6DxZ zGaKuhS1V4{3Rw}Y21dnjKVtlWSsvsrzfZFkzmtL=^!2Iw93vqO`SfWJ>gmvO4U>Qb z1Zn~#BqZS9iU}=XNQB+;{o;U`si|(&G?dp>x(Q^dA454G&P1Dm72p3R zCatH;WbCB{jB~@!;UOA4f|UW|28KW^0l?bmRYX{3gPx&R2(G8i61q+ry=<~uAEGSf zDnQti{__)5KAK2M{o6Gse{oE&4(tb$S5ng1LuI(%c}T#s>4$I-MR8u~%(UjQXYz(6 z)eWfl0zk+sYv}+YXegw50NU2sqsp?q*L>b6{Uo+iOa6uX`ohOAJd0x;B`P`QXxECR z?(k;KuumNbH6<1F#sM4DQYBI+h6fAli``*(C>eaAQ<<}nI1)*W^)j$i%aTLo?oL(_ z4}$M-z{9KbpgrwCQS|=p;^$U4eLv#;xGM#Tu_`0FsSxuj##Av`$GS%6c@ccej76RE zbkL_@wRz*ygeLP6#(MyH(R=*L*N3%+f`Gazms*eN8`%E~Sn=|4D_bU#H{ z<}TIr=8?dJ<-^qg+g?b3s8aT22mMleCKZFa6d1sTFIBm*g&ubkzu;vW1Q-)x!Fop1 zVaG?m0_CA1e`O6$b}ed14~q*@t0iuu2Wwtxa2;zu%??;d8}5VdKNMJ@$YD1VS)j@J zrrAi?z2g0%?ZNo(9_Xh-T^zELcTL-zdOb7lI3uXjYRRVpe1(X+0VqTC{ab;hUOhEM zm&Qkqeu9&ta*WNBB6{yS_FZN2UyX=g@ZlkhPr1`AhyVF_gVox4+>pJ=#wD5<61s#~lb!+rv7K|kkJYiFKzdeFvP&=AI>xlOm^#$U990TN!&4PbjR#Al(`t5hm7qu$ zBH!eFSLL)}ksW94cMk5gQ$9n~SZ>LKb&rLOtAqx|r>ck)*vluh%#wz4u}%Er6avRs%f@#Du59u{cZ zm6p!c8N3{0X@B~__xo|#+Rmjnx5IC86a4iY=?BJ{ClHFLMn^G3omirEq2ves|87Yv z90wj>*WV0rxuZVZfb3HA^B?d z-qiTSS$$tx{y3F5R$O=-I+~EGpynx53`IIO_!cpeO@p7f{Dg6_E2D(4Ke=@;MN+|-nb>YRq*UYM4=A9+`E4LV2X{n#Zl z7HwVK#v$Fd0_@2LNjFXA^zJ7uv&htblk(emp`C+LKWL)@iMPKS6;@c3!Jyf8CQ`Ia zwci-pRfM+!BPk!1z-UDXANH z0LzCa{%Bw0j;yU}E^@rR-eGg{5(->hbY7b{PoXT zW_R}eAOr2PK2p~9*lc<-mIEXp^)<8m6np?hw6lGZxT^5H>}a#Sz5RH6-HY4%6qU?; ziPA`C;|?}A^d%tYt*AI?BGLwu63pN|@Te{?FGICe7N)*o1{#R0p>9#PH)4)a4QKJ% z47^J2q61tUfReEYqu0uldG-wN8Fh0Itu^%I!SFeIlm%CodBlwu{!Nv|6pp6sM8jHA z-ESGx?xaUIL!w(hQs%z4v>RqpqF5j}U`<@AH;^+>YG2&Ddio8J9IK}5N0v<{V17H2 z+cI#-p`po5Fb56rC(A-?Rf6y{evcUce&w46g<7tsX#&3o8gr+cnL_u=PyDEQ78Wig zh%=4!j5Oqy3$qQ2AlHWmZbNdY^?hd5YF$sJDU@yDUx^MO8P%TVF)?jL6+T@YfpxTG z=f)SH+A%Y@yL;TIi|F-7TibdOCl%mhsaxD~W|h!>zOoJ#5C11$@B)u&w2Tn1G``Ux za@O0~6*ai{Q3EUHX%Z4hqi(eRdhN^%Ls!}KvBkP)T3&Cq^Z()Mz2mw3qyPWQE=eUJ zJIP)lWM_x0Wbb5WW$!(+$<8J_**hU5WbYNSx9ssdy+6P2xPALWWxcM~bv?&7>W>Q#}1U@Vs{wqdJLN1=ON*K83( zMOIeNQHS2*#<8DpbH6>eyycGwcNt}rQ!CNmi}&p{xbBAtw=W>VbRT!x4cNKvrYb}B zNw`)ji1_g_DnIeP{Ck+BEJ#8!$NUSnMLV>wvi|spgn)_Ax9iE=*&h=g#2{bO2jkjy zpGx1yleC*8Lw=>?iB&fAkKrW7%4eZ|eUpWOjNlWZdU24d)K9&kdR8liAheYWi1k@4 zHlBCnx7~fr7%w?Zm2^}uD|;7YOiMAu4^KD3ESTs}*!?DwGPHI#e<6l)$#>2)O_rn#A zH)<5S^k=m-F_5%_!iT|&?l1FXV~iy6x_n!SasBrWHdc|(x&muq-!1bQbR!GfP!13B z;q`xfqd^Twz0ZxytNr5X9<9*`+E-U?cVy@L?-K(|?M++HC+xYuZ3VT;{KOwvZEv7> z2{9$Sq`^->Yw5!r!Xwyky%(yEzc;x#Jumiq_x?+6)!2aF!H&Fj@X2NK4>IaM$+Ry% znjCD^nZ=&&hH?dmLCW?oMWEGP4>Z<>WaBHq8hSXC!R{5Sg~mT70Wt$k-B%9>CDN#>c6i_ zC-!@kyQH=-*Vb6IxrIVaQ|Y)X>5Vk**(jj~d@br`;b=TTbF_e1da{cw4w0$>lntCb^5OqTiz|ROp4CGt$a0Wp`*%x?>10MFtEC8IkQuK z|D_NGDfzxr{Jl?r#Hnjy+m7*gk}Ps!v|pEJ%w3^g1P^xKd8%rXTWFSZ-)E*RM@h~~ z-P8QliKGBpvC%qCZH4`MZoOH-Wnqp$l57*d)nOUQ=xw&5(XCHi1l2QDtF-pRrMjEf z+85;eEP2XIwaFviIT?fVyhh0aR z22b61OG_2$*JmBN`*P(4*m)%$;T;ugmRW87UGQk`q5z58q)o>M7-X}nE5S6;^1Vgd zvko#ZKGgKh|D69^YB5~_-%1$vM~ceE@$K-h>FN>&h8jc8{ZB)p+(mUx>tVcUtV9u3 zbc`yq<$vb`ChorGW%u40iN+8;vbfpyyI;)K+}^n_mHRCnqaYiqFk!sBm-G(<={S?>Qe#6+9skb~Cf1iJ-z?0ksH)yk6(~9#Ht;wWFJ&wcrsF<}c zSzyDKqchtv{z=5ND_^q6u2|N~N9x7ZZ%=KfvBM?<7mgP=a_?_7o7?D=(Bi%Nte#e^ zK4UwXtH%_M#y_CIeL2X-{UKh>=NU@sVf>q(x-(1Tm-O_cX~|hU?*?y{GA9wfpc%*i zQP}hD=Fx1<;^!D1!B^4tPOSx{Tt(k)QoQtqQh9S04w+ZR1;N?daX&y_h523JLqpX0 
z#P6%8XWJ=Rk?ql4{N#>D1v&ASwNZX|*u401l@F(!_rLG`#t$#7Ns%9AvKn`J=yGjV z{pC};qvGmU@sglHrFp%)w~o2p6epfLhsfMeUbbwSuYJn#l7}U|e@5qv^?9Bn0roLH zDJ$W1LAA1D|J=ltC%Z65K44WbcfO)gqDKikJlF^zT<)dPBT5u@GBCQ>5zySuFGfWO z{LiICr?&}yKDcvD49E3zlf%xI=5|3oDncH2I+b!i&`Rx*K0A`GlhpX`#6CiwCN;F1JTI^h zz7+NlY^yhJ3_j`Z-}ii@c=>U-n=AGn8WJbfkpE=^){*`!dr|e>`J06vy!O~;p0Cw+ z%|riWNDvtgeE;&>xfXkTdoc7UXQl6)($*ZsY+EE{D%6Xldq?7E5%WL3xZtk z48$~P67CkKL)?U{!mKT}QFE#4)9;A_4UCf;HsAmDnMHMp)&`iE|*IdOE1GA{kPx?*f| zWgTkID}Slu@{9f{2&`-CS zHFJ<&_T*eQw+riVou<|89A|o(Eb#jfG-d;Z#boy5>RJqc8)lTNE_}tEYi+p-rwz)5sIM|Zatjo5XuYU7Z8h^RN?EEcNt?J5x z^Ek%M(rO2;PU^xC>?-9CmfI`ihkv^wR9U8#b@tY8Fk4kRau?bXEG-q$vz&i&oC~&M zZK!p+Ecopc&r;PeGvWO1O@5@DZglCXfyv_Ek>0ePgo#hF7n89+t^b!6V2gcYm7=b= z4i));|HCQcQF2uu@7K%HQggWU75d^OE^~{k1bj@-S+; zLoKcQrNTEM#PBf@lK%IYLF}6aCK~eHT~N6#L+L>e85_MJaxL2*Q_Au7`*svB399I8 z=$;l-j41A&ZG&s-3{gZc)~!T?d5~Yv4G%az$Yw6FB2u|Z2-~WG7wOG@LVfFR{d$=y zlmu7^u0JYrX@Z0NawN)ri>sLH%kh4IlxFd|gy(G}${M|$7O)3G97$+2}GC+#~;g}rHyskBU1 zLP0W|e*dg_06S#<7kmWwrnwT2hy2?oA$OZdi67j(7L>=sTd?0@P^}V|m3VGoLWFn- zI1*r5OBWus8*qOBo22W%k3GhDe7*EjRnEKyU$MqbDXU{5u2@6JPGN4M7DM|TYzp#z zDYc)oJ!!ru8*3!8^T&|gxp&>E-*7K$28G7?TZrwp7xAMA*)BY^uja+6L|;1?L zuZ9Q6<#RlgxG}^Dw~r3=k;!-#L&eSx&bm9*#?MOj<^y;Ur{;PJfv!qAcKcn5!-I1A*wpS)Sho1;PTyaCt&9Gq+nb1jcv_gY zB8NpfD)a7IVF0kW)A0I)3eS6IQbsj~ z=kn~N+^Vp|C-EIE- zo-t2DW!?GFjX!PCUBF(04F+qpv?#`~(P=(}mEdf3h#o?@=(gQiEwe{=uIZL&DyBX< zb+fMNF))vO-lXOz;4js1S&l~Zb!NERs51xdo{Yr17gbKPb?f(&&C?@_s}YJ&fhmQf zfrjxJ6P$hlT`MhtM>6MT<{goPjdZNb-S&(ykXqw)GqlB*>6YqUury^qfO2v7h50JA zQtO%P@BE=j83M~)kW}8^A-h+PNC$-fBkdS7X+^gnaN18mxliQ#c&$HrWM&j_R5}2&2?1PC8VvmUH z+$z$2_Vx1wS-n9Q^}t5TeQr12bb`k#l3Yb4Kk$aPr^w`UbWAPlsx77ShA)>7Eph#A zcLL%#76&?-p7Q-Z1R*`dYk$He)*ZL>U(y9~7bUU8OnIJbHug1B>sFc%N23I;j}n%^ zeBI}^QuwS+ad6-{#zFbI?65=~PBTBMJ*8-F3+lRF{63$GXRXBL(dJkhxL8-kdtCH; z`j!ppY?)jCpOArB?WTfVr)1kjtdY{}D5)0|?tj9?s)tn?zkD`rb~(7EIyI!Nc40$0bsH=yH0m}+GkcKw z?;HcMMO2J&p!P?JoQ=WZZr@#}zq*1ncY|G3n6-5hj>fo+{|a7-#O=ilcmK{;UOJ3G z=2CNRSQRsPmKIjGTYyjOVX1CZ$I?i{kqv*G3ykuY<-|Hxn?&}mxzb;7->S9eD3Lg9 zU2k82V9hndbY$7Ll~ugVUA6 zF$~RC6{niGWUiosf_4d&Qk|Rk!FcZKE<5ASeZw75J)&F%I;crNZxd9^bX-<*!0O9= zrv>Yu9Zj?6RcEIZKRE>{DX1;Ll?3V`&=3OcW`I-t2kK|8o`Oc#hz3Zn^a&hIfzYY} zKo=mTgwA6%k$L@i5@gJxikmxo_gld_}GjHt}&S}w#B1Q?E0afP z(sr#m)(CFgN4&(HpCUUj{t>#lcd*}amEY?^hv|bLWFOiMW3OXe;Euj3zlW@SiDFJ8 zK%A5dqFc%B>iT6fW&Q3>*dUa8YB_2q*U?=!{5s^gCfHR76c(iEjao-v8NM!dM*1X16ORC$hgZ4+A>WMce4_n*I(rA?HG+y>lDS9XwGnt}v+)WcT4 z_IM8bKmDIRz0jmXN1l>$F|cF#7@|IF(MR)obQ|64+^*Q14? zX1Ely_VHQgWlDa=i(t1I58j9W9%f;SJ zb~&P*>%+Z=ynpsv^bsd#orkb(<#CVuWW2#Bxc<>j_$I>In<`ElbxPW6nViL?384ci z0+Aew7o3WINwqzWsO~XfB)neoDEMQFI;+tyf&LYnFqY6R;k-)&6Lc)6PfaS{3?yQ@ zL|b3Ou?>zIh_$c)v3$-}Ng>1BI}|;u=ARs~Fjc%g`%>MSMFFu@=H zj3#wpRXeIP>9@}+`*mbvV-3q|mN_A7@pEZXah9c3mQuhMKL@*`d8Dr|QJn!Fr=A}I z)Eu=Ssfq{}EG3@H(DA?Gs5+Dcv)r6N^AvTpIpc%$>(4M-gWKfuUL+R(JNP$Vm|-R! 
ztYy~1k(irjBck0hP04#J_o(ia&8wNo{EuS3u6ae3o{9GJ`L*9L^Mj|Vt42!@RLG$}aXMd^dBzg=W#LpD4zV$p4@l>N?+-@S_iVH1wj+Y-jTLar zcNE@?Xx}Qx*GNeVMQa!xmudS<)=g-WA1nHh*qqG{PUwF^l#t`k#t52|`K}0Wu-&+c zXp~M4c~WpSu3?8y{`q4WPC3Z{<$|wW2yl*7hKOh%y)V!2w6FuOR1*2tbAvso784i!hl|Q4eSK#8HViV^wCC zt+^Q9G=Jnv_R_a~H|#E0Uj7VA zC2=yy+%3;zG{eYen4VbdC~lbd&u4wbs6drbao_3PyM1Vp{M#XPxeGmeQ?MgKllRdN z==wcfE3|69I9wgf>-hd1n(lx|!buGMH2|>t31YT@fB1aj-QZ;NGV7(oNb%zW>f{{?P5GZAona4@qvwuB$B=#7nLP0-5g3VVl5 zEO~4smv{Cv2QN_k#XVnKp|6m2SI*L|SiTW^E;MKNWc%cIoqSW|=LCAvtKr&iXU~~u zU)fLt8l5t7c3T?*CBJXw7k+2k2)625zH)|eGW~Y&B%e#})|9)Og`2(2GfD#8I>)Pm zKHOeqspsh57V>)EKHafriI3@9@W>i2opf~iwR&3mhvu>GrJQ76gLWFN4Sz(30H5Vl z%~eqE`q}2W1fI9`Y}rc)L)25^le?EliAVT7*0dlDar>@JsyBG~kWA$)DO%)Fpu3*a zK~88f4-y?d>$WoXX|nzcPnu+o#}J>F1uj>7e75y@MvQSZPg8~Pqx62kP1P(S#GmR` zDnHo^n&el~knz4_V!^QvEhQRl2Y3ZQwG)) zKly$u7}elI(v_Hg8nE}Szwm`52xa0UjT7bF8fo169s_Z4jzv7=7}gg=497(duDnGN)yjlnm|cz3uqdM0hiE>D9QJ4 zYb>#*CW;{3sr{rn60GJaMaHM8zE)nUQzeIUcZeeY~BJfc6Xr z!T7W=E1&i(TT{D=2svm%kl5hkyEk=Sk7XR4yyVfLC&5N6V0!J|U8 z9NYA^!+c5i@iV5VGe3)g&#$-P)NBabirucMvLQ`K$ZZ?w50E27X*FUrgn-~G{M6g- zPp@5~-JaIyh!1BThv`F~A&DSAk(7(@Zzc2PRhW_BrA!TAj?OyPq3z zX%QynrF7hgy2h6|<16eYkJS+*wR`0j(Yd$Aa{?Ds-P+tv!qBi90dZ8pI)#37bD1~y zUFftQC!TOJP@yZ{f+9;6ZQr99jNnOH;`;jZ*Olvy7%_Vf!=v=6%>WHCq{vjx)|ctK;V8CL|<`2ff(vFeuxc3~W+IC8=WC z4Iax;9r2~mlhWb^za^M_naA2NGU4q*s!DAuXZ8M33{k+%r_+6D&F?dR;fGUD48Gl7 zc^0@yLabiDefyM2u%LgWq1wD<%N>XPdh2I`m?c`t=KK3jD078TWdn|aHCM;0q_})h z(vW?To}b(f{Cyo~?r&kLxK*>K(UckuK0NAiiY@Kx?HwlnSTRa|&&$0bvib_0t5}QO zgbqIY0^>QtBzT?>tZB1G~bhBwUIax}%|D06!?@0?`udb=e zl5(&(gf6udo(|G^o`x>k{BatGtC!R6zL1fNyN;1(cX}_4jMUzBt0@0KEX(|ejQteN zaa09%{nTl9c%Lb|F~tpZw^rN#)x}{#|1e7W?eSh`&}4a8 zk=QqbEi#3xd1(@4irZcqyTgjt31(gqJw5&})B01){?kdamkc&ud=V)J60aFU_g1L~ z!b_US>(LO%I-`EQplu4HiGdOm6U}Ql;wMO8A?6`BY`l!Y`WWTUA38CdQwVGqmjfG) z0OR{Ocmd*JC`!s^iBgnyx^9P6u^^vr-+=O8w*&_j2Eif&ik_?FE#y;8ic&`UpqZq` z5ls&1K&TZe@7n@S&4pbLfdULKi5ybo@Zcs25+;_C6RGZ-Q7Z8r$}BV}UJx`RUlAjQ z>=G&T)O8JZRD{?t+JxQNPWi^F-sDRd;S!bQp|$=XFL!pG*|?h_vajqG9s z!PiR<{;^Ztx_|(Se%YYW*T!N_RPyHIeM)k6$dPZ~m z7t$}@DVw+4rqtDSsHjT23SkD{dSs_xSf^NjB{hDM!%$e)!zeadysM8V*OJPDHG&P9 z62&B_W5Q$|#_k4x$;Tgx`E^8MBbkYgbNAQkj%sqAj`{l#+tA21v;kGZ)8JUi7IUEz(phA*g#<@+g}oFhDM>v$BZu#h<3!XR}8!#lK#J#EP{z@kdLA+{dv? 
z%(>6`$MB&R@x8G23wPq)d%@Ay{0?3|y0$7w;FcCp9@r$=Ixf-~ci0>y_~N!1>ESJ- z*I%yfHg8FB1~0IW`1LTKa%XJhQHLj=dt1<;-J$Tjxp?h08`0(D01kWSoQ5IlG1Q!@ zp{q8sdkG&Kf-31HK*Q(BCW1^Dd_fKXRx7aolf!Y^e)RP3`aj0#2g^C!4yXaNYzud?{3_g|*gq(8fM5JusCs<@3L3PZY`apq!YCM%C1-?kI|n+&1mjaiSYjhYN)I*3+AJzdg0 zmOt{7$IDVSOwVW^kM!=lWJR}v9>t!|VRm;oSe)Teu&~qV&vsKjhx%`KD&Fb$=sLj8 zH*gwFMY@xGi@b4lPVs)SIt^Y$0@I+w*32+>O>HK1ij-FMa_Ah7OBsO-Mz{{C-gDBu3oqTfUIB+dZyVqwNxH?^Krp2l;S^De?g!SlsYxUn`9|1mBc z5}yC&!Q-`j>U5Cp;HGV=$Q4so?YXiE8L{J&_oj$-hbnE;@G2-V_ME1&h&SQvD$b>i zeBNA-QI-8*^eQ~M_0r660}3?An955G%UOw4Mkf9)X+lAOYUHGB zR@fC{ko<>#NDs~@y7in(HmAi~-C1%yMp zWj2T~F2>n#7F|LLLsx*EEjE=(Q~#P`jVA3KG{nHHpu7#AYL_IH{6sK4!bj4`*AvR@Wb}&DU#?QN}98xPYZ_2YUAA2#<+#sgW zxVHIu4519{rn!5KOoe1X>r>6_b+s>vI8bz1w9EJ!&u9>yS`-j%U!lMK5H@4MikDDGBnD`7Q%|+Z!^~;oeyue3%gXT!*I{IAc zHP#9*i#;q9FZnFRU@_m>i0?i%2!7;Uw)dhrPML}a5xsS}B*@nsCDC1{W8(R~#SbMu z=KSi4*_%?^Pl7UEip^vU5w!&fC+Ujra;ya~zi`?uU2P7Mqt#1z(375%Ur4ux875}u z_iaTtzyyC$C6stqA~Q(o&OlQ#_W?(gbcR~0c*fT!rs_cf@BEudLOG`ADoZL%TZYIHW!RoFgYEXKA?wK0zSQ1eeNOxzsvf9{z)uU7Y>} z{$35RM<|>Zqd=ZHHy=nb0|b*VZ@K}c>U{AREE$_G!CPV+IGw4}500MvFD-!M%n4Aw z|3UF1gf5Q(!*~UbEVJ+W1pa{?U_ajq?F<6wY8b!yP>Z}fv%{ihzZC&*#02O05dP> zEn#JikjMhG8nkzK3(ws^-}Del6j5Y|gFA9NJR1QR0O;MS`A$jY4pIrFssH`mE|H4}%HO`FAc0;ag2k4B`*)Ds^T( z{;oh)?$gT88hzOK;>4&8#y(G$*ITW02C1|!thrsn>swX0JobfpO8sWIw!1IwcRPR7 z%b-bD=|Z90{@@im?5LnB0rMxCQLkDu^m`%)wPTnM#WFWlCb0z2N~E%RW!%P1E^SR9 z7tq)Jil;S7Mqpf(InX2d{|+8p2zX6A4JF#ThN)D63&NX5)R>172Fo{d zZ(t4%xV>tW{gPF9)G*VQ*Kfr5M#6J0vwzCmv4rkt>Fpbic7Js^lit;C@IL?Wgs4uB z?Pxr;FyzJ$F(1f0vyw1tG1%v773`msBrr*M>b+4E%# zW`ps7>0|X$jiNt#*3X+lWU{ulNb1b0N*L*tZJyEsWcjTFjX(cOmL3Ck2Zz6v3B`;d zdy5t$k=@f~RmA7sOa9JOn6Hghb@O`4GK!?6@{T6RAZtz?WzyRn#g+&;#yX3`%18cG znC_iRrsZm)yT|q?ASc>S4gV-t=q|3hHpWN*IECgUl%pr zwKL4XvyEOTk6?4H@3%xjsX>dH>F#xdt|9MvMPH%MIbUK$b5h8;?Iua7FdKD}UOd8c z{Cl%N`(tw9k1_loZ?bYG|J?p%dXxQnsK`wJbe==4RX&SxWEZ@z_?`!ugCvG{xN&uj z<;HEBjm32-($wKK0;)YKr~y5FuR@3ppwJ$m{Sj1bc6g@0RxQiN#_(;klvwm1U)oj5 zY}(B#tAEc`sL&n719Wyd$#v1RM`Dt*jro=0QqlLjMVJPfxCmTzmysc zUSzP}Emuq&kE^#Y86dEHlvO*r#e}z0&_=})b_!YCgbr7bF=IzL4O^_2@r9r_6fi|4 zv^Xu8oTD$)C0!8t)g;AYrnrS(LDSh>pmc~7|{;*q!nSg-{X#JoTCQs zNe=<7jcaEAWUjIvl4plm***O!c4@c9f?=trisdC`O>tF-7^N0_(phQ=sFjw~Ew(p; zsqs2TGDf#7o@lK!%h@VOK|T?+ZUUH{|UPam4VzUeaUSWXrLJq5Yd}ROE}ChW?Q^ zpu`wfxpUnQ25PHW_x$HOrpFSR@!Z7q^(Dlcvi$~CP{e@27$I>nI4tKYWbOF&UYr!E zgKO_v7&}+_b9!S%I#088INTq}rTWYef-A*~V!~JRRBoU}@>p}D zOLRM3&095n`aJ?uLgPo&Gt4WNPV^liJa%xA#kq_@J@(|fhW_oUEqBm_6Kt8N@ld>g z^Djk>1WABj(Z zHGF z^Yl)* zO(t$X*Qfi39Yy#)w(66hdc$7JM>egD*L62IyiQ<}&(%&xH_A(B&KhR2AO&gfy!C;g zXT)?_y%k%ObX=%EGflU*e$T3nXsy&I2WQx;Vy z`*GPn6VcyVAHI%%@dwJeQs@ReQLtu%n!{S0%09ZNn`S>l|3%s~9J9ruyBtC{eLr5> zq%}WdsI~Z#G8C@Mp42Uds>?ZITIo5(P4pZJSwA3RfP3um_fu?p2u+ND1C~+^lQMnt zh)IOG9dSe{m!>QYULVPOC^2Vs4Q0A5nWoNv>35UO>Rz(7_JUW~Z@F_PwJEp4WOi<6 z)ut7KD%vgsh0e|kO{qLsna(W~+*Pbd_m7DU@dU#>l=v>kzp}+kYinv8jC-9nv>5JS zt-XnsE?WVGjl1Z>k5YDuI$LjIP720Pi+dHmF2`MS>gf31tQ^6>Qxvw;o_25xcKL?}zjHH8OhZ@0Nj9tAtm=?2 zB@9~~;-5mRodgMz7c{E)OOBHp&@u6}vFsU5V$8!U(EYZJ@u0MRxVH!ia5&-*@-E=i z+I6pz+dP)5jS zZLU(7Jysw2KyYu*8EYtsC0X>l!LalfNXS4MzFT5Hvtf##oX|O6Llop7&W}V|ummpR zUl8yHHwfogJW=N>c3_Xk$ z0nJhUSpY;6jM)2-$yX__>@g@u7nRw2caQY2cdH3zBWm0iS!x~*%Z-W&D1(#c@F%y! 
zy&pCE!ov77rd<7@rB!HG=Fv$h>rYi%9#BDOJ((=&?tW!E=F0+-%^t!~<}OwGryGuZ zt{^JuDImR{eD67RbN^~-(pz{7q5kJbbuN=kRjcf#f)UA5^PqQi2z~2rYV#ni-pV9< z+F(H;fA9^SF*Te&^bW8_;|L5!6UI=!uB-CFyZxEWy$#j_Cs-bS=c%v(-#?#3=|+Db zgzM9y$HQE6+7Hg^YD^=|Ya3Vj>EPdk+p+0Ddu9Ac4fBc-40wsdFcFcfjQj!6WqG-AZ_0D3s0s~cZ()ENRgGIf$&Dpm zh8pihD({mxX;V&O1d_@wjZMHETC&}BYWV%zfIX2PEDNP`AcN34PeeonmkUg|&z~bZ z7HX%rOgI704cP~P%_sNZV2aY=e_sI3(5?w*5-=0=*Z}iJ9IDLF&i2!&9K-h+aTp-w zej0HQ^ZAQF9|XXZ$RjusWlKl-xVQpxefpNOLcE2&Iu_=6z@2c@fe*2+oib#v<4GC3V2GMQ!^apcMcEIs!Zw^b4*vN&UifEOmbY+)AK~3N;ScK|k)vkwQp$iy zj1qVk>-NCvu=+Qye`77`y5D_UdQ4DyN&n`Fkp+qEU%r$kllNkjKUF%dzxJ($@!-M| z#N@vbt5SM&5=_J;^8my~kbQ~bpE9uviK|lM}a-t_Jb|EYdw@#3wVpgUPyM&NI?(64AC$)c-u4j(H z*@QG_iU9lXL@Fnq%{%c`rrLiJT&z?U;@}z{`K4=ttVFVqZhh(8)j-;wHo*&W3s9U5 z2uEgo7m2g=D6w^w?5pWx)qf1<4RFyCs1rX-LUs}9$P&3W{Bu^t}f(G zk{XeDl*}kCxJG7DJ!Z%>B6_nD`tzzD(n4_|qH*f8xgY0*C~aT5 z0L^wm@p*;r#a@mI2&m3tLtq!UJNwZw;Nea7E_t&;(coiI+TQd==CwEDuBQP|{noC0 zbxW@BZS{nufE`Z9?r=MX%-+6^sVRwVEd)IvLhi!9JU{7%JiE5m=iKa_){ov+6$^HF zs{voA@Fd2@rzNRxZxt8LDCq1B_%ljMRHE{3ZY430hn*fIhVCkxf$R+WGGUu>d(8PQ zLt*PvKGVIuwDUtBMo@?R(;r(s(G((6$lBbJ@jHha3fz))Y3%L%LP*Ul_H9ms=nhUs zLo(&te3!*fZ-dVNBD>@?q-!s}SBEC?{A)#$>A(h5V67!HAx%JG`iZZUTj1Rxm9MNz zw*J}x>aZ`ZobD4%==&3>9ER-~o-#aqXlN~BVL1l5Y_X9!NZa+?nSEtFcG_0e*lR)c z4ev94nMED71wxIFR!RPQmPIYUphc;)(jk&Q7N94PQ@mXG$mz*S!%fsn>FY@d#Vd;I ziO8i8CVZd>mqo7n5Z{*ceiLvF*PV`{xCLaGyup5x3ZSr1L=p#!eOt8yC$S8LW8XLm z1W0v60T~PuS^Z!}D@IkTwRLm`obH$|!)MP>!X&`mwOpeZR>K@6DCbvdq{`9&9}aMK zkP)2R#KC+BPR3DEurf(e17k)sM@dsdZ`!Z}LwJc2wtD-QKfN#(@FG5kgP*Z%@s6Oxj`lsGqUJ~$N1 zR|a5QSy`D0XMs{apkx5)({AwWDLp~x8*6Je;QAXI117hA8WzSxStCnJmQ*=M9@2Sh zHJX@Uu}o_osM@XA@rJeoudcPV6(GOVc(8z*aGG%?Kc|isqiSLMX<6;cPYz@}gEF6g z_gu`B5B#6j{{pXIaSLFM_i=5*I=&xYg)M^2! zj~WjiOxreWXM$-IxPkpq;O`+!>C|{v#UGq;NpZ8{Q2|?u7aWs<>o3w`Ln<&1|MM13 zU^+!<7Ny3c)nyc-48Hs>j~C3D|M~39{9}UB$3ue9{?|X2Y~kp~m3$MC%ez}f8>D%kf&$A6EaMsCc=`DOJ`4BB#^!PO z0!<7(c?4seG$4Oe3g2>(1h&$%vI4iy*B9Cp__?{Ov$GAIoW_jG9}~R~3Bd+@{p2J` zn8ZD7aYaQ%phk^b`|iR8pD$jeK74gEiEQwGeT zRXZpZWM@HH8YaWFtxjYhba=9EOa45pSzz`4+ z0McULGAL*q?CtZ1*T6+4IT z`CG&?Kuro?0Irop7WjREwH-Kl3sX|YN#DT69x{89IS)ox*U`Vt%>g+I%nIS%%v%#F zs={j9oVZmXE2YPU^N~pe? zzW#$$ISLYzclFaE8K3LwV68E$)FABuC-!lRt&I&k-c*R5{lv9uI2&%@Ln0g? 
z(wC{dzxIS7jv>DLZm=JiEuUTdw~WVrI)U7UJRLrj?Vwqu0TE3U zkai*c0~Z8_j`_*}|IO+T`iG9^OO;FR`EF2l?$NALC{7qHonvNcdD$iJK^5P96@M$j zljxrY_sZe_WSAh$Bt!~Jd73vY<+mhhysnJrtp0_+x=8Rf1R_~^Y;^Wx&s$Mau_v*o zSg7vOC||P+mUM&u|Cu=1`3AJT(B$%eKD0EW{HOH&zdr|GJty%xDBb!R0j`yMyB!oD z4I5wH2LTtj^%dzhlLtSzQUx?JWRpz5M(YM^!%p9d@&EISuS~bMvXXeWk?P%Xqn4MPTvcB9lY=+nFKj2cn#P)yc`9P{ zC6$FG=1+NaX{eH4qVuE0dN*{LQ+8d}7(ZodATpn`Z9UpoJ)WlJmM;-XTZ~_>Jwn}F zyNBWC?Ch+hqy%v3Y9V<7MS5q4U>+->Yfh|*^v!m^1n*|9%dk}HL&&VBnpm;9*hyws< zOjm_|E9Me!42b4HRdKMt&u70DYwM{h|IZ@)pDQCK&dQJ8|J(|H3C^<4f&I2PH7{@7 zU$c`3q5*2fDtUv6adAssa%N^`V3c+TAO8UAcJ{9%|MQ7jN!SKDZ>cU})Td8$)irpE zRWrjS+Vu>KjCb$eP5d9D+)v+*ZX5$!?v5fsUv;inNNDJGUEn|eM@Gi=e}WU>tf?N{ z{_n!;_bv*5jSde7OHCH`B+Js=$uBM4K3@fU8&gwLDEDbLxzz@i)EZnqdDf1vqd1eHO?Ge71v8bZBUGwjqTT}UBNUV#k_Q{mlqP)nqg_|764GSRl6g* ztebXnC(W#z?&)(kz-=7;v}B_je>$g z1&%dCUGIRyYdc$$c6pEo%YCaVm^qvuHD7HKfOE{p?UwZe7lAb1nWgRRQU^je1$bV= zTRx?VHeI&;9ZoSdp;Z3yQ{A-EdJcD8`EW+H)b){>Q)ummA1tld$W!2T_=;(AD;!p!N zJ@drUl?8(|IJ?=~gKxqv8BlmsjY8@?*^cyKeVn(ZqphF6gq0eG;~jV?kVFlinBFBr z!@@0sXXQ3d!=L8{4$&a8uX<-OT@<5%tR%WMd#MU`0Y)6&9{1;GzL@q0Z7?7LTmM<7 zgZE$UTQH3O$YC5F)NsDn{rdZRn@L{7&d4g)Lg0~K-udxLpO+N3etM>-bgMAdYKK&b z)mHn3SJs;N55*-X2aNCn!l(b#pj{9C62Ra5bDvzu0*6Aiy8k>a_)0i;hSAV&P-{K^ z4Z<(up9%fEA5&AAsUAv(Z>#_v_%Stg8}r!Mn3}r!Dm*jg%akww`5>;4H|c%_644U@ zfeR={gD9!wMjbqVpc5XSok1so!$I51%1YHySohDLvT|}&8pRL@YLESo_3>rewr-o{ zH^1w5?%aW$4XnPERaPOb{fL$p=K4R2Nzk#DeE8?(1M1fOy}hAzrhhX4+1$Ul?{o*! z>OFP#>zAja%^!t4XAH^~mzO6ks($_Y1w+5m@(?`P;J=S#K2!)4ll}K|_9vG(4MBc> ze!x(C4fd1p##mWd|DzUf{gYpvliU9HeFvnC2ADcxKEA$SoyKdmzg53IV=n|b{O-=q z9i_bgzGp|dG~s`yuJAhD+<)`@*ZQ^bKTY?UXP*1az0W;=wfxTS z_dDO^vwc6G&x^$O8AZP>Y}IF$-@5CciSxU;|DQ{C7v0gXnI(~gqKv5q2mN`2_*c?- znX^VGv~02R=sRR$M??MO2Yhvn&hHuC5)%`H86Z29v2^ljeZIOn*JuZAZ3~F07J786 zT`6P!p~a+f}~3*n?@r5^qkrHrd%#^`$wwxn=87>_Xg4zou|Y&471_5YAU z&(6sqP959>c#n4@x4>fm{%^?GJN?p%LAhyZX*l7y*bpwLSMJrVS6azk!XZ%acU@xW z6k1gv+Bx8RE$c70tbpdObBT_vjNWn5>dz!ksoh*F_{K1v{k4C*q%8guh#1a|rf~$^ z&Cu}Ws6a~%yd1QT7P@n!(B-ZT@tT#s?=8_1o4)U=t`)n6#QqWc&Aay;J!M}mR!u>d z?Db$AOjsAu#Nu!4i9A9P$4eex`{nVq!a*x9^k-96-66Z%+Rej*=(G=z6=eMPadNxd zo@dK1^zgz?bD!p~e%bpdvO8#`&|jMkSUgU582ZNczV`{)1o?}lEu-suoK+|{@a?T= zMw@Y_MxCJ_T6Cf1D1bG1zi^&)#fK@M2Aj){6nZwTw>Udb<>I6x{X603pA%0vtowrs z?9AoQFZUa{tl0E}ecoGOz8kl^?HJ#xh-ZeI2`8zG56^wL+YAkf-seVrXL;rF^73$f z+v~fo5|qPZP)-$T>Y1yi<5s=V#tG4HdK1Zp;k<$<>UoXIpwD^}1+|6#oEB-NqpC

tnF%pn(a!!K_Y7i*O6q`&!L)<;rYysc;*c0tJozt*dIOEd5Ao3_{#qor$gme%LNeP^7> z=SVr|H{NHQ-sDnxr2V#yh>Q${jERB;dUEg?cvEQ3y`T8~Gy^V9IPff=mAP`(@058C zt-Zwrra8{QkPH*DIOq5L(o3TNWeSC&q=f8Fi2MMn9iHF)YHTTCU!OpAz7)a4c^|b8 z9BaRwdxJYl+!%!q>3o4}R#yo!Ll&8Is*zE}lO%ev*pL~{#?WcJ>uGrD_rsYxiPTfC zCG)+CRzWEV*{aM~#i+*h{C;S5j})DE(P(d> zQas{~Vsq!;nA)%;MMZu48scBvG>4}lfUn{&SW)uZ-4hEWZ$)3LdvxVnzZ`O`qt0rI81Fzfx8U-aD- zLaSuYkFYU>+}zzQZ}} zc;YLd8P%$hnWa{J5QI>ix^g;73;ZTkamVDX+??=p=gv{HGczA}FYzkfalyP+7tpo-LNJDT>aQOQp>w=Kj7@tc|@J~-1 zgyN$m)R<4;_82)~@|7pnR38owUc_MH_%|5wnR~aYs_du;Q}~>8{P(;WUAnX19+$xd z@SdMyL$4Adp=;QFy#<%rjj(Bh12@gY;<0l|Yo6_eh5LoFiO)QUJ}2}+wtG){}L9Pupbkz zTmk!Y0hRY(@$lq#8BBndJG!%bHkR(uU1L8xYe$ummL{Yvd>72SnqB$+HGg>A+|Wj$ zsOadEzPsJ(un8j=>~ktJVK^^m*p@MvOxOGMHeyqN~nc>+yvYV74b)3pr zgUuP7{O2sbHEWDbO|9fKKTBCxbPT9koFe|Wh`|sL5V(c|b`)yv=@(B6Q+|djjkk}Z zs;a86Q=*>;%gNOsQ;&6#zJ_I(RRyvm{5L)iu+t7wF- z`2LF$^%nT=YW0ii`xDLk=a=Nwd1#IJGLd~y&?Q77{jW+rj8=#Z1UOa~Y{#(5ESN?! z=RmE%;Dd{^R%#ojQXhr}ic{m3CyPQWwwtU_Dga9mjfc36jEsapucj^Ms0F(rQNgR~ zBf@{j1C|k|h>D6rt5DE;n_LQBrKO|M?`it+)iLA<>HtYcv!^x=+>6k#{jxM4rA}J#W01EeME>hUM4}kKVV9seg#q;m| z0w+e=ivo<`nF@odyDw!c8g@z?J#&Z6aBy_g)Xz*0J z0BhgCSRJLId?2;<0Lf| zU{a%%_l6A{l$D3kt`xKn~RZp*`qT=fYCxM+IN`RO<-5&# z`~m{v@sM$4B_*35AoB*MSTr51fhpR(eI4fC%-&X{~(+=A`cN|pvEM2Wh(U-Sk^_RfE6WitcSaW5ardL8 znRnGf!KrMwLn9vS%w}PQE2nEe0&$< z;=C%Cu?sd^mHoJ^XnsvJzM-L^Uhed-g?DI0K#T`qQ9@I8YVAVe5DEl64XhVAeNNiQ zK_-*uDz&*t*KacY*=h8((pvj$%HOm>Hy5Fa%Ivsv*$hDJ9Vg^PL{0)FjFCs(V8r?J zef|AW4ajfCbh7H8;;|b-m*>0f6Z-xdS~H$hR8si~A^#F-K|w(;=?^a=~n5C6ghb_>BHg=rQaeukF{{fPt>{p-J8h7#0+Dz)9sU7%`zoF;UoOSE&P}Kz1BvquxiNJL zaSnuv$eF)Bg3l5tu`)l2(ogzfulnX_w!2k8v<%82}NF+iHw3MLEe{*{RLWHxx(r@ndRb*XTundh}pLAt#Jn?Dj%o(@d89;t#2Pp&y?}B>i$K z8DU|B$c5Gj3LY(XiV|LU-5eq=D*7!My8~{83)a21*P+X8c%}KJ?j0o3P((T3E-Y|g z-L6>Kow?>2Gz*K5FUfUGqWQ`2gn{)==XaSn3H6M(6h(^DTR0zW#~ z*>z%_Ak4fx#qz@HiEelYxjvK?r`$9V6pYo4r=kN81a-F^~ z8gT#Q)njM?v`}z#(H#zugc|M2cc*;LoLPWZ1#AO6y}381IZ+Le$gHI_=!Tv(_InlA zts-3)BkZwcicMx>cj4sw7p@2KAz3IFpQZ-gBbyN4Q3?{I3Uxj;T?|*SC_&Q z>l+wQP*8}K+TM6<>sP$Ih|U0oj@TIYiDrwVsP=`%3LQ_#I3F2_s69TQ6M(^C$Mn5* zVp5fuz3-4Rm}wLZ>+!*{^^lEaHIs#)$`dY?kI5-4*=Oq9f}|lMJIygrm=L>x>yP1* z+lqh1=RdjlUkr`fU@Z3PhOyp9u1#s12w%;88u<2}H{N1j{PRr{#7#*5Q{>>wZU1i* d=RZGaL4sB&+A06_+B)J*jE&5w48vpR{sp3rtiJ#N literal 83368 zcmeFZcUV;0mn~X|3W@|1s0fGx1`t#PRFVe+7yyYPQA9zBO3t7nh$xs4kt7I`Bq>R< z1QC@aIVZ_EXX=d&o?rKS-LLz0y7!-3-#H(ORkhb%YpyxR7;~)ceo01>cKwd^Boc}C zoYZM~5^2pz5{WX0dKG?SxB9dveyqDKb;XQCVsRt>r|{|=y+I=FBAq+^m!h@*K$Fc? 
z#m>wbvYy1NbuQ8j_ujq@ao%N|TAB8CRRQhj0pp0b_oTcZ{n(nMI%sNAz7!v_k2LG(fatm9BP}swV)cpRxC+BDa&2&rLZt0Je95>@q zR#MuEp*T0GsYX%Z52UOhXZ&=gW*EU=7${b;;>YfjIg}*Q$<@@>_{&2Q1wDRj+|^2f z;jUUOg1?-0{`WrQJfTQ`N$rDHc2;O8M-kPN!j9v|k4K)cmG<`at$X{|{Un1LWktnM z+gCew@7=qX)ys|H(W6J-zkin(m7$eOF>bOJ(Y$o&Y2RP^o?o9Z*F-Apb$J>T6x4S` z-_w5!mkt+U-sS6_k1tyHz%jR_x!@XtrGk8@4u4m z=%DX*t3L6TK9w`u_wE1rTl!DU!~f`qc8OL|KIORZipOR+b9!uiocZX@vakz2G65&T zY5ZG^yZxj&2i1w>UR>iduWIMy+LG zvWDDD1bCC5b6n90tD&6|)Wg{+lf}cO>)ChOk9;}jk9%@v;lNia$QlFkI2u=x1t(+m5u zi3yXX>EdtSq{e~`;!jxgxiGYkTHm;FBT6rlJu3Usr;i__$UnVaK6|#&qu@C?C}0N< zqX*|wt5TEuuV26HgNII6X@%XobxX4<;D?vIuw1)jB!h&lZ5p5AOz<@&C8fx~VC8+> z+~Yxxh)T=X&a%%Z`)hn&zT}vFJ8CV0Y_B947kPxAAK5D2aU^D>k1y!xtzY@>o0)i2F3HK2qnHmhrE8g&pCRDGa`GESs`iP?n(O=c zgLi%;k3RbHD_)JcE(&hV_ZP8RI2Ce*B;rq+h?t zNAI?C$FF{uGeNH1fs12tCjL%Kv;4Y0Z#Jb_L=~cdT;O{1*0CbzqGa6h8?HVSL$K)x*65$3p1naF1L%5*hO`I7+v15VUjsx zt}nX3CQf$gDu!auN1>&rsHD`{+4i!*Mql=7x7`yos!yd6#Xk?7%c0q7k+uwrO_we&9 zaM+JHMnoJ#>EZkQ`}sfazy3zq+rMJeQnJQNgqcc%n7T?Y@Ce@ebshETvZl=q9>xoU z1n#nDqR{G}7^wRs>BTzURXqRuh3tU?2UG#~fLeb3{Ha(<+($C?O#Qxt2V*dZ0AVYc z<>F?@CzOEU{kOx^|GTUIja_}xHi}9rxkk&$;pgka(@j=d#>PXIV%K|rg>B}#IMXdo zzM`U18+z`A!&F_jzgbCdS#Tb2z-Fwo;N|{Rj&l>e%tFSEON%Xz^Mf1roK;j*bYXZS zV4!^I(n!^1t>Qq@`GxV)?4A%0%evS2&g=$pSaV~lz3=FL{GEba0~)& zX{IACwf&LkRPDQvkZWVVi?9o7$4kXqA{D|BCBtcV0UY0|#!$~}=hw}csy7I7n7VT7 zRxiD6>qKvb5Xu37Qh!`_7O*k93u@j3(#qSnS94v~DZIb9k>EZYk};|7O~1k}T>hZ) z%-g#PBt?ZMd68Z{@unPgut80%yKS81KtiFi34+v_BVwywE7gW=hpy&0=Lhj+&9noZ z8Ff=4&-h`>0skGW=09>og4$i^E5W5URX%cV(N1ZyY*4Nt;3Grs()@5x;~iW$B6IZu zy`a@lBa@c>VVB!eb$TWiZl6lU?CL(=P~)N7ek4>Y+cDifb@)=8W~S|AUszv{fPBZ- zCkmnxw3YGNd5T__Dp2r_R_&=giJHvI&CQKJ9^Jlud)6f@Uh(sT>+6YuzCP9s;R$nHSqq>`SOXfIvDMc&OG7B0OzY&-p_*5!T zqh68aFs~?^xaSq0mY8v4s#UX1_}jN{wewshcizb_VA5)g)j;i7&e@wr8b0D5gojdf z@Oj)jvxxQW_KJ?(dX1@fvS$kT1dSV2GLP6z4J7Ck(y^SFK%tXSPuqYzX)Kdt^5gM# z{;Y+uAG{iA3++tVQja2KTD-7G&aa{ zrLEDh(%^CNDLMX5&Cg+z$CHjLHEb3$rBen?laZs-*r~JKLD{8Z^P!e92tP)%Iu36pVHLI7Pj@3#LuVLkdpOi@vPf==8R?A{?_3CV z#UuF<#qp=~o3_sov^8Du37fd%cy?D;m&}`^w`yb)X*k7v-#IFGDW?d2`@WvtesQwq zTmX2QGfVphd+BO?BSY@5Tnn|Pw9xo{E>HdZ{3?BMFSYETq{3}3n?zmOLE z9p`%Y`!nErgNpsllv)iUQ?)wci@mZz0oko_K~6R@^T~Fdm1+tf@B9kCz{vQaxTt8m zu%)4HS9J?JKx>ZinVg#2D6~th_vzQtvx?mK{&HU2Um?>GiD{{g-s#v#4yD2|W(>JO zHmAnk4y2#IRAd|>QSwHBTU?2u$$d3!%#kZ4MKpcSiz0k>b zN?Cc^(PI-L2aoFSJbxi?{!{L2s=cy-{@&gKwi7)ofJg`k@)ZQ93Kpxzjn{6qISsF8 znxK8M!;L{gS@~G7f%Qmm*@rr4Dte`X0ey!t*)y)rabeA`L~J6SAvgB{&5L6jG0KMBtp<_cXABZE zu?>|49qca8pFdxbL;$E%4?x&(VR+?`Fj=f+&ni)C5SRJ0}1%z8M;+juf7)-XSUv>k2B)Mx_Tzu=N4U})y!$0H>_M{>>Dl5X94@E|jx_?3oXL4!yL=YRk~}W~HL;ePT$YRCGIQlZ_gpqM~p|AeQfFn0W0ct1b_x4`=nqrt`WS!j?y- zAOLRmeA6%i6oAiI>{u-~6En8!MptPNx#yjiD+4Bh7#qHgs~Y;Cpm|T({BSn8fquKF ztvPkD!~7JR{m5UKr05zddRF^y>~6T*iDIFa#=w#0E2XD?S<@3Y@x9TeJHXw_Pkc0g zbJZlPgw%x#%*RcC|3$@;Sg-^*hIx%qO`4l6b;`1_aAVj3Js3heL+*3V3t7{t!u>W< zTzak89`ap-DJ^vD=Ka-!7+&)=1s=xqp|t+BbURaLuPG~61Bs4mlqMIP8Ec6D6zFz+ z69JGT<k`gcR*%PD zSO?bUT&V&C-A+G`ODbE{>IFF@r-?Z&EgaVQ`qTfoSzGP{PEJl?k(?4+*5{{)zC-9) zi23dG2`Yxs!jOd0Ec%VXg2xLm>+{uWCen=oV*LE$VE*E*&Xkzg>58DG2|y3o#!V)6 z1OHfX{ehGgM@U0aQTkxmYE9aceN}3e^B@6YP>&cS!es&+$@48%!_Ap(~UuM-NE+)E#6Wmu1*NDFGUKnHuuerl86z-AMJmRv>; z;RTVG%hI8AEpFZb2}zH#&N%7}14v39wnqB4Vw zQ5qyp)*{cdip z0gMTKp*3sPM6!l<-2~okpOA8Nbc76fdv=&)% zB2PQ)>C_P0l_L6&LolXoUR)EjGLPHDf3(%bhV%u0?!*w?R&kh8S3oYeakWYgtK9v3H9 zAllJ!cy&-sncUUxCPrM={`oOVEuCYO*%ikrv$_A-Pc!`81AoU)dU03&|K0}Pzfn{O zIk`mz{RuUbOeGZ+>z{`U6HQtOLb%*@s(li=s}-0Cxs`7idk}D6?ONv(q2^=58RO_u zZFU+ge^*Gumr(}t`nwdpkEchgHoi1P?vS3Ow$HwmuTpK|)toH(EJnlLxB9#G|d;64dX`K0yTm 
zH&db3uKsXEAzVhf%MhQ77rxNPIpJ<=QWaH&n@B=t;bPRX?G0nd1ypuKj(1~lrDqcZ zDF^TnLpO$~gk9E2`Pef-wtbQ7HgoZ)r^aKcFltfTiEYPjzctXsH(TnRqn*jf$Y96? z4^+??Mp6bs*}(1(k)$e}Sa%FU0}1hnUPBcs9z^-)_GJjeK5-7uB_vS|>o!yy-VN-P z#Zt0fVa0wz;N3P};!87oyhKcqS-5O+_nps(s){=(t~n{q9i4q?c65_gh-RtAm*}+3 z1=kW@+B+^*i&scDWy z&pC_yjV}NA_ANp!#W>0{Fo1YvIxfy(Gx1#c7&@Od-*~6-uw>#*R+_d?xgmC9Gd*p` z$9W-q&_j5Ad!O!n)zk8JE{}8xR53^s(iO;5kR~W?l&sPSZ4W9%Xn6R%MAPR@;^)I` zAHCsX^(yQbRg|UJ_@dI;q%xu$o!w=&I|h#H%3x|{1}dtSnTpolrH-rsly|5@>_t?? zKYw1d|5TTHM+XAW!;O1h@mw)I+u)#eYkf!1 ze!toG>=hNwgo08~P=Goa85rjJt)xU|fxL*45E~I8HF-?nv(8#*DC?S7d#y`WS(_u@O0LtwnC}C zdFynGenyFH(CqNNcpC`Pd`kA>L6@@V%W-`UD0cX#REB#7pRHo?&jlV#LUEU;oke#z z*cm`;F?h5?NdkeqDQe~f$Z9QTWpHYSlZfBvv)EbAt1dHb{0ZJ2Fdx#CtY?I z?7RmI5CepCf4dU`ZNGL@%NQ`Rd?UR$$NW`p;Gi?M$+^*;U z?DrS_`!8fp^g^O!o}ac6+UYRE=%>p&KgJe4MlUjwlP`%BK#EU4XPM(h1TeVUm`NP; zZQI$g&Z403sclEDe?u$Ry8Gvq;=UVNjghA&pA0l#$qE?`jbwKru*riC;$CU@Q(Mb- z=1|gSpgOxk_ZcbWA95Vd>rhSlc|LBu{BpFI%BT{xc5BXELVAU2x%c90)curO74MZ} zR0B-E@826}eQiL^DhAMCSCOTUv)2DNVQU^9+RdbX`!DE~j*I48mLvVrzcX~=-_(V8|LPlUj< zfej|LfTv~wOe;U>l_G+F(<@)t2}VIQZX{s@0B8sFcwv$`S&e=`DYgX!TF>qL??a3; z%ZA4enJR7*LDm6s_6rfUn2&^Pg$iBe>m)NqlC z+Lp$N#?ktn0we}G&wI7_58y0{VH%|uo`d)EJ8An;MEqJt*-39LfP)4fb zaXDmA{Q>ob%Pd)S2z+}3yGs!LtX1aT%E-;G%?y1)uMcTU+_`h-ye?32GGTEbYL!l* z=U{zOe{DhsL>xkXo*nNFw~b}7f^PsND+9fibJmLHoRdCB{}TnSu7$ib{qz;1r>HS^ zLtizmq9Z=U(cH{l8dG= zg`1`7-1raijDe6Vi@YM_rD2)qt*F?i{Ul$JZE>5-k>qn^2eJ^&sA6b%J8>?Aw?6 z!K_eDR7PJ_NH}4eY#e{+k6`+Rfl%C7@i0@Chd=TG!EdP9`T5OqoGA#RiG*13zCuQL z5NPWAw{JI5Nv@6dMaK=*ed>k+ULC8^fKg%v5usDSVs<8<1VG!cnNSGg(41otwp^H~ zkd=JAtsDiSSp2v_A?swVj@M*QxelrokPo0eJ*+H%+Q;=u{8^mjp6`yN2uiiT)}iY? z^Dm10GJYeLnxL1Ll(sj|{7_Eby7fV}!|Q;6rq5>Gp@d|<;D%(($(U;--t^s~!$H`h zHbpM4?vozT8}TW*U8QvS35&3x%OxaTP=O~q1fpy!zCGJh^Wn-kR%+{(E$UWH7VIwg zggE-eXp9gci`0T{FJ{0D1n2<){jvrr(IGG<+DP$Nzo0sF|MVUI%4~A|`gMY}Hl&D< znWYR0x$Y#SYF{_qN%ZiYSAl_I@D~wIA=ICS zEG0|C`&@5$wCr@Cb-R0}-83D$Jz+QMlFKG94x{R#zFK}+!xTz%c4UyC?S?+<6)qJO zy{vQ|#y+6d21`S?90_=bu;oC@(o_nA1ZF;J+7~btl|q7=ipBiW@*<*#6djj!VoMy& zW%~8x7L7K(ioA3aH?p0`L_f2bbvBGkLM`@YQF^OM&h-SK08wY2zJ9@@K!O_h>>Tqg z8aFmaYad<>L9OkFuAaPeXB3U9)Y*3Qu#*slBM$FaEo-&iSS)M1xZUR?y?uPhuO&H~ z9i0lcUN+1sUBjeKG|Y@d28VV1wZW z-pO4k6p0EA{S8We+0t@K3~i)Yu~F*`5ab52lpxc80H0aZdP36zn1XjJ{3{UaFb}JI)Z_M8Oa>)d^t7P5N-=ITPrgt%}(5L)3C#_ z_LrNJYbSo)J3~WL&1%~d{Q2|e{k{iXE;)3h6*r+k=DE>p!5r8ghdm*NHb~aeTtAAs zk+@nr`m~kFY)OW7AF+Tj%^~T)W~-LOcUm>xL5^4)49C!f=C!HH9Y$lh)U`v zpg-a}sBb?&MNE3v8-qZwVX7s;6yvjV@j9MAi+)nGD=I39@{?Z#SCDsj6c__FZ1J2P z~i2UDPD5v~Du>t_5uTd39QL4MpeFwX4ETWGGGW9xSN8<=foJ0AZ4 z0nMkCB{Dk>%y#+=3<*Kl6azES~#8vPXy3ksAB01 z+fX)ni6Ivve!A6g*6dtzOJP?3Q4o40Rn$c&tFrbkHWHZk^(IRx0ncqW3*#z|p&g#W zLy+}EuxF!>9MJ=GZWamQeP7KXTvsjuZMnxg z1o|MHI)IH3iscy(M)WkC>KF1Fo_7FwHe()>r_sbeMa@2XMQ0sCqX*7M5C>{)0FHF? 
z+?ct=VW%Kp-{bZ(BSfz{VQi|wL<>EpW!WnKT0?5F)1xqO|BJ70k9?uhGA-hSNal-$ zN7xe@VNLdu1ro?*^*d(@Jz4h3_`hC=#OOjeVQa}=oI;OXV@#a<0gYkA8806nVS^;3 zDx(3&o>s05$WACr>F{ZWVQVy{TeV=FQO3V9NO-Y|A_YeN{;3{g)R647(7}dmR)Wk& zBtK{dC*U9;-nm)k7&2RSAb!gxOB*v905HZB9VhJw)(ru`!j?>bTmw*2I{@0Gtd~W~e z?`jl%eSK+n992oTG&*zU3}Nv5UyLv*o`z{nc z2{4F4Uip_c>$*NFch0(@aiben)&kFUp|}!e2xxEo{QS{OFeX6F2n-6+QdiHgi5OCT zZo75~HV8gGKCo_o7{36)vkN?)2Lw1^;z0C{BTZxHBK^oN>o%2ZiIRa#mYizt)K?ja zokoxeW3^%SA__7liqqvbK}IIR#TR289LHV+!J)Csg~+$Rr2$<}4&Fv}Lv{p^hRCC} z2=;zCMs>2?gHO}4?#2oOtEw=%AWtbadU#lTwn#QPvw={dGWoIc1E&#lY-0A-fBp64 zR(ODNw&NmLaS)0mdL5IL^dh&LZMws3(~Qo^c6~`6Uz~DpdOq7_k5B4*`5CA6Hky^P zh&wL8u|+8Jz*XRqk%;8Tz0)r;6)_?azM_t~o+eyU;_x+CMz4q_mU`v=z#AGXOEx z(}jV691ytx4)X(@X7bqRzvCZ(6CgUtb zCj|KenPuYbtoA9xT%BA!kjp;;VwOHs1l09bx$Xgl7Fq-v)4%bdOVOz=|2+pI zrqWqUiA;Ya32}w`4V*W+&PS=N?JV zg(<&Zgi4OVyaU95Jd+dVqX|QQV~m((Lt$SBz*S1G@+xpm%-{uBQoYJH9dN2ZNZE#?$onM@+pI}UC zYAT#c=vuC0S9h4K0^TBW95z5h;w=Rgm18KIAT6TkG&b101$x31O%66pBiHYTEs}@F zeynJ(?6Ec%NSHbvqS@0b;oxzdK3c32-+ulC^*@RHlWd!hj?@B5y<=ZwI7(G21fvpo z>%a>|a92xnHQB}q-3znhm{G#QMR+RF;Q0M_GdlG}Y?-UooCA{}2(O^6`2RFUub2 z!$@50F7BK6D)SkGWB&?0m!L8b;=#L~{bzu%(B8e_$TwKW4~~^+!cAefb^h>SVq$U{ zmV*z@X)+qkQKH@hd_(MH$lWNp^DtI9F_FBlj?L+V3(+$}#H67$GK>Q^`E5G{^a&}9 z)$|)X?r9bcq>R}TToPr#7@4m`%Jit zPaee{(<3m@RHu)&hTP4v+uU|=7rVq0g{1Es!6mt*-wpWlpO6vIblpR!FWb(Ro+voV%s`^jO zwAqUm)NSwmjL(;FR85D_vuA)!A*%KK@0^8Vu~`k>s3G_ZV!;SssTt4y&|3P4<^$w% zKv2cuk<3lrOL0z%{SAJbYQA2cA1z$Z{=2IS{+-V5Zl`&{F~Fnq1M!Qc$sWr^YuAw} zECJyjgjuzsN3LyFPoj_vA68iBo zt!s>*e^Avo+1Kb*SM2ZNpSLe4lq=?M@ji1&aY}m1;c9gYTi%&dN4Q5dGvAF{wj=^TfJmjNYCF3^DQOnOG`^60QQzcotPnrAyxg8B*ZoOcIT?QSMgR7k<1+zZSxFj zk)=<-vvBrZ#bTrpwoFxJ<Wijz-PK=BYnkQTJ!rQ|+ zn>P}#?VPabf*}&N=b~rLsyVG+?r&ZHpzZLa@O&xdq3gq7gJ}HU!MIsjSuvW)v9Z4L zcT~w$CLM9aaNX$ zH(NV9C(&+{3j4)*;gyQ2s%1~vm->2Jc>3w+=#a_1&xbjU9ui<%YmF(xYmOVRT${(% z#ALXvtW4OCu;=~$_6%?cM?Lp)aw3i10+@mRN(dm+LrplQG!*WXesY%zq&}Qy%+1S- z`S2ksIvN$j9Qq^CrB5<)03E;`0sSHr4NW~zjMOh){0Z$7eTpC-pBij$Xn}lSavm~2@RlkjElo92F?rw=meB_7NSmUN^hDJteDWruLOEC+ZJ=ZR>H{ad6 zGD$~PZ5UaOwbQ51I#a9!{=;w7hyJYc#_be;Pv`DEe~0Di>#Mi5x-ajMhmT2=BFpix z@!7_!&i{cgog)ybYD{uc^seQR*haXkrjzEnPe2id6mSQItMA{>2VdI8z;FmYCz$Z1 zI4~1Wh|5MCj89538r<~l^$4Aaz~DPCu^k9Rz;M(cysIw`Hu4e)$t}KeX4r z`+9UoG0vFM-enk$&5bCZ>}7G5waXwMURZTaM`zETJ!(m3mv11%jgC@8z!L5w#A2`{ zy*b^jMlQyVUOm!4LrPpH&7PIpe0?}aK_@aYlGMspYi@ROmoo@#^3^jdYmiTpwUUdj zB3-@q%lwnnYG)Jgl~3twZy2e3ot`dC3SBA@o$?c+Nd5dd>7Ref2IcIx>G2YO31!_5 z!A#bcA<`jiaOH@{%j?v`{*@&A1mOg!dyT#TW0otodkS+@)X0|ur-MeZyH8l?_ivS2 z9>v6=c00@y;Sx199San-JJh|#dOoL^rK(Emv7}K9q;p7qVe+>OYW{rSb{mVe{ylg1 zA^-tG<t?#>U1b(Xg+4{rkf? 
z;se;fI4{IuWo4zMrGEWHjHl`AUjwX~1ge5;G*DbzO~-QVYmM4I2GT>meYUew zm7g#8=$80@h6%Si_Swq*=o_V5I2DzaHU}W~^7ZSTZ$ymMsn|~0QZX2s*xekPIjQD{ zDIjvXg$KvV@YiBkk(>9*;Bc2{SB+R%^?SJ&zAA}tt*l7%)59iDeyx0S#5&@AiHS!^ zqW(MI7z$jJm6kRtf3xzBJ=2(j&_vaPB=*(PQhW9DNu(7oK=K>vHMHXlD;q+bf^y;1 z_ogP6lM>o@Uc`*9IYSZ=6;=DVGLYSARBLD4Ds%2qa&9V${cYg8)%(etqbHi&y01}e zU>5u!aDF8?L%0s%T3gl^pe3pjCht|BG;QA;oh!idPWJlGbt(#V^ z8!*E<<5mM;M92teI6F2hG2b-kco(0hq+!;p9DF`{a%7}7!{(E_`?oo!pI~&J7 zA>1D1;SmuLagorhUI~`DLT%^0r1%u#Wioi+&+J)^YI3iv`F}90EwAA2|K?QrZ+@wB zL=w$ScEB6BO}*URBO@Y4Mn~7JTjzW1P6&47B9GsT7oSi;k!BdTY&mw~ge8jd_m}&V zlauMwbXUGQWG(#}XFi;pOSpDMRdojml^rEm{j(`2FRwAIj_7bqYYLt?F$bNuMn7ry zvj_kZRBIdvf-6fs%iiYd)vF#aS3t|@iQDGE1pco0^=lO+CE@S@TtZ?%3xJc0%W=F| zXj(HKrxZaARAVfmxuL~b5AV(C(@<9S^!4=-?x?i1v{#WqE6WivNHv3gAQuP6x6Uz` zfSbyOo9gOjA$lJ@eAxZbBM{q+!jEWc3kV3n?T7sYI}PdjkvcfSu9}!=DJq%+1vA!D zt=z3`PtqcZjfI8wt=;sH)zmAi+qXxir)&Br4<9}}ts=O0FO{>OUqis=2NJ>2(cI4H z*HXzJUWtZsje1AtY@2Z|XFxK+H^#58-CPs}`XI^6%TL%k=0gRyy%wh@`fJdl=H9cX1BX3Iii!@Oo`&k20?QVt=9e#D=wl84 ztc!5cqFhgObTmbyXnlq{FV!Ul1v9T3Ve-9jH;jXSA!k7z*i2iImz%3w8hDU_`i;6i zyn|Nas^ajl03wrT$HJvWVw9ra9@=7e1& zf=}+k0cd>y3QyPU2$85_DUMCOleVnCboXv@w^sCH0;G`33sQ(wJg_Ng@sLDiM*Y(8 zW7U|oSM~MlZ>IVBZg&>nd2R_zzNn~(v}OH+m>450#*G`DK*}BM?Y3x9{@y)0I*JCy z6E3uWe;nGN8GY#R;m@T$%@q#%1+t%LJDf&!3;Fu)=e(=WSiS16ygCs9rn@I|`1Hry zBP;Lk+OmGOUv@$-X|du44UNagk4M7f>u#pCHP2_ZP&xrGz@%51o7;}K7--H+*S@Bx zSP5jAg(pf78aWIW^b!ATVo4@OMjG%11N`1h(|#looLcx11etOKOid`%ilnWrjpPpn zwGK=NVx!fxJeoljH;+EyI?wSOPPgo28Yf@H0|MqpFCFm41|;V8JP+fKPN?jr0Wvl9 zNr@ZLz?YBQauT!9lr20_^X>YPNJUi$8ubqnSzf(*Mtt*K{t zGxch049d6VH^Kka(*2W91z)PHtX$Aekxr9!RoG9$m8@EH478_9R-GoFG!oBYUV7pE z^5xMt#>Xf>B_zydt|3k)uU}6hoaAx}XbYqm)|qyErOniIntn0jn$XW22~0SfeTjCR zLU_fkqdshKoVzkri9+GGBHu*{P@v7&3F{MP~?@_v=(EYv;;r@rxxZy&p-Rv z1{=?0oBj}vm{?ibN5LT{CG}K_gL3VLnankuo80~Un4RHmJgWc5({pVZh&Yw}sQ7c$ zm}cM#+zjl(EG#U@7AK%Oy?ptyI@V+Pi+i-orzq_KB#_&{3(_nHRr4|@kU^jn$i^s7 z54W^5HfCs-z%KzPHVnfhtpZF&8?GWN6ne1B(Jo@a}Gl<`e>mQ)<8jfb(Vga!t zP7R`M0DTct5Ub!dM#ANaY5oeq6u7m2SBfv8W2?;|(LjZUO zyg_JezCPY=Rg!$Gg3J5-B&-cWjjF>K0-90qfVqKE+2dI%>mP(*Dx$02ynYQ?aL=w? 
zZw02_hxMAL?G3UYYcKGi-jM6@X=B;CRTKxfH-5gHbfW{}8)m|dj-|f-#!5|1x`PyM zWokMH>D6+maXT}!9hTg{pdZu+;AY#|o7pLP=zz^Gh2w8WP{V% zz@_&%dP#XQy!E}`{d*>*r~V+k4U{5_6yE38+$Dvy^nPG}MM^buar%?H8U8@RlW~%| zXDf<(BT1}*8!1-SLX`#|yo;nqnPZjy=H`#@r!PJZ3%h|vo@(lWKi8!t-hzqvT3yX3 z&XVsM7p&Z3oEmtKtVVibJJk63d3s^WA6TP18UI*9*2xzqOF-nqFZ#+S>2T8xoRVUC z?aRK0@|=5FKXHK6EpL8>5tp|V3Xe<_LRbOsO&A%u{o?z2|6`u_=kIWSDX6olic*4Y z7zyvz;Hnk!R4>+iH+;T*a4-|$17i)2oFcWix3~BAb2Ct@rt|(8^BRnK9kK=76~=!< z6xOo?5`XILe+L^|nce@LxaB{~8~@Fb5@2Z8J4HOb3YD~@(5t(v>wMTR$UAtN2XV|d zsek3;`kxR+LYp>iLQ#6*@BcwLT0>nO{i-ASmHVFc2(`RG79twz(9mcV+B-TB5of(v ziT13opI?e~)k>fmu~CXtVGpUL-fkm~jQelfxUn4D08YG_+1c&yIk>oxlawMAul3x1 z?6kpnC?fjQfQ?~uDkR_f`g+Lb>sQ~auBBXA=H$l|fqiIA_5*NxnvU1lTC%hb>2oo1 zGBP@`GFzdYKiW~qcHA_-xR^Ev4TWIE1BcBR-%H17$@3P*Z?q1j@5;E1oPVE+-qp=5 zr7Lg+Hb!^uGDHB@zjJY`{M-K z>FK$dntH7-@oRK^e9i|Z3VS#k4{C}nBOtkkLXs>A-%N4ULPgxp?2~XfjkA)r!{E=0 zklf%|e1+Mp=`RoCc9C{4OPh2)`SkVc+9LuhJ494*TkAR!0{pZ@+aVLLbia#dLjGbk z)=!!PA%`458nMf(JQv2`-q-zy%IOy1a(HY9JCjF&bn|7Z57IYCP)JyI?j-FJO6g77 z!gw;q(%jtslF~{V6wN2D0JY;sf8PPcC@!^62M$ow*40@I)>kz)?#vE6C8xc4oTZWe zE!Kekqx{O$yKSKqp*VT-W(4cQRMWws_X|;d9ow&%e$GG?9q}E2mU~L$;Hxap=g(Vy zGFfbBNI_7c^OSt4{7=T(oORZdbQMQ2P@HT#uA4s%kdBBp%bR#!Uw`YFcvXaenXc%b zXHp~{mH1s;TwTDh1utG4-nD_1H6kYO!FcNPT@2J>>Yu(gHDyjt-CxdT5yjU)OsMJT zG!t)@V|v`q4s7q}XX$Fwrb(|_#zi!>erIKydEB8DBhFM`mn1K#y=c2s^w5$Kp#S!oUp4&66MS1Vb&N2s>jcjb@&J9MXiHGf1N zud1p_vRwCPUvEaj)!6Yw7rhMhQ*T2Cv9Oq&oOBra+zkb_>~DP{Y5#_ii4*`4xZ8DM z$3p4f@p-3A$&K@HiC?$2p21nIvihnO>X5_|QqAl9__b!76P$fG?nF-}Wh@+6pd_6P z%Pn=Y&~nVuuR5To*Ny&6NYalL3a(EBF?uNt(}haAwpnb(+Q`9{YS*(}3vwUSY46^K z{r7-(l#;=pcke+~ivtg2BZ*9I2>tlg=&vu<9EqUCCr+gOEkNnp5pPNdhas(AE!ecP zws%V8?-f0iQ=t`Jev@=^LU_;qhi6j$-i6t26oE@8E?>S3XPQykae=KMNNP07D<7-W zMIdMi;Rpz+b}L0@)*UrR))3DNVZUI}PqNqkGo|#HQVT$<2|NNPu6tz3E+RIq>9*(^ z8m&_rgCGsy<*IRijd^1^PsJe%{sx9r zOaG(13Es16?KSCzff+(mxU?TvMel9iwS`{o`p^BM`-xL-Ce1u^nWKY$B2M;NR!H`b zk`iO2TM)}E07e|mURI$l8!)YCah27FaumjaPQe>uG}f$SKfeOriIp4rf2pPMG@=68 z=%(QG#=wA~`|0-X?wV{Tr%1*6`ZFu5zg=jpwZ6>xu(6nw=K5qKzw=@GuWuR{q?`Bt zBG!BBA9e7EC3HSReFFo++4uVOwr`!=w$lYoU5dUld#&8JA}(X>JPx130HONl)_3b5 zK^E(RF@U^|j2EUyzm1O#I-$LLQ+tAS8?2(@I6Vt%W0L-pm2rQ4biY<^yzJ)D7W;A9 z)7N;k?g(cxuU^wp;4@*1j&{FV$#{ z%Nf0L5q*9nBpVdVa~zx0vs~)yx!&+;oz^hAefzdY&7XBRySxrRI?-(3cYu3jR`qD# zUac$cvf>nnpx{^UUm+LYW<~`}D-D5EZJ{XKTYUDd=xcsqc{5W|QmXwr;gk;LB^U!> znJudqSSc0Txv?aIyu7?b;>2CWX{0^8z%*>Q1BwOu%`>DGUAtTh%4u;?X({v({gx~T zJmjzWipFekH@)O zgBchv5{T`kOE?qo3Ej*5^wE`NaavC)BDxQh1D<#ldHGGNC9G{u1($VqeJw1^p7?da zjh<}@PU_RU9T!((c!+897EuP`)_f2 zw@z-&w{l8CEx_rC+;GumJF+E2$es7R#O_A+f?qxI@KF1+0eA1oBgxX|X@Pw z$CHMgI}aPWoDU=T)eok#%R|0Sh_iRm5+{*b=%f=nyH4^@5Vii}-w}Tp)B9Jpe-6Lw zwfgG=&1+hJ@MUkHrx&*xQaA8fSz7|R_TC+tm{{%n@@2ydLHh?cOm^*I8FV;&WM`ga zZe1q|B|PDVxFEOpxs}Tv#Ye30AeK^e&p9~qA(w#eK1~$ZQM||_Z2DV@G-FBC*GFod z*gsIamgYhRd1JLnZf?O$HfQb9K}O!{^)9ewUXYP-e3~w?@{JRS!CQdl+whzeXiza3 zB9i?44jLNf<`Zyf7d~OWtg1Ta=0U&nSUeoPQWpwxYo2<$52}%_7Wwrwk565+6*V;6 z`0LEOa-1BX36~A3?w_PLbX!@c$ILC1veFM9KPDawa)XP@#BI`n>sZ%fk)DcYG3TIZe$rh+{kA5hTrM zl)-Q@*!X$;euKcuX4+FAj3$9hI3$bh6LThoPi(^^nYbfYJ73z>)8A!!z0YNra07tt zP7&)7`WhW0qeXnfL5;M^x}tTYMp2U#jhU_ET4#*u(k%0`yLIZ;|6CMWC5)+db8{mO zz#w?(Zw~*tEg&WVMrLL|99u5SGfGX7e8**$V127aljMkw{QMQ~)4P3zt%e|zkxs_M z2C1rfC~*f@wO7@QwC&x-Pe+;8`rD;mm1JRIfrpLu!d{Bq>B%O(Ie-#ku(iEiAAz>^jWO&aMru2%UbkOVKs5#&FlHx+@d@DJf~I zdtY`6ShX!QN<6}VIr+w|TgO3*(KHFuW?ddaNb+5Fw;cfel)D6PrCGV==dX4?AC^Ym z`UbX0JnRN0s@kkI{JOi@SxIYYBTcgG*WvP9dHp6KCgP-HwyXE{4QGZgzu53-929U@ z_MxA}#keY=YCyX@c;(}2(mmmKX4LtcLYpIwIn2RDkZwOa2B>xM;zi&rbgHZLAp==} z#p5?JGM*Q?Ao$qgPEb(Bd_2pHP6VLWe|p~*RW&to@tg!wIs( 
z(Be4JZ3_+#e*gacvl<2d@7y9Ec$kx7GjDItf!*ef>Vy@{VUBJ2{Dt`m7Lo|L zeQ+YSLc7JQcI({?^`jzT!wnRgV#k*FkM7vOdhc*Y>(Jf&)%K&?$4qf%i0DS7M{jjM z?ON01ahiBovF}CYD_3syT(b+$#7jLU$5I7LZQ##nDO6 z&L(4z{xT)2=8a0Zft4-s#~h; zzObBU51z~sGD&&d*QXEJHkaAE=(_93lwJxU&!V({EB$KQUHgn8c`_i+K%o9cc!{Iw z((@NDD$q(VE-oe;ilUQCcw%uX&o3y*Yee#})5fv!Ll&Fv-AkCFAw4|dO6JLX%dV2P zlT>a0kj*KlLt&(SC+VA1VWZC_IXV-z#_y6w2Y1td`MH5~bO(>tBmVGR3^{q5eYZ1E z$B$TSVD>!D*M5FgpwmW$aEhGI+julKB<(OB>Zzwatapc-?a1~ydY4Q}y3-+vTUJpt zHfmqH_t<<}>=x?PhZf!^D&&;1Wq&2*@Rw@0cRW3x&~<8Hek1Ahwb$P-X;21Cm9qNV za&e7}xSVuOtelf3v)b9Eets8Ax4-7CJhCcWjNL?~@nWlKN_eZn^9q$VKxW^T>dMN- zIu3@$rAZ$t4*jJ1k#|xY@D!>cgpe_|u0Mmm8sgogF*UfRADm0+fRdr1A(kDY7aR<* zFBETJqj;-$Ku1T%HGGk4GcWK`jK+lqZGiWR z-etM`si4FzhT7U%_-m^=U1>TicM{Hr&eO3m&!spZp^;vmzk!<5&!}TyFki^Oaq-2W zoD_zR^&`+A;E;peH0Z&qzapX)s0_~77&W>7^uHGN>-%?oxI<7loIXcE_eV$km6Gzc zg7fA*58ZXzy1n>nD041rwKuR>JM4F*qjGlFk<;`PeHI_@qUq(MmG6C8L~tEn$(ND8 zZr0GHlZCb|xyHroFsnC>bg9kEQfJLG%8GmF1)i}Kd@p;~ZLgs5gG&z|nTi}=qoibg zhmK3kvzR%-Vza$)Ym-uZLcxzucQ`3D(%V{SNTRyMr=V7LLZa*RBD7GPT%YE0Jf~_q$yUl562x<0O$!%D}<#HkNTu-sl3&KEA>qd?6nhnCv5Fg zXq1&zQnz_c+&$?$VEAE?y4xw!!G3G?HnB}ZL$&Et`)f!iIb1cVVi*};Z4+Bn5?hn;)^3uB~@Fc)`na>~`R^D&TT zOM&TJW>yxAg1*=YKn|)ffB;ja&jldU?cBKWnH_K)tgMpr@&W`vavD4l>N;ftT~&e=mR@2q+Rr$(|S+1KL;zGsg%#s`>eO zkH$-Q=mBBG4rgLvQGBmoQdDFwMCI*$3J2`|m+1&Z(At+&sCDv0-I-)$ZbukU9RSx0 z=x77K(v=|OLPY{#8XSBuFo33!`aByrGE3c&yhha>gM(^3={LEAOpmyLE0i*51L{d~ z9+Pyo7LNbp0)YKDc$Ga;RgJnGyud4gO~md(`_1s(2h!5g&=|uX+L!tv+5<_vTj{O8 zjP2N;UBX4k2SC@h5y(tP_%0KK`VGt_0G=qRub+Uw3Hj>k;?%V!3uL)G=4~c0UW1$h zEhr$TGOI|^GBPq^Vvh?)fxrSoWrPj0~?0f!GBJa`cha4m-a4ESIlK5#Xf%*O>W z&DaDRa-{+S1@H`L5F!4!4n7duX-i4ZWm$;ABO`$^ zKIHb{%KH|$1npI_8AC_ZLs54cD%=yrE|d6#`TjQg8V_N=$hVbv%;?=eaA%nii!+Z} z_=u%<4vv*E!zrYY{b*0cOxHKtH3J9`4cXtvB=68OhKHH<4{<&g9lE&L>ox-W!4`K`9~;7x&oO28*PEB<+tP(6139nPTux>Bv> zZ>*S`p$U`nk0Mo6x+ZWIZZMNDKmR>gB~g8Pc1|tYJaNiU?cEXSw73;vxX!RE01voR ztlKJkKm%19ZS`-xjLemnk3_5SkwT14=xMI8sHd#=by{k05Z6}%Jmy`m+4nDF8`9!_ z)>$}23+H$KLP?=laOao4q6v{hxUmx6SuZF-zyGPaUe=f@RAOc4)WR~v+i-Ec+s&)< zkC>`1M&vSZ0}#NlODm9gtHWBas^((FZ#j zy7QCCGx|k#&C^{l7mU7f62YOz5`5-7YZ4UlCPY6bul1pB=v^#=Xg+(soSYhKYN&~* zL-?(ie#diXS9^bV6(<*Mwm71gYYL}7mF4Jp!?;07_ZS9-U z3GXi>j!D@sWsXJaOeF@mb#&vhs1N|Y?*hFZ^a3Dur|DUqpz%NaA9Gg z2@gIN)+PiPFztj^@K)@6+Dr=C`x{QQId2QUPBWME+Y`0?@Xui0O}q`0|pgMo7bbsD_BgTs4+ zvMZpt-!@NiaX1LbnUJQuOOBwnf(wKpt^_kP%Kc9ilBks~?>=;mGOM;iiA6(83uORg zTKMXAzk`Q&^7pSm{U;FnLU53fc*2$H=i>v@V-|LHLjwbGaq)i@(o`q361h!nQb;!yN4v)>u0=Fl*si~>q z`!KVbn9v6nhd!5{o*u4t8v=M8Ig-AA0ihznRx0d{yfaPrb`Ev<8dNfqjjO=XS2j3dfL8yUh z1;7IE?@3lKhKeSlMxN@!t5>f;fOtbqPj9ZLx3aMz3g!oZpF>Ud>+z!p58%fS6i!n> zmvrm-GF&F8``!}>u@D*4fUAk9GAP59GQj!H4+oMmz8glvZwm_{?1A|Ryz6T)g@9|Y z|2SjoJFQZzODp9k1g=*A3eL`+0C)Z#hf+pP`;|wfzp+JWp!9Bf&PhkiSG#;qwZZ#~Rs(=pbzyJQ*8*SAp z*3FgjpLF}h$I7|}S{b+(5o|yR0%VmJCfvhSa(BkJk8eL!V7vmg13rCVX+b3l)N6-s zg&?T;pky2!9fb;1mxBZ(L>GFk{y0c&pcsG|K2(>$4}e$>CpIJ`B!Uufr(5X;Q2es8 zsQmpepgjU9^z!a^;6c)Xu@I<7H#avUWD0IC6lM$Af1FBm-@{A}U>`UaP`U1wvQJbz z`3yu24}kt4Rl&Q2ah!Ih0IQHt{irjfcaYiu1moQBXLot|FMKW_ikSijq1oBy0#{RJ zsQsYLgqi{xPayMv8spDx;=7s3Zwdt@?8y7JMl8Iuk1n3O3tF3qyzg-3-_GoBsn6ux z#-0*_6!OO{a;gbItl<0~YP7Mo4amh~>T(IgL85JS1I!h8a*Ns*Da zQBDTj?wQ~qR#Xa`s8rq}7DtfE&7wo_vJ3^%wJJcaLcos| zFscL4(8CH!sH{~KT;lfvgH=s}9$Wql;&UUYt>b&a)#TZ&l0R4JIO92#gZ8Ga`C29a zjfRNHgTC0C&~%YW*U)IJ-z~v+qmR+Z$(IhWV7m7S)5yqb@|kL5M1=&z;~ZzGIxAhK zYf_R zK4baCnuDt=jn?hC!rapqaSXHhvn}eVUZWNQ{!yR*7?l%3v?9rEm;JEWG<*J}yK|D= z`Q7;id#^PCFEHaD&Y13YLc3OEP{1s~N(h~X7?&h+QR`%d?S7d~t z;ee+5inyXx9A#wN)&7;Y=re3KwvA)VdlmbK+9QsmVyGw>a`=DdBHnf_T+O_qdr2vH 
z(CIHq{R+v_s0DR%)91i9Bg<;d5!JgdLB`@+WOaR4*&)FUxnRDW7uor7zs9qN#-WD4 zWh?sLjZ%mpJrP-%P!koi{(ZPHO1#$^yD?4>J#s-NmQ>;-RSSqodqvP z7TkZzb0}iSymL&-!bY6n=@_jT3utQYZy1H)_BB!{I!Bt}FW~K;bn>#l(eIiB;4Crw z<%S>vklW$hhXxUaD*OKJi}H-nCIHN^95Z7a(2TOMBmdylQYZGu^Y}`#c&)RkdmBeS z>)ZotP_nU1br1e)?lt6 z{^Hyds=ga8$M~R7WTb$Z?p{*THPxs{WoB>;otp!S>5CUHaPb9LSXoWm0)CE^7KRwz zGbsB303LuGFbBr9tAU9IP|YIl^2!(JoX(ay0PXeZ*ftPkfShc3IRz6 zAoqTL=a4eMRtdK^GAv9IItA=-D`+%84QXF0`#rC^M#+rFYWVG^$uD2HBqduaD|Z1V z0XYfPt3F6-Zn8L4ij5e+vH>9k#Bt!uHJZ3MI%Z{M-FkaW- ztukfKCdhJSMFw(pHwG@$-NmJ)AbEh-%FM`sy65`0 z0wLzeJam^Fyb4yKOWAwSAnoo}>TparK8Jyc36Hmz7XalDoFN|!4-dm2CL&%#AhR5r z97yWVJw5f`>w7P7YinpU`y5z3fBrEy*A(g?TvjAefB^sofo$j6fHMWD2*$6XqN4ZA zT12_Hun{Da)pZJ%jOwm33N}_&V;kf;Jn2sWl*-8P@$*ARN9Q7mf!rIdsB3DP3pLRH z(LccR2e1NIh6ZLdFkG18_-|%$N|iIrECqmL)O<}NZ|*JFR0U2R*1>SpqqvDWj0V)>Tm$A?$Ut1_51gI zx5W;4onNhb+!085al=X)>EeI5?5XT`SKX7HQDy)}oDqq{# zB4l;!0KQ4`M7#e9a}yguZjEGdrbYRL!$_I;)%5klex$`{_ED{n z7D*D44w*M?*BSPR3_e0DetoWnnIaU*GIa!9^Eyy8=n_p;%L$VHD#6fye zT8x(5v7t5|y?g*ofP}?2yLV;1TdCExUCm`s(Y)F&m|8NgTci&j!ZCROS(i;34>47_ ze6eToH2kAEMAwxr#`Kk&G+D0RC53lo-Nf3HrKjdv7TdN)MvFITZwl#49)u`{p32C( zVFpUa36@&eC7jMjmQm;VAscg>WyMA(6gFYs_>n1vW8!rlmAW0WO}@a>(~~@;@u($i zBu|L%Z9yCO((}_E63}2aL2Qkk$dPypA9ZJL!9(kDbj)z0W2DQ=gBARNVfpW7M%VYl zhiELmYS!87wv{TSan1Tz@gS|K`7-t!7oD$r>X&N8N2ZF+>A^tKp6;@EH- zG7pRM9LmCakK@|<%B>y1Ul3TufTr9z_WRza3Er8Q`M zN&GOPO8e^ZSv4W_%|Gs1?gjIG8EWmp}><=MtOxEY+RuQ(8+@bQh_bkuQCu8jxjYWqlN6tJeK`l_8%Q9 z$nM{NUR(^dc&KVvuB)zuCMFb>m9ML~ z7+YFKwF)#eT0w#Xsd2lMI!``X{h{6~VfunfJXtWrgF=4W4sg@ozh~y=mY1zEvbuhv zHHsw;?jO}b3{VO;YJv451ei<9IGE}lwP05VRuD^l7nmjuZkC|S#Fd%fR(~ZM+;%lm zCvu_!H?hYGA_XsfPyf;~c+)S>9?XwoS@(B;MyWLI9o_-AoLM#JBMQlXBs4VeJmtSY zh7QNz)+8BPCK&GCy1ryjSL#F=ns;yT6JQ@4rwJ%u@KJb}kB<*LQ)l%mPtoT6>Xx$efw+9o zewXmn8fIz{{8kke6|F5Ts~*0PF^wzQo^KnpEw$4D8cX!=;^N}$4AdH8z@tX=SY>3d zlaoOUb9!R8vlTN%##nTRr`Kwe2tg3N^d~OIxx(O%h&K);ww_?X4Xw)>V6&9V?f}U1 zw$DAU&|wKx)d${KM#fpTHN|}jDgS~uh?`fk&yROpqm1GX5(=i6A&jId{)th2@Id(J z3aT1MaI=A?+VavE71ins@x}Syw=gHA;GvoL@dHXlZhron>%SpOZ+ABM-Q0Wsnb(|Hudes@ z^vwO7Dx7b?$`Ga&plrJ=mu_ut?EXsan93!6TO>n=a`UT(UYP0{>DYU8d>rv+=}XgR zkt4sJK4jTahNvD<|B^tt_aq3%RD^{sHsUj{KDAZ?Dq_q3&ex)2?ucTXkp5vhCPLjC zk*=JjIaMa~;J*SJ&#OO27BHKr@gqbfroNZx78KlsF15dDR=G+Lm!di!rfg!VcQU-O ze#(j}PvOTxW$Vxo>wisyi1WumeEG8FB*>Q|iRO(STsyWPL^&LZLoT5nQ((|NUUyCO za*4*}Y~Y;mg2y~89PRuBBlxZ%!a~;QNgL<0YdS5zF{)xgbacpp4`MBr>8r=SaPL@(*+8Nl*xb6~#iV44L+?W;FG{l9l`m3dxW ze11|}H}1bD6_Plj@D5#PGm3KYnx!gA=-K>Ct-eQf*dFNeKVG)H)P{@4MQ+G~6Z+z+V zutkF9_-DUKhIs-GQFvSB)oU%ZrLS`qb(Z6;9ltm|nLZF?bIi&A3C z(7mqZO}Wugvdw;P+SSxqZ<|w4SnLpA)+PuiBxTBJ<6k3Lm5YfTtclz+f@iC$O;KIo z1W)p;dQrJQe!WyG;PcA^V$=i2r)Yd!$>i6;?>SLWE)SHndM)j~Gdn4=`TP7`Vh=bs zq@tGVV|JSUm>uu+T7CW|iuANlw{}y>_(`Kb&jsH8FovhMHQ(Z9@7BjN3}n&fYD;FB zKSWo|DS`r_`-H(rl*DgcJ)?Acrw=$tI3Bh9!*g{mZqD9D^YK0G=DFV<5*;&Pqt9g^ z*c8$fPo3D)U-@kc0q$MC$>7u$JDiK3tk<$llJBajYF zkYSR-y+O2HrkOU^d@OFJes(8~#||vLG&JrzujZa^?DcnrYD?R#v*Hq(=*XC zaqc>OdZA@Qf_vQZm*|&ic|$%*yYdMG{jXKdFVfde^&WFph#YY}I$gAY91q$nFflCy@*RK$?99vxc9tP5)eA=g z+EOn+3IEK@&egWn0;ls8E}O8f5Egeb^pHm_^9Q9 z#M<1arN4(-^~Sr8zqGw{z83r?#Uz&R6GKR7QFKql zndDiOuG$2}+%KddZlgeY*VeE8^ncG%8XD+;5D3-cjh&O_T>R{T>#JGBwb$j83aI^{ zgamc45MbbXsp@PgH5Yc1(xHv$sWJK92UnlZ&XHKCv|s3POrO&|wC3Rayd5=!e28IxP zg3KO572Gru$cw8uQMts2*_`NtxC~Ng{QRABp}*MLQ;=%P>67n#@(zCZ zVn&CTZ?ypn@$VlZg)Nqsm$UEdlY3dk4+e}@q9aO|6TkMx{QJx5*OV!oJ;LWHzP07< zMYh2En%resuY%jiIHnHA@dHxCmg*0E<`hC29Lxg$341wZ)trYD_Q7LB#!sT6NaEsN z-yaj7wOCwq9aw8Q7oVIoX+w=lxl?aE1_C%z#FM59V)kh5VwOUF^30qhbiQpZ_dQ)b zoevo=$YqzF|2@RSv07sWIeWp*O4~)_y4jbnF7*VeZR(dwb5du^)NkJcefP(MoUaU~ 
z8a5=dFKC1F+s|GrT9!8~s4D+K_xbP6ytGN1;tN`SJ=C|eA5mV0*qK`>-}@*`BNOq8 zn?6w9f`FWtA*If*!d{Z^f+u7Q|5DwQJEe60++4B0Ni;TJ+8258fW4-kg*W}gt1q4$ zSHg=VHy*=)NILmCFrHSf;`fiK(TrzXsK(rj>&wOL{@>pdYO5Z3FOV8@I_2gA zQ|8SKDr_&(?S)>a5)llD$Skj-@-Z9WY_+zJEPPieGiu1V4EP``?*#^_|KAtjLlgz8 z?(XL+PwyHZtW%jN`A)n1k-45?nl5Sr4v2- zCN?%2Z)=RQf-CPHpbg*Y?;lJyzfQAoWM2Pn z<@hBBAL}w7A;`=0LBB`)dz@FczQkN44OZ7I)$wMC=f-%<`@QUxgPJIk zzSF{DiRs{{DeQiV0+DgBJj%ly1aPa<)sK4z)IN!wO(b1WGb9%yYSE+QR4TI%FRA|6; z3uQ$b2HkQ8qwP24vfi>a$(J6wi&TIYon|u8h7P|pZ=>&4~ zRJI)Bd3hZj?MJS^=YTr-`W!d^>XBq~x60q?l#DEFF9&)Sf@ARie3eksG>}L?N}h2pghi;&?lBoHf?V7 z=KS$PRP6BhRPumiG=)MUj}(9ZN~LV6lPr^NQEy-R1ki?X)-CU44DFU_b-} zx(Zl$q^b&wGH6)A3-4P80v$!7&F-sHMpkkjmjO}mN8!Do9`jGwOE0XD#eO2jZTW2P z6|+kIqgem`w?pCVUrHX=)_0VJ7YcTOLB&z4D8N6*V6MLOB^RfybZKaO)*5_i5=`k0 zA@P(P$(7ERq%lWZdU?b(juyjhL&OtB9^KrOyU70fy5rUPej(H5HgS}yyq^M-5mxT* zC85Qts*S)hFH>d;Yhu9lJy}_X%1WE3$;qh70rOR8`^ViqVR1qsnZ>D1EPLM1yT@YY zXlgWgu{K$2yNB@v1=VB18f1|jewiX{Z3k>{NM%y)kFW?9bjrH&{wA~H=265|HY^H9 zyE0w^*MH#%oF?7%Cy!COBp8ITs%ug0{|nC+MrP>wp^IYd(l_62N@{8v_6ciuySuBqk*TNgoq@?7v8RKz;+VorznP+|By}Zvk@9iLk*Eh} zL7#((OTHRAK}v+3I!)v1I?DI{8Ow5E9m;?7m6fVL5+eMij{Ih3V2Dr=VHt5}(@b^n z(?*^pw|=Zat~l2IhH-fXCwH#}gJb=f!+$rgoRTKP72=(u^|A4g?Efn$C!sg*d|>hw zmD&I8cv4neaq1gVVCB5AyGAar7{%2wkNisDKZdIO7~5@$rX#CA=_?x6Q%`?gT^wN6 z;f!J{-z_PcN<3mv3O_pH^DCeJNt~AQ9?rwXDO36((1XO?j|DoUPT%T}{ZxC%xJxHr zEO7Z59i4-#XE3vafzMH!1^YypHpW$-Rp~N>dQ?b^Pm+W|fkGVdIzhK3;KX$igSYRx`Z22NcCQP&M zYn7FQT9*=zAK3zP;#RSu=7Yyx^c3$|9sDqsA*I*&;mZ0_I=uJsJGhMHNbPE_*>$*A zv9a@Xfll)W6z*+-mw^Po5cpC>mud^} zb+mrP!YXWwPa~7>>S>`0O8RYU+aq8{SHbQ1d}*84HbNuX(@Wslrw$*PJsY>g{`}S5 z#v-=ypM!BmT`CrQBNI^-6&X0=;~RfKB-2TM6`0s$Y*n)V*Y^B8Y~SuDw(Oa?fmF=< ztQZfjFIZTl<>q6LboKTR)(W*Tu^{^mtUCJ=yV7_1@tVd*W4(Ii`J9mC-p22+SPGnb zW=w3W3hhYFew6I&l_n*h(EQpJa0wqrQ&xD17@q;t*v<3D!4@Tu?b9qU1Qv7mM=6}-9K4lyfp;rY#A^m|1 z>Y(4lcYwdd8HT`S`&XDxY-IP)|x~lIICmtEP?;{K*1J2Q)GCR}~mw>#>d!6)_DY)?vb+pk{ zB=3h14_$Hm^;9|D0)29r4w%wDwVb9eR+B1z2bjws42qT3lJQ8m)6tTpD`rLPHI zi4hmRuUaWQrKGXBmS6Q~yNI|nu@Rv2C$cy!%MrD;K@va`qAL0RrYa6KRJ=g84cM`F&QPB(ZVd3okBl+&m zhK0KYlZ*;I``T*f9cIiG4AFO3euW(6DCr zmozx=mGp_w%kC^cjc-w(EdQuE-9bU|@r&`EoD8lp>V8A_GNfRvXN;DT%gOLjcw@LK zBNiV!<(`HszIw66`=UVVt-EUYR(2BQk+0t9C!4F&0>Y$#!dPHc6eV0%WnbONusfmS zU-V~dtH+{Yusif2B$m;s$l~HR8ZHix-1)J?7vntY_$i_?nckG!r{V_d*GkM8RjO^> z}zG9W}w#6tBxUCLu`sAH815 z@*m{B_i8gKLtl$av)ze%RLRBuAQa-+nQ__wslR3awZSj3!tr$&xo|;6o{Gzwo6%t_ zvBZ3Fi&XQ+kU1uU?4^2@lz&ZBr5&*P#=NKi)>wB@9=wB+{~)O=WUrWUjf$xrZ@^4CPZ zo3gTIWMB?@ED*wmyN)!!G za8lpbBg*AQJ+pWdkTtVYzYJ01XF4nvN~|rer+Aq+YOQ##cCW4|&&ictik6*y$wk6H zrc*AhvSU0p&xH?9f#Dk2PbxMYlysdPy@&7g-_BGXF(k4nFU~!-5m*XnG-J>=OsG_1 zTlwmP^$ zjR@Gcw;yip+?Kh&|9?;%rx13Wum&y(kT7p|;{yF1SbLgkYCk~*osvSlK3u)2L>Cym zK6$up_EEkDe8Rv!W1y)C_)5PQz7ae1V&een1{BN^&CI{Bry>Bzf?#+I#Qwsj81AZr zv%=*}zw3M7%ZbZgl^Hg;az+Mr&*w#;)A_%aX$g+zlx`Ce{`)&WQzCmOKYC5$Nw$$4 zdrzGeme^0D*~HY&l?0C^n!CH?^f$8vmjz1;OB`+}MbRHf2eF{EDde0NNokzN&@x1t>Mnfa!8)`Rc zNzv1r`r1a?kG>);(r{%-&Pr0dviMzM#-NbM;UB*-y~iJ^K4#_0{GIwvNf}biTDq(% zlD2^I=|x8ef|+UmnE8TAW(PHj>Xl3IcqxKOd1+NqJ$YIqM-YFpDqqhqHXQBQ@6n%w zzYRD35R_F4mLlD|ml24uvlCj{aOk4Xb#f_hz~-_$u)4a2_2%x5mwNl!24K!;|CW=B zSzWCpgDbeP(LR1$)*It$H_mZYJYw+_Ke91$4k=@k`TTW{mcPsO9DIkKsUuc&BcB`@ECm?zpz zcBcKDNT|Eip@1QGui#w;p@7GfWn4u4#$IElRMI=jR2wXQmuyuhZwIOW9AheIpC4Pj zQR|8kTA{RkDgIj7nVujjU*?}L&Do=)1TFReCZe&FlN!oo>-x}#btBR&Yt`t5l!`=g zqLjk_LitBK6S2E}xaGNYQ7jz7HKm8Tt$Yh3Q`PU?vv(jBndwGh_`Wf>xznr99h?>% zp!_u&HJSA|L@P9)ukcsVsm2S|4B8b1-?NXZp@a_EM`?7QJ}R-5eloZe%Q4#Av2@c3 z!0$Wo7~GH-c=)R$vD+V^lgTH_F~ht;R5+{D#Ic#XBa%K(L6=-Ozu;3|Uq6+eiPx7y zc`jdIMEQw?^26l)01|2(iiU#1#^S9=8?SZi?&%f 
z?<$oG2rg#3!fnC=beGjQj3=D=$+X{pKtn{GhumY zp{dz3TW=f?fOe90*09_~O)fz)Mw^s3;U{%nssJvbq3XL|Hwlb!&#vb}T zuI<7+S-!(w4u;M%a7zYH{{LX1F(|IpG&G*Tat+Yv;?YY>g2w$$aq<^zCA(W#8qDJi zVTlS%5y0y*-OO(j7BJ`RaMdYn%oIAj}X!xl2oV~#V%gIUloJ2;(qt^Ejw}4$TAnNJq{sG2vPR3jLPuEI+ z_Vr&McGXE9hZ!yG{R_u1w6N-0WS}-*-EJqM3{T+QbSU?aizW97pe-p0IAo>H>+Pn_ zjN8di&J2<7JhwwK3ks)i@1D(WULTtnuuiC5@h{Qr{+b=(Drr345QF@AY>a4+kB3QnnF@THQdnEMh9;FCczvFRgI{a>@~9c_jsot;GkU*~&UKJ9Hd5W2rVju$+r zhlRtmP8BpCSInB0wrUfr8=742zd$E0p~!eTZ)TZGAxP#*FEIA+#0dv|H>&r|`k^)- z1%(8igai=}_MKM!$6Mq79i7JZE`%Q<%lKTp8OLIdR_^Zd-`HGQlNphoxopC>I5PV4M^%#`AS^|Yme#73C*`0k_MLt})2j+s%n_agOr#L`{d>6u z4F8#Hpr*CUaFIGVSe>b25UQmSfBTjd@LuyJOZ4Zu{=T%g+skcnlqhb0F#V*|SuHJv zM3PdGqFNgc`^L5jlk6{48!!RT_)TUa$~!rqb>PddrlzBC!O;Ma$o{5CT{Y=N4 zl%#_InSauaE|7bS%v<5=K|a?#1m7cKIxW4zjj^Skm_C}+nBTU{q}-I}+*y0z zot|i`7_ZE>{)e)&&d1k!B<00}IgZMDDd4;)NNgoRF){{-{!wv5_85nBU|a|i2{sL_gCmKI9X}2+ve${{^JstWBxaPp z@s=TOK8i3w)tPB}qF`0Ad*34G=Q&7ho<>nBZ0+}89UrLbmL+B*-(~}BPPz8uC9PGe9fwdRFA#00Y-|~|c6_Uj*ED){N=*Q!Mf)#IcV$4yQ3VTM0 z#z-peD^-!dj_m!WSh^~6zVxL2#pkg?-Om~+rAOZsES_v3sd20?2J5Ol&ZJoBZhlxh zh5PE;QuCFE*l#k=`h^Tim*_NuMK8v42)Wn71-Ba`Zpvo&gP4H0>mtT*luw{utSfGG z+`c+>lOkWN3q~T~-C_rxr=UWl5pl_BJlaR#<0sr&5yI{opyI-6lyUIS0;7VpzwO<+ zpakMJs$X!rH?!M+rK_%A@Y^|_{oUqs~e z=qLwPue!N`arsBk%gD-hfW>|*P^m#*4;m%yT=5@3=q`iJ6cyhlzc_+J1#pVDz;}=y zj3VLxy4~;yY;o|c0FpcC8W+Go2KdMzN8<;zHY_kahHXk_;KU2M8edyBJFSDS-+O3N zre8Fg(2@-8v30yfA3kqj8MCxH;~$UdzBc| zPd|P_7I+!r^q5BK9@%?tZWGb|%@nIwMo;c~jiIAQNyr7#{U+;DNzChOlhQZf6IE?b zi}Td#jV84Wk|}1Tz`?=uIA?jvIw+(nseMccf`ftm0Qes(&WA?XSkp;tRkT3 z>9LQeXO();pxYm}_Qye2ui- zZE?0}8IrH`35pIBD{^ru_BKa71eiR#yI_G&xS#?zHO8Gd@$QJDng%O#ZuIE%G%SK^ zp<~w`sp_3f`=YL^7nPv$RNfi|?Oz<7f*pIKn2-p+FBeZGjhLnx4_JBmK74ZbXpop4 zx~&j%PVT2Cx%NV5XCABGV|t?*gkf(7U#^?J3x`bsi2+ zY);QuxePBUyiRwYqS1WdYV`cAGC+BabpxwIQA%vgQn`Nxy1EYbfrBdp4OGKxz>SB8 zNz6(=rAUXGESKjaIx?XT!m;fs@zxuLiIIvo2)%L9zHwQe2|)i1op z=X#d?b-0oS#r(72va((pf6M!IbW&@>DL^@S-_t8GsqbH1L!+LN>?6{ey6Re@Dc#3| zIldVgI%ZmOZG6nrGfDXxW<0&UnmMWpD87vYHlRZ?`(9IM|55%ga|jcLzkK(oqgWtG z@v<^!$TCyFxst&a zTYyrkwW!p+6?BW6yT5gjOGmov$G4{Ml*WM&xCx}>13z4AP+6}A=mqI1`!OWyG%?qwN`sBRMbEcPmL)vglBh| z#l&P|XKA0Y{J*gAg7Pq@#|{2`RxC4tffuAsE00S(C`*^^9Ra}4$gF|gFtqnfq@ElPgB4A}77OdxjC^TvNwIQ}5hsHEDdkV&ajI7#q@}_2^PsoilD(rx014zIr zD(c}T1JJ!`ysnx%;h-TFJ*Ux2_+%ypUVwc2-4CJriMe3bM9dDSOG#{rd4FHgdbq8% z{$uqS-72%b5LZ~AKz<+wKuVarS=rdXIPn=u)=CchHzPJe!~aS6a^{H2x|Pe>=k z5bn^N-+TYZ1xQJ@nw!JKEUJ4MOAeF`aoX_Evsj4ozSv$TLL!7gNYvC1x7h{Z`Qq>W zMO4Acej;LC_55 zGF}O20~1FSkeT!A9lR6fwUPiG%VMGj2i@t+`&Yv8hio3+z1s7O^vJhne%S1 z1sIHbd3lwngGb@`ts_zb{oeNW_Rfyfe>3%PIXtl05UeSn5lkf|oe2#fT50rqJmU{$ z>)@l53I3d5Gy(3*VEOYDd{uPJtF$$q?(ufrEKI39zN8eQ)pd zuWPiAQYWSgr?RuMxNssmn~c?Vc|2kRIqGIt6IarkGl|(4arMNpzF!x8S!_CpFT~y>-Nm9lc}kR zgHyRmrD}cIS2tNFyMVv2CGKu|v_yP3T5fJmTwzVf!@_|4?f5M^$PHe*t69Te&tu53oTBrY>S(tHavpjv_b1Lgo!v7aX2OUqK+iK{;<6tHY! 
zv#-3X==}Bat$d{!+r6AoYqbY#yy<_OAypo(&r$N2_x;LUBrq{Sb=Nb9$(HCk|Nj}zWO^czyAy?E{;Jgx`hVJLF=uS70=HH zzN~ina&b}6urQ;zy2^LI<6sww{Hx3Mpl>|8J96L)10UuM0_wwW1x;BQgZ38TkCV1{4%e&&13SO)qPkUh_9?c_0VXVv)k7H-G#8XnV`3Dx)rH)J6qK z2?0St8V)HasdxZIq`SMj8x?_rlz^0!v`7g^w+M)ov~=g8yZILKzVEo>j{Ea|=ZE76 z=R7>I_u6aCHTRsN;nBbk>@gKZj!)Hxjy1Rb?0Va&$3Xx2UdWCk>>KY4O`zzKee80{ zLI@jMo%J45_zU(&k95sm{t#)8ODc=4t1GJJxmMnqAI3E(Kw#>SH}<^y z%4lj9!$+AGcI5s(z}ouWqbo(EF@e)raY6w(cdrSEh`cqq8hkgY!mn)eL-YLNgNHti z*Pi|r5euFn17tMLyZExoz%?$G=Qsn4Mc+h;{Iv73UshU#lCDCK^Z^{1fd z;dCH1G*lN){r>LrJ^m^-g{k@HM_1e9ld6M1uyx5kuYMoE)X7fHC+sPBa?#l(r90H# z2Yq!IFCZ2J14r69KAPe0U=sJ#@EzNN7~W-H9;B;v%{njP4CqcIJhr1f-TK#BIW7G# za!J^-$xGHAmNx7cgC{CUidoQHImz-o_A-M7S6M{`guAe@2O3ZEM7_LDK`a_JBSRtm z0N0XF-{z>Pb7;s7M63VVCn4Y<(7Be1W_y~WczJnw`5B=x`1qcwC&4yXU*Bu@*x1-O zIJ!WmDXSND#%}HG%+Ab2L`MFev4u^*;I9g`>Fn$*Y($2IJNW+bXxsg$mQ}E>1k>Lh z&^v-H%Y%altJSaGgANz$zXO*%DJd!MI%%-O=KX+1u3X{Jp-MMxk1xAFe6<$TsK7Q9J`VeqL1mtYhi4VO zzPfq}zPR9J)BB(o|Fr*I(5;aX;i`q{z!ZXQhaYduD; zy5;QLHN?c<+K$-6kk*oV(fKMtGw5-l;S{>*#6x&uX*Re@z`%g#W9@5+#%#Wr#{thU zUIdEe8185w1BDTEtbWvf3(G|y8AF0KIc~toDIsA=|HaSG-nK$hN*i~Rcyd8RlH1Ko z$N9HUi1vEbBi^-kMVpi4Ldnt0*~6bjN;4n_BLmzEb+@Sdy>%q`51`#-dI zV5g3p*_~KyOegk{7;L+1X2!_)E=Sw>hqDXttdw9^{BQ*prIw(3$RMdiZy=dp&@F#| zo=Tj?4=po1zBV7!8=OrELzcnGjj+K>VZrF!AU0tn} zSNY2lsw(!^`QE-AIG}KJbl+h=+||)*p$?JNY<#2TA2`|O^vC#1-c(G6@fWd_`&<%h z#aR>v3${I`_Pma#tt*L^k9IBw`PSa!<@~wF`3{qg*D@G3*Y!)t=KcAEK@6xnQpiQ>;HqzxQ%a-^O_TwJS+%L!rvSp}#1t4Oef>GI_BZ3yxUosVSR-&CoJ z^rH|RYNBS!%(-uPGepQggSe{7{Pg3Lm{scc5}c6ShWc>ggruAxlnk?SUh}wqCpNuor|wL1LlkV=LzA z@P}ffe>KhR|JF384{gY5PyPISd>Y0s?Iq2l(Q;t?RY1U5agfa(JY7K9df zfr<`9HNX+UO4o+`tMW-l5GWnNcKhVYO7$e^D;O9ct^<)v`t5O;M0!mPCEJ}1-V?GZ z04E`Z2Pr7hv+XcMpr&~HwsDav0GuFl2=+0h<>eqa-~h2>Kz5nx0;nW{)+2awnVXp0 zp`Z}d{fY8=putXp1z!YtAE+VVXiz<{vao>uCfGB<8G*}+m^x6IJc&;S$*5th@i)!NAk>bM$E&IK41WSRCbbaZt&fS~pH^JgeEVcmd@ zJxM#!F)eGu3e@g=LqhNGJ&0<7B@A`|!__5( zhuf` z?hcPYODjpK@=aQS7s_I?^DOU`+_XRyD zDq#piS@`5k9aI5)ZlA!CzH3c?9q!PX2ZNQB)QcBoGntF?^QD#}ypRHb@H{Mf>wRMd z`A-pZvzIh(`UroWhJDfnAj;7pbXxiS`&MI1dis^CDe|FFOs0W257iy@)(v%ale-rz zKO4(Eq^ACZGx{Za-^o=3kC5=8-K+!-4vu%-2hbQKCf;S81}oDDaWG1TGpW*CotxtU zxh2@R0|+_j!B~t|?vJSmsq3|)LT?lO8GK)MHJ@{Rd#aT|#z%)fPDA;7bgeco@1D!t zt<%##3WxhTue)@=s|JZ~{$|R1=j#jFEt_D724h-Q4E8$@4Gp2~%VGY3CD-hIsgoKS z(0Kz8K0I6wkOt7rSd+K6wIv1Paga6(mba|$pcJE%4I?4{S??Xqpgbh4)wFN-z2t>> z35;2ZSN^Efyr?BJ60Five4=114vq)BtBuP$J9RAqEwJa8M8LOVE?IhQfG+u6f;TlI z;jL{fY}g~stoogvj!x|RH03+lOm(nWhn?g!jYnV>#dGkpBk@Hz0N`DO(6zYa-FfSS zM^K+4fcAuh24G#MuC6XCyOHOEFHX3#!}W0F+X|T;bOO%o0lWx5RG;wV9S~oR z;XJ*{{#FwJg_oem1(?L<+8PNtIcQRY6lbR~7s#!tmRcuORUQ9{g$E9zXXiq$kaB`Q z7s&QNqWMZf0@5)+U_cfpFK>0t5rPurx#fo8V<2GL z^XCs(`%#jTnnHF6{xJD;Fb}r2j>vEpPnp2j=ghN9PEOCzkO&Wt-Rs1CK^6S@)YXS! 
z!a>Rhe-h-?z}R?ceEbh=0UsSb0cE)aewTuroLdk#w7_%aOdQ>Vjg@s302k0CdJSTi zppp!F+p4Onpk)IpMi>|vaK*Q-c^4LLvK(nZ7Hnby3Fv2U?|PV(pwdl7N(zd&qQT?~ zXtbJ^R$i~^jI9eq03ZYO?1P3Fh>d}A4+!UgFr7AAPhX#SKZot~uQRi`_PEDhEtgsWDV<2>Q8iVaK z=nVNqe})r*Z)MihjDdm?r0<6Ft-=>T>VcG$)YZ)mH0`ZHVGZ)&CrQ$QU>y&ACy1q{ zMXmsl0XzlGG8iTxQ3UTtMn*uX2OJwgj~k?b$*AsvTn;=wh+h!hz(W!IJi!qcr2D{K zCY8a-8I0hd*)=dY2+5{f0aPho8nM<@PrUixn|b{X3epnycvw^Ws}qS~sg!U8FtgDLugNzO7vW<+7@!B>}UgM>cR3l{GIPicgU_o0Mr<^y@%d+%z;$VE_5u3{E=P<-HFAPBc>-S zd=`RnHBwR`;%VSt0;>{8c@7($?5$66 za&VmVzO2B;9oPBw^mUCS?zrErD|=bk7f)38lrB;VW5kd#bgCKAV zp~JOGGffsmjn4uSA_{Iw;vnmOp1i#L$H>S}5}*w8859fQag~j&35}7R-dd4=k22>T z+HEXT_qx8uHhr&Fi|+bb{k2O+3Vs*7Sm3`7*Vp@U|jh3BDsV+e)0UR?}aPQ=kH$Mzi^S_{H^x&+6RvP{@n|eqMRuw zWLWjqPFxd~2;2BXkJ8CvM#FCv@16N6oG#NXeNn#cd%Wj6K#Hp6+<1q?MYh+d^|woBP8o%KQ`|L(A3e z9fImMYP~bI6Hh#gP)MqkwFTR}BGP=MjW7anhHBP6<-ROVX`) z*q$9d^tpcFz#7YNukC(dw`B>=!5jC#7rAykstSd(7B;)9rnCy7tdE+es{fB?Q1zh^~r}xsHy!Jc&1-k!~ zuJcI%x3q*@zw#C+5f*>HJx!CnrK@v(iZ!^`*Vd*+y-trSR;yzT29ib zjrl1>kYG^~4XOM39v`zY^=5`?d8U;2L|fY~{r?ylmc5~8XnBHoHxev}Kb@aFXeQ=!k3IwO0APNLf z)G)7h`_Rz@+NeWmcgI0B+mE#faj3rF?UV|29s|$&j-}r3_L$d@ZHsPp74Px51uZlHnm!oK*m8#NXVjD zsmvIG?dWiR(lyuNDJH^-4==JM#`7O~CRyM5zdR8C?+ow%@1wr0LtOApg=7`<&^>_@ z1bGKlC_N300d(g;Sqp-Tw+89IE3aL92Tyna_%|!%Xhh+F1d=1Ec6x`r2aN;D!yxVp z@_t#pi**Rxe;?N-5q|BLI14pH$*^re|8g}S-DY24&vnX(7PLUu@WFiOS+`9K_&{(( zXMXih=VQz|;=Xa6)aHCjNV$4$R7T+G)ZWo`NL=1$ly7JANBI2Ww__PV8rEDdE|>aIoYkj5j8n~cJ<_!_>Zo@I)aqPCHiqB zjsE*bo1)&j<@~p`y*_Y(Z|W!RQDu(%Wk}*~>|dhjuq@FUOFsXACmsx9I`d9nJOev}@i2N%>^7Ch@<+)UNrR{TXTc%Bf(! zIEs$98g7UkQiRZo!GMxvqT9^YV;|vsXt6%c{?Cmp{oye_kx>R2MMvD@LXe=&b`p%n zy%RZ|$Ib`2v8f5yCnQxTKnHhlaoPSpJdadZ&tU9!LvVT(%U!kJNOnegdeb<2IjC)T z3ZN2~jb=Kq5x-V)K9pZi+=OYz0juR4P_3Zx3WE0Fq6WDEq({Jk_15^$xm%yj^6)43 ze!iT*;s4&5|Hgp}|G!4(*V^AOUr)e7?hZ~eAcbuAh#cH&K=cio*5Fz2ztdWK z14}ev$xPs*0&e>dO>GOpB-n@CGq zOIuq1Sw5`s9DjpRy+2Flt8=YH^51QFt#cv#Z(T=;UK4htffo<$9ML54wf}MfY+Y-X z(zmAL#y>-dg_QV~xW)OstjB^u>JGUSlItzn9!* z5OF%J?^y1r(9ez)o|x!0g8t8Ee9u0kmappr730v{Z=M9!nvHeO47*;V!!5%W&>x2; z#I>yd4x1|_q27sL1zof8o5`#{w@U9c=@l_!hNzfvYL}|FO;f`siY$QZRv?k4_}_HB z0n;^4B9?nOTuQL)^yE-VK_Px|ewt!>b00eWE{mBNz)kqy(A2qnk>W%RM;!N9@- zC0S*IZs&qMjAHore?#Eg+Y2~K7wg#|nTMCR#`(%n`wix_Qex8-0~3>(i3#3KpBp!B z071c91MlBGtNj3zQHh|X0Bqme16Zq}a&pm zR)(APqG3(Hozpn6PHthz#KIB}RRwIssnl-{tIIta2DgC>l8|U^l^rOXXW3hW_fJ`?2=JbL=Y6;y znqLMBHYk^p{Ji;}z{hpPxs(?SDnL-)&f8n`n(1kogNN(Q5S=rK3pA3>54r}SUrEf( zjSsIO)5GhV0!xIkL051v49Yw!jej4xP=DnTzxrL-%#Wxg%Z?h=+F4r=IQP}b)sy+p z?=jAP4=f6I!J5*dk>%|ZJ2Mj_r<(4V4A_e^P?k(+Bua%v4tkhRkVcuHgvz3nzj9By<^L058;z;*AZupk_-r*t6Z~tNI zhOUlf&jh~q=fTq}fQ|;NIQQe0ICHkpE?|gdt z`zZmGlQ#X&6MGL&?9(F%c6e=pK?ikx12&yVc@Vt)U<_JQ%)cA1wC#@vqdD^764 zUOH-I(hb9i4G*0i!Q zJ473?nvaw#d6z(+v!1iD#NmW0UGvub-*Df{o z$nEK`sj7#1VG?_%*F3ml4z&2}G^xf3I;lhWH|B&+Rr30AG@H*)>x|{Zb_}b zqsMFo)}%H zX1oBBi-z&7nZ;eUj*+s87F~_-Nc(jqp5kE_yG@9t^yDd#;(!$?=4sv~BvepVh6Ixq zO3*KP=SS6MgQIR+H{Es8H!?~z?>DO4JZW|mz}rWOsCUaX&;?>QX{3mrI1K)LP8m@@ zrBaG6^a~;9{JW&XXi$uxS5E_+v=W99_>arV$$?PFaU+>kbldDxkEiF4v!4uZB#v(W z9^gFc1^O?ddKG)k?j_U83E{aY?{t51&3v+Um|r#=w(H?wZ{^W_J!ou#jHq|Ce70&p z?jzOxekjU#w-2C+eF_<92)w{OuerHwyH5?A#W2tg=g(Z7=Pjkym2p$xq2*QW3uAEb`P26YRQMbF>gc)cR}_&NLL4;G2!o}9(mCw>TpLX2s+QJ#YnHMMGq@5+~7#EJ|@Nx%FU$))7rd#H1vpIQaH|Z zn{bg9skTk6zxDREvx61O`|#i(Rn`PdkHFx(X)1qc8Jv8}tsqH)*Xu;Aixjy$q0hHU zYL20Q;nG1NbZqiUAVkL$%Y!0vhEw8MqCbk`?f2m@O2s}lWQP=alMzWk%l4lMUBUV# zO)evIWmJ^Goagt$Cwg|#;x1N_hOVC-k4L>7JVb^3oh{uZO2x}^3d;Mmm!RxfUZzQg zRP31_wnrWoz>Edi!b#5oT0^1MT&rX=xgZUOK3!GSh&eaL)vKV&9)L_n*F^j~)eF@a z7b&jQf&cdI>X=uDz%F>TgrQusi!BI=mC_#+{KUt5hK2W7AVKw+R%r-hmzW3=BlgoR z?!(DdoHRF&D~S 
[GIT binary patch: base85-encoded PNG data for the updated figure images omitted — binary image content, not human-readable.]
zJxfN|gxJ@~mY4E|%UjQ6Wp8fmO;3cl$0$bfn+cHRvp3d%9@=2M@{x@F{TqqC5Wo(M zQ)-!U8YcpeycBS60e;CBS%^~mAKgEHT2^~%-Ks7QODf9l%w_q9Jl-oZ;cD+qKgI5v zYcLtx-p@P*_+i6eGp_!!=wBJQI{+4p$%~_eGd?aR+jRvunZU}GX(A4-PrZ4^&U2^s z(`Q)BrM^B-heA+`=|X zN&UY80ov!c=e1sBRH7MaNX^YIXHuJjibPvGVmZ(`g-tv}$R|!$LwzqX zcu)v(Dx&wn>k_3{W2GC1K__&o;s;h1YHkLFsU@RZnHXESx!>P?c%?m;E;Y47XyB)b ztjK>{4-XF>p;RfwWZPB8k8J_DRC{3kjVi4-CVH|<`;F^U_ zLY*`X&Z)_JC!L4@;hdlK^dj!6@x*w{3yvI4&Mrl*{|9kz0#)PxzKw1oV^U-&6xbkpO2?@0u}+l{GQ5YEANoqv=DCQSoYCLT%CCQ;c=Mx20{e~~ z7;v(@thI&|ucUU3ZAHUT-^@whEdNVw9KMiIdR zuJF{?_!~EZ*p2dW%m~D0ILk^;c9+OrP!L~SUTjKj(m7}0vuBN_ruA2LU&FeM zE}sJqJIpeZKIUk@`~C0x)5+qCYOMtWfp^qrt~bW3d~?rS`Eev+Lv>rd{6#xprz%`s)hvRng#QM;$T5i$6ry1_qpxxiXGtFFVyO_4w>7<~q0?cVp=5MMDVo zB(&|5kzvr&eZ-|Ia7{aQn~Rw9KJ}&4wyY@8;5}oK4tjbwTsm%OEUfe{G-vSh*S#LM z`QB*c<+PA|^0McG`gue7*Utra1`FEiAWQ3}#`ke!Z$q6jYS&y#`jg37qJLP1f^*J%=&y+R$rva^Ct*E1| z_J8vF-c%oXA()#s*c9gL_Ty@D=CAZ+9>%8ETbPLL6L*^CAJU9`@Dcu2g})4eaKomp zTU+o?&)6@P0M7PL+)k&w`R&`=b+Ui|$i0jmpM1Dt@1&)znrd|EuU*EEbv|#SKdEr& zyW2r?Nrom~%FLn!wuzP%)`c!NytwCZztEvzn?Hyh{lTvdnKZlHai zmbA$a8t2e@^}e0uUYBJ<^Vu~zw(mI9RklOG%ty#{-R0ASnX;rpsdVZ7Zs%dAI9s5cNH|LqzwDLXs6-Fd}vy(>N~ zS$YHMH z-?_qqgL-B3%$^?=532J^=XJR%?C)yW(K|@^xEB$(s90H9X$y;rx_{Ugd&-dU2!ew4 zT$iAeuh+lr@%+#M{})2M0T*IilwSMG*6LS&{bYT@UTERD$a$5S8td2M!b$9^U8-ER zFF*mOqB1u%4Mj5q1B0@@o~|w%iHV7+{wRW)c!phM zwsjKrU)r?#4o4q};~E=mlhux`dn|_K@_1uO)ehoYbaXU$g(bzs*&jc?P}#-G>inl-FKM_r zLva|}ETDP3du&p=$o+)vHWiiAvGMLKG5X1(_mXnYSsVYUZa(923jc8i`)`LcPQo6Yc@etK$}FXP>*DYGh%UhV0lUZA+b`<5H*}`AHu2A;MhLc7O-=+O&%ee!ePl;a)rODlFVwz&gBxLp|BI4r;iSWVG*GSWE%HAO=dM&+_ZEp7ts@7+%IX)3#Pdp!t zB(B^O${cKbJK|zfsaARM;e7uglO`AaA0LbuHg9peOvK>IK3=vRdJx45s(I^M{fc<3<#AW3cojiMg*aVq*WUx=gl3LQC!Hk! zRAJF%&v%wsMxPvRo|{(PC+<=9++v*E(mFo=jasGk?ioGjR^mdV-Ki1jE(_{yc5w6$ zEEx%Hi*|Ol8g!qZOwp%NN!C&|o^7_3st=gk$tz;PBX75yTReO_Tlx&y-`G^qyK^ zHw}+iZA_TDbZPs}NAgj}oN}#odme^*pOLD(FL)pMV2ray8*_(w*wX3oPJMmZE2^qX zb>k1R1_ah_cbXh8r#<=kZj6H7s} zj=?&JiA#e+1g{r zk6%aEL~`M0N zo@CH|W`o-{g)@DFQR>1tC_3Nlp0My(F=7vMQZdarW9MGr4DgbPj5_CMGxD!p@g)dQ zBF@X=P_$g!uzr0Rv!KJ=9ZgO2ygCXa(1CI#NJVNiax%)^vX5rligHG8P3k)=308)m zs|x#guSUk~&{%D(Mc>mdegVUWHBoX~tpXBeXWU=zc2S+CwtMEavR$clYVfh^28+34 z$X@B_l8-l>7{a&@9C^or!?`otI($vgYGxnCsEZv9#lmCB-Z_8d3$e*Zh4w{gDJmLhH3 zB7O4!+dr?zuqz5WR4d6y7}q>i&)HUp0*D@^5vDmMm3kSFeX#dAnR;J;x<8f5;u$xo zE{Cg#D;MND%xbAEWf*VVRuafl%rvKO&9ju((%g`xa-3iJ;`Crq5midc;lnqOEDJ5t=ur zA92BseGAHW&jVC&p7{8ztIn}C=g~tbrI7gO#XE+@F%M9-kxP2AW1Gw!alo8tV?1hnT*3%N?ee@=D zw`AYN`QD6j((kio2(?L8*4BjF>Ln|3q~+$EXHSEt6*`qRsJ=#U&v_-!o=SbUx}uw+ zp6Jf`lNnSDM(XX&m0x6i54}^G*s^&I+gshEZA{l4awGEQ$12}j7UvY{jEY}Sy%&~n zt-ZY1=%m=l+KX^>uE?@L%by%nveB&9HF?8D?>W?MMx!PyoxFdv{-X@{!=Wv^si@$G8?2gzv2C5e9#6E8mM#KS_ghs})$iZx=VFb9 z)NBDsfh)OB!cIU;Y>`F}j}iAdKaiM=!Lc<7X3FN6DLZEfwg?V?O{NOJyZ6Yfk?q-Xg;W(JSX`Drov zUl^bHeb{ug;@Py4sfWz%U#Zmfg!vX6C}4jd71cdFY^tZHXK0uWw%EgmYk$olC?Et- zVX;8{1FCP1%es}~W!z!>I3glqh-*`X>vp<$SJ_8$udCnfRZ!Gs8f}t4)%)iM+AYWL zUuZDUyV)#&I>8mzdv=y$770VCX%@Yk#l@$r*+g&oN94=&5f$fh1p_LS-$5kIhi*Z; z)8g=F*T#lrYmoQ6i&J0rn74fF92{KyZC>E^=+W3VAv*t=zW&b1s(=Kr`VOAsv~SL_ zO)=a-0>tCv3($69>Oq!pON&y)`Dl46UI@sXS|^+`S|T&l#6X>tF$4 zWYlgRGg_fnb#+F-9GbG2nVH+|29rQ8%O7ZiCNuJP3z&|NA3x&EAlA4A)0+C$5h0v$ z!@KyodQT788%b)NQ-&2`zJWacp}wvg#`|M$x9mB3G)Q1mS=s5o2kkVA+({qq^ShKh z*>zcnV=W?KvO@3xjf0j}8+>EQ6J}%dBqXXPj~1*fqmeB2$e}~Auv%aqeEs$<8F&Ez zR-ia+^wX#tpEz*>QnrPKq(Y8g%&$2KtZjT{Q>dGV?0#ftF&DAI^gB|lxz0X$l*H53 zopO`vw6wD1;CSLD+GqUrd!BD3wb|1yENt9Tw6fBMOj1cXZmy{*PCjbez7u1}`MO*Y z7ZS~*XhHe*%v^R?E!wWIBbd=+2CJ8#nWrEEAPC})y5aR1(FK=IpZ)^XWv}vr0y#@d zJ`%$=HgdvYdwYkmHIj2AWWEl2h0Siv-I)*OQoS?H`N_s@^0iQV{^DG5s-tRiU0UF8 
z;jp+!qxy$_v`#C`2Mz>DYW4|WTlBsGY_MLTsJq4IxmS)czM=z-mLY83Abv1>eY=MH!c7C z*&*!hmt~n^G-`BEb)Vgl<@3$XVrBM1nZ`r6BizFuCU8kf{aY5xG}WP4Q}2N-S$OoN zBUxPSSp|INZMjfjhWtQ+!*uWsbXD8f*l_Nf!`5U!bm(_;bAkjtoj>EcbvK69LQXt* zEo?E#p_1&LFDWk3IqXY!u%m}9k*_*{BwJb9%FLqvukYe)Sef7$(ovxsdCb%z2wP5bSmlk?Y}NB7j1AP>|={xw9kO`OehgC(K2~ z-gA>z{>>kn87Y=pDNPt?6k5C``}>>WuVPvTPnqEX_3xooUr){s&We4N_(2*wA-H&> z$MxZ9>wIb5xJ0V`JY1t!dtCXvmGAE9t510HK3ZY)y;@RV|IN8O@3fr_etmjWl|NxL zvfqk;p=08j3b8;X-@b{RIPoHzwgtRh1w}>l)ea30E9a=N8yFhS|FK=haazO7e9!Jn z?G9~c-@jEB|4v*EIr;Kv@x@O<8Rsnd^naY5S;-pz^l7eVN8JdUE#EA4<_L|+8V&og ze{VkhHaW!f+-)@D$`6P5w%YvED=%-y^B=`um4?1NA zAf{*wQXsX+I)L~g< z7q?4Tn9a{MW4D#m?;cv|yk_MT@#pfDn7+FB5BIl3{kCP}z3cV_hbx-9zWssfv#>B0 zm*{9VIc>yi2@!31|{Z6s@61>*)pSc{k7&@#4w-v8AX;S0C)5vE&Xa}x~9tGoN6Q};+;UsL3nEufd8H?I-l@6)F;GJ;BfD`7=01{T(5^h94{ zV;qpzUW|z2{ORwJb!ptrKXlee=0$y{UgN^T`uoZCrp>z6vm@KSKN_Mf*~!{wD>>2> z@OWF`gxG$dc!p0(SR*#vFf&_55#OluG{eilz|1?HUl5Tq?A)0f!3_ZP`0;TtsL}rU z5As6=^M9C(jhz-R%S7{(lw8Zb*0)`WdC%bBp26LVnx#KC@?RX<+8(){npQ}-PB2S= zQ&PMpr(b6DZEzKe6H}~sBt#c;^_x@4%S~D4+9X7pca^QEqE8@l0lQ4?+O@cS^2T^v z?Qj5H-otwt8AU}ze*ONFQ!N0w|dIknW&o~wY4Zfs_$w}wA&c~32hm8j2yXj>p zp1DBLw+;RM$Bu0$QVsu~G!B#wk${!pU$wPq70P{yUXYDLyS;it9@I+wSf!jG27=z; z7qUBt1`Y;X0E=WBt7I-jO%SlOBN-U$_zY--Pe7o~?hcu}mLzlL=75q9E3K4NMAX%c zq7i&4=LUwx{!nysryJHHN2+^$_9U88AdEr{odjxx#oTC2K%dHHWP}KN(DRNZ%|(O* zlH?lv_#eEjnjA<`#l>GcD)z!(v^mIE&joV zI&?Xl2RJyW3kx4meP7&@E6c`s{MJ_l#BlFXuWsY8?sIk)Fs8P(jg6)C^6C-Z9^ccz zntg!IyZn7^bfuXMGowm#0``U*a=zS|b_rey`=n0lcIWQ|pO20ca zGiAa}u8Wb$^a{0aNhY=7o)?BlYKD6U27CVOX36_^wQv8&rNf~#4ttm9s3!m1{JzU- z=;NTtP(b^^3VF52vhO(^B9UwC7T&mxsr9i!#63)b4P- zRDuebB%koWxjvY2?@|tx5wvgw0%Z52Uki90bg&Ab!X*LVpqCa6VO>|Gme##NV*}$w zEb)az_1*FDLj6^bDkHd6#rW~RoBS=#6XZDo?RAP@zItxl`gML1uwLPH!jUU=AFeNy zayY!Ap*?NiZjb%}qT*UsHa zKf-Ge`P$Je2Gu%kZBQ$vkQ5-sg47OB3NVeUe*6evX4$^I8X^cco!4Qn?|1wg+QYxX zc1=0lQ6bK9uh_0c)$S6SSt)0!_79IKU#WkSY9$@oN{apIL*5d~&{ zBk1>bQ@458*vOHozN}IMo>7H^*@}W3{x{0#x9)4|_6yH+_$g0Wp0p1SoQaN&#p%H; z91f%gXV0B0$VO&af|zF!O?~KRN#Eh;;Ze@E7NF~ydr59D|9icZ7xmJHETNx`_wPDx ziB${aSYRx_q+B63TKN6@Tu<3egPB|0=3jOoX2w$xMnhbeY655S4|O#)C^8TuA!yN! 
z<+NmHWo3naXW;B;B0;JL#K6~A4(*2O>9;$FDiCUUa7NX$+Ih(&3yu+_oRn?{!)ezR zO^*A*_IDj~7KAQ3wAR0Vp6~%OwRZP%?akVsRoDEG8cB>jxNL6pBe=pvPvrjQd>*@- zxAuzL)`!sAD-k3HqhmNp@ViE$=h#smBVOIz2d%aP++uyKRzeyZ+6xA#&`1D}<6XX5g|9xp>974ayGnU}PqNUj$MPTbBOLJZH~m?qy**K#mMP8DFq~ zH0OOkin20BLe`u%BQG2UNDJS8XnlH=Y1)2urN65XrOh`sBmbmv}YW-`h`K ztDf1>?Ptw?i}cB1)2nr=Sw~5O2A+q~JASaEtWgTl1DatlIyhR2zD} zJM66Vs1to7Fa$Mjr(l2P7K0tf;RX zgB|TUSe^qRl>4+ukm%j7`JC}SroYw42bPT{^N%~%H!G*$Fl>&b@-nWC;&+rg$(sUR zByy1wd)WMyIEgYTJ+Y)1WTQvO`ShG6Om%E$5wu~e52VMYM4i$37MzhV?;Rq(Z@Z{W zc2H1ZgUW2bVxrGqN=fu4jtCKR%zW-!F_#gzU42p!OM_=~Kvh-T(P$;NB(EO{E8hjd zFKboq??K2fP zXCv^lF)(s2RWjJ6{!7~SI!VBFiA z5cjd09Z8BR;@wD8VqWJ>JM%{#(`<+#t4Wo4KFq=M^Uis|E04(b(8L*9n_<^(z*q&CE<3^kw|qxcl?xvGLD= zJbOeGYS+ZOi`wnv)jAwDG4U`%aPWGSU4wCaTlMy(g;QVtmZqOhcm{P@02XCf-x zzIS(6tes_K()#vgtp_U4YMg(jY0u0Q?3!)550T8RV%9&JOwW z*5-mxw5BQ3^z{rc&whr{Ld3bfaA_noBSPH{bP8=b#C2JwzX^B;qEomEs5a41Ud&KN zAu8_Vgj+IXU{PFLmtMQCEYVhl>-1jmC_eoya$tW?MXR%!)OEhN?3x!x?oG&?a(l8( zazWPVgSGJo%5}I8@Nicry?$79o=Qd`pzRaNg^egQ$&lr5-;VS1yJDz^)Ap>@i4|Re zkci=Y`n)^>n3KQ1Kd1@|L>`J$9tMqz%TiUHBp1s!oXnt9n zN>kDyP8pO7cOF`>Q5LOL%p=lX6dih9)pq&9q=|`vS+uG{ZicsVmi{)&;)0dCE2k{q zewkdoyt9KDg;iDND6Rq$d*14md_XCe%6Yh1kc1+Jo%HOs!f)SV5isIfk%qP?+CYoL!vt#h{T|v z>XqQATU6nJfz)q+`s%6J4AdRLpd>>>;}slOWylhBjuwn}KEB{f>J08`)A<%T_uM{`JlPMGeWawzCSj+O+y5LFs>INAFvVG zm`6%x4v)e-;x_Ni`=fjIa$9oRg6vmvL|PrU@)VsNHeu(%sosvx&J;vaV`F1{TzZ1V zFW(^&i&Ryav){RMSBm}a^6wA7fV7doY^-Ng9p!d#B^G8?fX9Kh z8mN+qaG|g7HVKM;s>jhlA)lddYIOCVA1&bow_FU|JJ4U3!go~UegA^dB~(+!_zLb! zdcNii|JwF5?npQ@;C@qlGQ#P_-20Qec(ufFXQ;IS;s^=~+Q|!AoEoA*sIskjgJc8i zs4RVrU|SsEp<#8}Xl{;R3n5~UYrux&0GsNN&!)M-Vz<-s?f8E-z5-9_(F^YSZBh)O zQ*1nkgs*(gI)LZNNVIq$*?Q(m!zeH|H6;!8P!$O3~}6< z5%}^$Z8TC+-0JT3vmYGAwr(W}BT5{*`u z3Jv64gqvnZyN7-$Tu~UuCLt$~!E?*Y%M&ERf;bMNR>myz?uI1imAGWTXH3a6{jNE- z{()~h#=IKMn_T|&qzb_ zyX|H6!5uq}8`6h_Y-iQZpTxEKZ~q#cUl5-E=M&8!<#cM2Hx z+(>aZCT?H(R)X*tnp{GT>y)5Z4w_5fxieEzjIx|H+R+N%lCDfrRH`LMsBszdy2)e^xzKo5H-B8%vY=`(7 zyxCx=K|>u03;O9a@kE-@?owE2V|!3KZF1Wg$vfsZj7{~gI%sVU@x1@SKkDA+<^ffr zN@<8H<@a6h0Ym>vq(j04gr)7H#X%vVOr%b!r;z<%XG1LZy?WLN^nnxZXcS=gR@QS8$Kr6`~VYz#tIBp+X2Z(30?F{0=N z`;*LgkAyXZ&yoP?hUp#-O*v94hsB0sZ(7F*8FFEcoM!fYRk1V_L1n8dx$fW2AWU3B zI@SZ7V}uz)%Gc#e;BN1=@|r;dR&|B?J81;$UpquK-2_kt z5~H9|^xHyWTi;}L-m0#?a~EgO&)OpUQROVV0MK%3m-B2%WxSR!&x(AO#LlTdi7O`Z zENB=&VG`Qu!sc!JSXkPil?Cl7XrMuP5O7#wVIdf=gv3kga96f3Q4$NdY4#*V$M|aA z1?u#>EY+3QYgLc*KmTE1ekjIp8?-fRqeln0QeVeh2{(w*@}zK;l(@0@p=?bcZeK`p(WPfO+}`mUEt!qb|K%DK@&3 zH!VGV>!wW>R#xw>Ucyr&TQ4q!FCI!hv?=r>$fWUZ;KT|{^R<&QbM zUtdoz10X}FZQD)6mtZr=HGF?a^DUCccRDXvY$Hk;9Yu7%iJcU~~Gx|ss{_GhcDoOxa zh}6-$h)N)KohDRuAb9rV$-?LuakKG9#hC8j2QMI&{}BP%DoHXJeSmU=N|GAG_U%u@ z!n7O!nMYk&D|1Q7M+@nx9#2XUzWm(uxjqb63>GpK#W+p3RxuaA zur$n~=}?tH6DOxOZpuS8bZV-je^b2D-_cr1#=*%7;{}sK;AO?dWy?(%NeLY?{3_Ug zh~ZK2+R7*(D`buRJlpQ9&2ybdNa_l|Z*l3VJ3=I2bOC-|e%1ePBF)}UQ&>u+BU62A zPX4Vs?{MrY#g6vA>0+LPxm>f|QwDHJan5WP?j#qwogi#?;Hjwy9>8k(t7RB(*s^83 zHD?=d*xrGQ|DzH?RMv0TCj|KURWX%2I5+?nr0M_@CP5RIjQp03-fDRB4Nsj);e#y{ z8XD>stPxi(UZkqVQRU+jvmV&FckldEzaqh?#KU%yu)DG1wD`O6hR%c$_t1sYMhzF; zdWSVlQK+a@Ie{%#jI(KKVgipcIWbZG?nV7IHMInapCC_9&Z19ckHkX#$*#$Fy^F~& zR9NowirlC(p5?I#^_AE;zrVV@sJ!l^s|c3Viy$D5jWwkiT!A9*tANyC|U7JYANDL)@y?<(XEU4o}fHHxrG=-YHgj7mF0{sFcXs@oP=-*dunt$LpkQy zfdij^y=phYNs#Z|vBTA@^+G7K_@08Lx5Y#eF6Mt0Fg?8x(_4W8rA+kw=|P^8>`o6AZZZ&S$NTlvY?>9@FE8D#xg2R6Cx{F1mAP-_I{Q7-4Cv?|`bp9gCb9l1b`&TA*ikH`lxsvx z4>B5RZ@YM9FJIPz^~3jid3ixJ3@*G7rI~~S=DlMWU`+KPEsaw4R8qVY;*n;MV#Z83 zczTS6tcGEd~0>lb^BLqOvyK{Qr4G-iJFN?Bj0Ds zZi+z_d;zv;5jrYAW@nd;6us2PwuDjkqYT3>1>avOC(tGYEWX^wgb`$}$0?jfZi2`{ 
zMw>P2oX)%@2&~(f|4+xZPZy6~5nH*vLO4H78PNX-dPpxSD;)b<~}A; z3seS*&mj_6AUNFe?EDtwwNzNG-tWBM*rhIif8rciMvzR?&laD%H2yIDKU{!5q#J9R zAd_(2(A=DMf8tNxUkS0Xhpd1QlM`ArG;0!fTUu`4#X-PM37$w~|8J?`(SrC=MB}I_ zk=W?V`f8$#3HIGMN^@&I=H%E8H!IuI!2rqLNmRL6%P}?JR%A;Wnp>>wjN6d&K@dZFLd;1WpQy#dg|QV9O^41Vxq9o?w@c( z%^IJq3{f8eV~LlWTO2ie)SC!U=)<*!!dXzVP`0i&BYZc#Qg_L<{CzGd`A1!hX~PPF zvtI>qG~U>KK1TgYo745HabuSlk`DnNJzGre)b!yd$RF@&P)8m?2@gte7aLF{xK;Y} z01+@k%UdBLNbTnB+l0y&LKj$f4h{}Gdwb`JvO`dF2bt*fIb^kw<$4zi{%F2#-py)} zA{dZJ=PiTM$NBhQo2#x8nNWfu%)6tbBXUCvJkGed!HFzY3&)rhOG^moV zQrS;Z&ukvK8<)7cvb$5BQ|RB&GFpekArpeV2n6s&uaFW42SZHP5}Hg%~$B)0km~Yv_AXP=!NW7xgbGAL$&;f-H5KOjj-@bF_ zi`TEYn7H$sbMy0;(Fi1J-X`uw+ob9QmfQqh)6>2_KIp9CB>;NU-$JUYsu6c7J9vLU zS1FyYK4;i)4V8+@K(ejPwz96PMKy94gB*EF`v9RG45OH9@Nd2MiYh zQrFqD?x^?X58!( z-HHQ4A1#2uEKQ4wzNDi48qL5#fwK>r>*8q*C>eG@RUVosYu_MWLlXwl{3Z0!)ib|K zt*)=fQ0zW%=Qgl9ptk6W5)8rI=(|e!bo8Cv=L?fmJ{FVcouX5(&AQ7yU0^a@w=k9b z*%PsyLd*kU0f8ZeZAzRFEuROwtv)QCexM=gH;6ju=S{~kudcot(GBb znMe)kgRAJal7<^(#Aypbe9`97R+aW)6db_cK^(^m-^n1l4bP;04=O~Jq3dGLQhM$_ z!z@*SWQPb4gr+4h&+3@sCh!&5RCx6y!KjjdvPekW5#EPGh2E96($YRG%JT9u#+(hd zWKZHdSS9S(0xr}uJt4v;ymXaX&W?at`O@)2R+g0QNQj>wEeXd16Yc$FqsN{;re#gA zNxDGrf!Nv4%=|*zS`&eJ;H9LAR1wK&T2BW!(>r#o_~nWDB_?PrN;#RnI<$svhrH6I zGxQY)EE*IpTAXAI&7e2<;i?uJ9-$xloOd`{pW{HkVl$n@H|`w=9#e0apbL@Tw>LzA zs^zc=&Yxd>8tsPd7W^i=7blmEEs_Vd7QX;N<~1J9c(0noIz2!BjrE+*o(Pk>G>|=x zlh>kQPJU}3{Lb89;4F#6aYNT-6mDdC=0Pe%Cs**xOh8H{@)%#QA(8xC?-wl@&>EWS z7~cHPZzEMsZj-zkh*lpY;a=X}zOS{C!~BI$K;p(Pe!N>SEKG>iQFj1yvfrL6NlXAZ zzHeEo-o6L3ve3BG&#H;7FB6UN0`M_K#(c#c_a2c}zz(el790fm&)pwcQ7$o90E92?m4!yeK0qVRCS$U?0d%mTTeY*;w4kr8ra zo@KY%S)11!N8gT6ldk^eWayAWS3NRiF9<&6;r8rdxbz&*fPkceDDEOMlUnGD;?19n zq!vqJP)o8wW(5r62sih;H$#Y_Kw0(k^+g$Ugb7n4&>4&L{X;`AG_v#)Zc5WxG_)>p z|G8AgKgPo-BInqarDR$caq*A>U%ABA={m(Lj=l|1f0Q^2!$qwx$N+i&QdC6KuPFmH zeW9HMYy05KK@>l6Im786{kTUunI^GDj|nVO#!{WZTWO`V<=7kfnH<80k@V*jsR z7I!;{0_L9`l)o#WmrTo>2$P7)%s_QS5penPkztIIhb08f@I~4bX8@!d0PJGfJv`t+ zK$CEN_z+u*hnu^6*WmO5t`|-SrT}buz{luGBOZ{IBYxkjmeKRH<)uq`}ya#M8Clt)vxn_@Htt6%e)wIP>qR4{5hOsBA zjHSL6AmlMu8tKBs(u%l;OM^_pJfhprlU8R=T~Yhd_xGN?2U{ z;LS;4(#VfsL2{~5eF8$xMw6ET(k>fFZz>m-Y{$orS??g-u=}&OV^Eo(p3BOxyfEZ? zWzBjQ1=n44XbHr|Q&|>E&CG;z2Z5)RGIfo55vqCH59Cp~h$Jex2yeg`6EXxXrs7_H zHWmmbc!#B^L>Wi(sc!O~4;Sj7*u9JwN)T>0K0JOKJg zwC}GUjJ+J-d>DHoY{4kr*6PJ<+YZ&n30gNJ<@j%gwGYy6=w(~9_@MU};o!%Q zA3>AR8*?!**z;QTAp;)9;I?)=-Ymj|THr z&g5K$<@by_VSfJhkNC%M9T%=TKgEY8Wa=n@eT`v1XicmvEd>DXgN63?w(sKxXBFLP zSAvd3K!$=u`g4>|VP=HKxsku>1xrf$3&%yaBc7DgKt0I3Pxq_GgrDoc%tnKb#5g|Q zsS_Wug$=?TF;B@Fhs=x1(ZN9r^Fp%P?7l;r!^SR=t+J4KmT|vNP_3Lqviz=Jl7$tx z;Z4d5Jul}kT|zOz1V9!j43}fmFtQQO7APp5`Q)T=TW)szYhrFCDaiZ6`Z?(TN4{OW zw-x=gcpb5gC1l!3h3WTv_7Q&}ah}A^#p~qyjbp@rNpYk#l*#5KhW(TaA-!38TOm_%hfe&#fks^uRi3%yG#BssrB0ZKtq+K2z@!MS-ip;$>YaU-Y!!9 zFNq;e$py-;j@jJ=DF*=BR_%sF)vDTBeqP>DRMFf&qd`;Y`zAsZnMA6%9<$aVTeSdj zmDJ(GvV_S)yObnI=HQI0Uk7xglIZK{xoOLm9EbS{LgcwJbP;`pD(3B!_XjZ?G{0pF zyy2ym-TB0NNo)tmmM((>7w#}|o;{B;+z~usY(@eX#0gb}P&KSaedFrs)BjdVT0vp# zZEP$^X6MeG%lY&vP+bRTpWgRfh}}WgQchG+jyS)1xuO1RoM5%cKOz2rD^$JOfDZ?3 z;RStkXY-aVl(i%s7bS*_dU+HStVnYq+&R;OV7?@8JLNOBM^WW=p}C4S>HLKY^mN9? 
zg9*8-Khdu&CAEV|h*=q!NLilnhMS9v3#rmo%A+Fr=;*qj3hLp31QD3aal5?y7REd+ zDHQZ7DlSP&P(Jy7E zry#lfI?h+&YB{L4pC<%i8W9ISVG@V`QAA&oe*N?X(iX(PU!8ReDa%w zHIisq1p^M~g|IkHJ`OVqQP3Kk0lx*|c^y!GPGMqv9?2z}f0@I^F3$;07)3=1o1vfs zc}$36&Yn3#1E}}&!}WTz#C!~r%eh+)E>PpE0Yo7q6IFwQ@;3?9i|i~kzn8O5Ij=0X zBX%lKNx4Y;I;kSWe~lzNd+(_8N{$B*u#w+cqkt!%fhrY!#(bxmDNki2Ut{xg6?|lBan{r>ITUIV$oiWhC zVn0?QFz!&Ow(O7~S0^|khAXMs8!?SuR2dNzVSR`> zPft$TqZtJ`b@C7vT?Oj$2+a|RIiN^S2Z08MPPg3&i8{nmkSdfA~!`Jr*B8+^*y_hwV+OiftAFh^F)T$I7rcZVC zyekCok9i^eL6(15NQg+QaY~>ykAm$4$l?GmF)jo{c{8k5x6>CP!}o1?jgfaJC<-0J+OR2n@k4fe|;# zow+FW@{a}Rnws*EY3=G({Xv{hxz=gtxv@?V?q4#t0MkGK#~Bpx`ZfHp8XffEj=z_p zynBBD?HKH$GWJ^hXLLBKSbl=-Iw(Lf5Ts>wU+vSpWVBeYI!?*+{5NZ5t*lp7SQRX2 z3}T2o?*A(6yokNCh_98J6ksF4aG`>?uDS z+kck&9&G5nF?1L#XFr=RH~2$&rY;uvSQt;0pNPsZl24j>-#&!Ufni~7*x$&{Jm8sH zaiS1{HfNgVV80uL-G3{ZLrr{cVa1-kgf9LeGA1z~{F+FfI&n3 z{pERvDF@<47%sKkcQDLu;7E~2K!7sbAx3SZdTp|2s5xU03zn`nj0cs_beM9}e=15z z&Y;TDS=_mESfmM6J35SGJ}S}_uXu=V9@&eJ$*M!z!`z{%sd=UcZkv4rSd|CJNJBYA znovfh%eLx&_FvC`zN0-IrV3_hB@1uOG;39pfquC{s>1w~y@f@}xWwv}&frSH+NJj= z>O}7Dj3a7|5z-szJfA$tw;2wOP{_WQ4{nO`Fd-FBBJu0%y3lUgB>z5A8}<;yQo!S9 zqCvmMP6W~e$|pg^N1YtdO2D_`$vq;JwVOB6kHP!=n@r2?tT;D_Ycm5|N4e z8UhLY{d)F1#MhUgm5`8ZKzVBgyr-57H7!d>P|%6hPB+-PBd(*lla*tgMHd6JKYc=m zMG(|4n6BPZ8+Rh#eA%J!mJc_j%Ls1{i1DbX=mOxe^0Kn~3;nS;+wVV3X=pJSJSHgdoMeQUNT3Gr?B!Yy67MLy zq})%x08Tz~I#?78f09hUhpt%V`$@qzz{hY;orIlo|Y_X+#SF@0=^B9CUS~Q&Vu3zye?@!DNDV$5~Aj z7+^9mEB6xGbbmQ+F z!UqnVimIu*h0GbRy9T;0n6wh@Az;=r2j5P^B=oK!@KO|!8bUFjRSBt$1RAaimWIs{ zCN9D^vrb-Fb&>;n4)R98YbSUhl8G^kwxt^%8{6)WoM*F98*a1r?G)}H4=#+ynqov; zYSy&<`gL6flh_h7_9{Y4Hpk;1is``RX%ldCiD63~kUF^(azcK7`>H3(C`1UEVXQh| zu%&;{9gc^1_N-_bfbC*Yn)wMR1^6tPo{32e+q+y`YBdW;fJ3SZIxa0EWY!WC5~7i` z6jXuY3R>B?$&fYYE%;}N?WkWFLhSSxN$C7EY0uB|aG3ldjFwMCWEMzaOtX-~V9nyvBj^x^qqzRz#;IR?fG`lA&JNUHltGbO0EiI+ z!$zYh;)6(&Iz5_wXLNz1qaH%QAP69Sqj3YK24hAy4`95_?J48>Z3xqG(D5uBaN4jT z7?dDO3u=Zv5FGIGz%h-_*hX0&{qJ)mR?M0 zt!)_ta1g~d8b?rEoQnunH(4`B<=4i|o9!@5756~wI!Db_PqT6ho63IXod;UZbwJx- z^nI(QPC3h@n7unXCMPE|)RH`M@!Xw43dUB&`w?8^$07TvR`W5m&blIS*3JMYVDCroF&f46Z96)4$5zG)X5dahz zjc)P+)MMJ7-`YovCa7OM?y7jZlnB_5D7?HiSl&FVpPT9aogh4UB4#?_Nk^cgF zBH!(Xe<9>b1TC>+IE5)UODe~q9YvZyJ+pd7IB#IRVW6gG#+zWG%MbhyO0@S zLI{WyeAe2O%b3KoN&ZcQTdTyZP)Mlq@FWe@wjw)18$L>=uCA`xzr={&ezU8N-!xURQ@s~_}DnX^!8 zvVy<71ON;;%4(L;6KGLmwZ33lUR4E5GU!>Ggt$1zm9Tv~clJO(d?amr%FYp$U(BRH zDFx@@+4JYPAB5LN*r0>Hlm)CfjT{|Z##Edod86n`4<8@#yMKHZ3>x0RSBxld#Q zm|vp(3J38VT)u6vjaYepId%tKH_O;aR~MOFZaPf-gcP?4#wnfh0)^$V3XFs>56>_H zL*WrOBj1-VU+%GwmC)No`-qZS*6l;G@jnf`jpPv4dl2<5Vc4t9#*490AkiT+uz3Ue_fJs?~6WMchM^~`tL1x})&XmvC~Xe-!G zxeK`40S+6o<^MW+6{5MVL91zn*I5Mcu+2Bj;ID9nCFdxDpVzL-;|9$b#i zxj(6IWMl+O2DK_Pv-f5Io#AqrmN_U|3`nP@)-iXLaMiOw(jU_>agafOPd18O{j%dYHkOvD9#+WUdpeLA zpCOoGfMDG}qe{DXfMO3QDHb|E$S7nO>^XAu=yLnYk~UpGqL#7?y%!cS5$V4|7y*+5 z?pF$k@o~A&enQ7YF?9m7wA-d z-7(4?{we+W(GaO56BIcB|DmWMdh^RVy{l0*Xdu1%ODpBp8MH5?4v&lwT1yGDls)3| zi*I6Ut_MOVi4!O0q&7gmm6#l|apTqe3gmY}U}6#~eG~bso3lfH?|o`5nSHB?C&UML z?>F{d>(*fCd#dJ{Jf>%7kr&b{?m$0@q&0B)m*VNrN_T?l=U1zL7S2F<2T zM{Gyog7KUS0A^XvLSP6($Q16EuA&XcE(b_bB@=QKXLLb4P0pK9tM^+yP~~0xW0a5k z-wa6qcT_ttfVUMiZK48gJ&FRQ*`?L3`{vQPjbk9B;Z-ucdPmBi*;ki0?qp0Q0n;|z zVWM2y8*;#Af`^;jFG>0TC)a;`^uH55|L+(3{{@2o|LsG#;bliTy4q zDM8o-CJp|qbEKT;iY;sM_YrkwWE&75!nEY)fq`!u2QM-J+FY2QM@mM$VS}(~a~+%9 zYO?x&w0G`dIi~Fze=B6qJ0>9-8ktdKH)SHSNk*YmigtF>ASsnpc7+ewPo^ZRrG4wZ8A0b$oxU?^x?w>yLG;{%|;`x97Q^=f3XqI?waE zu8v_3<^L>RVh3L+(Gkxk%{FKq36b=I;K2iO|?j^3v@PiF00@1BafjVHP%fk{-6YdN8VEpVx{cZGLh4 z+JJw+Jys?VMsvPBr1-Q6fukS3TB)_LNTnF=&BP=nP5}%h`Wqzj?>P3P+xF}kz3|M) 
z9jez9@xZmKQmX|tik9fH??!{Y;6Z~14g`!in|x|~JMZtGy8l1G78G(?s4uxtNEjf0 zusHdc{YCa4_woy0xqt9s75R5eyhAyeL9UySAo_#h>a65cnJ^(RIoTKDlO7uQU;(o$ z$2?;NtgWs8;OoT;0C?ytPKyS@+?!;@8b@DmUn|G;^3S4>7hipnkNnS|#~+>7F8!}* zah;T#RRlu+vj+E%vBGiEyxH_Rz+NUx!i%s}ctaR~IJ?cqm`{JTmmrY48hM$T-Xz1e za&&Bt?t$aQ`prGXT~FU3k$*e@BLlQu_WnK0k%!YM4>l_c-+76Ep6hzy`NuXRT z1%MJrZP#>9aVddwOTYSLJjK+Mi5jLRWLgJ8Le9f1p%mISp$Wn`rZK`UJX7VlV)^yA5^^#!CP=Xl%!8@@^iz)>UwuW#fni!uG4X%@)Vl`_ zt()F|-aledG1*v2fV}0XHAHfvOBQt9$-fx~>c0@7cfWqHLH=8Eaw8oL(CVz_%(;m4 z$NL5b)bHOvqE&~i;p`X0qa zIrqMtRBe@7H^6=J6_WNiHoK(tL-K?1pHJ_b_nrrBAD0KK2v1mNu5#goB0bvgvpaQ0 ze7?Gx8q@55zjtnnui?{F zfW?FSogxj+-e1ra-!os+pS86-H>YB_s(qYoZ0>>79aSf@mxR1tH2#Ox>BTwYB0oh% zMTM7L`JKkUCR3)sT>5gRU65BzcmqwdXZ!X8BVA;Ov>0tHtpUlDEf~U_^GL!|)gw>P z+|ySOH86z-4ZRiL^r7b2XS4uXtcZ;AL`~Eu6fanb{i!?bGkbqby<>Ca0S@cSK`?SB zQ2Ylil|;8iSo%te9VySSO7wc!78DkyUftK{3Dr9$bR`^g+B;_fW+`+t{-E1EWdijGnJ~{ zNDptw?r4&OI(?$1ZS>$Tw%(FX!!FH-J*;vLJU{}-Xs{Ymi>CzziggCxXWqOCE1k~> z)SPsQ{*?H9NFgZrXQXsF>5+Z@=Eu-Gs)zu8ipBp@Gspkw>z}CtJ@d9>ls+s&v<Uq4Nb-#BvLn4GAK7yY12Cr+F=)FJ6S(WkKy531<-rhAy(mP(t_TG-gPd2tc! zI@kfK-}*9DH#~gqyCHcg+h1*nA7@1C$V7$*bQUA#dl9G~3{?;a_Qc!SJFJ3IH zGT5*c!;-iSs48)?7zz7}QT=px_Znz2`rk$7T&lg+2~hyvxXh$jIa-DGfA3gU-KYQl zju!M`rL@P5ONXYy9n&)ycSC3TAi%70$_bHk-#cubS0H?>2r-snET0?;jUdIFFhdgz ze|B+!ukzqrRwK7O2SJW7FTOcu@ik-332#P@8bya3#YA%~cw0OV%^dyn?d~oc(i!M9 z=ymt#wa)6 zvx+Nt=DHWmV{>)FZ#{Zc$C)!|X}9Q0>_FWeO(pVH--2{O3wXUqN3tO5Zt*NuwoK!R z8o#^IL+0V7*2a&;D@V<^{&6JGAfOLzA7xta=@N`U#Bxh1bQx zhE$-zpnI=_KF+EudW`Mm0jHx-7y9(EcMT&?2^nl{ay%h{MiW=`Cwfnaa=N%;z=#pE zOc2bw$e)M4XTg16J(^;|lH&jWrB}sJ$%Qq4>>3TiDu4CLaKW{|bd=4hvJB(&nO-ml zPwc4&tnOR&yW@9nOG_Ve`XsLDVt4<$fdfOh-h8Dte!X7d zCxN1t29u@HgfC<$%7m4HnbVODM(!MEC*9r?{3q&fx#CR;?c{<|F2R8ppa0VDBU*pM zebq*a{Z~$G!$+X?hz~-@3mv**GhQA>c*~akabuI z3nb$(-?HFWALr5COBW0*-x(FurcIj=htjuiOl_Go+x5bh zT!%+oXy`5uzFzI-R`{qWX_{J{c`6N7b^tEuado-a|74Nw ziq2`&T*v2ElWPNmm&NKw9$44>?v}#&jQE><@7|2w;!+r9Hx@5aXsA%1Wb3|OF^x+x z2fQX`!w7NyitH;V&lgbM{r>fT28pASvmWqW!~HoOhMfXNi6 zp?%BF&>!MW!MR%~>|E<5_tLLc_qwyfiL~S1CfCksAEgp~GI~v#fgt)ftX>CZ=Aj~H zcs$8+khmTPV>5{bj0m54rL`eGH2j*?*3@3MukrsngQ59n5!=&N4Mo2Z6{7C<4n;(a zUH9od$$g@DZ-UVhyN&-CS>!~jj_^P8&e$g%sxO@(r2LeWlq54PxUyUWQASrot$Dvh zb23mAU`oA|z&^ngGB#JwldlF`o9ET`4D2%l(VE@HTQ8)4qtEFJ#WR{UD|o@!fdk7h zmQbPlj(TZtZ{Z?ex$=D7odrZspDLTl+B~asJ3G~o+J8#3;P**mBD3JqW8gyMqpRy)yuCmL#e#mY&7))Y=Z@aWyST?)YQmslJI|H zv*^j}t39@CxW%|H-+61^T;SDyN1WGsgu@1QdnpIIEPizqtC(6unS&|9<$CSpqhF^x2b9ayq?jqDasA8UMr8bGAD4s^Rn}vR2zJO-JHIhT zYBGvGoGi&H=P(8Ws0GuVx?|4|^bQQ!g}2K6Ny0=za|FLmM~3~nfZ&Ml2KQAx6d!jpIJaVv`?E{1)1-1JvVoD!F8umP z2=HT~^`&{%R=FF&#s_UQrZlNuSg^3LoBT{qVcE7m*L%DahY{gF_2NY+AtV9gXsqTF z3zpyzNW2OL6Ya(4-eg1z6{`Lz`uxSlK%`M!^8EW9Ro#iZ^Tg@jdCfx$MKQsN85+{F z=g-63ZJ$=CqC;EZsfHaM<2X7r)2?aTrM8xf??#Nan0ps&Gc+wMs;iGbCWKHm%zJr^ z1$TcI^ZE@AmOGvTgK23^ntO&9Lr+IX$6>|$%31hnrC`hZSy`iYp1WTyVZFl+A3jqQ z`6xSk1XabuogE#G7`jw`@;a3aGoB2v?9HyU+gFY3|IJb@fJ$#QRRIX5_$TZOBF_tv z)q)l5-0#h%=QQ^oRs`zYj%|M&%Op!=e!yDJx;yE38)KjU(j9R~+xINLc$ag0KuC`d z836^bGt`U>&>}OY!Tm1~`W$^|O)p-&FgebuVE$UlB*Ifq+vtoWN&$Vg2OT+bBqG8g zGZR4;t!(7lI}Q?`>rnF`nMKk9I$@?Hn=E95bp^gQOm9(d6D@u= zt0g0W=1h!1Kb5AUS+iN)IlB45U0(2z1NXU@37NQd2_jjlzYX->-W*3^ZI7S+qym|1u34E7ZWnF?>`=F$?I4O^?>Txg7leatpCJwSe$;#N$F zbp4?cjjn~$pY^L<0md6)G_CR|ULO0Zb`889s_Jl;oZplgrM)?Sg)jp*n7QuuoSt>^OelEZPS-K>@?WhJ{stpWrj>oc&#Z z1@V{a%u)ibf0dUa^X%}&ZmnbaAV-uz!MAMv_Fv(#|KCr`{*Psu|IOQR#RBwuldn)4G2Qz4eFW3a5JgZ`qWow= zD59K+7<2O?k;ZUX3Kc~FIXkG@2-Fdry9r3Y5k1$&vZsbdtm7~7IWV^kTH=?OK?r{x zj7~4i=4v5>^B*9O8+}vD5oV?mQ0?# zVuc@0EF6DkGn%vJwf>&H1Xy%@t)b@&lk$BZmo8oE(hV$@fNKG|lLns;v$M~FRz>{7 
zVYTUioRod)u0FZMC3SUml=;BGKrkY}?aaov>Jo|mm;3Cq%ha$#0PFzeZJ`dL$c;l` z+wEoX>9$vo;sEAp7~v&Hig5(*QR_8oR6TvVNXP_qN1|=(jYA63)dyu)ckn< z|8*k>y0|CU@syG=l^c*N5!|sL0um37@o@zspls`|$zTA2y0hO=>>H%LIQ}TxFte~Q z_A0v_HgA0a&=w?_5rhir=cZV0UF{4wf7Nk?PATgzb4wgf>mN?UtM-&M{I~Bcwg)lO zfHYG$ssLBixS5(DY|)C(Sl(RkRRM5a4r{<`1mGkKr{21Ar%uZX&z}kcl3satQ{#%f zHO-1yIs@ip2T3HV4bw~}+B6fGX^3yy8FR)4fun)2V?6CiNjz8joUq#(kS!z{9#V1HsQTcx)WbB;b- zp6@8NwuCK}dU|_5cDbxkd z{;gILnEDs7uebNU=3fw78CqSDe(4f#-G|6m^5zQTP={L&!bhF7X=e-Yd2s*!OcS{J zc{Nhb$(UI!KeS2D%$}}u1rxDhb?oYgvnO#Adkqpz*OyJBqX<`-_DOEN zyLa#JQZwbqp+o7+J(c=5xDQyg?!m+=dUb{j6}Y}&qISK; z$s*&?PYMh7^?QvQ)oZg{(BL?a7^-vp;>9PG`E3b{Q$+xK77|-YvyL^N-Y>mgiY`L4 zaE~XS`;+3>8JIa6&D*{pALqQ;8do%GPaja)AVJ>ziHA!?| z0GZIu>>+j!^uhM^EZA1DV5#Yh;*@Qx#3yz=(yL`HbBRPFqpObP1H8$%5UjezkXqRo zoVQNHNK)<;7VoamM z2M7yV)h~W#2M1&@AjmGIWKHIt0ClN$as}@!ZO)@#LMq9&96Pq?!Viw=%F4>5TA{(g zyEiGf6^o8-Ambr~(#e;haNkQGwstfYdciAp7TdHbA( zG=2VX7gKuchuSeYMMXvV`M!V~(Y-P@ms*9zom6R4fx4f!dW>XFKV?nHHPYjidST`P z%U#0aj=Zg`jDFir?ol=N=8WmnhcM~L{+>bVQ|=ee#Ln~OO`I5Om@_%HoyS?~yzz_V zr;Yo3vYo_DB0lRW)#rS#l^J4ht}6pmMh;D0=kyZ9S+KDl77)8}z<5i&uyeT$?91j% z_Yccnz(-P9rlO)^uJ_FnS67){l1E9a9E*xvjXTwtxUU-9cPfFLMt_ex<~=wW!Z<*h zI_$Ng$E<^e#RIx?x1i8m<9xyt5N_qVe7RGd--nWY{l=F>e(JC)%IcJToZjWxvAHz* zNUY`n{h@WPBtf$*kDxYRjgVgj=!Ncw&UZIE1rp_PYY!8R;fdhmv8M9h_`EdPHJtdE z@ci)ruzSue-m-OTr_Gc8n4I@uD?KD_t*sx^$sT9KIS$>DHOW?|RuXv7QEYWW`KZGT zYr*B5;#fnTi0`1_+J=Q^=M3qoaip&#PepOF=H}*S=d7hI(uWGO%j@H>)MAI@)LIMC z$yg}v#73tf4ZEmbSXN6*OYBH7&7Pl$vt7ffg~ij#3`*@vzgoC8s=|I0y)26y<7~6m zH(i-?c23`KzA2%)=9sri#;5s!T(|B!0^VP)3$z#vD^r1#g*r<`Bd68 diff --git a/deep-learning.html b/deep-learning.html index 513a328..4fa144e 100644 --- a/deep-learning.html +++ b/deep-learning.html @@ -750,9 +750,9 @@

10.2.3 Question 8
pred <- model |>
   predict(x) |>
@@ -1007,9 +1007,9 @@ 

10.2.5 Question 10
plot(history, smooth = FALSE)

kpred <- predict(model, xrnn[!istrain,, ])
-
## 56/56 - 0s - 58ms/epoch - 1ms/step
+
## 56/56 - 0s - 59ms/epoch - 1ms/step
1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0
-
## [1] 0.412886
+
## [1] 0.4129694

Both models estimate the same number of coefficients/weights (16):

coef(arfit)
##       (Intercept)      L1.DJ_return     L1.log_volume L1.log_volatility 
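Both counts can be verified directly; a minimal check, assuming `arfit` and `model` are the lm fit and the flattened RNN used above:

``` r
# Count the fitted parameters in each model (both should be 16)
length(coef(arfit))                       # intercept + 15 lag coefficients
sum(sapply(model$get_weights(), length))  # 15 dense-layer weights + 1 bias
```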
@@ -1029,24 +1029,24 @@ 

10.2.5 Question 10
model$get_weights()
## [[1]]
 ##               [,1]
-##  [1,] -0.031145222
-##  [2,]  0.101065643
-##  [3,]  0.141815767
-##  [4,] -0.004181504
-##  [5,]  0.116010934
-##  [6,] -0.003764492
-##  [7,]  0.038601257
-##  [8,]  0.078083567
-##  [9,]  0.137415737
-## [10,] -0.029184511
-## [11,]  0.036070298
-## [12,] -0.821708620
-## [13,]  0.095548652
-## [14,]  0.511229098
-## [15,]  0.521453559
+##  [1,] -0.032474127
+##  [2,]  0.097779043
+##  [3,]  0.178456694
+##  [4,] -0.005626136
+##  [5,]  0.121273242
+##  [6,] -0.076247886
+##  [7,]  0.035232600
+##  [8,]  0.077857092
+##  [9,]  0.163645267
+## [10,] -0.026966114
+## [11,]  0.032263778
+## [12,] -0.807968795
+## [13,]  0.095888853
+## [14,]  0.513532162
+## [15,]  0.496699780
 ## 
 ## [[2]]
-## [1] -0.006889343
+## [1] -0.004996791

The flattened RNN has a lower \(R^2\) on the test data than our lm model above. The lm model is quicker to fit and conceptually simpler, and it also lets us inspect the coefficients for the different variables.
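For reference, a minimal sketch (not part of this patch) of how the corresponding test \(R^2\) for the lm benchmark is obtained, assuming `arfit`, `arframe`, `istrain` and `V0` are defined as in the lab code above:

``` r
# Test R^2 for the linear AR model, on the same scale as the RNN figure above
arpred <- predict(arfit, arframe[!istrain, ])
1 - mean((arpred - arframe[!istrain, "log_volume"])^2) / V0
```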

@@ -1092,8 +1092,8 @@

10.2.6 Question 11
1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0
})
-
## 56/56 - 0s - 66ms/epoch - 1ms/step
-
## [1] 0.4271516
+
## 56/56 - 0s - 64ms/epoch - 1ms/step
+
## [1] 0.4262716

This approach improves our \(R^2\) over the linear model above.
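The model-definition code itself is elided by this hunk. As a rough sketch only (the hidden-layer size and the name `x` for the flattened 15-column lag matrix are assumptions, not taken from the source), the kind of nonlinear network meant here could be set up as:

``` r
library(keras)
# One small ReLU hidden layer on the 15 lagged predictors, single linear output
nn <- keras_model_sequential() |>
  layer_dense(units = 32, activation = "relu", input_shape = 15) |>
  layer_dense(units = 1)
nn |> compile(loss = "mse", optimizer = optimizer_rmsprop())
```

Fitting this with `fit()` on the training rows and evaluating it as above would then give the \(R^2\) figure being compared here.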

@@ -1155,8 +1155,8 @@

10.2.7 Question 12
1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0
})

-
## 56/56 - 0s - 133ms/epoch - 2ms/step
-
## [1] 0.4405331
+
## 56/56 - 0s - 134ms/epoch - 2ms/step
+
## [1] 0.4429825

10.2.8 Question 13

@@ -1209,14 +1209,14 @@

10.2.8 Question 13
@@ -1227,19 +1227,19 @@

10.2.8 Question 13
 1000  -0.86084  +0.85324
 3000  -0.87224  +0.87808
 5000  -0.87460  +0.88076
10000  -0.86180  +0.86936
diff --git a/linear-regression.html b/linear-regression.html
index 89feaf3..f4d4fe4 100644
--- a/linear-regression.html
+++ b/linear-regression.html
@@ -469,10 +469,10 @@

3.1.3 Question 3
\(y = \beta_0 + \beta_1 \text{GPA} + \beta_2 \text{IQ} + \beta_3 \text{Level} + \beta_4 \text{GPA} \text{IQ} + \beta_5 \text{GPA} \text{Level}\)

+

\(y = \beta_0 + \beta_1 \cdot \text{GPA} + \beta_2 \cdot \text{IQ} + \beta_3 \cdot \text{Level} + \beta_4 \cdot \text{GPA} \cdot \text{IQ} + \beta_5 \cdot \text{GPA} \cdot \text{Level}\)

Fixing IQ and GPA, changing Level from 0 to 1 will change the outcome by:

-

\(\Delta y = \beta_3 + \beta_5 \text{GPA}\)

-

\(\Delta y > 0 \Rightarrow \beta_3 + \beta_5 \text{GPA} > 0 \Rightarrow \text{GPA} > \dfrac{-\beta3}{\beta_5} = - \dfrac{35}{-10} = 3.5\)

+

\(\Delta y = \beta_3 + \beta_5 \cdot \text{GPA}\)

+

\(\Delta y > 0 \Rightarrow \beta_3 + \beta_5 \cdot \text{GPA} > 0 \Rightarrow \text{GPA} < \dfrac{-\beta_3}{\beta_5} = \dfrac{-35}{-10} = 3.5\)

From a graphical standpoint:

library(plotly)
model <- function(gpa, iq, level) {
@@ -501,8 +501,8 @@ 

3.1.3 Question 3
xaxis = list(title = "GPA"), yaxis = list(title = "IQ"), zaxis = list(title = "Salary")))

-
- +
+

Option iii correct.
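A quick numeric check of the threshold, using the `model()` helper defined in the plotting code above (at fixed IQ the difference between college and high school is \(35 - 10 \cdot \text{GPA}\), so the IQ terms cancel):

``` r
# Salary difference (college minus high school) at a fixed IQ
model(gpa = 3, iq = 110, level = 1) - model(gpa = 3, iq = 110, level = 0)  # 35 - 10*3 =  5
model(gpa = 4, iq = 110, level = 1) - model(gpa = 4, iq = 110, level = 0)  # 35 - 10*4 = -5
```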

diff --git a/resampling-methods.html b/resampling-methods.html
index 2aaeaea..edaab90 100644
--- a/resampling-methods.html
+++ b/resampling-methods.html
@@ -554,7 +554,7 @@

    5.1.2 Question 2
    store <- replicate(10000, sum(sample(1:100, replace = TRUE) == 4) > 0)
     mean(store)

-
## [1] 0.6308
+
## [1] 0.6355

The probability of including \(4\) when resampling numbers \(1...100\) is close to \(1 - (1 - 1/100)^{100}\).
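As a sanity check, the analytic value can be computed directly and compared with the simulated estimate above:

``` r
1 - (1 - 1/100)^100  # approximately 0.634, in line with the simulated 0.6355
```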

diff --git a/search_index.json b/search_index.json index 406e819..329f9a4 100644 --- a/search_index.json +++ b/search_index.json @@ -1 +1 @@ -[["index.html", "An Introduction to Statistical Learning Exercise solutions in R 1 Introduction", " An Introduction to Statistical Learning Exercise solutions in R 1 Introduction This bookdown document provides solutions for exercises in the book “An Introduction to Statistical Learning with Applications in R”, second edition, by Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani. "],["statistical-learning.html", "2 Statistical Learning 2.1 Conceptual 2.2 Applied", " 2 Statistical Learning 2.1 Conceptual 2.1.1 Question 1 For each of parts (a) through (d), indicate whether we would generally expect the performance of a flexible statistical learning method to be better or worse than an inflexible method. Justify your answer. The sample size \\(n\\) is extremely large, and the number of predictors \\(p\\) is small. Flexible best - opposite of b. The number of predictors \\(p\\) is extremely large, and the number of observations \\(n\\) is small. Inflexible best - high chance of some predictors being randomly associated. The relationship between the predictors and response is highly non-linear. Flexible best - inflexible leads to high bias. The variance of the error terms, i.e. \\(\\sigma^2 = Var(\\epsilon)\\), is extremely high. Inflexible best - opposite of c. 2.1.2 Question 2 Explain whether each scenario is a classification or regression problem, and indicate whether we are most interested in inference or prediction. Finally, provide \\(n\\) and \\(p\\). We collect a set of data on the top 500 firms in the US. For each firm we record profit, number of employees, industry and the CEO salary. We are interested in understanding which factors affect CEO salary. \\(n=500\\), \\(p=3\\), regression, inference. We are considering launching a new product and wish to know whether it will be a success or a failure. We collect data on 20 similar products that were previously launched. For each product we have recorded whether it was a success or failure, price charged for the product, marketing budget, competition price, and ten other variables. \\(n=20\\), \\(p=13\\), classification, prediction. We are interested in predicting the % change in the USD/Euro exchange rate in relation to the weekly changes in the world stock markets. Hence we collect weekly data for all of 2012. For each week we record the % change in the USD/Euro, the % change in the US market, the % change in the British market, and the % change in the German market. \\(n=52\\), \\(p=3\\), regression, prediction. 2.1.3 Question 3 We now revisit the bias-variance decomposition. Provide a sketch of typical (squared) bias, variance, training error, test error, and Bayes (or irreducible) error curves, on a single plot, as we go from less flexible statistical learning methods towards more flexible approaches. The x-axis should represent the amount of flexibility in the method, and the y-axis should represent the values for each curve. There should be five curves. Make sure to label each one. Explain why each of the five curves has the shape displayed in part (a). (squared) bias: Decreases with increasing flexibility (Generally, more flexible methods result in less bias). variance: Increases with increasing flexibility (In general, more flexible statistical methods have higher variance). 
training error: Decreases with model flexibility (More complex models will better fit the training data). test error: Decreases initially, then increases due to overfitting (less bias but more training error). Bayes (irreducible) error: fixed (does not change with model). 2.1.4 Question 4 You will now think of some real-life applications for statistical learning. Describe three real-life applications in which classification might be useful. Describe the response, as well as the predictors. Is the goal of each application inference or prediction? Explain your answer. Coffee machine cleaned? (day of week, person assigned), inference. Is a flight delayed? (airline, airport etc), inference. Beer type (IPA, pilsner etc.), prediction. Describe three real-life applications in which regression might be useful. Describe the response, as well as the predictors. Is the goal of each application inference or prediction? Explain your answer. Amount of bonus paid (profitability, client feedback), prediction. Person’s height, prediction. House price, inference. Describe three real-life applications in which cluster analysis might be useful. RNAseq tumour gene expression data. SNPs in human populations. Frequencies of mutations (with base pair context) in somatic mutation data. 2.1.5 Question 5 What are the advantages and disadvantages of a very flexible (versus a less flexible) approach for regression or classification? Under what circumstances might a more flexible approach be preferred to a less flexible approach? When might a less flexible approach be preferred? Inflexible is more interpretable, fewer observations required, can be biased. Flexible can overfit (high error variance). In cases where we have high \\(n\\) or non-linear patterns flexible will be preferred. 2.1.6 Question 6 Describe the differences between a parametric and a non-parametric statistical learning approach. What are the advantages of a parametric approach to regression or classification (as opposed to a non-parametric approach)? What are its disadvantages? Parametric uses (model) parameters. Parametric models can be more interpretable as there is a model behind how data is generated. However, the disadvantage is that the model might not reflect reality. If the model is too far from the truth, estimates will be poor and more flexible models can fit many different forms and require more parameters (leading to overfitting). Non-parametric approaches do not estimate a small number of parameters, so a large number of observations may be needed to obtain accurate estimates. 2.1.7 Question 7 The table below provides a training data set containing six observations, three predictors, and one qualitative response variable. Obs. \\(X_1\\) \\(X_2\\) \\(X_3\\) \\(Y\\) 1 0 3 0 Red 2 2 0 0 Red 3 0 1 3 Red 4 0 1 2 Green 5 -1 0 1 Green 6 1 1 1 Red Suppose we wish to use this data set to make a prediction for \\(Y\\) when \\(X_1 = X_2 = X_3 = 0\\) using \\(K\\)-nearest neighbors. Compute the Euclidean distance between each observation and the test point, \\(X_1 = X_2 = X_3 = 0\\). dat <- data.frame( "x1" = c(0, 2, 0, 0, -1, 1), "x2" = c(3, 0, 1, 1, 0, 1), "x3" = c(0, 0, 3, 2, 1, 1), "y" = c("Red", "Red", "Red", "Green", "Green", "Red") ) # Euclidean distance between points and c(0, 0, 0) dist <- sqrt(dat[["x1"]]^2 + dat[["x2"]]^2 + dat[["x3"]]^2) signif(dist, 3) ## [1] 3.00 2.00 3.16 2.24 1.41 1.73 What is our prediction with \\(K = 1\\)? Why? 
knn <- function(k) { names(which.max(table(dat[["y"]][order(dist)[1:k]]))) } knn(1) ## [1] "Green" Green (based on data point 5 only) What is our prediction with \\(K = 3\\)? Why? knn(3) ## [1] "Red" Red (based on data points 2, 5, 6) If the Bayes decision boundary in this problem is highly non-linear, then would we expect the best value for \\(K\\) to be large or small? Why? Small (high \\(k\\) leads to linear boundaries due to averaging) 2.2 Applied 2.2.1 Question 8 This exercise relates to the College data set, which can be found in the file College.csv. It contains a number of variables for 777 different universities and colleges in the US. The variables are Private : Public/private indicator Apps : Number of applications received Accept : Number of applicants accepted Enroll : Number of new students enrolled Top10perc : New students from top 10% of high school class Top25perc : New students from top 25% of high school class F.Undergrad : Number of full-time undergraduates P.Undergrad : Number of part-time undergraduates Outstate : Out-of-state tuition Room.Board : Room and board costs Books : Estimated book costs Personal : Estimated personal spending PhD : Percent of faculty with Ph.D.’s Terminal : Percent of faculty with terminal degree S.F.Ratio : Student/faculty ratio perc.alumni : Percent of alumni who donate Expend : Instructional expenditure per student Grad.Rate : Graduation rate Before reading the data into R, it can be viewed in Excel or a text editor. Use the read.csv() function to read the data into R. Call the loaded data college. Make sure that you have the directory set to the correct location for the data. college <- read.csv("data/College.csv") Look at the data using the View() function. You should notice that the first column is just the name of each university. We don’t really want R to treat this as data. However, it may be handy to have these names for later. Try the following commands: rownames(college) <- college[, 1] View(college) You should see that there is now a row.names column with the name of each university recorded. This means that R has given each row a name corresponding to the appropriate university. R will not try to perform calculations on the row names. However, we still need to eliminate the first column in the data where the names are stored. Try college <- college [, -1] View(college) Now you should see that the first data column is Private. Note that another column labeled row.names now appears before the Private column. However, this is not a data column but rather the name that R is giving to each row. rownames(college) <- college[, 1] college <- college[, -1] Use the summary() function to produce a numerical summary of the variables in the data set. Use the pairs() function to produce a scatterplot matrix of the first ten columns or variables of the data. Recall that you can reference the first ten columns of a matrix A using A[,1:10]. Use the plot() function to produce side-by-side boxplots of Outstate versus Private. Create a new qualitative variable, called Elite, by binning the Top10perc variable. We are going to divide universities into two groups based on whether or not the proportion of students coming from the top 10% of their high school classes exceeds 50%. > Elite <- rep("No", nrow(college)) > Elite[college$Top10perc > 50] <- "Yes" > Elite <- as.factor(Elite) > college <- data.frame(college, Elite) Use the summary() function to see how many elite universities there are. 
Now use the plot() function to produce side-by-side boxplots of Outstate versus Elite. Use the hist() function to produce some histograms with differing numbers of bins for a few of the quantitative variables. You may find the command par(mfrow=c(2,2)) useful: it will divide the print window into four regions so that four plots can be made simultaneously. Modifying the arguments to this function will divide the screen in other ways. Continue exploring the data, and provide a brief summary of what you discover. summary(college) ## Private Apps Accept Enroll ## Length:777 Min. : 81 Min. : 72 Min. : 35 ## Class :character 1st Qu.: 776 1st Qu.: 604 1st Qu.: 242 ## Mode :character Median : 1558 Median : 1110 Median : 434 ## Mean : 3002 Mean : 2019 Mean : 780 ## 3rd Qu.: 3624 3rd Qu.: 2424 3rd Qu.: 902 ## Max. :48094 Max. :26330 Max. :6392 ## Top10perc Top25perc F.Undergrad P.Undergrad ## Min. : 1.00 Min. : 9.0 Min. : 139 Min. : 1.0 ## 1st Qu.:15.00 1st Qu.: 41.0 1st Qu.: 992 1st Qu.: 95.0 ## Median :23.00 Median : 54.0 Median : 1707 Median : 353.0 ## Mean :27.56 Mean : 55.8 Mean : 3700 Mean : 855.3 ## 3rd Qu.:35.00 3rd Qu.: 69.0 3rd Qu.: 4005 3rd Qu.: 967.0 ## Max. :96.00 Max. :100.0 Max. :31643 Max. :21836.0 ## Outstate Room.Board Books Personal ## Min. : 2340 Min. :1780 Min. : 96.0 Min. : 250 ## 1st Qu.: 7320 1st Qu.:3597 1st Qu.: 470.0 1st Qu.: 850 ## Median : 9990 Median :4200 Median : 500.0 Median :1200 ## Mean :10441 Mean :4358 Mean : 549.4 Mean :1341 ## 3rd Qu.:12925 3rd Qu.:5050 3rd Qu.: 600.0 3rd Qu.:1700 ## Max. :21700 Max. :8124 Max. :2340.0 Max. :6800 ## PhD Terminal S.F.Ratio perc.alumni ## Min. : 8.00 Min. : 24.0 Min. : 2.50 Min. : 0.00 ## 1st Qu.: 62.00 1st Qu.: 71.0 1st Qu.:11.50 1st Qu.:13.00 ## Median : 75.00 Median : 82.0 Median :13.60 Median :21.00 ## Mean : 72.66 Mean : 79.7 Mean :14.09 Mean :22.74 ## 3rd Qu.: 85.00 3rd Qu.: 92.0 3rd Qu.:16.50 3rd Qu.:31.00 ## Max. :103.00 Max. :100.0 Max. :39.80 Max. :64.00 ## Expend Grad.Rate ## Min. : 3186 Min. : 10.00 ## 1st Qu.: 6751 1st Qu.: 53.00 ## Median : 8377 Median : 65.00 ## Mean : 9660 Mean : 65.46 ## 3rd Qu.:10830 3rd Qu.: 78.00 ## Max. :56233 Max. :118.00 college$Private <- college$Private == "Yes" pairs(college[, 1:10], cex = 0.2) plot(college$Outstate ~ factor(college$Private), xlab = "Private", ylab = "Outstate") college$Elite <- factor(ifelse(college$Top10perc > 50, "Yes", "No")) summary(college$Elite) ## No Yes ## 699 78 plot(college$Outstate ~ college$Elite, xlab = "Elite", ylab = "Outstate") par(mfrow = c(2,2)) for (n in c(5, 10, 20, 50)) { hist(college$Enroll, breaks = n, main = paste("n =", n), xlab = "Enroll") } chisq.test(college$Private, college$Elite) ## ## Pearson's Chi-squared test with Yates' continuity correction ## ## data: college$Private and college$Elite ## X-squared = 4.3498, df = 1, p-value = 0.03701 Whether a college is Private and Elite is not random! 2.2.2 Question 9 This exercise involves the Auto data set studied in the lab. Make sure that the missing values have been removed from the data. x <- read.table("data/Auto.data", header = TRUE, na.strings = "?") x <- na.omit(x) Which of the predictors are quantitative, and which are qualitative? 
sapply(x, class) ## mpg cylinders displacement horsepower weight acceleration ## "numeric" "integer" "numeric" "numeric" "numeric" "numeric" ## year origin name ## "integer" "integer" "character" numeric <- which(sapply(x, class) == "numeric") names(numeric) ## [1] "mpg" "displacement" "horsepower" "weight" "acceleration" What is the range of each quantitative predictor? You can answer this using the range() function. sapply(x[, numeric], function(x) diff(range(x))) ## mpg displacement horsepower weight acceleration ## 37.6 387.0 184.0 3527.0 16.8 What is the mean and standard deviation of each quantitative predictor? library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1 ## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1 ## ✔ purrr 1.0.2 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors library(knitr) x[, numeric] |> pivot_longer(everything()) |> group_by(name) |> summarise( Mean = mean(value), SD = sd(value) ) |> kable() name Mean SD acceleration 15.54133 2.758864 displacement 194.41199 104.644004 horsepower 104.46939 38.491160 mpg 23.44592 7.805008 weight 2977.58418 849.402560 Now remove the 10th through 85th observations. What is the range, mean, and standard deviation of each predictor in the subset of the data that remains? x[-(10:85), numeric] |> pivot_longer(everything()) |> group_by(name) |> summarise( Range = diff(range(value)), Mean = mean(value), SD = sd(value) ) |> kable() name Range Mean SD acceleration 16.3 15.72690 2.693721 displacement 387.0 187.24051 99.678367 horsepower 184.0 100.72152 35.708853 mpg 35.6 24.40443 7.867283 weight 3348.0 2935.97152 811.300208 Using the full data set, investigate the predictors graphically, using scatterplots or other tools of your choice. Create some plots highlighting the relationships among the predictors. Comment on your findings. pairs(x[, numeric], cex = 0.2) cor(x[, numeric]) |> kable() mpg displacement horsepower weight acceleration mpg 1.0000000 -0.8051269 -0.7784268 -0.8322442 0.4233285 displacement -0.8051269 1.0000000 0.8972570 0.9329944 -0.5438005 horsepower -0.7784268 0.8972570 1.0000000 0.8645377 -0.6891955 weight -0.8322442 0.9329944 0.8645377 1.0000000 -0.4168392 acceleration 0.4233285 -0.5438005 -0.6891955 -0.4168392 1.0000000 heatmap(cor(x[, numeric]), cexRow = 1.1, cexCol = 1.1, margins = c(8, 8)) Many of the variables appear to be highly (positively or negatively) correlated with some relationships being non-linear. Suppose that we wish to predict gas mileage (mpg) on the basis of the other variables. Do your plots suggest that any of the other variables might be useful in predicting mpg? Justify your answer. Yes, since other variables are correlated. However, horsepower, weight and displacement are highly related. 2.2.3 Question 10 This exercise involves the Boston housing data set. To begin, load in the Boston data set. The Boston data set is part of the ISLR2 library in R. > library(ISLR2) Now the data set is contained in the object Boston. > Boston Read about the data set: > ?Boston How many rows are in this data set? How many columns? What do the rows and columns represent? 
library(ISLR2) dim(Boston) ## [1] 506 13 Make some pairwise scatterplots of the predictors (columns) in this data set. Describe your findings. library(ggplot2) library(tidyverse) ggplot(Boston, aes(nox, rm)) + geom_point() ggplot(Boston, aes(ptratio, rm)) + geom_point() heatmap(cor(Boston, method = "spearman"), cexRow = 1.1, cexCol = 1.1) Are any of the predictors associated with per capita crime rate? If so, explain the relationship. Yes Do any of the census tracts of Boston appear to have particularly high crime rates? Tax rates? Pupil-teacher ratios? Comment on the range of each predictor. Boston |> pivot_longer(cols = 1:13) |> filter(name %in% c("crim", "tax", "ptratio")) |> ggplot(aes(value)) + geom_histogram(bins = 20) + facet_wrap(~name, scales="free", ncol= 1) Yes, particularly crime and tax rates. How many of the census tracts in this data set bound the Charles river? sum(Boston$chas) ## [1] 35 What is the median pupil-teacher ratio among the towns in this data set? median(Boston$ptratio) ## [1] 19.05 Which census tract of Boston has lowest median value of owner-occupied homes? What are the values of the other predictors for that census tract, and how do those values compare to the overall ranges for those predictors? Comment on your findings. Boston[Boston$medv == min(Boston$medv), ] |> kable() crim zn indus chas nox rm age dis rad tax ptratio lstat medv 399 38.3518 0 18.1 0 0.693 5.453 100 1.4896 24 666 20.2 30.59 5 406 67.9208 0 18.1 0 0.693 5.683 100 1.4254 24 666 20.2 22.98 5 sapply(Boston, quantile) |> kable() crim zn indus chas nox rm age dis rad tax ptratio lstat medv 0% 0.006320 0.0 0.46 0 0.385 3.5610 2.900 1.129600 1 187 12.60 1.730 5.000 25% 0.082045 0.0 5.19 0 0.449 5.8855 45.025 2.100175 4 279 17.40 6.950 17.025 50% 0.256510 0.0 9.69 0 0.538 6.2085 77.500 3.207450 5 330 19.05 11.360 21.200 75% 3.677083 12.5 18.10 0 0.624 6.6235 94.075 5.188425 24 666 20.20 16.955 25.000 100% 88.976200 100.0 27.74 1 0.871 8.7800 100.000 12.126500 24 711 22.00 37.970 50.000 In this data set, how many of the census tract average more than seven rooms per dwelling? More than eight rooms per dwelling? Comment on the census tracts that average more than eight rooms per dwelling. sum(Boston$rm > 7) ## [1] 64 sum(Boston$rm > 8) ## [1] 13 Let’s compare median statistics for those census tracts with more than eight rooms per dwelling on average, with the statistics for those with fewer. Boston |> mutate( `log(crim)` = log(crim), `log(zn)` = log(zn) ) |> select(-c(crim, zn)) |> pivot_longer(!rm) |> mutate(">8 rooms" = rm > 8) |> ggplot(aes(`>8 rooms`, value)) + geom_boxplot() + facet_wrap(~name, scales = "free") ## Warning: Removed 372 rows containing non-finite outside the scale range ## (`stat_boxplot()`). Census tracts with big average properties (more than eight rooms per dwelling) have higher median value (medv), a lower proportion of non-retail business acres (indus), a lower pupil-teacher ratio (ptratio), a lower status of the population (lstat) among other differences. "],["linear-regression.html", "3 Linear Regression 3.1 Conceptual 3.2 Applied", " 3 Linear Regression 3.1 Conceptual 3.1.1 Question 1 Describe the null hypotheses to which the p-values given in Table 3.4 correspond. Explain what conclusions you can draw based on these p-values. Your explanation should be phrased in terms of sales, TV, radio, and newspaper, rather than in terms of the coefficients of the linear model. 
For intercept, that \\(\\beta_0 = 0\\) For the others, that \\(\\beta_n = 0\\) (for \\(n = 1, 2, 3\\)) We can conclude that that without any spending, there are still some sales (the intercept is not 0). Furthermore, we can conclude that money spent on TV and radio are significantly associated with increased sales, but the same cannot be said of newspaper spending. 3.1.2 Question 2 Carefully explain the differences between the KNN classifier and KNN regression methods. The KNN classifier is categorical and assigns a value based on the most frequent observed category among \\(K\\) nearest neighbors, whereas KNN regression assigns a continuous variable, the average of the response variables for the \\(K\\) nearest neighbors. 3.1.3 Question 3 Suppose we have a data set with five predictors, \\(X_1\\) = GPA, \\(X_2\\) = IQ, \\(X_3\\) = Level (1 for College and 0 for High School), \\(X_4\\) = Interaction between GPA and IQ, and \\(X_5\\) = Interaction between GPA and Level. The response is starting salary after graduation (in thousands of dollars). Suppose we use least squares to fit the model, and get \\(\\hat\\beta_0 = 50\\), \\(\\hat\\beta_1 = 20\\), \\(\\hat\\beta_2 = 0.07\\), \\(\\hat\\beta_3 = 35\\), \\(\\hat\\beta_4 = 0.01\\), \\(\\hat\\beta_5 = -10\\). Which answer is correct, and why? For a fixed value of IQ and GPA, high school graduates earn more on average than college graduates. For a fixed value of IQ and GPA, college graduates earn more on average than high school graduates. For a fixed value of IQ and GPA, high school graduates earn more on average than college graduates provided that the GPA is high enough. For a fixed value of IQ and GPA, college graduates earn more on average than high school graduates provided that the GPA is high enough. The model is: \\(y = \\beta_0 + \\beta_1 \\text{GPA} + \\beta_2 \\text{IQ} + \\beta_3 \\text{Level} + \\beta_4 \\text{GPA} \\text{IQ} + \\beta_5 \\text{GPA} \\text{Level}\\) Fixing IQ and GPA, changing Level from 0 to 1 will change the outcome by: \\(\\Delta y = \\beta_3 + \\beta_5 \\text{GPA}\\) \\(\\Delta y > 0 \\Rightarrow \\beta_3 + \\beta_5 \\text{GPA} > 0 \\Rightarrow \\text{GPA} > \\dfrac{-\\beta3}{\\beta_5} = - \\dfrac{35}{-10} = 3.5\\) From a graphical standpoint: library(plotly) model <- function(gpa, iq, level) { 50 + gpa * 20 + iq * 0.07 + level * 35 + gpa * iq * 0.01 + gpa * level * -10 } x <- seq(1, 5, length = 10) y <- seq(1, 200, length = 20) college <- t(outer(x, y, model, level = 1)) high_school <- t(outer(x, y, model, level = 0)) plot_ly(x = x, y = y) |> add_surface( z = ~college, colorscale = list(c(0, 1), c("rgb(107,184,214)", "rgb(0,90,124)")), colorbar = list(title = "College")) |> add_surface( z = ~high_school, colorscale = list(c(0, 1), c("rgb(255,112,184)", "rgb(128,0,64)")), colorbar = list(title = "High school")) |> layout(scene = list( xaxis = list(title = "GPA"), yaxis = list(title = "IQ"), zaxis = list(title = "Salary"))) Option iii correct. Predict the salary of a college graduate with IQ of 110 and a GPA of 4.0. model(gpa = 4, iq = 110, level = 1) ## [1] 137.1 True or false: Since the coefficient for the GPA/IQ interaction term is very small, there is very little evidence of an interaction effect. Justify your answer. This is false. It is important to remember that GPA and IQ vary over different scales. It is better to explicitly test the significance of the interaction effect, and/or visualize or quantify the effect on sales under realistic ranges of GPA/IQ values. 
3.1.4 Question 4 I collect a set of data (\\(n = 100\\) observations) containing a single predictor and a quantitative response. I then fit a linear regression model to the data, as well as a separate cubic regression, i.e. \\(Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon\\). Suppose that the true relationship between \\(X\\) and \\(Y\\) is linear, i.e. \\(Y = \\beta_0 + \\beta_1X + \\epsilon\\). Consider the training residual sum of squares (RSS) for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer. You would expect the cubic regression to have lower RSS since it is at least as flexible as the linear regression. Answer (a) using test rather than training RSS. Though we could not be certain, the test RSS would likely be higher due to overfitting. Suppose that the true relationship between \\(X\\) and \\(Y\\) is not linear, but we don’t know how far it is from linear. Consider the training RSS for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer. You would expect the cubic regression to have lower RSS since it is at least as flexible as the linear regression. Answer (c) using test rather than training RSS. There is not enough information to tell, it depends on how non-linear the true relationship is. 3.1.5 Question 5 Consider the fitted values that result from performing linear regression without an intercept. In this setting, the ith fitted value takes the form \\[\\hat{y}_i = x_i\\hat\\beta,\\] where \\[\\hat{\\beta} = \\left(\\sum_{i=1}^nx_iy_i\\right) / \\left(\\sum_{i' = 1}^n x^2_{i'}\\right).\\] show that we can write \\[\\hat{y}_i = \\sum_{i' = 1}^na_{i'}y_{i'}\\] What is \\(a_{i'}\\)? Note: We interpret this result by saying that the fitted values from linear regression are linear combinations of the response values. \\[\\begin{align} \\hat{y}_i & = x_i \\frac{\\sum_{i=1}^nx_iy_i}{\\sum_{i' = 1}^n x^2_{i'}} \\\\ & = x_i \\frac{\\sum_{i'=1}^nx_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\frac{\\sum_{i'=1}^n x_i x_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\sum_{i'=1}^n \\frac{ x_i x_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\sum_{i'=1}^n \\frac{ x_i x_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} y_{i'} \\end{align}\\] therefore, \\[a_{i'} = \\frac{ x_i x_{i'}}{\\sum x^2}\\] 3.1.6 Question 6 Using (3.4), argue that in the case of simple linear regression, the least squares line always passes through the point \\((\\bar{x}, \\bar{y})\\). when \\(x = \\bar{x}\\) what is \\(y\\)? \\[\\begin{align} y &= \\hat\\beta_0 + \\hat\\beta_1\\bar{x} \\\\ &= \\bar{y} - \\hat\\beta_1\\bar{x} + \\hat\\beta_1\\bar{x} \\\\ &= \\bar{y} \\end{align}\\] 3.1.7 Question 7 It is claimed in the text that in the case of simple linear regression of \\(Y\\) onto \\(X\\), the \\(R^2\\) statistic (3.17) is equal to the square of the correlation between \\(X\\) and \\(Y\\) (3.18). Prove that this is the case. For simplicity, you may assume that \\(\\bar{x} = \\bar{y} = 0\\). 
We have the following equations: \\[ R^2 = \\frac{\\textit{TSS} - \\textit{RSS}}{\\textit{TSS}} \\] \\[ Cor(x,y) = \\frac{\\sum_i (x_i-\\bar{x})(y_i - \\bar{y})}{\\sqrt{\\sum_i(x_i - \\bar{x})^2}\\sqrt{\\sum_i(y_i - \\bar{y})^2}} \\] As above, its important to remember \\(\\sum_i x_i = \\sum_j x_j\\) when \\(\\bar{x} = \\bar{y} = 0\\) \\[ Cor(x,y)^2 = \\frac{(\\sum_ix_iy_i)^2}{\\sum_ix_i^2 \\sum_iy_i^2} \\] Also note that: \\[\\hat{y}_i = \\hat\\beta_o + \\hat\\beta_1x_i = x_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2}\\] Therefore, given that \\(RSS = \\sum_i(y_i - \\hat{y}_i)^2\\) and \\(\\textit{TSS} = \\sum_i(y_i - \\bar{y})^2 = \\sum_iy_i^2\\) \\[\\begin{align} R^2 &= \\frac{\\sum_iy_i^2 - \\sum_i(y_i - x_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2} {\\sum_iy_i^2} \\\\ &= \\frac{\\sum_iy_i^2 - \\sum_i( y_i^2 - 2y_ix_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2} + x_i^2 (\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2 )}{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\sum_i(y_ix_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2}) - \\sum_i(x_i^2 (\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2) }{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\sum_i(y_ix_i) \\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2} - \\sum_i(x_i^2) \\frac{(\\sum_j{x_jy_j})^2}{(\\sum_jx_j^2)^2} }{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\frac{(\\sum_i{x_iy_i})^2}{\\sum_jx_j^2} - \\frac{(\\sum_i{x_iy_i})^2}{\\sum_jx_j^2} }{\\sum_iy_i^2} \\\\ &= \\frac{(\\sum_i{x_iy_i})^2}{\\sum_ix_i^2 \\sum_iy_i^2} \\end{align}\\] 3.2 Applied 3.2.1 Question 8 This question involves the use of simple linear regression on the Auto data set. Use the lm() function to perform a simple linear regression with mpg as the response and horsepower as the predictor. Use the summary() function to print the results. Comment on the output. For example: Is there a relationship between the predictor and the response? How strong is the relationship between the predictor and the response? Is the relationship between the predictor and the response positive or negative? What is the predicted mpg associated with a horsepower of 98? What are the associated 95% confidence and prediction intervals? library(ISLR2) fit <- lm(mpg ~ horsepower, data = Auto) summary(fit) ## ## Call: ## lm(formula = mpg ~ horsepower, data = Auto) ## ## Residuals: ## Min 1Q Median 3Q Max ## -13.5710 -3.2592 -0.3435 2.7630 16.9240 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 39.935861 0.717499 55.66 <2e-16 *** ## horsepower -0.157845 0.006446 -24.49 <2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 4.906 on 390 degrees of freedom ## Multiple R-squared: 0.6059, Adjusted R-squared: 0.6049 ## F-statistic: 599.7 on 1 and 390 DF, p-value: < 2.2e-16 Yes, there is a significant relationship between predictor and response. For every unit increase in horsepower, mpg reduces by 0.16 (a negative relationship). predict(fit, data.frame(horsepower = 98), interval = "confidence") ## fit lwr upr ## 1 24.46708 23.97308 24.96108 predict(fit, data.frame(horsepower = 98), interval = "prediction") ## fit lwr upr ## 1 24.46708 14.8094 34.12476 Plot the response and the predictor. Use the abline() function to display the least squares regression line. plot(Auto$horsepower, Auto$mpg, xlab = "horsepower", ylab = "mpg") abline(fit) Use the plot() function to produce diagnostic plots of the least squares regression fit. Comment on any problems you see with the fit. par(mfrow = c(2, 2)) plot(fit, cex = 0.2) The residuals show a trend with respect to the fitted values suggesting a non-linear relationship. 
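The curvature in the residuals suggests trying a non-linear term. As a brief follow-up sketch (not part of the original answer; it reuses the fit object above), we can compare the simple linear fit with a model that adds a quadratic horsepower term:

fit_quad <- lm(mpg ~ poly(horsepower, 2), data = Auto)
anova(fit, fit_quad)  # F-test of the nested models: the quadratic term is strongly significant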
3.2.2 Question 9 This question involves the use of multiple linear regression on the Auto data set. Produce a scatterplot matrix which includes all of the variables in the data set. pairs(Auto, cex = 0.2) Compute the matrix of correlations between the variables using the function cor(). You will need to exclude the name variable, name which is qualitative. x <- subset(Auto, select = -name) cor(x) ## mpg cylinders displacement horsepower weight ## mpg 1.0000000 -0.7776175 -0.8051269 -0.7784268 -0.8322442 ## cylinders -0.7776175 1.0000000 0.9508233 0.8429834 0.8975273 ## displacement -0.8051269 0.9508233 1.0000000 0.8972570 0.9329944 ## horsepower -0.7784268 0.8429834 0.8972570 1.0000000 0.8645377 ## weight -0.8322442 0.8975273 0.9329944 0.8645377 1.0000000 ## acceleration 0.4233285 -0.5046834 -0.5438005 -0.6891955 -0.4168392 ## year 0.5805410 -0.3456474 -0.3698552 -0.4163615 -0.3091199 ## origin 0.5652088 -0.5689316 -0.6145351 -0.4551715 -0.5850054 ## acceleration year origin ## mpg 0.4233285 0.5805410 0.5652088 ## cylinders -0.5046834 -0.3456474 -0.5689316 ## displacement -0.5438005 -0.3698552 -0.6145351 ## horsepower -0.6891955 -0.4163615 -0.4551715 ## weight -0.4168392 -0.3091199 -0.5850054 ## acceleration 1.0000000 0.2903161 0.2127458 ## year 0.2903161 1.0000000 0.1815277 ## origin 0.2127458 0.1815277 1.0000000 Use the lm() function to perform a multiple linear regression with mpg as the response and all other variables except name as the predictors. Use the summary() function to print the results. Comment on the output. For instance: Is there a relationship between the predictors and the response? Which predictors appear to have a statistically significant relationship to the response? What does the coefficient for the year variable suggest? fit <- lm(mpg ~ ., data = x) summary(fit) ## ## Call: ## lm(formula = mpg ~ ., data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.5903 -2.1565 -0.1169 1.8690 13.0604 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -17.218435 4.644294 -3.707 0.00024 *** ## cylinders -0.493376 0.323282 -1.526 0.12780 ## displacement 0.019896 0.007515 2.647 0.00844 ** ## horsepower -0.016951 0.013787 -1.230 0.21963 ## weight -0.006474 0.000652 -9.929 < 2e-16 *** ## acceleration 0.080576 0.098845 0.815 0.41548 ## year 0.750773 0.050973 14.729 < 2e-16 *** ## origin 1.426141 0.278136 5.127 4.67e-07 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.328 on 384 degrees of freedom ## Multiple R-squared: 0.8215, Adjusted R-squared: 0.8182 ## F-statistic: 252.4 on 7 and 384 DF, p-value: < 2.2e-16 Yes, there is a relationship between some predictors and response, notably “displacement” (positive), “weight” (negative), “year” (positive) and “origin” (positive). The coefficient for year (which is positive \\(~0.75\\)) suggests that mpg increases by about this amount every year on average. Use the plot() function to produce diagnostic plots of the linear regression fit. Comment on any problems you see with the fit. Do the residual plots suggest any unusually large outliers? Does the leverage plot identify any observations with unusually high leverage? par(mfrow = c(2, 2)) plot(fit, cex = 0.2) One point has high leverage, the residuals also show a trend with fitted values. Use the * and : symbols to fit linear regression models with interaction effects. Do any interactions appear to be statistically significant? summary(lm(mpg ~ . 
+ weight:horsepower, data = x)) ## ## Call: ## lm(formula = mpg ~ . + weight:horsepower, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -8.589 -1.617 -0.184 1.541 12.001 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.876e+00 4.511e+00 0.638 0.524147 ## cylinders -2.955e-02 2.881e-01 -0.103 0.918363 ## displacement 5.950e-03 6.750e-03 0.881 0.378610 ## horsepower -2.313e-01 2.363e-02 -9.791 < 2e-16 *** ## weight -1.121e-02 7.285e-04 -15.393 < 2e-16 *** ## acceleration -9.019e-02 8.855e-02 -1.019 0.309081 ## year 7.695e-01 4.494e-02 17.124 < 2e-16 *** ## origin 8.344e-01 2.513e-01 3.320 0.000986 *** ## horsepower:weight 5.529e-05 5.227e-06 10.577 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.931 on 383 degrees of freedom ## Multiple R-squared: 0.8618, Adjusted R-squared: 0.859 ## F-statistic: 298.6 on 8 and 383 DF, p-value: < 2.2e-16 summary(lm(mpg ~ . + acceleration:horsepower, data = x)) ## ## Call: ## lm(formula = mpg ~ . + acceleration:horsepower, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.0329 -1.8177 -0.1183 1.7247 12.4870 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -32.499820 4.923380 -6.601 1.36e-10 *** ## cylinders 0.083489 0.316913 0.263 0.792350 ## displacement -0.007649 0.008161 -0.937 0.349244 ## horsepower 0.127188 0.024746 5.140 4.40e-07 *** ## weight -0.003976 0.000716 -5.552 5.27e-08 *** ## acceleration 0.983282 0.161513 6.088 2.78e-09 *** ## year 0.755919 0.048179 15.690 < 2e-16 *** ## origin 1.035733 0.268962 3.851 0.000138 *** ## horsepower:acceleration -0.012139 0.001772 -6.851 2.93e-11 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.145 on 383 degrees of freedom ## Multiple R-squared: 0.841, Adjusted R-squared: 0.8376 ## F-statistic: 253.2 on 8 and 383 DF, p-value: < 2.2e-16 summary(lm(mpg ~ . + cylinders:weight, data = x)) ## ## Call: ## lm(formula = mpg ~ . + cylinders:weight, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -10.9484 -1.7133 -0.1809 1.4530 12.4137 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 7.3143478 5.0076737 1.461 0.14494 ## cylinders -5.0347425 0.5795767 -8.687 < 2e-16 *** ## displacement 0.0156444 0.0068409 2.287 0.02275 * ## horsepower -0.0314213 0.0126216 -2.489 0.01322 * ## weight -0.0150329 0.0011125 -13.513 < 2e-16 *** ## acceleration 0.1006438 0.0897944 1.121 0.26306 ## year 0.7813453 0.0464139 16.834 < 2e-16 *** ## origin 0.8030154 0.2617333 3.068 0.00231 ** ## cylinders:weight 0.0015058 0.0001657 9.088 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.022 on 383 degrees of freedom ## Multiple R-squared: 0.8531, Adjusted R-squared: 0.8501 ## F-statistic: 278.1 on 8 and 383 DF, p-value: < 2.2e-16 There are at least three cases where the interactions appear to be highly significant. Try a few different transformations of the variables, such as \\(log(X)\\), \\(\\sqrt{X}\\), \\(X^2\\). Comment on your findings. Here I’ll just consider transformations for horsepower. 
par(mfrow = c(2, 2)) plot(Auto$horsepower, Auto$mpg, cex = 0.2) plot(log(Auto$horsepower), Auto$mpg, cex = 0.2) plot(sqrt(Auto$horsepower), Auto$mpg, cex = 0.2) plot(Auto$horsepower ^ 2, Auto$mpg, cex = 0.2) x <- subset(Auto, select = -name) x$horsepower <- log(x$horsepower) fit <- lm(mpg ~ ., data = x) summary(fit) ## ## Call: ## lm(formula = mpg ~ ., data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.3115 -2.0041 -0.1726 1.8393 12.6579 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 27.254005 8.589614 3.173 0.00163 ** ## cylinders -0.486206 0.306692 -1.585 0.11372 ## displacement 0.019456 0.006876 2.830 0.00491 ** ## horsepower -9.506436 1.539619 -6.175 1.69e-09 *** ## weight -0.004266 0.000694 -6.148 1.97e-09 *** ## acceleration -0.292088 0.103804 -2.814 0.00515 ** ## year 0.705329 0.048456 14.556 < 2e-16 *** ## origin 1.482435 0.259347 5.716 2.19e-08 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.18 on 384 degrees of freedom ## Multiple R-squared: 0.837, Adjusted R-squared: 0.834 ## F-statistic: 281.6 on 7 and 384 DF, p-value: < 2.2e-16 par(mfrow = c(2, 2)) plot(fit, cex = 0.2) A log transformation of horsepower appears to give a more linear relationship with mpg. 3.2.3 Question 10 This question should be answered using the Carseats data set. Fit a multiple regression model to predict Sales using Price, Urban, and US. fit <- lm(Sales ~ Price + Urban + US, data = Carseats) Provide an interpretation of each coefficient in the model. Be careful—some of the variables in the model are qualitative! summary(fit) ## ## Call: ## lm(formula = Sales ~ Price + Urban + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9206 -1.6220 -0.0564 1.5786 7.0581 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.043469 0.651012 20.036 < 2e-16 *** ## Price -0.054459 0.005242 -10.389 < 2e-16 *** ## UrbanYes -0.021916 0.271650 -0.081 0.936 ## USYes 1.200573 0.259042 4.635 4.86e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.472 on 396 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2335 ## F-statistic: 41.52 on 3 and 396 DF, p-value: < 2.2e-16 Write out the model in equation form, being careful to handle the qualitative variables properly. \\[ \\textit{Sales} = 13 + -0.054 \\times \\textit{Price} + \\begin{cases} -0.022, & \\text{if $\\textit{Urban}$ is Yes, $\\textit{US}$ is No} \\\\ 1.20, & \\text{if $\\textit{Urban}$ is No, $\\textit{US}$ is Yes} \\\\ 1.18, & \\text{if $\\textit{Urban}$ and $\\textit{US}$ is Yes} \\\\ 0, & \\text{Otherwise} \\end{cases} \\] For which of the predictors can you reject the null hypothesis \\(H_0 : \\beta_j = 0\\)? Price and US (Urban shows no significant difference between “No” and “Yes”) On the basis of your response to the previous question, fit a smaller model that only uses the predictors for which there is evidence of association with the outcome. fit2 <- lm(Sales ~ Price + US, data = Carseats) How well do the models in (a) and (e) fit the data? summary(fit) ## ## Call: ## lm(formula = Sales ~ Price + Urban + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9206 -1.6220 -0.0564 1.5786 7.0581 ## ## Coefficients: ## Estimate Std. 
Error t value Pr(>|t|) ## (Intercept) 13.043469 0.651012 20.036 < 2e-16 *** ## Price -0.054459 0.005242 -10.389 < 2e-16 *** ## UrbanYes -0.021916 0.271650 -0.081 0.936 ## USYes 1.200573 0.259042 4.635 4.86e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.472 on 396 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2335 ## F-statistic: 41.52 on 3 and 396 DF, p-value: < 2.2e-16 summary(fit2) ## ## Call: ## lm(formula = Sales ~ Price + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9269 -1.6286 -0.0574 1.5766 7.0515 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.03079 0.63098 20.652 < 2e-16 *** ## Price -0.05448 0.00523 -10.416 < 2e-16 *** ## USYes 1.19964 0.25846 4.641 4.71e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.469 on 397 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2354 ## F-statistic: 62.43 on 2 and 397 DF, p-value: < 2.2e-16 anova(fit, fit2) ## Analysis of Variance Table ## ## Model 1: Sales ~ Price + Urban + US ## Model 2: Sales ~ Price + US ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 396 2420.8 ## 2 397 2420.9 -1 -0.03979 0.0065 0.9357 They have similar \\(R^2\\) and the model containing the extra variable “Urban” is non-significantly better. Using the model from (e), obtain 95% confidence intervals for the coefficient(s). confint(fit2) ## 2.5 % 97.5 % ## (Intercept) 11.79032020 14.27126531 ## Price -0.06475984 -0.04419543 ## USYes 0.69151957 1.70776632 Is there evidence of outliers or high leverage observations in the model from (e)? par(mfrow = c(2, 2)) plot(fit2, cex = 0.2) Yes, somewhat. 3.2.4 Question 11 In this problem we will investigate the t-statistic for the null hypothesis \\(H_0 : \\beta = 0\\) in simple linear regression without an intercept. To begin, we generate a predictor x and a response y as follows. set.seed(1) x <- rnorm(100) y <- 2 * x + rnorm(100) set.seed(1) x <- rnorm(100) y <- 2 * x + rnorm(100) Perform a simple linear regression of y onto x, without an intercept. Report the coefficient estimate \\(\\hat{\\beta}\\), the standard error of this coefficient estimate, and the t-statistic and p-value associated with the null hypothesis \\(H_0 : \\beta = 0\\). Comment on these results. (You can perform regression without an intercept using the command lm(y~x+0).) fit <- lm(y ~ x + 0) coef(summary(fit)) ## Estimate Std. Error t value Pr(>|t|) ## x 1.993876 0.1064767 18.72593 2.642197e-34 There’s a significant positive relationship between \\(y\\) and \\(x\\). \\(y\\) values are predicted to be (a little below) twice the \\(x\\) values. Now perform a simple linear regression of x onto y without an intercept, and report the coefficient estimate, its standard error, and the corresponding t-statistic and p-values associated with the null hypothesis \\(H_0 : \\beta = 0\\). Comment on these results. fit <- lm(x ~ y + 0) coef(summary(fit)) ## Estimate Std. Error t value Pr(>|t|) ## y 0.3911145 0.02088625 18.72593 2.642197e-34 There’s a significant positive relationship between \\(x\\) and \\(y\\). \\(x\\) values are predicted to be (a little below) half the \\(y\\) values. What is the relationship between the results obtained in (a) and (b)? Without error, the coefficients would be the inverse of each other (2 and 1/2). The t-statistic and p-values are the same. 
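A short numerical aside (added here, not part of the original answer): for regression through the origin the two slope estimates are \\(\\sum_i x_iy_i / \\sum_i x_i^2\\) and \\(\\sum_i x_iy_i / \\sum_i y_i^2\\), so their product is \\((\\sum_i x_iy_i)^2 / (\\sum_i x_i^2 \\sum_i y_i^2)\\), which is at most 1 by the Cauchy-Schwarz inequality. With noise it falls below 1, which is why the estimates above (roughly 2 and 0.39) are not exact inverses.

b_yx <- coef(lm(y ~ x + 0))  # slope from regressing y on x, no intercept
b_xy <- coef(lm(x ~ y + 0))  # slope from regressing x on y, no intercept
unname(b_yx * b_xy)          # equals sum(x * y)^2 / (sum(x^2) * sum(y^2)), less than 1 here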
For the regression of \\(Y\\) onto \\(X\\) without an intercept, the t-statistic for \\(H_0 : \\beta = 0\\) takes the form \\(\\hat{\\beta}/SE(\\hat{\\beta})\\), where \\(\\hat{\\beta}\\) is given by (3.38), and where \\[ SE(\\hat\\beta) = \\sqrt{\\frac{\\sum_{i=1}^n(y_i - x_i\\hat\\beta)^2}{(n-1)\\sum_{i'=1}^nx_{i'}^2}}. \\] (These formulas are slightly different from those given in Sections 3.1.1 and 3.1.2, since here we are performing regression without an intercept.) Show algebraically, and confirm numerically in R, that the t-statistic can be written as \\[ \\frac{(\\sqrt{n-1}) \\sum_{i-1}^nx_iy_i)} {\\sqrt{(\\sum_{i=1}^nx_i^2)(\\sum_{i'=1}^ny_{i'}^2)-(\\sum_{i'=1}^nx_{i'}y_{i'})^2}} \\] \\[ \\beta = \\sum_i x_i y_i / \\sum_{i'} x_{i'}^2 ,\\] therefore \\[\\begin{align} t &= \\frac{\\sum_i x_i y_i \\sqrt{n-1} \\sqrt{\\sum_ix_i^2}} {\\sum_i x_i^2 \\sqrt{\\sum_i(y_i - x_i \\beta)^2}} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{\\sum_ix_i^2 \\sum_i(y_i - x_i \\beta)^2}} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_i(y_i^2 - 2 y_i x_i \\beta + x_i^2 \\beta^2) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_iy_i^2 - \\beta \\sum_ix_i^2 (2 \\sum_i y_i x_i -\\beta \\sum_i x_i^2) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_iy_i^2 - \\sum_i x_i y_i (2 \\sum_i y_i x_i - \\sum_i x_i y_i) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{\\sum_ix_i^2 \\sum_iy_i^2 - (\\sum_i x_i y_i)^2}} \\\\ \\end{align}\\] We can show this numerically in R by computing \\(t\\) using the above equation. n <- length(x) sqrt(n - 1) * sum(x * y) / sqrt(sum(x ^ 2) * sum(y ^ 2) - sum(x * y) ^ 2) ## [1] 18.72593 Using the results from (d), argue that the t-statistic for the regression of y onto x is the same as the t-statistic for the regression of x onto y. Swapping \\(x_i\\) for \\(y_i\\) in the formula for \\(t\\) will give the same result. In R, show that when regression is performed with an intercept, the t-statistic for \\(H_0 : \\beta_1 = 0\\) is the same for the regression of y onto x as it is for the regression of x onto y. coef(summary(lm(y ~ x))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -0.03769261 0.09698729 -0.3886346 6.983896e-01 ## x 1.99893961 0.10772703 18.5555993 7.723851e-34 coef(summary(lm(x ~ y))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 0.03880394 0.04266144 0.9095787 3.652764e-01 ## y 0.38942451 0.02098690 18.5555993 7.723851e-34 3.2.5 Question 12 This problem involves simple linear regression without an intercept. Recall that the coefficient estimate \\(\\hat{\\beta}\\) for the linear regression of \\(Y\\) onto \\(X\\) without an intercept is given by (3.38). Under what circumstance is the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) the same as the coefficient estimate for the regression of \\(Y\\) onto \\(X\\)? \\[ \\hat\\beta = \\sum_i x_iy_i / \\sum_{i'} x_{i'}^2 \\] The coefficient for the regression of X onto Y swaps the \\(x\\) and \\(y\\) variables: \\[ \\hat\\beta = \\sum_i x_iy_i / \\sum_{i'} y_{i'}^2 \\] So they are the same when \\(\\sum_{i} x_{i}^2 = \\sum_{i} y_{i}^2\\) Generate an example in R with \\(n = 100\\) observations in which the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) is different from the coefficient estimate for the regression of \\(Y\\) onto \\(X\\). 
x <- rnorm(100) y <- 2 * x + rnorm(100, 0, 0.1) c(sum(x^2), sum(y^2)) ## [1] 105.9889 429.4924 c(coef(lm(y ~ x))[2], coef(lm(x ~ y))[2]) ## x y ## 2.0106218 0.4962439 Generate an example in R with \\(n = 100\\) observations in which the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) is the same as the coefficient estimate for the regression of \\(Y\\) onto \\(X\\). x <- rnorm(100) y <- x + rnorm(100, 0, 0.1) c(sum(x^2), sum(y^2)) ## [1] 135.5844 134.5153 c(coef(lm(y ~ x))[2], coef(lm(x ~ y))[2]) ## x y ## 0.9925051 1.0006765 3.2.6 Question 13 In this exercise you will create some simulated data and will fit simple linear regression models to it. Make sure to use set.seed(1) prior to starting part (a) to ensure consistent results. set.seed(1) Using the rnorm() function, create a vector, x, containing 100 observations drawn from a \\(N(0, 1)\\) distribution. This represents a feature, \\(X\\). x <- rnorm(100, 0, 1) Using the rnorm() function, create a vector, eps, containing 100 observations drawn from a \\(N(0, 0.25)\\) distribution—a normal distribution with mean zero and variance 0.25. eps <- rnorm(100, 0, sqrt(0.25)) Using x and eps, generate a vector y according to the model \\[Y = -1 + 0.5X + \\epsilon\\] What is the length of the vector y? What are the values of \\(\\beta_0\\) and \\(\\beta_1\\) in this linear model? y <- -1 + 0.5 * x + eps length(y) ## [1] 100 \\(\\beta_0 = -1\\) and \\(\\beta_1 = 0.5\\) Create a scatterplot displaying the relationship between x and y. Comment on what you observe. plot(x, y) There is a linear relationship between \\(x\\) and \\(y\\) (with some error). Fit a least squares linear model to predict y using x. Comment on the model obtained. How do \\(\\hat\\beta_0\\) and \\(\\hat\\beta_1\\) compare to \\(\\beta_0\\) and \\(\\beta_1\\)? fit <- lm(y ~ x) summary(fit) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -0.93842 -0.30688 -0.06975 0.26970 1.17309 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.01885 0.04849 -21.010 < 2e-16 *** ## x 0.49947 0.05386 9.273 4.58e-15 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.4814 on 98 degrees of freedom ## Multiple R-squared: 0.4674, Adjusted R-squared: 0.4619 ## F-statistic: 85.99 on 1 and 98 DF, p-value: 4.583e-15 \\(\\beta_0\\) and \\(\\beta_1\\) are close to their population values. Display the least squares line on the scatterplot obtained in (d). Draw the population regression line on the plot, in a different color. Use the legend() command to create an appropriate legend. plot(x, y) abline(fit) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) Now fit a polynomial regression model that predicts y using x and x^2. Is there evidence that the quadratic term improves the model fit? Explain your answer. fit2 <- lm(y ~ poly(x, 2)) anova(fit2, fit) ## Analysis of Variance Table ## ## Model 1: y ~ poly(x, 2) ## Model 2: y ~ x ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 97 22.257 ## 2 98 22.709 -1 -0.45163 1.9682 0.1638 There is no evidence for an improved fit, since the F-test is non-significant. Repeat (a)–(f) after modifying the data generation process in such a way that there is less noise in the data. The model (3.39) should remain the same. You can do this by decreasing the variance of the normal distribution used to generate the error term \\(\\epsilon\\) in (b). Describe your results. 
x <- rnorm(100, 0, 1) y <- -1 + 0.5 * x + rnorm(100, 0, sqrt(0.05)) fit2 <- lm(y ~ x) summary(fit2) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -0.61308 -0.12553 -0.00391 0.15199 0.41332 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -0.98917 0.02216 -44.64 <2e-16 *** ## x 0.52375 0.02152 24.33 <2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.2215 on 98 degrees of freedom ## Multiple R-squared: 0.858, Adjusted R-squared: 0.8565 ## F-statistic: 592.1 on 1 and 98 DF, p-value: < 2.2e-16 plot(x, y) abline(fit2) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) The data shows less variability and the \\(R^2\\) is higher. Repeat (a)–(f) after modifying the data generation process in such a way that there is more noise in the data. The model (3.39) should remain the same. You can do this by increasing the variance of the normal distribution used to generate the error term \\(\\epsilon\\) in (b). Describe your results. x <- rnorm(100, 0, 1) y <- -1 + 0.5 * x + rnorm(100, 0, 1) fit3 <- lm(y ~ x) summary(fit3) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.51014 -0.60549 0.02065 0.70483 2.08980 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.04745 0.09676 -10.825 < 2e-16 *** ## x 0.42505 0.08310 5.115 1.56e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.9671 on 98 degrees of freedom ## Multiple R-squared: 0.2107, Adjusted R-squared: 0.2027 ## F-statistic: 26.16 on 1 and 98 DF, p-value: 1.56e-06 plot(x, y) abline(fit3) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) The data shows more variability. The \\(R^2\\) is lower. What are the confidence intervals for \\(\\beta_0\\) and \\(\\beta_1\\) based on the original data set, the noisier data set, and the less noisy data set? Comment on your results. confint(fit) ## 2.5 % 97.5 % ## (Intercept) -1.1150804 -0.9226122 ## x 0.3925794 0.6063602 confint(fit2) ## 2.5 % 97.5 % ## (Intercept) -1.033141 -0.9451916 ## x 0.481037 0.5664653 confint(fit3) ## 2.5 % 97.5 % ## (Intercept) -1.2394772 -0.8554276 ## x 0.2601391 0.5899632 The confidence intervals for the coefficients are smaller when there is less error. 3.2.7 Question 14 This problem focuses on the collinearity problem. Perform the following commands in R : > set.seed(1) > x1 <- runif(100) > x2 <- 0.5 * x1 + rnorm(100) / 10 > y <- 2 + 2 * x1 + 0.3 * x2 + rnorm(100) The last line corresponds to creating a linear model in which y is a function of x1 and x2. Write out the form of the linear model. What are the regression coefficients? set.seed(1) x1 <- runif(100) x2 <- 0.5 * x1 + rnorm(100) / 10 y <- 2 + 2 * x1 + 0.3 * x2 + rnorm(100) The model is of the form: \\[Y = \\beta_0 + \\beta_1X_1 + \\beta_2X_2 + \\epsilon\\] The coefficients are \\(\\beta_0 = 2\\), \\(\\beta_1 = 2\\), \\(\\beta_3 = 0.3\\). What is the correlation between x1 and x2? Create a scatterplot displaying the relationship between the variables. cor(x1, x2) ## [1] 0.8351212 plot(x1, x2) Using this data, fit a least squares regression to predict y using x1 and x2. Describe the results obtained. What are \\(\\hat\\beta_0\\), \\(\\hat\\beta_1\\), and \\(\\hat\\beta_2\\)? 
How do these relate to the true \\(\\beta_0\\), \\(\\beta_1\\), and \\(\\beta_2\\)? Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? How about the null hypothesis \\(H_0 : \\beta_2 = 0\\)? summary(lm(y ~ x1 + x2)) ## ## Call: ## lm(formula = y ~ x1 + x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.8311 -0.7273 -0.0537 0.6338 2.3359 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.1305 0.2319 9.188 7.61e-15 *** ## x1 1.4396 0.7212 1.996 0.0487 * ## x2 1.0097 1.1337 0.891 0.3754 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.056 on 97 degrees of freedom ## Multiple R-squared: 0.2088, Adjusted R-squared: 0.1925 ## F-statistic: 12.8 on 2 and 97 DF, p-value: 1.164e-05 \\(\\hat\\beta_0 = 2.13\\), \\(\\hat\\beta_1 = 1.43\\), and \\(\\hat\\beta_2 = 1.01\\). These are relatively poor estimates of the true values. We can (just about) reject the null hypothesis \\(H_0 : \\beta_1 = 0\\) at a p-value of 0.05. We cannot reject the hypothesis that \\(H_0 : \\beta_2 = 0\\). Now fit a least squares regression to predict y using only x1. Comment on your results. Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? summary(lm(y ~ x1)) ## ## Call: ## lm(formula = y ~ x1) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.89495 -0.66874 -0.07785 0.59221 2.45560 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.1124 0.2307 9.155 8.27e-15 *** ## x1 1.9759 0.3963 4.986 2.66e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.055 on 98 degrees of freedom ## Multiple R-squared: 0.2024, Adjusted R-squared: 0.1942 ## F-statistic: 24.86 on 1 and 98 DF, p-value: 2.661e-06 We can reject \\(H_0 : \\beta_1 = 0\\). The p-value is much more significant for \\(\\beta_1\\) compared to when x2 is included in the model. Now fit a least squares regression to predict y using only x2. Comment on your results. Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? summary(lm(y ~ x2)) ## ## Call: ## lm(formula = y ~ x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.62687 -0.75156 -0.03598 0.72383 2.44890 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.3899 0.1949 12.26 < 2e-16 *** ## x2 2.8996 0.6330 4.58 1.37e-05 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.072 on 98 degrees of freedom ## Multiple R-squared: 0.1763, Adjusted R-squared: 0.1679 ## F-statistic: 20.98 on 1 and 98 DF, p-value: 1.366e-05 Similarly, we can reject \\(H_0 : \\beta_2 = 0\\). The p-value is much more significant for \\(\\beta_2\\) compared to when x1 is included in the model. Do the results obtained in (c)–(e) contradict each other? Explain your answer. No, they do not contradict each other. Both x1 and x2 individually are capable of explaining much of the variation observed in y; however, since they are correlated, it is very difficult to tease apart their separate contributions. Now suppose we obtain one additional observation, which was unfortunately mismeasured. > x1 <- c(x1, 0.1) > x2 <- c(x2, 0.8) > y <- c(y, 6) Re-fit the linear models from (c) to (e) using this new data. What effect does this new observation have on each of the models? In each model, is this observation an outlier? A high-leverage point? Both? Explain your answers.
x1 <- c(x1 , 0.1) x2 <- c(x2 , 0.8) y <- c(y ,6) summary(lm(y ~ x1 + x2)) ## ## Call: ## lm(formula = y ~ x1 + x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.73348 -0.69318 -0.05263 0.66385 2.30619 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.2267 0.2314 9.624 7.91e-16 *** ## x1 0.5394 0.5922 0.911 0.36458 ## x2 2.5146 0.8977 2.801 0.00614 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.075 on 98 degrees of freedom ## Multiple R-squared: 0.2188, Adjusted R-squared: 0.2029 ## F-statistic: 13.72 on 2 and 98 DF, p-value: 5.564e-06 summary(lm(y ~ x1)) ## ## Call: ## lm(formula = y ~ x1) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.8897 -0.6556 -0.0909 0.5682 3.5665 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.2569 0.2390 9.445 1.78e-15 *** ## x1 1.7657 0.4124 4.282 4.29e-05 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.111 on 99 degrees of freedom ## Multiple R-squared: 0.1562, Adjusted R-squared: 0.1477 ## F-statistic: 18.33 on 1 and 99 DF, p-value: 4.295e-05 summary(lm(y ~ x2)) ## ## Call: ## lm(formula = y ~ x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.64729 -0.71021 -0.06899 0.72699 2.38074 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.3451 0.1912 12.264 < 2e-16 *** ## x2 3.1190 0.6040 5.164 1.25e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.074 on 99 degrees of freedom ## Multiple R-squared: 0.2122, Adjusted R-squared: 0.2042 ## F-statistic: 26.66 on 1 and 99 DF, p-value: 1.253e-06 par(mfrow = c(2, 2)) plot(lm(y ~ x1 + x2), cex = 0.2) par(mfrow = c(2, 2)) plot(lm(y ~ x1), cex = 0.2) par(mfrow = c(2, 2)) plot(lm(y ~ x2), cex = 0.2) In the first model (with both predictors), the new point has very high leverage (since it is an outlier in terms of the joint x1 and x2 distribution), however it is not an outlier. In the model that includes x1, it is an outlier but does not have high leverage. In the model that includes x2, it has high leverage but is not an outlier. It is useful to consider the scatterplot of x1 and x2. plot(x1, x2) points(0.1, 0.8, col = "red", pch = 19) 3.2.8 Question 15 This problem involves the Boston data set, which we saw in the lab for this chapter. We will now try to predict per capita crime rate using the other variables in this data set. In other words, per capita crime rate is the response, and the other variables are the predictors. We are trying to predict crim. pred <- subset(Boston, select = -crim) For each predictor, fit a simple linear regression model to predict the response. Describe your results. In which of the models is there a statistically significant association between the predictor and the response? Create some plots to back up your assertions. fits <- lapply(pred, function(x) lm(Boston$crim ~ x)) printCoefmat(do.call(rbind, lapply(fits, function(x) coef(summary(x))[2, ]))) ## Estimate Std. 
Error t value Pr(>|t|) ## zn -0.0739350 0.0160946 -4.5938 5.506e-06 *** ## indus 0.5097763 0.0510243 9.9908 < 2.2e-16 *** ## chas -1.8927766 1.5061155 -1.2567 0.2094 ## nox 31.2485312 2.9991904 10.4190 < 2.2e-16 *** ## rm -2.6840512 0.5320411 -5.0448 6.347e-07 *** ## age 0.1077862 0.0127364 8.4628 2.855e-16 *** ## dis -1.5509017 0.1683300 -9.2135 < 2.2e-16 *** ## rad 0.6179109 0.0343318 17.9982 < 2.2e-16 *** ## tax 0.0297423 0.0018474 16.0994 < 2.2e-16 *** ## ptratio 1.1519828 0.1693736 6.8014 2.943e-11 *** ## lstat 0.5488048 0.0477610 11.4907 < 2.2e-16 *** ## medv -0.3631599 0.0383902 -9.4597 < 2.2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 There are significant associations for all predictors with the exception of chas when fitting separate linear models. For example, consider the following plot representing the third model plot(Boston$rm, Boston$crim) abline(fits[[5]]) Fit a multiple regression model to predict the response using all of the predictors. Describe your results. For which predictors can we reject the null hypothesis \\(H_0 : \\beta_j = 0\\)? mfit <- lm(crim ~ ., data = Boston) summary(mfit) ## ## Call: ## lm(formula = crim ~ ., data = Boston) ## ## Residuals: ## Min 1Q Median 3Q Max ## -8.534 -2.248 -0.348 1.087 73.923 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.7783938 7.0818258 1.946 0.052271 . ## zn 0.0457100 0.0187903 2.433 0.015344 * ## indus -0.0583501 0.0836351 -0.698 0.485709 ## chas -0.8253776 1.1833963 -0.697 0.485841 ## nox -9.9575865 5.2898242 -1.882 0.060370 . ## rm 0.6289107 0.6070924 1.036 0.300738 ## age -0.0008483 0.0179482 -0.047 0.962323 ## dis -1.0122467 0.2824676 -3.584 0.000373 *** ## rad 0.6124653 0.0875358 6.997 8.59e-12 *** ## tax -0.0037756 0.0051723 -0.730 0.465757 ## ptratio -0.3040728 0.1863598 -1.632 0.103393 ## lstat 0.1388006 0.0757213 1.833 0.067398 . ## medv -0.2200564 0.0598240 -3.678 0.000261 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 6.46 on 493 degrees of freedom ## Multiple R-squared: 0.4493, Adjusted R-squared: 0.4359 ## F-statistic: 33.52 on 12 and 493 DF, p-value: < 2.2e-16 There are now only significant associations for zn, dis, rad, black and medv. How do your results from (a) compare to your results from (b)? Create a plot displaying the univariate regression coefficients from (a) on the \\(x\\)-axis, and the multiple regression coefficients from (b) on the \\(y\\)-axis. That is, each predictor is displayed as a single point in the plot. Its coefficient in a simple linear regression model is shown on the x-axis, and its coefficient estimate in the multiple linear regression model is shown on the y-axis. The results from (b) show reduced significance compared to the models fit in (a). plot(sapply(fits, function(x) coef(x)[2]), coef(mfit)[-1], xlab = "Univariate regression", ylab = "multiple regression") The estimated coefficients differ (in particular the estimated coefficient for nox is dramatically different) between the two modelling strategies. Is there evidence of non-linear association between any of the predictors and the response? To answer this question, for each predictor X, fit a model of the form \\[ Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon \\] pred <- subset(pred, select = -chas) fits <- lapply(names(pred), function(p) { f <- paste0("crim ~ poly(", p, ", 3)") lm(as.formula(f), data = Boston) }) for (fit in fits) printCoefmat(coef(summary(fit))) ## Estimate Std. 
Error t value Pr(>|t|) ## (Intercept) 3.61352 0.37219 9.7088 < 2.2e-16 *** ## poly(zn, 3)1 -38.74984 8.37221 -4.6284 4.698e-06 *** ## poly(zn, 3)2 23.93983 8.37221 2.8594 0.004421 ** ## poly(zn, 3)3 -10.07187 8.37221 -1.2030 0.229539 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.6135 0.3300 10.9501 < 2.2e-16 *** ## poly(indus, 3)1 78.5908 7.4231 10.5873 < 2.2e-16 *** ## poly(indus, 3)2 -24.3948 7.4231 -3.2863 0.001086 ** ## poly(indus, 3)3 -54.1298 7.4231 -7.2920 1.196e-12 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.32157 11.2370 < 2.2e-16 *** ## poly(nox, 3)1 81.37202 7.23361 11.2492 < 2.2e-16 *** ## poly(nox, 3)2 -28.82859 7.23361 -3.9854 7.737e-05 *** ## poly(nox, 3)3 -60.36189 7.23361 -8.3446 6.961e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.6135 0.3703 9.7584 < 2.2e-16 *** ## poly(rm, 3)1 -42.3794 8.3297 -5.0878 5.128e-07 *** ## poly(rm, 3)2 26.5768 8.3297 3.1906 0.001509 ** ## poly(rm, 3)3 -5.5103 8.3297 -0.6615 0.508575 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.34852 10.3683 < 2.2e-16 *** ## poly(age, 3)1 68.18201 7.83970 8.6970 < 2.2e-16 *** ## poly(age, 3)2 37.48447 7.83970 4.7814 2.291e-06 *** ## poly(age, 3)3 21.35321 7.83970 2.7237 0.00668 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.32592 11.0870 < 2.2e-16 *** ## poly(dis, 3)1 -73.38859 7.33148 -10.0101 < 2.2e-16 *** ## poly(dis, 3)2 56.37304 7.33148 7.6892 7.870e-14 *** ## poly(dis, 3)3 -42.62188 7.33148 -5.8135 1.089e-08 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.29707 12.1639 < 2.2e-16 *** ## poly(rad, 3)1 120.90745 6.68240 18.0934 < 2.2e-16 *** ## poly(rad, 3)2 17.49230 6.68240 2.6177 0.009121 ** ## poly(rad, 3)3 4.69846 6.68240 0.7031 0.482314 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.30468 11.8599 < 2.2e-16 *** ## poly(tax, 3)1 112.64583 6.85371 16.4358 < 2.2e-16 *** ## poly(tax, 3)2 32.08725 6.85371 4.6817 3.665e-06 *** ## poly(tax, 3)3 -7.99681 6.85371 -1.1668 0.2439 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.36105 10.0084 < 2.2e-16 *** ## poly(ptratio, 3)1 56.04523 8.12158 6.9008 1.565e-11 *** ## poly(ptratio, 3)2 24.77482 8.12158 3.0505 0.002405 ** ## poly(ptratio, 3)3 -22.27974 8.12158 -2.7433 0.006301 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.33917 10.6540 <2e-16 *** ## poly(lstat, 3)1 88.06967 7.62944 11.5434 <2e-16 *** ## poly(lstat, 3)2 15.88816 7.62944 2.0825 0.0378 * ## poly(lstat, 3)3 -11.57402 7.62944 -1.5170 0.1299 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.29203 12.374 < 2.2e-16 *** ## poly(medv, 3)1 -75.05761 6.56915 -11.426 < 2.2e-16 *** ## poly(medv, 3)2 88.08621 6.56915 13.409 < 2.2e-16 *** ## poly(medv, 3)3 -48.03343 6.56915 -7.312 1.047e-12 *** ## --- ## Signif. 
codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Yes there is strong evidence for many variables having non-linear associations. In many cases, the addition of a cubic term is significant (indus, nox, age, dis, ptratio and medv). In other cases although the cubic terms is not significant, the squared term is (zn, rm, rad, tax, lstat). In only one case, black is there no evidence for a non-linear relationship. "],["classification.html", "4 Classification 4.1 Conceptual 4.2 Applied", " 4 Classification 4.1 Conceptual 4.1.1 Question 1 Using a little bit of algebra, prove that (4.2) is equivalent to (4.3). In other words, the logistic function representation and logit representation for the logistic regression model are equivalent. We need to show that \\[ p(X) = \\frac{e^{\\beta_0 + \\beta_1X}}{1 + e^{\\beta_0 + \\beta_1X}} \\] is equivalent to \\[ \\frac{p(X)}{1-p(X)} = e^{\\beta_0 + \\beta_1X} \\] Letting \\(x = e^{\\beta_0 + \\beta_1X}\\) \\[\\begin{align} \\frac{P(X)}{1-p(X)} &= \\frac{\\frac{x}{1 + x}} {1 - \\frac{x}{1 + x}} \\\\ &= \\frac{\\frac{x}{1 + x}} {\\frac{1}{1 + x}} \\\\ &= x \\end{align}\\] 4.1.2 Question 2 It was stated in the text that classifying an observation to the class for which (4.12) is largest is equivalent to classifying an observation to the class for which (4.13) is largest. Prove that this is the case. In other words, under the assumption that the observations in the \\(k\\)th class are drawn from a \\(N(\\mu_k,\\sigma^2)\\) distribution, the Bayes’ classifier assigns an observation to the class for which the discriminant function is maximized. 4.12 is \\[ p_k(x) = \\frac{\\pi_k\\frac{1}{\\sqrt{2\\pi\\sigma}} \\exp(-\\frac{1}{2\\sigma^2}(x - \\mu_k)^2)} {\\sum_{l=1}^k \\pi_l\\frac{1}{\\sqrt{2\\pi\\sigma}} \\exp(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2)} \\] and the discriminant function is \\[ \\delta_k(x) = x.\\frac{\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma_2} + \\log(\\pi_k) \\] Since \\(\\sigma^2\\) is constant \\[ p_k(x) = \\frac{\\pi_k \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_k)^2\\right)} {\\sum_{l=1}^k \\pi_l \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2\\right)} \\] Maximizing \\(p_k(x)\\) also maximizes any monotonic function of \\(p_k(X)\\), and therefore, we can consider maximizing \\(\\log(p_K(X))\\) \\[ \\log(p_k(x)) = \\log(\\pi_k) - \\frac{1}{2\\sigma^2}(x - \\mu_k)^2 - \\log\\left(\\sum_{l=1}^k \\pi_l \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2\\right)\\right) \\] Remember that we are maximizing over \\(k\\), and since the last term does not vary with \\(k\\) it can be ignored. So we just need to maximize \\[\\begin{align} f &= \\log(\\pi_k) - \\frac{1}{2\\sigma^2} (x^2 - 2x\\mu_k + \\mu_k^2) \\\\ &= \\log(\\pi_k) - \\frac{x^2}{2\\sigma^2} + \\frac{x\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} \\\\ \\end{align}\\] Since \\(\\frac{x^2}{2\\sigma^2}\\) is also independent of \\(k\\), we just need to maximize \\[ \\log(\\pi_k) + \\frac{x\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} \\] 4.1.3 Question 3 This problem relates to the QDA model, in which the observations within each class are drawn from a normal distribution with a class-specific mean vector and a class specific covariance matrix. We consider the simple case where \\(p = 1\\); i.e. there is only one feature. Suppose that we have \\(K\\) classes, and that if an observation belongs to the \\(k\\)th class then \\(X\\) comes from a one-dimensional normal distribution, \\(X \\sim N(\\mu_k,\\sigma^2)\\). 
Recall that the density function for the one-dimensional normal distribution is given in (4.16). Prove that in this case, the Bayes classifier is not linear. Argue that it is in fact quadratic. Hint: For this problem, you should follow the arguments laid out in Section 4.4.1, but without making the assumption that \\(\\sigma_1^2 = ... = \\sigma_K^2\\). As above, \\[ p_k(x) = \\frac{\\pi_k\\frac{1}{\\sqrt{2\\pi\\sigma_k}} \\exp(-\\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2)} {\\sum_{l=1}^k \\pi_l\\frac{1}{\\sqrt{2\\pi\\sigma_l}} \\exp(-\\frac{1}{2\\sigma_l^2}(x - \\mu_l)^2)} \\] Now lets derive the Bayes classifier, without assuming \\(\\sigma_1^2 = ... = \\sigma_K^2\\) Maximizing \\(p_k(x)\\) also maximizes any monotonic function of \\(p_k(X)\\), and therefore, we can consider maximizing \\(\\log(p_K(X))\\) \\[ \\log(p_k(x)) = \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi\\sigma_k}}\\right) - \\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2 - \\log\\left(\\sum_{l=1}^k \\frac{1}{\\sqrt{2\\pi\\sigma_l}} \\pi_l \\exp\\left(-\\frac{1}{2\\sigma_l^2}(x - \\mu_l)^2\\right)\\right) \\] Remember that we are maximizing over \\(k\\), and since the last term does not vary with \\(k\\) it can be ignored. So we just need to maximize \\[\\begin{align} f &= \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi\\sigma_k}}\\right) - \\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2 \\\\ &= \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi\\sigma_k}}\\right) - \\frac{x^2}{2\\sigma_k^2} + \\frac{x\\mu_k}{\\sigma_k^2} - \\frac{\\mu_k^2}{2\\sigma_k^2} \\\\ \\end{align}\\] However, unlike in Q2, \\(\\frac{x^2}{2\\sigma_k^2}\\) is not independent of \\(k\\), so we retain the term with \\(x^2\\), hence \\(f\\), the Bayes’ classifier, is a quadratic function of \\(x\\). 4.1.4 Question 4 When the number of features \\(p\\) is large, there tends to be a deterioration in the performance of KNN and other local approaches that perform prediction using only observations that are near the test observation for which a prediction must be made. This phenomenon is known as the curse of dimensionality, and it ties into the fact that non-parametric approaches often perform poorly when \\(p\\) is large. We will now investigate this curse. Suppose that we have a set of observations, each with measurements on \\(p = 1\\) feature, \\(X\\). We assume that \\(X\\) is uniformly (evenly) distributed on \\([0, 1]\\). Associated with each observation is a response value. Suppose that we wish to predict a test observation’s response using only observations that are within 10% of the range of \\(X\\) closest to that test observation. For instance, in order to predict the response for a test observation with \\(X = 0.6\\), we will use observations in the range \\([0.55, 0.65]\\). On average, what fraction of the available observations will we use to make the prediction? For values in \\(0...0.05\\), we use less than 10% of observations (between 5% and 10%, 7.5% on average), similarly with values in \\(0.95...1\\). For values in \\(0.05...0.95\\) we use 10% of available observations. The (weighted) average is then \\(7.5 \\times 0.1 + 10 \\times 0.9 = 9.75\\%\\). Now suppose that we have a set of observations, each with measurements on \\(p = 2\\) features, \\(X_1\\) and \\(X_2\\). We assume that \\((X_1, X_2)\\) are uniformly distributed on \\([0, 1] \\times [0, 1]\\). We wish to predict a test observation’s response using only observations that are within 10% of the range of \\(X_1\\) and within 10% of the range of \\(X_2\\) closest to that test observation. 
For instance, in order to predict the response for a test observation with \\(X_1 = 0.6\\) and \\(X_2 = 0.35\\), we will use observations in the range \\([0.55, 0.65]\\) for \\(X_1\\) and in the range \\([0.3, 0.4]\\) for \\(X_2\\). On average, what fraction of the available observations will we use to make the prediction? Since we need the observation to be within range for \\(X_1\\) and \\(X_2\\) we square 9.75% = \\(0.0975^2 \\times 100 = 0.95\\%\\) Now suppose that we have a set of observations on \\(p = 100\\) features. Again the observations are uniformly distributed on each feature, and again each feature ranges in value from 0 to 1. We wish to predict a test observation’s response using observations within the 10% of each feature’s range that is closest to that test observation. What fraction of the available observations will we use to make the prediction? Similar to above, we use: \\(0.0975^{100} \\times 100 = 8 \\times 10^{-100}\\%\\), essentially zero. Using your answers to parts (a)–(c), argue that a drawback of KNN when \\(p\\) is large is that there are very few training observations “near” any given test observation. As \\(p\\) increases, the fraction of observations near any given point rapidly approaches zero. For instance, even if you use 50% of the nearest observations for each \\(p\\), with \\(p = 10\\), only \\(0.5^{10} \\times 100 \\approx 0.1\\%\\) points are “near”. Now suppose that we wish to make a prediction for a test observation by creating a \\(p\\)-dimensional hypercube centered around the test observation that contains, on average, 10% of the training observations. For \\(p = 1,2,\\) and \\(100\\), what is the length of each side of the hypercube? Comment on your answer. Note: A hypercube is a generalization of a cube to an arbitrary number of dimensions. When \\(p = 1\\), a hypercube is simply a line segment, when \\(p = 2\\) it is a square, and when \\(p = 100\\) it is a 100-dimensional cube. When \\(p = 1\\), clearly the length is 0.1. When \\(p = 2\\), we need the value \\(l\\) such that \\(l^2 = 0.1\\), so \\(l = \\sqrt{0.1} = 0.32\\). When \\(p = n\\), \\(l = 0.1^{1/n}\\), so in the case of \\(n = 100\\), \\(l = 0.98\\). Therefore, the length of each side of the hypercube rapidly approaches 1 (or 100%) of the range of each \\(p\\). 4.1.5 Question 5 We now examine the differences between LDA and QDA. If the Bayes decision boundary is linear, do we expect LDA or QDA to perform better on the training set? On the test set? QDA, being a more flexible model, will always perform better on the training set, but LDA would be expected to perform better on the test set. If the Bayes decision boundary is non-linear, do we expect LDA or QDA to perform better on the training set? On the test set? QDA, being a more flexible model, will perform better on the training set, and we would hope that extra flexibility translates to a better fit on the test set. In general, as the sample size \\(n\\) increases, do we expect the test prediction accuracy of QDA relative to LDA to improve, decline, or be unchanged? Why? As \\(n\\) increases, we would expect the prediction accuracy of QDA relative to LDA to improve as there is more data to fit to subtle effects in the data. True or False: Even if the Bayes decision boundary for a given problem is linear, we will probably achieve a superior test error rate using QDA rather than LDA because QDA is flexible enough to model a linear decision boundary. Justify your answer. False. QDA can overfit leading to poorer test performance. 
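A small simulation can make this concrete. The sketch below is an added illustration (not part of the original answer), under the assumption of two Gaussian classes with a common covariance so that the Bayes boundary is truly linear; names such as simulate_once and the chosen seed and sample sizes are ours:

library(MASS)  # for lda() and qda()

set.seed(42)  # arbitrary seed, for reproducibility of the illustration
simulate_once <- function(n_train = 50, n_test = 1000) {
  make_data <- function(n) {
    cls <- sample(c(0, 1), n, replace = TRUE)
    # identical covariance in both classes => the Bayes decision boundary is linear
    data.frame(x1 = rnorm(n, mean = 2 * cls - 1),
               x2 = rnorm(n, mean = 2 * cls - 1),
               cls = factor(cls))
  }
  train <- make_data(n_train)
  test <- make_data(n_test)
  c(lda = mean(predict(lda(cls ~ x1 + x2, data = train), test)$class != test$cls),
    qda = mean(predict(qda(cls ~ x1 + x2, data = train), test)$class != test$cls))
}
rowMeans(replicate(100, simulate_once()))
# with a linear boundary and a modest training set, QDA's average test error
# tends to be slightly higher than LDA's, reflecting its extra variance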
4.1.6 Question 6 Suppose we collect data for a group of students in a statistics class with variables \\(X_1 =\\) hours studied, \\(X_2 =\\) undergrad GPA, and \\(Y =\\) receive an A. We fit a logistic regression and produce estimated coefficient, \\(\\hat\\beta_0 = -6\\), \\(\\hat\\beta_1 = 0.05\\), \\(\\hat\\beta_2 = 1\\). Estimate the probability that a student who studies for 40h and has an undergrad GPA of 3.5 gets an A in the class. The logistic model is: \\[ \\log\\left(\\frac{p(X)}{1-p(x)}\\right) = -6 + 0.05X_1 + X_2 \\] or \\[ p(X) = \\frac{e^{-6 + 0.05X_1 + X_2}}{1 + e^{-6 + 0.05X_1 + X_2}} \\] when \\(X_1 = 40\\) and \\(X_2 = 3.5\\), \\(p(X) = 0.38\\) How many hours would the student in part (a) need to study to have a 50% chance of getting an A in the class? We would like to solve for \\(X_1\\) where \\(p(X) = 0.5\\). Taking the first equation above, we need to solve \\(0 = −6 + 0.05X_1 + 3.5\\), so \\(X_1 = 50\\) hours. 4.1.7 Question 7 Suppose that we wish to predict whether a given stock will issue a dividend this year (“Yes” or “No”) based on \\(X\\), last year’s percent profit. We examine a large number of companies and discover that the mean value of \\(X\\) for companies that issued a dividend was \\(\\bar{X} = 10\\), while the mean for those that didn’t was \\(\\bar{X} = 0\\). In addition, the variance of \\(X\\) for these two sets of companies was \\(\\hat{\\sigma}^2 = 36\\). Finally, 80% of companies issued dividends. Assuming that \\(X\\) follows a normal distribution, predict the probability that a company will issue a dividend this year given that its percentage profit was \\(X = 4\\) last year. Hint: Recall that the density function for a normal random variable is \\(f(x) =\\frac{1}{\\sqrt{2\\pi\\sigma^2}}e^{-(x-\\mu)^2/2\\sigma^2}\\). You will need to use Bayes’ theorem. Value \\(v\\) for companies (D) issuing a dividend = \\(v_D \\sim \\mathcal{N}(10, 36)\\). Value \\(v\\) for companies (N) not issuing a dividend = \\(v_N \\sim \\mathcal{N}(0, 36)\\) and \\(p(D) = 0.8\\). We want to find \\(p(D|v)\\) and we can calculate \\(p(v|D)\\) from the Gaussian density function. Note that since \\(e^2\\) is equal between both classes, the term \\(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\) cancels. \\[\\begin{align} p(D|v) &= \\frac{p(v|D) p(D)}{p(v|D)p(D) + p(v|N)p(N)} \\\\ &= \\frac{\\pi_D \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_D)^2/2\\sigma^2}} {\\pi_D \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_D)^2/2\\sigma^2} + \\pi_N \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_N)^2/2\\sigma^2}} \\\\ &= \\frac{\\pi_D e^{-(x-\\mu_D)^2/2\\sigma^2}} {\\pi_D e^{-(x-\\mu_D)^2/2\\sigma^2} + \\pi_N e^{-(x-\\mu_N)^2/2\\sigma^2}} \\\\ &= \\frac{0.8 \\times e^{-(4-10)^2/(2 \\times 36)}} {0.8 \\times e^{-(4-10)^2/(2 \\times 36)} + 0.2 \\times e^{-(4-0)^2/(2 \\times 36)}} \\\\ &= \\frac{0.8 e^{-1/2}}{0.8 e^{-1/2} + 0.2 e^{-2/9}} \\end{align}\\] exp(-0.5) * 0.8 / (exp(-0.5) * 0.8 + exp(-2/9) * 0.2) ## [1] 0.7518525 4.1.8 Question 8 Suppose that we take a data set, divide it into equally-sized training and test sets, and then try out two different classification procedures. First we use logistic regression and get an error rate of 20% on the training data and 30% on the test data. Next we use 1-nearest neighbors (i.e. \\(K = 1\\)) and get an average error rate (averaged over both test and training data sets) of 18%. Based on these results, which method should we prefer to use for classification of new observations? Why? 
For \\(K = 1\\), performance on the training set is perfect and the error rate is zero, implying a test error rate of 36%. Logistic regression outperforms 1-nearest neighbor on the test set and therefore should be preferred. 4.1.9 Question 9 This problem has to do with odds. On average, what fraction of people with an odds of 0.37 of defaulting on their credit card payment will in fact default? Odds is defined as \\(p/(1-p)\\). \\[0.37 = \\frac{p(x)}{1 - p(x)}\\] therefore, \\[p(x) = \\frac{0.37}{1 + 0.37} = 0.27\\] Suppose that an individual has a 16% chance of defaulting on her credit card payment. What are the odds that she will default? \\[0.16 / (1 - 0.16) = 0.19\\] 4.1.10 Question 10 Equation 4.32 derived an expression for \\(\\log(\\frac{Pr(Y=k|X=x)}{Pr(Y=K|X=x)})\\) in the setting where \\(p > 1\\), so that the mean for the \\(k\\)th class, \\(\\mu_k\\), is a \\(p\\)-dimensional vector, and the shared covariance \\(\\Sigma\\) is a \\(p \\times p\\) matrix. However, in the setting with \\(p = 1\\), (4.32) takes a simpler form, since the means \\(\\mu_1, ..., \\mu_k\\) and the variance \\(\\sigma^2\\) are scalars. In this simpler setting, repeat the calculation in (4.32), and provide expressions for \\(a_k\\) and \\(b_{kj}\\) in terms of \\(\\pi_k, \\pi_K, \\mu_k, \\mu_K,\\) and \\(\\sigma^2\\). \\[\\begin{align*} \\log\\left(\\frac{Pr(Y=k|X=x)}{Pr(Y=K|X=x)}\\right) & = \\log\\left(\\frac{\\pi_k f_k(x)}{\\pi_K f_K(x)}\\right) \\\\ & = \\log\\left(\\frac{\\pi_k \\exp(-1/2((x-\\mu_k)/\\sigma)^2)}{\\pi_K \\exp(-1/2((x-\\mu_K)/\\sigma)^2)}\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2} \\left(\\frac{x-\\mu_k}{\\sigma}\\right)^2 + \\frac{1}{2} \\left(\\frac{x-\\mu_K}{\\sigma}\\right)^2 \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} (x-\\mu_k)^2 + \\frac{1}{2\\sigma^2} (x-\\mu_K)^2 \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left((x-\\mu_k)^2 - (x-\\mu_K)^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left(x^2-2x\\mu_k+\\mu_k^2 - x^2 + 2x\\mu_K - \\mu_K^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left(2x(\\mu_K - \\mu_k) + \\mu_k^2 -\\mu_K^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{\\mu_k^2 -\\mu_K^2}{2\\sigma^2} + \\frac{x(\\mu_k - \\mu_K)}{\\sigma^2} \\end{align*}\\] Therefore, \\[a_k = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{\\mu_k^2 -\\mu_K^2}{2\\sigma^2}\\] and \\[b_k = (\\mu_k - \\mu_K) / \\sigma^2\\] 4.1.11 Question 11 ToDo Work out the detailed forms of \\(a_k\\), \\(b_{kj}\\), and \\(b_{kjl}\\) in (4.33). Your answer should involve \\(\\pi_k\\), \\(\\pi_K\\), \\(\\mu_k\\), \\(\\mu_K\\), \\(\\Sigma_k\\), and \\(\\Sigma_K\\). 4.1.12 Question 12 Suppose that you wish to classify an observation \\(X \\in \\mathbb{R}\\) into apples and oranges. You fit a logistic regression model and find that \\[ \\hat{Pr}(Y=orange|X=x) = \\frac{\\exp(\\hat\\beta_0 + \\hat\\beta_1x)}{1 + \\exp(\\hat\\beta_0 + \\hat\\beta_1x)} \\] Your friend fits a logistic regression model to the same data using the softmax formulation in (4.13), and finds that \\[ \\hat{Pr}(Y=orange|X=x) = \\frac{\\exp(\\hat\\alpha_{orange0} + \\hat\\alpha_{orange1}x)} {\\exp(\\hat\\alpha_{orange0} + \\hat\\alpha_{orange1}x) + \\exp(\\hat\\alpha_{apple0} + \\hat\\alpha_{apple1}x)} \\] What is the log odds of orange versus apple in your model? 
The log odds is just \\(\\hat\\beta_0 + \\hat\\beta_1x\\) What is the log odds of orange versus apple in your friend’s model? From 4.14, log odds of our friend’s model is: \\[ (\\hat\\alpha_{orange0} - \\hat\\alpha_{apple0}) + (\\hat\\alpha_{orange1} - \\hat\\alpha_{apple1})x \\] Suppose that in your model, \\(\\hat\\beta_0 = 2\\) and \\(\\hat\\beta = −1\\). What are the coefficient estimates in your friend’s model? Be as specific as possible. We can say that in our friend’s model \\(\\hat\\alpha_{orange0} - \\hat\\alpha_{apple0} = 2\\) and \\(\\hat\\alpha_{orange1} - \\hat\\alpha_{apple1} = -1\\). We are unable to know the specific value of each parameter however. Now suppose that you and your friend fit the same two models on a different data set. This time, your friend gets the coefficient estimates \\(\\hat\\alpha_{orange0} = 1.2\\), \\(\\hat\\alpha_{orange1} = −2\\), \\(\\hat\\alpha_{apple0} = 3\\), \\(\\hat\\alpha_{apple1} = 0.6\\). What are the coefficient estimates in your model? The coefficients in our model would be \\(\\hat\\beta_0 = 1.2 - 3 = -1.8\\) and \\(\\hat\\beta_1 = -2 - 0.6 = -2.6\\) Finally, suppose you apply both models from (d) to a data set with 2,000 test observations. What fraction of the time do you expect the predicted class labels from your model to agree with those from your friend’s model? Explain your answer. The models are identical with different parameterization so they should perfectly agree. 4.2 Applied 4.2.1 Question 13 This question should be answered using the Weekly data set, which is part of the ISLR2 package. This data is similar in nature to the Smarket data from this chapter’s lab, except that it contains 1,089 weekly returns for 21 years, from the beginning of 1990 to the end of 2010. Produce some numerical and graphical summaries of the Weekly data. Do there appear to be any patterns? library(MASS) library(class) library(tidyverse) library(corrplot) library(ISLR2) library(e1071) summary(Weekly) ## Year Lag1 Lag2 Lag3 ## Min. :1990 Min. :-18.1950 Min. :-18.1950 Min. :-18.1950 ## 1st Qu.:1995 1st Qu.: -1.1540 1st Qu.: -1.1540 1st Qu.: -1.1580 ## Median :2000 Median : 0.2410 Median : 0.2410 Median : 0.2410 ## Mean :2000 Mean : 0.1506 Mean : 0.1511 Mean : 0.1472 ## 3rd Qu.:2005 3rd Qu.: 1.4050 3rd Qu.: 1.4090 3rd Qu.: 1.4090 ## Max. :2010 Max. : 12.0260 Max. : 12.0260 Max. : 12.0260 ## Lag4 Lag5 Volume Today ## Min. :-18.1950 Min. :-18.1950 Min. :0.08747 Min. :-18.1950 ## 1st Qu.: -1.1580 1st Qu.: -1.1660 1st Qu.:0.33202 1st Qu.: -1.1540 ## Median : 0.2380 Median : 0.2340 Median :1.00268 Median : 0.2410 ## Mean : 0.1458 Mean : 0.1399 Mean :1.57462 Mean : 0.1499 ## 3rd Qu.: 1.4090 3rd Qu.: 1.4050 3rd Qu.:2.05373 3rd Qu.: 1.4050 ## Max. : 12.0260 Max. : 12.0260 Max. :9.32821 Max. : 12.0260 ## Direction ## Down:484 ## Up :605 ## ## ## ## corrplot(cor(Weekly[, -9]), type = "lower", diag = FALSE, method = "ellipse") Volume is strongly positively correlated with Year. Other correlations are week, but Lag1 is negatively correlated with Lag2 but positively correlated with Lag3. Use the full data set to perform a logistic regression with Direction as the response and the five lag variables plus Volume as predictors. Use the summary function to print the results. Do any of the predictors appear to be statistically significant? If so, which ones? 
fit <- glm( Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume, data = Weekly, family = binomial ) summary(fit) ## ## Call: ## glm(formula = Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + ## Volume, family = binomial, data = Weekly) ## ## Coefficients: ## Estimate Std. Error z value Pr(>|z|) ## (Intercept) 0.26686 0.08593 3.106 0.0019 ** ## Lag1 -0.04127 0.02641 -1.563 0.1181 ## Lag2 0.05844 0.02686 2.175 0.0296 * ## Lag3 -0.01606 0.02666 -0.602 0.5469 ## Lag4 -0.02779 0.02646 -1.050 0.2937 ## Lag5 -0.01447 0.02638 -0.549 0.5833 ## Volume -0.02274 0.03690 -0.616 0.5377 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 1496.2 on 1088 degrees of freedom ## Residual deviance: 1486.4 on 1082 degrees of freedom ## AIC: 1500.4 ## ## Number of Fisher Scoring iterations: 4 Lag2 is significant. Compute the confusion matrix and overall fraction of correct predictions. Explain what the confusion matrix is telling you about the types of mistakes made by logistic regression. contrasts(Weekly$Direction) ## Up ## Down 0 ## Up 1 pred <- predict(fit, type = "response") > 0.5 (t <- table(ifelse(pred, "Up (pred)", "Down (pred)"), Weekly$Direction)) ## ## Down Up ## Down (pred) 54 48 ## Up (pred) 430 557 sum(diag(t)) / sum(t) ## [1] 0.5610652 The overall fraction of correct predictions is 0.56. Although logistic regression correctly predicts upwards movements well, it incorrectly predicts most downwards movements as up. Now fit the logistic regression model using a training data period from 1990 to 2008, with Lag2 as the only predictor. Compute the confusion matrix and the overall fraction of correct predictions for the held out data (that is, the data from 2009 and 2010). train <- Weekly$Year < 2009 fit <- glm(Direction ~ Lag2, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 (t <- table(ifelse(pred, "Up (pred)", "Down (pred)"), Weekly[!train, ]$Direction)) ## ## Down Up ## Down (pred) 9 5 ## Up (pred) 34 56 sum(diag(t)) / sum(t) ## [1] 0.625 Repeat (d) using LDA. fit <- lda(Direction ~ Lag2, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 9 5 ## Up 34 56 sum(diag(t)) / sum(t) ## [1] 0.625 Repeat (d) using QDA. fit <- qda(Direction ~ Lag2, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 0 0 ## Up 43 61 sum(diag(t)) / sum(t) ## [1] 0.5865385 Repeat (d) using KNN with \\(K = 1\\). fit <- knn( Weekly[train, "Lag2", drop = FALSE], Weekly[!train, "Lag2", drop = FALSE], Weekly$Direction[train] ) (t <- table(fit, Weekly[!train, ]$Direction)) ## ## fit Down Up ## Down 21 30 ## Up 22 31 sum(diag(t)) / sum(t) ## [1] 0.5 Repeat (d) using naive Bayes. fit <- naiveBayes(Direction ~ Lag2, data = Smarket, subset = train) pred <- predict(fit, Weekly[!train, ], type = "class") (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 27 29 ## Up 16 32 sum(diag(t)) / sum(t) ## [1] 0.5673077 Which of these methods appears to provide the best results on this data? Logistic regression and LDA are the best performing. Experiment with different combinations of predictors, including possible transformations and interactions, for each of the methods. 
Report the variables, method, and associated confusion matrix that appears to provide the best results on the held out data. Note that you should also experiment with values for \\(K\\) in the KNN classifier. fit <- glm(Direction ~ Lag1, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5673077 fit <- glm(Direction ~ Lag3, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~ Lag1 * Lag2 * Lag3 * Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5961538 fit <- lda(Direction ~ Lag1 + Lag2 + Lag3 + Lag4,data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5769231 fit <- qda(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5192308 fit <- naiveBayes(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "class") mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5096154 set.seed(1) res <- sapply(1:30, function(k) { fit <- knn( Weekly[train, 2:4, drop = FALSE], Weekly[!train, 2:4, drop = FALSE], Weekly$Direction[train], k = k ) mean(fit == Weekly[!train, ]$Direction) }) plot(1:30, res, type = "o", xlab = "k", ylab = "Fraction correct") (k <- which.max(res)) ## [1] 26 fit <- knn( Weekly[train, 2:4, drop = FALSE], Weekly[!train, 2:4, drop = FALSE], Weekly$Direction[train], k = k ) table(fit, Weekly[!train, ]$Direction) ## ## fit Down Up ## Down 23 18 ## Up 20 43 mean(fit == Weekly[!train, ]$Direction) ## [1] 0.6346154 KNN using the first 3 Lag variables performs marginally better than logistic regression with Lag2 if we tune \\(k\\) to be \\(k = 26\\). 4.2.2 Question 14 In this problem, you will develop a model to predict whether a given car gets high or low gas mileage based on the Auto data set. Create a binary variable, mpg01, that contains a 1 if mpg contains a value above its median, and a 0 if mpg contains a value below its median. You can compute the median using the median() function. Note you may find it helpful to use the data.frame() function to create a single data set containing both mpg01 and the other Auto variables. x <- cbind(Auto[, -1], data.frame("mpg01" = Auto$mpg > median(Auto$mpg))) Explore the data graphically in order to investigate the association between mpg01 and the other features. Which of the other features seem most likely to be useful in predicting mpg01? Scatterplots and boxplots may be useful tools to answer this question. Describe your findings. 
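Alongside the plots below, a quick numerical screen of the associations can also help (a sketch; it treats mpg01 as 0/1 and uses the seven numeric columns of x):

# absolute correlation of each numeric predictor with the binary mpg01 indicator
sort(abs(cor(x[, 1:7], as.numeric(x$mpg01)))[, 1], decreasing = TRUE)

Predictors near the top of this ranking are natural candidates for the models fit in the later parts.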
par(mfrow = c(2, 4)) for (i in 1:7) hist(x[, i], breaks = 20, main = colnames(x)[i]) par(mfrow = c(2, 4)) for (i in 1:7) boxplot(x[, i] ~ x$mpg01, main = colnames(x)[i]) pairs(x[, 1:7]) Most variables show an association with the mpg01 category, and several variables are collinear. Split the data into a training set and a test set. set.seed(1) train <- sample(seq_len(nrow(x)), nrow(x) * 2/3) Perform LDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? sort(sapply(1:7, function(i) { setNames(abs(t.test(x[, i] ~ x$mpg01)$statistic), colnames(x)[i]) })) ## acceleration year origin horsepower displacement weight ## 7.302430 9.403221 11.824099 17.681939 22.632004 22.932777 ## cylinders ## 23.035328 fit <- lda(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "response")$class mean(pred != x[-train, ]$mpg01) ## [1] 0.1068702 Perform QDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- qda(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "response")$class mean(pred != x[-train, ]$mpg01) ## [1] 0.09923664 Perform logistic regression on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- glm(mpg01 ~ cylinders + weight + displacement, data = x[train, ], family = binomial) pred <- predict(fit, x[-train, ], type = "response") > 0.5 mean(pred != x[-train, ]$mpg01) ## [1] 0.1145038 Perform naive Bayes on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- naiveBayes(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "class") mean(pred != x[-train, ]$mpg01) ## [1] 0.09923664 Perform KNN on the training data, with several values of \\(K\\), in order to predict mpg01. Use only the variables that seemed most associated with mpg01 in (b). What test errors do you obtain? Which value of \\(K\\) seems to perform the best on this data set? res <- sapply(1:50, function(k) { fit <- knn(x[train, c(1, 4, 2)], x[-train, c(1, 4, 2)], x$mpg01[train], k = k) mean(fit != x[-train, ]$mpg01) }) names(res) <- 1:50 plot(res, type = "o") res[which.min(res)] ## 3 ## 0.1068702 For the models tested here, \\(k = 3\\) appears to perform best. QDA has a lower error rate overall, performing slightly better than LDA. 4.2.3 Question 15 This problem involves writing functions. Write a function, Power(), that prints out the result of raising 2 to the 3rd power. In other words, your function should compute \\(2^3\\) and print out the results. Hint: Recall that x^a raises x to the power a. Use the print() function to output the result. Power <- function() print(2^3) Create a new function, Power2(), that allows you to pass any two numbers, x and a, and prints out the value of x^a. You can do this by beginning your function with the line > Power2=function(x,a) { You should be able to call your function by entering, for instance, > Power2(3, 8) on the command line. This should output the value of \\(3^8\\), namely, 6,561. 
Power2 <- function(x, a) print(x^a) Using the Power2() function that you just wrote, compute \\(10^3\\), \\(8^{17}\\), and \\(131^3\\). c(Power2(10, 3), Power2(8, 17), Power2(131, 3)) ## [1] 1000 ## [1] 2.2518e+15 ## [1] 2248091 ## [1] 1.000000e+03 2.251800e+15 2.248091e+06 Now create a new function, Power3(), that actually returns the result x^a as an R object, rather than simply printing it to the screen. That is, if you store the value x^a in an object called result within your function, then you can simply return() this result, using the following line: > return(result) The line above should be the last line in your function, before the } symbol. Power3 <- function(x, a) { result <- x^a return(result) } Now using the Power3() function, create a plot of \\(f(x) = x^2\\). The \\(x\\)-axis should display a range of integers from 1 to 10, and the \\(y\\)-axis should display \\(x^2\\). Label the axes appropriately, and use an appropriate title for the figure. Consider displaying either the \\(x\\)-axis, the \\(y\\)-axis, or both on the log-scale. You can do this by using log = \"x\", log = \"y\", or log = \"xy\" as arguments to the plot() function. plot(1:10, Power3(1:10, 2), xlab = "x", ylab = expression(paste("x"^"2")), log = "y" ) Create a function, PlotPower(), that allows you to create a plot of x against x^a for a fixed a and for a range of values of x. For instance, if you call > PlotPower(1:10, 3) then a plot should be created with an \\(x\\)-axis taking on values \\(1,2,...,10\\), and a \\(y\\)-axis taking on values \\(1^3,2^3,...,10^3\\). PlotPower <- function(x, a, log = "y") { plot(x, Power3(x, a), xlab = "x", ylab = substitute("x"^a, list(a = a)), log = log ) } PlotPower(1:10, 3) 4.2.4 Question 13 Using the Boston data set, fit classification models in order to predict whether a given census tract has a crime rate above or below the median. Explore logistic regression, LDA, naive Bayes and KNN models using various sub-sets of the predictors. Describe your findings. Hint: You will have to create the response variable yourself, using the variables that are contained in the Boston data set. x <- cbind( ISLR2::Boston[, -1], data.frame("highcrim" = Boston$crim > median(Boston$crim)) ) set.seed(1) train <- sample(seq_len(nrow(x)), nrow(x) * 2/3) We can find the most associated variables by performing wilcox tests. ord <- order(sapply(1:12, function(i) { p <- wilcox.test(as.numeric(x[train, i]) ~ x[train, ]$highcrim)$p.value setNames(log10(p), colnames(x)[i]) })) ord <- names(x)[ord] ord ## [1] "nox" "dis" "indus" "tax" "age" "rad" "zn" ## [8] "lstat" "medv" "ptratio" "rm" "chas" Variables nox (nitrogen oxides concentration) followed by dis (distance to employment center) appear to be most associated with high crime. Let’s reorder columns by those most associated with highcrim (in the training data) x <- x[, c(ord, "highcrim")] Let’s look at univariate associations with highcrim (in the training data) x[train, ] |> pivot_longer(!highcrim) |> mutate(name = factor(name, levels = ord)) |> ggplot(aes(highcrim, value)) + geom_boxplot() + facet_wrap(~name, scale = "free") Fit lda, logistic regression, naive Bayes and KNN models (with k = 1..50) for a set of specific predictors and return the error rate. We fit models using increasing numbers of predictors: column 1, then columns 1 and 2 etc. 
fit_models <- function(cols, k_vals = 1:50) { dat_train <- x[train, cols, drop = FALSE] dat_test <- x[-train, cols, drop = FALSE] fit <- lda(x$highcrim[train] ~ ., data = dat_train) pred <- predict(fit, dat_test, type = "response")$class lda_err <- mean(pred != x$highcrim[-train]) fit <- glm(x$highcrim[train] ~ ., data = dat_train, family = binomial) pred <- predict(fit, dat_test, type = "response") > 0.5 logreg_err <- mean(pred != x$highcrim[-train]) fit <- naiveBayes(x$highcrim[train] ~ ., data = dat_train) pred <- predict(fit, dat_test, type = "class") nb_err <- mean(pred != x$highcrim[-train]) res <- sapply(k_vals, function(k) { fit <- knn(dat_train, dat_test, x$highcrim[train], k = k) mean(fit != x$highcrim[-train]) }) knn_err <- min(res) c("LDA" = lda_err, "LR" = logreg_err, "NB" = nb_err, "KNN" = knn_err) } res <- sapply(1:12, function(max) fit_models(1:max)) res <- as_tibble(t(res)) res$n_var <- 1:12 pivot_longer(res, cols = !n_var) |> ggplot(aes(n_var, value, col = name)) + geom_line() + xlab("Number of predictors") + ylab("Error rate") KNN appears to perform better (if we tune \\(k\\)) for all numbers of predictors. fit <- knn( x[train, "nox", drop = FALSE], x[-train, "nox", drop = FALSE], x$highcrim[train], k = 1 ) table(fit, x[-train, ]$highcrim) ## ## fit FALSE TRUE ## FALSE 78 2 ## TRUE 3 86 mean(fit != x[-train, ]$highcrim) * 100 ## [1] 2.95858 Surprisingly, the best model (with an error rate of <5%) uses \\(k = 1\\) and assigns crime rate categories based on the town with the single most similar nitrogen oxide concentration (nox). This might be, for example, because nearby towns have similar crime rates, and we can obtain good predictions by predicting crime rate based on a nearby town. But what if we only consider \\(k = 20\\). res <- sapply(1:12, function(max) fit_models(1:max, k_vals = 20)) res <- as_tibble(t(res)) res$n_var <- 1:12 pivot_longer(res, cols = !n_var) |> ggplot(aes(n_var, value, col = name)) + geom_line() + xlab("Number of predictors") + ylab("Error rate") KNN still performs best with a single predictor (nox), but logistic regression with 12 predictors also performs well and has an error rate of ~12%. vars <- names(x)[1:12] dat_train <- x[train, vars] dat_test <- x[-train, vars] fit <- glm(x$highcrim[train] ~ ., data = dat_train, family = binomial) pred <- predict(fit, dat_test, type = "response") > 0.5 table(pred, x[-train, ]$highcrim) ## ## pred FALSE TRUE ## FALSE 70 9 ## TRUE 11 79 mean(pred != x$highcrim[-train]) * 100 ## [1] 11.83432 summary(fit) ## ## Call: ## glm(formula = x$highcrim[train] ~ ., family = binomial, data = dat_train) ## ## Coefficients: ## Estimate Std. Error z value Pr(>|z|) ## (Intercept) -44.525356 7.935621 -5.611 2.01e-08 *** ## nox 55.062428 10.281556 5.355 8.53e-08 *** ## dis 1.080847 0.304084 3.554 0.000379 *** ## indus -0.067493 0.058547 -1.153 0.248997 ## tax -0.005336 0.003138 -1.700 0.089060 . ## age 0.020965 0.014190 1.477 0.139556 ## rad 0.678196 0.192193 3.529 0.000418 *** ## zn -0.099558 0.045914 -2.168 0.030134 * ## lstat 0.134035 0.058623 2.286 0.022231 * ## medv 0.213114 0.088922 2.397 0.016547 * ## ptratio 0.294396 0.155285 1.896 0.057981 . ## rm -0.518115 0.896423 -0.578 0.563278 ## chas 0.139557 0.798632 0.175 0.861280 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 467.04 on 336 degrees of freedom ## Residual deviance: 135.80 on 324 degrees of freedom ## AIC: 161.8 ## ## Number of Fisher Scoring iterations: 9 "],["resampling-methods.html", "5 Resampling Methods 5.1 Conceptual 5.2 Applied", " 5 Resampling Methods 5.1 Conceptual 5.1.1 Question 1 Using basic statistical properties of the variance, as well as single- variable calculus, derive (5.6). In other words, prove that \\(\\alpha\\) given by (5.6) does indeed minimize \\(Var(\\alpha X + (1 - \\alpha)Y)\\). Equation 5.6 is: \\[ \\alpha = \\frac{\\sigma^2_Y - \\sigma_{XY}}{\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}} \\] Remember that: \\[ Var(aX) = a^2Var(X), \\\\ \\mathrm{Var}(X + Y) = \\mathrm{Var}(X) + \\mathrm{Var}(Y) + 2\\mathrm{Cov}(X,Y), \\\\ \\mathrm{Cov}(aX, bY) = ab\\mathrm{Cov}(X, Y) \\] If we define \\(\\sigma^2_X = \\mathrm{Var}(X)\\), \\(\\sigma^2_Y = \\mathrm{Var}(Y)\\) and \\(\\sigma_{XY} = \\mathrm{Cov}(X, Y)\\) \\[\\begin{align} Var(\\alpha X + (1 - \\alpha)Y) &= \\alpha^2\\sigma^2_X + (1-\\alpha)^2\\sigma^2_Y + 2\\alpha(1 - \\alpha)\\sigma_{XY} \\\\ &= \\alpha^2\\sigma^2_X + \\sigma^2_Y - 2\\alpha\\sigma^2_Y + \\alpha^2\\sigma^2_Y + 2\\alpha\\sigma_{XY} - 2\\alpha^2\\sigma_{XY} \\end{align}\\] Now we want to find when the rate of change of this function is 0 with respect to \\(\\alpha\\), so we compute the partial derivative, set to 0 and solve. \\[ \\frac{\\partial}{\\partial{\\alpha}} = 2\\alpha\\sigma^2_X - 2\\sigma^2_Y + 2\\alpha\\sigma^2_Y + 2\\sigma_{XY} - 4\\alpha\\sigma_{XY} = 0 \\] Moving \\(\\alpha\\) terms to the same side: \\[ \\alpha\\sigma^2_X + \\alpha\\sigma^2_Y - 2\\alpha\\sigma_{XY} = \\sigma^2_Y - \\sigma_{XY} \\] \\[ \\alpha = \\frac{\\sigma^2_Y - \\sigma_{XY}}{\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}} \\] We should also show that this is a minimum, so that the second partial derivative wrt \\(\\alpha\\) is \\(>= 0\\). \\[\\begin{align} \\frac{\\partial^2}{\\partial{\\alpha^2}} &= 2\\sigma^2_X + 2\\sigma^2_Y - 4\\sigma_{XY} \\\\ &= 2(\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}) \\\\ &= 2\\mathrm{Var}(X - Y) \\end{align}\\] Since variance is positive, then this must be positive. 5.1.2 Question 2 We will now derive the probability that a given observation is part of a bootstrap sample. Suppose that we obtain a bootstrap sample from a set of n observations. What is the probability that the first bootstrap observation is not the \\(j\\)th observation from the original sample? Justify your answer. This is 1 - probability that it is the \\(j\\)th = \\(1 - 1/n\\). What is the probability that the second bootstrap observation is not the \\(j\\)th observation from the original sample? Since each bootstrap observation is a random sample, this probability is the same (\\(1 - 1/n\\)). Argue that the probability that the \\(j\\)th observation is not in the bootstrap sample is \\((1 - 1/n)^n\\). For the \\(j\\)th observation to not be in the sample, it would have to not be picked for each of \\(n\\) positions, so not picked for \\(1, 2, ..., n\\), thus the probability is \\((1 - 1/n)^n\\) When \\(n = 5\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample? n <- 5 1 - (1 - 1 / n)^n ## [1] 0.67232 \\(p = 0.67\\) When \\(n = 100\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample? 
n <- 100 1 - (1 - 1 / n)^n ## [1] 0.6339677 \\(p = 0.64\\) When \\(n = 10,000\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample? n <- 100000 1 - (1 - 1 / n)^n ## [1] 0.6321224 \\(p = 0.63\\) Create a plot that displays, for each integer value of \\(n\\) from 1 to 100,000, the probability that the \\(j\\)th observation is in the bootstrap sample. Comment on what you observe. x <- sapply(1:100000, function(n) 1 - (1 - 1 / n)^n) plot(x, log = "x", type = "o") The probability rapidly approaches 0.63 with increasing \\(n\\). Note that \\[e^x = \\lim_{n \\to \\infty} \\left(1 + \\frac{x}{n}\\right)^n,\\] so with \\(x = -1\\), we can see that our limit is \\(1 - e^{-1} = 1 - 1/e\\). We will now investigate numerically the probability that a bootstrap sample of size \\(n = 100\\) contains the \\(j\\)th observation. Here \\(j = 4\\). We repeatedly create bootstrap samples, and each time we record whether or not the fourth observation is contained in the bootstrap sample. > store <- rep (NA, 10000) > for (i in 1:10000) { store[i] <- sum(sample(1:100, rep = TRUE) == 4) > 0 } > mean(store) Comment on the results obtained. store <- replicate(10000, sum(sample(1:100, replace = TRUE) == 4) > 0) mean(store) ## [1] 0.6308 The probability of including \\(4\\) when resampling numbers \\(1...100\\) is close to \\(1 - (1 - 1/100)^{100}\\). 5.1.3 Question 3 We now review \\(k\\)-fold cross-validation. Explain how \\(k\\)-fold cross-validation is implemented. We divide our data into \\(k\\) (approximately equal) subsets, and then generate predictions for each subset in turn, training on the other \\(k - 1\\) subsets combined. What are the advantages and disadvantages of \\(k\\)-fold cross-validation relative to: The validation set approach? LOOCV? When using a validation set, we can only train on a small portion of the data as we must reserve the rest for validation. As a result it can overestimate the test error rate (assuming we then train using the complete data for future prediction). It is also sensitive to which observations are included in train vs. test. It is, however, low cost in terms of processing time (as we only have to fit one model). When using LOOCV, we can train on \\(n-1\\) observations; however, the trained models we generate each differ only by the inclusion (and exclusion) of a single observation. As a result, LOOCV can have high variance (the models fit will be similar, and might be quite different to what we would obtain with a different data set). LOOCV is also costly in terms of processing time. 5.1.4 Question 4 Suppose that we use some statistical learning method to make a prediction for the response \\(Y\\) for a particular value of the predictor \\(X\\). Carefully describe how we might estimate the standard deviation of our prediction. We could address this with bootstrapping. Our procedure would be to (jointly) resample the \\(Y\\) and \\(X\\) variables and fit our model many times. For each model we could obtain a summary of our prediction and calculate the standard deviation over bootstrapped samples. 5.2 Applied 5.2.1 Question 5 In Chapter 4, we used logistic regression to predict the probability of default using income and balance on the Default data set. We will now estimate the test error of this logistic regression model using the validation set approach. Do not forget to set a random seed before beginning your analysis. Fit a logistic regression model that uses income and balance to predict default. 
library(ISLR2) set.seed(42) fit <- glm(default ~ income + balance, data = Default, family = "binomial") Using the validation set approach, estimate the test error of this model. In order to do this, you must perform the following steps: Split the sample set into a training set and a validation set. Fit a multiple logistic regression model using only the training observations. Obtain a prediction of default status for each individual in the validation set by computing the posterior probability of default for that individual, and classifying the individual to the default category if the posterior probability is greater than 0.5. Compute the validation set error, which is the fraction of the observations in the validation set that are misclassified. train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") table(pred, Default$default[-train]) ## ## pred No Yes ## No 4817 110 ## Yes 20 53 mean(pred != Default$default[-train]) ## [1] 0.026 Repeat the process in (b) three times, using three different splits of the observations into a training set and a validation set. Comment on the results obtained. replicate(3, { train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") mean(pred != Default$default[-train]) }) ## [1] 0.0260 0.0294 0.0258 The results obtained are variable and depend on the samples allocated to training vs. test. Now consider a logistic regression model that predicts the probability of default using income, balance, and a dummy variable for student. Estimate the test error for this model using the validation set approach. Comment on whether or not including a dummy variable for student leads to a reduction in the test error rate. replicate(3, { train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance + student, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") mean(pred != Default$default[-train]) }) ## [1] 0.0278 0.0256 0.0250 Including student does not seem to make a substantial improvement to the test error. 5.2.2 Question 6 We continue to consider the use of a logistic regression model to predict the probability of default using income and balance on the Default data set. In particular, we will now compute estimates for the standard errors of the income and balance logistic regression coefficients in two different ways: (1) using the bootstrap, and (2) using the standard formula for computing the standard errors in the glm() function. Do not forget to set a random seed before beginning your analysis. Using the summary() and glm() functions, determine the estimated standard errors for the coefficients associated with income and balance in a multiple logistic regression model that uses both predictors. fit <- glm(default ~ income + balance, data = Default, family = "binomial") summary(fit) ## ## Call: ## glm(formula = default ~ income + balance, family = "binomial", ## data = Default) ## ## Coefficients: ## Estimate Std. 
Error z value Pr(>|z|) ## (Intercept) -1.154e+01 4.348e-01 -26.545 < 2e-16 *** ## income 2.081e-05 4.985e-06 4.174 2.99e-05 *** ## balance 5.647e-03 2.274e-04 24.836 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 2920.6 on 9999 degrees of freedom ## Residual deviance: 1579.0 on 9997 degrees of freedom ## AIC: 1585 ## ## Number of Fisher Scoring iterations: 8 The estimated standard errors from the glm() summary are 5.0e-6 for \\(\\beta_1\\) (income) and 2.3e-4 for \\(\\beta_2\\) (balance). Write a function, boot.fn(), that takes as input the Default data set as well as an index of the observations, and that outputs the coefficient estimates for income and balance in the multiple logistic regression model. boot.fn <- function(x, i) { fit <- glm(default ~ income + balance, data = x[i, ], family = "binomial") coef(fit)[-1] } Use the boot() function together with your boot.fn() function to estimate the standard errors of the logistic regression coefficients for income and balance. library(boot) set.seed(42) boot(Default, boot.fn, R = 1000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Default, statistic = boot.fn, R = 1000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 2.080898e-05 2.737444e-08 5.073444e-06 ## t2* 5.647103e-03 1.176249e-05 2.299133e-04 Comment on the estimated standard errors obtained using the glm() function and using your bootstrap function. The standard errors obtained by bootstrapping are similar to those estimated by glm(). 5.2.3 Question 7 In Sections 5.3.2 and 5.3.3, we saw that the cv.glm() function can be used in order to compute the LOOCV test error estimate. Alternatively, one could compute those quantities using just the glm() and predict.glm() functions, and a for loop. You will now take this approach in order to compute the LOOCV error for a simple logistic regression model on the Weekly data set. Recall that in the context of classification problems, the LOOCV error is given in (5.4). Fit a logistic regression model that predicts Direction using Lag1 and Lag2. fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly, family = "binomial") Fit a logistic regression model that predicts Direction using Lag1 and Lag2 using all but the first observation. fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly[-1, ], family = "binomial") Use the model from (b) to predict the direction of the first observation. You can do this by predicting that the first observation will go up if \\(P(\\text{Direction} = \\text{Up} \\mid \\text{Lag1}, \\text{Lag2}) > 0.5\\). Was this observation correctly classified? predict(fit, newdata = Weekly[1, , drop = FALSE], type = "response") > 0.5 ## 1 ## TRUE Yes, the observation was correctly classified. Write a for loop from \\(i = 1\\) to \\(i = n\\), where \\(n\\) is the number of observations in the data set, that performs each of the following steps: Fit a logistic regression model using all but the \\(i\\)th observation to predict Direction using Lag1 and Lag2. Compute the posterior probability of the market moving up for the \\(i\\)th observation. Use the posterior probability for the \\(i\\)th observation in order to predict whether or not the market moves up. Determine whether or not an error was made in predicting the direction for the \\(i\\)th observation. If an error was made, then indicate this as a 1, and otherwise indicate it as a 0. 
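The for loop below implements these steps directly. As a cross-check (a sketch; slow, since the model is refit once per observation), the same LOOCV estimate can be obtained from boot::cv.glm with a 0/1 misclassification cost:

library(boot)
fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly, family = "binomial")
# cost() receives the observed 0/1 response and the leave-one-out fitted probabilities
cost <- function(r, pi) mean(abs(r - (pi > 0.5)))
cv.glm(Weekly, fit, cost = cost)$delta[1] # should agree with the loop-based estimate below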
error <- numeric(nrow(Weekly)) for (i in 1:nrow(Weekly)) { fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly[-i, ], family = "binomial") p <- predict(fit, newdata = Weekly[i, , drop = FALSE], type = "response") > 0.5 error[i] <- ifelse(p, "Down", "Up") == Weekly$Direction[i] } Take the average of the \\(n\\) numbers obtained in (d) in order to obtain the LOOCV estimate for the test error. Comment on the results. mean(error) ## [1] 0.4499541 The LOOCV test error rate is 45% which implies that our predictions are marginally more often correct than not. 5.2.4 Question 8 We will now perform cross-validation on a simulated data set. Generate a simulated data set as follows: > set.seed(1) > x <- rnorm(100) > y <- x - 2 *x^2 + rnorm(100) In this data set, what is \\(n\\) and what is \\(p\\)? Write out the model used to generate the data in equation form. set.seed(1) x <- rnorm(100) y <- x - 2 * x^2 + rnorm(100) \\(n\\) is 100 and \\(p\\) is 1 (there are 100 observations and \\(y\\) is predicted with a single variable \\(x\\)). The model equation is: \\[y = -2x^2 + x + \\epsilon\\]. Create a scatterplot of \\(X\\) against \\(Y\\). Comment on what you find. plot(x, y) \\(y\\) has a (negative) quadratic relationship with \\(x\\). Set a random seed, and then compute the LOOCV errors that result from fitting the following four models using least squares: \\(Y = \\beta_0 + \\beta_1 X + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\beta_3 X^3 + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\beta_3 X^3 + \\beta_4 X^4 + \\epsilon\\). Note you may find it helpful to use the data.frame() function to create a single data set containing both \\(X\\) and \\(Y\\). library(boot) set.seed(42) dat <- data.frame(x, y) sapply(1:4, function(i) cv.glm(dat, glm(y ~ poly(x, i)))$delta[1]) ## [1] 7.2881616 0.9374236 0.9566218 0.9539049 Repeat (c) using another random seed, and report your results. Are your results the same as what you got in (c)? Why? set.seed(43) dat <- data.frame(x, y) sapply(1:4, function(i) cv.glm(dat, glm(y ~ poly(x, i)))$delta[1]) ## [1] 7.2881616 0.9374236 0.9566218 0.9539049 The results are the same because we are using LOOCV. When doing this, the model is fit leaving each one of the observations out in turn, and thus there is no stochasticity involved. Which of the models in (c) had the smallest LOOCV error? Is this what you expected? Explain your answer. The second model had the smallest LOOCV. This what would be expected since the model to generate the data was quadratic and we are measuring the test (rather than training) error rate to evaluate performance. Comment on the statistical significance of the coefficient estimates that results from fitting each of the models in (c) using least squares. Do these results agree with the conclusions drawn based on the cross-validation results? for (i in 1:4) printCoefmat(coef(summary(glm(y ~ poly(x, i), data = dat)))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.55002 0.26001 -5.9613 3.954e-08 *** ## poly(x, i) 6.18883 2.60014 2.3802 0.01924 * ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.550023 0.095803 -16.1792 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.958032 6.4599 4.185e-09 *** ## poly(x, i)2 -23.948305 0.958032 -24.9974 < 2.2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. 
Error t value Pr(>|t|) ## (Intercept) -1.550023 0.096263 -16.1019 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.962632 6.4291 4.972e-09 *** ## poly(x, i)2 -23.948305 0.962632 -24.8779 < 2.2e-16 *** ## poly(x, i)3 0.264106 0.962632 0.2744 0.7844 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.550023 0.095905 -16.1620 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.959051 6.4531 4.591e-09 *** ## poly(x, i)2 -23.948305 0.959051 -24.9708 < 2.2e-16 *** ## poly(x, i)3 0.264106 0.959051 0.2754 0.7836 ## poly(x, i)4 1.257095 0.959051 1.3108 0.1931 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 We can see that the coefficients in the first model are not highly significant, but all terms (\\(\\beta_0, \\beta_1\\) and \\(\\beta_2\\)) are in the quadratic model. After this, subsequent \\(\\beta_n\\) terms are not significant. Therefore, these results agree with those from cross-validation. 5.2.5 Question 9 We will now consider the Boston housing data set, from the ISLR2 library. Based on this data set, provide an estimate for the population mean of medv. Call this estimate \\(\\hat\\mu\\). (mu <- mean(Boston$medv)) ## [1] 22.53281 Provide an estimate of the standard error of \\(\\hat\\mu\\). Interpret this result. Hint: We can compute the standard error of the sample mean by dividing the sample standard deviation by the square root of the number of observations. sd(Boston$medv) / sqrt(length(Boston$medv)) ## [1] 0.4088611 Now estimate the standard error of \\(\\hat\\mu\\) using the bootstrap. How does this compare to your answer from (b)? set.seed(42) (bs <- boot(Boston$medv, function(v, i) mean(v[i]), 10000)) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) mean(v[i]), ## R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 22.53281 0.002175751 0.4029139 The standard error using the bootstrap (0.403) is very close to that obtained from the formula above (0.409). Based on your bootstrap estimate from (c), provide a 95% confidence interval for the mean of medv. Compare it to the results obtained using t.test(Boston$medv). Hint: You can approximate a 95% confidence interval using the formula \\([\\hat\\mu - 2SE(\\hat\\mu), \\hat\\mu + 2SE(\\hat\\mu)].\\) se <- sd(bs$t) c(mu - 2 * se, mu + 2 * se) ## [1] 21.72698 23.33863 Based on this data set, provide an estimate, \\(\\hat\\mu_{med}\\), for the median value of medv in the population. median(Boston$medv) ## [1] 21.2 We now would like to estimate the standard error of \\(\\hat\\mu_{med}\\). Unfortunately, there is no simple formula for computing the standard error of the median. Instead, estimate the standard error of the median using the bootstrap. Comment on your findings. set.seed(42) boot(Boston$medv, function(v, i) median(v[i]), 10000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) median(v[i]), ## R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 21.2 -0.01331 0.3744634 The estimated standard error of the median is 0.374. This is lower than the standard error of the mean. Based on this data set, provide an estimate for the tenth percentile of medv in Boston census tracts. Call this quantity \\(\\hat\\mu_{0.1}\\). (You can use the quantile() function.) quantile(Boston$medv, 0.1) ## 10% ## 12.75 Use the bootstrap to estimate the standard error of \\(\\hat\\mu_{0.1}\\). 
Comment on your findings. set.seed(42) boot(Boston$medv, function(v, i) quantile(v[i], 0.1), 10000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) quantile(v[i], ## 0.1), R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 12.75 0.013405 0.497298 We get a standard error of ~0.5. This is higher than the standard error of the median. Nevertheless, the standard error is quite small, so we can be fairly confident about the value of the 10th percentile. 6 Linear Model Selection and Regularization 6.1 Conceptual 6.1.1 Question 1 We perform best subset, forward stepwise, and backward stepwise selection on a single data set. For each approach, we obtain \\(p + 1\\) models, containing \\(0, 1, 2, ..., p\\) predictors. Explain your answers: Which of the three models with \\(k\\) predictors has the smallest training RSS? Best subset considers the most models (all possible combinations of \\(p\\) predictors are considered), therefore this will give the smallest training RSS (it will at least consider all possibilities covered by forward and backward stepwise selection). However, all three approaches are expected to give similar if not identical results in practice. Which of the three models with \\(k\\) predictors has the smallest test RSS? We cannot tell which model will perform best on the test RSS. The answer will depend on the tradeoff between fitting to the data and overfitting. True or False: The predictors in the \\(k\\)-variable model identified by forward stepwise are a subset of the predictors in the (\\(k+1\\))-variable model identified by forward stepwise selection. True. Forward stepwise selection retains all features identified in previous models as \\(k\\) is increased. The predictors in the \\(k\\)-variable model identified by backward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by backward stepwise selection. True. Backward stepwise selection removes features one by one as \\(k\\) is decreased. The predictors in the \\(k\\)-variable model identified by backward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by forward stepwise selection. False. Forward and backward stepwise selection can identify different combinations of variables due to differing algorithms. The predictors in the \\(k\\)-variable model identified by forward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by backward stepwise selection. False. Forward and backward stepwise selection can identify different combinations of variables due to differing algorithms. The predictors in the \\(k\\)-variable model identified by best subset are a subset of the predictors in the \\((k+1)\\)-variable model identified by best subset selection. False. Best subset selection can identify different combinations of variables for each \\(k\\) by considering all possible models. 6.1.2 Question 2 For parts (a) through (c), indicate which of i. through iv. is correct. Justify your answer. The lasso, relative to least squares, is: More flexible and hence will give improved prediction accuracy when its increase in bias is less than its decrease in variance. More flexible and hence will give improved prediction accuracy when its increase in variance is less than its decrease in bias. 
Less flexible and hence will give improved prediction accuracy when its increase in bias is less than its decrease in variance. Less flexible and hence will give improved prediction accuracy when its increase in variance is less than its decrease in bias. By using shrinkage, lasso can reduce the number of predictors so is less flexible. As a result, it will lead to an increase in bias by approximating the true relationship. We hope that this increase is small but that we dramatically reduce variance (i.e. the difference we would see in the model fit between different sets of training data). Repeat (a) for ridge regression relative to least squares. The same is true of ridge regression—shrinkage results in a less flexible model and can reduce variance. Repeat (a) for non-linear methods relative to least squares. Non-linear methods can be more flexible. They can perform better as long as they don’t substantially increase variance. 6.1.3 Question 3 Suppose we estimate the regression coefficients in a linear regression model by minimizing: \\[ \\sum_{i=1}^n\\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 \\textrm{subject to} \\sum_{j=1}^p|\\beta_j| \\le s \\] for a particular value of \\(s\\). For parts (a) through (e), indicate which of i. through v. is correct. Justify your answer. As we increase \\(s\\) from 0, the training RSS will: Increase initially, and then eventually start decreasing in an inverted U shape. Decrease initially, and then eventually start increasing in a U shape. Steadily increase. Steadily decrease. Remain constant. As \\(s\\) increases, the model becomes more flexible (the sum of absolute coefficients can be higher). With more flexible models, training RSS will always decrease. Repeat (a) for test RSS. With more flexible models, test RSS will decrease (as the fit improves) and will then increase due to overfitting (high variance). Repeat (a) for variance. As \\(s\\) increases, the model becomes more flexible so variance will increase. Repeat (a) for (squared) bias. As \\(s\\) increases, the model becomes more flexible so bias will decrease. Repeat (a) for the irreducible error. The irreducible error is unchanged. 6.1.4 Question 4 Suppose we estimate the regression coefficients in a linear regression model by minimizing \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] for a particular value of \\(\\lambda\\). For parts (a) through (e), indicate which of i. through v. is correct. Justify your answer. As we increase \\(\\lambda\\) from 0, the training RSS will: Increase initially, and then eventually start decreasing in an inverted U shape. Decrease initially, and then eventually start increasing in a U shape. Steadily increase. Steadily decrease. Remain constant. As \\(\\lambda\\) is increased, more weight is placed on the sum of squared coefficients and so the model becomes less flexible. As a result, training RSS must increase. Repeat (a) for test RSS. As \\(\\lambda\\) increases, flexibility decreases so test RSS will decrease (variance decreases) but will then increase (as bias increases). Repeat (a) for variance. Steadily decrease. Repeat (a) for (squared) bias. Steadily increase. Repeat (a) for the irreducible error. The irreducible error is unchanged. 6.1.5 Question 5 It is well-known that ridge regression tends to give similar coefficient values to correlated variables, whereas the lasso may give quite different coefficient values to correlated variables. 
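This behaviour can be seen empirically before working through the simple setting below. A sketch with glmnet on two almost identical predictors (the simulated data and the value of lambda are illustrative):

library(glmnet)
set.seed(1)
x1 <- rnorm(100)
x2 <- x1 + rnorm(100, sd = 0.01) # x2 is nearly a copy of x1
y <- x1 + rnorm(100)
X <- cbind(x1, x2)
coef(glmnet(X, y, alpha = 0, lambda = 0.1)) # ridge: the coefficient is split roughly equally
coef(glmnet(X, y, alpha = 1, lambda = 0.1)) # lasso: tends to keep one predictor and drop the other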
We will now explore this property in a very simple setting. Suppose that \\(n = 2, p = 2, x_{11} = x_{12}, x_{21} = x_{22}\\). Furthermore, suppose that \\(y_1 + y_2 =0\\) and \\(x_{11} + x_{21} = 0\\) and \\(x_{12} + x_{22} = 0\\), so that the estimate for the intercept in a least squares, ridge regression, or lasso model is zero: \\(\\hat{\\beta}_0 = 0\\). Write out the ridge regression optimization problem in this setting. We are trying to minimize: \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] We can ignore \\(\\beta_0\\) and can expand the sums since there’s only two terms. Additionally, we can define \\(x_1 = x_{11} = x_{12}\\) and \\(x_2 = x_{21} = x_{22}\\). We then need to minimize \\[\\begin{align} f = & (y_1 - \\beta_1x_1 - \\beta_2x_1)^2 + (y_2 - \\beta_1x_2 - \\beta_2x_2)^2 + \\lambda\\beta_1^2 + \\lambda\\beta_2^2 \\\\ f = & y_1^2 - 2y_1\\beta_1x_1 - 2y_1\\beta_2x_1 + \\beta_1^2x_1^2 + 2\\beta_1\\beta_2x_1^2 + \\beta_2^2x_1^2 + \\\\ & y_2^2 - 2y_2\\beta_1x_2 - 2y_2\\beta_2x_2 + \\beta_1^2x_2^2 + 2\\beta_1\\beta_2x_2^2 + \\beta_2^2x_2^2 + \\\\ & \\lambda\\beta_1^2 + \\lambda\\beta_2^2 \\\\ \\end{align}\\] Argue that in this setting, the ridge coefficient estimates satisfy \\(\\hat{\\beta}_1 = \\hat{\\beta}_2\\) We can find when the above is minimized with respect to each of \\(\\beta_1\\) and \\(\\beta_2\\) by partial differentiation. \\[ \\frac{\\partial}{\\partial{\\beta_1}} = - 2y_1x_1 + 2\\beta_1x_1^2 + 2\\beta_2x_1^2 - 2y_2x_2 + 2\\beta_1x_2^2 + 2\\beta_2x_2^2 + 2\\lambda\\beta_1 \\] \\[ \\frac{\\partial}{\\partial{\\beta_2}} = - 2y_1x_1 + 2\\beta_1x_1^2 + 2\\beta_2x_1^2 - 2y_2x_2 + 2\\beta_1x_2^2 + 2\\beta_2x_2^2 + 2\\lambda\\beta_2 \\] A minimum can be found when these are set to 0. \\[ \\lambda\\beta_1 = y_1x_1 + y_2x_2 - \\beta_1x_1^2 - \\beta_2x_1^2 - \\beta_1x_2^2 - \\beta_2x_2^2 \\\\ \\lambda\\beta_2 = y_1x_1 + y_2x_2 - \\beta_1x_1^2 - \\beta_2x_1^2 - \\beta_1x_2^2 - \\beta_2x_2^2 \\] Therefore \\(\\lambda\\beta_1 = \\lambda\\beta_2\\) and \\(\\beta_1 = \\beta_2\\), thus there is only one solution, that is when the coefficients are the same. Write out the lasso optimization problem in this setting. We are trying to minimize: \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p |\\beta_j| \\] As above (and defining \\(x_1 = x_{11} = x_{12}\\) and \\(x_2 = x_{21} = x_{22}\\)) we simplify to \\[ (y_1 - \\beta_1x_1 - \\beta_2x_1)^2 + (y_2 - \\beta_1x_2 - \\beta_2x_2)^2 + \\lambda|\\beta_1| + \\lambda|\\beta_2| \\] Argue that in this setting, the lasso coefficients \\(\\hat{\\beta}_1\\) and \\(\\hat{\\beta}_2\\) are not unique—in other words, there are many possible solutions to the optimization problem in (c). Describe these solutions. We will consider the alternate form of the lasso optimization problem \\[ (y_1 - \\hat{\\beta_1}x_1 - \\hat{\\beta_2}x_1)^2 + (y_2 - \\hat{\\beta_1}x_2 - \\hat{\\beta_2}x_2)^2 \\quad \\text{subject to} \\quad |\\hat{\\beta_1}| + |\\hat{\\beta_2}| \\le s \\] Since \\(x_1 + x_2 = 0\\) and \\(y_1 + y_2 = 0\\), this is equivalent to minimising \\(2(y_1 - (\\hat{\\beta_1} + \\hat{\\beta_2})x_1)^2\\) which has a solution when \\(\\hat{\\beta_1} + \\hat{\\beta_2} = y_1/x_1\\). Geometrically, this is a \\(45^\\circ\\) backwards sloping line in the (\\(\\hat{\\beta_1}\\), \\(\\hat{\\beta_2}\\)) plane. 
The constraints \\(|\\hat{\\beta_1}| + |\\hat{\\beta_2}| \\le s\\) specify a diamond shape in the same place, also with lines that are at \\(45^\\circ\\) centered at the origin and which intersect the axes at a distance \\(s\\) from the origin. Thus, points along two edges of the diamond (\\(\\hat{\\beta_1} + \\hat{\\beta_2} = s\\) and \\(\\hat{\\beta_1} + \\hat{\\beta_2} = -s\\)) become solutions to the lasso optimization problem. 6.1.6 Question 6 We will now explore (6.12) and (6.13) further. Consider (6.12) with \\(p = 1\\). For some choice of \\(y_1\\) and \\(\\lambda > 0\\), plot (6.12) as a function of \\(\\beta_1\\). Your plot should confirm that (6.12) is solved by (6.14). Equation 6.12 is: \\[ \\sum_{j=1}^p(y_j - \\beta_j)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] Equation 6.14 is: \\[ \\hat{\\beta}_j^R = y_j/(1 + \\lambda) \\] where \\(\\hat{\\beta}_j^R\\) is the ridge regression estimate. lambda <- 0.7 y <- 1.4 fn <- function(beta) { (y - beta)^2 + lambda * beta^2 } plot(seq(0, 2, 0.01), fn(seq(0, 2, 0.01)), type = "l", xlab = "beta", ylab = "6.12") abline(v = y / (1 + lambda), lty = 2) Consider (6.13) with \\(p = 1\\). For some choice of \\(y_1\\) and \\(\\lambda > 0\\), plot (6.13) as a function of \\(\\beta_1\\). Your plot should confirm that (6.13) is solved by (6.15). Equation 6.13 is: \\[ \\sum_{j=1}^p(y_j - \\beta_j)^2 + \\lambda\\sum_{j=1}^p|\\beta_j| \\] Equation 6.15 is: \\[ \\hat{\\beta}_j^L = \\begin{cases} y_j - \\lambda/2 &\\mbox{if } y_j > \\lambda/2; \\\\ y_j + \\lambda/2 &\\mbox{if } y_j < -\\lambda/2; \\\\ 0 &\\mbox{if } |y_j| \\le \\lambda/2; \\end{cases} \\] For \\(\\lambda = 0.7\\) and \\(y = 1.4\\), the top case applies. lambda <- 0.7 y <- 1.4 fn <- function(beta) { (y - beta)^2 + lambda * abs(beta) } plot(seq(0, 2, 0.01), fn(seq(0, 2, 0.01)), type = "l", xlab = "beta", ylab = "6.12") abline(v = y - lambda / 2, lty = 2) 6.1.7 Question 7 We will now derive the Bayesian connection to the lasso and ridge regression discussed in Section 6.2.2. Suppose that \\(y_i = \\beta_0 + \\sum_{j=1}^p x_{ij}\\beta_j + \\epsilon_i\\) where \\(\\epsilon_1, ..., \\epsilon_n\\) are independent and identically distributed from a \\(N(0, \\sigma^2)\\) distribution. Write out the likelihood for the data. \\[\\begin{align*} \\mathcal{L} &= \\prod_i^n \\mathcal{N}(0, \\sigma^2) \\\\ &= \\prod_i^n \\frac{1}{\\sqrt{2\\pi\\sigma}}\\exp\\left(-\\frac{\\epsilon_i^2}{2\\sigma^2}\\right) \\\\ &= \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\end{align*}\\] Assume the following prior for \\(\\beta\\): \\(\\beta_1, ..., \\beta_p\\) are independent and identically distributed according to a double-exponential distribution with mean 0 and common scale parameter b: i.e. \\(p(\\beta) = \\frac{1}{2b}\\exp(-|\\beta|/b)\\). Write out the posterior for \\(\\beta\\) in this setting. The posterior can be calculated by multiplying the prior and likelihood (up to a proportionality constant). \\[\\begin{align*} p(\\beta|X,Y) &\\propto \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\prod_j^p\\frac{1}{2b}\\exp\\left(-\\frac{|\\beta_j|}{b}\\right) \\\\ &\\propto \\frac{1}{2b} \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 -\\sum_j^p\\frac{|\\beta_j|}{b}\\right) \\end{align*}\\] Argue that the lasso estimate is the mode for \\(\\beta\\) under this posterior distribution. 
Let us find the maximum of the posterior distribution (the mode). Maximizing the posterior probability is equivalent to maximizing its log which is: \\[ \\log(p(\\beta|X,Y)) \\propto \\log\\left[ \\frac{1}{2b} \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\right ] - \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\sum_j^p\\frac{|\\beta_j|}{b}\\right) \\] Since, the first term is independent of \\(\\beta\\), our solution will be when we minimize the second term. \\[\\begin{align*} \\DeclareMathOperator*{\\argmin}{arg\\,min} % Jan Hlavacek \\argmin_\\beta \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\sum_j^p\\frac{|\\beta|}{b}\\right) &= \\argmin_\\beta \\left(\\frac{1}{2\\sigma^2} \\right ) \\left( \\sum_i^n \\epsilon_i^2 +\\frac{2\\sigma^2}{b}\\sum_j^p|\\beta_j|\\right) \\\\ &= \\argmin_\\beta \\left( \\sum_i^n \\epsilon_i^2 +\\frac{2\\sigma^2}{b}\\sum_j^p|\\beta_j|\\right) \\end{align*}\\] Note, that \\(RSS = \\sum_i^n \\epsilon_i^2\\) and if we set \\(\\lambda = \\frac{2\\sigma^2}{b}\\), the mode corresponds to lasso optimization. \\[ \\argmin_\\beta RSS + \\lambda\\sum_j^p|\\beta_j| \\] Now assume the following prior for \\(\\beta\\): \\(\\beta_1, ..., \\beta_p\\) are independent and identically distributed according to a normal distribution with mean zero and variance \\(c\\). Write out the posterior for \\(\\beta\\) in this setting. The posterior is now: \\[\\begin{align*} p(\\beta|X,Y) &\\propto \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\prod_j^p\\frac{1}{\\sqrt{2\\pi c}}\\exp\\left(-\\frac{\\beta_j^2}{2c}\\right) \\\\ &\\propto \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\left(\\frac{1}{\\sqrt{2\\pi c}}\\right)^p \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 - \\frac{1}{2c}\\sum_j^p\\beta_j^2\\right) \\end{align*}\\] Argue that the ridge regression estimate is both the mode and the mean for \\(\\beta\\) under this posterior distribution. To show that the ridge estimate is the mode we can again find the maximum by maximizing the log of the posterior. The log is \\[ \\log{p(\\beta|X,Y)} \\propto \\log{\\left[\\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\left(\\frac{1}{\\sqrt{2\\pi c}}\\right)^p \\right ]} - \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\frac{1}{2c}\\sum_j^p\\beta_j^2 \\right) \\] We can maximize (wrt \\(\\beta\\)) by ignoring the first term and minimizing the second term. i.e. we minimize: \\[ \\argmin_\\beta \\left( \\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\frac{1}{2c}\\sum_j^p\\beta_j^2 \\right)\\\\ = \\argmin_\\beta \\left( \\frac{1}{2\\sigma^2} \\left( \\sum_i^n \\epsilon_i^2 + \\frac{\\sigma^2}{c}\\sum_j^p\\beta_j^2 \\right) \\right) \\] As above, if \\(RSS = \\sum_i^n \\epsilon_i^2\\) and if we set \\(\\lambda = \\frac{\\sigma^2}{c}\\), we can see that the mode corresponds to ridge optimization. 6.2 Applied 6.2.1 Question 8 In this exercise, we will generate simulated data, and will then use this data to perform best subset selection. Use the rnorm() function to generate a predictor \\(X\\) of length \\(n = 100\\), as well as a noise vector \\(\\epsilon\\) of length \\(n = 100\\). library(ISLR2) library(glmnet) library(leaps) library(pls) set.seed(42) x <- rnorm(100) ep <- rnorm(100) Generate a response vector \\(Y\\) of length \\(n = 100\\) according to the model \\[Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon,\\] where \\(\\beta_0, \\beta_1, \\beta_2,\\) and \\(\\beta_3\\) are constants of your choice. 
y <- 2 + 3 * x - 2 * x^2 + 0.5 * x^3 + ep Use the regsubsets() function to perform best subset selection in order to choose the best model containing the predictors \\(X, X^2, ..., X^{10}\\). What is the best model obtained according to \\(C_p\\), BIC, and adjusted \\(R^2\\)? Show some plots to provide evidence for your answer, and report the coefficients of the best model obtained. Note you will need to use the data.frame() function to create a single data set containing both \\(X\\) and \\(Y\\). dat <- data.frame(x, y) summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat)) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat) ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: exhaustive ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) "*" "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " Repeat (c), using forward stepwise selection and also using backwards stepwise selection. How does your answer compare to the results in (c)? 
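Part (c) above also asks for plots to justify the chosen model size; a sketch of how Cp, BIC and adjusted R-squared could be extracted from the regsubsets summary and plotted (assuming the dat object created above; not part of the original solution):
s <- summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat))
par(mfrow = c(1, 3))
plot(s$cp, xlab = "Number of variables", ylab = "Cp", type = "b")
points(which.min(s$cp), min(s$cp), col = "red", pch = 19)
plot(s$bic, xlab = "Number of variables", ylab = "BIC", type = "b")
points(which.min(s$bic), min(s$bic), col = "red", pch = 19)
plot(s$adjr2, xlab = "Number of variables", ylab = "Adjusted R-squared", type = "b")
points(which.max(s$adjr2), max(s$adjr2), col = "red", pch = 19)
# Coefficients of the model size chosen by BIC
coef(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat), id = which.min(s$bic))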
summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat, method = "forward")) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat, method = "forward") ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: forward ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat, method = "backward")) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat, method = "backward") ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: backward ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " " " ## 7 ( 1 ) " " " " ## 8 ( 1 ) " " " " ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" Now fit a lasso model to the simulated data, again using 
\\(X, X^2, ..., X^{10}\\) as predictors. Use cross-validation to select the optimal value of \\(\\lambda\\). Create plots of the cross-validation error as a function of \\(\\lambda\\). Report the resulting coefficient estimates, and discuss the results obtained. res <- cv.glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1) (best <- res$lambda.min) ## [1] 0.09804219 plot(res) out <- glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1, lambda = res$lambda.min) predict(out, type = "coefficients", s = best) ## 11 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) 1.8457308 ## 1 2.9092918 ## 2 -1.9287428 ## 3 0.5161012 ## 4 . ## 5 . ## 6 . ## 7 . ## 8 . ## 9 . ## 10 . When fitting lasso, the model that minimizes MSE uses three predictors (as per the simulation). The coefficients estimated (2.9, -1.9 and 0.5) are similar to those used in the simulation. Now generate a response vector \\(Y\\) according to the model \\[Y = \\beta_0 + \\beta_7X^7 + \\epsilon,\\] and perform best subset selection and the lasso. Discuss the results obtained. dat$y <- 2 - 2 * x^2 + 0.2 * x^7 + ep summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat)) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat) ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: exhaustive ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " "*" ## 3 ( 1 ) " " "*" ## 4 ( 1 ) " " "*" ## 5 ( 1 ) " " "*" ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " "*" ## 8 ( 1 ) " " "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " "*" ## 8 ( 1 ) " " "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" res <- cv.glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1) (best <- res$lambda.min) ## [1] 1.126906 plot(res) out <- glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1, lambda = best) predict(out, type = "coefficients", s = best) ## 11 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) 1.061389580 ## 1 . ## 2 -0.883080980 ## 3 . ## 4 -0.121018425 ## 5 0.028984084 ## 6 -0.009540039 ## 7 0.188796928 ## 8 . ## 9 . ## 10 . When fitting lasso, the model does not perfectly replicate the simulation (coefficients are retained for powers of \\(x\\) that were not simulated). 6.2.2 Question 9 In this exercise, we will predict the number of applications received using the other variables in the College data set. 
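For the x^7 simulation just discussed, it is also informative to inspect the best-subset coefficients directly; a brief sketch assuming the updated dat above (not part of the original solution):
fit7 <- regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat)
coef(fit7, id = 1) # per the selection table above, the one-variable model uses the x^7 term
coef(fit7, id = 2) # the two-variable model adds the x^2 term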
Split the data set into a training set and a test set. set.seed(42) train <- sample(nrow(College), nrow(College) * 2 / 3) test <- setdiff(seq_len(nrow(College)), train) mse <- list() Fit a linear model using least squares on the training set, and report the test error obtained. fit <- lm(Apps ~ ., data = College[train, ]) (mse$lm <- mean((predict(fit, College[test, ]) - College$Apps[test])^2)) ## [1] 1695269 Fit a ridge regression model on the training set, with \\(\\lambda\\) chosen by cross-validation. Report the test error obtained. mm <- model.matrix(Apps ~ ., data = College[train, ]) fit2 <- cv.glmnet(mm, College$Apps[train], alpha = 0) p <- predict(fit2, model.matrix(Apps ~ ., data = College[test, ]), s = fit2$lambda.min) (mse$ridge <- mean((p - College$Apps[test])^2)) ## [1] 2804369 Fit a lasso model on the training set, with \\(\\lambda\\) chosen by cross-validation. Report the test error obtained, along with the number of non-zero coefficient estimates. mm <- model.matrix(Apps ~ ., data = College[train, ]) fit3 <- cv.glmnet(mm, College$Apps[train], alpha = 1) p <- predict(fit3, model.matrix(Apps ~ ., data = College[test, ]), s = fit3$lambda.min) (mse$lasso <- mean((p - College$Apps[test])^2)) ## [1] 1822322 Fit a PCR model on the training set, with \\(M\\) chosen by cross-validation. Report the test error obtained, along with the value of \\(M\\) selected by cross-validation. fit4 <- pcr(Apps ~ ., data = College[train, ], scale = TRUE, validation = "CV") validationplot(fit4, val.type = "MSEP") p <- predict(fit4, College[test, ], ncomp = 17) (mse$pcr <- mean((p - College$Apps[test])^2)) ## [1] 1695269 Fit a PLS model on the training set, with \\(M\\) chosen by cross-validation. Report the test error obtained, along with the value of \\(M\\) selected by cross-validation. fit5 <- plsr(Apps ~ ., data = College[train, ], scale = TRUE, validation = "CV") validationplot(fit5, val.type = "MSEP") p <- predict(fit5, College[test, ], ncomp = 12) (mse$pls <- mean((p - College$Apps[test])^2)) ## [1] 1696902 Comment on the results obtained. How accurately can we predict the number of college applications received? Is there much difference among the test errors resulting from these five approaches? barplot(unlist(mse), ylab = "Test MSE", horiz = TRUE) The five test errors are of broadly similar magnitude; the best corresponds to a root mean squared error of roughly 1,300 applications. With this seed, least squares and PCR give the lowest test error (PCR with all 17 components is equivalent to least squares), PLS is very close behind, the lasso is somewhat worse, and ridge regression performs worst. 6.2.3 Question 10 We have seen that as the number of features used in a model increases, the training error will necessarily decrease, but the test error may not. We will now explore this in a simulated data set. Generate a data set with \\(p = 20\\) features, \\(n = 1,000\\) observations, and an associated quantitative response vector generated according to the model \\(Y = X\\beta + \\epsilon\\), where \\(\\beta\\) has some elements that are exactly equal to zero. set.seed(42) dat <- matrix(rnorm(1000 * 20), nrow = 1000) colnames(dat) <- paste0("b", 1:20) beta <- rep(0, 20) beta[1:4] <- c(5, 4, 2, 7) y <- colSums((t(dat) * beta)) + rnorm(1000) dat <- data.frame(dat) dat$y <- y Split your data set into a training set containing 100 observations and a test set containing 900 observations. train <- dat[1:100, ] test <- dat[101:1000, ] Perform best subset selection on the training set, and plot the training set MSE associated with the best model of each size.
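To put the Question 9 test errors on a more interpretable scale, one could also compute a test-set R-squared for each method. This sketch (not part of the original solution) assumes it is run immediately after part (g), while the mse list and the College test indices from part (a) are still in scope; the same names are reused for other objects in Question 10.
tss <- mean((College$Apps[test] - mean(College$Apps[test]))^2)
round(sapply(mse, function(m) 1 - m / tss), 3) # proportion of test-set variance explained by each model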
fit <- regsubsets(y ~ ., data = train, nvmax = 20) summary(fit) ## Subset selection object ## Call: regsubsets.formula(y ~ ., data = train, nvmax = 20) ## 20 Variables (and intercept) ## Forced in Forced out ## b1 FALSE FALSE ## b2 FALSE FALSE ## b3 FALSE FALSE ## b4 FALSE FALSE ## b5 FALSE FALSE ## b6 FALSE FALSE ## b7 FALSE FALSE ## b8 FALSE FALSE ## b9 FALSE FALSE ## b10 FALSE FALSE ## b11 FALSE FALSE ## b12 FALSE FALSE ## b13 FALSE FALSE ## b14 FALSE FALSE ## b15 FALSE FALSE ## b16 FALSE FALSE ## b17 FALSE FALSE ## b18 FALSE FALSE ## b19 FALSE FALSE ## b20 FALSE FALSE ## 1 subsets of each size up to 20 ## Selection Algorithm: exhaustive ## b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 b16 b17 ## 1 ( 1 ) " " " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 2 ( 1 ) "*" " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 3 ( 1 ) "*" "*" " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 4 ( 1 ) "*" "*" "*" "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 5 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " " " " " " " " " " " " " " " ## 6 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " " " " " " " " " " " " " "*" ## 7 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " "*" " " " " " " " " " " "*" ## 8 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 9 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 10 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 11 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " " " " " "*" "*" ## 12 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " " " " " "*" "*" ## 13 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " "*" " " "*" "*" ## 14 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 15 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 16 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" "*" "*" "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" ## b18 b19 b20 ## 1 ( 1 ) " " " " " " ## 2 ( 1 ) " " " " " " ## 3 ( 1 ) " " " " " " ## 4 ( 1 ) " " " " " " ## 5 ( 1 ) " " " " " " ## 6 ( 1 ) " " " " " " ## 7 ( 1 ) " " " " " " ## 8 ( 1 ) " " " " " " ## 9 ( 1 ) " " " " " " ## 10 ( 1 ) " " " " "*" ## 11 ( 1 ) " " " " "*" ## 12 ( 1 ) "*" " " "*" ## 13 ( 1 ) "*" " " "*" ## 14 ( 1 ) "*" " " "*" ## 15 ( 1 ) "*" " " "*" ## 16 ( 1 ) "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" plot(summary(fit)$rss / 100, ylab = "MSE", type = "o") Plot the test set MSE associated with the best model of each size. predict.regsubsets <- function(object, newdata, id, ...) { form <- as.formula(object$call[[2]]) mat <- model.matrix(form, newdata) coefi <- coef(object, id = id) xvars <- names(coefi) mat[, xvars] %*% coefi } mse <- sapply(1:20, function(i) mean((test$y - predict(fit, test, i))^2)) plot(mse, ylab = "MSE", type = "o", pch = 19) For which model size does the test set MSE take on its minimum value? Comment on your results. 
If it takes on its minimum value for a model containing only an intercept or a model containing all of the features, then play around with the way that you are generating the data in (a) until you come up with a scenario in which the test set MSE is minimized for an intermediate model size. which.min(mse) ## [1] 4 The min test MSE is found when model size is 4. This corresponds to the simulated data which has four non-zero coefficients. set.seed(42) dat <- matrix(rnorm(1000 * 20), nrow = 1000) colnames(dat) <- paste0("b", 1:20) beta <- rep(0, 20) beta[1:9] <- c(5, 4, 2, 7, 0.01, 0.001, 0.05, 0.1, 0.5) y <- colSums((t(dat) * beta)) + rnorm(1000) dat <- data.frame(dat) dat$y <- y train <- dat[1:100, ] test <- dat[101:1000, ] fit <- regsubsets(y ~ ., data = train, nvmax = 20) summary(fit) ## Subset selection object ## Call: regsubsets.formula(y ~ ., data = train, nvmax = 20) ## 20 Variables (and intercept) ## Forced in Forced out ## b1 FALSE FALSE ## b2 FALSE FALSE ## b3 FALSE FALSE ## b4 FALSE FALSE ## b5 FALSE FALSE ## b6 FALSE FALSE ## b7 FALSE FALSE ## b8 FALSE FALSE ## b9 FALSE FALSE ## b10 FALSE FALSE ## b11 FALSE FALSE ## b12 FALSE FALSE ## b13 FALSE FALSE ## b14 FALSE FALSE ## b15 FALSE FALSE ## b16 FALSE FALSE ## b17 FALSE FALSE ## b18 FALSE FALSE ## b19 FALSE FALSE ## b20 FALSE FALSE ## 1 subsets of each size up to 20 ## Selection Algorithm: exhaustive ## b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 b16 b17 ## 1 ( 1 ) " " " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 2 ( 1 ) "*" " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 3 ( 1 ) "*" "*" " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 4 ( 1 ) "*" "*" "*" "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 5 ( 1 ) "*" "*" "*" "*" " " " " " " " " "*" " " " " " " " " " " " " " " " " ## 6 ( 1 ) "*" "*" "*" "*" " " " " " " " " "*" " " " " " " " " " " " " " " "*" ## 7 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " " " " " " " " " " " " " "*" ## 8 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " "*" " " " " " " " " " " "*" ## 9 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 10 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 11 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 12 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" "*" " " " " " " "*" "*" ## 13 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" "*" " " " " " " "*" "*" ## 14 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " " " " " "*" "*" ## 15 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " "*" " " "*" "*" ## 16 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" "*" "*" "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" ## b18 b19 b20 ## 1 ( 1 ) " " " " " " ## 2 ( 1 ) " " " " " " ## 3 ( 1 ) " " " " " " ## 4 ( 1 ) " " " " " " ## 5 ( 1 ) " " " " " " ## 6 ( 1 ) " " " " " " ## 7 ( 1 ) " " " " " " ## 8 ( 1 ) " " " " " " ## 9 ( 1 ) " " " " " " ## 10 ( 1 ) " " " " " " ## 11 ( 1 ) " " " " "*" ## 12 ( 1 ) " " " " "*" ## 13 ( 1 ) "*" " " "*" ## 14 ( 1 ) "*" " " "*" ## 15 ( 1 ) "*" " " "*" ## 16 ( 1 ) "*" " " "*" ## 17 ( 1 ) "*" " " "*" ## 18 ( 1 ) "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" mse <- sapply(1:20, function(i) 
mean((test$y - predict(fit, test, i))^2)) plot(mse, ylab = "MSE", type = "o", pch = 19) which.min(mse) ## [1] 5 How does the model at which the test set MSE is minimized compare to the true model used to generate the data? Comment on the coefficient values. The min test MSE is found when model size is 5 but there are 9 non-zero coefficients. coef(fit, id = 5) ## (Intercept) b1 b2 b3 b4 b9 ## 0.03507654 5.06180121 3.82785027 2.20434996 7.05312844 0.57032008 The coefficient values are well estimated when high, but the smaller coefficients are dropped. Create a plot displaying \\(\\sqrt{\\sum_{j=1}^p (\\beta_j - \\hat{\\beta}{}_j^r)^2}\\) for a range of values of \\(r\\), where \\(\\hat{\\beta}{}_j^r\\) is the \\(j\\)th coefficient estimate for the best model containing \\(r\\) coefficients. Comment on what you observe. How does this compare to the test MSE plot from (d)? names(beta) <- paste0("b", 1:20) b <- data.frame(id = names(beta), b = beta) out <- sapply(1:20, function(i) { c <- coef(fit, id = i)[-1] c <- data.frame(id = names(c), c = c) m <- merge(b, c) sqrt(sum((m$b - m$c)^2)) }) plot(out, ylab = "Mean squared coefficient error", type = "o", pch = 19) The error of the coefficient estimates is minimized when model size is 5. This corresponds to the point when test MSE was minimized. 6.2.4 Question 11 We will now try to predict per capita crime rate in the Boston data set. Try out some of the regression methods explored in this chapter, such as best subset selection, the lasso, ridge regression, and PCR. Present and discuss results for the approaches that you consider. set.seed(1) train <- sample(nrow(Boston), nrow(Boston) * 2 / 3) test <- setdiff(seq_len(nrow(Boston)), train) hist(log(Boston$crim)) Propose a model (or set of models) that seem to perform well on this data set, and justify your answer. Make sure that you are evaluating model performance using validation set error, cross-validation, or some other reasonable alternative, as opposed to using training error. We will try to fit models to log(Boston$crim) which is closer to a normal distribution. fit <- lm(log(crim) ~ ., data = Boston[train, ]) mean((predict(fit, Boston[test, ]) - log(Boston$crim[test]))^2) ## [1] 0.66578 mm <- model.matrix(log(crim) ~ ., data = Boston[train, ]) fit2 <- cv.glmnet(mm, log(Boston$crim[train]), alpha = 0) p <- predict(fit2, model.matrix(log(crim) ~ ., data = Boston[test, ]), s = fit2$lambda.min) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6511807 mm <- model.matrix(log(crim) ~ ., data = Boston[train, ]) fit3 <- cv.glmnet(mm, log(Boston$crim[train]), alpha = 1) p <- predict(fit3, model.matrix(log(crim) ~ ., data = Boston[test, ]), s = fit3$lambda.min) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6494337 fit4 <- pcr(log(crim) ~ ., data = Boston[train, ], scale = TRUE, validation = "CV") validationplot(fit4, val.type = "MSEP") p <- predict(fit4, Boston[test, ], ncomp = 8) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6561043 fit5 <- plsr(log(crim) ~ ., data = Boston[train, ], scale = TRUE, validation = "CV") validationplot(fit5, val.type = "MSEP") p <- predict(fit5, Boston[test, ], ncomp = 6) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6773353 In this case lasso (alpha = 1) seems to perform very slightly better than un-penalized regression. Some coefficients have been dropped: coef(fit3, s = fit3$lambda.min) ## 14 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) -4.713172675 ## (Intercept) . ## zn -0.011043739 ## indus 0.022515402 ## chas . ## nox 3.856157215 ## rm . 
## age 0.004210529 ## dis . ## rad 0.145604750 ## tax . ## ptratio -0.031787696 ## lstat 0.036112321 ## medv 0.004304181 Does your chosen model involve all of the features in the data set? Why or why not? Not all features are included due to the lasso penalization. "],["moving-beyond-linearity.html", "7 Moving Beyond Linearity 7.1 Conceptual 7.2 Applied", " 7 Moving Beyond Linearity 7.1 Conceptual 7.1.1 Question 1 It was mentioned in the chapter that a cubic regression spline with one knot at \\(\\xi\\) can be obtained using a basis of the form \\(x, x^2, x^3, (x-\\xi)^3_+\\), where \\((x-\\xi)^3_+ = (x-\\xi)^3\\) if \\(x>\\xi\\) and equals 0 otherwise. We will now show that a function of the form \\[ f(x)=\\beta_0 +\\beta_1x+\\beta_2x^2 +\\beta_3x^3 +\\beta_4(x-\\xi)^3_+ \\] is indeed a cubic regression spline, regardless of the values of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3,\\beta_4\\). Find a cubic polynomial \\[ f_1(x) = a_1 + b_1x + c_1x^2 + d_1x^3 \\] such that \\(f(x) = f_1(x)\\) for all \\(x \\le \\xi\\). Express \\(a_1,b_1,c_1,d_1\\) in terms of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3, \\beta_4\\). In this case, for \\(x \\le \\xi\\), the cubic polynomial simply has terms \\(a_1 = \\beta_0\\), \\(b_1 = \\beta_1\\), \\(c_1 = \\beta_2\\), \\(d_1 = \\beta_3\\) Find a cubic polynomial \\[ f_2(x) = a_2 + b_2x + c_2x^2 + d_2x^3 \\] such that \\(f(x) = f_2(x)\\) for all \\(x > \\xi\\). Express \\(a_2, b_2, c_2, d_2\\) in terms of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3, \\beta_4\\). We have now established that \\(f(x)\\) is a piecewise polynomial. For \\(x \\gt \\xi\\), the cubic polynomial would be (we include the \\(\\beta_4\\) term). \\[\\begin{align} f(x) = & \\beta_0 + \\beta_1x + \\beta_2x^2 + \\beta_3x^3 + \\beta_4(x-\\xi)^3 \\\\ = & \\beta_0 + \\beta_1x + \\beta_2x^2 + + \\beta_4(x^3 - 3x^2\\xi + 3x\\xi^2 -\\xi^3) \\\\ = & \\beta_0 - \\beta_4\\xi^3 + (\\beta_1 + 3\\beta_4\\xi^2)x + (\\beta_2 - 3\\beta_4\\xi)x^2 + (\\beta_3 + \\beta_4)x^3 \\end{align}\\] Therefore, \\(a_1 = \\beta_0 - \\beta_4\\xi^3\\), \\(b_1 = \\beta_1 + 3\\beta_4\\xi^2\\), \\(c_1 = \\beta_2 - 3\\beta_4\\xi\\), \\(d_1 = \\beta_3 + \\beta_4\\) Show that \\(f_1(\\xi) = f_2(\\xi)\\). That is, \\(f(x)\\) is continuous at \\(\\xi\\). To do this, we replace \\(x\\) with \\(\\xi\\) in the above equations and simplify. \\[\\begin{align} f_1(\\xi) = \\beta_0 + \\beta_1\\xi + \\beta_2\\xi^2 + \\beta_3\\xi^3 \\end{align}\\] \\[\\begin{align} f_2(\\xi) = & \\beta_0 - \\beta_4\\xi^3 + (\\beta_1 + 3\\beta_4\\xi^2)\\xi + (\\beta_2 - 3\\beta_4\\xi)\\xi^2 + (\\beta_3 + \\beta_4)\\xi^3 \\\\ = & \\beta_0 - \\beta_4\\xi^3 + \\beta_1\\xi + 3\\beta_4\\xi^3 + \\beta_2\\xi^2 - 3\\beta_4\\xi^3 + \\beta_3\\xi^3 + \\beta_4\\xi^3 \\\\ = & \\beta_0 + \\beta_1\\xi + \\beta_2\\xi^2 + \\beta_3\\xi^3 \\end{align}\\] Show that \\(f_1'(\\xi) = f_2'(\\xi)\\). That is, \\(f'(x)\\) is continuous at \\(\\xi\\). To do this we differentiate the above with respect to \\(x\\). \\[ f_1'(x) = \\beta_1 + 2\\beta_2x + 3\\beta_3x^2 f_1'(\\xi) = \\beta_1 + 2\\beta_2\\xi + 3\\beta_3\\xi^2 \\] \\[\\begin{align} f_2'(x) & = \\beta_1 + 3\\beta_4\\xi^2 + 2(\\beta_2 - 3\\beta_4\\xi)x + 3(\\beta_3 + \\beta_4)x^2 \\\\ f_2'(\\xi) & = \\beta_1 + 3\\beta_4\\xi^2 + 2(\\beta_2 - 3\\beta_4\\xi)\\xi + 3(\\beta_3 + \\beta_4)\\xi^2 \\\\ & = \\beta_1 + 3\\beta_4\\xi^2 + 2\\beta_2\\xi - 6\\beta_4\\xi^2 + 3\\beta_3\\xi^2 + 3\\beta_4\\xi^2 \\\\ & = \\beta_1 + 2\\beta_2\\xi + 3\\beta_3\\xi^2 \\end{align}\\] Show that \\(f_1''(\\xi) = f_2''(\\xi)\\). 
That is, \\(f''(x)\\) is continuous at \\(\\xi\\). Therefore, \\(f(x)\\) is indeed a cubic spline. \\[ f_1''(x) = 2\\beta_2 + 6\\beta_3x \\\\ f_1''(\\xi) = 2\\beta_2 + 6\\beta_3\\xi \\] \\[ f_2''(x) = 2\\beta_2 - 6\\beta_4\\xi + 6(\\beta_3 + \\beta_4)x \\] \\[\\begin{align} f_2''(\\xi) & = 2\\beta_2 - 6\\beta_4\\xi + 6\\beta_3\\xi + 6\\beta_4\\xi \\\\ & = 2\\beta_2 + 6\\beta_3\\xi \\end{align}\\] Hint: Parts (d) and (e) of this problem require knowledge of single-variable calculus. As a reminder, given a cubic polynomial \\[f_1(x) = a_1 + b_1x + c_1x^2 + d_1x^3,\\] the first derivative takes the form \\[f_1'(x) = b_1 + 2c_1x + 3d_1x^2\\] and the second derivative takes the form \\[f_1''(x) = 2c_1 + 6d_1x.\\] 7.1.2 Question 2 Suppose that a curve \\(\\hat{g}\\) is computed to smoothly fit a set of \\(n\\) points using the following formula: \\[ \\DeclareMathOperator*{\\argmin}{arg\\,min} % Jan Hlavacek \\hat{g} = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(m)}(x) \\right]^2 dx \\right), \\] where \\(g^{(m)}\\) represents the \\(m\\)th derivative of \\(g\\) (and \\(g^{(0)} = g\\)). Provide example sketches of \\(\\hat{g}\\) in each of the following scenarios. \\(\\lambda=\\infty, m=0\\). Here we penalize \\(g\\) itself and an infinite \\(\\lambda\\) means that this penalty dominates. This means that \\(\\hat{g}\\) will be 0. \\(\\lambda=\\infty, m=1\\). Here we penalize the first derivative (the slope) of \\(g\\) and an infinite \\(\\lambda\\) means that this penalty dominates. Thus the slope will be 0, so \\(\\hat{g}\\) will be a horizontal line at the level that best fits the data (the mean of \\(y\\)). \\(\\lambda=\\infty, m=2\\). Here we penalize the second derivative (the change of slope) of \\(g\\) and an infinite \\(\\lambda\\) means that this penalty dominates. Thus \\(\\hat{g}\\) will be a straight line (the least squares line). \\(\\lambda=\\infty, m=3\\). Here we penalize the third derivative (the change of the change of slope) of \\(g\\) and an infinite \\(\\lambda\\) means that this penalty dominates. In other words, the curve will have a constant rate of change of slope, i.e. it will be at most quadratic. \\(\\lambda=0, m=3\\). Here we penalize the third derivative, but a value of \\(\\lambda = 0\\) means that there is no penalty. As a result, the curve is able to interpolate all points. 7.1.3 Question 3 Suppose we fit a curve with basis functions \\(b_1(X) = X\\), \\(b_2(X) = (X - 1)^2I(X \\geq 1)\\). (Note that \\(I(X \\geq 1)\\) equals 1 for \\(X \\geq 1\\) and 0 otherwise.) We fit the linear regression model \\[Y = \\beta_0 +\\beta_1b_1(X) + \\beta_2b_2(X) + \\epsilon,\\] and obtain coefficient estimates \\(\\hat{\\beta}_0 = 1, \\hat{\\beta}_1 = 1, \\hat{\\beta}_2 = -2\\). Sketch the estimated curve between \\(X = -2\\) and \\(X = 2\\). Note the intercepts, slopes, and other relevant information. x <- seq(-2, 2, length.out = 1000) f <- function(x) 1 + x + -2 * (x - 1)^2 * I(x >= 1) plot(x, f(x), type = "l") grid() 7.1.4 Question 4 Suppose we fit a curve with basis functions \\(b_1(X) = I(0 \\leq X \\leq 2) - (X -1)I(1 \\leq X \\leq 2),\\) \\(b_2(X) = (X -3)I(3 \\leq X \\leq 4) + I(4 \\lt X \\leq 5)\\). We fit the linear regression model \\[Y = \\beta_0 +\\beta_1b_1(X) + \\beta_2b_2(X) + \\epsilon,\\] and obtain coefficient estimates \\(\\hat{\\beta}_0 = 1, \\hat{\\beta}_1 = 1, \\hat{\\beta}_2 = 3\\). Sketch the estimated curve between \\(X = -2\\) and \\(X = 6\\). Note the intercepts, slopes, and other relevant information.
x <- seq(-2, 6, length.out = 1000) b1 <- function(x) I(0 <= x & x <= 2) - (x - 1) * I(1 <= x & x <= 2) b2 <- function(x) (x - 3) * I(3 <= x & x <= 4) + I(4 < x & x <= 5) f <- function(x) 1 + 1*b1(x) + 3*b2(x) plot(x, f(x), type = "l") grid() 7.1.5 Question 5 Consider two curves, \\(\\hat{g}\\) and \\(\\hat{g}_2\\), defined by \\[ \\hat{g}_1 = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(3)}(x) \\right]^2 dx \\right), \\] \\[ \\hat{g}_2 = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(4)}(x) \\right]^2 dx \\right), \\] where \\(g^{(m)}\\) represents the \\(m\\)th derivative of \\(g\\). As \\(\\lambda \\to \\infty\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller training RSS? \\(\\hat{g}_2\\) is more flexible (by penalizing a higher derivative of \\(g\\)) and so will have a smaller training RSS. As \\(\\lambda \\to \\infty\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller test RSS? We cannot tell which function will produce a smaller test RSS, but there is chance that \\(\\hat{g}_1\\) will if \\(\\hat{g}_2\\) overfits the data. For \\(\\lambda = 0\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller training and test RSS? When \\(\\lambda = 0\\) there is no penalty, so both functions will give the same result: perfect interpolation of the training data. Thus training RSS will be 0 but test RSS could be high. 7.2 Applied 7.2.1 Question 6 In this exercise, you will further analyze the Wage data set considered throughout this chapter. Perform polynomial regression to predict wage using age. Use cross-validation to select the optimal degree \\(d\\) for the polynomial. What degree was chosen, and how does this compare to the results of hypothesis testing using ANOVA? Make a plot of the resulting polynomial fit to the data. library(ISLR2) library(boot) library(ggplot2) set.seed(42) res <- sapply(1:6, function(i) { fit <- glm(wage ~ poly(age, i), data = Wage) cv.glm(Wage, fit, K = 5)$delta[1] }) which.min(res) ## [1] 6 plot(1:6, res, xlab = "Degree", ylab = "Test MSE", type = "l") abline(v = which.min(res), col = "red", lty = 2) fit <- glm(wage ~ poly(age, which.min(res)), data = Wage) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) points(1:100, predict(fit, data.frame(age = 1:100)), type = "l", col = "red") summary(glm(wage ~ poly(age, 6), data = Wage)) ## ## Call: ## glm(formula = wage ~ poly(age, 6), data = Wage) ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 111.7036 0.7286 153.316 < 2e-16 *** ## poly(age, 6)1 447.0679 39.9063 11.203 < 2e-16 *** ## poly(age, 6)2 -478.3158 39.9063 -11.986 < 2e-16 *** ## poly(age, 6)3 125.5217 39.9063 3.145 0.00167 ** ## poly(age, 6)4 -77.9112 39.9063 -1.952 0.05099 . ## poly(age, 6)5 -35.8129 39.9063 -0.897 0.36956 ## poly(age, 6)6 62.7077 39.9063 1.571 0.11620 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 1592.512) ## ## Null deviance: 5222086 on 2999 degrees of freedom ## Residual deviance: 4766389 on 2993 degrees of freedom ## AIC: 30642 ## ## Number of Fisher Scoring iterations: 2 fit1 <- lm(wage ~ poly(age, 1), data = Wage) fit2 <- lm(wage ~ poly(age, 2), data = Wage) fit3 <- lm(wage ~ poly(age, 3), data = Wage) fit4 <- lm(wage ~ poly(age, 4), data = Wage) fit5 <- lm(wage ~ poly(age, 5), data = Wage) anova(fit1, fit2, fit3, fit4, fit5) ## Analysis of Variance Table ## ## Model 1: wage ~ poly(age, 1) ## Model 2: wage ~ poly(age, 2) ## Model 3: wage ~ poly(age, 3) ## Model 4: wage ~ poly(age, 4) ## Model 5: wage ~ poly(age, 5) ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 2998 5022216 ## 2 2997 4793430 1 228786 143.5931 < 2.2e-16 *** ## 3 2996 4777674 1 15756 9.8888 0.001679 ** ## 4 2995 4771604 1 6070 3.8098 0.051046 . ## 5 2994 4770322 1 1283 0.8050 0.369682 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 The selected degree is 4. When testing with ANOVA, degrees 1, 2 and 3 are highly significant and 4 is marginal. Fit a step function to predict wage using age, and perform cross-validation to choose the optimal number of cuts. Make a plot of the fit obtained. set.seed(42) res <- sapply(2:10, function(i) { Wage$cats <- cut(Wage$age, i) fit <- glm(wage ~ cats, data = Wage) cv.glm(Wage, fit, K = 5)$delta[1] }) names(res) <- 2:10 plot(2:10, res, xlab = "Cuts", ylab = "Test MSE", type = "l") which.min(res) ## 8 ## 7 abline(v = names(which.min(res)), col = "red", lty = 2) fit <- glm(wage ~ cut(age, 8), data = Wage) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) points(18:80, predict(fit, data.frame(age = 18:80)), type = "l", col = "red") 7.2.2 Question 7 The Wage data set contains a number of other features not explored in this chapter, such as marital status (maritl), job class (jobclass), and others. Explore the relationships between some of these other predictors and wage, and use non-linear fitting techniques in order to fit flexible models to the data. Create plots of the results obtained, and write a summary of your findings. plot(Wage$year, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$maritl, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$jobclass, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$education, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) We have a mix of categorical and continuous variables and also want to incorporate non-linear aspects of the continuous variables. A GAM is a good choice to model this situation. library(gam) ## Loading required package: splines ## Loading required package: foreach ## Loaded gam 1.22-4 fit0 <- gam(wage ~ s(year, 4) + s(age, 5) + education, data = Wage) fit2 <- gam(wage ~ s(year, 4) + s(age, 5) + education + maritl, data = Wage) fit1 <- gam(wage ~ s(year, 4) + s(age, 5) + education + jobclass, data = Wage) fit3 <- gam(wage ~ s(year, 4) + s(age, 5) + education + jobclass + maritl, data = Wage) anova(fit0, fit1, fit2, fit3) ## Analysis of Deviance Table ## ## Model 1: wage ~ s(year, 4) + s(age, 5) + education ## Model 2: wage ~ s(year, 4) + s(age, 5) + education + jobclass ## Model 3: wage ~ s(year, 4) + s(age, 5) + education + maritl ## Model 4: wage ~ s(year, 4) + s(age, 5) + education + jobclass + maritl ## Resid. Df Resid. 
Dev Df Deviance Pr(>Chi) ## 1 2986 3689770 ## 2 2985 3677553 1 12218 0.0014286 ** ## 3 2982 3595688 3 81865 1.071e-14 *** ## 4 2981 3581781 1 13907 0.0006687 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 par(mfrow = c(2, 3)) plot(fit3, se = TRUE, col = "blue") 7.2.3 Question 8 Fit some of the non-linear models investigated in this chapter to the Auto data set. Is there evidence for non-linear relationships in this data set? Create some informative plots to justify your answer. Here we want to explore a range of non-linear models. First let’s look at the relationships between the variables in the data. pairs(Auto, cex = 0.4, pch = 19) It does appear that there are some non-linear relationships (e.g. horsepower / weight and mpg). We will pick one variable (horsepower) to predict mpg and try the range of models discussed in this chapter. We will measure test MSE through cross-validation to compare the models. library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ lubridate 1.9.3 ✔ tibble 3.2.1 ## ✔ purrr 1.0.2 ✔ tidyr 1.3.1 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ purrr::accumulate() masks foreach::accumulate() ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ✖ purrr::when() masks foreach::when() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors set.seed(42) fit <- glm(mpg ~ horsepower, data = Auto) err <- cv.glm(Auto, fit, K = 10)$delta[1] fit1 <- glm(mpg ~ poly(horsepower, 4), data = Auto) err1 <- cv.glm(Auto, fit1, K = 10)$delta[1] q <- quantile(Auto$horsepower) Auto$hp_cats <- cut(Auto$horsepower, breaks = q, include.lowest = TRUE) fit2 <- glm(mpg ~ hp_cats, data = Auto) err2 <- cv.glm(Auto, fit2, K = 10)$delta[1] fit3 <- glm(mpg ~ bs(horsepower, df = 4), data = Auto) err3 <- cv.glm(Auto, fit3, K = 10)$delta[1] ## Warning in bs(horsepower, degree = 3L, knots = 92, Boundary.knots = c(46L, : ## some 'x' values beyond boundary knots may cause ill-conditioned bases ## Warning in bs(horsepower, degree = 3L, knots = 92, Boundary.knots = c(46L, : ## some 'x' values beyond boundary knots may cause ill-conditioned bases fit4 <- glm(mpg ~ ns(horsepower, 4), data = Auto) err4 <- cv.glm(Auto, fit4, K = 10)$delta[1] fit5 <- gam(mpg ~ s(horsepower, df = 4), data = Auto) # rough 10-fold cross-validation for gam. err5 <- mean(replicate(10, { b <- cut(sample(seq_along(Auto$horsepower)), 10) pred <- numeric() for (i in 1:10) { train <- b %in% levels(b)[-i] f <- gam(mpg ~ s(horsepower, df = 4), data = Auto[train, ]) pred[!train] <- predict(f, Auto[!train, ]) } mean((Auto$mpg - pred)^2) # MSE })) c(err, err1, err2, err3, err4, err5) ## [1] 24.38418 19.94222 20.37940 18.92802 19.33556 19.02999 anova(fit, fit1, fit2, fit3, fit4, fit5) ## Analysis of Deviance Table ## ## Model 1: mpg ~ horsepower ## Model 2: mpg ~ poly(horsepower, 4) ## Model 3: mpg ~ hp_cats ## Model 4: mpg ~ bs(horsepower, df = 4) ## Model 5: mpg ~ ns(horsepower, 4) ## Model 6: mpg ~ s(horsepower, df = 4) ## Resid. Df Resid. Dev Df Deviance F Pr(>F) ## 1 390 9385.9 ## 2 387 7399.5 3.00000000 1986.39 35.258 < 2.2e-16 *** ## 3 388 7805.4 -1.00000000 -405.92 21.615 4.578e-06 *** ## 4 387 7276.5 1.00000000 528.94 28.166 1.880e-07 *** ## 5 387 7248.6 0.00000000 27.91 ## 6 387 7267.7 0.00013612 -19.10 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 x <- seq(min(Auto$horsepower), max(Auto$horsepower), length.out=1000) pred <- data.frame( x = x, "Linear" = predict(fit, data.frame(horsepower = x)), "Polynomial" = predict(fit1, data.frame(horsepower = x)), "Step" = predict(fit2, data.frame(hp_cats = cut(x, breaks = q, include.lowest = TRUE))), "Regression spline" = predict(fit3, data.frame(horsepower = x)), "Natural spline" = predict(fit4, data.frame(horsepower = x)), "Smoothing spline" = predict(fit5, data.frame(horsepower = x)), check.names = FALSE ) pred <- pivot_longer(pred, -x) ggplot(Auto, aes(horsepower, mpg)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() 7.2.4 Question 9 This question uses the variables dis (the weighted mean of distances to five Boston employment centers) and nox (nitrogen oxides concentration in parts per 10 million) from the Boston data. We will treat dis as the predictor and nox as the response. Use the poly() function to fit a cubic polynomial regression to predict nox using dis. Report the regression output, and plot the resulting data and polynomial fits. fit <- glm(nox ~ poly(dis, 3), data = Boston) summary(fit) ## ## Call: ## glm(formula = nox ~ poly(dis, 3), data = Boston) ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 0.554695 0.002759 201.021 < 2e-16 *** ## poly(dis, 3)1 -2.003096 0.062071 -32.271 < 2e-16 *** ## poly(dis, 3)2 0.856330 0.062071 13.796 < 2e-16 *** ## poly(dis, 3)3 -0.318049 0.062071 -5.124 4.27e-07 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 0.003852802) ## ## Null deviance: 6.7810 on 505 degrees of freedom ## Residual deviance: 1.9341 on 502 degrees of freedom ## AIC: -1370.9 ## ## Number of Fisher Scoring iterations: 2 plot(nox ~ dis, data = Boston, col = alpha("steelblue", 0.4), pch = 19) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) lines(x, predict(fit, data.frame(dis = x)), col = "red", lty = 2) Plot the polynomial fits for a range of different polynomial degrees (say, from 1 to 10), and report the associated residual sum of squares. fits <- lapply(1:10, function(i) glm(nox ~ poly(dis, i), data = Boston)) x <- seq(min(Boston$dis), max(Boston$dis), length.out=1000) pred <- data.frame(lapply(fits, function(fit) predict(fit, data.frame(dis = x)))) colnames(pred) <- 1:10 pred$x <- x pred <- pivot_longer(pred, !x) ggplot(Boston, aes(dis, nox)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() # residual sum of squares do.call(anova, fits)[, 2] ## [1] 2.768563 2.035262 1.934107 1.932981 1.915290 1.878257 1.849484 1.835630 ## [9] 1.833331 1.832171 Perform cross-validation or another approach to select the optimal degree for the polynomial, and explain your results. res <- sapply(1:10, function(i) { fit <- glm(nox ~ poly(dis, i), data = Boston) cv.glm(Boston, fit, K = 10)$delta[1] }) which.min(res) ## [1] 4 The optimal degree is 3 based on cross-validation. Higher values tend to lead to overfitting. Use the bs() function to fit a regression spline to predict nox using dis. Report the output for the fit using four degrees of freedom. How did you choose the knots? Plot the resulting fit. fit <- glm(nox ~ bs(dis, df = 4), data = Boston) summary(fit) ## ## Call: ## glm(formula = nox ~ bs(dis, df = 4), data = Boston) ## ## Coefficients: ## Estimate Std. 
Error t value Pr(>|t|) ## (Intercept) 0.73447 0.01460 50.306 < 2e-16 *** ## bs(dis, df = 4)1 -0.05810 0.02186 -2.658 0.00812 ** ## bs(dis, df = 4)2 -0.46356 0.02366 -19.596 < 2e-16 *** ## bs(dis, df = 4)3 -0.19979 0.04311 -4.634 4.58e-06 *** ## bs(dis, df = 4)4 -0.38881 0.04551 -8.544 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 0.003837874) ## ## Null deviance: 6.7810 on 505 degrees of freedom ## Residual deviance: 1.9228 on 501 degrees of freedom ## AIC: -1371.9 ## ## Number of Fisher Scoring iterations: 2 plot(nox ~ dis, data = Boston, col = alpha("steelblue", 0.4), pch = 19) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) lines(x, predict(fit, data.frame(dis = x)), col = "red", lty = 2) Knots are chosen based on quantiles of the data. Now fit a regression spline for a range of degrees of freedom, and plot the resulting fits and report the resulting RSS. Describe the results obtained. fits <- lapply(3:10, function(i) { glm(nox ~ bs(dis, df = i), data = Boston) }) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) pred <- data.frame(lapply(fits, function(fit) predict(fit, data.frame(dis = x)))) colnames(pred) <- 3:10 pred$x <- x pred <- pivot_longer(pred, !x) ggplot(Boston, aes(dis, nox)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() At high numbers of degrees of freedom the splines overfit the data (particularly at extreme ends of the distribution of the predictor variable). Perform cross-validation or another approach in order to select the best degrees of freedom for a regression spline on this data. Describe your results. set.seed(42) err <- sapply(3:10, function(i) { fit <- glm(nox ~ bs(dis, df = i), data = Boston) suppressWarnings(cv.glm(Boston, fit, K = 10)$delta[1]) }) which.min(err) ## [1] 8 This approach would select 4 degrees of freedom for the spline. 7.2.5 Question 10 This question relates to the College data set. Split the data into a training set and a test set. Using out-of-state tuition as the response and the other variables as the predictors, perform forward stepwise selection on the training set in order to identify a satisfactory model that uses just a subset of the predictors. library(leaps) # helper function to predict from a regsubsets model predict.regsubsets <- function(object, newdata, id, ...) { form <- as.formula(object$call[[2]]) mat <- model.matrix(form, newdata) coefi <- coef(object, id = id) xvars <- names(coefi) mat[, xvars] %*% coefi } set.seed(42) train <- rep(TRUE, nrow(College)) train[sample(1:nrow(College), nrow(College) * 1 / 3)] <- FALSE fit <- regsubsets(Outstate ~ ., data = College[train, ], nvmax = 17, method = "forward") plot(summary(fit)$bic, type = "b") which.min(summary(fit)$bic) ## [1] 11 # or via cross-validation err <- sapply(1:17, function(i) { x <- coef(fit, id = i) mean((College$Outstate[!train] - predict(fit, College[!train, ], i))^2) }) which.min(err) ## [1] 16 min(summary(fit)$bic) ## [1] -690.9375 For the sake of simplicity we’ll choose 6 coef(fit, id = 6) ## (Intercept) PrivateYes Room.Board PhD perc.alumni ## -3540.0544008 2736.4231642 0.9061752 33.7848157 47.1998115 ## Expend Grad.Rate ## 0.2421685 33.3137332 Fit a GAM on the training data, using out-of-state tuition as the response and the features selected in the previous step as the predictors. Plot the results, and explain your findings. 
fit <- gam(Outstate ~ Private + s(Room.Board, 2) + s(PhD, 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), data = College[train, ]) Evaluate the model obtained on the test set, and explain the results obtained. pred <- predict(fit, College[!train, ]) err_gam <- mean((College$Outstate[!train] - pred)^2) plot(err, ylim = c(min(err_gam, err), max(err)), type = "b") abline(h = err_gam, col = "red", lty = 2) # r-squared 1 - err_gam / mean((College$Outstate[!train] - mean(College$Outstate[!train]))^2) ## [1] 0.7655905 For which variables, if any, is there evidence of a non-linear relationship with the response? summary(fit) ## ## Call: gam(formula = Outstate ~ Private + s(Room.Board, 2) + s(PhD, ## 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), ## data = College[train, ]) ## Deviance Residuals: ## Min 1Q Median 3Q Max ## -7112.59 -1188.98 33.13 1238.54 8738.65 ## ## (Dispersion Parameter for gaussian family taken to be 3577008) ## ## Null Deviance: 8471793308 on 517 degrees of freedom ## Residual Deviance: 1809966249 on 506.0001 degrees of freedom ## AIC: 9300.518 ## ## Number of Local Scoring Iterations: NA ## ## Anova for Parametric Effects ## Df Sum Sq Mean Sq F value Pr(>F) ## Private 1 2327235738 2327235738 650.610 < 2.2e-16 *** ## s(Room.Board, 2) 1 1741918028 1741918028 486.976 < 2.2e-16 *** ## s(PhD, 2) 1 668408518 668408518 186.863 < 2.2e-16 *** ## s(perc.alumni, 2) 1 387819183 387819183 108.420 < 2.2e-16 *** ## s(Expend, 2) 1 625813340 625813340 174.954 < 2.2e-16 *** ## s(Grad.Rate, 2) 1 111881207 111881207 31.278 3.664e-08 *** ## Residuals 506 1809966249 3577008 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Anova for Nonparametric Effects ## Npar Df Npar F Pr(F) ## (Intercept) ## Private ## s(Room.Board, 2) 1 2.224 0.13648 ## s(PhD, 2) 1 5.773 0.01664 * ## s(perc.alumni, 2) 1 0.365 0.54581 ## s(Expend, 2) 1 61.182 3.042e-14 *** ## s(Grad.Rate, 2) 1 4.126 0.04274 * ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Non-linear relationships are significant for Expend and PhD. 7.2.6 Question 11 In Section 7.7, it was mentioned that GAMs are generally fit using a backfitting approach. The idea behind backfitting is actually quite simple. We will now explore backfitting in the context of multiple linear regression. Suppose that we would like to perform multiple linear regression, but we do not have software to do so. Instead, we only have software to perform simple linear regression. Therefore, we take the following iterative approach: we repeatedly hold all but one coefficient estimate fixed at its current value, and update only that coefficient estimate using a simple linear regression. The process is continued until convergence—that is, until the coefficient estimates stop changing. We now try this out on a toy example. Generate a response \\(Y\\) and two predictors \\(X_1\\) and \\(X_2\\), with \\(n = 100\\). set.seed(42) x1 <- rnorm(100) x2 <- rnorm(100) y <- 2 + 0.2 * x1 + 4 * x2 + rnorm(100) Initialize \\(\\hat{\\beta}_1\\) to take on a value of your choice. It does not matter 1 what value you choose. 
beta1 <- 20 Keeping \\(\\hat{\\beta}_1\\) fixed, fit the model \\[Y - \\hat{\\beta}_1X_1 = \\beta_0 + \\beta_2X_2 + \\epsilon.\\] You can do this as follows: > a <- y - beta1 * x1 > beta2 <- lm(a ~ x2)$coef[2] a <- y - beta1*x1 beta2 <- lm(a ~ x2)$coef[2] Keeping \\(\\hat{\\beta}_2\\) fixed, fit the model \\[Y - \\hat{\\beta}_2X_2 = \\beta_0 + \\beta_1 X_1 + \\epsilon.\\] You can do this as follows: > a <- y - beta2 * x2 > beta1 <- lm(a ~ x1)$coef[2] a <- y - beta2 * x2 beta1 <- lm(a ~ x1)$coef[2] Write a for loop to repeat (c) and (d) 1,000 times. Report the estimates of \\(\\hat{\\beta}_0, \\hat{\\beta}_1,\\) and \\(\\hat{\\beta}_2\\) at each iteration of the for loop. Create a plot in which each of these values is displayed, with \\(\\hat{\\beta}_0, \\hat{\\beta}_1,\\) and \\(\\hat{\\beta}_2\\) each shown in a different color. res <- matrix(NA, nrow = 1000, ncol = 3) colnames(res) <- c("beta0", "beta1", "beta2") beta1 <- 20 for (i in 1:1000) { beta2 <- lm(y - beta1*x1 ~ x2)$coef[2] beta1 <- lm(y - beta2*x2 ~ x1)$coef[2] beta0 <- lm(y - beta2*x2 ~ x1)$coef[1] res[i, ] <- c(beta0, beta1, beta2) } res <- as.data.frame(res) res$Iteration <- 1:1000 res <- pivot_longer(res, !Iteration) p <- ggplot(res, aes(x=Iteration, y=value, color=name)) + geom_line() + geom_point() + scale_x_continuous(trans = "log10") p Compare your answer in (e) to the results of simply performing multiple linear regression to predict \\(Y\\) using \\(X_1\\) and \\(X_2\\). Use the abline() function to overlay those multiple linear regression coefficient estimates on the plot obtained in (e). fit <- lm(y ~ x1 + x2) coef(fit) ## (Intercept) x1 x2 ## 2.00176627 0.05629075 4.08529318 p + geom_hline(yintercept = coef(fit), lty = 2) On this data set, how many backfitting iterations were required in order to obtain a “good” approximation to the multiple regression coefficient estimates? In this case, good estimates were obtained after 3 iterations. 7.2.7 Question 12 This problem is a continuation of the previous exercise. In a toy example with \\(p = 100\\), show that one can approximate the multiple linear regression coefficient estimates by repeatedly performing simple linear regression in a backfitting procedure. How many backfitting iterations are required in order to obtain a “good” approximation to the multiple regression coefficient estimates? Create a plot to justify your answer. 
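Rather than judging convergence only from the plot in Question 11, one could iterate until the coefficient estimates stop changing by more than a tolerance; a sketch assuming the x1, x2 and y simulated above (the tolerance is arbitrary and this is not part of the original solution):
beta1 <- 20; beta2 <- 0; tol <- 1e-8; iters <- 0
repeat {
  old <- c(beta1, beta2)
  beta2 <- lm(y - beta1 * x1 ~ x2)$coef[2]
  beta1 <- lm(y - beta2 * x2 ~ x1)$coef[2]
  iters <- iters + 1
  if (max(abs(c(beta1, beta2) - old)) < tol) break
}
iters # number of backfitting sweeps needed for the estimates to stabilise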
set.seed(42) p <- 100 n <- 1000 betas <- rnorm(p) * 5 x <- matrix(rnorm(n * p), ncol = p, nrow = n) y <- (x %*% betas) + rnorm(n) # ignore beta0 for simplicity # multiple regression fit <- lm(y ~ x - 1) coef(fit) ## x1 x2 x3 x4 x5 x6 ## 6.9266184 -2.8428817 1.8686821 3.1466472 1.9601927 -0.5529214 ## x7 x8 x9 x10 x11 x12 ## 7.4786723 -0.4454637 10.0816005 -0.2391234 6.5832468 11.4451280 ## x13 x14 x15 x16 x17 x18 ## -6.9684368 -1.3604495 -0.6310041 3.1786639 -1.4470502 -13.2957027 ## x19 x20 x21 x22 x23 x24 ## -12.2061834 6.5765842 -1.5227262 -8.8855906 -0.8422954 6.1189230 ## x25 x26 x27 x28 x29 x30 ## 9.4395267 -2.1697854 -1.2738835 -8.8457987 2.2851699 -3.1922704 ## x31 x32 x33 x34 x35 x36 ## 2.2812995 3.4695892 5.1162617 -3.0423873 2.4985589 -8.5952764 ## x37 x38 x39 x40 x41 x42 ## -3.9539370 -4.2616463 -12.0038342 0.1981058 1.0559250 -1.8205017 ## x43 x44 x45 x46 x47 x48 ## 3.7739990 -3.6240020 -6.8575534 2.1042998 -4.0228773 7.1880298 ## x49 x50 x51 x52 x53 x54 ## -2.1967821 3.3137115 1.6406524 -3.9402065 7.9067705 3.1815846 ## x55 x56 x57 x58 x59 x60 ## 0.4504175 1.4003479 3.3999814 0.4317695 -14.9255798 1.3816878 ## x61 x62 x63 x64 x65 x66 ## -1.8071634 0.9907740 2.9771540 6.9528872 -3.5956916 6.5283946 ## x67 x68 x69 x70 x71 x72 ## 1.6798820 5.1911857 4.5573256 3.5961319 -5.1909352 -0.4869003 ## x73 x74 x75 x76 x77 x78 ## 3.1472166 -4.7898363 -2.7402076 2.9247173 3.8659938 2.3686379 ## x79 x80 x81 x82 x83 x84 ## -4.4261734 -5.5020688 7.5807239 1.3010702 0.4378713 -0.5856580 ## x85 x86 x87 x88 x89 x90 ## -5.9799328 3.0089329 -1.1230969 -0.8857679 4.7211363 4.1042952 ## x91 x92 x93 x94 x95 x96 ## 6.9492037 -2.3959211 3.2188522 6.9947040 -5.5392641 -4.3114784 ## x97 x98 x99 x100 ## -5.7287292 -7.3148812 0.3454408 3.2830658 # backfitting backfit <- function(x, y, iter = 20) { beta <- matrix(0, ncol = ncol(x), nrow = iter + 1) for (i in 1:iter) { for (k in 1:ncol(x)) { residual <- y - (x[, -k] %*% beta[i, -k]) beta[i + 1, k] <- lm(residual ~ x[, k])$coef[2] } } beta[-1, ] } res <- backfit(x, y) error <- rowMeans(sweep(res, 2, betas)^2) plot(error, log = "x", type = "b") # backfitting error error[length(error)] ## [1] 0.001142494 # lm error mean((coef(fit) - betas)^2) ## [1] 0.001138655 We need around 5 to 6 iterations to obtain a good estimate of the coefficients. "],["tree-based-methods.html", "8 Tree-Based Methods 8.1 Conceptual 8.2 Applied", " 8 Tree-Based Methods 8.1 Conceptual 8.1.1 Question 1 Draw an example (of your own invention) of a partition of two-dimensional feature space that could result from recursive binary splitting. Your example should contain at least six regions. Draw a decision tree corresponding to this partition. Be sure to label all aspects of your figures, including the regions \\(R_1, R_2, ...,\\) the cutpoints \\(t_1, t_2, ...,\\) and so forth. Hint: Your result should look something like Figures 8.1 and 8.2. 
library(showtext) showtext::showtext_auto() library(ggplot2) library(tidyverse) library(ggtree) tree <- ape::read.tree(text = "(((R1:1,R2:1)N1:2,R3:4)N2:2,(R4:2,(R5:1,R6:1)R3:2)N4:5)R;") tree$node.label <- c("Age < 40", "Weight < 100", "Weight < 70", "Age < 60", "Weight < 80") ggtree(tree, ladderize = FALSE) + scale_x_reverse() + coord_flip() + geom_tiplab(vjust = 2, hjust = 0.5) + geom_text2(aes(label=label, subset=!isTip), hjust = -0.1, vjust = -1) plot(NULL, xlab="Age (years)", ylab="Weight (kg)", xlim = c(0, 100), ylim = c(40, 160), xaxs = "i", yaxs = "i") abline(v = 40, col = "red", lty = 2) lines(c(0, 40), c(100, 100), col = "blue", lty = 2) lines(c(0, 40), c(70, 70), col = "blue", lty = 2) abline(v = 60, col = "red", lty = 2) lines(c(60, 100), c(80, 80), col = "blue", lty = 2) text( c(20, 20, 20, 50, 80, 80), c(55, 85, 130, 100, 60, 120), labels = c("R1", "R2", "R3", "R4", "R5", "R6") ) 8.1.2 Question 2 It is mentioned in Section 8.2.3 that boosting using depth-one trees (or stumps) leads to an additive model: that is, a model of the form \\[ f(X) = \\sum_{j=1}^p f_j(X_j). \\] Explain why this is the case. You can begin with (8.12) in Algorithm 8.2. Equation 8.1 is: \\[ f(x) = \\sum_{b=1}^B(\\lambda \\hat{f}^b(x) \\] where \\(\\hat{f}^b(x)\\) represents the \\(b\\)th tree with (in this case) 1 split. Since 1-depth trees involve only one variable, and the total function for \\(x\\) involves adding the outcome for each, this model is an additive. Depth 2 trees would allow for interactions between two variables. 8.1.3 Question 3 Consider the Gini index, classification error, and cross-entropy in a simple classification setting with two classes. Create a single plot that displays each of these quantities as a function of \\(\\hat{p}_{m1}\\). The \\(x\\)-axis should display \\(\\hat{p}_{m1}\\), ranging from 0 to 1, and the \\(y\\)-axis should display the value of the Gini index, classification error, and entropy. Hint: In a setting with two classes, \\(\\hat{p}_{m1} = 1 - \\hat{p}_{m2}\\). You could make this plot by hand, but it will be much easier to make in R. The Gini index is defined by \\[G = \\sum_{k=1}^{K} \\hat{p}_{mk}(1 - \\hat{p}_{mk})\\] Entropy is given by \\[D = -\\sum_{k=1}^{K} \\hat{p}_{mk}\\log(\\hat{p}_{mk})\\] The classification error is \\[E = 1 - \\max_k(\\hat{p}_{mk})\\] # Function definitions are for when there's two classes only p <- seq(0, 1, length.out = 100) data.frame( x = p, "Gini index" = p * (1 - p) * 2, "Entropy" = -(p * log(p) + (1 - p) * log(1 - p)), "Classification error" = 1 - pmax(p, 1 - p), check.names = FALSE ) |> pivot_longer(!x) |> ggplot(aes(x = x, y = value, color = name)) + geom_line(na.rm = TRUE) 8.1.4 Question 4 This question relates to the plots in Figure 8.12. Sketch the tree corresponding to the partition of the predictor space illustrated in the left-hand panel of Figure 8.12. The numbers inside the boxes indicate the mean of \\(Y\\) within each region. tree <- ape::read.tree(text = "(((3:1.5,(10:1,0:1)A:1)B:1,15:2)C:1,5:2)D;") tree$node.label <- c("X1 < 1", "X2 < 1", "X1 < 0", "X2 < 0") ggtree(tree, ladderize = FALSE) + scale_x_reverse() + coord_flip() + geom_tiplab(vjust = 2, hjust = 0.5) + geom_text2(aes(label=label, subset=!isTip), hjust = -0.1, vjust = -1) Create a diagram similar to the left-hand panel of Figure 8.12, using the tree illustrated in the right-hand panel of the same figure. You should divide up the predictor space into the correct regions, and indicate the mean for each region. 
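As a concrete illustration of the additivity argument in Question 2, one can boost depth-one stumps directly. This sketch uses the gbm package, which is not used elsewhere in these solutions, and the choice of predictors from Auto is arbitrary:
library(gbm)
set.seed(42)
stumps <- gbm(mpg ~ horsepower + weight + year, data = ISLR2::Auto,
  distribution = "gaussian", n.trees = 1000, interaction.depth = 1)
# With depth-one trees each term in the boosted sum involves a single predictor,
# so the fitted model is additive and its partial dependence plots describe it fully.
plot(stumps, i.var = "horsepower")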
plot(NULL, xlab="X1", ylab="X2", xlim = c(-1, 2), ylim = c(0, 3), xaxs = "i", yaxs = "i") abline(h = 1, col = "red", lty = 2) lines(c(1, 1), c(0, 1), col = "blue", lty = 2) lines(c(-1, 2), c(2, 2), col = "red", lty = 2) lines(c(0, 0), c(1, 2), col = "blue", lty = 2) text( c(0, 1.5, -0.5, 1, 0.5), c(0.5, 0.5, 1.5, 1.5, 2.5), labels = c("-1.80", "0.63", "-1.06", "0.21", "2.49") ) 8.1.5 Question 5 Suppose we produce ten bootstrapped samples from a data set containing red and green classes. We then apply a classification tree to each bootstrapped sample and, for a specific value of \\(X\\), produce 10 estimates of \\(P(\\textrm{Class is Red}|X)\\): \\[0.1, 0.15, 0.2, 0.2, 0.55, 0.6, 0.6, 0.65, 0.7, \\textrm{and } 0.75.\\] There are two common ways to combine these results together into a single class prediction. One is the majority vote approach discussed in this chapter. The second approach is to classify based on the average probability. In this example, what is the final classification under each of these two approaches? x <- c(0.1, 0.15, 0.2, 0.2, 0.55, 0.6, 0.6, 0.65, 0.7, 0.75) ifelse(mean(x > 0.5), "red", "green") # majority vote ## [1] "red" ifelse(mean(x) > 0.5, "red", "green") # average probability ## [1] "green" 8.1.6 Question 6 Provide a detailed explanation of the algorithm that is used to fit a regression tree. First we perform binary recursive splitting of the data, to minimize RSS at each split. This is continued until there are n samples present in each leaf. Then we prune the tree to a set of subtrees determined by a parameter \\(\\alpha\\). Using K-fold CV, we select \\(\\alpha\\) to minimize the cross validation error. The final tree is then calculated using the complete dataset with the selected \\(\\alpha\\) value. 8.2 Applied 8.2.1 Question 7 In the lab, we applied random forests to the Boston data using mtry = 6 and using ntree = 25 and ntree = 500. Create a plot displaying the test error resulting from random forests on this data set for a more comprehensive range of values for mtry and ntree. You can model your plot after Figure 8.10. Describe the results obtained. library(ISLR2) library(randomForest) ## randomForest 4.7-1.1 ## Type rfNews() to see new features/changes/bug fixes. ## ## Attaching package: 'randomForest' ## The following object is masked from 'package:ggtree': ## ## margin ## The following object is masked from 'package:dplyr': ## ## combine ## The following object is masked from 'package:ggplot2': ## ## margin set.seed(42) train <- sample(c(TRUE, FALSE), nrow(Boston), replace = TRUE) rf_err <- function(mtry) { randomForest( Boston[train, -13], y = Boston[train, 13], xtest = Boston[!train, -13], ytest = Boston[!train, 13], mtry = mtry, ntree = 500 )$test$mse } res <- lapply(c(1, 2, 3, 5, 7, 10, 12), rf_err) names(res) <- c(1, 2, 3, 5, 7, 10, 12) data.frame(res, check.names = FALSE) |> mutate(n = 1:500) |> pivot_longer(!n) |> ggplot(aes(x = n, y = value, color = name)) + geom_line(na.rm = TRUE) + xlab("Number of trees") + ylab("Error") + scale_y_log10() + scale_color_discrete(name = "No. variables at\\neach split") 8.2.2 Question 8 In the lab, a classification tree was applied to the Carseats data set after converting Sales into a qualitative response variable. Now we will seek to predict Sales using regression trees and related approaches, treating the response as a quantitative variable. Split the data set into a training set and a test set. 
set.seed(42) train <- sample(c(TRUE, FALSE), nrow(Carseats), replace = TRUE) Fit a regression tree to the training set. Plot the tree, and interpret the results. What test error rate do you obtain? library(tree) tr <- tree(Sales ~ ., data = Carseats[train, ]) summary(tr) ## ## Regression tree: ## tree(formula = Sales ~ ., data = Carseats[train, ]) ## Variables actually used in tree construction: ## [1] "ShelveLoc" "Price" "Income" "Advertising" "CompPrice" ## [6] "Age" ## Number of terminal nodes: 16 ## Residual mean deviance: 2.356 = 424.1 / 180 ## Distribution of residuals: ## Min. 1st Qu. Median Mean 3rd Qu. Max. ## -4.54900 -0.82980 0.03075 0.00000 0.89250 4.83100 plot(tr) text(tr, pretty = 0, digits = 2, cex = 0.8) carseats_mse <- function(model) { p <- predict(model, newdata = Carseats[!train, ]) mean((p - Carseats[!train, "Sales"])^2) } carseats_mse(tr) ## [1] 4.559764 Use cross-validation in order to determine the optimal level of tree complexity. Does pruning the tree improve the test error rate? res <- cv.tree(tr) plot(res$size, res$dev, type = "b", xlab = "Tree size", ylab = "Deviance") min <- which.min(res$dev) abline(v = res$size[min], lty = 2, col = "red") Pruning improves performance very slightly (though this is not repeatable in different rounds of cross-validation). Arguably, a good balance is achieved when the tree size is 11. ptr <- prune.tree(tr, best = 11) plot(ptr) text(ptr, pretty = 0, digits = 2, cex = 0.8) carseats_mse(ptr) ## [1] 4.625875 Use the bagging approach in order to analyze this data. What test error rate do you obtain? Use the importance() function to determine which variables are most important. # Here we can use random Forest with mtry = 10 = p (the number of predictor # variables) to perform bagging bagged <- randomForest(Sales ~ ., data = Carseats[train, ], mtry = 10, ntree = 200, importance = TRUE) carseats_mse(bagged) ## [1] 2.762861 importance(bagged) ## %IncMSE IncNodePurity ## CompPrice 11.2608998 104.474222 ## Income 5.0953983 73.275066 ## Advertising 12.9011190 125.886762 ## Population 3.4071044 60.095200 ## Price 34.6904380 450.952728 ## ShelveLoc 33.7059874 374.808575 ## Age 7.9101141 143.652934 ## Education -2.1154997 32.712444 ## Urban 0.9604097 7.029648 ## US 3.1336559 6.287048 The test error rate is ~2.8 which is a substantial improvement over the pruned regression tree above. Use random forests to analyze this data. What test error rate do you obtain? Use the importance() function to determine which variables are most important. Describe the effect of \\(m\\), the number of variables considered at each split, on the error rate obtained. rf <- randomForest(Sales ~ ., data = Carseats[train, ], mtry = 3, ntree = 500, importance = TRUE) carseats_mse(rf) ## [1] 3.439357 importance(rf) ## %IncMSE IncNodePurity ## CompPrice 8.5717587 122.75189 ## Income 2.8955756 116.33951 ## Advertising 13.0681194 128.13563 ## Population 2.0475415 104.03803 ## Price 34.7934136 342.84663 ## ShelveLoc 39.0704834 292.56638 ## Age 7.7941744 135.69061 ## Education 0.8770806 64.67614 ## Urban -0.3301478 13.83594 ## US 6.2716539 22.07306 The test error rate is ~3.0 which is a substantial improvement over the pruned regression tree above, although not quite as good as the bagging approach. Now analyze the data using BART, and report your results. 
library(BART) ## Loading required package: nlme ## ## Attaching package: 'nlme' ## The following object is masked from 'package:ggtree': ## ## collapse ## The following object is masked from 'package:dplyr': ## ## collapse ## Loading required package: survival # For ease, we'll create a fake "predict" method that just returns # yhat.test.mean regardless of provided "newdata" predict.wbart <- function(model, ...) model$yhat.test.mean bartfit <- gbart(Carseats[train, 2:11], Carseats[train, 1], x.test = Carseats[!train, 2:11]) ## *****Calling gbart: type=1 ## *****Data: ## data:n,p,np: 196, 14, 204 ## y1,yn: 2.070867, 2.280867 ## x1,x[n*p]: 138.000000, 1.000000 ## xp1,xp[np*p]: 141.000000, 1.000000 ## *****Number of Trees: 200 ## *****Number of Cut Points: 58 ... 1 ## *****burn,nd,thin: 100,1000,1 ## *****Prior:beta,alpha,tau,nu,lambda,offset: 2,0.95,0.287616,3,0.21118,7.42913 ## *****sigma: 1.041218 ## *****w (weights): 1.000000 ... 1.000000 ## *****Dirichlet:sparse,theta,omega,a,b,rho,augment: 0,0,1,0.5,1,14,0 ## *****printevery: 100 ## ## MCMC ## done 0 (out of 1100) ## done 100 (out of 1100) ## done 200 (out of 1100) ## done 300 (out of 1100) ## done 400 (out of 1100) ## done 500 (out of 1100) ## done 600 (out of 1100) ## done 700 (out of 1100) ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) ## time: 2s ## trcnt,tecnt: 1000,1000 carseats_mse(bartfit) ## [1] 1.631285 The test error rate is ~1.6 which is an improvement over random forest and bagging. 8.2.3 Question 9 This problem involves the OJ data set which is part of the ISLR2 package. Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations. set.seed(42) train <- sample(1:nrow(OJ), 800) test <- setdiff(1:nrow(OJ), train) Fit a tree to the training data, with Purchase as the response and the other variables except for Buy as predictors. Use the summary() function to produce summary statistics about the tree, and describe the results obtained. What is the training error rate? How many terminal nodes does the tree have? tr <- tree(Purchase ~ ., data = OJ[train, ]) summary(tr) ## ## Classification tree: ## tree(formula = Purchase ~ ., data = OJ[train, ]) ## Variables actually used in tree construction: ## [1] "LoyalCH" "SalePriceMM" "PriceDiff" ## Number of terminal nodes: 8 ## Residual mean deviance: 0.7392 = 585.5 / 792 ## Misclassification error rate: 0.1638 = 131 / 800 Type in the name of the tree object in order to get a detailed text output. Pick one of the terminal nodes, and interpret the information displayed. 
tr ## node), split, n, deviance, yval, (yprob) ## * denotes terminal node ## ## 1) root 800 1066.00 CH ( 0.61500 0.38500 ) ## 2) LoyalCH < 0.48285 285 296.00 MM ( 0.21404 0.78596 ) ## 4) LoyalCH < 0.064156 64 0.00 MM ( 0.00000 1.00000 ) * ## 5) LoyalCH > 0.064156 221 260.40 MM ( 0.27602 0.72398 ) ## 10) SalePriceMM < 2.04 128 123.50 MM ( 0.18750 0.81250 ) * ## 11) SalePriceMM > 2.04 93 125.00 MM ( 0.39785 0.60215 ) * ## 3) LoyalCH > 0.48285 515 458.10 CH ( 0.83689 0.16311 ) ## 6) LoyalCH < 0.753545 230 282.70 CH ( 0.69565 0.30435 ) ## 12) PriceDiff < 0.265 149 203.00 CH ( 0.57718 0.42282 ) ## 24) PriceDiff < -0.165 32 38.02 MM ( 0.28125 0.71875 ) * ## 25) PriceDiff > -0.165 117 150.30 CH ( 0.65812 0.34188 ) ## 50) LoyalCH < 0.703993 105 139.60 CH ( 0.61905 0.38095 ) * ## 51) LoyalCH > 0.703993 12 0.00 CH ( 1.00000 0.00000 ) * ## 13) PriceDiff > 0.265 81 47.66 CH ( 0.91358 0.08642 ) * ## 7) LoyalCH > 0.753545 285 111.70 CH ( 0.95088 0.04912 ) * Create a plot of the tree, and interpret the results. plot(tr) text(tr, pretty = 0, digits = 2, cex = 0.8) Predict the response on the test data, and produce a confusion matrix comparing the test labels to the predicted test labels. What is the test error rate? table(predict(tr, OJ[test, ], type = "class"), OJ[test, "Purchase"]) ## ## CH MM ## CH 125 15 ## MM 36 94 Apply the cv.tree() function to the training set in order to determine the optimal tree size. set.seed(42) res <- cv.tree(tr) Produce a plot with tree size on the \\(x\\)-axis and cross-validated classification error rate on the \\(y\\)-axis. plot(res$size, res$dev, type = "b", xlab = "Tree size", ylab = "Deviance") min <- which.min(res$dev) abline(v = res$size[min], lty = 2, col = "red") Which tree size corresponds to the lowest cross-validated classification error rate? res$size[min] ## [1] 6 Produce a pruned tree corresponding to the optimal tree size obtained using cross-validation. If cross-validation does not lead to selection of a pruned tree, then create a pruned tree with five terminal nodes. ptr <- prune.tree(tr, best = res$size[min]) plot(ptr) text(ptr, pretty = 0, digits = 2, cex = 0.8) Compare the training error rates between the pruned and unpruned trees. Which is higher? oj_misclass <- function(model) { summary(model)$misclass[1] / summary(model)$misclass[2] } oj_misclass(tr) ## [1] 0.16375 oj_misclass(ptr) ## [1] 0.16375 The training misclassification error rate is slightly higher for the pruned tree. Compare the test error rates between the pruned and unpruned trees. Which is higher? oj_err <- function(model) { p <- predict(model, newdata = OJ[test, ], type = "class") mean(p != OJ[test, "Purchase"]) } oj_err(tr) ## [1] 0.1888889 oj_err(ptr) ## [1] 0.1888889 The test misclassification error rate is slightly higher for the pruned tree. 8.2.4 Question 10 We now use boosting to predict Salary in the Hitters data set. Remove the observations for whom the salary information is unknown, and then log-transform the salaries. dat <- Hitters dat <- dat[!is.na(dat$Salary), ] dat$Salary <- log(dat$Salary) Create a training set consisting of the first 200 observations, and a test set consisting of the remaining observations. train <- 1:200 test <- setdiff(1:nrow(dat), train) Perform boosting on the training set with 1,000 trees for a range of values of the shrinkage parameter \\(\\lambda\\). Produce a plot with different shrinkage values on the \\(x\\)-axis and the corresponding training set MSE on the \\(y\\)-axis. 
library(gbm) ## Loaded gbm 2.2.2 ## This version of gbm is no longer under development. Consider transitioning to gbm3, https://github.com/gbm-developers/gbm3 set.seed(42) lambdas <- 10 ^ seq(-3, 0, by = 0.1) fits <- lapply(lambdas, function(lam) { gbm(Salary ~ ., data = dat[train, ], distribution = "gaussian", n.trees = 1000, shrinkage = lam) }) errs <- sapply(fits, function(fit) { p <- predict(fit, dat[train, ], n.trees = 1000) mean((p - dat[train, ]$Salary)^2) }) plot(lambdas, errs, type = "b", xlab = "Shrinkage values", ylab = "Training MSE", log = "xy") Produce a plot with different shrinkage values on the \\(x\\)-axis and the corresponding test set MSE on the \\(y\\)-axis. errs <- sapply(fits, function(fit) { p <- predict(fit, dat[test, ], n.trees = 1000) mean((p - dat[test, ]$Salary)^2) }) plot(lambdas, errs, type = "b", xlab = "Shrinkage values", ylab = "Training MSE", log = "xy") min(errs) ## [1] 0.249881 abline(v = lambdas[which.min(errs)], lty = 2, col = "red") Compare the test MSE of boosting to the test MSE that results from applying two of the regression approaches seen in Chapters 3 and 6. Linear regression fit1 <- lm(Salary ~ ., data = dat[train, ]) mean((predict(fit1, dat[test, ]) - dat[test, "Salary"])^2) ## [1] 0.4917959 Ridge regression library(glmnet) ## Loading required package: Matrix ## ## Attaching package: 'Matrix' ## The following object is masked from 'package:ggtree': ## ## expand ## The following objects are masked from 'package:tidyr': ## ## expand, pack, unpack ## Loaded glmnet 4.1-8 x <- model.matrix(Salary ~ ., data = dat[train, ]) x.test <- model.matrix(Salary ~ ., data = dat[test, ]) y <- dat[train, "Salary"] fit2 <- glmnet(x, y, alpha = 1) mean((predict(fit2, s = 0.1, newx = x.test) - dat[test, "Salary"])^2) ## [1] 0.4389054 Which variables appear to be the most important predictors in the boosted model? summary(fits[[which.min(errs)]]) ## var rel.inf ## CAtBat CAtBat 16.4755242 ## CRBI CRBI 9.0670759 ## CHits CHits 8.9307168 ## CRuns CRuns 7.6839786 ## CWalks CWalks 7.1014886 ## PutOuts PutOuts 6.7869382 ## AtBat AtBat 5.8567916 ## Walks Walks 5.8479836 ## Years Years 5.3349489 ## Assists Assists 5.0076392 ## CHmRun CHmRun 4.6606919 ## RBI RBI 3.9255396 ## Hits Hits 3.8123124 ## HmRun HmRun 3.4462640 ## Runs Runs 2.4779866 ## Errors Errors 2.2341326 ## NewLeague NewLeague 0.5788283 ## Division Division 0.4880237 ## League League 0.2831352 Now apply bagging to the training set. What is the test set MSE for this approach? set.seed(42) bagged <- randomForest(Salary ~ ., data = dat[train, ], mtry = 19, ntree = 1000) mean((predict(bagged, newdata = dat[test, ]) - dat[test, "Salary"])^2) ## [1] 0.2278813 8.2.5 Question 11 This question uses the Caravan data set. Create a training set consisting of the first 1,000 observations, and a test set consisting of the remaining observations. train <- 1:1000 test <- setdiff(1:nrow(Caravan), train) Fit a boosting model to the training set with Purchase as the response and the other variables as predictors. Use 1,000 trees, and a shrinkage value of 0.01. Which predictors appear to be the most important? set.seed(42) fit <- gbm(as.numeric(Purchase == "Yes") ~ ., data = Caravan[train, ], n.trees = 1000, shrinkage = 0.01) ## Distribution not specified, assuming bernoulli ... ## Warning in gbm.fit(x = x, y = y, offset = offset, distribution = distribution, ## : variable 50: PVRAAUT has no variation. ## Warning in gbm.fit(x = x, y = y, offset = offset, distribution = distribution, ## : variable 71: AVRAAUT has no variation. 
head(summary(fit)) ## var rel.inf ## PPERSAUT PPERSAUT 15.243041 ## MKOOPKLA MKOOPKLA 10.220498 ## MOPLHOOG MOPLHOOG 7.584734 ## MBERMIDD MBERMIDD 5.983650 ## PBRAND PBRAND 4.557491 ## ABRAND ABRAND 4.076017 Use the boosting model to predict the response on the test data. Predict that a person will make a purchase if the estimated probability of purchase is greater than 20%. Form a confusion matrix. What fraction of the people predicted to make a purchase do in fact make one? How does this compare with the results obtained from applying KNN or logistic regression to this data set? p <- predict(fit, Caravan[test, ], n.trees = 1000, type = "response") table(p > 0.2, Caravan[test, "Purchase"] == "Yes") ## ## FALSE TRUE ## FALSE 4415 257 ## TRUE 118 32 sum(p > 0.2 & Caravan[test, "Purchase"] == "Yes") / sum(p > 0.2) ## [1] 0.2133333 141 (109 + 32) are predicted to purchase. Of these 32 do which is 21%. # Logistic regression fit <- glm(Purchase == "Yes" ~ ., data = Caravan[train, ], family = "binomial") ## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred p <- predict(fit, Caravan[test, ], type = "response") ## Warning in predict.lm(object, newdata, se.fit, scale = 1, type = if (type == : ## prediction from rank-deficient fit; attr(*, "non-estim") has doubtful cases table(p > 0.2, Caravan[test, "Purchase"] == "Yes") ## ## FALSE TRUE ## FALSE 4183 231 ## TRUE 350 58 sum(p > 0.2 & Caravan[test, "Purchase"] == "Yes") / sum(p > 0.2) ## [1] 0.1421569 For logistic regression we correctly predict 14% of those predicted to purchase. library(class) # KNN fit <- knn(Caravan[train, -86], Caravan[test, -86], Caravan$Purchase[train]) table(fit, Caravan[test, "Purchase"] == "Yes") ## ## fit FALSE TRUE ## No 4260 263 ## Yes 273 26 sum(fit == "Yes" & Caravan[test, "Purchase"] == "Yes") / sum(fit == "Yes") ## [1] 0.08695652 For KNN we correctly predict 8.7% of those predicted to purchase. 8.2.6 Question 12 Apply boosting, bagging, random forests and BART to a data set of your choice. Be sure to fit the models on a training set and to evaluate their performance on a test set. How accurate are the results compared to simple methods like linear or logistic regression? Which of these approaches yields the best performance? Here I’m going to use the College dataset (used in Question 10 from Chapter 7 to compare performance with the GAM we previously built). In this model we were trying to predict Outstate using the other variables in College. library(gam) ## Loading required package: splines ## Loading required package: foreach ## ## Attaching package: 'foreach' ## The following objects are masked from 'package:purrr': ## ## accumulate, when ## Loaded gam 1.22-4 set.seed(42) train <- sample(1:nrow(College), 400) test <- setdiff(1:nrow(College), train) # Linear regression lr <- gam(Outstate ~ ., data = College[train, ]) # GAM from chapter 7 gam <- gam(Outstate ~ Private + s(Room.Board, 2) + s(PhD, 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), data = College[train, ]) # Boosting boosted <- gbm(Outstate ~ ., data = College[train, ], n.trees = 1000, shrinkage = 0.01) ## Distribution not specified, assuming gaussian ... # Bagging (random forest with mtry = no. predictors) bagged <- randomForest(Outstate ~ ., data = College[train, ], mtry = 17, ntree = 1000) # Random forest with mtry = sqrt(no. 
predictors) rf <- randomForest(Outstate ~ ., data = College[train, ], mtry = 4, ntree = 1000) # BART pred <- setdiff(colnames(College), "Outstate") bart <- gbart(College[train, pred], College[train, "Outstate"], x.test = College[test, pred]) ## *****Calling gbart: type=1 ## *****Data: ## data:n,p,np: 400, 18, 377 ## y1,yn: -4030.802500, 77.197500 ## x1,x[n*p]: 1.000000, 71.000000 ## xp1,xp[np*p]: 0.000000, 99.000000 ## *****Number of Trees: 200 ## *****Number of Cut Points: 1 ... 75 ## *****burn,nd,thin: 100,1000,1 ## *****Prior:beta,alpha,tau,nu,lambda,offset: 2,0.95,301.581,3,715815,10580.8 ## *****sigma: 1916.969943 ## *****w (weights): 1.000000 ... 1.000000 ## *****Dirichlet:sparse,theta,omega,a,b,rho,augment: 0,0,1,0.5,1,18,0 ## *****printevery: 100 ## ## MCMC ## done 0 (out of 1100) ## done 100 (out of 1100) ## done 200 (out of 1100) ## done 300 (out of 1100) ## done 400 (out of 1100) ## done 500 (out of 1100) ## done 600 (out of 1100) ## done 700 (out of 1100) ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) ## time: 4s ## trcnt,tecnt: 1000,1000 mse <- function(model, ...) { pred <- predict(model, College[test, ], ...) mean((College$Outstate[test] - pred)^2) } res <- c( "Linear regression" = mse(lr), "GAM" = mse(gam), "Boosting" = mse(boosted, n.trees = 1000), "Bagging" = mse(bagged), "Random forest" = mse(rf), "BART" = mse(bart) ) res <- data.frame("MSE" = res) res$Model <- factor(row.names(res), levels = rev(row.names(res))) ggplot(res, aes(Model, MSE)) + coord_flip() + geom_bar(stat = "identity", fill = "steelblue") In this case, it looks like bagging produces the best performing model in terms of test mean square error. "],["support-vector-machines.html", "9 Support Vector Machines 9.1 Conceptual 9.2 Applied", " 9 Support Vector Machines 9.1 Conceptual 9.1.1 Question 1 This problem involves hyperplanes in two dimensions. Sketch the hyperplane \\(1 + 3X_1 − X_2 = 0\\). Indicate the set of points for which \\(1 + 3X_1 − X_2 > 0\\), as well as the set of points for which \\(1 + 3X_1 − X_2 < 0\\). library(ggplot2) xlim <- c(-10, 10) ylim <- c(-30, 30) points <- expand.grid( X1 = seq(xlim[1], xlim[2], length.out = 50), X2 = seq(ylim[1], ylim[2], length.out = 50) ) p <- ggplot(points, aes(x = X1, y = X2)) + geom_abline(intercept = 1, slope = 3) + # X2 = 1 + 3X1 theme_bw() p + geom_point(aes(color = 1 + 3*X1 - X2 > 0), size = 0.1) + scale_color_discrete(name = "1 + 3X1 − X2 > 0") On the same plot, sketch the hyperplane \\(−2 + X_1 + 2X_2 = 0\\). Indicate the set of points for which \\(−2 + X_1 + 2X_2 > 0\\), as well as the set of points for which \\(−2 + X_1 + 2X_2 < 0\\). p + geom_abline(intercept = 1, slope = -1/2) + # X2 = 1 - X1/2 geom_point( aes(color = interaction(1 + 3*X1 - X2 > 0, -2 + X1 + 2*X2 > 0)), size = 0.5 ) + scale_color_discrete(name = "(1 + 3X1 − X2 > 0).(−2 + X1 + 2X2 > 0)") 9.1.2 Question 2 We have seen that in \\(p = 2\\) dimensions, a linear decision boundary takes the form \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 = 0\\). We now investigate a non-linear decision boundary. Sketch the curve \\[(1+X_1)^2 +(2−X_2)^2 = 4\\]. 
points <- expand.grid( X1 = seq(-4, 2, length.out = 100), X2 = seq(-1, 5, length.out = 100) ) p <- ggplot(points, aes(x = X1, y = X2, z = (1 + X1)^2 + (2 - X2)^2 - 4)) + geom_contour(breaks = 0, colour = "black") + theme_bw() p On your sketch, indicate the set of points for which \\[(1 + X_1)^2 + (2 − X_2)^2 > 4,\\] as well as the set of points for which \\[(1 + X_1)^2 + (2 − X_2)^2 \\leq 4.\\] p + geom_point(aes(color = (1 + X1)^2 + (2 - X2)^2 - 4 > 0), size = 0.1) Suppose that a classifier assigns an observation to the blue class if \\[(1 + X_1)^2 + (2 − X_2)^2 > 4,\\] and to the red class otherwise. To what class is the observation \\((0, 0)\\) classified? \\((−1, 1)\\)? \\((2, 2)\\)? \\((3, 8)\\)? points <- data.frame( X1 = c(0, -1, 2, 3), X2 = c(0, 1, 2, 8) ) ifelse((1 + points$X1)^2 + (2 - points$X2)^2 > 4, "blue", "red") ## [1] "blue" "red" "blue" "blue" Argue that while the decision boundary in (c) is not linear in terms of \\(X_1\\) and \\(X_2\\), it is linear in terms of \\(X_1\\), \\(X_1^2\\), \\(X_2\\), and \\(X_2^2\\). The decision boundary is \\[(1 + X_1)^2 + (2 − X_2)^2 -4 = 0\\] which we can expand to: \\[1 + 2X_1 + X_1^2 + 4 − 4X_2 + X_2^2 - 4 = 0\\] which is linear in terms of \\(X_1\\), \\(X_1^2\\), \\(X_2\\), \\(X_2^2\\). 9.1.3 Question 3 Here we explore the maximal margin classifier on a toy data set. We are given \\(n = 7\\) observations in \\(p = 2\\) dimensions. For each observation, there is an associated class label. Obs. \\(X_1\\) \\(X_2\\) \\(Y\\) 1 3 4 Red 2 2 2 Red 3 4 4 Red 4 1 4 Red 5 2 1 Blue 6 4 3 Blue 7 4 1 Blue Sketch the observations. data <- data.frame( X1 = c(3, 2, 4, 1, 2, 4, 4), X2 = c(4, 2, 4, 4, 1, 3, 1), Y = c(rep("Red", 4), rep("Blue", 3)) ) p <- ggplot(data, aes(x = X1, y = X2, color = Y)) + geom_point(size = 2) + scale_colour_identity() + coord_cartesian(xlim = c(0.5, 4.5), ylim = c(0.5, 4.5)) p Sketch the optimal separating hyperplane, and provide the equation for this hyperplane (of the form (9.1)). library(e1071) fit <- svm(as.factor(Y) ~ ., data = data, kernel = "linear", cost = 10, scale = FALSE) # Extract beta_0, beta_1, beta_2 beta <- c( -fit$rho, drop(t(fit$coefs) %*% as.matrix(data[fit$index, 1:2])) ) names(beta) <- c("B0", "B1", "B2") p <- p + geom_abline(intercept = -beta[1] / beta[3], slope = -beta[2] / beta[3], lty = 2) p Describe the classification rule for the maximal margin classifier. It should be something along the lines of “Classify to Red if \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 > 0\\), and classify to Blue otherwise.” Provide the values for \\(\\beta_0, \\beta_1,\\) and \\(\\beta_2\\). Classify to red if \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 > 0\\) and blue otherwise where \\(\\beta_0 = 1\\), \\(\\beta_1 = -2\\), \\(\\beta_2 = 2\\). On your sketch, indicate the margin for the maximal margin hyperplane. p <- p + geom_ribbon( aes(x = x, ymin = ymin, ymax = ymax), data = data.frame(x = c(0, 5), ymin = c(-1, 4), ymax = c(0, 5)), alpha = 0.1, fill = "blue", inherit.aes = FALSE ) p Indicate the support vectors for the maximal margin classifier. p <- p + geom_point(data = data[fit$index, ], size = 4) p The support vectors (from the svm fit object) are shown above. Arguably, there’s another support vector, since four points exactly touch the margin. Argue that a slight movement of the seventh observation would not affect the maximal margin hyperplane. p + geom_point(data = data[7, , drop = FALSE], size = 4, color = "purple") The 7th point is shown in purple above. 
It is not a support vector, and not close to the margin, so small changes in its X1, X2 values would not affect the current calculated margin. Sketch a hyperplane that is not the optimal separating hyperplane, and provide the equation for this hyperplane. A non-optimal hyperline that still separates the blue and red points would be one that touches the (red) point at X1 = 2, X2 = 2 and the (blue) point at X1 = 4, X2 = 3. This gives line \\(y = x/2 + 1\\) or, when \\(\\beta_0 = -1\\), \\(\\beta_1 = -1/2\\), \\(\\beta_2 = 1\\). p + geom_abline(intercept = 1, slope = 0.5, lty = 2, col = "red") Draw an additional observation on the plot so that the two classes are no longer separable by a hyperplane. p + geom_point(data = data.frame(X1 = 1, X2 = 3, Y = "Blue"), shape = 15, size = 4) 9.2 Applied 9.2.1 Question 4 Generate a simulated two-class data set with 100 observations and two features in which there is a visible but non-linear separation between the two classes. Show that in this setting, a support vector machine with a polynomial kernel (with degree greater than 1) or a radial kernel will outperform a support vector classifier on the training data. Which technique performs best on the test data? Make plots and report training and test error rates in order to back up your assertions. set.seed(10) data <- data.frame( x = runif(100), y = runif(100) ) score <- (2*data$x-0.5)^2 + (data$y)^2 - 0.5 data$class <- factor(ifelse(score > 0, "red", "blue")) p <- ggplot(data, aes(x = x, y = y, color = class)) + geom_point(size = 2) + scale_colour_identity() p train <- 1:50 test <- 51:100 fits <- list( "Radial" = svm(class ~ ., data = data[train, ], kernel = "radial"), "Polynomial" = svm(class ~ ., data = data[train, ], kernel = "polynomial", degree = 2), "Linear" = svm(class ~ ., data = data[train, ], kernel = "linear") ) err <- function(model, data) { out <- table(predict(model, data), data$class) (out[1, 2] + out[2, 1]) / sum(out) } plot(fits[[1]], data) plot(fits[[2]], data) plot(fits[[3]], data) sapply(fits, err, data = data[train, ]) ## Radial Polynomial Linear ## 0.04 0.30 0.10 sapply(fits, err, data = data[test, ]) ## Radial Polynomial Linear ## 0.06 0.48 0.14 In this case, the radial kernel performs best, followed by a linear kernel with the 2nd degree polynomial performing worst. The ordering of these models is the same for the training and test data sets. 9.2.2 Question 5 We have seen that we can fit an SVM with a non-linear kernel in order to perform classification using a non-linear decision boundary. We will now see that we can also obtain a non-linear decision boundary by performing logistic regression using non-linear transformations of the features. Generate a data set with \\(n = 500\\) and \\(p = 2\\), such that the observations belong to two classes with a quadratic decision boundary between them. For instance, you can do this as follows: > x1 <- runif(500) - 0.5 > x2 <- runif(500) - 0.5 > y <- 1 * (x1^2 - x2^2 > 0) set.seed(42) train <- data.frame( x1 = runif(500) - 0.5, x2 = runif(500) - 0.5 ) train$y <- factor(as.numeric((train$x1^2 - train$x2^2 > 0))) Plot the observations, colored according to their class labels. Your plot should display \\(X_1\\) on the \\(x\\)-axis, and \\(X_2\\) on the \\(y\\)-axis. p <- ggplot(train, aes(x = x1, y = x2, color = y)) + geom_point(size = 2) p Fit a logistic regression model to the data, using \\(X_1\\) and \\(X_2\\) as predictors. 
fit1 <- glm(y ~ ., data = train, family = "binomial") Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be linear. plot_model <- function(fit) { if (inherits(fit, "svm")) { train$p <- predict(fit) } else { train$p <- factor(as.numeric(predict(fit) > 0)) } ggplot(train, aes(x = x1, y = x2, color = p)) + geom_point(size = 2) } plot_model(fit1) Now fit a logistic regression model to the data using non-linear functions of \\(X_1\\) and \\(X_2\\) as predictors (e.g. \\(X_1^2, X_1 \\times X_2, \\log(X_2),\\) and so forth). fit2 <- glm(y ~ poly(x1, 2) + poly(x2, 2), data = train, family = "binomial") ## Warning: glm.fit: algorithm did not converge ## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be obviously non-linear. If it is not, then repeat (a)-(e) until you come up with an example in which the predicted class labels are obviously non-linear. plot_model(fit2) Fit a support vector classifier to the data with \\(X_1\\) and \\(X_2\\) as predictors. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels. fit3 <- svm(y ~ x1 + x2, data = train, kernel = "linear") plot_model(fit3) Fit a SVM using a non-linear kernel to the data. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels. fit4 <- svm(y ~ x1 + x2, data = train, kernel = "polynomial", degree = 2) plot_model(fit4) Comment on your results. When simulating data with a quadratic decision boundary, a logistic model with quadratic transformations of the variables and an svm model with a quadratic kernel both produce much better (and similar fits) than standard linear methods. 9.2.3 Question 6 At the end of Section 9.6.1, it is claimed that in the case of data that is just barely linearly separable, a support vector classifier with a small value of cost that misclassifies a couple of training observations may perform better on test data than one with a huge value of cost that does not misclassify any training observations. You will now investigate this claim. Generate two-class data with \\(p = 2\\) in such a way that the classes are just barely linearly separable. set.seed(2) # Simulate data that is separable by a line at y = 2.5 data <- data.frame( x = rnorm(200), class = sample(c("red", "blue"), 200, replace = TRUE) ) data$y <- (data$class == "red") * 5 + rnorm(200) # Add barley separable points (these are simulated "noise" values) newdata <- data.frame(x = rnorm(30)) newdata$y <- 1.5*newdata$x + 3 + rnorm(30, 0, 1) newdata$class = ifelse((1.5*newdata$x + 3) - newdata$y > 0, "blue", "red") data <- rbind(data, newdata) # remove any that cause misclassification leaving data that is barley linearly # separable, but along an axis that is not y = 2.5 (which would be correct # for the "true" data. 
data <- data[!(data$class == "red") == ((1.5*data$x + 3 - data$y) > 0), ] data <- data[sample(seq_len(nrow(data)), 200), ] p <- ggplot(data, aes(x = x, y = y, color = class)) + geom_point(size = 2) + scale_colour_identity() + geom_abline(intercept = 3, slope = 1.5, lty = 2) p Compute the cross-validation error rates for support vector classifiers with a range of cost values. How many training errors are misclassified for each value of cost considered, and how does this relate to the cross-validation errors obtained? How many training errors are misclassified for each value of cost? costs <- 10^seq(-3, 5) sapply(costs, function(cost) { fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost) pred <- predict(fit, data) sum(pred != data$class) }) ## [1] 98 8 9 4 1 1 0 0 0 Cross-validation errors out <- tune(svm, as.factor(class) ~ ., data = data, kernel = "linear", ranges = list(cost = costs)) summary(out) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 10 ## ## - best performance: 0.005 ## ## - Detailed performance results: ## cost error dispersion ## 1 1e-03 0.540 0.09067647 ## 2 1e-02 0.045 0.02838231 ## 3 1e-01 0.045 0.03689324 ## 4 1e+00 0.020 0.02581989 ## 5 1e+01 0.005 0.01581139 ## 6 1e+02 0.005 0.01581139 ## 7 1e+03 0.005 0.01581139 ## 8 1e+04 0.010 0.02108185 ## 9 1e+05 0.010 0.02108185 data.frame( cost = out$performances$cost, misclass = out$performances$error * nrow(data) ) ## cost misclass ## 1 1e-03 108 ## 2 1e-02 9 ## 3 1e-01 9 ## 4 1e+00 4 ## 5 1e+01 1 ## 6 1e+02 1 ## 7 1e+03 1 ## 8 1e+04 2 ## 9 1e+05 2 Generate an appropriate test data set, and compute the test errors corresponding to each of the values of cost considered. Which value of cost leads to the fewest test errors, and how does this compare to the values of cost that yield the fewest training errors and the fewest cross-validation errors? set.seed(2) test <- data.frame( x = rnorm(200), class = sample(c("red", "blue"), 200, replace = TRUE) ) test$y <- (test$class == "red") * 5 + rnorm(200) p + geom_point(data = test, pch = 21) (errs <- sapply(costs, function(cost) { fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost) pred <- predict(fit, test) sum(pred != test$class) })) ## [1] 95 2 3 9 16 16 19 19 19 (cost <- costs[which.min(errs)]) ## [1] 0.01 (fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost)) ## ## Call: ## svm(formula = as.factor(class) ~ ., data = data, kernel = "linear", ## cost = cost) ## ## ## Parameters: ## SVM-Type: C-classification ## SVM-Kernel: linear ## cost: 0.01 ## ## Number of Support Vectors: 135 test$prediction <- predict(fit, test) p <- ggplot(test, aes(x = x, y = y, color = class, shape = prediction == class)) + geom_point(size = 2) + scale_colour_identity() p Discuss your results. A large cost leads to overfitting as the model finds the perfect linear separation between red and blue in the training data. A lower cost then leads to improved prediction in the test data. 9.2.4 Question 7 In this problem, you will use support vector approaches in order to predict whether a given car gets high or low gas mileage based on the Auto data set. Create a binary variable that takes on a 1 for cars with gas mileage above the median, and a 0 for cars with gas mileage below the median. 
library(ISLR2) data <- Auto data$high_mpg <- as.factor(as.numeric(data$mpg > median(data$mpg))) Fit a support vector classifier to the data with various values of cost, in order to predict whether a car gets high or low gas mileage. Report the cross-validation errors associated with different values of this parameter. Comment on your results. Note you will need to fit the classifier without the gas mileage variable to produce sensible results. set.seed(42) costs <- 10^seq(-4, 3, by = 0.5) results <- list() f <- high_mpg ~ displacement + horsepower + weight results$linear <- tune(svm, f, data = data, kernel = "linear", ranges = list(cost = costs)) summary(results$linear) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 0.03162278 ## ## - best performance: 0.1019231 ## ## - Detailed performance results: ## cost error dispersion ## 1 1.000000e-04 0.5967949 0.05312225 ## 2 3.162278e-04 0.5967949 0.05312225 ## 3 1.000000e-03 0.2199359 0.08718077 ## 4 3.162278e-03 0.1353846 0.06058195 ## 5 1.000000e-02 0.1121795 0.04011293 ## 6 3.162278e-02 0.1019231 0.05087176 ## 7 1.000000e-01 0.1096154 0.05246238 ## 8 3.162278e-01 0.1044872 0.05154934 ## 9 1.000000e+00 0.1044872 0.05154934 ## 10 3.162278e+00 0.1044872 0.05154934 ## 11 1.000000e+01 0.1019231 0.05501131 ## 12 3.162278e+01 0.1019231 0.05501131 ## 13 1.000000e+02 0.1019231 0.05501131 ## 14 3.162278e+02 0.1019231 0.05501131 ## 15 1.000000e+03 0.1019231 0.05501131 Now repeat (b), this time using SVMs with radial and polynomial basis kernels, with different values of gamma and degree and cost. Comment on your results. results$polynomial <- tune(svm, f, data = data, kernel = "polynomial", ranges = list(cost = costs, degree = 1:3)) summary(results$polynomial) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost degree ## 0.1 1 ## ## - best performance: 0.101859 ## ## - Detailed performance results: ## cost degree error dispersion ## 1 1.000000e-04 1 0.5842949 0.04703306 ## 2 3.162278e-04 1 0.5842949 0.04703306 ## 3 1.000000e-03 1 0.5842949 0.04703306 ## 4 3.162278e-03 1 0.2167949 0.07891173 ## 5 1.000000e-02 1 0.1275641 0.04806885 ## 6 3.162278e-02 1 0.1147436 0.05661708 ## 7 1.000000e-01 1 0.1018590 0.05732429 ## 8 3.162278e-01 1 0.1069231 0.05949679 ## 9 1.000000e+00 1 0.1069231 0.06307278 ## 10 3.162278e+00 1 0.1069231 0.06307278 ## 11 1.000000e+01 1 0.1043590 0.06603760 ## 12 3.162278e+01 1 0.1043590 0.06603760 ## 13 1.000000e+02 1 0.1043590 0.06603760 ## 14 3.162278e+02 1 0.1043590 0.06603760 ## 15 1.000000e+03 1 0.1043590 0.06603760 ## 16 1.000000e-04 2 0.5842949 0.04703306 ## 17 3.162278e-04 2 0.5842949 0.04703306 ## 18 1.000000e-03 2 0.5842949 0.04703306 ## 19 3.162278e-03 2 0.5255128 0.08090636 ## 20 1.000000e-02 2 0.3980769 0.08172400 ## 21 3.162278e-02 2 0.3674359 0.07974741 ## 22 1.000000e-01 2 0.3597436 0.08336609 ## 23 3.162278e-01 2 0.3597436 0.09010398 ## 24 1.000000e+00 2 0.3444872 0.08767258 ## 25 3.162278e+00 2 0.3545513 0.10865903 ## 26 1.000000e+01 2 0.3239103 0.09593710 ## 27 3.162278e+01 2 0.3035256 0.08184137 ## 28 1.000000e+02 2 0.3061538 0.08953945 ## 29 3.162278e+02 2 0.3060897 0.08919821 ## 30 1.000000e+03 2 0.3035897 0.09305216 ## 31 1.000000e-04 3 0.5842949 0.04703306 ## 32 3.162278e-04 3 0.4955128 0.10081350 ## 33 1.000000e-03 3 0.3750641 0.08043982 ## 34 3.162278e-03 3 0.3036538 0.09096445 ## 35 1.000000e-02 3 0.2601282 0.07774595 ## 36 3.162278e-02 3 0.2499359 0.08407106 ## 37 
1.000000e-01 3 0.2017949 0.07547413 ## 38 3.162278e-01 3 0.1937179 0.08427411 ## 39 1.000000e+00 3 0.1478205 0.04579654 ## 40 3.162278e+00 3 0.1451923 0.05169638 ## 41 1.000000e+01 3 0.1451282 0.04698931 ## 42 3.162278e+01 3 0.1500000 0.07549058 ## 43 1.000000e+02 3 0.1373718 0.05772558 ## 44 3.162278e+02 3 0.1271795 0.06484766 ## 45 1.000000e+03 3 0.1322436 0.06764841 results$radial <- tune(svm, f, data = data, kernel = "radial", ranges = list(cost = costs, gamma = 10^(-2:1))) summary(results$radial) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost gamma ## 1000 0.1 ## ## - best performance: 0.08179487 ## ## - Detailed performance results: ## cost gamma error dispersion ## 1 1.000000e-04 0.01 0.58410256 0.05435320 ## 2 3.162278e-04 0.01 0.58410256 0.05435320 ## 3 1.000000e-03 0.01 0.58410256 0.05435320 ## 4 3.162278e-03 0.01 0.58410256 0.05435320 ## 5 1.000000e-02 0.01 0.58410256 0.05435320 ## 6 3.162278e-02 0.01 0.26557692 0.10963269 ## 7 1.000000e-01 0.01 0.15038462 0.05783237 ## 8 3.162278e-01 0.01 0.11224359 0.04337812 ## 9 1.000000e+00 0.01 0.10730769 0.04512161 ## 10 3.162278e+00 0.01 0.10730769 0.04512161 ## 11 1.000000e+01 0.01 0.10737179 0.05526490 ## 12 3.162278e+01 0.01 0.10480769 0.05610124 ## 13 1.000000e+02 0.01 0.10480769 0.05610124 ## 14 3.162278e+02 0.01 0.10737179 0.05526490 ## 15 1.000000e+03 0.01 0.10993590 0.05690926 ## 16 1.000000e-04 0.10 0.58410256 0.05435320 ## 17 3.162278e-04 0.10 0.58410256 0.05435320 ## 18 1.000000e-03 0.10 0.58410256 0.05435320 ## 19 3.162278e-03 0.10 0.58410256 0.05435320 ## 20 1.000000e-02 0.10 0.15301282 0.06026554 ## 21 3.162278e-02 0.10 0.11480769 0.04514816 ## 22 1.000000e-01 0.10 0.10730769 0.04512161 ## 23 3.162278e-01 0.10 0.10730769 0.04512161 ## 24 1.000000e+00 0.10 0.10737179 0.05526490 ## 25 3.162278e+00 0.10 0.10737179 0.05526490 ## 26 1.000000e+01 0.10 0.10737179 0.05526490 ## 27 3.162278e+01 0.10 0.10737179 0.05526490 ## 28 1.000000e+02 0.10 0.09967949 0.04761387 ## 29 3.162278e+02 0.10 0.08429487 0.03207585 ## 30 1.000000e+03 0.10 0.08179487 0.03600437 ## 31 1.000000e-04 1.00 0.58410256 0.05435320 ## 32 3.162278e-04 1.00 0.58410256 0.05435320 ## 33 1.000000e-03 1.00 0.58410256 0.05435320 ## 34 3.162278e-03 1.00 0.58410256 0.05435320 ## 35 1.000000e-02 1.00 0.12506410 0.05342773 ## 36 3.162278e-02 1.00 0.10730769 0.06255920 ## 37 1.000000e-01 1.00 0.10993590 0.05561080 ## 38 3.162278e-01 1.00 0.10737179 0.05526490 ## 39 1.000000e+00 1.00 0.09711538 0.05107441 ## 40 3.162278e+00 1.00 0.08429487 0.03634646 ## 41 1.000000e+01 1.00 0.08692308 0.03877861 ## 42 3.162278e+01 1.00 0.08948718 0.03503648 ## 43 1.000000e+02 1.00 0.09198718 0.03272127 ## 44 3.162278e+02 1.00 0.10217949 0.04214031 ## 45 1.000000e+03 1.00 0.09692308 0.04645046 ## 46 1.000000e-04 10.00 0.58410256 0.05435320 ## 47 3.162278e-04 10.00 0.58410256 0.05435320 ## 48 1.000000e-03 10.00 0.58410256 0.05435320 ## 49 3.162278e-03 10.00 0.58410256 0.05435320 ## 50 1.000000e-02 10.00 0.58410256 0.05435320 ## 51 3.162278e-02 10.00 0.22205128 0.12710181 ## 52 1.000000e-01 10.00 0.11237179 0.03888895 ## 53 3.162278e-01 10.00 0.10217949 0.04375722 ## 54 1.000000e+00 10.00 0.09717949 0.03809440 ## 55 3.162278e+00 10.00 0.09717949 0.03809440 ## 56 1.000000e+01 10.00 0.09711538 0.04161705 ## 57 3.162278e+01 10.00 0.11487179 0.04240664 ## 58 1.000000e+02 10.00 0.13019231 0.03541140 ## 59 3.162278e+02 10.00 0.13532051 0.03865626 ## 60 1.000000e+03 10.00 0.14044872 0.04251917 sapply(results, function(x) 
x$best.performance) ## linear polynomial radial ## 0.10192308 0.10185897 0.08179487 sapply(results, function(x) x$best.parameters) ## $linear ## cost ## 6 0.03162278 ## ## $polynomial ## cost degree ## 7 0.1 1 ## ## $radial ## cost gamma ## 30 1000 0.1 Make some plots to back up your assertions in (b) and (c). Hint: In the lab, we used the plot() function for svm objects only in cases with \\(p = 2\\). When \\(p > 2\\), you can use the plot() function to create plots displaying pairs of variables at a time. Essentially, instead of typing > plot(svmfit, dat) where svmfit contains your fitted model and dat is a data frame containing your data, you can type > plot(svmfit, dat, x1 ∼ x4) in order to plot just the first and fourth variables. However, you must replace x1 and x4 with the correct variable names. To find out more, type ?plot.svm. table(predict(results$radial$best.model, data), data$high_mpg) ## ## 0 1 ## 0 176 5 ## 1 20 191 plot(results$radial$best.model, data, horsepower~displacement) plot(results$radial$best.model, data, horsepower~weight) plot(results$radial$best.model, data, displacement~weight) 9.2.5 Question 8 This problem involves the OJ data set which is part of the ISLR2 package. Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations. set.seed(42) train <- sample(seq_len(nrow(OJ)), 800) test <- setdiff(seq_len(nrow(OJ)), train) Fit a support vector classifier to the training data using cost = 0.01, with Purchase as the response and the other variables as predictors. Use the summary() function to produce summary statistics, and describe the results obtained. fit <- svm(Purchase ~ ., data = OJ[train, ], kernel = "linear", cost = 0.01) summary(fit) ## ## Call: ## svm(formula = Purchase ~ ., data = OJ[train, ], kernel = "linear", ## cost = 0.01) ## ## ## Parameters: ## SVM-Type: C-classification ## SVM-Kernel: linear ## cost: 0.01 ## ## Number of Support Vectors: 432 ## ## ( 215 217 ) ## ## ## Number of Classes: 2 ## ## Levels: ## CH MM What are the training and test error rates? err <- function(model, data) { t <- table(predict(model, data), data[["Purchase"]]) 1 - sum(diag(t)) / sum(t) } errs <- function(model) { c(train = err(model, OJ[train, ]), test = err(model, OJ[test, ])) } errs(fit) ## train test ## 0.171250 0.162963 Use the tune() function to select an optimal cost. Consider values in the range 0.01 to 10. tuned <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "linear", ranges = list(cost = 10^seq(-2, 1, length.out = 10))) tuned$best.parameters ## cost ## 7 1 summary(tuned) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 1 ## ## - best performance: 0.1775 ## ## - Detailed performance results: ## cost error dispersion ## 1 0.01000000 0.18250 0.04133199 ## 2 0.02154435 0.18000 0.04005205 ## 3 0.04641589 0.18000 0.05041494 ## 4 0.10000000 0.18000 0.04901814 ## 5 0.21544347 0.18250 0.04377975 ## 6 0.46415888 0.18250 0.04090979 ## 7 1.00000000 0.17750 0.04031129 ## 8 2.15443469 0.18000 0.03961621 ## 9 4.64158883 0.17875 0.03821086 ## 10 10.00000000 0.18375 0.03438447 Compute the training and test error rates using this new value for cost. errs(tuned$best.model) ## train test ## 0.167500 0.162963 Repeat parts (b) through (e) using a support vector machine with a radial kernel. Use the default value for gamma. 
tuned2 <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "radial", ranges = list(cost = 10^seq(-2, 1, length.out = 10))) tuned2$best.parameters ## cost ## 6 0.4641589 errs(tuned2$best.model) ## train test ## 0.1525000 0.1666667 Repeat parts (b) through (e) using a support vector machine with a polynomial kernel. Set degree = 2. tuned3 <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "polynomial", ranges = list(cost = 10^seq(-2, 1, length.out = 10)), degree = 2) tuned3$best.parameters ## cost ## 9 4.641589 errs(tuned3$best.model) ## train test ## 0.1487500 0.1703704 Overall, which approach seems to give the best results on this data? Overall the “radial” kernel appears to perform best in this case. "],["deep-learning.html", "10 Deep Learning 10.1 Conceptual 10.2 Applied", " 10 Deep Learning 10.1 Conceptual 10.1.1 Question 1 Consider a neural network with two hidden layers: \\(p = 4\\) input units, 2 units in the first hidden layer, 3 units in the second hidden layer, and a single output. Draw a picture of the network, similar to Figures 10.1 or 10.4. Write out an expression for \\(f(X)\\), assuming ReLU activation functions. Be as explicit as you can! The three layers (from our final output layer back to the start of our network) can be described as: \\[\\begin{align*} f(X) &= g(w_{0}^{(3)} + \\sum^{K_2}_{l=1} w_{l}^{(3)} A_l^{(2)}) \\\\ A_l^{(2)} &= h_l^{(2)}(X) = g(w_{l0}^{(2)} + \\sum_{k=1}^{K_1} w_{lk}^{(2)} A_k^{(1)})\\\\ A_k^{(1)} &= h_k^{(1)}(X) = g(w_{k0}^{(1)} + \\sum_{j=1}^p w_{kj}^{(1)} X_j) \\\\ \\end{align*}\\] for \\(l = 1, ..., K_2 = 3\\) and \\(k = 1, ..., K_1 = 2\\) and \\(p = 4\\), where, \\[ g(z) = (z)_+ = \\begin{cases} 0, & \\text{if } z < 0 \\\\ z, & \\text{otherwise} \\end{cases} \\] Now plug in some values for the coefficients and write out the value of \\(f(X)\\). We can perhaps achieve this most easily by fitting a real model. Note, in the plot shown here, we also include the “bias” or intercept terms. library(ISLR2) library(neuralnet) library(sigmoid) set.seed(5) train <- sample(seq_len(nrow(ISLR2::Boston)), nrow(ISLR2::Boston) * 2/3) net <- neuralnet(crim ~ lstat + medv + ptratio + rm, data = ISLR2::Boston[train, ], act.fct = relu, hidden = c(2, 3) ) plot(net) We can make a prediction for a given observation using this object. Firstly, let’s find an “ambiguous” test sample p <- predict(net, ISLR2::Boston[-train, ]) x <- ISLR2::Boston[-train, ][which.min(abs(p - mean(c(max(p), min(p))))), ] x <- x[, c("lstat", "medv", "ptratio", "rm")] predict(net, x) ## [,1] ## 441 19.14392 Or, repeating by “hand”: g <- function(x) ifelse(x > 0, x, 0) # relu activation function w <- net$weights[[1]] # the estimated weights for each layer v <- as.numeric(x) # our input predictors # to calculate our prediction we can take the dot product of our predictors # (with 1 at the start for the bias term) and our layer weights, lw) for (lw in w) v <- g(c(1, v) %*% lw) v ## [,1] ## [1,] 19.14392 How many parameters are there? length(unlist(net$weights)) ## [1] 23 There are \\(4*2+2 + 2*3+3 + 3*1+1 = 23\\) parameters. 10.1.2 Question 2 Consider the softmax function in (10.13) (see also (4.13) on page 141) for modeling multinomial probabilities. In (10.13), show that if we add a constant \\(c\\) to each of the \\(z_l\\), then the probability is unchanged. 
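A quick numerical check of this invariance before the algebra (the score vector z and the added constant are arbitrary choices):
softmax <- function(z) exp(z) / sum(exp(z))
z <- c(0.2, -1.3, 2.1, 0.5)
all.equal(softmax(z), softmax(z + 5)) # TRUE: adding a constant to every z_l leaves the probabilities unchanged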
If we add a constant \\(c\\) to each \\(Z_l\\) in equation 10.13 we get: \\[\\begin{align*} Pr(Y=m|X) &= \\frac{e^{Z_m+c}}{\\sum_{l=0}^9e^{Z_l+c}} \\\\ &= \\frac{e^{Z_m}e^c}{\\sum_{l=0}^9e^{Z_l}e^c} \\\\ &= \\frac{e^{Z_m}e^c}{e^c\\sum_{l=0}^9e^{Z_l}} \\\\ &= \\frac{e^{Z_m}}{\\sum_{l=0}^9e^{Z_l}} \\\\ \\end{align*}\\] which is just equation 10.13. In (4.13), show that if we add constants \\(c_j\\), \\(j = 0,1,...,p\\), to each of the corresponding coefficients for each of the classes, then the predictions at any new point \\(x\\) are unchanged. 4.13 is \\[ Pr(Y=k|X=x) = \\frac {e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x1 + ... + \\beta_{lp}x_p}} \\] adding constants \\(c_j\\) to each class gives: \\[\\begin{align*} Pr(Y=k|X=x) &= \\frac {e^{\\beta_{K0} + \\beta_{K1}x_1 + c_1 + ... + \\beta_{Kp}x_p + c_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x1 + c_1 + ... + \\beta_{lp}x_p + c_p}} \\\\ &= \\frac {e^{c1 + ... + c_p}e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{c1 + ... + c_p}e^{\\beta_{l0} + \\beta_{l1}x1 + ... + \\beta_{lp}x_p}} \\\\ &= \\frac {e^{c1 + ... + c_p}e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {e^{c1 + ... + c_p}\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x1 + ... + \\beta_{lp}x_p}} \\\\ &= \\frac {e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x1 + ... + \\beta_{lp}x_p}} \\\\ \\end{align*}\\] which collapses to 4.13 (with the same argument as above). This shows that the softmax function is over-parametrized. However, regularization and SGD typically constrain the solutions so that this is not a problem. 10.1.3 Question 3 Show that the negative multinomial log-likelihood (10.14) is equivalent to the negative log of the likelihood expression (4.5) when there are \\(M = 2\\) classes. Equation 10.14 is \\[ -\\sum_{i=1}^n \\sum_{m=0}^9 y_{im}\\log(f_m(x_i)) \\] Equation 4.5 is: \\[ \\ell(\\beta_0, \\beta_1) = \\prod_{i:y_i=1}p(x_i) \\prod_{i':y_i'=0}(1-p(x_i')) \\] So, \\(\\log(\\ell)\\) is: \\[\\begin{align*} \\log(\\ell) &= \\log \\left( \\prod_{i:y_i=1}p(x_i) \\prod_{i':y_i'=0}(1-p(x_i')) \\right ) \\\\ &= \\sum_{i:y_1=1}\\log(p(x_i)) + \\sum_{i':y_i'=0}\\log(1-p(x_i')) \\\\ \\end{align*}\\] If we set \\(y_i\\) to be an indicator variable such that \\(y_{i1}\\) and \\(y_{i0}\\) are 1 and 0 (or 0 and 1) when our \\(i\\)th observation is 1 (or 0) respectively, then we can write: \\[ \\log(\\ell) = \\sum_{i}y_{i1}\\log(p(x_i)) + \\sum_{i}y_{i0}\\log(1-p(x_i')) \\] If we also let \\(f_1(x) = p(x)\\) and \\(f_0(x) = 1 - p(x)\\) then: \\[\\begin{align*} \\log(\\ell) &= \\sum_i y_{i1}\\log(f_1(x_i)) + \\sum_{i}y_{i0}\\log(f_0(x_i')) \\\\ &= \\sum_i \\sum_{m=0}^1 y_{im}\\log(f_m(x_i)) \\\\ \\end{align*}\\] When we take the negative of this, it is equivalent to 10.14 for two classes (\\(m = 0,1\\)). 10.1.4 Question 4 Consider a CNN that takes in \\(32 \\times 32\\) grayscale images and has a single convolution layer with three \\(5 \\times 5\\) convolution filters (without boundary padding). Draw a sketch of the input and first hidden layer similar to Figure 10.8. How many parameters are in this model? There are 5 convolution matrices each with 5x5 weights (plus 5 bias terms) to estimate, therefore 130 parameters Explain how this model can be thought of as an ordinary feed-forward neural network with the individual pixels as inputs, and with constraints on the weights in the hidden units. What are the constraints? 
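A small one-dimensional analogue makes the constraints concrete (the input length, filter length, and filter weights below are arbitrary):
w <- c(0.2, -0.5, 0.1) # a length-3 convolution filter
W <- matrix(0, nrow = 4, ncol = 6) # weight matrix of the equivalent fully connected layer
for (i in 1:4) W[i, i:(i + 2)] <- w # each output unit connects to only 3 neighbouring inputs, and every row reuses the same weights
x <- rnorm(6)
drop(W %*% x) # identical to sliding the filter w along x with no padding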
We can think of a convolution layer as a regularized fully connected layer. The regularization in this case is due to not all inputs being connected to all outputs, and weights being shared between connections. Each output node in the convolved image can be thought of as taking inputs from a limited number of input pixels (the neighboring pixels), with a set of weights specified by the convolution layer which are then shared by the connections to all other output nodes. If there were no constraints, then how many weights would there be in the ordinary feed-forward neural network in (c)? With no constraints, we would connect each output pixel in our 5x32x32 convolution layer to each node in the 32x32 original image (plus 5 bias terms), giving a total of 5,242,885 weights to estimate. 10.1.5 Question 5 In Table 10.2 on page 433, we see that the ordering of the three methods with respect to mean absolute error is different from the ordering with respect to test set \\(R^2\\). How can this be? Mean absolute error considers absolute differences between predictions and observed values, whereas \\(R^2\\) considers the (normalized) sum of squared differences, thus larger errors contribute relatively ore to \\(R^2\\) than mean absolute error. 10.2 Applied 10.2.1 Question 6 Consider the simple function \\(R(\\beta) = sin(\\beta) + \\beta/10\\). Draw a graph of this function over the range \\(\\beta \\in [−6, 6]\\). r <- function(x) sin(x) + x/10 x <- seq(-6, 6, 0.1) plot(x, r(x), type = "l") What is the derivative of this function? \\[ cos(x) + 1/10 \\] Given \\(\\beta^0 = 2.3\\), run gradient descent to find a local minimum of \\(R(\\beta)\\) using a learning rate of \\(\\rho = 0.1\\). Show each of \\(\\beta^0, \\beta^1, ...\\) in your plot, as well as the final answer. The derivative of our function, i.e. \\(cos(x) + 1/10\\) gives us the gradient for a given \\(x\\). For gradient descent, we move \\(x\\) a little in the opposite direction, for some learning rate \\(\\rho = 0.1\\): \\[ x^{m+1} = x^m - \\rho (cos(x^m) + 1/10) \\] iter <- function(x, rho) x - rho*(cos(x) + 1/10) gd <- function(start, rho = 0.1) { b <- start v <- b while(abs(b - iter(b, 0.1)) > 1e-8) { b <- iter(b, 0.1) v <- c(v, b) } v } res <- gd(2.3) res[length(res)] ## [1] 4.612221 plot(x, r(x), type = "l") points(res, r(res), col = "red", pch = 19) Repeat with \\(\\beta^0 = 1.4\\). res <- gd(1.4) res[length(res)] ## [1] -1.670964 plot(x, r(x), type = "l") points(res, r(res), col = "red", pch = 19) 10.2.2 Question 7 Fit a neural network to the Default data. Use a single hidden layer with 10 units, and dropout regularization. Have a look at Labs 10.9.1–-10.9.2 for guidance. Compare the classification performance of your model with that of linear logistic regression. library(keras) dat <- ISLR2::Boston x <- scale(model.matrix(crim ~ . 
10.2.2 Question 7 Fit a neural network to the Default data. Use a single hidden layer with 10 units, and dropout regularization. Have a look at Labs 10.9.1–10.9.2 for guidance. Compare the classification performance of your model with that of linear logistic regression. library(keras) dat <- ISLR2::Boston x <- scale(model.matrix(crim ~ . - 1, data = dat)) n <- nrow(dat) ntest <- trunc(n / 3) testid <- sample(1:n, ntest) y <- dat$crim # linear regression lfit <- lm(crim ~ ., data = dat[-testid, ]) lpred <- predict(lfit, dat[testid, ]) with(dat[testid, ], mean(abs(lpred - crim))) ## [1] 2.99129 # keras nn <- keras_model_sequential() |> layer_dense(units = 10, activation = "relu", input_shape = ncol(x)) |> layer_dropout(rate = 0.4) |> layer_dense(units = 1) compile(nn, loss = "mse", optimizer = optimizer_rmsprop(), metrics = list("mean_absolute_error") ) history <- fit(nn, x[-testid, ], y[-testid], epochs = 100, batch_size = 26, validation_data = list(x[testid, ], y[testid]), verbose = 0 ) plot(history, smooth = FALSE) npred <- predict(nn, x[testid, ]) ## 6/6 - 0s - 61ms/epoch - 10ms/step mean(abs(y[testid] - npred)) ## [1] 2.219039 In this case, the neural network outperforms the linear model, having a lower mean absolute error on the test data.
10.2.3 Question 8 From your collection of personal photographs, pick 10 images of animals (such as dogs, cats, birds, farm animals, etc.). If the subject does not occupy a reasonable part of the image, then crop the image. Now use a pretrained image classification CNN as in Lab 10.9.4 to predict the class of each of your images, and report the probabilities for the top five predicted classes for each image. library(keras) images <- list.files("images/animals") x <- array(dim = c(length(images), 224, 224, 3)) for (i in seq_len(length(images))) { img <- image_load(paste0("images/animals/", images[i]), target_size = c(224, 224)) x[i,,,] <- image_to_array(img) } model <- application_resnet50(weights = "imagenet") ## Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5 ## 102967424/102967424 [==============================] - 1s 0us/step pred <- model |> predict(x) |> imagenet_decode_predictions(top = 5) ## 1/1 - 1s - 1s/epoch - 1s/step ## Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json ## 35363/35363 [==============================] - 0s 0us/step names(pred) <- images print(pred) ## $bird.jpg ## class_name class_description score ## 1 n01819313 sulphur-crested_cockatoo 0.33546305 ## 2 n01580077 jay 0.18020906 ## 3 n02441942 weasel 0.08320859 ## 4 n02058221 albatross 0.07002056 ## 5 n01855672 goose 0.05195721 ## ## $bird2.jpg ## class_name class_description score ## 1 n02006656 spoonbill 0.840428233 ## 2 n02012849 crane 0.016258685 ## 3 n01819313 sulphur-crested_cockatoo 0.009740722 ## 4 n02007558 flamingo 0.007816141 ## 5 n01667778 terrapin 0.007497459 ## ## $bird3.jpg ## class_name class_description score ## 1 n01833805 hummingbird 0.9767877460 ## 2 n02033041 dowitcher 0.0111253690 ## 3 n02028035 redshank 0.0042764111 ## 4 n02009229 little_blue_heron 0.0012727526 ## 5 n02002724 black_stork 0.0008971311 ## ## $bug.jpg ## class_name class_description score ## 1 n02190166 fly 0.67558461 ## 2 n02167151 ground_beetle 0.10097048 ## 3 n02172182 dung_beetle 0.05490885 ## 4 n02169497 leaf_beetle 0.03541914 ## 5 n02168699 long-horned_beetle 0.03515299 ## ## $butterfly.jpg ## class_name class_description score ## 1 n02951585 can_opener 0.20600465 ## 2 n03476684 hair_slide 0.09360629 ## 3 n04074963 remote_control 0.06316858 ## 4 n02110185 Siberian_husky 0.05178998 ## 5 n02123597 Siamese_cat 0.03785341 ## ## $butterfly2.jpg ## class_name class_description score ## 1 n02276258 admiral 9.999689e-01 ## 2 n01580077 jay 1.388074e-05 ## 3 n02277742 ringlet 1.235042e-05 ## 4 n02279972 monarch 3.037859e-06 ## 5 n02281787 lycaenid 1.261888e-06 ## ## $elba.jpg ## class_name class_description score ## 1 n02085620 Chihuahua 0.29892012 ## 2 n02091032 Italian_greyhound 0.20332782 ## 3 n02109961 Eskimo_dog 0.08477225 ## 4 n02086910 papillon 0.05140305 ## 5 n02110185 Siberian_husky 0.05064517 ## ## $hamish.jpg ## class_name class_description score ## 1 n02097209 standard_schnauzer 0.6361451149 ## 2 n02097047 miniature_schnauzer 0.3450845778 ## 3 n02097130 giant_schnauzer 0.0164217781 ## 4 n02097298 Scotch_terrier 0.0019116047 ## 5 n02096177 cairn 0.0002054328 ## ## $poodle.jpg ## class_name class_description score ## 1 n02113799 standard_poodle 0.829670966 ## 2 n02088094 Afghan_hound 0.074567914 ## 3 n02113712 miniature_poodle 0.032005571 ## 4 n02102973 Irish_water_spaniel 0.018583152 ## 5 n02102318 cocker_spaniel 0.008629788 ## ## $tortoise.jpg ## class_name class_description score ## 1 n04033995 quilt 0.28395897 ## 2 n02110958 pug 0.15959552 ## 3 n03188531 diaper 0.14018111 ## 4 n02108915 French_bulldog 0.09364161 ## 5 n04235860 sleeping_bag 0.02608401
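One caveat worth flagging about the code above (an observation of mine, not something from the original solution): the images are passed to the network as raw pixel arrays, whereas ResNet50 expects inputs transformed with imagenet_preprocess_input(), which I believe the corresponding lab applies before calling predict(). A minimal sketch of that extra step, assuming x is the image array built above:
# Hedged sketch: apply the ImageNet preprocessing (channel reordering and mean
# subtraction) before predicting; this may change, and possibly improve, some
# of the stranger predictions above (e.g. the butterfly and tortoise images).
x_pp <- imagenet_preprocess_input(x)
pred_pp <- model |> predict(x_pp) |> imagenet_decode_predictions(top = 5)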
10.2.4 Question 9 Fit a lag-5 autoregressive model to the NYSE data, as described in the text and Lab 10.9.6. Refit the model with a 12-level factor representing the month. Does this factor improve the performance of the model? Fitting the model as described in the text. library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1 ## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1 ## ✔ purrr 1.0.2 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ dplyr::compute() masks neuralnet::compute() ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors library(ISLR2) xdata <- data.matrix(NYSE[, c("DJ_return", "log_volume","log_volatility")]) istrain <- NYSE[, "train"] xdata <- scale(xdata) lagm <- function(x, k = 1) { n <- nrow(x) pad <- matrix(NA, k, ncol(x)) rbind(pad, x[1:(n - k), ]) } arframe <- data.frame( log_volume = xdata[, "log_volume"], L1 = lagm(xdata, 1), L2 = lagm(xdata, 2), L3 = lagm(xdata, 3), L4 = lagm(xdata, 4), L5 = lagm(xdata, 5) ) arframe <- arframe[-(1:5), ] istrain <- istrain[-(1:5)] arfit <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred <- predict(arfit, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.413223 Now we add month (and work with tidyverse). arframe$month = as.factor(str_match(NYSE$date, "-(\\\\d+)-")[,2])[-(1:5)] arfit2 <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred2 <- predict(arfit2, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred2 - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.4170418 Adding month as a factor marginally improves the \\(R^2\\) of our model (from 0.413223 to 0.4170418). The improvement is small but statistically significant (see the ANOVA below), and model 2 has the lower AIC. anova(arfit, arfit2) ## Analysis of Variance Table ## ## Model 1: log_volume ~ L1.DJ_return + L1.log_volume + L1.log_volatility + ## L2.DJ_return + L2.log_volume + L2.log_volatility + L3.DJ_return + ## L3.log_volume + L3.log_volatility + L4.DJ_return + L4.log_volume + ## L4.log_volatility + L5.DJ_return + L5.log_volume + L5.log_volatility ## Model 2: log_volume ~ L1.DJ_return + L1.log_volume + L1.log_volatility + ## L2.DJ_return + L2.log_volume + L2.log_volatility + L3.DJ_return + ## L3.log_volume + L3.log_volatility + L4.DJ_return + L4.log_volume + ## L4.log_volatility + L5.DJ_return + L5.log_volume + L5.log_volatility + ## month ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 4260 1791.0 ## 2 4249 1775.8 11 15.278 3.3234 0.000143 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 AIC(arfit, arfit2) ## df AIC ## arfit 17 8447.663 ## arfit2 28 8433.031
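Since the same out-of-sample \\(R^2\\) calculation is repeated throughout these NYSE questions, it may help to wrap it in a small helper function (a convenience of my own, not something used in the text); it simply re-expresses the calculation already shown above.
# Hedged convenience function: test-set R^2, i.e. one minus the test MSE scaled
# by the variance of the observed test responses (the same quantity computed above).
r2_test <- function(pred, obs) 1 - mean((pred - obs)^2) / var(obs)
r2_test(arpred2, arframe[!istrain, "log_volume"])  # reproduces the 0.4170418 above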
10.2.5 Question 10 In Section 10.9.6, we showed how to fit a linear AR model to the NYSE data using the lm() function. However, we also mentioned that we can “flatten” the short sequences produced for the RNN model in order to fit a linear AR model. Use this latter approach to fit a linear AR model to the NYSE data. Compare the test \\(R^2\\) of this linear AR model to that of the linear AR model that we fit in the lab. What are the advantages/disadvantages of each approach? The lm model is the same as that fit above: arfit <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred <- predict(arfit, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.4170418 Now we reshape the data for the RNN. n <- nrow(arframe) xrnn <- data.matrix(arframe[, -1]) xrnn <- array(xrnn, c(n, 3, 5)) xrnn <- xrnn[, , 5:1] xrnn <- aperm(xrnn, c(1, 3, 2)) We can add a “flatten” layer to turn the reshaped data into a long vector of predictors, resulting in a linear AR model. model <- keras_model_sequential() |> layer_flatten(input_shape = c(5, 3)) |> layer_dense(units = 1) Now let’s fit this model. model |> compile(optimizer = optimizer_rmsprop(), loss = "mse") history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) plot(history, smooth = FALSE) kpred <- predict(model, xrnn[!istrain,, ]) ## 56/56 - 0s - 58ms/epoch - 1ms/step 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.412886 Setting aside the month dummy variables added in Question 9, both models estimate the same number of coefficients/weights (16): coef(arfit) ## (Intercept) L1.DJ_return L1.log_volume L1.log_volatility ## 0.067916689 0.094410214 0.498673056 0.586274266 ## L2.DJ_return L2.log_volume L2.log_volatility L3.DJ_return ## -0.027299158 0.036903027 -0.931509135 0.037995916 ## L3.log_volume L3.log_volatility L4.DJ_return L4.log_volume ## 0.070312741 0.216160520 -0.004954842 0.117079461 ## L4.log_volatility L5.DJ_return L5.log_volume L5.log_volatility ## -0.039752786 -0.029620296 0.096034795 0.144510264 ## month02 month03 month04 month05 ## -0.100003367 -0.143781381 -0.028242819 -0.131120579 ## month06 month07 month08 month09 ## -0.125993911 -0.141608808 -0.163030102 -0.018889698 ## month10 month11 month12 ## -0.017206826 -0.037298183 0.008361380 model$get_weights() ## [[1]] ## [,1] ## [1,] -0.031145222 ## [2,] 0.101065643 ## [3,] 0.141815767 ## [4,] -0.004181504 ## [5,] 0.116010934 ## [6,] -0.003764492 ## [7,] 0.038601257 ## [8,] 0.078083567 ## [9,] 0.137415737 ## [10,] -0.029184511 ## [11,] 0.036070298 ## [12,] -0.821708620 ## [13,] 0.095548652 ## [14,] 0.511229098 ## [15,] 0.521453559 ## ## [[2]] ## [1] -0.006889343 The flattened RNN has a lower \\(R^2\\) on the test data than our lm model above. The lm model is quicker to fit and conceptually simpler; it also lets us inspect the coefficients for the different variables. The flattened RNN is regularized to some extent as data are processed in batches. 10.2.6 Question 11 Repeat the previous exercise, but now fit a nonlinear AR model by “flattening” the short sequences produced for the RNN model. From the book: To fit a nonlinear AR model, we could add in a hidden layer.
xfun::cache_rds({ model <- keras_model_sequential() |> layer_flatten(input_shape = c(5, 3)) |> layer_dense(units = 32, activation = "relu") |> layer_dropout(rate = 0.4) |> layer_dense(units = 1) model |> compile( loss = "mse", optimizer = optimizer_rmsprop(), metrics = "mse" ) history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) plot(history, smooth = FALSE, metrics = "mse") kpred <- predict(model, xrnn[!istrain,, ]) 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 }) ## 56/56 - 0s - 66ms/epoch - 1ms/step ## [1] 0.4271516 This approach improves our \\(R^2\\) over the linear model above. 10.2.7 Question 12 Consider the RNN fit to the NYSE data in Section 10.9.6. Modify the code to allow inclusion of the variable day_of_week, and fit the RNN. Compute the test \\(R^2\\). To accomplish this, I’ll include day of the week as one of the lagged variables in the RNN. Thus, our input for each observation will be 4 x 5 (rather than 3 x 5). xfun::cache_rds({ xdata <- data.matrix( NYSE[, c("day_of_week", "DJ_return", "log_volume","log_volatility")] ) istrain <- NYSE[, "train"] xdata <- scale(xdata) arframe <- data.frame( log_volume = xdata[, "log_volume"], L1 = lagm(xdata, 1), L2 = lagm(xdata, 2), L3 = lagm(xdata, 3), L4 = lagm(xdata, 4), L5 = lagm(xdata, 5) ) arframe <- arframe[-(1:5), ] istrain <- istrain[-(1:5)] n <- nrow(arframe) xrnn <- data.matrix(arframe[, -1]) xrnn <- array(xrnn, c(n, 4, 5)) xrnn <- xrnn[,, 5:1] xrnn <- aperm(xrnn, c(1, 3, 2)) dim(xrnn) model <- keras_model_sequential() |> layer_simple_rnn(units = 12, input_shape = list(5, 4), dropout = 0.1, recurrent_dropout = 0.1 ) |> layer_dense(units = 1) model |> compile(optimizer = optimizer_rmsprop(), loss = "mse") history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) kpred <- predict(model, xrnn[!istrain,, ]) 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 }) ## 56/56 - 0s - 133ms/epoch - 2ms/step ## [1] 0.4405331 10.2.8 Question 13 Repeat the analysis of Lab 10.9.5 on the IMDb data using a similarly structured neural network. There we used a dictionary of size 10,000. Consider the effects of varying the dictionary size. Try the values 1000, 3000, 5000, and 10,000, and compare the results. xfun::cache_rds({ library(knitr) accuracy <- c() for(max_features in c(1000, 3000, 5000, 10000)) { imdb <- dataset_imdb(num_words = max_features) c(c(x_train, y_train), c(x_test, y_test)) %<-% imdb maxlen <- 500 x_train <- pad_sequences(x_train, maxlen = maxlen) x_test <- pad_sequences(x_test, maxlen = maxlen) model <- keras_model_sequential() |> layer_embedding(input_dim = max_features, output_dim = 32) |> layer_lstm(units = 32) |> layer_dense(units = 1, activation = "sigmoid") model |> compile( optimizer = "rmsprop", loss = "binary_crossentropy", metrics = "acc" ) history <- fit(model, x_train, y_train, epochs = 10, batch_size = 128, validation_data = list(x_test, y_test), verbose = 0 ) predy <- predict(model, x_test) > 0.5 accuracy <- c(accuracy, mean(abs(y_test == as.numeric(predy)))) } tibble( "Max Features" = c(1000, 3000, 5000, 10000), "Accuracy" = accuracy ) |> kable() }) ## Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz ## 8192/17464789 [..............................] 
- ETA: 0s 17464789/17464789 [==============================] - 0s 0us/step ## 782/782 - 16s - 16s/epoch - 20ms/step ## 782/782 - 16s - 16s/epoch - 20ms/step ## 782/782 - 16s - 16s/epoch - 20ms/step ## 782/782 - 16s - 16s/epoch - 20ms/step Max Features Accuracy 1000 0.86084 3000 0.87224 5000 0.87460 10000 0.86180 Varying the dictionary size does not have a substantial impact on our estimates of accuracy. However, the models take a substantial amount of time to fit, and it is not clear we are finding the best-fitting model in each case. For example, the model using a dictionary size of 10,000 obtained an accuracy of 0.8721 in the text; the difference between that value and the estimate obtained here is comparable to the differences between the models with different dictionary sizes. "],["survival-analysis-and-censored-data.html", "11 Survival Analysis and Censored Data 11.1 Conceptual 11.2 Applied", " 11 Survival Analysis and Censored Data 11.1 Conceptual 11.1.1 Question 1 For each example, state whether or not the censoring mechanism is independent. Justify your answer. In a study of disease relapse, due to a careless research scientist, all patients whose phone numbers begin with the number “2” are lost to follow up. Independent. There’s no reason to think disease relapse should be related to the first digit of a phone number. In a study of longevity, a formatting error causes all patient ages that exceed 99 years to be lost (i.e. we know that those patients are more than 99 years old, but we do not know their exact ages). Not independent. Older patients are more likely to see an event than younger patients. Hospital A conducts a study of longevity. However, very sick patients tend to be transferred to Hospital B, and are lost to follow up. Not independent. Sick patients are more likely to see an event than healthy patients. In a study of unemployment duration, the people who find work earlier are less motivated to stay in touch with study investigators, and therefore are more likely to be lost to follow up. Not independent. More employable individuals are more likely to see an event. In a study of pregnancy duration, women who deliver their babies pre-term are more likely to do so away from their usual hospital, and thus are more likely to be censored, relative to women who deliver full-term babies. Not independent. Delivery away from hospital will be associated with pregnancy duration. A researcher wishes to model the number of years of education of the residents of a small town. Residents who enroll in college out of town are more likely to be lost to follow up, and are also more likely to attend graduate school, relative to those who attend college in town. Not independent. Years of education will be associated with enrolling in out of town colleges. Researchers conduct a study of disease-free survival (i.e. time until disease relapse following treatment).
Patients who have not relapsed within five years are considered to be cured, and thus their survival time is censored at five years. In other words we assume all events happen within five years, so censoring after this time is equivalent to not censoring at all so the censoring is independent. We wish to model the failure time for some electrical component. This component can be manufactured in Iowa or in Pittsburgh, with no difference in quality. The Iowa factory opened five years ago, and so components manufactured in Iowa are censored at five years. The Pittsburgh factory opened two years ago, so those components are censored at two years. If there is no difference in quality then location and therefore censoring is independent of failure time. We wish to model the failure time of an electrical component made in two different factories, one of which opened before the other. We have reason to believe that the components manufactured in the factory that opened earlier are of higher quality. In this case, the difference in opening times of the two locations will mean that any difference in quality between locations will be associated with censoring, so censoring is not independent. 11.1.2 Question 2 We conduct a study with \\(n = 4\\) participants who have just purchased cell phones, in order to model the time until phone replacement. The first participant replaces her phone after 1.2 years. The second participant still has not replaced her phone at the end of the two-year study period. The third participant changes her phone number and is lost to follow up (but has not yet replaced her phone) 1.5 years into the study. The fourth participant replaces her phone after 0.2 years. For each of the four participants (\\(i = 1,..., 4\\)), answer the following questions using the notation introduced in Section 11.1: Is the participant’s cell phone replacement time censored? No, Yes, Yes and No. Censoring occurs when we do not know if or when the phone was replaced. Is the value of \\(c_i\\) known, and if so, then what is it? \\(c_i\\) is censoring time. For the four participants these are: NA. 2. 1.5 and NA. Is the value of \\(t_i\\) known, and if so, then what is it? \\(t_i\\) is time to event. For the four participants these are: 1.2, NA, NA and 0.2. Is the value of \\(y_i\\) known, and if so, then what is it? \\(y_i\\) is the observed time. For the four participants these are: 1.2, 2, 1.5 and 0.2. Is the value of \\(\\delta_i\\) known, and if so, then what is it? \\(\\delta_i\\) is an indicator for censoring. The nomenclature introduced here defines this to be 1 if we observe the true “survival” time and 0 if we observe the censored time. Therefore, for these participants, the values are: 1, 0, 0 and 1. 11.1.3 Question 3 For the example in Exercise 2, report the values of \\(K\\), \\(d_1,...,d_K\\), \\(r_1,...,r_K\\), and \\(q_1,...,q_K\\), where this notation was defined in Section 11.3. \\(K\\) is the number of unique deaths, which is 2. \\(d_k\\) represents the unique death times, which are: 0.2, 1.2. \\(r_k\\) denotes the number of patients alive and in the study just before \\(d_k\\). Note the first event is for patient 4, then patient 1, then patient 3 is censored and finally the study ends with patient 2 still involved. Therefore \\(r_k\\) takes values are: 4, 3. \\(q_k\\) denotes the number of patients who died at time \\(d_k\\), therefore this takes values: 1, 1. We can check by using the survival package. 
library(survival) x <- Surv(c(1.2, 2, 1.5, 0.2), event = c(1, 0, 0, 1)) summary(survfit(x ~ 1)) ## Call: survfit(formula = x ~ 1) ## ## time n.risk n.event survival std.err lower 95% CI upper 95% CI ## 0.2 4 1 0.75 0.217 0.426 1 ## 1.2 3 1 0.50 0.250 0.188 1 11.1.4 Question 4 This problem makes use of the Kaplan-Meier survival curve displayed in Figure 11.9. The raw data that went into plotting this survival curve is given in Table 11.4. The covariate column of that table is not needed for this problem. What is the estimated probability of survival past 50 days? There are 2 events that happen before 50 days. The number at risk \\(r_k\\) are 5 and 4 (one was censored early on), thus survival probability is \\(4/5 * 3/4 = 0.6\\). Equivalently, we can use the survival package. library(tidyverse) table_data <- tribble( ~Y, ~D, ~X, 26.5, 1, 0.1, 37.2, 1, 11, 57.3, 1, -0.3, 90.8, 0, 2.8, 20.2, 0, 1.8, 89.8, 0, 0.4 ) x <- Surv(table_data$Y, table_data$D) summary(survfit(x ~ 1)) ## Call: survfit(formula = x ~ 1) ## ## time n.risk n.event survival std.err lower 95% CI upper 95% CI ## 26.5 5 1 0.8 0.179 0.516 1 ## 37.2 4 1 0.6 0.219 0.293 1 ## 57.3 3 1 0.4 0.219 0.137 1 Write out an analytical expression for the estimated survival function. For instance, your answer might be something along the lines of \\[ \\hat{S}(t) = \\begin{cases} 0.8 & \\text{if } t < 31\\\\ 0.5 & \\text{if } 31 \\le t < 77\\\\ 0.22 & \\text{if } 77 \\le t \\end{cases} \\] (The previous equation is for illustration only: it is not the correct answer!) \\[ \\hat{S}(t) = \\begin{cases} 1 & \\text{if } t < 26.5 \\\\ 0.8 & \\text{if } 26.5 \\le t < 37.2 \\\\ 0.6 & \\text{if } 37.2 \\le t < 57.3 \\\\ 0.4 & \\text{if } 57.3 \\le t \\end{cases} \\] 11.1.5 Question 5 Sketch the survival function given by the equation \\[ \\hat{S}(t) = \\begin{cases} 0.8, & \\text{if } t < 31\\\\ 0.5, & \\text{if } 31 \\le t < 77\\\\ 0.22 & \\text{if } 77 \\le t \\end{cases} \\] Your answer should look something like Figure 11.9. We can draw this plot, or even engineer data that will generate the required plot… plot(NULL, xlim = c(0, 100), ylim = c(0, 1), ylab = "Estimated Probability of Survival", xlab = "Time in Days" ) lines( c(0, 31, 31, 77, 77, 100), c(0.8, 0.8, 0.5, 0.5, 0.22, 0.22) ) 11.1.6 Question 6 This problem makes use of the data displayed in Figure 11.1. In completing this problem, you can refer to the observation times as \\(y_1,...,y_4\\). The ordering of these observation times can be seen from Figure 11.1; their exact values are not required. Report the values of \\(\\delta_1,...,\\delta_4\\), \\(K\\), \\(d_1,...,d_K\\), \\(r_1,...,r_K\\), and \\(q_1,...,q_K\\). The relevant notation is defined in Sections 11.1 and 11.3. \\(\\delta\\) values are: 1, 0, 1, 0. \\(K\\) is 2 \\(d\\) values are \\(y_3\\) and \\(y_1\\). \\(r\\) values are 4 and 2. \\(q\\) values are 1 and 1. Sketch the Kaplan-Meier survival curve corresponding to this data set. (You do not need to use any software to do this—you can sketch it by hand using the results obtained in (a).) plot(NULL, xlim = c(0, 350), ylim = c(0, 1), ylab = "Estimated Probability of Survival", xlab = "Time in Days" ) lines( c(0, 150, 150, 300, 300, 350), c(1, 1, 0.75, 0.75, 0.375, 0.375) ) x <- Surv(c(300, 350, 150, 250), c(1, 0, 1, 0)) Based on the survival curve estimated in (b), what is the probability that the event occurs within 200 days? What is the probability that the event does not occur within 310 days? 0.25 and 0.375. Write out an expression for the estimated survival curve from (b). 
\\[ \\hat{S}(t) = \\begin{cases} 1 & \\text{if } t < y_3 \\\\ 0.75 & \\text{if } y_3 \\le t < y_1 \\\\ 0.375 & \\text{if } y_1 \\le t \\end{cases} \\] 11.1.7 Question 7 In this problem, we will derive (11.5) and (11.6), which are needed for the construction of the log-rank test statistic (11.8). Recall the notation in Table 11.1. Assume that there is no difference between the survival functions of the two groups. Then we can think of \\(q_{1k}\\) as the number of failures if we draw \\(r_{1k}\\) observations, without replacement, from a risk set of \\(r_k\\) observations that contains a total of \\(q_k\\) failures. Argue that \\(q_{1k}\\) follows a hypergeometric distribution. Write the parameters of this distribution in terms of \\(r_{1k}\\), \\(r_k\\), and \\(q_k\\). A hypergeometric distribution models sampling without replacement from a finite pool where each sample is a success or failure. This fits the situation here, where we have a finite number of samples in the risk set. The hypergeometric distribution is parameterized as \\(k\\) successes in \\(n\\) draws, without replacement, from a population of size \\(N\\) with \\(K\\) objects with that feature. Mapping to our situation, \\(q_{1k}\\) is \\(k\\), \\(r_{1k}\\) is \\(n\\), \\(r_k\\) is \\(N\\) and \\(q_k\\) is \\(K\\). Given your previous answer, and the properties of the hypergeometric distribution, what are the mean and variance of \\(q_{1k}\\)? Compare your answer to (11.5) and (11.6). With the above parameterization, the mean (\\(n K/N\\)) is \\(r_{1k} q_k/r_k\\). The variance \\(n K/N (N-K)/N (N-n)/(N-1)\\) is \\[ r_{1k} \\frac{q_k}{r_k} \\frac{r_k-q_k}{r_k} \\frac{r_k - r_{1k}}{r_k - 1} \\] These are equivalent to 11.5 and 11.6. 11.1.8 Question 8 Recall that the survival function \\(S(t)\\), the hazard function \\(h(t)\\), and the density function \\(f(t)\\) are defined in (11.2), (11.9), and (11.11), respectively. Furthermore, define \\(F(t) = 1 − S(t)\\). Show that the following relationships hold: \\[ f(t) = dF(t)/dt \\\\ S(t) = \\exp\\left(-\\int_0^t h(u)du\\right) \\] If \\(F(t) = 1 - S(t)\\), then \\(F(t)\\) is the cumulative distribution function (cdf) for \\(t\\). For a continuous distribution, the cdf \\(F(t)\\) can be expressed as an integral of the probability density function (pdf) up to \\(t\\), i.e. \\(F(t) = \\int_{-\\infty}^t f(u) du\\). Equivalently, the derivative of the cdf is its pdf: \\(f(t) = \\frac{d F(t)}{dt}\\). Then, \\(h(t) = \\frac{f(t)}{S(t)} = \\frac{dF(t)/dt}{S(t)} = \\frac{-dS(t)/dt}{S(t)}\\). From basic calculus, this can be rewritten as \\(h(t) = -\\frac{d}{dt}\\log{S(t)}\\). Integrating and then exponentiating, we get the second identity. 11.1.9 Question 9 In this exercise, we will explore the consequences of assuming that the survival times follow an exponential distribution. Suppose that a survival time follows an \\(Exp(\\lambda)\\) distribution, so that its density function is \\(f(t) = \\lambda\\exp(−\\lambda t)\\). Using the relationships provided in Exercise 8, show that \\(S(t) = \\exp(-\\lambda t)\\). The cdf of an exponential distribution is \\(1 - \\exp(-\\lambda t)\\) and \\(S(t)\\) is \\(1 - F(t)\\) where \\(F(t)\\) is the cdf. Hence, \\(S(t) = \\exp(-\\lambda t)\\). Now suppose that each of \\(n\\) independent survival times follows an \\(\\exp(\\lambda)\\) distribution. Write out an expression for the likelihood function (11.13).
The reference to (11.13) gives us the following formula: \\[ L = \\prod_{i=1}^{n} h(y_i)^{\\delta_i} S(y_i) \\] (11.10) also gives us \\[ h(t) = \\frac{f(t)}{S(t)} \\] Plugging in the expressions from part (a), we get \\[\\begin{align*} h(t) &= \\frac{\\lambda \\exp(- \\lambda t)}{\\exp(- \\lambda t)} \\\\ &= \\lambda \\end{align*}\\] Using (11.13), we get the following loss expression: \\[ \\ell = \\prod_i \\lambda^{\\delta_i} e^{- \\lambda y_i} \\] Show that the maximum likelihood estimator for \\(\\lambda\\) is \\[ \\hat\\lambda = \\sum_{i=1}^n \\delta_i / \\sum_{i=1}^n y_i. \\] Take the log likelihood. \\[\\begin{align*} \\log \\ell &= \\sum_i \\log \\left( \\lambda^{\\delta_i} e^{- \\lambda y_i} \\right) \\\\ &= \\sum_i{\\delta_i\\log\\lambda - \\lambda y_i \\log e} \\\\ &= \\sum_i{\\delta_i\\log\\lambda - \\lambda y_i} \\\\ &= \\log\\lambda\\sum_i{\\delta_i} - \\lambda\\sum_i{y_i} \\end{align*}\\] Differentiating this expression with respect to \\(\\lambda\\) we get: \\[ \\frac{d \\log \\ell}{d \\lambda} = \\frac{\\sum_i{\\delta_i}}{\\lambda} - \\sum_i{y_i} \\] This function maximises when its gradient is 0. Solving for this gives a MLE of \\(\\hat\\lambda = \\sum_{i=1}^n \\delta_i / \\sum_{i=1}^n y_i\\). Use your answer to (c) to derive an estimator of the mean survival time. Hint: For (d), recall that the mean of an \\(Exp(\\lambda)\\) random variable is \\(1/\\lambda\\). Estimated mean survival would be \\(1/\\lambda\\) which given the above would be \\(\\sum_{i=1}^n y_i / \\sum_{i=1}^n \\delta_i\\), which can be thought of as the total observation time over the total number of deaths. 11.2 Applied 11.2.1 Question 10 This exercise focuses on the brain tumor data, which is included in the ISLR2 R library. Plot the Kaplan-Meier survival curve with ±1 standard error bands, using the survfit() function in the survival package. library(ISLR2) x <- Surv(BrainCancer$time, BrainCancer$status) plot(survfit(x ~ 1), xlab = "Months", ylab = "Estimated Probability of Survival", col = "steelblue", conf.int = 0.67 ) Draw a bootstrap sample of size \\(n = 88\\) from the pairs (\\(y_i\\), \\(\\delta_i\\)), and compute the resulting Kaplan-Meier survival curve. Repeat this process \\(B = 200\\) times. Use the results to obtain an estimate of the standard error of the Kaplan-Meier survival curve at each timepoint. Compare this to the standard errors obtained in (a). plot(survfit(x ~ 1), xlab = "Months", ylab = "Estimated Probability of Survival", col = "steelblue", conf.int = 0.67 ) fit <- survfit(x ~ 1) dat <- tibble(time = c(0, fit$time)) for (i in 1:200) { y <- survfit(sample(x, 88, replace = TRUE) ~ 1) y <- tibble(time = c(0, y$time), "s{i}" := c(1, y$surv)) dat <- left_join(dat, y, by = "time") } res <- fill(dat, starts_with("s")) |> rowwise() |> transmute(sd = sd(c_across(starts_with("s")))) se <- res$sd[2:nrow(res)] lines(fit$time, fit$surv - se, lty = 2, col = "red") lines(fit$time, fit$surv + se, lty = 2, col = "red") Fit a Cox proportional hazards model that uses all of the predictors to predict survival. Summarize the main findings. 
fit <- coxph(Surv(time, status) ~ sex + diagnosis + loc + ki + gtv + stereo, data = BrainCancer) fit ## Call: ## coxph(formula = Surv(time, status) ~ sex + diagnosis + loc + ## ki + gtv + stereo, data = BrainCancer) ## ## coef exp(coef) se(coef) z p ## sexMale 0.18375 1.20171 0.36036 0.510 0.61012 ## diagnosisLG glioma 0.91502 2.49683 0.63816 1.434 0.15161 ## diagnosisHG glioma 2.15457 8.62414 0.45052 4.782 1.73e-06 ## diagnosisOther 0.88570 2.42467 0.65787 1.346 0.17821 ## locSupratentorial 0.44119 1.55456 0.70367 0.627 0.53066 ## ki -0.05496 0.94653 0.01831 -3.001 0.00269 ## gtv 0.03429 1.03489 0.02233 1.536 0.12466 ## stereoSRT 0.17778 1.19456 0.60158 0.296 0.76760 ## ## Likelihood ratio test=41.37 on 8 df, p=1.776e-06 ## n= 87, number of events= 35 ## (1 observation deleted due to missingness) diagnosisHG and ki are highly significant. Stratify the data by the value of ki. (Since only one observation has ki=40, you can group that observation together with the observations that have ki=60.) Plot Kaplan-Meier survival curves for each of the five strata, adjusted for the other predictors. To adjust for other predictors, we fit a model that includes those predictors and use this model to predict new, artificial, data where we allow ki to take each possible value, but set the other predictors to be the mode or mean of the other predictors. library(ggfortify) modaldata <- data.frame( sex = rep("Female", 5), diagnosis = rep("Meningioma", 5), loc = rep("Supratentorial", 5), ki = c(60, 70, 80, 90, 100), gtv = rep(mean(BrainCancer$gtv), 5), stereo = rep("SRT", 5) ) survplots <- survfit(fit, newdata = modaldata) plot(survplots, xlab = "Months", ylab = "Survival Probability", col = 2:6) legend("bottomleft", c("60", "70", "80", "90", "100"), col = 2:6, lty = 1) 11.2.2 Question 11 This example makes use of the data in Table 11.4. Create two groups of observations. In Group 1, \\(X < 2\\), whereas in Group 2, \\(X \\ge 2\\). Plot the Kaplan-Meier survival curves corresponding to the two groups. Be sure to label the curves so that it is clear which curve corresponds to which group. By eye, does there appear to be a difference between the two groups’ survival curves? x <- split(Surv(table_data$Y, table_data$D), table_data$X < 2) plot(NULL, xlim = c(0, 100), ylim = c(0, 1), ylab = "Survival Probability") lines(survfit(x[[1]] ~ 1), conf.int = FALSE, col = 2) lines(survfit(x[[2]] ~ 1), conf.int = FALSE, col = 3) legend("bottomleft", c(">= 2", "<2"), col = 2:3, lty = 1) There does not appear to be any difference between the curves. Fit Cox’s proportional hazards model, using the group indicator as a covariate. What is the estimated coefficient? Write a sentence providing the interpretation of this coefficient, in terms of the hazard or the instantaneous probability of the event. Is there evidence that the true coefficient value is non-zero? fit <- coxph(Surv(Y, D) ~ X < 2, data = table_data) fit ## Call: ## coxph(formula = Surv(Y, D) ~ X < 2, data = table_data) ## ## coef exp(coef) se(coef) z p ## X < 2TRUE 0.3401 1.4051 1.2359 0.275 0.783 ## ## Likelihood ratio test=0.08 on 1 df, p=0.7797 ## n= 6, number of events= 3 The coefficient is \\(0.3401\\). This implies a slightly increased hazard when \\(X < 2\\) but it is not significantly different to zero (P = 0.8). Recall from Section 11.5.2 that in the case of a single binary covariate, the log-rank test statistic should be identical to the score statistic for the Cox model. 
Conduct a log-rank test to determine whether there is a difference between the survival curves for the two groups. How does the p-value for the log-rank test statistic compare to the \\(p\\)-value for the score statistic for the Cox model from (b)? summary(fit)$sctest ## test df pvalue ## 0.07644306 1.00000000 0.78217683 survdiff(Surv(Y, D) ~ X < 2, data = table_data)$chisq ## [1] 0.07644306 They are identical. "],["unsupervised-learning.html", "12 Unsupervised Learning 12.1 Conceptual 12.2 Applied", " 12 Unsupervised Learning 12.1 Conceptual 12.1.1 Question 1 This problem involves the \\(K\\)-means clustering algorithm. Prove (12.18). 12.18 is: \\[ \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k} \\sum_{j=1}^p (x_{ij} - x_{i'j})^2 = 2 \\sum_{i \\in C_k} \\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\] where \\[\\bar{x}_{kj} = \\frac{1}{|C_k|}\\sum_{i \\in C_k} x_{ij}\\] On the left hand side we compute the difference between each observation (indexed by \\(i\\) and \\(i'\\)). In the second we compute the difference between each observation and the mean. Intuitively this identity is clear (the factor of 2 is present because we calculate the difference between each pair twice). However, to prove. Note first that, \\[\\begin{align} (x_{ij} - x_{i'j})^2 = & ((x_{ij} - \\bar{x}_{kj}) - (x_{i'j} - \\bar{x}_{kj}))^2 \\\\ = & (x_{ij} - \\bar{x}_{kj})^2 - 2(x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + (x_{i'j} - \\bar{x}_{kj})^2 \\end{align}\\] Note that the first term is independent of \\(i'\\) and the last is independent of \\(i\\). Therefore, 10.12 can be written as: \\[\\begin{align} \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k} \\sum_{j=1}^p (x_{ij} - x_{i'j})^2 = & \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 - \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p 2(x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p (x_{i'j} - \\bar{x}_{kj})^2 \\\\ = & \\frac{|C_k|}{|C_k|}\\sum_{i \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 - \\frac{2}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + \\frac{|C_k|}{|C_k|}\\sum_{i \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\\\ = & 2 \\sum_{i \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\end{align}\\] Note that we can drop the term containing \\((x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj})\\) since this is 0 when summed over combinations of \\(i\\) and \\(i'\\) for a given \\(j\\). On the basis of this identity, argue that the \\(K\\)-means clustering algorithm (Algorithm 12.2) decreases the objective (12.17) at each iteration. Equation 10.12 demonstrates that the euclidean distance between each possible pair of samples can be related to the difference from each sample to the mean of the cluster. The K-means algorithm works by minimizing the euclidean distance to each centroid, thus also minimizes the within-cluster variance. 12.1.2 Question 2 Suppose that we have four observations, for which we compute a dissimilarity matrix, given by \\[\\begin{bmatrix} & 0.3 & 0.4 & 0.7 \\\\ 0.3 & & 0.5 & 0.8 \\\\ 0.4 & 0.5 & & 0.45 \\\\ 0.7 & 0.8 & 0.45 & \\\\ \\end{bmatrix}\\] For instance, the dissimilarity between the first and second observations is 0.3, and the dissimilarity between the second and fourth observations is 0.8. On the basis of this dissimilarity matrix, sketch the dendrogram that results from hierarchically clustering these four observations using complete linkage. 
Be sure to indicate on the plot the height at which each fusion occurs, as well as the observations corresponding to each leaf in the dendrogram. m <- matrix(c(0, 0.3, 0.4, 0.7, 0.3, 0, 0.5, 0.8, 0.4, 0.5, 0., 0.45, 0.7, 0.8, 0.45, 0), ncol = 4) c1 <- hclust(as.dist(m), method = "complete") plot(c1) Repeat (a), this time using single linkage clustering. c2 <- hclust(as.dist(m), method = "single") plot(c2) Suppose that we cut the dendrogram obtained in (a) such that two clusters result. Which observations are in each cluster? table(1:4, cutree(c1, 2)) ## ## 1 2 ## 1 1 0 ## 2 1 0 ## 3 0 1 ## 4 0 1 Suppose that we cut the dendrogram obtained in (b) such that two clusters result. Which observations are in each cluster? table(1:4, cutree(c2, 2)) ## ## 1 2 ## 1 1 0 ## 2 1 0 ## 3 1 0 ## 4 0 1 It is mentioned in the chapter that at each fusion in the dendrogram, the position of the two clusters being fused can be swapped without changing the meaning of the dendrogram. Draw a dendrogram that is equivalent to the dendrogram in (a), for which two or more of the leaves are repositioned, but for which the meaning of the dendrogram is the same. plot(c1, labels = c(2, 1, 3, 4)) 12.1.3 Question 3 In this problem, you will perform \\(K\\)-means clustering manually, with \\(K = 2\\), on a small example with \\(n = 6\\) observations and \\(p = 2\\) features. The observations are as follows. Obs. \\(X_1\\) \\(X_2\\) 1 1 4 2 1 3 3 0 4 4 5 1 5 6 2 6 4 0 Plot the observations. library(ggplot2) d <- data.frame( x1 = c(1, 1, 0, 5, 6, 4), x2 = c(4, 3, 4, 1, 2, 0) ) ggplot(d, aes(x = x1, y = x2)) + geom_point() Randomly assign a cluster label to each observation. You can use the sample() command in R to do this. Report the cluster labels for each observation. set.seed(42) d$cluster <- sample(c(1, 2), size = nrow(d), replace = TRUE) Compute the centroid for each cluster. centroids <- sapply(c(1,2), function(i) colMeans(d[d$cluster == i, 1:2])) Assign each observation to the centroid to which it is closest, in terms of Euclidean distance. Report the cluster labels for each observation. dist <- sapply(1:2, function(i) { sqrt((d$x1 - centroids[1, i])^2 + (d$x2 - centroids[2, i])^2) }) d$cluster <- apply(dist, 1, which.min) Repeat (c) and (d) until the answers obtained stop changing. centroids <- sapply(c(1,2), function(i) colMeans(d[d$cluster == i, 1:2])) dist <- sapply(1:2, function(i) { sqrt((d$x1 - centroids[1, i])^2 + (d$x2 - centroids[2, i])^2) }) d$cluster <- apply(dist, 1, which.min) In this case, we get stable labels after the first iteration. In your plot from (a), color the observations according to the cluster labels obtained. ggplot(d, aes(x = x1, y = x2, color = factor(cluster))) + geom_point() 12.1.4 Question 4 Suppose that for a particular data set, we perform hierarchical clustering using single linkage and using complete linkage. We obtain two dendrograms. At a certain point on the single linkage dendrogram, the clusters {1, 2, 3} and {4, 5} fuse. On the complete linkage dendrogram, the clusters {1, 2, 3} and {4, 5} also fuse at a certain point. Which fusion will occur higher on the tree, or will they fuse at the same height, or is there not enough information to tell? The complete linkage fusion will likely be higher in the tree since single linkage is defined as being the minimum distance between two clusters. However, there is a chance that they could be at the same height (so technically there is not enough information to tell). 
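To make this concrete, here is a small made-up illustration (my own example, not from the text): for two fixed groups of observations, the complete-linkage dissimilarity is the maximum of the pairwise inter-group distances and the single-linkage dissimilarity is the minimum, so if both dendrograms fuse exactly {1, 2, 3} and {4, 5}, the complete-linkage fusion can be no lower.
# Hedged illustration with arbitrary simulated points: compare the two linkage
# dissimilarities between groups {1, 2, 3} and {4, 5}.
set.seed(1)
pts <- matrix(rnorm(10), ncol = 2)  # 5 observations, 2 features
d <- as.matrix(dist(pts))
between <- d[1:3, 4:5]              # pairwise distances between the two groups
max(between)                        # complete-linkage dissimilarity between the groups
min(between)                        # single-linkage dissimilarity (never larger than the maximum)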
At a certain point on the single linkage dendrogram, the clusters {5} and {6} fuse. On the complete linkage dendrogram, the clusters {5} and {6} also fuse at a certain point. Which fusion will occur higher on the tree, or will they fuse at the same height, or is there not enough information to tell? They will fuse at the same height (the algorithm for calculating distance is the same when the clusters are of size 1). 12.1.5 Question 5 In words, describe the results that you would expect if you performed \\(K\\)-means clustering of the eight shoppers in Figure 12.16, on the basis of their sock and computer purchases, with \\(K = 2\\). Give three answers, one for each of the variable scalings displayed. Explain. In cases where variables are scaled we would expect clusters to correspond to whether or not the retainer sold a computer. In the first case (raw numbers of items sold), we would expect clusters to represent low vs high numbers of sock purchases. To test, we can run the analysis in R: set.seed(42) dat <- data.frame( socks = c(8, 11, 7, 6, 5, 6, 7, 8), computers = c(0, 0, 0, 0, 1, 1, 1, 1) ) kmeans(dat, 2)$cluster ## [1] 1 1 2 2 2 2 2 1 kmeans(scale(dat), 2)$cluster ## [1] 1 1 1 1 2 2 2 2 dat$computers <- dat$computers * 2000 kmeans(dat, 2)$cluster ## [1] 1 1 1 1 2 2 2 2 12.1.6 Question 6 We saw in Section 12.2.2 that the principal component loading and score vectors provide an approximation to a matrix, in the sense of (12.5). Specifically, the principal component score and loading vectors solve the optimization problem given in (12.6). Now, suppose that the M principal component score vectors zim, \\(m = 1,...,M\\), are known. Using (12.6), explain that the first \\(M\\) principal component loading vectors \\(\\phi_{jm}\\), \\(m = 1,...,M\\), can be obtaining by performing \\(M\\) separate least squares linear regressions. In each regression, the principal component score vectors are the predictors, and one of the features of the data matrix is the response. 12.2 Applied 12.2.1 Question 7 In the chapter, we mentioned the use of correlation-based distance and Euclidean distance as dissimilarity measures for hierarchical clustering. It turns out that these two measures are almost equivalent: if each observation has been centered to have mean zero and standard deviation one, and if we let \\(r_{ij}\\) denote the correlation between the \\(i\\)th and \\(j\\)th observations, then the quantity \\(1 − r_{ij}\\) is proportional to the squared Euclidean distance between the ith and jth observations. On the USArrests data, show that this proportionality holds. Hint: The Euclidean distance can be calculated using the dist() function, and correlations can be calculated using the cor() function. dat <- t(scale(t(USArrests))) d1 <- dist(dat)^2 d2 <- as.dist(1 - cor(t(dat))) plot(d1, d2) 12.2.2 Question 8 In Section 12.2.3, a formula for calculating PVE was given in Equation 12.10. We also saw that the PVE can be obtained using the sdev output of the prcomp() function. On the USArrests data, calculate PVE in two ways: Using the sdev output of the prcomp() function, as was done in Section 12.2.3. pr <- prcomp(USArrests, scale = TRUE) pr$sdev^2 / sum(pr$sdev^2) ## [1] 0.62006039 0.24744129 0.08914080 0.04335752 By applying Equation 12.10 directly. That is, use the prcomp() function to compute the principal component loadings. Then, use those loadings in Equation 12.10 to obtain the PVE. These two approaches should give the same results. 
colSums(pr$x^2) / sum(colSums(scale(USArrests)^2)) ## PC1 PC2 PC3 PC4 ## 0.62006039 0.24744129 0.08914080 0.04335752 Hint: You will only obtain the same results in (a) and (b) if the same data is used in both cases. For instance, if in (a) you performed prcomp() using centered and scaled variables, then you must center and scale the variables before applying Equation 12.10 in (b). 12.2.3 Question 9 Consider the USArrests data. We will now perform hierarchical clustering on the states. Using hierarchical clustering with complete linkage and Euclidean distance, cluster the states. set.seed(42) hc <- hclust(dist(USArrests), method = "complete") Cut the dendrogram at a height that results in three distinct clusters. Which states belong to which clusters? ct <- cutree(hc, 3) sapply(1:3, function(i) names(ct)[ct == i]) ## [[1]] ## [1] "Alabama" "Alaska" "Arizona" "California" ## [5] "Delaware" "Florida" "Illinois" "Louisiana" ## [9] "Maryland" "Michigan" "Mississippi" "Nevada" ## [13] "New Mexico" "New York" "North Carolina" "South Carolina" ## ## [[2]] ## [1] "Arkansas" "Colorado" "Georgia" "Massachusetts" ## [5] "Missouri" "New Jersey" "Oklahoma" "Oregon" ## [9] "Rhode Island" "Tennessee" "Texas" "Virginia" ## [13] "Washington" "Wyoming" ## ## [[3]] ## [1] "Connecticut" "Hawaii" "Idaho" "Indiana" ## [5] "Iowa" "Kansas" "Kentucky" "Maine" ## [9] "Minnesota" "Montana" "Nebraska" "New Hampshire" ## [13] "North Dakota" "Ohio" "Pennsylvania" "South Dakota" ## [17] "Utah" "Vermont" "West Virginia" "Wisconsin" Hierarchically cluster the states using complete linkage and Euclidean distance, after scaling the variables to have standard deviation one. hc2 <- hclust(dist(scale(USArrests)), method = "complete") What effect does scaling the variables have on the hierarchical clustering obtained? In your opinion, should the variables be scaled before the inter-observation dissimilarities are computed? Provide a justification for your answer. ct <- cutree(hc, 3) sapply(1:3, function(i) names(ct)[ct == i]) ## [[1]] ## [1] "Alabama" "Alaska" "Arizona" "California" ## [5] "Delaware" "Florida" "Illinois" "Louisiana" ## [9] "Maryland" "Michigan" "Mississippi" "Nevada" ## [13] "New Mexico" "New York" "North Carolina" "South Carolina" ## ## [[2]] ## [1] "Arkansas" "Colorado" "Georgia" "Massachusetts" ## [5] "Missouri" "New Jersey" "Oklahoma" "Oregon" ## [9] "Rhode Island" "Tennessee" "Texas" "Virginia" ## [13] "Washington" "Wyoming" ## ## [[3]] ## [1] "Connecticut" "Hawaii" "Idaho" "Indiana" ## [5] "Iowa" "Kansas" "Kentucky" "Maine" ## [9] "Minnesota" "Montana" "Nebraska" "New Hampshire" ## [13] "North Dakota" "Ohio" "Pennsylvania" "South Dakota" ## [17] "Utah" "Vermont" "West Virginia" "Wisconsin" Scaling results in different clusters and the choice of whether to scale or not depends on the data in question. In this case, the variables are: Murder numeric Murder arrests (per 100,000) Assault numeric Assault arrests (per 100,000) UrbanPop numeric Percent urban population Rape numeric Rape arrests (per 100,000) These variables are not naturally on the same unit and the units involved are somewhat arbitrary (so for example, Murder could be measured per 1 million rather than per 100,000) so in this case I would argue the data should be scaled. 12.2.4 Question 10 In this problem, you will generate simulated data, and then perform PCA and \\(K\\)-means clustering on the data. Generate a simulated data set with 20 observations in each of three classes (i.e. 60 observations total), and 50 variables. 
Hint: There are a number of functions in R that you can use to generate data. One example is the rnorm() function; runif() is another option. Be sure to add a mean shift to the observations in each class so that there are three distinct classes. set.seed(42) data <- matrix(rnorm(60 * 50), ncol = 50) classes <- rep(c("A", "B", "C"), each = 20) dimnames(data) <- list(classes, paste0("v", 1:50)) data[classes == "B", 1:10] <- data[classes == "B", 1:10] + 1.2 data[classes == "C", 5:30] <- data[classes == "C", 5:30] + 1 Perform PCA on the 60 observations and plot the first two principal component score vectors. Use a different color to indicate the observations in each of the three classes. If the three classes appear separated in this plot, then continue on to part (c). If not, then return to part (a) and modify the simulation so that there is greater separation between the three classes. Do not continue to part (c) until the three classes show at least some separation in the first two principal component score vectors. pca <- prcomp(data) ggplot(data.frame(Class = classes, PC1 = pca$x[, 1], PC2 = pca$x[, 2]), aes(x = PC1, y = PC2, col = Class)) + geom_point() Perform \\(K\\)-means clustering of the observations with \\(K = 3\\). How well do the clusters that you obtained in \\(K\\)-means clustering compare to the true class labels? Hint: You can use the table() function in R to compare the true class labels to the class labels obtained by clustering. Be careful how you interpret the results: \\(K\\)-means clustering will arbitrarily number the clusters, so you cannot simply check whether the true class labels and clustering labels are the same. km <- kmeans(data, 3)$cluster table(km, names(km)) ## ## km A B C ## 1 1 20 1 ## 2 0 0 19 ## 3 19 0 0 \\(K\\)-means separates out the clusters nearly perfectly. Perform \\(K\\)-means clustering with \\(K = 2\\). Describe your results. km <- kmeans(data, 2)$cluster table(km, names(km)) ## ## km A B C ## 1 18 20 1 ## 2 2 0 19 \\(K\\)-means effectively defines cluster 2 to be class B, but cluster 1 is a mix of classes A and B. Now perform \\(K\\)-means clustering with \\(K = 4\\), and describe your results. km <- kmeans(data, 4)$cluster table(km, names(km)) ## ## km A B C ## 1 0 7 2 ## 2 18 1 0 ## 3 0 0 18 ## 4 2 12 0 \\(K\\)-means effectively defines cluster 1 to be class B, cluster 2 to be class A but clusters 3 and 4 are split over class C. Now perform \\(K\\)-means clustering with \\(K = 3\\) on the first two principal component score vectors, rather than on the raw data. That is, perform \\(K\\)-means clustering on the \\(60 \\times 2\\) matrix of which the first column is the first principal component score vector, and the second column is the second principal component score vector. Comment on the results. km <- kmeans(pca$x[, 1:2], 3)$cluster table(km, names(km)) ## ## km A B C ## 1 0 20 2 ## 2 20 0 0 ## 3 0 0 18 \\(K\\)-means again separates out the clusters nearly perfectly. Using the scale() function, perform \\(K\\)-means clustering with \\(K = 3\\) on the data after scaling each variable to have standard deviation one. How do these results compare to those obtained in (b)? Explain. km <- kmeans(scale(data), 3)$cluster table(km, names(km)) ## ## km A B C ## 1 1 20 1 ## 2 19 0 0 ## 3 0 0 19 \\(K\\)-means appears to perform less well on the scaled data in this case. 12.2.5 Question 11 Write an R function to perform matrix completion as in Algorithm 12.1, and as outlined in Section 12.5.2. 
In each iteration, the function should keep track of the relative error, as well as the iteration count. Iterations should continue until the relative error is small enough or until some maximum number of iterations is reached (set a default value for this maximum number). Furthermore, there should be an option to print out the progress in each iteration. Test your function on the Boston data. First, standardize the features to have mean zero and standard deviation one using the scale() function. Run an experiment where you randomly leave out an increasing (and nested) number of observations from 5% to 30%, in steps of 5%. Apply Algorithm 12.1 with \\(M = 1,2,...,8\\). Display the approximation error as a function of the fraction of observations that are missing, and the value of \\(M\\), averaged over 10 repetitions of the experiment. 12.2.6 Question 12 In Section 12.5.2, Algorithm 12.1 was implemented using the svd() function. However, given the connection between the svd() function and the prcomp() function highlighted in the lab, we could have instead implemented the algorithm using prcomp(). Write a function to implement Algorithm 12.1 that makes use of prcomp() rather than svd(). 12.2.7 Question 13 On the book website, www.StatLearning.com, there is a gene expression data set (Ch12Ex13.csv) that consists of 40 tissue samples with measurements on 1,000 genes. The first 20 samples are from healthy patients, while the second 20 are from a diseased group. Load in the data using read.csv(). You will need to select header = F. data <- read.csv("data/Ch12Ex13.csv", header = FALSE) colnames(data) <- c(paste0("H", 1:20), paste0("D", 1:20)) Apply hierarchical clustering to the samples using correlation-based distance, and plot the dendrogram. Do the genes separate the samples into the two groups? Do your results depend on the type of linkage used? hc.complete <- hclust(as.dist(1 - cor(data)), method = "complete") plot(hc.complete) hc.complete <- hclust(as.dist(1 - cor(data)), method = "average") plot(hc.complete) hc.complete <- hclust(as.dist(1 - cor(data)), method = "single") plot(hc.complete) Yes the samples clearly separate into the two groups, although the results depend somewhat on the linkage method used. In the case of average clustering, the disease samples all fall within a subset of the healthy samples. Your collaborator wants to know which genes differ the most across the two groups. Suggest a way to answer this question, and apply it here. This is probably best achieved with a supervised approach. A simple method would be to determine which genes show the most significant differences between the groups by applying a t-test to each group. We can then select those with a FDR adjusted p-value less than some given threshold (e.g. 0.05). 
class <- factor(rep(c("Healthy", "Diseased"), each = 20)) pvals <- p.adjust(apply(data, 1, function(v) t.test(v ~ class)$p.value)) which(pvals < 0.05) ## [1] 11 12 13 14 15 16 17 18 19 20 501 502 503 504 505 506 507 508 ## [19] 509 511 512 513 514 515 516 517 519 520 521 522 523 524 525 526 527 528 ## [37] 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 ## [55] 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 ## [73] 565 566 567 568 569 570 571 572 574 575 576 577 578 579 580 581 582 583 ## [91] 584 586 587 588 589 590 591 592 593 595 596 597 598 599 600 "],["multiple-testing.html", "13 Multiple Testing 13.1 Conceptual 13.2 Applied", " 13 Multiple Testing 13.1 Conceptual 13.1.1 Question 1 Suppose we test \\(m\\) null hypotheses, all of which are true. We control the Type I error for each null hypothesis at level \\(\\alpha\\). For each sub-problem, justify your answer. In total, how many Type I errors do we expect to make? We expect \\(m\\alpha\\). Suppose that the m tests that we perform are independent. What is the family-wise error rate associated with these m tests? Hint: If two events A and B are independent, then Pr(A ∩ B) = Pr(A) Pr(B). The family-wise error rate (FWER) is defined as the probability of making at least one Type I error. We can think of this as 1 minus the probability of no type I errors, which is: \\(1 - (1 - \\alpha)^m\\) Alternatively, for two tests this is: Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). For independent tests this is \\(\\alpha + \\alpha - \\alpha^2\\) Suppose that \\(m = 2\\), and that the p-values for the two tests are positively correlated, so that if one is small then the other will tend to be small as well, and if one is large then the other will tend to be large. How does the family-wise error rate associated with these \\(m = 2\\) tests qualitatively compare to the answer in (b) with \\(m = 2\\)? Hint: First, suppose that the two p-values are perfectly correlated. If they were perfectly correlated, we would effectively be performing a single test (thus FWER would be \\(alpha\\)). In the case when they are positively correlated therefore, we can expect the FWER to be less than in b. Alternatively, as above, FWEW = Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). For perfectly positively correlated tests Pr(A ∩ B) = \\(\\alpha\\), so the FWEW is \\(\\alpha\\) which is smaller than b. Suppose again that \\(m = 2\\), but that now the p-values for the two tests are negatively correlated, so that if one is large then the other will tend to be small. How does the family-wise error rate associated with these \\(m = 2\\) tests qualitatively compare to the answer in (b) with \\(m = 2\\)? Hint: First, suppose that whenever one p-value is less than \\(\\alpha\\), then the other will be greater than \\(\\alpha\\). In other words, we can never reject both null hypotheses. Taking the equation above, for two tests, FWEW = Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). In the case considered in the hint Pr(A ∩ B) = 0, so Pr(A ∪ B) = \\(2\\alpha\\), which is larger than b. 13.1.2 Question 2 Suppose that we test \\(m\\) hypotheses, and control the Type I error for each hypothesis at level \\(\\alpha\\). Assume that all \\(m\\) p-values are independent, and that all null hypotheses are true. Let the random variable \\(A_j\\) equal 1 if the \\(j\\)th null hypothesis is rejected, and 0 otherwise. What is the distribution of \\(A_j\\)? 
\\(A_j\\) follows a Bernoulli distribution, \\(A_j \\sim \\text{Bernoulli}(\\alpha)\\), since each true null hypothesis is rejected with probability \\(\\alpha\\). What is the distribution of \\(\\sum_{j=1}^m A_j\\)? It follows a binomial distribution, \\(\\sum_{j=1}^m A_j \\sim Bi(m, \\alpha)\\). What is the standard deviation of the number of Type I errors that we will make? The variance of a Binomial is \\(npq\\), so for this situation the standard deviation would be \\(\\sqrt{m \\alpha (1-\\alpha)}\\). 13.1.3 Question 3 Suppose we test \\(m\\) null hypotheses, and control the Type I error for the \\(j\\)th null hypothesis at level \\(\\alpha_j\\), for \\(j=1,...,m\\). Argue that the family-wise error rate is no greater than \\(\\sum_{j=1}^m \\alpha_j\\). For any two events, \\(p(A \\cup B) = p(A) + p(B) - p(A \\cap B)\\), and since \\(p(A \\cap B)\\) must be non-negative, \\(p(A \\cup B) \\leq p(A) + p(B)\\) (whether or not the events are independent). Extending this to \\(m\\) events by induction, the probability of a Type I error in any of the \\(m\\) hypotheses (the family-wise error rate) can be no larger than the sum of the probabilities for each individual hypothesis (which is \\(\\alpha_j\\) for the \\(j\\)th), i.e. no greater than \\(\\sum_{j=1}^m \\alpha_j\\). 13.1.4 Question 4 Suppose we test \\(m = 10\\) hypotheses, and obtain the p-values shown in Table 13.4. pvals <- c(0.0011, 0.031, 0.017, 0.32, 0.11, 0.90, 0.07, 0.006, 0.004, 0.0009) names(pvals) <- paste0("H", sprintf("%02d", 1:10)) Suppose that we wish to control the Type I error for each null hypothesis at level \\(\\alpha = 0.05\\). Which null hypotheses will we reject? names(which(pvals < 0.05)) ## [1] "H01" "H02" "H03" "H08" "H09" "H10" We reject all null hypotheses where \\(p < 0.05\\). Now suppose that we wish to control the FWER at level \\(\\alpha = 0.05\\). Which null hypotheses will we reject? Justify your answer. names(which(pvals < 0.05 / 10)) ## [1] "H01" "H09" "H10" We reject all null hypotheses where \\(p < 0.005\\). Now suppose that we wish to control the FDR at level \\(q = 0.05\\). Which null hypotheses will we reject? Justify your answer. names(which(p.adjust(pvals, "fdr") < 0.05)) ## [1] "H01" "H03" "H08" "H09" "H10" We reject all null hypotheses where \\(q < 0.05\\). Now suppose that we wish to control the FDR at level \\(q = 0.2\\). Which null hypotheses will we reject? Justify your answer. names(which(p.adjust(pvals, "fdr") < 0.2)) ## [1] "H01" "H02" "H03" "H05" "H07" "H08" "H09" "H10" We reject all null hypotheses where \\(q < 0.2\\). Of the null hypotheses rejected at FDR level \\(q = 0.2\\), approximately how many are false positives? Justify your answer. We expect up to 20% of the rejections (in this case, about 2 of the 8) to be false positives. 13.1.5 Question 5 For this problem, you will make up p-values that lead to a certain number of rejections using the Bonferroni and Holm procedures. Give an example of five p-values (i.e. five numbers between 0 and 1 which, for the purpose of this problem, we will interpret as p-values) for which both Bonferroni’s method and Holm’s method reject exactly one null hypothesis when controlling the FWER at level 0.1. In this case, for Bonferroni, we need one p-value to be less than \\(0.1 / 5 = 0.02\\), and the others to be above it. For Holm’s method, we need the most significant p-value to be below \\(0.1/(5 + 1 - 1) = 0.02\\) as well, and the second smallest to be at least \\(0.1/(5 + 1 - 2) = 0.025\\) so that no further hypotheses are rejected. An example would be: 1, 1, 1, 1, 0.001.
pvals <- c(1, 1, 1, 1, 0.001) sum(p.adjust(pvals, method = "bonferroni") < 0.1) ## [1] 1 sum(p.adjust(pvals, method = "holm") < 0.1) ## [1] 1 Now give an example of five p-values for which Bonferroni rejects one null hypothesis and Holm rejects more than one null hypothesis at level 0.1. An example would be: 1, 1, 1, 0.02, 0.001. For Holm’s method we reject two because \\(0.02 < 0.1/(5 + 1 - 2)\\). pvals <- c(1, 1, 1, 0.02, 0.001) sum(p.adjust(pvals, method = "bonferroni") < 0.1) ## [1] 1 sum(p.adjust(pvals, method = "holm") < 0.1) ## [1] 2 13.1.6 Question 6 For each of the three panels in Figure 13.3, answer the following questions: There are always: 8 positives (red) and 2 negatives (black). False / true positives are black / red points below the line respectively. False / true negatives are red / black points above the line respectively. Type I / II errors are the same as false positives and false negatives respectively. How many false positives, false negatives, true positives, true negatives, Type I errors, and Type II errors result from applying the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.05\\)? Panel FP FN TP TN Type I Type II 1 0 1 7 2 0 1 2 0 1 7 2 0 1 3 0 5 3 2 0 5 How many false positives, false negatives, true positives, true negatives, Type I errors, and Type II errors result from applying the Holm procedure to control the FWER at level \\(\\alpha = 0.05\\)? Panel FP FN TP TN Type I Type II 1 0 1 7 2 0 1 2 0 0 8 2 0 0 3 0 0 8 2 0 0 What is the false discovery rate associated with using the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.05\\)? False discovery rate is the expected ratio of false positives to total positives. There are never any false positives (black points below the line). There are always the same number of total positives (8). For panels 1, 2, 3 this would be 0/8, 0/8 and 0/8 respectively. What is the false discovery rate associated with using the Holm procedure to control the FWER at level \\(\\alpha = 0.05\\)? For panels 1, 2, 3 this would be 0/8, 0/8 and 0/8 respectively. How would the answers to (a) and (c) change if we instead used the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.001\\)? This would equate to a more stringent threshold. We would not call any more false positives, so the results would not change. 13.2 Applied 13.2.1 Question 7 This problem makes use of the Carseats dataset in the ISLR2 package. For each quantitative variable in the dataset besides Sales, fit a linear model to predict Sales using that quantitative variable. Report the p-values associated with the coefficients for the variables. That is, for each model of the form \\(Y = \\beta_0 + \\beta_1X + \\epsilon\\), report the p-value associated with the coefficient \\(\\beta_1\\). Here, \\(Y\\) represents Sales and \\(X\\) represents one of the other quantitative variables. library(ISLR2) nm <- c("CompPrice", "Income", "Advertising", "Population", "Price", "Age") pvals <- sapply(nm, function(n) { summary(lm(Carseats[["Sales"]] ~ Carseats[[n]]))$coef[2, 4] }) Suppose we control the Type I error at level \\(\\alpha = 0.05\\) for the p-values obtained in (a). Which null hypotheses do we reject? names(which(pvals < 0.05)) ## [1] "Income" "Advertising" "Price" "Age" Now suppose we control the FWER at level 0.05 for the p-values. Which null hypotheses do we reject? names(which(pvals < 0.05 / length(nm))) ## [1] "Income" "Advertising" "Price" "Age" Finally, suppose we control the FDR at level 0.2 for the p-values. 
Which null hypotheses do we reject? names(which(p.adjust(pvals, "fdr") < 0.2)) ## [1] "Income" "Advertising" "Price" "Age" 13.2.2 Question 8 In this problem, we will simulate data from \\(m = 100\\) fund managers. set.seed(1) n <- 20 m <- 100 X <- matrix(rnorm(n * m), ncol = m) set.seed(1) n <- 20 m <- 100 X <- matrix(rnorm(n * m), ncol = m) These data represent each fund manager’s percentage returns for each of \\(n = 20\\) months. We wish to test the null hypothesis that each fund manager’s percentage returns have population mean equal to zero. Notice that we simulated the data in such a way that each fund manager’s percentage returns do have population mean zero; in other words, all \\(m\\) null hypotheses are true. Conduct a one-sample \\(t\\)-test for each fund manager, and plot a histogram of the \\(p\\)-values obtained. pvals <- apply(X, 2, function(p) t.test(p)$p.value) hist(pvals, main = NULL) If we control Type I error for each null hypothesis at level \\(\\alpha = 0.05\\), then how many null hypotheses do we reject? sum(pvals < 0.05) ## [1] 4 If we control the FWER at level 0.05, then how many null hypotheses do we reject? sum(pvals < 0.05 / length(pvals)) ## [1] 0 If we control the FDR at level 0.05, then how many null hypotheses do we reject? sum(p.adjust(pvals, "fdr") < 0.05) ## [1] 0 Now suppose we “cherry-pick” the 10 fund managers who perform the best in our data. If we control the FWER for just these 10 fund managers at level 0.05, then how many null hypotheses do we reject? If we control the FDR for just these 10 fund managers at level 0.05, then how many null hypotheses do we reject? best <- order(apply(X, 2, sum), decreasing = TRUE)[1:10] sum(pvals[best] < 0.05 / 10) ## [1] 1 sum(p.adjust(pvals[best], "fdr") < 0.05) ## [1] 1 Explain why the analysis in (e) is misleading. Hint The standard approaches for controlling the FWER and FDR assume that all tested null hypotheses are adjusted for multiplicity, and that no “cherry-picking” of the smallest p-values has occurred. What goes wrong if we cherry-pick? This is misleading because we are not correctly accounting for all tests performed. Cherry-picking is similar to repeating a test until, by chance, we find a significant result. "],["404.html", "Page not found", " Page not found The page you requested cannot be found (perhaps it was moved or renamed). You may want to try searching to find the page's new location, or use the table of contents to find the page you are looking for. "]] +[["index.html", "An Introduction to Statistical Learning Exercise solutions in R 1 Introduction", " An Introduction to Statistical Learning Exercise solutions in R 1 Introduction This bookdown document provides solutions for exercises in the book “An Introduction to Statistical Learning with Applications in R”, second edition, by Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani. "],["statistical-learning.html", "2 Statistical Learning 2.1 Conceptual 2.2 Applied", " 2 Statistical Learning 2.1 Conceptual 2.1.1 Question 1 For each of parts (a) through (d), indicate whether we would generally expect the performance of a flexible statistical learning method to be better or worse than an inflexible method. Justify your answer. The sample size \\(n\\) is extremely large, and the number of predictors \\(p\\) is small. Flexible best - opposite of b. The number of predictors \\(p\\) is extremely large, and the number of observations \\(n\\) is small. Inflexible best - high chance of some predictors being randomly associated (see the short simulation below).
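To make the answer to (b) concrete, here is a small illustrative simulation (a supplement, not part of the original solution; the seed and dimensions are arbitrary). With far fewer observations than predictors, an ordinary least squares fit to pure noise attains a training \\(R^2\\) close to 1, purely through chance associations between the predictors and the response.

``` r
# Illustrative sketch: n much smaller than p, and no true signal at all.
set.seed(42)
n <- 20   # few observations
p <- 15   # many noise predictors
x <- matrix(rnorm(n * p), nrow = n)
y <- rnorm(n)              # response is unrelated to the predictors
fit <- lm(y ~ x)           # a "flexible" fit relative to the sample size
summary(fit)$r.squared     # close to 1 despite there being no signal
```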
The relationship between the predictors and response is highly non-linear. Flexible best - inflexible leads to high bias. The variance of the error terms, i.e. \\(\\sigma^2 = Var(\\epsilon)\\), is extremely high. Inflexible best - opposite of c. 2.1.2 Question 2 Explain whether each scenario is a classification or regression problem, and indicate whether we are most interested in inference or prediction. Finally, provide \\(n\\) and \\(p\\). We collect a set of data on the top 500 firms in the US. For each firm we record profit, number of employees, industry and the CEO salary. We are interested in understanding which factors affect CEO salary. \\(n=500\\), \\(p=3\\), regression, inference. We are considering launching a new product and wish to know whether it will be a success or a failure. We collect data on 20 similar products that were previously launched. For each product we have recorded whether it was a success or failure, price charged for the product, marketing budget, competition price, and ten other variables. \\(n=20\\), \\(p=13\\), classification, prediction. We are interested in predicting the % change in the USD/Euro exchange rate in relation to the weekly changes in the world stock markets. Hence we collect weekly data for all of 2012. For each week we record the % change in the USD/Euro, the % change in the US market, the % change in the British market, and the % change in the German market. \\(n=52\\), \\(p=3\\), regression, prediction. 2.1.3 Question 3 We now revisit the bias-variance decomposition. Provide a sketch of typical (squared) bias, variance, training error, test error, and Bayes (or irreducible) error curves, on a single plot, as we go from less flexible statistical learning methods towards more flexible approaches. The x-axis should represent the amount of flexibility in the method, and the y-axis should represent the values for each curve. There should be five curves. Make sure to label each one. Explain why each of the five curves has the shape displayed in part (a). (squared) bias: Decreases with increasing flexibility (Generally, more flexible methods result in less bias). variance: Increases with increasing flexibility (In general, more flexible statistical methods have higher variance). training error: Decreases with model flexibility (More complex models will better fit the training data). test error: Decreases initially, then increases due to overfitting (the initial reduction in bias is eventually outweighed by the increase in variance); see the simulation below. Bayes (irreducible) error: fixed (does not change with model). 2.1.4 Question 4 You will now think of some real-life applications for statistical learning. Describe three real-life applications in which classification might be useful. Describe the response, as well as the predictors. Is the goal of each application inference or prediction? Explain your answer. Coffee machine cleaned? (day of week, person assigned), inference. Is a flight delayed? (airline, airport etc), inference. Beer type (IPA, pilsner etc.), prediction. Describe three real-life applications in which regression might be useful. Describe the response, as well as the predictors. Is the goal of each application inference or prediction? Explain your answer. Amount of bonus paid (profitability, client feedback), prediction. Person’s height, prediction. House price, inference. Describe three real-life applications in which cluster analysis might be useful. RNAseq tumour gene expression data. SNPs in human populations. Frequencies of mutations (with base pair context) in somatic mutation data.
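Returning to Question 3, the following sketch (a supplement, not part of the original answer) simulates the training and test error curves, using polynomial degree as the measure of flexibility. The cubic signal, noise level and range of degrees are arbitrary choices, but the qualitative shapes are general: training MSE falls monotonically, while test MSE is U-shaped.

``` r
# Illustrative sketch: train vs test MSE as flexibility increases.
set.seed(1)
n <- 100
x <- runif(n, -2, 2)
f <- function(x) x^3 - 2 * x                 # true non-linear signal
train_y <- f(x) + rnorm(n)
test_x <- runif(n, -2, 2)
test_y <- f(test_x) + rnorm(n)
degrees <- 1:10
errs <- sapply(degrees, function(d) {
  fit <- lm(train_y ~ poly(x, d))
  c(train = mean((train_y - fitted(fit))^2),
    test = mean((test_y - predict(fit, data.frame(x = test_x)))^2))
})
matplot(degrees, t(errs), type = "l", lty = 1:2, col = 1,
        xlab = "Flexibility (polynomial degree)", ylab = "MSE")
legend("topright", c("train", "test"), lty = 1:2)
```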
2.1.5 Question 5 What are the advantages and disadvantages of a very flexible (versus a less flexible) approach for regression or classification? Under what circumstances might a more flexible approach be preferred to a less flexible approach? When might a less flexible approach be preferred? Inflexible is more interpretable, fewer observations required, can be biased. Flexible can overfit (high error variance). In cases where we have high \\(n\\) or non-linear patterns flexible will be preferred. 2.1.6 Question 6 Describe the differences between a parametric and a non-parametric statistical learning approach. What are the advantages of a parametric approach to regression or classification (as opposed to a non-parametric approach)? What are its disadvantages? Parametric uses (model) parameters. Parametric models can be more interpretable as there is a model behind how data is generated. However, the disadvantage is that the model might not reflect reality. If the model is too far from the truth, estimates will be poor and more flexible models can fit many different forms and require more parameters (leading to overfitting). Non-parametric approaches do not estimate a small number of parameters, so a large number of observations may be needed to obtain accurate estimates. 2.1.7 Question 7 The table below provides a training data set containing six observations, three predictors, and one qualitative response variable. Obs. \\(X_1\\) \\(X_2\\) \\(X_3\\) \\(Y\\) 1 0 3 0 Red 2 2 0 0 Red 3 0 1 3 Red 4 0 1 2 Green 5 -1 0 1 Green 6 1 1 1 Red Suppose we wish to use this data set to make a prediction for \\(Y\\) when \\(X_1 = X_2 = X_3 = 0\\) using \\(K\\)-nearest neighbors. Compute the Euclidean distance between each observation and the test point, \\(X_1 = X_2 = X_3 = 0\\). dat <- data.frame( "x1" = c(0, 2, 0, 0, -1, 1), "x2" = c(3, 0, 1, 1, 0, 1), "x3" = c(0, 0, 3, 2, 1, 1), "y" = c("Red", "Red", "Red", "Green", "Green", "Red") ) # Euclidean distance between points and c(0, 0, 0) dist <- sqrt(dat[["x1"]]^2 + dat[["x2"]]^2 + dat[["x3"]]^2) signif(dist, 3) ## [1] 3.00 2.00 3.16 2.24 1.41 1.73 What is our prediction with \\(K = 1\\)? Why? knn <- function(k) { names(which.max(table(dat[["y"]][order(dist)[1:k]]))) } knn(1) ## [1] "Green" Green (based on data point 5 only) What is our prediction with \\(K = 3\\)? Why? knn(3) ## [1] "Red" Red (based on data points 2, 5, 6) If the Bayes decision boundary in this problem is highly non-linear, then would we expect the best value for \\(K\\) to be large or small? Why? Small (high \\(k\\) leads to linear boundaries due to averaging) 2.2 Applied 2.2.1 Question 8 This exercise relates to the College data set, which can be found in the file College.csv. It contains a number of variables for 777 different universities and colleges in the US. 
The variables are Private : Public/private indicator Apps : Number of applications received Accept : Number of applicants accepted Enroll : Number of new students enrolled Top10perc : New students from top 10% of high school class Top25perc : New students from top 25% of high school class F.Undergrad : Number of full-time undergraduates P.Undergrad : Number of part-time undergraduates Outstate : Out-of-state tuition Room.Board : Room and board costs Books : Estimated book costs Personal : Estimated personal spending PhD : Percent of faculty with Ph.D.’s Terminal : Percent of faculty with terminal degree S.F.Ratio : Student/faculty ratio perc.alumni : Percent of alumni who donate Expend : Instructional expenditure per student Grad.Rate : Graduation rate Before reading the data into R, it can be viewed in Excel or a text editor. Use the read.csv() function to read the data into R. Call the loaded data college. Make sure that you have the directory set to the correct location for the data. college <- read.csv("data/College.csv") Look at the data using the View() function. You should notice that the first column is just the name of each university. We don’t really want R to treat this as data. However, it may be handy to have these names for later. Try the following commands: rownames(college) <- college[, 1] View(college) You should see that there is now a row.names column with the name of each university recorded. This means that R has given each row a name corresponding to the appropriate university. R will not try to perform calculations on the row names. However, we still need to eliminate the first column in the data where the names are stored. Try college <- college [, -1] View(college) Now you should see that the first data column is Private. Note that another column labeled row.names now appears before the Private column. However, this is not a data column but rather the name that R is giving to each row. rownames(college) <- college[, 1] college <- college[, -1] Use the summary() function to produce a numerical summary of the variables in the data set. Use the pairs() function to produce a scatterplot matrix of the first ten columns or variables of the data. Recall that you can reference the first ten columns of a matrix A using A[,1:10]. Use the plot() function to produce side-by-side boxplots of Outstate versus Private. Create a new qualitative variable, called Elite, by binning the Top10perc variable. We are going to divide universities into two groups based on whether or not the proportion of students coming from the top 10% of their high school classes exceeds 50%. > Elite <- rep("No", nrow(college)) > Elite[college$Top10perc > 50] <- "Yes" > Elite <- as.factor(Elite) > college <- data.frame(college, Elite) Use the summary() function to see how many elite universities there are. Now use the plot() function to produce side-by-side boxplots of Outstate versus Elite. Use the hist() function to produce some histograms with differing numbers of bins for a few of the quantitative variables. You may find the command par(mfrow=c(2,2)) useful: it will divide the print window into four regions so that four plots can be made simultaneously. Modifying the arguments to this function will divide the screen in other ways. Continue exploring the data, and provide a brief summary of what you discover. summary(college) ## Private Apps Accept Enroll ## Length:777 Min. : 81 Min. : 72 Min. 
: 35 ## Class :character 1st Qu.: 776 1st Qu.: 604 1st Qu.: 242 ## Mode :character Median : 1558 Median : 1110 Median : 434 ## Mean : 3002 Mean : 2019 Mean : 780 ## 3rd Qu.: 3624 3rd Qu.: 2424 3rd Qu.: 902 ## Max. :48094 Max. :26330 Max. :6392 ## Top10perc Top25perc F.Undergrad P.Undergrad ## Min. : 1.00 Min. : 9.0 Min. : 139 Min. : 1.0 ## 1st Qu.:15.00 1st Qu.: 41.0 1st Qu.: 992 1st Qu.: 95.0 ## Median :23.00 Median : 54.0 Median : 1707 Median : 353.0 ## Mean :27.56 Mean : 55.8 Mean : 3700 Mean : 855.3 ## 3rd Qu.:35.00 3rd Qu.: 69.0 3rd Qu.: 4005 3rd Qu.: 967.0 ## Max. :96.00 Max. :100.0 Max. :31643 Max. :21836.0 ## Outstate Room.Board Books Personal ## Min. : 2340 Min. :1780 Min. : 96.0 Min. : 250 ## 1st Qu.: 7320 1st Qu.:3597 1st Qu.: 470.0 1st Qu.: 850 ## Median : 9990 Median :4200 Median : 500.0 Median :1200 ## Mean :10441 Mean :4358 Mean : 549.4 Mean :1341 ## 3rd Qu.:12925 3rd Qu.:5050 3rd Qu.: 600.0 3rd Qu.:1700 ## Max. :21700 Max. :8124 Max. :2340.0 Max. :6800 ## PhD Terminal S.F.Ratio perc.alumni ## Min. : 8.00 Min. : 24.0 Min. : 2.50 Min. : 0.00 ## 1st Qu.: 62.00 1st Qu.: 71.0 1st Qu.:11.50 1st Qu.:13.00 ## Median : 75.00 Median : 82.0 Median :13.60 Median :21.00 ## Mean : 72.66 Mean : 79.7 Mean :14.09 Mean :22.74 ## 3rd Qu.: 85.00 3rd Qu.: 92.0 3rd Qu.:16.50 3rd Qu.:31.00 ## Max. :103.00 Max. :100.0 Max. :39.80 Max. :64.00 ## Expend Grad.Rate ## Min. : 3186 Min. : 10.00 ## 1st Qu.: 6751 1st Qu.: 53.00 ## Median : 8377 Median : 65.00 ## Mean : 9660 Mean : 65.46 ## 3rd Qu.:10830 3rd Qu.: 78.00 ## Max. :56233 Max. :118.00 college$Private <- college$Private == "Yes" pairs(college[, 1:10], cex = 0.2) plot(college$Outstate ~ factor(college$Private), xlab = "Private", ylab = "Outstate") college$Elite <- factor(ifelse(college$Top10perc > 50, "Yes", "No")) summary(college$Elite) ## No Yes ## 699 78 plot(college$Outstate ~ college$Elite, xlab = "Elite", ylab = "Outstate") par(mfrow = c(2,2)) for (n in c(5, 10, 20, 50)) { hist(college$Enroll, breaks = n, main = paste("n =", n), xlab = "Enroll") } chisq.test(college$Private, college$Elite) ## ## Pearson's Chi-squared test with Yates' continuity correction ## ## data: college$Private and college$Elite ## X-squared = 4.3498, df = 1, p-value = 0.03701 Whether a college is Private and Elite is not random! 2.2.2 Question 9 This exercise involves the Auto data set studied in the lab. Make sure that the missing values have been removed from the data. x <- read.table("data/Auto.data", header = TRUE, na.strings = "?") x <- na.omit(x) Which of the predictors are quantitative, and which are qualitative? sapply(x, class) ## mpg cylinders displacement horsepower weight acceleration ## "numeric" "integer" "numeric" "numeric" "numeric" "numeric" ## year origin name ## "integer" "integer" "character" numeric <- which(sapply(x, class) == "numeric") names(numeric) ## [1] "mpg" "displacement" "horsepower" "weight" "acceleration" What is the range of each quantitative predictor? You can answer this using the range() function. sapply(x[, numeric], function(x) diff(range(x))) ## mpg displacement horsepower weight acceleration ## 37.6 387.0 184.0 3527.0 16.8 What is the mean and standard deviation of each quantitative predictor? 
library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1 ## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1 ## ✔ purrr 1.0.2 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors library(knitr) x[, numeric] |> pivot_longer(everything()) |> group_by(name) |> summarise( Mean = mean(value), SD = sd(value) ) |> kable() name Mean SD acceleration 15.54133 2.758864 displacement 194.41199 104.644004 horsepower 104.46939 38.491160 mpg 23.44592 7.805008 weight 2977.58418 849.402560 Now remove the 10th through 85th observations. What is the range, mean, and standard deviation of each predictor in the subset of the data that remains? x[-(10:85), numeric] |> pivot_longer(everything()) |> group_by(name) |> summarise( Range = diff(range(value)), Mean = mean(value), SD = sd(value) ) |> kable() name Range Mean SD acceleration 16.3 15.72690 2.693721 displacement 387.0 187.24051 99.678367 horsepower 184.0 100.72152 35.708853 mpg 35.6 24.40443 7.867283 weight 3348.0 2935.97152 811.300208 Using the full data set, investigate the predictors graphically, using scatterplots or other tools of your choice. Create some plots highlighting the relationships among the predictors. Comment on your findings. pairs(x[, numeric], cex = 0.2) cor(x[, numeric]) |> kable() mpg displacement horsepower weight acceleration mpg 1.0000000 -0.8051269 -0.7784268 -0.8322442 0.4233285 displacement -0.8051269 1.0000000 0.8972570 0.9329944 -0.5438005 horsepower -0.7784268 0.8972570 1.0000000 0.8645377 -0.6891955 weight -0.8322442 0.9329944 0.8645377 1.0000000 -0.4168392 acceleration 0.4233285 -0.5438005 -0.6891955 -0.4168392 1.0000000 heatmap(cor(x[, numeric]), cexRow = 1.1, cexCol = 1.1, margins = c(8, 8)) Many of the variables appear to be highly (positively or negatively) correlated with some relationships being non-linear. Suppose that we wish to predict gas mileage (mpg) on the basis of the other variables. Do your plots suggest that any of the other variables might be useful in predicting mpg? Justify your answer. Yes, since other variables are correlated. However, horsepower, weight and displacement are highly related. 2.2.3 Question 10 This exercise involves the Boston housing data set. To begin, load in the Boston data set. The Boston data set is part of the ISLR2 library in R. > library(ISLR2) Now the data set is contained in the object Boston. > Boston Read about the data set: > ?Boston How many rows are in this data set? How many columns? What do the rows and columns represent? library(ISLR2) dim(Boston) ## [1] 506 13 Make some pairwise scatterplots of the predictors (columns) in this data set. Describe your findings. library(ggplot2) library(tidyverse) ggplot(Boston, aes(nox, rm)) + geom_point() ggplot(Boston, aes(ptratio, rm)) + geom_point() heatmap(cor(Boston, method = "spearman"), cexRow = 1.1, cexCol = 1.1) Are any of the predictors associated with per capita crime rate? If so, explain the relationship. Yes Do any of the census tracts of Boston appear to have particularly high crime rates? Tax rates? Pupil-teacher ratios? Comment on the range of each predictor. 
Boston |> pivot_longer(cols = 1:13) |> filter(name %in% c("crim", "tax", "ptratio")) |> ggplot(aes(value)) + geom_histogram(bins = 20) + facet_wrap(~name, scales="free", ncol= 1) Yes, particularly crime and tax rates. How many of the census tracts in this data set bound the Charles river? sum(Boston$chas) ## [1] 35 What is the median pupil-teacher ratio among the towns in this data set? median(Boston$ptratio) ## [1] 19.05 Which census tract of Boston has lowest median value of owner-occupied homes? What are the values of the other predictors for that census tract, and how do those values compare to the overall ranges for those predictors? Comment on your findings. Boston[Boston$medv == min(Boston$medv), ] |> kable() crim zn indus chas nox rm age dis rad tax ptratio lstat medv 399 38.3518 0 18.1 0 0.693 5.453 100 1.4896 24 666 20.2 30.59 5 406 67.9208 0 18.1 0 0.693 5.683 100 1.4254 24 666 20.2 22.98 5 sapply(Boston, quantile) |> kable() crim zn indus chas nox rm age dis rad tax ptratio lstat medv 0% 0.006320 0.0 0.46 0 0.385 3.5610 2.900 1.129600 1 187 12.60 1.730 5.000 25% 0.082045 0.0 5.19 0 0.449 5.8855 45.025 2.100175 4 279 17.40 6.950 17.025 50% 0.256510 0.0 9.69 0 0.538 6.2085 77.500 3.207450 5 330 19.05 11.360 21.200 75% 3.677083 12.5 18.10 0 0.624 6.6235 94.075 5.188425 24 666 20.20 16.955 25.000 100% 88.976200 100.0 27.74 1 0.871 8.7800 100.000 12.126500 24 711 22.00 37.970 50.000 In this data set, how many of the census tract average more than seven rooms per dwelling? More than eight rooms per dwelling? Comment on the census tracts that average more than eight rooms per dwelling. sum(Boston$rm > 7) ## [1] 64 sum(Boston$rm > 8) ## [1] 13 Let’s compare median statistics for those census tracts with more than eight rooms per dwelling on average, with the statistics for those with fewer. Boston |> mutate( `log(crim)` = log(crim), `log(zn)` = log(zn) ) |> select(-c(crim, zn)) |> pivot_longer(!rm) |> mutate(">8 rooms" = rm > 8) |> ggplot(aes(`>8 rooms`, value)) + geom_boxplot() + facet_wrap(~name, scales = "free") ## Warning: Removed 372 rows containing non-finite outside the scale range ## (`stat_boxplot()`). Census tracts with big average properties (more than eight rooms per dwelling) have higher median value (medv), a lower proportion of non-retail business acres (indus), a lower pupil-teacher ratio (ptratio), a lower status of the population (lstat) among other differences. "],["linear-regression.html", "3 Linear Regression 3.1 Conceptual 3.2 Applied", " 3 Linear Regression 3.1 Conceptual 3.1.1 Question 1 Describe the null hypotheses to which the p-values given in Table 3.4 correspond. Explain what conclusions you can draw based on these p-values. Your explanation should be phrased in terms of sales, TV, radio, and newspaper, rather than in terms of the coefficients of the linear model. For intercept, that \\(\\beta_0 = 0\\) For the others, that \\(\\beta_n = 0\\) (for \\(n = 1, 2, 3\\)) We can conclude that that without any spending, there are still some sales (the intercept is not 0). Furthermore, we can conclude that money spent on TV and radio are significantly associated with increased sales, but the same cannot be said of newspaper spending. 3.1.2 Question 2 Carefully explain the differences between the KNN classifier and KNN regression methods. 
The KNN classifier is categorical and assigns a value based on the most frequent observed category among \\(K\\) nearest neighbors, whereas KNN regression assigns a continuous variable, the average of the response variables for the \\(K\\) nearest neighbors. 3.1.3 Question 3 Suppose we have a data set with five predictors, \\(X_1\\) = GPA, \\(X_2\\) = IQ, \\(X_3\\) = Level (1 for College and 0 for High School), \\(X_4\\) = Interaction between GPA and IQ, and \\(X_5\\) = Interaction between GPA and Level. The response is starting salary after graduation (in thousands of dollars). Suppose we use least squares to fit the model, and get \\(\\hat\\beta_0 = 50\\), \\(\\hat\\beta_1 = 20\\), \\(\\hat\\beta_2 = 0.07\\), \\(\\hat\\beta_3 = 35\\), \\(\\hat\\beta_4 = 0.01\\), \\(\\hat\\beta_5 = -10\\). Which answer is correct, and why? For a fixed value of IQ and GPA, high school graduates earn more on average than college graduates. For a fixed value of IQ and GPA, college graduates earn more on average than high school graduates. For a fixed value of IQ and GPA, high school graduates earn more on average than college graduates provided that the GPA is high enough. For a fixed value of IQ and GPA, college graduates earn more on average than high school graduates provided that the GPA is high enough. The model is: \\(y = \\beta_0 + \\beta_1 \\cdot \\text{GPA} + \\beta_2 \\cdot \\text{IQ} + \\beta_3 \\cdot \\text{Level} + \\beta_4 \\cdot \\text{GPA} \\cdot \\text{IQ} + \\beta_5 \\cdot \\text{GPA} \\cdot \\text{Level}\\) Fixing IQ and GPA, changing Level from 0 to 1 will change the outcome by: \\(\\Delta y = \\beta_3 + \\beta_5 \\cdot \\text{GPA}\\) \\(\\Delta y > 0 \\Rightarrow \\beta_3 + \\beta_5 \\cdot \\text{GPA} > 0 \\Rightarrow \\text{GPA} < \\dfrac{-\\beta_3}{\\beta_5} = \\dfrac{-35}{-10} = 3.5\\) From a graphical standpoint: library(plotly) model <- function(gpa, iq, level) { 50 + gpa * 20 + iq * 0.07 + level * 35 + gpa * iq * 0.01 + gpa * level * -10 } x <- seq(1, 5, length = 10) y <- seq(1, 200, length = 20) college <- t(outer(x, y, model, level = 1)) high_school <- t(outer(x, y, model, level = 0)) plot_ly(x = x, y = y) |> add_surface( z = ~college, colorscale = list(c(0, 1), c("rgb(107,184,214)", "rgb(0,90,124)")), colorbar = list(title = "College")) |> add_surface( z = ~high_school, colorscale = list(c(0, 1), c("rgb(255,112,184)", "rgb(128,0,64)")), colorbar = list(title = "High school")) |> layout(scene = list( xaxis = list(title = "GPA"), yaxis = list(title = "IQ"), zaxis = list(title = "Salary"))) Option iii correct. Predict the salary of a college graduate with IQ of 110 and a GPA of 4.0. model(gpa = 4, iq = 110, level = 1) ## [1] 137.1 True or false: Since the coefficient for the GPA/IQ interaction term is very small, there is very little evidence of an interaction effect. Justify your answer. This is false. It is important to remember that GPA and IQ vary over different scales. It is better to explicitly test the significance of the interaction effect, and/or visualize or quantify the effect on sales under realistic ranges of GPA/IQ values. 3.1.4 Question 4 I collect a set of data (\\(n = 100\\) observations) containing a single predictor and a quantitative response. I then fit a linear regression model to the data, as well as a separate cubic regression, i.e. \\(Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon\\). Suppose that the true relationship between \\(X\\) and \\(Y\\) is linear, i.e. \\(Y = \\beta_0 + \\beta_1X + \\epsilon\\). 
Consider the training residual sum of squares (RSS) for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer. You would expect the cubic regression to have lower RSS since it is at least as flexible as the linear regression. Answer (a) using test rather than training RSS. Though we could not be certain, the test RSS would likely be higher due to overfitting. Suppose that the true relationship between \\(X\\) and \\(Y\\) is not linear, but we don’t know how far it is from linear. Consider the training RSS for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer. You would expect the cubic regression to have lower RSS since it is at least as flexible as the linear regression. Answer (c) using test rather than training RSS. There is not enough information to tell, it depends on how non-linear the true relationship is. 3.1.5 Question 5 Consider the fitted values that result from performing linear regression without an intercept. In this setting, the ith fitted value takes the form \\[\\hat{y}_i = x_i\\hat\\beta,\\] where \\[\\hat{\\beta} = \\left(\\sum_{i=1}^nx_iy_i\\right) / \\left(\\sum_{i' = 1}^n x^2_{i'}\\right).\\] show that we can write \\[\\hat{y}_i = \\sum_{i' = 1}^na_{i'}y_{i'}\\] What is \\(a_{i'}\\)? Note: We interpret this result by saying that the fitted values from linear regression are linear combinations of the response values. \\[\\begin{align} \\hat{y}_i & = x_i \\frac{\\sum_{i=1}^nx_iy_i}{\\sum_{i' = 1}^n x^2_{i'}} \\\\ & = x_i \\frac{\\sum_{i'=1}^nx_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\frac{\\sum_{i'=1}^n x_i x_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\sum_{i'=1}^n \\frac{ x_i x_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\sum_{i'=1}^n \\frac{ x_i x_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} y_{i'} \\end{align}\\] therefore, \\[a_{i'} = \\frac{ x_i x_{i'}}{\\sum x^2}\\] 3.1.6 Question 6 Using (3.4), argue that in the case of simple linear regression, the least squares line always passes through the point \\((\\bar{x}, \\bar{y})\\). when \\(x = \\bar{x}\\) what is \\(y\\)? \\[\\begin{align} y &= \\hat\\beta_0 + \\hat\\beta_1\\bar{x} \\\\ &= \\bar{y} - \\hat\\beta_1\\bar{x} + \\hat\\beta_1\\bar{x} \\\\ &= \\bar{y} \\end{align}\\] 3.1.7 Question 7 It is claimed in the text that in the case of simple linear regression of \\(Y\\) onto \\(X\\), the \\(R^2\\) statistic (3.17) is equal to the square of the correlation between \\(X\\) and \\(Y\\) (3.18). Prove that this is the case. For simplicity, you may assume that \\(\\bar{x} = \\bar{y} = 0\\). 
We have the following equations: \\[ R^2 = \\frac{\\textit{TSS} - \\textit{RSS}}{\\textit{TSS}} \\] \\[ Cor(x,y) = \\frac{\\sum_i (x_i-\\bar{x})(y_i - \\bar{y})}{\\sqrt{\\sum_i(x_i - \\bar{x})^2}\\sqrt{\\sum_i(y_i - \\bar{y})^2}} \\] As above, its important to remember \\(\\sum_i x_i = \\sum_j x_j\\) when \\(\\bar{x} = \\bar{y} = 0\\) \\[ Cor(x,y)^2 = \\frac{(\\sum_ix_iy_i)^2}{\\sum_ix_i^2 \\sum_iy_i^2} \\] Also note that: \\[\\hat{y}_i = \\hat\\beta_o + \\hat\\beta_1x_i = x_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2}\\] Therefore, given that \\(RSS = \\sum_i(y_i - \\hat{y}_i)^2\\) and \\(\\textit{TSS} = \\sum_i(y_i - \\bar{y})^2 = \\sum_iy_i^2\\) \\[\\begin{align} R^2 &= \\frac{\\sum_iy_i^2 - \\sum_i(y_i - x_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2} {\\sum_iy_i^2} \\\\ &= \\frac{\\sum_iy_i^2 - \\sum_i( y_i^2 - 2y_ix_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2} + x_i^2 (\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2 )}{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\sum_i(y_ix_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2}) - \\sum_i(x_i^2 (\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2) }{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\sum_i(y_ix_i) \\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2} - \\sum_i(x_i^2) \\frac{(\\sum_j{x_jy_j})^2}{(\\sum_jx_j^2)^2} }{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\frac{(\\sum_i{x_iy_i})^2}{\\sum_jx_j^2} - \\frac{(\\sum_i{x_iy_i})^2}{\\sum_jx_j^2} }{\\sum_iy_i^2} \\\\ &= \\frac{(\\sum_i{x_iy_i})^2}{\\sum_ix_i^2 \\sum_iy_i^2} \\end{align}\\] 3.2 Applied 3.2.1 Question 8 This question involves the use of simple linear regression on the Auto data set. Use the lm() function to perform a simple linear regression with mpg as the response and horsepower as the predictor. Use the summary() function to print the results. Comment on the output. For example: Is there a relationship between the predictor and the response? How strong is the relationship between the predictor and the response? Is the relationship between the predictor and the response positive or negative? What is the predicted mpg associated with a horsepower of 98? What are the associated 95% confidence and prediction intervals? library(ISLR2) fit <- lm(mpg ~ horsepower, data = Auto) summary(fit) ## ## Call: ## lm(formula = mpg ~ horsepower, data = Auto) ## ## Residuals: ## Min 1Q Median 3Q Max ## -13.5710 -3.2592 -0.3435 2.7630 16.9240 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 39.935861 0.717499 55.66 <2e-16 *** ## horsepower -0.157845 0.006446 -24.49 <2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 4.906 on 390 degrees of freedom ## Multiple R-squared: 0.6059, Adjusted R-squared: 0.6049 ## F-statistic: 599.7 on 1 and 390 DF, p-value: < 2.2e-16 Yes, there is a significant relationship between predictor and response. For every unit increase in horsepower, mpg reduces by 0.16 (a negative relationship). predict(fit, data.frame(horsepower = 98), interval = "confidence") ## fit lwr upr ## 1 24.46708 23.97308 24.96108 predict(fit, data.frame(horsepower = 98), interval = "prediction") ## fit lwr upr ## 1 24.46708 14.8094 34.12476 Plot the response and the predictor. Use the abline() function to display the least squares regression line. plot(Auto$horsepower, Auto$mpg, xlab = "horsepower", ylab = "mpg") abline(fit) Use the plot() function to produce diagnostic plots of the least squares regression fit. Comment on any problems you see with the fit. par(mfrow = c(2, 2)) plot(fit, cex = 0.2) The residuals show a trend with respect to the fitted values suggesting a non-linear relationship. 
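As a quick numerical cross-check (a supplement, not part of the original solution), the \\(R^2\\) reported for the simple regression above should equal the squared correlation between mpg and horsepower, as shown algebraically in Question 7.

``` r
# R^2 from the simple regression equals the squared correlation (Question 7).
library(ISLR2)
fit <- lm(mpg ~ horsepower, data = Auto)
c(r.squared = summary(fit)$r.squared,
  cor.squared = cor(Auto$mpg, Auto$horsepower)^2)
```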
3.2.2 Question 9 This question involves the use of multiple linear regression on the Auto data set. Produce a scatterplot matrix which includes all of the variables in the data set. pairs(Auto, cex = 0.2) Compute the matrix of correlations between the variables using the function cor(). You will need to exclude the name variable, name which is qualitative. x <- subset(Auto, select = -name) cor(x) ## mpg cylinders displacement horsepower weight ## mpg 1.0000000 -0.7776175 -0.8051269 -0.7784268 -0.8322442 ## cylinders -0.7776175 1.0000000 0.9508233 0.8429834 0.8975273 ## displacement -0.8051269 0.9508233 1.0000000 0.8972570 0.9329944 ## horsepower -0.7784268 0.8429834 0.8972570 1.0000000 0.8645377 ## weight -0.8322442 0.8975273 0.9329944 0.8645377 1.0000000 ## acceleration 0.4233285 -0.5046834 -0.5438005 -0.6891955 -0.4168392 ## year 0.5805410 -0.3456474 -0.3698552 -0.4163615 -0.3091199 ## origin 0.5652088 -0.5689316 -0.6145351 -0.4551715 -0.5850054 ## acceleration year origin ## mpg 0.4233285 0.5805410 0.5652088 ## cylinders -0.5046834 -0.3456474 -0.5689316 ## displacement -0.5438005 -0.3698552 -0.6145351 ## horsepower -0.6891955 -0.4163615 -0.4551715 ## weight -0.4168392 -0.3091199 -0.5850054 ## acceleration 1.0000000 0.2903161 0.2127458 ## year 0.2903161 1.0000000 0.1815277 ## origin 0.2127458 0.1815277 1.0000000 Use the lm() function to perform a multiple linear regression with mpg as the response and all other variables except name as the predictors. Use the summary() function to print the results. Comment on the output. For instance: Is there a relationship between the predictors and the response? Which predictors appear to have a statistically significant relationship to the response? What does the coefficient for the year variable suggest? fit <- lm(mpg ~ ., data = x) summary(fit) ## ## Call: ## lm(formula = mpg ~ ., data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.5903 -2.1565 -0.1169 1.8690 13.0604 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -17.218435 4.644294 -3.707 0.00024 *** ## cylinders -0.493376 0.323282 -1.526 0.12780 ## displacement 0.019896 0.007515 2.647 0.00844 ** ## horsepower -0.016951 0.013787 -1.230 0.21963 ## weight -0.006474 0.000652 -9.929 < 2e-16 *** ## acceleration 0.080576 0.098845 0.815 0.41548 ## year 0.750773 0.050973 14.729 < 2e-16 *** ## origin 1.426141 0.278136 5.127 4.67e-07 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.328 on 384 degrees of freedom ## Multiple R-squared: 0.8215, Adjusted R-squared: 0.8182 ## F-statistic: 252.4 on 7 and 384 DF, p-value: < 2.2e-16 Yes, there is a relationship between some predictors and response, notably “displacement” (positive), “weight” (negative), “year” (positive) and “origin” (positive). The coefficient for year (which is positive \\(~0.75\\)) suggests that mpg increases by about this amount every year on average. Use the plot() function to produce diagnostic plots of the linear regression fit. Comment on any problems you see with the fit. Do the residual plots suggest any unusually large outliers? Does the leverage plot identify any observations with unusually high leverage? par(mfrow = c(2, 2)) plot(fit, cex = 0.2) One point has high leverage, the residuals also show a trend with fitted values. Use the * and : symbols to fit linear regression models with interaction effects. Do any interactions appear to be statistically significant? summary(lm(mpg ~ . 
+ weight:horsepower, data = x)) ## ## Call: ## lm(formula = mpg ~ . + weight:horsepower, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -8.589 -1.617 -0.184 1.541 12.001 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.876e+00 4.511e+00 0.638 0.524147 ## cylinders -2.955e-02 2.881e-01 -0.103 0.918363 ## displacement 5.950e-03 6.750e-03 0.881 0.378610 ## horsepower -2.313e-01 2.363e-02 -9.791 < 2e-16 *** ## weight -1.121e-02 7.285e-04 -15.393 < 2e-16 *** ## acceleration -9.019e-02 8.855e-02 -1.019 0.309081 ## year 7.695e-01 4.494e-02 17.124 < 2e-16 *** ## origin 8.344e-01 2.513e-01 3.320 0.000986 *** ## horsepower:weight 5.529e-05 5.227e-06 10.577 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.931 on 383 degrees of freedom ## Multiple R-squared: 0.8618, Adjusted R-squared: 0.859 ## F-statistic: 298.6 on 8 and 383 DF, p-value: < 2.2e-16 summary(lm(mpg ~ . + acceleration:horsepower, data = x)) ## ## Call: ## lm(formula = mpg ~ . + acceleration:horsepower, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.0329 -1.8177 -0.1183 1.7247 12.4870 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -32.499820 4.923380 -6.601 1.36e-10 *** ## cylinders 0.083489 0.316913 0.263 0.792350 ## displacement -0.007649 0.008161 -0.937 0.349244 ## horsepower 0.127188 0.024746 5.140 4.40e-07 *** ## weight -0.003976 0.000716 -5.552 5.27e-08 *** ## acceleration 0.983282 0.161513 6.088 2.78e-09 *** ## year 0.755919 0.048179 15.690 < 2e-16 *** ## origin 1.035733 0.268962 3.851 0.000138 *** ## horsepower:acceleration -0.012139 0.001772 -6.851 2.93e-11 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.145 on 383 degrees of freedom ## Multiple R-squared: 0.841, Adjusted R-squared: 0.8376 ## F-statistic: 253.2 on 8 and 383 DF, p-value: < 2.2e-16 summary(lm(mpg ~ . + cylinders:weight, data = x)) ## ## Call: ## lm(formula = mpg ~ . + cylinders:weight, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -10.9484 -1.7133 -0.1809 1.4530 12.4137 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 7.3143478 5.0076737 1.461 0.14494 ## cylinders -5.0347425 0.5795767 -8.687 < 2e-16 *** ## displacement 0.0156444 0.0068409 2.287 0.02275 * ## horsepower -0.0314213 0.0126216 -2.489 0.01322 * ## weight -0.0150329 0.0011125 -13.513 < 2e-16 *** ## acceleration 0.1006438 0.0897944 1.121 0.26306 ## year 0.7813453 0.0464139 16.834 < 2e-16 *** ## origin 0.8030154 0.2617333 3.068 0.00231 ** ## cylinders:weight 0.0015058 0.0001657 9.088 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.022 on 383 degrees of freedom ## Multiple R-squared: 0.8531, Adjusted R-squared: 0.8501 ## F-statistic: 278.1 on 8 and 383 DF, p-value: < 2.2e-16 There are at least three cases where the interactions appear to be highly significant. Try a few different transformations of the variables, such as \\(log(X)\\), \\(\\sqrt{X}\\), \\(X^2\\). Comment on your findings. Here I’ll just consider transformations for horsepower. 
par(mfrow = c(2, 2)) plot(Auto$horsepower, Auto$mpg, cex = 0.2) plot(log(Auto$horsepower), Auto$mpg, cex = 0.2) plot(sqrt(Auto$horsepower), Auto$mpg, cex = 0.2) plot(Auto$horsepower ^ 2, Auto$mpg, cex = 0.2) x <- subset(Auto, select = -name) x$horsepower <- log(x$horsepower) fit <- lm(mpg ~ ., data = x) summary(fit) ## ## Call: ## lm(formula = mpg ~ ., data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.3115 -2.0041 -0.1726 1.8393 12.6579 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 27.254005 8.589614 3.173 0.00163 ** ## cylinders -0.486206 0.306692 -1.585 0.11372 ## displacement 0.019456 0.006876 2.830 0.00491 ** ## horsepower -9.506436 1.539619 -6.175 1.69e-09 *** ## weight -0.004266 0.000694 -6.148 1.97e-09 *** ## acceleration -0.292088 0.103804 -2.814 0.00515 ** ## year 0.705329 0.048456 14.556 < 2e-16 *** ## origin 1.482435 0.259347 5.716 2.19e-08 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.18 on 384 degrees of freedom ## Multiple R-squared: 0.837, Adjusted R-squared: 0.834 ## F-statistic: 281.6 on 7 and 384 DF, p-value: < 2.2e-16 par(mfrow = c(2, 2)) plot(fit, cex = 0.2) A log transformation of horsepower appears to give a more linear relationship with mpg. 3.2.3 Question 10 This question should be answered using the Carseats data set. Fit a multiple regression model to predict Sales using Price, Urban, and US. fit <- lm(Sales ~ Price + Urban + US, data = Carseats) Provide an interpretation of each coefficient in the model. Be careful—some of the variables in the model are qualitative! summary(fit) ## ## Call: ## lm(formula = Sales ~ Price + Urban + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9206 -1.6220 -0.0564 1.5786 7.0581 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.043469 0.651012 20.036 < 2e-16 *** ## Price -0.054459 0.005242 -10.389 < 2e-16 *** ## UrbanYes -0.021916 0.271650 -0.081 0.936 ## USYes 1.200573 0.259042 4.635 4.86e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.472 on 396 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2335 ## F-statistic: 41.52 on 3 and 396 DF, p-value: < 2.2e-16 Write out the model in equation form, being careful to handle the qualitative variables properly. \\[ \\textit{Sales} = 13 + -0.054 \\times \\textit{Price} + \\begin{cases} -0.022, & \\text{if $\\textit{Urban}$ is Yes, $\\textit{US}$ is No} \\\\ 1.20, & \\text{if $\\textit{Urban}$ is No, $\\textit{US}$ is Yes} \\\\ 1.18, & \\text{if $\\textit{Urban}$ and $\\textit{US}$ is Yes} \\\\ 0, & \\text{Otherwise} \\end{cases} \\] For which of the predictors can you reject the null hypothesis \\(H_0 : \\beta_j = 0\\)? Price and US (Urban shows no significant difference between “No” and “Yes”) On the basis of your response to the previous question, fit a smaller model that only uses the predictors for which there is evidence of association with the outcome. fit2 <- lm(Sales ~ Price + US, data = Carseats) How well do the models in (a) and (e) fit the data? summary(fit) ## ## Call: ## lm(formula = Sales ~ Price + Urban + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9206 -1.6220 -0.0564 1.5786 7.0581 ## ## Coefficients: ## Estimate Std. 
Error t value Pr(>|t|) ## (Intercept) 13.043469 0.651012 20.036 < 2e-16 *** ## Price -0.054459 0.005242 -10.389 < 2e-16 *** ## UrbanYes -0.021916 0.271650 -0.081 0.936 ## USYes 1.200573 0.259042 4.635 4.86e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.472 on 396 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2335 ## F-statistic: 41.52 on 3 and 396 DF, p-value: < 2.2e-16 summary(fit2) ## ## Call: ## lm(formula = Sales ~ Price + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9269 -1.6286 -0.0574 1.5766 7.0515 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.03079 0.63098 20.652 < 2e-16 *** ## Price -0.05448 0.00523 -10.416 < 2e-16 *** ## USYes 1.19964 0.25846 4.641 4.71e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.469 on 397 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2354 ## F-statistic: 62.43 on 2 and 397 DF, p-value: < 2.2e-16 anova(fit, fit2) ## Analysis of Variance Table ## ## Model 1: Sales ~ Price + Urban + US ## Model 2: Sales ~ Price + US ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 396 2420.8 ## 2 397 2420.9 -1 -0.03979 0.0065 0.9357 They have similar \\(R^2\\) and the model containing the extra variable “Urban” is non-significantly better. Using the model from (e), obtain 95% confidence intervals for the coefficient(s). confint(fit2) ## 2.5 % 97.5 % ## (Intercept) 11.79032020 14.27126531 ## Price -0.06475984 -0.04419543 ## USYes 0.69151957 1.70776632 Is there evidence of outliers or high leverage observations in the model from (e)? par(mfrow = c(2, 2)) plot(fit2, cex = 0.2) Yes, somewhat. 3.2.4 Question 11 In this problem we will investigate the t-statistic for the null hypothesis \\(H_0 : \\beta = 0\\) in simple linear regression without an intercept. To begin, we generate a predictor x and a response y as follows. set.seed(1) x <- rnorm(100) y <- 2 * x + rnorm(100) set.seed(1) x <- rnorm(100) y <- 2 * x + rnorm(100) Perform a simple linear regression of y onto x, without an intercept. Report the coefficient estimate \\(\\hat{\\beta}\\), the standard error of this coefficient estimate, and the t-statistic and p-value associated with the null hypothesis \\(H_0 : \\beta = 0\\). Comment on these results. (You can perform regression without an intercept using the command lm(y~x+0).) fit <- lm(y ~ x + 0) coef(summary(fit)) ## Estimate Std. Error t value Pr(>|t|) ## x 1.993876 0.1064767 18.72593 2.642197e-34 There’s a significant positive relationship between \\(y\\) and \\(x\\). \\(y\\) values are predicted to be (a little below) twice the \\(x\\) values. Now perform a simple linear regression of x onto y without an intercept, and report the coefficient estimate, its standard error, and the corresponding t-statistic and p-values associated with the null hypothesis \\(H_0 : \\beta = 0\\). Comment on these results. fit <- lm(x ~ y + 0) coef(summary(fit)) ## Estimate Std. Error t value Pr(>|t|) ## y 0.3911145 0.02088625 18.72593 2.642197e-34 There’s a significant positive relationship between \\(x\\) and \\(y\\). \\(x\\) values are predicted to be (a little below) half the \\(y\\) values. What is the relationship between the results obtained in (a) and (b)? Without error, the coefficients would be the inverse of each other (2 and 1/2). The t-statistic and p-values are the same. 
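The observation that the two estimates are not exact inverses can be made precise (this check is a supplement, not part of the original solution): for regressions through the origin, the product of the two slopes is \\((\\sum_i x_iy_i)^2 / (\\sum_i x_i^2 \\sum_i y_i^2)\\), which by the Cauchy-Schwarz inequality is at most 1, with equality only when \\(y\\) is an exact multiple of \\(x\\).

``` r
# The product of the two no-intercept slopes equals the squared
# (uncentred) correlation, which is at most 1.
set.seed(1)
x <- rnorm(100)
y <- 2 * x + rnorm(100)
b_yx <- coef(lm(y ~ x + 0))
b_xy <- coef(lm(x ~ y + 0))
c(product = unname(b_yx * b_xy),
  identity = sum(x * y)^2 / (sum(x^2) * sum(y^2)))
```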
For the regression of \\(Y\\) onto \\(X\\) without an intercept, the t-statistic for \\(H_0 : \\beta = 0\\) takes the form \\(\\hat{\\beta}/SE(\\hat{\\beta})\\), where \\(\\hat{\\beta}\\) is given by (3.38), and where \\[ SE(\\hat\\beta) = \\sqrt{\\frac{\\sum_{i=1}^n(y_i - x_i\\hat\\beta)^2}{(n-1)\\sum_{i'=1}^nx_{i'}^2}}. \\] (These formulas are slightly different from those given in Sections 3.1.1 and 3.1.2, since here we are performing regression without an intercept.) Show algebraically, and confirm numerically in R, that the t-statistic can be written as \\[ \\frac{(\\sqrt{n-1}) \\sum_{i-1}^nx_iy_i)} {\\sqrt{(\\sum_{i=1}^nx_i^2)(\\sum_{i'=1}^ny_{i'}^2)-(\\sum_{i'=1}^nx_{i'}y_{i'})^2}} \\] \\[ \\beta = \\sum_i x_i y_i / \\sum_{i'} x_{i'}^2 ,\\] therefore \\[\\begin{align} t &= \\frac{\\sum_i x_i y_i \\sqrt{n-1} \\sqrt{\\sum_ix_i^2}} {\\sum_i x_i^2 \\sqrt{\\sum_i(y_i - x_i \\beta)^2}} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{\\sum_ix_i^2 \\sum_i(y_i - x_i \\beta)^2}} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_i(y_i^2 - 2 y_i x_i \\beta + x_i^2 \\beta^2) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_iy_i^2 - \\beta \\sum_ix_i^2 (2 \\sum_i y_i x_i -\\beta \\sum_i x_i^2) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_iy_i^2 - \\sum_i x_i y_i (2 \\sum_i y_i x_i - \\sum_i x_i y_i) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{\\sum_ix_i^2 \\sum_iy_i^2 - (\\sum_i x_i y_i)^2}} \\\\ \\end{align}\\] We can show this numerically in R by computing \\(t\\) using the above equation. n <- length(x) sqrt(n - 1) * sum(x * y) / sqrt(sum(x ^ 2) * sum(y ^ 2) - sum(x * y) ^ 2) ## [1] 18.72593 Using the results from (d), argue that the t-statistic for the regression of y onto x is the same as the t-statistic for the regression of x onto y. Swapping \\(x_i\\) for \\(y_i\\) in the formula for \\(t\\) will give the same result. In R, show that when regression is performed with an intercept, the t-statistic for \\(H_0 : \\beta_1 = 0\\) is the same for the regression of y onto x as it is for the regression of x onto y. coef(summary(lm(y ~ x))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -0.03769261 0.09698729 -0.3886346 6.983896e-01 ## x 1.99893961 0.10772703 18.5555993 7.723851e-34 coef(summary(lm(x ~ y))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 0.03880394 0.04266144 0.9095787 3.652764e-01 ## y 0.38942451 0.02098690 18.5555993 7.723851e-34 3.2.5 Question 12 This problem involves simple linear regression without an intercept. Recall that the coefficient estimate \\(\\hat{\\beta}\\) for the linear regression of \\(Y\\) onto \\(X\\) without an intercept is given by (3.38). Under what circumstance is the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) the same as the coefficient estimate for the regression of \\(Y\\) onto \\(X\\)? \\[ \\hat\\beta = \\sum_i x_iy_i / \\sum_{i'} x_{i'}^2 \\] The coefficient for the regression of X onto Y swaps the \\(x\\) and \\(y\\) variables: \\[ \\hat\\beta = \\sum_i x_iy_i / \\sum_{i'} y_{i'}^2 \\] So they are the same when \\(\\sum_{i} x_{i}^2 = \\sum_{i} y_{i}^2\\) Generate an example in R with \\(n = 100\\) observations in which the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) is different from the coefficient estimate for the regression of \\(Y\\) onto \\(X\\). 
x <- rnorm(100) y <- 2 * x + rnorm(100, 0, 0.1) c(sum(x^2), sum(y^2)) ## [1] 105.9889 429.4924 c(coef(lm(y ~ x))[2], coef(lm(x ~ y))[2]) ## x y ## 2.0106218 0.4962439 Generate an example in R with \\(n = 100\\) observations in which the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) is the same as the coefficient estimate for the regression of \\(Y\\) onto \\(X\\). x <- rnorm(100) y <- x + rnorm(100, 0, 0.1) c(sum(x^2), sum(y^2)) ## [1] 135.5844 134.5153 c(coef(lm(y ~ x))[2], coef(lm(x ~ y))[2]) ## x y ## 0.9925051 1.0006765 3.2.6 Question 13 In this exercise you will create some simulated data and will fit simple linear regression models to it. Make sure to use set.seed(1) prior to starting part (a) to ensure consistent results. set.seed(1) Using the rnorm() function, create a vector, x, containing 100 observations drawn from a \\(N(0, 1)\\) distribution. This represents a feature, \\(X\\). x <- rnorm(100, 0, 1) Using the rnorm() function, create a vector, eps, containing 100 observations drawn from a \\(N(0, 0.25)\\) distribution—a normal distribution with mean zero and variance 0.25. eps <- rnorm(100, 0, sqrt(0.25)) Using x and eps, generate a vector y according to the model \\[Y = -1 + 0.5X + \\epsilon\\] What is the length of the vector y? What are the values of \\(\\beta_0\\) and \\(\\beta_1\\) in this linear model? y <- -1 + 0.5 * x + eps length(y) ## [1] 100 \\(\\beta_0 = -1\\) and \\(\\beta_1 = 0.5\\) Create a scatterplot displaying the relationship between x and y. Comment on what you observe. plot(x, y) There is a linear relationship between \\(x\\) and \\(y\\) (with some error). Fit a least squares linear model to predict y using x. Comment on the model obtained. How do \\(\\hat\\beta_0\\) and \\(\\hat\\beta_1\\) compare to \\(\\beta_0\\) and \\(\\beta_1\\)? fit <- lm(y ~ x) summary(fit) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -0.93842 -0.30688 -0.06975 0.26970 1.17309 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.01885 0.04849 -21.010 < 2e-16 *** ## x 0.49947 0.05386 9.273 4.58e-15 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.4814 on 98 degrees of freedom ## Multiple R-squared: 0.4674, Adjusted R-squared: 0.4619 ## F-statistic: 85.99 on 1 and 98 DF, p-value: 4.583e-15 \\(\\beta_0\\) and \\(\\beta_1\\) are close to their population values. Display the least squares line on the scatterplot obtained in (d). Draw the population regression line on the plot, in a different color. Use the legend() command to create an appropriate legend. plot(x, y) abline(fit) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) Now fit a polynomial regression model that predicts y using x and x^2. Is there evidence that the quadratic term improves the model fit? Explain your answer. fit2 <- lm(y ~ poly(x, 2)) anova(fit2, fit) ## Analysis of Variance Table ## ## Model 1: y ~ poly(x, 2) ## Model 2: y ~ x ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 97 22.257 ## 2 98 22.709 -1 -0.45163 1.9682 0.1638 There is no evidence for an improved fit, since the F-test is non-significant. Repeat (a)–(f) after modifying the data generation process in such a way that there is less noise in the data. The model (3.39) should remain the same. You can do this by decreasing the variance of the normal distribution used to generate the error term \\(\\epsilon\\) in (b). Describe your results. 
x <- rnorm(100, 0, 1) y <- -1 + 0.5 * x + rnorm(100, 0, sqrt(0.05)) fit2 <- lm(y ~ x) summary(fit2) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -0.61308 -0.12553 -0.00391 0.15199 0.41332 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -0.98917 0.02216 -44.64 <2e-16 *** ## x 0.52375 0.02152 24.33 <2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.2215 on 98 degrees of freedom ## Multiple R-squared: 0.858, Adjusted R-squared: 0.8565 ## F-statistic: 592.1 on 1 and 98 DF, p-value: < 2.2e-16 plot(x, y) abline(fit2) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) The data shows less variability and the \\(R^2\\) is higher. Repeat (a)–(f) after modifying the data generation process in such a way that there is more noise in the data. The model (3.39) should remain the same. You can do this by increasing the variance of the normal distribution used to generate the error term \\(\\epsilon\\) in (b). Describe your results. x <- rnorm(100, 0, 1) y <- -1 + 0.5 * x + rnorm(100, 0, 1) fit3 <- lm(y ~ x) summary(fit3) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.51014 -0.60549 0.02065 0.70483 2.08980 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.04745 0.09676 -10.825 < 2e-16 *** ## x 0.42505 0.08310 5.115 1.56e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.9671 on 98 degrees of freedom ## Multiple R-squared: 0.2107, Adjusted R-squared: 0.2027 ## F-statistic: 26.16 on 1 and 98 DF, p-value: 1.56e-06 plot(x, y) abline(fit3) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) The data shows more variability. The \\(R^2\\) is lower. What are the confidence intervals for \\(\\beta_0\\) and \\(\\beta_1\\) based on the original data set, the noisier data set, and the less noisy data set? Comment on your results. confint(fit) ## 2.5 % 97.5 % ## (Intercept) -1.1150804 -0.9226122 ## x 0.3925794 0.6063602 confint(fit2) ## 2.5 % 97.5 % ## (Intercept) -1.033141 -0.9451916 ## x 0.481037 0.5664653 confint(fit3) ## 2.5 % 97.5 % ## (Intercept) -1.2394772 -0.8554276 ## x 0.2601391 0.5899632 The confidence intervals for the coefficients are smaller when there is less error. 3.2.7 Question 14 This problem focuses on the collinearity problem. Perform the following commands in R : > set.seed(1) > x1 <- runif(100) > x2 <- 0.5 * x1 + rnorm(100) / 10 > y <- 2 + 2 * x1 + 0.3 * x2 + rnorm(100) The last line corresponds to creating a linear model in which y is a function of x1 and x2. Write out the form of the linear model. What are the regression coefficients? set.seed(1) x1 <- runif(100) x2 <- 0.5 * x1 + rnorm(100) / 10 y <- 2 + 2 * x1 + 0.3 * x2 + rnorm(100) The model is of the form: \\[Y = \\beta_0 + \\beta_1X_1 + \\beta_2X_2 + \\epsilon\\] The coefficients are \\(\\beta_0 = 2\\), \\(\\beta_1 = 2\\), \\(\\beta_3 = 0.3\\). What is the correlation between x1 and x2? Create a scatterplot displaying the relationship between the variables. cor(x1, x2) ## [1] 0.8351212 plot(x1, x2) Using this data, fit a least squares regression to predict y using x1 and x2. Describe the results obtained. What are \\(\\hat\\beta_0\\), \\(\\hat\\beta_1\\), and \\(\\hat\\beta_2\\)? 
How do these relate to the true \\(\\beta_0\\), \\(\\beta_1\\), and \\(\\beta_2\\)? Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? How about the null hypothesis \\(H_0 : \\beta_2 = 0\\)? summary(lm(y ~ x1 + x2)) ## ## Call: ## lm(formula = y ~ x1 + x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.8311 -0.7273 -0.0537 0.6338 2.3359 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.1305 0.2319 9.188 7.61e-15 *** ## x1 1.4396 0.7212 1.996 0.0487 * ## x2 1.0097 1.1337 0.891 0.3754 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.056 on 97 degrees of freedom ## Multiple R-squared: 0.2088, Adjusted R-squared: 0.1925 ## F-statistic: 12.8 on 2 and 97 DF, p-value: 1.164e-05 \\(\\hat\\beta_0 = 2.13\\), \\(\\hat\\beta_1 = 1.43\\), and \\(\\hat\\beta_2 = 1.01\\). These are relatively poor estimates of the true values. We can (just) reject the null hypothesis \\(H_0 : \\beta_1 = 0\\) at the 0.05 level. We cannot reject the hypothesis that \\(H_0 : \\beta_2 = 0\\). Now fit a least squares regression to predict y using only x1. Comment on your results. Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? summary(lm(y ~ x1)) ## ## Call: ## lm(formula = y ~ x1) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.89495 -0.66874 -0.07785 0.59221 2.45560 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.1124 0.2307 9.155 8.27e-15 *** ## x1 1.9759 0.3963 4.986 2.66e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.055 on 98 degrees of freedom ## Multiple R-squared: 0.2024, Adjusted R-squared: 0.1942 ## F-statistic: 24.86 on 1 and 98 DF, p-value: 2.661e-06 We can reject \\(H_0 : \\beta_1 = 0\\). The p-value is much more significant for \\(\\beta_1\\) compared to when x2 is included in the model. Now fit a least squares regression to predict y using only x2. Comment on your results. Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? summary(lm(y ~ x2)) ## ## Call: ## lm(formula = y ~ x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.62687 -0.75156 -0.03598 0.72383 2.44890 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.3899 0.1949 12.26 < 2e-16 *** ## x2 2.8996 0.6330 4.58 1.37e-05 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.072 on 98 degrees of freedom ## Multiple R-squared: 0.1763, Adjusted R-squared: 0.1679 ## F-statistic: 20.98 on 1 and 98 DF, p-value: 1.366e-05 Similarly, we can reject \\(H_0 : \\beta_2 = 0\\). The p-value is much more significant for \\(\\beta_2\\) compared to when x1 is included in the model. Do the results obtained in (c)–(e) contradict each other? Explain your answer. No, they do not contradict each other. Both x1 and x2 individually are capable of explaining much of the variation observed in y; however, since they are correlated, it is very difficult to tease apart their separate contributions. Now suppose we obtain one additional observation, which was unfortunately mismeasured. > x1 <- c(x1, 0.1) > x2 <- c(x2, 0.8) > y <- c(y, 6) Re-fit the linear models from (c) to (e) using this new data. What effect does this new observation have on each of the models? In each model, is this observation an outlier? A high-leverage point? Both? Explain your answers. 
x1 <- c(x1 , 0.1) x2 <- c(x2 , 0.8) y <- c(y ,6) summary(lm(y ~ x1 + x2)) ## ## Call: ## lm(formula = y ~ x1 + x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.73348 -0.69318 -0.05263 0.66385 2.30619 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.2267 0.2314 9.624 7.91e-16 *** ## x1 0.5394 0.5922 0.911 0.36458 ## x2 2.5146 0.8977 2.801 0.00614 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.075 on 98 degrees of freedom ## Multiple R-squared: 0.2188, Adjusted R-squared: 0.2029 ## F-statistic: 13.72 on 2 and 98 DF, p-value: 5.564e-06 summary(lm(y ~ x1)) ## ## Call: ## lm(formula = y ~ x1) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.8897 -0.6556 -0.0909 0.5682 3.5665 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.2569 0.2390 9.445 1.78e-15 *** ## x1 1.7657 0.4124 4.282 4.29e-05 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.111 on 99 degrees of freedom ## Multiple R-squared: 0.1562, Adjusted R-squared: 0.1477 ## F-statistic: 18.33 on 1 and 99 DF, p-value: 4.295e-05 summary(lm(y ~ x2)) ## ## Call: ## lm(formula = y ~ x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.64729 -0.71021 -0.06899 0.72699 2.38074 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.3451 0.1912 12.264 < 2e-16 *** ## x2 3.1190 0.6040 5.164 1.25e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.074 on 99 degrees of freedom ## Multiple R-squared: 0.2122, Adjusted R-squared: 0.2042 ## F-statistic: 26.66 on 1 and 99 DF, p-value: 1.253e-06 par(mfrow = c(2, 2)) plot(lm(y ~ x1 + x2), cex = 0.2) par(mfrow = c(2, 2)) plot(lm(y ~ x1), cex = 0.2) par(mfrow = c(2, 2)) plot(lm(y ~ x2), cex = 0.2) In the first model (with both predictors), the new point has very high leverage (since it is an outlier in terms of the joint x1 and x2 distribution), however it is not an outlier. In the model that includes x1, it is an outlier but does not have high leverage. In the model that includes x2, it has high leverage but is not an outlier. It is useful to consider the scatterplot of x1 and x2. plot(x1, x2) points(0.1, 0.8, col = "red", pch = 19) 3.2.8 Question 15 This problem involves the Boston data set, which we saw in the lab for this chapter. We will now try to predict per capita crime rate using the other variables in this data set. In other words, per capita crime rate is the response, and the other variables are the predictors. We are trying to predict crim. pred <- subset(Boston, select = -crim) For each predictor, fit a simple linear regression model to predict the response. Describe your results. In which of the models is there a statistically significant association between the predictor and the response? Create some plots to back up your assertions. fits <- lapply(pred, function(x) lm(Boston$crim ~ x)) printCoefmat(do.call(rbind, lapply(fits, function(x) coef(summary(x))[2, ]))) ## Estimate Std. 
Error t value Pr(>|t|) ## zn -0.0739350 0.0160946 -4.5938 5.506e-06 *** ## indus 0.5097763 0.0510243 9.9908 < 2.2e-16 *** ## chas -1.8927766 1.5061155 -1.2567 0.2094 ## nox 31.2485312 2.9991904 10.4190 < 2.2e-16 *** ## rm -2.6840512 0.5320411 -5.0448 6.347e-07 *** ## age 0.1077862 0.0127364 8.4628 2.855e-16 *** ## dis -1.5509017 0.1683300 -9.2135 < 2.2e-16 *** ## rad 0.6179109 0.0343318 17.9982 < 2.2e-16 *** ## tax 0.0297423 0.0018474 16.0994 < 2.2e-16 *** ## ptratio 1.1519828 0.1693736 6.8014 2.943e-11 *** ## lstat 0.5488048 0.0477610 11.4907 < 2.2e-16 *** ## medv -0.3631599 0.0383902 -9.4597 < 2.2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 When fitting separate linear models, there are significant associations for all predictors except chas. For example, consider the following plot representing the fifth model (crim regressed onto rm): plot(Boston$rm, Boston$crim) abline(fits[[5]]) Fit a multiple regression model to predict the response using all of the predictors. Describe your results. For which predictors can we reject the null hypothesis \\(H_0 : \\beta_j = 0\\)? mfit <- lm(crim ~ ., data = Boston) summary(mfit) ## ## Call: ## lm(formula = crim ~ ., data = Boston) ## ## Residuals: ## Min 1Q Median 3Q Max ## -8.534 -2.248 -0.348 1.087 73.923 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.7783938 7.0818258 1.946 0.052271 . ## zn 0.0457100 0.0187903 2.433 0.015344 * ## indus -0.0583501 0.0836351 -0.698 0.485709 ## chas -0.8253776 1.1833963 -0.697 0.485841 ## nox -9.9575865 5.2898242 -1.882 0.060370 . ## rm 0.6289107 0.6070924 1.036 0.300738 ## age -0.0008483 0.0179482 -0.047 0.962323 ## dis -1.0122467 0.2824676 -3.584 0.000373 *** ## rad 0.6124653 0.0875358 6.997 8.59e-12 *** ## tax -0.0037756 0.0051723 -0.730 0.465757 ## ptratio -0.3040728 0.1863598 -1.632 0.103393 ## lstat 0.1388006 0.0757213 1.833 0.067398 . ## medv -0.2200564 0.0598240 -3.678 0.000261 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 6.46 on 493 degrees of freedom ## Multiple R-squared: 0.4493, Adjusted R-squared: 0.4359 ## F-statistic: 33.52 on 12 and 493 DF, p-value: < 2.2e-16 There are now only significant associations for zn, dis, rad and medv. How do your results from (a) compare to your results from (b)? Create a plot displaying the univariate regression coefficients from (a) on the \\(x\\)-axis, and the multiple regression coefficients from (b) on the \\(y\\)-axis. That is, each predictor is displayed as a single point in the plot. Its coefficient in a simple linear regression model is shown on the x-axis, and its coefficient estimate in the multiple linear regression model is shown on the y-axis. The results from (b) show reduced significance compared to the models fit in (a). plot(sapply(fits, function(x) coef(x)[2]), coef(mfit)[-1], xlab = "Univariate regression", ylab = "multiple regression") The estimated coefficients differ (in particular the estimated coefficient for nox is dramatically different) between the two modelling strategies. Is there evidence of non-linear association between any of the predictors and the response? To answer this question, for each predictor X, fit a model of the form \\[ Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon \\] pred <- subset(pred, select = -chas) fits <- lapply(names(pred), function(p) { f <- paste0("crim ~ poly(", p, ", 3)") lm(as.formula(f), data = Boston) }) for (fit in fits) printCoefmat(coef(summary(fit))) ## Estimate Std. 
Error t value Pr(>|t|) ## (Intercept) 3.61352 0.37219 9.7088 < 2.2e-16 *** ## poly(zn, 3)1 -38.74984 8.37221 -4.6284 4.698e-06 *** ## poly(zn, 3)2 23.93983 8.37221 2.8594 0.004421 ** ## poly(zn, 3)3 -10.07187 8.37221 -1.2030 0.229539 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.6135 0.3300 10.9501 < 2.2e-16 *** ## poly(indus, 3)1 78.5908 7.4231 10.5873 < 2.2e-16 *** ## poly(indus, 3)2 -24.3948 7.4231 -3.2863 0.001086 ** ## poly(indus, 3)3 -54.1298 7.4231 -7.2920 1.196e-12 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.32157 11.2370 < 2.2e-16 *** ## poly(nox, 3)1 81.37202 7.23361 11.2492 < 2.2e-16 *** ## poly(nox, 3)2 -28.82859 7.23361 -3.9854 7.737e-05 *** ## poly(nox, 3)3 -60.36189 7.23361 -8.3446 6.961e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.6135 0.3703 9.7584 < 2.2e-16 *** ## poly(rm, 3)1 -42.3794 8.3297 -5.0878 5.128e-07 *** ## poly(rm, 3)2 26.5768 8.3297 3.1906 0.001509 ** ## poly(rm, 3)3 -5.5103 8.3297 -0.6615 0.508575 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.34852 10.3683 < 2.2e-16 *** ## poly(age, 3)1 68.18201 7.83970 8.6970 < 2.2e-16 *** ## poly(age, 3)2 37.48447 7.83970 4.7814 2.291e-06 *** ## poly(age, 3)3 21.35321 7.83970 2.7237 0.00668 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.32592 11.0870 < 2.2e-16 *** ## poly(dis, 3)1 -73.38859 7.33148 -10.0101 < 2.2e-16 *** ## poly(dis, 3)2 56.37304 7.33148 7.6892 7.870e-14 *** ## poly(dis, 3)3 -42.62188 7.33148 -5.8135 1.089e-08 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.29707 12.1639 < 2.2e-16 *** ## poly(rad, 3)1 120.90745 6.68240 18.0934 < 2.2e-16 *** ## poly(rad, 3)2 17.49230 6.68240 2.6177 0.009121 ** ## poly(rad, 3)3 4.69846 6.68240 0.7031 0.482314 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.30468 11.8599 < 2.2e-16 *** ## poly(tax, 3)1 112.64583 6.85371 16.4358 < 2.2e-16 *** ## poly(tax, 3)2 32.08725 6.85371 4.6817 3.665e-06 *** ## poly(tax, 3)3 -7.99681 6.85371 -1.1668 0.2439 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.36105 10.0084 < 2.2e-16 *** ## poly(ptratio, 3)1 56.04523 8.12158 6.9008 1.565e-11 *** ## poly(ptratio, 3)2 24.77482 8.12158 3.0505 0.002405 ** ## poly(ptratio, 3)3 -22.27974 8.12158 -2.7433 0.006301 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.33917 10.6540 <2e-16 *** ## poly(lstat, 3)1 88.06967 7.62944 11.5434 <2e-16 *** ## poly(lstat, 3)2 15.88816 7.62944 2.0825 0.0378 * ## poly(lstat, 3)3 -11.57402 7.62944 -1.5170 0.1299 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.29203 12.374 < 2.2e-16 *** ## poly(medv, 3)1 -75.05761 6.56915 -11.426 < 2.2e-16 *** ## poly(medv, 3)2 88.08621 6.56915 13.409 < 2.2e-16 *** ## poly(medv, 3)3 -48.03343 6.56915 -7.312 1.047e-12 *** ## --- ## Signif. 
codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Yes, there is strong evidence for many variables having non-linear associations. In many cases, the addition of a cubic term is significant (indus, nox, age, dis, ptratio and medv). In other cases, although the cubic term is not significant, the squared term is (zn, rm, rad, tax, lstat). Every predictor fit here therefore shows evidence of at least a quadratic association (chas, being a binary variable, was excluded above). "],["classification.html", "4 Classification 4.1 Conceptual 4.2 Applied", " 4 Classification 4.1 Conceptual 4.1.1 Question 1 Using a little bit of algebra, prove that (4.2) is equivalent to (4.3). In other words, the logistic function representation and logit representation for the logistic regression model are equivalent. We need to show that \\[ p(X) = \\frac{e^{\\beta_0 + \\beta_1X}}{1 + e^{\\beta_0 + \\beta_1X}} \\] is equivalent to \\[ \\frac{p(X)}{1-p(X)} = e^{\\beta_0 + \\beta_1X} \\] Letting \\(x = e^{\\beta_0 + \\beta_1X}\\) \\[\\begin{align} \\frac{p(X)}{1-p(X)} &= \\frac{\\frac{x}{1 + x}} {1 - \\frac{x}{1 + x}} \\\\ &= \\frac{\\frac{x}{1 + x}} {\\frac{1}{1 + x}} \\\\ &= x \\end{align}\\] 4.1.2 Question 2 It was stated in the text that classifying an observation to the class for which (4.12) is largest is equivalent to classifying an observation to the class for which (4.13) is largest. Prove that this is the case. In other words, under the assumption that the observations in the \\(k\\)th class are drawn from a \\(N(\\mu_k,\\sigma^2)\\) distribution, the Bayes’ classifier assigns an observation to the class for which the discriminant function is maximized. Equation 4.12 is \\[ p_k(x) = \\frac{\\pi_k\\frac{1}{\\sqrt{2\\pi\\sigma}} \\exp(-\\frac{1}{2\\sigma^2}(x - \\mu_k)^2)} {\\sum_{l=1}^k \\pi_l\\frac{1}{\\sqrt{2\\pi\\sigma}} \\exp(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2)} \\] and the discriminant function is \\[ \\delta_k(x) = x \\cdot \\frac{\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} + \\log(\\pi_k) \\] Since \\(\\sigma^2\\) is constant \\[ p_k(x) = \\frac{\\pi_k \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_k)^2\\right)} {\\sum_{l=1}^k \\pi_l \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2\\right)} \\] Maximizing \\(p_k(x)\\) also maximizes any monotonic function of \\(p_k(X)\\), and therefore, we can consider maximizing \\(\\log(p_K(X))\\) \\[ \\log(p_k(x)) = \\log(\\pi_k) - \\frac{1}{2\\sigma^2}(x - \\mu_k)^2 - \\log\\left(\\sum_{l=1}^k \\pi_l \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2\\right)\\right) \\] Remember that we are maximizing over \\(k\\), and since the last term does not vary with \\(k\\) it can be ignored. So we just need to maximize \\[\\begin{align} f &= \\log(\\pi_k) - \\frac{1}{2\\sigma^2} (x^2 - 2x\\mu_k + \\mu_k^2) \\\\ &= \\log(\\pi_k) - \\frac{x^2}{2\\sigma^2} + \\frac{x\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} \\\\ \\end{align}\\] Since \\(\\frac{x^2}{2\\sigma^2}\\) is also independent of \\(k\\), we just need to maximize \\[ \\log(\\pi_k) + \\frac{x\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} \\] 4.1.3 Question 3 This problem relates to the QDA model, in which the observations within each class are drawn from a normal distribution with a class-specific mean vector and a class specific covariance matrix. We consider the simple case where \\(p = 1\\); i.e. there is only one feature. Suppose that we have \\(K\\) classes, and that if an observation belongs to the \\(k\\)th class then \\(X\\) comes from a one-dimensional normal distribution, \\(X \\sim N(\\mu_k,\\sigma^2)\\). 
Recall that the density function for the one-dimensional normal distribution is given in (4.16). Prove that in this case, the Bayes classifier is not linear. Argue that it is in fact quadratic. Hint: For this problem, you should follow the arguments laid out in Section 4.4.1, but without making the assumption that \\(\\sigma_1^2 = ... = \\sigma_K^2\\). As above, \\[ p_k(x) = \\frac{\\pi_k\\frac{1}{\\sqrt{2\\pi\\sigma_k}} \\exp(-\\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2)} {\\sum_{l=1}^k \\pi_l\\frac{1}{\\sqrt{2\\pi\\sigma_l}} \\exp(-\\frac{1}{2\\sigma_l^2}(x - \\mu_l)^2)} \\] Now lets derive the Bayes classifier, without assuming \\(\\sigma_1^2 = ... = \\sigma_K^2\\) Maximizing \\(p_k(x)\\) also maximizes any monotonic function of \\(p_k(X)\\), and therefore, we can consider maximizing \\(\\log(p_K(X))\\) \\[ \\log(p_k(x)) = \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi\\sigma_k}}\\right) - \\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2 - \\log\\left(\\sum_{l=1}^k \\frac{1}{\\sqrt{2\\pi\\sigma_l}} \\pi_l \\exp\\left(-\\frac{1}{2\\sigma_l^2}(x - \\mu_l)^2\\right)\\right) \\] Remember that we are maximizing over \\(k\\), and since the last term does not vary with \\(k\\) it can be ignored. So we just need to maximize \\[\\begin{align} f &= \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi\\sigma_k}}\\right) - \\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2 \\\\ &= \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi\\sigma_k}}\\right) - \\frac{x^2}{2\\sigma_k^2} + \\frac{x\\mu_k}{\\sigma_k^2} - \\frac{\\mu_k^2}{2\\sigma_k^2} \\\\ \\end{align}\\] However, unlike in Q2, \\(\\frac{x^2}{2\\sigma_k^2}\\) is not independent of \\(k\\), so we retain the term with \\(x^2\\), hence \\(f\\), the Bayes’ classifier, is a quadratic function of \\(x\\). 4.1.4 Question 4 When the number of features \\(p\\) is large, there tends to be a deterioration in the performance of KNN and other local approaches that perform prediction using only observations that are near the test observation for which a prediction must be made. This phenomenon is known as the curse of dimensionality, and it ties into the fact that non-parametric approaches often perform poorly when \\(p\\) is large. We will now investigate this curse. Suppose that we have a set of observations, each with measurements on \\(p = 1\\) feature, \\(X\\). We assume that \\(X\\) is uniformly (evenly) distributed on \\([0, 1]\\). Associated with each observation is a response value. Suppose that we wish to predict a test observation’s response using only observations that are within 10% of the range of \\(X\\) closest to that test observation. For instance, in order to predict the response for a test observation with \\(X = 0.6\\), we will use observations in the range \\([0.55, 0.65]\\). On average, what fraction of the available observations will we use to make the prediction? For values in \\(0...0.05\\), we use less than 10% of observations (between 5% and 10%, 7.5% on average), similarly with values in \\(0.95...1\\). For values in \\(0.05...0.95\\) we use 10% of available observations. The (weighted) average is then \\(7.5 \\times 0.1 + 10 \\times 0.9 = 9.75\\%\\). Now suppose that we have a set of observations, each with measurements on \\(p = 2\\) features, \\(X_1\\) and \\(X_2\\). We assume that \\((X_1, X_2)\\) are uniformly distributed on \\([0, 1] \\times [0, 1]\\). We wish to predict a test observation’s response using only observations that are within 10% of the range of \\(X_1\\) and within 10% of the range of \\(X_2\\) closest to that test observation. 
For instance, in order to predict the response for a test observation with \\(X_1 = 0.6\\) and \\(X_2 = 0.35\\), we will use observations in the range \\([0.55, 0.65]\\) for \\(X_1\\) and in the range \\([0.3, 0.4]\\) for \\(X_2\\). On average, what fraction of the available observations will we use to make the prediction? Since we need the observation to be within range for \\(X_1\\) and \\(X_2\\) we square 9.75% = \\(0.0975^2 \\times 100 = 0.95\\%\\) Now suppose that we have a set of observations on \\(p = 100\\) features. Again the observations are uniformly distributed on each feature, and again each feature ranges in value from 0 to 1. We wish to predict a test observation’s response using observations within the 10% of each feature’s range that is closest to that test observation. What fraction of the available observations will we use to make the prediction? Similar to above, we use: \\(0.0975^{100} \\times 100 = 8 \\times 10^{-100}\\%\\), essentially zero. Using your answers to parts (a)–(c), argue that a drawback of KNN when \\(p\\) is large is that there are very few training observations “near” any given test observation. As \\(p\\) increases, the fraction of observations near any given point rapidly approaches zero. For instance, even if you use 50% of the nearest observations for each \\(p\\), with \\(p = 10\\), only \\(0.5^{10} \\times 100 \\approx 0.1\\%\\) points are “near”. Now suppose that we wish to make a prediction for a test observation by creating a \\(p\\)-dimensional hypercube centered around the test observation that contains, on average, 10% of the training observations. For \\(p = 1,2,\\) and \\(100\\), what is the length of each side of the hypercube? Comment on your answer. Note: A hypercube is a generalization of a cube to an arbitrary number of dimensions. When \\(p = 1\\), a hypercube is simply a line segment, when \\(p = 2\\) it is a square, and when \\(p = 100\\) it is a 100-dimensional cube. When \\(p = 1\\), clearly the length is 0.1. When \\(p = 2\\), we need the value \\(l\\) such that \\(l^2 = 0.1\\), so \\(l = \\sqrt{0.1} = 0.32\\). When \\(p = n\\), \\(l = 0.1^{1/n}\\), so in the case of \\(n = 100\\), \\(l = 0.98\\). Therefore, the length of each side of the hypercube rapidly approaches 1 (or 100%) of the range of each \\(p\\). 4.1.5 Question 5 We now examine the differences between LDA and QDA. If the Bayes decision boundary is linear, do we expect LDA or QDA to perform better on the training set? On the test set? QDA, being a more flexible model, will always perform better on the training set, but LDA would be expected to perform better on the test set. If the Bayes decision boundary is non-linear, do we expect LDA or QDA to perform better on the training set? On the test set? QDA, being a more flexible model, will perform better on the training set, and we would hope that extra flexibility translates to a better fit on the test set. In general, as the sample size \\(n\\) increases, do we expect the test prediction accuracy of QDA relative to LDA to improve, decline, or be unchanged? Why? As \\(n\\) increases, we would expect the prediction accuracy of QDA relative to LDA to improve as there is more data to fit to subtle effects in the data. True or False: Even if the Bayes decision boundary for a given problem is linear, we will probably achieve a superior test error rate using QDA rather than LDA because QDA is flexible enough to model a linear decision boundary. Justify your answer. False. QDA can overfit leading to poorer test performance. 
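To make parts (a) and (d) concrete, here is a minimal simulation sketch (not from the text; the data-generating code and object names are illustrative). The class labels come from a linear function of the predictors, so the Bayes decision boundary is linear; in this setting QDA's extra flexibility typically buys nothing on a large test set and often costs a little accuracy relative to LDA:
library(MASS)
set.seed(42)
make_data <- function(n) {
  x1 <- rnorm(n)
  x2 <- rnorm(n)
  # linear Bayes boundary: the class depends on x1 + x2 plus noise
  data.frame(x1, x2, y = factor(x1 + x2 + rnorm(n) > 0))
}
dat_tr <- make_data(100)   # small training set
dat_te <- make_data(10000) # large test set
c(
  LDA = mean(predict(lda(y ~ x1 + x2, data = dat_tr), dat_te)$class != dat_te$y),
  QDA = mean(predict(qda(y ~ x1 + x2, data = dat_tr), dat_te)$class != dat_te$y)
)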
4.1.6 Question 6 Suppose we collect data for a group of students in a statistics class with variables \\(X_1 =\\) hours studied, \\(X_2 =\\) undergrad GPA, and \\(Y =\\) receive an A. We fit a logistic regression and produce estimated coefficient, \\(\\hat\\beta_0 = -6\\), \\(\\hat\\beta_1 = 0.05\\), \\(\\hat\\beta_2 = 1\\). Estimate the probability that a student who studies for 40h and has an undergrad GPA of 3.5 gets an A in the class. The logistic model is: \\[ \\log\\left(\\frac{p(X)}{1-p(x)}\\right) = -6 + 0.05X_1 + X_2 \\] or \\[ p(X) = \\frac{e^{-6 + 0.05X_1 + X_2}}{1 + e^{-6 + 0.05X_1 + X_2}} \\] when \\(X_1 = 40\\) and \\(X_2 = 3.5\\), \\(p(X) = 0.38\\) How many hours would the student in part (a) need to study to have a 50% chance of getting an A in the class? We would like to solve for \\(X_1\\) where \\(p(X) = 0.5\\). Taking the first equation above, we need to solve \\(0 = −6 + 0.05X_1 + 3.5\\), so \\(X_1 = 50\\) hours. 4.1.7 Question 7 Suppose that we wish to predict whether a given stock will issue a dividend this year (“Yes” or “No”) based on \\(X\\), last year’s percent profit. We examine a large number of companies and discover that the mean value of \\(X\\) for companies that issued a dividend was \\(\\bar{X} = 10\\), while the mean for those that didn’t was \\(\\bar{X} = 0\\). In addition, the variance of \\(X\\) for these two sets of companies was \\(\\hat{\\sigma}^2 = 36\\). Finally, 80% of companies issued dividends. Assuming that \\(X\\) follows a normal distribution, predict the probability that a company will issue a dividend this year given that its percentage profit was \\(X = 4\\) last year. Hint: Recall that the density function for a normal random variable is \\(f(x) =\\frac{1}{\\sqrt{2\\pi\\sigma^2}}e^{-(x-\\mu)^2/2\\sigma^2}\\). You will need to use Bayes’ theorem. Value \\(v\\) for companies (D) issuing a dividend = \\(v_D \\sim \\mathcal{N}(10, 36)\\). Value \\(v\\) for companies (N) not issuing a dividend = \\(v_N \\sim \\mathcal{N}(0, 36)\\) and \\(p(D) = 0.8\\). We want to find \\(p(D|v)\\) and we can calculate \\(p(v|D)\\) from the Gaussian density function. Note that since \\(e^2\\) is equal between both classes, the term \\(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\) cancels. \\[\\begin{align} p(D|v) &= \\frac{p(v|D) p(D)}{p(v|D)p(D) + p(v|N)p(N)} \\\\ &= \\frac{\\pi_D \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_D)^2/2\\sigma^2}} {\\pi_D \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_D)^2/2\\sigma^2} + \\pi_N \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_N)^2/2\\sigma^2}} \\\\ &= \\frac{\\pi_D e^{-(x-\\mu_D)^2/2\\sigma^2}} {\\pi_D e^{-(x-\\mu_D)^2/2\\sigma^2} + \\pi_N e^{-(x-\\mu_N)^2/2\\sigma^2}} \\\\ &= \\frac{0.8 \\times e^{-(4-10)^2/(2 \\times 36)}} {0.8 \\times e^{-(4-10)^2/(2 \\times 36)} + 0.2 \\times e^{-(4-0)^2/(2 \\times 36)}} \\\\ &= \\frac{0.8 e^{-1/2}}{0.8 e^{-1/2} + 0.2 e^{-2/9}} \\end{align}\\] exp(-0.5) * 0.8 / (exp(-0.5) * 0.8 + exp(-2/9) * 0.2) ## [1] 0.7518525 4.1.8 Question 8 Suppose that we take a data set, divide it into equally-sized training and test sets, and then try out two different classification procedures. First we use logistic regression and get an error rate of 20% on the training data and 30% on the test data. Next we use 1-nearest neighbors (i.e. \\(K = 1\\)) and get an average error rate (averaged over both test and training data sets) of 18%. Based on these results, which method should we prefer to use for classification of new observations? Why? 
For \\(K = 1\\), performance on the training set is perfect and the error rate is zero, implying a test error rate of 36%. Logistic regression outperforms 1-nearest neighbor on the test set and therefore should be preferred. 4.1.9 Question 9 This problem has to do with odds. On average, what fraction of people with an odds of 0.37 of defaulting on their credit card payment will in fact default? Odds is defined as \\(p/(1-p)\\). \\[0.37 = \\frac{p(x)}{1 - p(x)}\\] therefore, \\[p(x) = \\frac{0.37}{1 + 0.37} = 0.27\\] Suppose that an individual has a 16% chance of defaulting on her credit card payment. What are the odds that she will default? \\[0.16 / (1 - 0.16) = 0.19\\] 4.1.10 Question 10 Equation 4.32 derived an expression for \\(\\log(\\frac{Pr(Y=k|X=x)}{Pr(Y=K|X=x)})\\) in the setting where \\(p > 1\\), so that the mean for the \\(k\\)th class, \\(\\mu_k\\), is a \\(p\\)-dimensional vector, and the shared covariance \\(\\Sigma\\) is a \\(p \\times p\\) matrix. However, in the setting with \\(p = 1\\), (4.32) takes a simpler form, since the means \\(\\mu_1, ..., \\mu_k\\) and the variance \\(\\sigma^2\\) are scalars. In this simpler setting, repeat the calculation in (4.32), and provide expressions for \\(a_k\\) and \\(b_{kj}\\) in terms of \\(\\pi_k, \\pi_K, \\mu_k, \\mu_K,\\) and \\(\\sigma^2\\). \\[\\begin{align*} \\log\\left(\\frac{Pr(Y=k|X=x)}{Pr(Y=K|X=x)}\\right) & = \\log\\left(\\frac{\\pi_k f_k(x)}{\\pi_K f_K(x)}\\right) \\\\ & = \\log\\left(\\frac{\\pi_k \\exp(-1/2((x-\\mu_k)/\\sigma)^2)}{\\pi_K \\exp(-1/2((x-\\mu_K)/\\sigma)^2)}\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2} \\left(\\frac{x-\\mu_k}{\\sigma}\\right)^2 + \\frac{1}{2} \\left(\\frac{x-\\mu_K}{\\sigma}\\right)^2 \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} (x-\\mu_k)^2 + \\frac{1}{2\\sigma^2} (x-\\mu_K)^2 \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left((x-\\mu_k)^2 - (x-\\mu_K)^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left(x^2-2x\\mu_k+\\mu_k^2 - x^2 + 2x\\mu_K - \\mu_K^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left(2x(\\mu_K - \\mu_k) + \\mu_k^2 -\\mu_K^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{\\mu_k^2 -\\mu_K^2}{2\\sigma^2} + \\frac{x(\\mu_k - \\mu_K)}{\\sigma^2} \\end{align*}\\] Therefore, \\[a_k = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{\\mu_k^2 -\\mu_K^2}{2\\sigma^2}\\] and \\[b_k = (\\mu_k - \\mu_K) / \\sigma^2\\] 4.1.11 Question 11 ToDo Work out the detailed forms of \\(a_k\\), \\(b_{kj}\\), and \\(b_{kjl}\\) in (4.33). Your answer should involve \\(\\pi_k\\), \\(\\pi_K\\), \\(\\mu_k\\), \\(\\mu_K\\), \\(\\Sigma_k\\), and \\(\\Sigma_K\\). 4.1.12 Question 12 Suppose that you wish to classify an observation \\(X \\in \\mathbb{R}\\) into apples and oranges. You fit a logistic regression model and find that \\[ \\hat{Pr}(Y=orange|X=x) = \\frac{\\exp(\\hat\\beta_0 + \\hat\\beta_1x)}{1 + \\exp(\\hat\\beta_0 + \\hat\\beta_1x)} \\] Your friend fits a logistic regression model to the same data using the softmax formulation in (4.13), and finds that \\[ \\hat{Pr}(Y=orange|X=x) = \\frac{\\exp(\\hat\\alpha_{orange0} + \\hat\\alpha_{orange1}x)} {\\exp(\\hat\\alpha_{orange0} + \\hat\\alpha_{orange1}x) + \\exp(\\hat\\alpha_{apple0} + \\hat\\alpha_{apple1}x)} \\] What is the log odds of orange versus apple in your model? 
The log odds is just \\(\\hat\\beta_0 + \\hat\\beta_1x\\) What is the log odds of orange versus apple in your friend’s model? From 4.14, log odds of our friend’s model is: \\[ (\\hat\\alpha_{orange0} - \\hat\\alpha_{apple0}) + (\\hat\\alpha_{orange1} - \\hat\\alpha_{apple1})x \\] Suppose that in your model, \\(\\hat\\beta_0 = 2\\) and \\(\\hat\\beta = −1\\). What are the coefficient estimates in your friend’s model? Be as specific as possible. We can say that in our friend’s model \\(\\hat\\alpha_{orange0} - \\hat\\alpha_{apple0} = 2\\) and \\(\\hat\\alpha_{orange1} - \\hat\\alpha_{apple1} = -1\\). We are unable to know the specific value of each parameter however. Now suppose that you and your friend fit the same two models on a different data set. This time, your friend gets the coefficient estimates \\(\\hat\\alpha_{orange0} = 1.2\\), \\(\\hat\\alpha_{orange1} = −2\\), \\(\\hat\\alpha_{apple0} = 3\\), \\(\\hat\\alpha_{apple1} = 0.6\\). What are the coefficient estimates in your model? The coefficients in our model would be \\(\\hat\\beta_0 = 1.2 - 3 = -1.8\\) and \\(\\hat\\beta_1 = -2 - 0.6 = -2.6\\) Finally, suppose you apply both models from (d) to a data set with 2,000 test observations. What fraction of the time do you expect the predicted class labels from your model to agree with those from your friend’s model? Explain your answer. The models are identical with different parameterization so they should perfectly agree. 4.2 Applied 4.2.1 Question 13 This question should be answered using the Weekly data set, which is part of the ISLR2 package. This data is similar in nature to the Smarket data from this chapter’s lab, except that it contains 1,089 weekly returns for 21 years, from the beginning of 1990 to the end of 2010. Produce some numerical and graphical summaries of the Weekly data. Do there appear to be any patterns? library(MASS) library(class) library(tidyverse) library(corrplot) library(ISLR2) library(e1071) summary(Weekly) ## Year Lag1 Lag2 Lag3 ## Min. :1990 Min. :-18.1950 Min. :-18.1950 Min. :-18.1950 ## 1st Qu.:1995 1st Qu.: -1.1540 1st Qu.: -1.1540 1st Qu.: -1.1580 ## Median :2000 Median : 0.2410 Median : 0.2410 Median : 0.2410 ## Mean :2000 Mean : 0.1506 Mean : 0.1511 Mean : 0.1472 ## 3rd Qu.:2005 3rd Qu.: 1.4050 3rd Qu.: 1.4090 3rd Qu.: 1.4090 ## Max. :2010 Max. : 12.0260 Max. : 12.0260 Max. : 12.0260 ## Lag4 Lag5 Volume Today ## Min. :-18.1950 Min. :-18.1950 Min. :0.08747 Min. :-18.1950 ## 1st Qu.: -1.1580 1st Qu.: -1.1660 1st Qu.:0.33202 1st Qu.: -1.1540 ## Median : 0.2380 Median : 0.2340 Median :1.00268 Median : 0.2410 ## Mean : 0.1458 Mean : 0.1399 Mean :1.57462 Mean : 0.1499 ## 3rd Qu.: 1.4090 3rd Qu.: 1.4050 3rd Qu.:2.05373 3rd Qu.: 1.4050 ## Max. : 12.0260 Max. : 12.0260 Max. :9.32821 Max. : 12.0260 ## Direction ## Down:484 ## Up :605 ## ## ## ## corrplot(cor(Weekly[, -9]), type = "lower", diag = FALSE, method = "ellipse") Volume is strongly positively correlated with Year. Other correlations are week, but Lag1 is negatively correlated with Lag2 but positively correlated with Lag3. Use the full data set to perform a logistic regression with Direction as the response and the five lag variables plus Volume as predictors. Use the summary function to print the results. Do any of the predictors appear to be statistically significant? If so, which ones? 
fit <- glm( Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume, data = Weekly, family = binomial ) summary(fit) ## ## Call: ## glm(formula = Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + ## Volume, family = binomial, data = Weekly) ## ## Coefficients: ## Estimate Std. Error z value Pr(>|z|) ## (Intercept) 0.26686 0.08593 3.106 0.0019 ** ## Lag1 -0.04127 0.02641 -1.563 0.1181 ## Lag2 0.05844 0.02686 2.175 0.0296 * ## Lag3 -0.01606 0.02666 -0.602 0.5469 ## Lag4 -0.02779 0.02646 -1.050 0.2937 ## Lag5 -0.01447 0.02638 -0.549 0.5833 ## Volume -0.02274 0.03690 -0.616 0.5377 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 1496.2 on 1088 degrees of freedom ## Residual deviance: 1486.4 on 1082 degrees of freedom ## AIC: 1500.4 ## ## Number of Fisher Scoring iterations: 4 Lag2 is significant. Compute the confusion matrix and overall fraction of correct predictions. Explain what the confusion matrix is telling you about the types of mistakes made by logistic regression. contrasts(Weekly$Direction) ## Up ## Down 0 ## Up 1 pred <- predict(fit, type = "response") > 0.5 (t <- table(ifelse(pred, "Up (pred)", "Down (pred)"), Weekly$Direction)) ## ## Down Up ## Down (pred) 54 48 ## Up (pred) 430 557 sum(diag(t)) / sum(t) ## [1] 0.5610652 The overall fraction of correct predictions is 0.56. Although logistic regression correctly predicts upwards movements well, it incorrectly predicts most downwards movements as up. Now fit the logistic regression model using a training data period from 1990 to 2008, with Lag2 as the only predictor. Compute the confusion matrix and the overall fraction of correct predictions for the held out data (that is, the data from 2009 and 2010). train <- Weekly$Year < 2009 fit <- glm(Direction ~ Lag2, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 (t <- table(ifelse(pred, "Up (pred)", "Down (pred)"), Weekly[!train, ]$Direction)) ## ## Down Up ## Down (pred) 9 5 ## Up (pred) 34 56 sum(diag(t)) / sum(t) ## [1] 0.625 Repeat (d) using LDA. fit <- lda(Direction ~ Lag2, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 9 5 ## Up 34 56 sum(diag(t)) / sum(t) ## [1] 0.625 Repeat (d) using QDA. fit <- qda(Direction ~ Lag2, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 0 0 ## Up 43 61 sum(diag(t)) / sum(t) ## [1] 0.5865385 Repeat (d) using KNN with \\(K = 1\\). fit <- knn( Weekly[train, "Lag2", drop = FALSE], Weekly[!train, "Lag2", drop = FALSE], Weekly$Direction[train] ) (t <- table(fit, Weekly[!train, ]$Direction)) ## ## fit Down Up ## Down 21 30 ## Up 22 31 sum(diag(t)) / sum(t) ## [1] 0.5 Repeat (d) using naive Bayes. fit <- naiveBayes(Direction ~ Lag2, data = Smarket, subset = train) pred <- predict(fit, Weekly[!train, ], type = "class") (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 27 29 ## Up 16 32 sum(diag(t)) / sum(t) ## [1] 0.5673077 Which of these methods appears to provide the best results on this data? Logistic regression and LDA are the best performing. Experiment with different combinations of predictors, including possible transformations and interactions, for each of the methods. 
Report the variables, method, and associated confusion matrix that appears to provide the best results on the held out data. Note that you should also experiment with values for \\(K\\) in the KNN classifier. fit <- glm(Direction ~ Lag1, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5673077 fit <- glm(Direction ~ Lag3, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~ Lag1 * Lag2 * Lag3 * Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5961538 fit <- lda(Direction ~ Lag1 + Lag2 + Lag3 + Lag4,data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5769231 fit <- qda(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5192308 fit <- naiveBayes(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "class") mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5096154 set.seed(1) res <- sapply(1:30, function(k) { fit <- knn( Weekly[train, 2:4, drop = FALSE], Weekly[!train, 2:4, drop = FALSE], Weekly$Direction[train], k = k ) mean(fit == Weekly[!train, ]$Direction) }) plot(1:30, res, type = "o", xlab = "k", ylab = "Fraction correct") (k <- which.max(res)) ## [1] 26 fit <- knn( Weekly[train, 2:4, drop = FALSE], Weekly[!train, 2:4, drop = FALSE], Weekly$Direction[train], k = k ) table(fit, Weekly[!train, ]$Direction) ## ## fit Down Up ## Down 23 18 ## Up 20 43 mean(fit == Weekly[!train, ]$Direction) ## [1] 0.6346154 KNN using the first 3 Lag variables performs marginally better than logistic regression with Lag2 if we tune \\(k\\) to be \\(k = 26\\). 4.2.2 Question 14 In this problem, you will develop a model to predict whether a given car gets high or low gas mileage based on the Auto data set. Create a binary variable, mpg01, that contains a 1 if mpg contains a value above its median, and a 0 if mpg contains a value below its median. You can compute the median using the median() function. Note you may find it helpful to use the data.frame() function to create a single data set containing both mpg01 and the other Auto variables. x <- cbind(Auto[, -1], data.frame("mpg01" = Auto$mpg > median(Auto$mpg))) Explore the data graphically in order to investigate the association between mpg01 and the other features. Which of the other features seem most likely to be useful in predicting mpg01? Scatterplots and boxplots may be useful tools to answer this question. Describe your findings. 
par(mfrow = c(2, 4)) for (i in 1:7) hist(x[, i], breaks = 20, main = colnames(x)[i]) par(mfrow = c(2, 4)) for (i in 1:7) boxplot(x[, i] ~ x$mpg01, main = colnames(x)[i]) pairs(x[, 1:7]) Most variables show an association with the mpg01 category, and several variables are collinear. Split the data into a training set and a test set. set.seed(1) train <- sample(seq_len(nrow(x)), nrow(x) * 2/3) Perform LDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? sort(sapply(1:7, function(i) { setNames(abs(t.test(x[, i] ~ x$mpg01)$statistic), colnames(x)[i]) })) ## acceleration year origin horsepower displacement weight ## 7.302430 9.403221 11.824099 17.681939 22.632004 22.932777 ## cylinders ## 23.035328 fit <- lda(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "response")$class mean(pred != x[-train, ]$mpg01) ## [1] 0.1068702 Perform QDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- qda(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "response")$class mean(pred != x[-train, ]$mpg01) ## [1] 0.09923664 Perform logistic regression on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- glm(mpg01 ~ cylinders + weight + displacement, data = x[train, ], family = binomial) pred <- predict(fit, x[-train, ], type = "response") > 0.5 mean(pred != x[-train, ]$mpg01) ## [1] 0.1145038 Perform naive Bayes on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- naiveBayes(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "class") mean(pred != x[-train, ]$mpg01) ## [1] 0.09923664 Perform KNN on the training data, with several values of \\(K\\), in order to predict mpg01. Use only the variables that seemed most associated with mpg01 in (b). What test errors do you obtain? Which value of \\(K\\) seems to perform the best on this data set? res <- sapply(1:50, function(k) { fit <- knn(x[train, c(1, 4, 2)], x[-train, c(1, 4, 2)], x$mpg01[train], k = k) mean(fit != x[-train, ]$mpg01) }) names(res) <- 1:50 plot(res, type = "o") res[which.min(res)] ## 3 ## 0.1068702 For the models tested here, \\(k = 3\\) appears to perform best. QDA has a lower error rate overall, performing slightly better than LDA. 4.2.3 Question 15 This problem involves writing functions. Write a function, Power(), that prints out the result of raising 2 to the 3rd power. In other words, your function should compute \\(2^3\\) and print out the results. Hint: Recall that x^a raises x to the power a. Use the print() function to output the result. Power <- function() print(2^3) Create a new function, Power2(), that allows you to pass any two numbers, x and a, and prints out the value of x^a. You can do this by beginning your function with the line > Power2=function(x,a) { You should be able to call your function by entering, for instance, > Power2(3, 8) on the command line. This should output the value of \\(3^8\\), namely, 6,561. 
Power2 <- function(x, a) print(x^a) Using the Power2() function that you just wrote, compute \\(10^3\\), \\(8^{17}\\), and \\(131^3\\). c(Power2(10, 3), Power2(8, 17), Power2(131, 3)) ## [1] 1000 ## [1] 2.2518e+15 ## [1] 2248091 ## [1] 1.000000e+03 2.251800e+15 2.248091e+06 Now create a new function, Power3(), that actually returns the result x^a as an R object, rather than simply printing it to the screen. That is, if you store the value x^a in an object called result within your function, then you can simply return() this result, using the following line: > return(result) The line above should be the last line in your function, before the } symbol. Power3 <- function(x, a) { result <- x^a return(result) } Now using the Power3() function, create a plot of \\(f(x) = x^2\\). The \\(x\\)-axis should display a range of integers from 1 to 10, and the \\(y\\)-axis should display \\(x^2\\). Label the axes appropriately, and use an appropriate title for the figure. Consider displaying either the \\(x\\)-axis, the \\(y\\)-axis, or both on the log-scale. You can do this by using log = \"x\", log = \"y\", or log = \"xy\" as arguments to the plot() function. plot(1:10, Power3(1:10, 2), xlab = "x", ylab = expression(paste("x"^"2")), log = "y" ) Create a function, PlotPower(), that allows you to create a plot of x against x^a for a fixed a and for a range of values of x. For instance, if you call > PlotPower(1:10, 3) then a plot should be created with an \\(x\\)-axis taking on values \\(1,2,...,10\\), and a \\(y\\)-axis taking on values \\(1^3,2^3,...,10^3\\). PlotPower <- function(x, a, log = "y") { plot(x, Power3(x, a), xlab = "x", ylab = substitute("x"^a, list(a = a)), log = log ) } PlotPower(1:10, 3) 4.2.4 Question 13 Using the Boston data set, fit classification models in order to predict whether a given census tract has a crime rate above or below the median. Explore logistic regression, LDA, naive Bayes and KNN models using various sub-sets of the predictors. Describe your findings. Hint: You will have to create the response variable yourself, using the variables that are contained in the Boston data set. x <- cbind( ISLR2::Boston[, -1], data.frame("highcrim" = Boston$crim > median(Boston$crim)) ) set.seed(1) train <- sample(seq_len(nrow(x)), nrow(x) * 2/3) We can find the most associated variables by performing wilcox tests. ord <- order(sapply(1:12, function(i) { p <- wilcox.test(as.numeric(x[train, i]) ~ x[train, ]$highcrim)$p.value setNames(log10(p), colnames(x)[i]) })) ord <- names(x)[ord] ord ## [1] "nox" "dis" "indus" "tax" "age" "rad" "zn" ## [8] "lstat" "medv" "ptratio" "rm" "chas" Variables nox (nitrogen oxides concentration) followed by dis (distance to employment center) appear to be most associated with high crime. Let’s reorder columns by those most associated with highcrim (in the training data) x <- x[, c(ord, "highcrim")] Let’s look at univariate associations with highcrim (in the training data) x[train, ] |> pivot_longer(!highcrim) |> mutate(name = factor(name, levels = ord)) |> ggplot(aes(highcrim, value)) + geom_boxplot() + facet_wrap(~name, scale = "free") Fit lda, logistic regression, naive Bayes and KNN models (with k = 1..50) for a set of specific predictors and return the error rate. We fit models using increasing numbers of predictors: column 1, then columns 1 and 2 etc. 
fit_models <- function(cols, k_vals = 1:50) { dat_train <- x[train, cols, drop = FALSE] dat_test <- x[-train, cols, drop = FALSE] fit <- lda(x$highcrim[train] ~ ., data = dat_train) pred <- predict(fit, dat_test, type = "response")$class lda_err <- mean(pred != x$highcrim[-train]) fit <- glm(x$highcrim[train] ~ ., data = dat_train, family = binomial) pred <- predict(fit, dat_test, type = "response") > 0.5 logreg_err <- mean(pred != x$highcrim[-train]) fit <- naiveBayes(x$highcrim[train] ~ ., data = dat_train) pred <- predict(fit, dat_test, type = "class") nb_err <- mean(pred != x$highcrim[-train]) res <- sapply(k_vals, function(k) { fit <- knn(dat_train, dat_test, x$highcrim[train], k = k) mean(fit != x$highcrim[-train]) }) knn_err <- min(res) c("LDA" = lda_err, "LR" = logreg_err, "NB" = nb_err, "KNN" = knn_err) } res <- sapply(1:12, function(max) fit_models(1:max)) res <- as_tibble(t(res)) res$n_var <- 1:12 pivot_longer(res, cols = !n_var) |> ggplot(aes(n_var, value, col = name)) + geom_line() + xlab("Number of predictors") + ylab("Error rate") KNN appears to perform better (if we tune \\(k\\)) for all numbers of predictors. fit <- knn( x[train, "nox", drop = FALSE], x[-train, "nox", drop = FALSE], x$highcrim[train], k = 1 ) table(fit, x[-train, ]$highcrim) ## ## fit FALSE TRUE ## FALSE 78 2 ## TRUE 3 86 mean(fit != x[-train, ]$highcrim) * 100 ## [1] 2.95858 Surprisingly, the best model (with an error rate of <5%) uses \\(k = 1\\) and assigns crime rate categories based on the town with the single most similar nitrogen oxide concentration (nox). This might be, for example, because nearby towns have similar crime rates, and we can obtain good predictions by predicting crime rate based on a nearby town. But what if we only consider \\(k = 20\\). res <- sapply(1:12, function(max) fit_models(1:max, k_vals = 20)) res <- as_tibble(t(res)) res$n_var <- 1:12 pivot_longer(res, cols = !n_var) |> ggplot(aes(n_var, value, col = name)) + geom_line() + xlab("Number of predictors") + ylab("Error rate") KNN still performs best with a single predictor (nox), but logistic regression with 12 predictors also performs well and has an error rate of ~12%. vars <- names(x)[1:12] dat_train <- x[train, vars] dat_test <- x[-train, vars] fit <- glm(x$highcrim[train] ~ ., data = dat_train, family = binomial) pred <- predict(fit, dat_test, type = "response") > 0.5 table(pred, x[-train, ]$highcrim) ## ## pred FALSE TRUE ## FALSE 70 9 ## TRUE 11 79 mean(pred != x$highcrim[-train]) * 100 ## [1] 11.83432 summary(fit) ## ## Call: ## glm(formula = x$highcrim[train] ~ ., family = binomial, data = dat_train) ## ## Coefficients: ## Estimate Std. Error z value Pr(>|z|) ## (Intercept) -44.525356 7.935621 -5.611 2.01e-08 *** ## nox 55.062428 10.281556 5.355 8.53e-08 *** ## dis 1.080847 0.304084 3.554 0.000379 *** ## indus -0.067493 0.058547 -1.153 0.248997 ## tax -0.005336 0.003138 -1.700 0.089060 . ## age 0.020965 0.014190 1.477 0.139556 ## rad 0.678196 0.192193 3.529 0.000418 *** ## zn -0.099558 0.045914 -2.168 0.030134 * ## lstat 0.134035 0.058623 2.286 0.022231 * ## medv 0.213114 0.088922 2.397 0.016547 * ## ptratio 0.294396 0.155285 1.896 0.057981 . ## rm -0.518115 0.896423 -0.578 0.563278 ## chas 0.139557 0.798632 0.175 0.861280 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 467.04 on 336 degrees of freedom ## Residual deviance: 135.80 on 324 degrees of freedom ## AIC: 161.8 ## ## Number of Fisher Scoring iterations: 9 "],["resampling-methods.html", "5 Resampling Methods 5.1 Conceptual 5.2 Applied", " 5 Resampling Methods 5.1 Conceptual 5.1.1 Question 1 Using basic statistical properties of the variance, as well as single- variable calculus, derive (5.6). In other words, prove that \\(\\alpha\\) given by (5.6) does indeed minimize \\(Var(\\alpha X + (1 - \\alpha)Y)\\). Equation 5.6 is: \\[ \\alpha = \\frac{\\sigma^2_Y - \\sigma_{XY}}{\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}} \\] Remember that: \\[ Var(aX) = a^2Var(X), \\\\ \\mathrm{Var}(X + Y) = \\mathrm{Var}(X) + \\mathrm{Var}(Y) + 2\\mathrm{Cov}(X,Y), \\\\ \\mathrm{Cov}(aX, bY) = ab\\mathrm{Cov}(X, Y) \\] If we define \\(\\sigma^2_X = \\mathrm{Var}(X)\\), \\(\\sigma^2_Y = \\mathrm{Var}(Y)\\) and \\(\\sigma_{XY} = \\mathrm{Cov}(X, Y)\\) \\[\\begin{align} Var(\\alpha X + (1 - \\alpha)Y) &= \\alpha^2\\sigma^2_X + (1-\\alpha)^2\\sigma^2_Y + 2\\alpha(1 - \\alpha)\\sigma_{XY} \\\\ &= \\alpha^2\\sigma^2_X + \\sigma^2_Y - 2\\alpha\\sigma^2_Y + \\alpha^2\\sigma^2_Y + 2\\alpha\\sigma_{XY} - 2\\alpha^2\\sigma_{XY} \\end{align}\\] Now we want to find when the rate of change of this function is 0 with respect to \\(\\alpha\\), so we compute the partial derivative, set to 0 and solve. \\[ \\frac{\\partial}{\\partial{\\alpha}} = 2\\alpha\\sigma^2_X - 2\\sigma^2_Y + 2\\alpha\\sigma^2_Y + 2\\sigma_{XY} - 4\\alpha\\sigma_{XY} = 0 \\] Moving \\(\\alpha\\) terms to the same side: \\[ \\alpha\\sigma^2_X + \\alpha\\sigma^2_Y - 2\\alpha\\sigma_{XY} = \\sigma^2_Y - \\sigma_{XY} \\] \\[ \\alpha = \\frac{\\sigma^2_Y - \\sigma_{XY}}{\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}} \\] We should also show that this is a minimum, so that the second partial derivative wrt \\(\\alpha\\) is \\(>= 0\\). \\[\\begin{align} \\frac{\\partial^2}{\\partial{\\alpha^2}} &= 2\\sigma^2_X + 2\\sigma^2_Y - 4\\sigma_{XY} \\\\ &= 2(\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}) \\\\ &= 2\\mathrm{Var}(X - Y) \\end{align}\\] Since variance is positive, then this must be positive. 5.1.2 Question 2 We will now derive the probability that a given observation is part of a bootstrap sample. Suppose that we obtain a bootstrap sample from a set of n observations. What is the probability that the first bootstrap observation is not the \\(j\\)th observation from the original sample? Justify your answer. This is 1 - probability that it is the \\(j\\)th = \\(1 - 1/n\\). What is the probability that the second bootstrap observation is not the \\(j\\)th observation from the original sample? Since each bootstrap observation is a random sample, this probability is the same (\\(1 - 1/n\\)). Argue that the probability that the \\(j\\)th observation is not in the bootstrap sample is \\((1 - 1/n)^n\\). For the \\(j\\)th observation to not be in the sample, it would have to not be picked for each of \\(n\\) positions, so not picked for \\(1, 2, ..., n\\), thus the probability is \\((1 - 1/n)^n\\) When \\(n = 5\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample? n <- 5 1 - (1 - 1 / n)^n ## [1] 0.67232 \\(p = 0.67\\) When \\(n = 100\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample? 
n <- 100 1 - (1 - 1 / n)^n ## [1] 0.6339677 \\(p = 0.64\\) When \\(n = 10,000\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample? n <- 10000 1 - (1 - 1 / n)^n ## [1] 0.632139 \\(p = 0.63\\) Create a plot that displays, for each integer value of \\(n\\) from 1 to 100,000, the probability that the \\(j\\)th observation is in the bootstrap sample. Comment on what you observe. x <- sapply(1:100000, function(n) 1 - (1 - 1 / n)^n) plot(x, log = "x", type = "o") The probability rapidly approaches 0.63 with increasing \\(n\\). Note that \\[e^x = \\lim_{n \\to \\infty} \\left(1 + \\frac{x}{n}\\right)^n,\\] so with \\(x = -1\\), we can see that our limit is \\(1 - e^{-1} = 1 - 1/e\\). We will now investigate numerically the probability that a bootstrap sample of size \\(n = 100\\) contains the \\(j\\)th observation. Here \\(j = 4\\). We repeatedly create bootstrap samples, and each time we record whether or not the fourth observation is contained in the bootstrap sample. > store <- rep (NA, 10000) > for (i in 1:10000) { store[i] <- sum(sample(1:100, rep = TRUE) == 4) > 0 } > mean(store) Comment on the results obtained. store <- replicate(10000, sum(sample(1:100, replace = TRUE) == 4) > 0) mean(store) ## [1] 0.6355 The probability of including \\(4\\) when resampling numbers \\(1...100\\) is close to \\(1 - (1 - 1/100)^{100}\\). 5.1.3 Question 3 We now review \\(k\\)-fold cross-validation. Explain how \\(k\\)-fold cross-validation is implemented. We divide our data into \\(k\\) (approximately equal) subsets, and then generate predictions for each subset in turn, training on the other \\(k - 1\\) subsets combined. What are the advantages and disadvantages of \\(k\\)-fold cross-validation relative to: The validation set approach? LOOCV? When using a validation set, we can only train on a small portion of the data as we must reserve the rest for validation. As a result it can overestimate the test error rate (assuming we then train using the complete data for future prediction). It is also sensitive to which observations are included in train vs. test. It is, however, low cost in terms of processing time (as we only have to fit one model). When using LOOCV, we can train on \\(n-1\\) observations; however, the models we fit each differ only by the inclusion (and exclusion) of a single observation. As a result, LOOCV can have high variance (the fitted models are nearly identical to one another, so the resulting error estimate can differ considerably from what we would obtain with a different data set). LOOCV is also costly in terms of processing time. 5.1.4 Question 4 Suppose that we use some statistical learning method to make a prediction for the response \\(Y\\) for a particular value of the predictor \\(X\\). Carefully describe how we might estimate the standard deviation of our prediction. We could address this with bootstrapping. Our procedure would be to (jointly) resample the \\(Y\\) and \\(X\\) variables and refit our model many times. For each bootstrap fit we record the prediction at the chosen value of \\(X\\); the standard deviation of these predictions across the bootstrap samples is our estimate of the standard deviation of the prediction. 5.2 Applied 5.2.1 Question 5 In Chapter 4, we used logistic regression to predict the probability of default using income and balance on the Default data set. We will now estimate the test error of this logistic regression model using the validation set approach. Do not forget to set a random seed before beginning your analysis. Fit a logistic regression model that uses income and balance to predict default.
library(ISLR2) set.seed(42) fit <- glm(default ~ income + balance, data = Default, family = "binomial") Using the validation set approach, estimate the test error of this model. In order to do this, you must perform the following steps: Split the sample set into a training set and a validation set. Fit a multiple logistic regression model using only the training observations. Obtain a prediction of default status for each individual in the validation set by computing the posterior probability of default for that individual, and classifying the individual to the default category if the posterior probability is greater than 0.5. Compute the validation set error, which is the fraction of the observations in the validation set that are misclassified. train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") table(pred, Default$default[-train]) ## ## pred No Yes ## No 4817 110 ## Yes 20 53 mean(pred != Default$default[-train]) ## [1] 0.026 Repeat the process in (b) three times, using three different splits of the observations into a training set and a validation set. Comment on the results obtained. replicate(3, { train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") mean(pred != Default$default[-train]) }) ## [1] 0.0260 0.0294 0.0258 The results obtained are variable and depend on the samples allocated to training vs. test. Now consider a logistic regression model that predicts the probability of default using income, balance, and a dummy variable for student. Estimate the test error for this model using the validation set approach. Comment on whether or not including a dummy variable for student leads to a reduction in the test error rate. replicate(3, { train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance + student, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") mean(pred != Default$default[-train]) }) ## [1] 0.0278 0.0256 0.0250 Including student does not seem to make a substantial improvement to the test error. 5.2.2 Question 6 We continue to consider the use of a logistic regression model to predict the probability of default using income and balance on the Default data set. In particular, we will now compute estimates for the standard errors of the income and balance logistic regression coefficients in two different ways: (1) using the bootstrap, and (2) using the standard formula for computing the standard errors in the glm() function. Do not forget to set a random seed before beginning your analysis. Using the summary() and glm() functions, determine the estimated standard errors for the coefficients associated with income and balance in a multiple logistic regression model that uses both predictors. fit <- glm(default ~ income + balance, data = Default, family = "binomial") summary(fit) ## ## Call: ## glm(formula = default ~ income + balance, family = "binomial", ## data = Default) ## ## Coefficients: ## Estimate Std. 
Error z value Pr(>|z|) ## (Intercept) -1.154e+01 4.348e-01 -26.545 < 2e-16 *** ## income 2.081e-05 4.985e-06 4.174 2.99e-05 *** ## balance 5.647e-03 2.274e-04 24.836 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 2920.6 on 9999 degrees of freedom ## Residual deviance: 1579.0 on 9997 degrees of freedom ## AIC: 1585 ## ## Number of Fisher Scoring iterations: 8 The standard errors obtained by bootstrapping are \\(\\beta_1\\) = 5.0e-6 and \\(\\beta_2\\) = 2.3e-4. Write a function, boot.fn(), that takes as input the Default data set as well as an index of the observations, and that outputs the coefficient estimates for income and balance in the multiple logistic regression model. boot.fn <- function(x, i) { fit <- glm(default ~ income + balance, data = x[i, ], family = "binomial") coef(fit)[-1] } Use the boot() function together with your boot.fn() function to estimate the standard errors of the logistic regression coefficients for income and balance. library(boot) set.seed(42) boot(Default, boot.fn, R = 1000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Default, statistic = boot.fn, R = 1000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 2.080898e-05 2.737444e-08 5.073444e-06 ## t2* 5.647103e-03 1.176249e-05 2.299133e-04 Comment on the estimated standard errors obtained using the glm() function and using your bootstrap function. The standard errors obtained by bootstrapping are similar to those estimated by glm. 5.2.3 Question 7 In Sections 5.3.2 and 5.3.3, we saw that the cv.glm() function can be used in order to compute the LOOCV test error estimate. Alternatively, one could compute those quantities using just the glm() and predict.glm() functions, and a for loop. You will now take this approach in order to compute the LOOCV error for a simple logistic regression model on the Weekly data set. Recall that in the context of classification problems, the LOOCV error is given in (5.4). Fit a logistic regression model that predicts Direction using Lag1 and Lag2. fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly, family = "binomial") Fit a logistic regression model that predicts Direction using Lag1 and Lag2 using all but the first observation. fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly[-1, ], family = "binomial") Use the model from (b) to predict the direction of the first observation. You can do this by predicting that the first observation will go up if \\(P(\\)Direction=\"Up\" | Lag1 , Lag2\\() > 0.5\\). Was this observation correctly classified? predict(fit, newdata = Weekly[1, , drop = FALSE], type = "response") > 0.5 ## 1 ## TRUE Yes the observation was correctly classified. Write a for loop from \\(i = 1\\) to \\(i = n\\), where \\(n\\) is the number of observations in the data set, that performs each of the following steps: Fit a logistic regression model using all but the \\(i\\)th observation to predict Direction using Lag1 and Lag2 . Compute the posterior probability of the market moving up for the \\(i\\)th observation. Use the posterior probability for the \\(i\\)th observation in order to predict whether or not the market moves up. Determine whether or not an error was made in predicting the direction for the \\(i\\)th observation. If an error was made, then indicate this as a 1, and otherwise indicate it as a 0. 
error <- numeric(nrow(Weekly)) for (i in 1:nrow(Weekly)) { fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly[-i, ], family = "binomial") p <- predict(fit, newdata = Weekly[i, , drop = FALSE], type = "response") > 0.5 error[i] <- ifelse(p, "Down", "Up") == Weekly$Direction[i] } Take the average of the \\(n\\) numbers obtained in (d) in order to obtain the LOOCV estimate for the test error. Comment on the results. mean(error) ## [1] 0.4499541 The LOOCV test error rate is 45% which implies that our predictions are marginally more often correct than not. 5.2.4 Question 8 We will now perform cross-validation on a simulated data set. Generate a simulated data set as follows: > set.seed(1) > x <- rnorm(100) > y <- x - 2 *x^2 + rnorm(100) In this data set, what is \\(n\\) and what is \\(p\\)? Write out the model used to generate the data in equation form. set.seed(1) x <- rnorm(100) y <- x - 2 * x^2 + rnorm(100) \\(n\\) is 100 and \\(p\\) is 1 (there are 100 observations and \\(y\\) is predicted with a single variable \\(x\\)). The model equation is: \\[y = -2x^2 + x + \\epsilon\\]. Create a scatterplot of \\(X\\) against \\(Y\\). Comment on what you find. plot(x, y) \\(y\\) has a (negative) quadratic relationship with \\(x\\). Set a random seed, and then compute the LOOCV errors that result from fitting the following four models using least squares: \\(Y = \\beta_0 + \\beta_1 X + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\beta_3 X^3 + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\beta_3 X^3 + \\beta_4 X^4 + \\epsilon\\). Note you may find it helpful to use the data.frame() function to create a single data set containing both \\(X\\) and \\(Y\\). library(boot) set.seed(42) dat <- data.frame(x, y) sapply(1:4, function(i) cv.glm(dat, glm(y ~ poly(x, i)))$delta[1]) ## [1] 7.2881616 0.9374236 0.9566218 0.9539049 Repeat (c) using another random seed, and report your results. Are your results the same as what you got in (c)? Why? set.seed(43) dat <- data.frame(x, y) sapply(1:4, function(i) cv.glm(dat, glm(y ~ poly(x, i)))$delta[1]) ## [1] 7.2881616 0.9374236 0.9566218 0.9539049 The results are the same because we are using LOOCV. When doing this, the model is fit leaving each one of the observations out in turn, and thus there is no stochasticity involved. Which of the models in (c) had the smallest LOOCV error? Is this what you expected? Explain your answer. The second model had the smallest LOOCV. This what would be expected since the model to generate the data was quadratic and we are measuring the test (rather than training) error rate to evaluate performance. Comment on the statistical significance of the coefficient estimates that results from fitting each of the models in (c) using least squares. Do these results agree with the conclusions drawn based on the cross-validation results? for (i in 1:4) printCoefmat(coef(summary(glm(y ~ poly(x, i), data = dat)))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.55002 0.26001 -5.9613 3.954e-08 *** ## poly(x, i) 6.18883 2.60014 2.3802 0.01924 * ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.550023 0.095803 -16.1792 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.958032 6.4599 4.185e-09 *** ## poly(x, i)2 -23.948305 0.958032 -24.9974 < 2.2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. 
Error t value Pr(>|t|) ## (Intercept) -1.550023 0.096263 -16.1019 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.962632 6.4291 4.972e-09 *** ## poly(x, i)2 -23.948305 0.962632 -24.8779 < 2.2e-16 *** ## poly(x, i)3 0.264106 0.962632 0.2744 0.7844 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.550023 0.095905 -16.1620 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.959051 6.4531 4.591e-09 *** ## poly(x, i)2 -23.948305 0.959051 -24.9708 < 2.2e-16 *** ## poly(x, i)3 0.264106 0.959051 0.2754 0.7836 ## poly(x, i)4 1.257095 0.959051 1.3108 0.1931 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 We can see that the coefficients in the first model are not highly significant, but all terms (\\(\\beta_0, \\beta_1\\) and \\(\\beta_2\\)) are in the quadratic model. After this, subsequent \\(\\beta_n\\) terms are not significant. Therefore, these results agree with those from cross-validation. 5.2.5 Question 9 We will now consider the Boston housing data set, from the ISLR2 library. Based on this data set, provide an estimate for the population mean of medv. Call this estimate \\(\\hat\\mu\\). (mu <- mean(Boston$medv)) ## [1] 22.53281 Provide an estimate of the standard error of \\(\\hat\\mu\\). Interpret this result. Hint: We can compute the standard error of the sample mean by dividing the sample standard deviation by the square root of the number of observations. sd(Boston$medv) / sqrt(length(Boston$medv)) ## [1] 0.4088611 Now estimate the standard error of \\(\\hat\\mu\\) using the bootstrap. How does this compare to your answer from (b)? set.seed(42) (bs <- boot(Boston$medv, function(v, i) mean(v[i]), 10000)) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) mean(v[i]), ## R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 22.53281 0.002175751 0.4029139 The standard error using the bootstrap (0.403) is very close to that obtained from the formula above (0.409). Based on your bootstrap estimate from (c), provide a 95% confidence interval for the mean of medv. Compare it to the results obtained using t.test(Boston$medv). Hint: You can approximate a 95% confidence interval using the formula \\([\\hat\\mu - 2SE(\\hat\\mu), \\hat\\mu + 2SE(\\hat\\mu)].\\) se <- sd(bs$t) c(mu - 2 * se, mu + 2 * se) ## [1] 21.72698 23.33863 Based on this data set, provide an estimate, \\(\\hat\\mu_{med}\\), for the median value of medv in the population. median(Boston$medv) ## [1] 21.2 We now would like to estimate the standard error of \\(\\hat\\mu_{med}\\). Unfortunately, there is no simple formula for computing the standard error of the median. Instead, estimate the standard error of the median using the bootstrap. Comment on your findings. set.seed(42) boot(Boston$medv, function(v, i) median(v[i]), 10000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) median(v[i]), ## R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 21.2 -0.01331 0.3744634 The estimated standard error of the median is 0.374. This is lower than the standard error of the mean. Based on this data set, provide an estimate for the tenth percentile of medv in Boston census tracts. Call this quantity \\(\\hat\\mu_{0.1}\\). (You can use the quantile() function.) quantile(Boston$medv, 0.1) ## 10% ## 12.75 Use the bootstrap to estimate the standard error of \\(\\hat\\mu_{0.1}\\). 
Comment on your findings. set.seed(42) boot(Boston$medv, function(v, i) quantile(v[i], 0.1), 10000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) quantile(v[i], ## 0.1), R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 12.75 0.013405 0.497298 We get a standard error of ~0.5. This is higher than the standard error of the median. Nevertheless the standard error is quite small, thus we can be fairly confidence about the value of the 10th percentile. "],["linear-model-selection-and-regularization.html", "6 Linear Model Selection and Regularization 6.1 Conceptual 6.2 Applied", " 6 Linear Model Selection and Regularization 6.1 Conceptual 6.1.1 Question 1 We perform best subset, forward stepwise, and backward stepwise selection on a single data set. For each approach, we obtain \\(p + 1\\) models, containing \\(0, 1, 2, ..., p\\) predictors. Explain your answers: Which of the three models with \\(k\\) predictors has the smallest training RSS? Best subset considers the most models (all possible combinations of \\(p\\) predictors are considered), therefore this will give the smallest training RSS (it will at least consider all possibilities covered by forward and backward stepwise selection). However, all three approaches are expected to give similar if not identical results in practice. Which of the three models with \\(k\\) predictors has the smallest test RSS? We cannot tell which model will perform best on the test RSS. The answer will depend on the tradeoff between fitting to the data and overfitting. True or False: The predictors in the \\(k\\)-variable model identified by forward stepwise are a subset of the predictors in the (\\(k+1\\))-variable model identified by forward stepwise selection. True. Forward stepwise selection retains all features identified in previous models as \\(k\\) is increased. The predictors in the \\(k\\)-variable model identified by backward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by backward stepwise selection. True. Backward stepwise selection removes features one by one as \\(k\\) is decreased. The predictors in the \\(k\\)-variable model identified by backward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by forward stepwise selection. False. Forward and backward stepwise selection can identify different combinations of variables due to differing algorithms. The predictors in the \\(k\\)-variable model identified by forward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by backward stepwise selection. False. Forward and backward stepwise selection can identify different combinations of variables due to differing algorithms. The predictors in the \\(k\\)-variable model identified by best subset are a subset of the predictors in the \\((k+1)\\)-variable model identified by best subset selection. False. Best subset selection can identify different combinations of variables for each \\(k\\) by considering all possible models. 6.1.2 Question 2 For parts (a) through (c), indicate which of i. through iv. is correct. Justify your answer. The lasso, relative to least squares, is: More flexible and hence will give improved prediction accuracy when its increase in bias is less than its decrease in variance. More flexible and hence will give improved prediction accuracy when its increase in variance is less than its decrease in bias. 
Less flexible and hence will give improved prediction accuracy when its increase in bias is less than its decrease in variance. Less flexible and hence will give improved prediction accuracy when its increase in variance is less than its decrease in bias. By using shrinkage, lasso can reduce the number of predictors so is less flexible. As a result, it will lead to an increase in bias by approximating the true relationship. We hope that this increase is small but that we dramatically reduce variance (i.e. the difference we would see in the model fit between different sets of training data). Repeat (a) for ridge regression relative to least squares. The same is true of ridge regression—shrinkage results in a less flexible model and can reduce variance. Repeat (a) for non-linear methods relative to least squares. Non-linear methods can be more flexible. They can perform better as long as they don’t substantially increase variance. 6.1.3 Question 3 Suppose we estimate the regression coefficients in a linear regression model by minimizing: \\[ \\sum_{i=1}^n\\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 \\textrm{subject to} \\sum_{j=1}^p|\\beta_j| \\le s \\] for a particular value of \\(s\\). For parts (a) through (e), indicate which of i. through v. is correct. Justify your answer. As we increase \\(s\\) from 0, the training RSS will: Increase initially, and then eventually start decreasing in an inverted U shape. Decrease initially, and then eventually start increasing in a U shape. Steadily increase. Steadily decrease. Remain constant. As \\(s\\) increases, the model becomes more flexible (the sum of absolute coefficients can be higher). With more flexible models, training RSS will always decrease. Repeat (a) for test RSS. With more flexible models, test RSS will decrease (as the fit improves) and will then increase due to overfitting (high variance). Repeat (a) for variance. As \\(s\\) increases, the model becomes more flexible so variance will increase. Repeat (a) for (squared) bias. As \\(s\\) increases, the model becomes more flexible so bias will decrease. Repeat (a) for the irreducible error. The irreducible error is unchanged. 6.1.4 Question 4 Suppose we estimate the regression coefficients in a linear regression model by minimizing \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] for a particular value of \\(\\lambda\\). For parts (a) through (e), indicate which of i. through v. is correct. Justify your answer. As we increase \\(\\lambda\\) from 0, the training RSS will: Increase initially, and then eventually start decreasing in an inverted U shape. Decrease initially, and then eventually start increasing in a U shape. Steadily increase. Steadily decrease. Remain constant. As \\(\\lambda\\) is increased, more weight is placed on the sum of squared coefficients and so the model becomes less flexible. As a result, training RSS must increase. Repeat (a) for test RSS. As \\(\\lambda\\) increases, flexibility decreases so test RSS will decrease (variance decreases) but will then increase (as bias increases). Repeat (a) for variance. Steadily decrease. Repeat (a) for (squared) bias. Steadily increase. Repeat (a) for the irreducible error. The irreducible error is unchanged. 6.1.5 Question 5 It is well-known that ridge regression tends to give similar coefficient values to correlated variables, whereas the lasso may give quite different coefficient values to correlated variables. 
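As a quick numerical illustration of this tendency (an aside, not part of the original question or solution), we can simulate two nearly identical predictors and compare the coefficients that ridge and lasso assign to them. This sketch assumes the glmnet package used elsewhere in this chapter; the lambda value is an arbitrary illustrative choice:
library(glmnet)
set.seed(1)
n <- 100
x1 <- rnorm(n)
x2 <- x1 + rnorm(n, sd = 0.01)  # x2 is almost perfectly correlated with x1
y <- 2 * x1 + 2 * x2 + rnorm(n)
X <- cbind(x1, x2)
coef(glmnet(X, y, alpha = 0, lambda = 0.5))  # ridge: weight tends to be shared roughly equally
coef(glmnet(X, y, alpha = 1, lambda = 0.5))  # lasso: weight tends to concentrate on one of the pair
Typically the ridge fit gives the two predictors nearly equal coefficients, while the lasso fit drives one of them towards (or exactly to) zero, mirroring the claim above.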
We will now explore this property in a very simple setting. Suppose that \\(n = 2, p = 2, x_{11} = x_{12}, x_{21} = x_{22}\\). Furthermore, suppose that \\(y_1 + y_2 =0\\) and \\(x_{11} + x_{21} = 0\\) and \\(x_{12} + x_{22} = 0\\), so that the estimate for the intercept in a least squares, ridge regression, or lasso model is zero: \\(\\hat{\\beta}_0 = 0\\). Write out the ridge regression optimization problem in this setting. We are trying to minimize: \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] We can ignore \\(\\beta_0\\) and can expand the sums since there’s only two terms. Additionally, we can define \\(x_1 = x_{11} = x_{12}\\) and \\(x_2 = x_{21} = x_{22}\\). We then need to minimize \\[\\begin{align} f = & (y_1 - \\beta_1x_1 - \\beta_2x_1)^2 + (y_2 - \\beta_1x_2 - \\beta_2x_2)^2 + \\lambda\\beta_1^2 + \\lambda\\beta_2^2 \\\\ f = & y_1^2 - 2y_1\\beta_1x_1 - 2y_1\\beta_2x_1 + \\beta_1^2x_1^2 + 2\\beta_1\\beta_2x_1^2 + \\beta_2^2x_1^2 + \\\\ & y_2^2 - 2y_2\\beta_1x_2 - 2y_2\\beta_2x_2 + \\beta_1^2x_2^2 + 2\\beta_1\\beta_2x_2^2 + \\beta_2^2x_2^2 + \\\\ & \\lambda\\beta_1^2 + \\lambda\\beta_2^2 \\\\ \\end{align}\\] Argue that in this setting, the ridge coefficient estimates satisfy \\(\\hat{\\beta}_1 = \\hat{\\beta}_2\\) We can find when the above is minimized with respect to each of \\(\\beta_1\\) and \\(\\beta_2\\) by partial differentiation. \\[ \\frac{\\partial}{\\partial{\\beta_1}} = - 2y_1x_1 + 2\\beta_1x_1^2 + 2\\beta_2x_1^2 - 2y_2x_2 + 2\\beta_1x_2^2 + 2\\beta_2x_2^2 + 2\\lambda\\beta_1 \\] \\[ \\frac{\\partial}{\\partial{\\beta_2}} = - 2y_1x_1 + 2\\beta_1x_1^2 + 2\\beta_2x_1^2 - 2y_2x_2 + 2\\beta_1x_2^2 + 2\\beta_2x_2^2 + 2\\lambda\\beta_2 \\] A minimum can be found when these are set to 0. \\[ \\lambda\\beta_1 = y_1x_1 + y_2x_2 - \\beta_1x_1^2 - \\beta_2x_1^2 - \\beta_1x_2^2 - \\beta_2x_2^2 \\\\ \\lambda\\beta_2 = y_1x_1 + y_2x_2 - \\beta_1x_1^2 - \\beta_2x_1^2 - \\beta_1x_2^2 - \\beta_2x_2^2 \\] Therefore \\(\\lambda\\beta_1 = \\lambda\\beta_2\\) and \\(\\beta_1 = \\beta_2\\), thus there is only one solution, that is when the coefficients are the same. Write out the lasso optimization problem in this setting. We are trying to minimize: \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p |\\beta_j| \\] As above (and defining \\(x_1 = x_{11} = x_{12}\\) and \\(x_2 = x_{21} = x_{22}\\)) we simplify to \\[ (y_1 - \\beta_1x_1 - \\beta_2x_1)^2 + (y_2 - \\beta_1x_2 - \\beta_2x_2)^2 + \\lambda|\\beta_1| + \\lambda|\\beta_2| \\] Argue that in this setting, the lasso coefficients \\(\\hat{\\beta}_1\\) and \\(\\hat{\\beta}_2\\) are not unique—in other words, there are many possible solutions to the optimization problem in (c). Describe these solutions. We will consider the alternate form of the lasso optimization problem \\[ (y_1 - \\hat{\\beta_1}x_1 - \\hat{\\beta_2}x_1)^2 + (y_2 - \\hat{\\beta_1}x_2 - \\hat{\\beta_2}x_2)^2 \\quad \\text{subject to} \\quad |\\hat{\\beta_1}| + |\\hat{\\beta_2}| \\le s \\] Since \\(x_1 + x_2 = 0\\) and \\(y_1 + y_2 = 0\\), this is equivalent to minimising \\(2(y_1 - (\\hat{\\beta_1} + \\hat{\\beta_2})x_1)^2\\) which has a solution when \\(\\hat{\\beta_1} + \\hat{\\beta_2} = y_1/x_1\\). Geometrically, this is a \\(45^\\circ\\) backwards sloping line in the (\\(\\hat{\\beta_1}\\), \\(\\hat{\\beta_2}\\)) plane. 
The constraints \\(|\\hat{\\beta_1}| + |\\hat{\\beta_2}| \\le s\\) specify a diamond shape in the same place, also with lines that are at \\(45^\\circ\\) centered at the origin and which intersect the axes at a distance \\(s\\) from the origin. Thus, points along two edges of the diamond (\\(\\hat{\\beta_1} + \\hat{\\beta_2} = s\\) and \\(\\hat{\\beta_1} + \\hat{\\beta_2} = -s\\)) become solutions to the lasso optimization problem. 6.1.6 Question 6 We will now explore (6.12) and (6.13) further. Consider (6.12) with \\(p = 1\\). For some choice of \\(y_1\\) and \\(\\lambda > 0\\), plot (6.12) as a function of \\(\\beta_1\\). Your plot should confirm that (6.12) is solved by (6.14). Equation 6.12 is: \\[ \\sum_{j=1}^p(y_j - \\beta_j)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] Equation 6.14 is: \\[ \\hat{\\beta}_j^R = y_j/(1 + \\lambda) \\] where \\(\\hat{\\beta}_j^R\\) is the ridge regression estimate. lambda <- 0.7 y <- 1.4 fn <- function(beta) { (y - beta)^2 + lambda * beta^2 } plot(seq(0, 2, 0.01), fn(seq(0, 2, 0.01)), type = "l", xlab = "beta", ylab = "6.12") abline(v = y / (1 + lambda), lty = 2) Consider (6.13) with \\(p = 1\\). For some choice of \\(y_1\\) and \\(\\lambda > 0\\), plot (6.13) as a function of \\(\\beta_1\\). Your plot should confirm that (6.13) is solved by (6.15). Equation 6.13 is: \\[ \\sum_{j=1}^p(y_j - \\beta_j)^2 + \\lambda\\sum_{j=1}^p|\\beta_j| \\] Equation 6.15 is: \\[ \\hat{\\beta}_j^L = \\begin{cases} y_j - \\lambda/2 &\\mbox{if } y_j > \\lambda/2; \\\\ y_j + \\lambda/2 &\\mbox{if } y_j < -\\lambda/2; \\\\ 0 &\\mbox{if } |y_j| \\le \\lambda/2; \\end{cases} \\] For \\(\\lambda = 0.7\\) and \\(y = 1.4\\), the top case applies. lambda <- 0.7 y <- 1.4 fn <- function(beta) { (y - beta)^2 + lambda * abs(beta) } plot(seq(0, 2, 0.01), fn(seq(0, 2, 0.01)), type = "l", xlab = "beta", ylab = "6.12") abline(v = y - lambda / 2, lty = 2) 6.1.7 Question 7 We will now derive the Bayesian connection to the lasso and ridge regression discussed in Section 6.2.2. Suppose that \\(y_i = \\beta_0 + \\sum_{j=1}^p x_{ij}\\beta_j + \\epsilon_i\\) where \\(\\epsilon_1, ..., \\epsilon_n\\) are independent and identically distributed from a \\(N(0, \\sigma^2)\\) distribution. Write out the likelihood for the data. \\[\\begin{align*} \\mathcal{L} &= \\prod_i^n \\mathcal{N}(0, \\sigma^2) \\\\ &= \\prod_i^n \\frac{1}{\\sqrt{2\\pi\\sigma}}\\exp\\left(-\\frac{\\epsilon_i^2}{2\\sigma^2}\\right) \\\\ &= \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\end{align*}\\] Assume the following prior for \\(\\beta\\): \\(\\beta_1, ..., \\beta_p\\) are independent and identically distributed according to a double-exponential distribution with mean 0 and common scale parameter b: i.e. \\(p(\\beta) = \\frac{1}{2b}\\exp(-|\\beta|/b)\\). Write out the posterior for \\(\\beta\\) in this setting. The posterior can be calculated by multiplying the prior and likelihood (up to a proportionality constant). \\[\\begin{align*} p(\\beta|X,Y) &\\propto \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\prod_j^p\\frac{1}{2b}\\exp\\left(-\\frac{|\\beta_j|}{b}\\right) \\\\ &\\propto \\frac{1}{2b} \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 -\\sum_j^p\\frac{|\\beta_j|}{b}\\right) \\end{align*}\\] Argue that the lasso estimate is the mode for \\(\\beta\\) under this posterior distribution. 
Let us find the maximum of the posterior distribution (the mode). Maximizing the posterior probability is equivalent to maximizing its log which is: \\[ \\log(p(\\beta|X,Y)) \\propto \\log\\left[ \\frac{1}{2b} \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\right ] - \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\sum_j^p\\frac{|\\beta_j|}{b}\\right) \\] Since, the first term is independent of \\(\\beta\\), our solution will be when we minimize the second term. \\[\\begin{align*} \\DeclareMathOperator*{\\argmin}{arg\\,min} % Jan Hlavacek \\argmin_\\beta \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\sum_j^p\\frac{|\\beta|}{b}\\right) &= \\argmin_\\beta \\left(\\frac{1}{2\\sigma^2} \\right ) \\left( \\sum_i^n \\epsilon_i^2 +\\frac{2\\sigma^2}{b}\\sum_j^p|\\beta_j|\\right) \\\\ &= \\argmin_\\beta \\left( \\sum_i^n \\epsilon_i^2 +\\frac{2\\sigma^2}{b}\\sum_j^p|\\beta_j|\\right) \\end{align*}\\] Note, that \\(RSS = \\sum_i^n \\epsilon_i^2\\) and if we set \\(\\lambda = \\frac{2\\sigma^2}{b}\\), the mode corresponds to lasso optimization. \\[ \\argmin_\\beta RSS + \\lambda\\sum_j^p|\\beta_j| \\] Now assume the following prior for \\(\\beta\\): \\(\\beta_1, ..., \\beta_p\\) are independent and identically distributed according to a normal distribution with mean zero and variance \\(c\\). Write out the posterior for \\(\\beta\\) in this setting. The posterior is now: \\[\\begin{align*} p(\\beta|X,Y) &\\propto \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\prod_j^p\\frac{1}{\\sqrt{2\\pi c}}\\exp\\left(-\\frac{\\beta_j^2}{2c}\\right) \\\\ &\\propto \\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\left(\\frac{1}{\\sqrt{2\\pi c}}\\right)^p \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 - \\frac{1}{2c}\\sum_j^p\\beta_j^2\\right) \\end{align*}\\] Argue that the ridge regression estimate is both the mode and the mean for \\(\\beta\\) under this posterior distribution. To show that the ridge estimate is the mode we can again find the maximum by maximizing the log of the posterior. The log is \\[ \\log{p(\\beta|X,Y)} \\propto \\log{\\left[\\left(\\frac{1}{\\sqrt{2\\pi\\sigma}}\\right)^n \\left(\\frac{1}{\\sqrt{2\\pi c}}\\right)^p \\right ]} - \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\frac{1}{2c}\\sum_j^p\\beta_j^2 \\right) \\] We can maximize (wrt \\(\\beta\\)) by ignoring the first term and minimizing the second term. i.e. we minimize: \\[ \\argmin_\\beta \\left( \\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\frac{1}{2c}\\sum_j^p\\beta_j^2 \\right)\\\\ = \\argmin_\\beta \\left( \\frac{1}{2\\sigma^2} \\left( \\sum_i^n \\epsilon_i^2 + \\frac{\\sigma^2}{c}\\sum_j^p\\beta_j^2 \\right) \\right) \\] As above, if \\(RSS = \\sum_i^n \\epsilon_i^2\\) and if we set \\(\\lambda = \\frac{\\sigma^2}{c}\\), we can see that the mode corresponds to ridge optimization. 6.2 Applied 6.2.1 Question 8 In this exercise, we will generate simulated data, and will then use this data to perform best subset selection. Use the rnorm() function to generate a predictor \\(X\\) of length \\(n = 100\\), as well as a noise vector \\(\\epsilon\\) of length \\(n = 100\\). library(ISLR2) library(glmnet) library(leaps) library(pls) set.seed(42) x <- rnorm(100) ep <- rnorm(100) Generate a response vector \\(Y\\) of length \\(n = 100\\) according to the model \\[Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon,\\] where \\(\\beta_0, \\beta_1, \\beta_2,\\) and \\(\\beta_3\\) are constants of your choice. 
y <- 2 + 3 * x - 2 * x^2 + 0.5 * x^3 + ep Use the regsubsets() function to perform best subset selection in order to choose the best model containing the predictors \\(X, X^2, ..., X^{10}\\). What is the best model obtained according to \\(C_p\\), BIC, and adjusted \\(R^2\\)? Show some plots to provide evidence for your answer, and report the coefficients of the best model obtained. Note you will need to use the data.frame() function to create a single data set containing both \\(X\\) and \\(Y\\). dat <- data.frame(x, y) summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat)) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat) ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: exhaustive ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) "*" "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " Repeat (c), using forward stepwise selection and also using backwards stepwise selection. How does your answer compare to the results in (c)? 
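An aside before the part (d) results: part (c) also asks for plots of \\(C_p\\), BIC and adjusted \\(R^2\\) and for the coefficients of the best model, which the subset-selection table above does not show. A sketch of how these could be examined (assuming the dat object defined above and the leaps package; the object names here are illustrative):
library(leaps)
fit_best <- regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat)
s <- summary(fit_best)
par(mfrow = c(1, 3))
plot(s$cp, xlab = "Number of variables", ylab = "Cp", type = "b")
plot(s$bic, xlab = "Number of variables", ylab = "BIC", type = "b")
plot(s$adjr2, xlab = "Number of variables", ylab = "Adjusted R^2", type = "b")
coef(fit_best, which.min(s$bic))  # coefficients of the model chosen by BIC
Under the simulated cubic model one would expect these criteria to favour a fit with roughly three terms, consistent with the lasso results reported later in this question.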
summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat, method = "forward")) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat, method = "forward") ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: forward ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat, method = "backward")) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat, method = "backward") ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: backward ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " " " ## 7 ( 1 ) " " " " ## 8 ( 1 ) " " " " ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" Now fit a lasso model to the simulated data, again using 
\\(X, X^2, ..., X^{10}\\) as predictors. Use cross-validation to select the optimal value of \\(\\lambda\\). Create plots of the cross-validation error as a function of \\(\\lambda\\). Report the resulting coefficient estimates, and discuss the results obtained. res <- cv.glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1) (best <- res$lambda.min) ## [1] 0.09804219 plot(res) out <- glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1, lambda = res$lambda.min) predict(out, type = "coefficients", s = best) ## 11 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) 1.8457308 ## 1 2.9092918 ## 2 -1.9287428 ## 3 0.5161012 ## 4 . ## 5 . ## 6 . ## 7 . ## 8 . ## 9 . ## 10 . When fitting lasso, the model that minimizes MSE uses three predictors (as per the simulation). The coefficients estimated (2.9, -1.9 and 0.5) are similar to those used in the simulation. Now generate a response vector \\(Y\\) according to the model \\[Y = \\beta_0 + \\beta_7X^7 + \\epsilon,\\] and perform best subset selection and the lasso. Discuss the results obtained. dat$y <- 2 - 2 * x^2 + 0.2 * x^7 + ep summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat)) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat) ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: exhaustive ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " "*" ## 3 ( 1 ) " " "*" ## 4 ( 1 ) " " "*" ## 5 ( 1 ) " " "*" ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " "*" ## 8 ( 1 ) " " "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " "*" ## 8 ( 1 ) " " "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" res <- cv.glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1) (best <- res$lambda.min) ## [1] 1.126906 plot(res) out <- glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1, lambda = best) predict(out, type = "coefficients", s = best) ## 11 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) 1.061389580 ## 1 . ## 2 -0.883080980 ## 3 . ## 4 -0.121018425 ## 5 0.028984084 ## 6 -0.009540039 ## 7 0.188796928 ## 8 . ## 9 . ## 10 . When fitting lasso, the model does not perfectly replicate the simulation (coefficients are retained for powers of \\(x\\) that were not simulated). 6.2.2 Question 9 In this exercise, we will predict the number of applications received using the other variables in the College data set. 
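Before splitting the data it is worth a quick look at the response; Apps is strongly right-skewed, which helps put the test MSE values below in context. A brief sketch, not part of the original solution, assuming the ISLR2 package so that College is available:
library(ISLR2)
summary(College$Apps)  # the response is strongly right-skewed
hist(College$Apps, breaks = 40, xlab = "Apps", main = "Applications received")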
Split the data set into a training set and a test set. set.seed(42) train <- sample(nrow(College), nrow(College) * 2 / 3) test <- setdiff(seq_len(nrow(College)), train) mse <- list() Fit a linear model using least squares on the training set, and report the test error obtained. fit <- lm(Apps ~ ., data = College[train, ]) (mse$lm <- mean((predict(fit, College[test, ]) - College$Apps[test])^2)) ## [1] 1695269 Fit a ridge regression model on the training set, with \\(\\lambda\\) chosen by cross-validation. Report the test error obtained. mm <- model.matrix(Apps ~ ., data = College[train, ]) fit2 <- cv.glmnet(mm, College$Apps[train], alpha = 0) p <- predict(fit2, model.matrix(Apps ~ ., data = College[test, ]), s = fit2$lambda.min) (mse$ridge <- mean((p - College$Apps[test])^2)) ## [1] 2804369 Fit a lasso model on the training set, with \\(\\lambda\\) chosen by cross-validation. Report the test error obtained, along with the number of non-zero coefficient estimates. mm <- model.matrix(Apps ~ ., data = College[train, ]) fit3 <- cv.glmnet(mm, College$Apps[train], alpha = 1) p <- predict(fit3, model.matrix(Apps ~ ., data = College[test, ]), s = fit3$lambda.min) (mse$lasso <- mean((p - College$Apps[test])^2)) ## [1] 1822322 Fit a PCR model on the training set, with \\(M\\) chosen by cross-validation. Report the test error obtained, along with the value of \\(M\\) selected by cross-validation. fit4 <- pcr(Apps ~ ., data = College[train, ], scale = TRUE, validation = "CV") validationplot(fit4, val.type = "MSEP") p <- predict(fit4, College[test, ], ncomp = 17) (mse$pcr <- mean((p - College$Apps[test])^2)) ## [1] 1695269 Fit a PLS model on the training set, with \\(M\\) chosen by cross-validation. Report the test error obtained, along with the value of \\(M\\) selected by cross-validation. fit5 <- plsr(Apps ~ ., data = College[train, ], scale = TRUE, validation = "CV") validationplot(fit5, val.type = "MSEP") p <- predict(fit5, College[test, ], ncomp = 12) (mse$pls <- mean((p - College$Apps[test])^2)) ## [1] 1696902 Comment on the results obtained. How accurately can we predict the number of college applications received? Is there much difference among the test errors resulting from these five approaches? barplot(unlist(mse), ylab = "Test MSE", horiz = TRUE) The five approaches give broadly similar test errors; a test MSE of about 1.7 million corresponds to a root mean squared error of roughly 1,300 applications. Here the lowest test error comes from the unpenalized linear model and from PCR (which, with all 17 components retained, is equivalent to least squares), with PLS very close behind; the lasso does slightly worse and ridge regression performs worst (in this specific case with this specific seed). 6.2.3 Question 10 We have seen that as the number of features used in a model increases, the training error will necessarily decrease, but the test error may not. We will now explore this in a simulated data set. Generate a data set with \\(p = 20\\) features, \\(n = 1,000\\) observations, and an associated quantitative response vector generated according to the model \\(Y = X\\beta + \\epsilon\\), where \\(\\beta\\) has some elements that are exactly equal to zero. set.seed(42) dat <- matrix(rnorm(1000 * 20), nrow = 1000) colnames(dat) <- paste0("b", 1:20) beta <- rep(0, 20) beta[1:4] <- c(5, 4, 2, 7) y <- colSums((t(dat) * beta)) + rnorm(1000) dat <- data.frame(dat) dat$y <- y Split your data set into a training set containing 100 observations and a test set containing 900 observations. train <- dat[1:100, ] test <- dat[101:1000, ] Perform best subset selection on the training set, and plot the training set MSE associated with the best model of each size.
fit <- regsubsets(y ~ ., data = train, nvmax = 20) summary(fit) ## Subset selection object ## Call: regsubsets.formula(y ~ ., data = train, nvmax = 20) ## 20 Variables (and intercept) ## Forced in Forced out ## b1 FALSE FALSE ## b2 FALSE FALSE ## b3 FALSE FALSE ## b4 FALSE FALSE ## b5 FALSE FALSE ## b6 FALSE FALSE ## b7 FALSE FALSE ## b8 FALSE FALSE ## b9 FALSE FALSE ## b10 FALSE FALSE ## b11 FALSE FALSE ## b12 FALSE FALSE ## b13 FALSE FALSE ## b14 FALSE FALSE ## b15 FALSE FALSE ## b16 FALSE FALSE ## b17 FALSE FALSE ## b18 FALSE FALSE ## b19 FALSE FALSE ## b20 FALSE FALSE ## 1 subsets of each size up to 20 ## Selection Algorithm: exhaustive ## b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 b16 b17 ## 1 ( 1 ) " " " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 2 ( 1 ) "*" " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 3 ( 1 ) "*" "*" " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 4 ( 1 ) "*" "*" "*" "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 5 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " " " " " " " " " " " " " " " ## 6 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " " " " " " " " " " " " " "*" ## 7 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " "*" " " " " " " " " " " "*" ## 8 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 9 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 10 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 11 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " " " " " "*" "*" ## 12 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " " " " " "*" "*" ## 13 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " "*" " " "*" "*" ## 14 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 15 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 16 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" "*" "*" "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" ## b18 b19 b20 ## 1 ( 1 ) " " " " " " ## 2 ( 1 ) " " " " " " ## 3 ( 1 ) " " " " " " ## 4 ( 1 ) " " " " " " ## 5 ( 1 ) " " " " " " ## 6 ( 1 ) " " " " " " ## 7 ( 1 ) " " " " " " ## 8 ( 1 ) " " " " " " ## 9 ( 1 ) " " " " " " ## 10 ( 1 ) " " " " "*" ## 11 ( 1 ) " " " " "*" ## 12 ( 1 ) "*" " " "*" ## 13 ( 1 ) "*" " " "*" ## 14 ( 1 ) "*" " " "*" ## 15 ( 1 ) "*" " " "*" ## 16 ( 1 ) "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" plot(summary(fit)$rss / 100, ylab = "MSE", type = "o") Plot the test set MSE associated with the best model of each size. predict.regsubsets <- function(object, newdata, id, ...) { form <- as.formula(object$call[[2]]) mat <- model.matrix(form, newdata) coefi <- coef(object, id = id) xvars <- names(coefi) mat[, xvars] %*% coefi } mse <- sapply(1:20, function(i) mean((test$y - predict(fit, test, i))^2)) plot(mse, ylab = "MSE", type = "o", pch = 19) For which model size does the test set MSE take on its minimum value? Comment on your results. 
If it takes on its minimum value for a model containing only an intercept or a model containing all of the features, then play around with the way that you are generating the data in (a) until you come up with a scenario in which the test set MSE is minimized for an intermediate model size. which.min(mse) ## [1] 4 The min test MSE is found when model size is 4. This corresponds to the simulated data which has four non-zero coefficients. set.seed(42) dat <- matrix(rnorm(1000 * 20), nrow = 1000) colnames(dat) <- paste0("b", 1:20) beta <- rep(0, 20) beta[1:9] <- c(5, 4, 2, 7, 0.01, 0.001, 0.05, 0.1, 0.5) y <- colSums((t(dat) * beta)) + rnorm(1000) dat <- data.frame(dat) dat$y <- y train <- dat[1:100, ] test <- dat[101:1000, ] fit <- regsubsets(y ~ ., data = train, nvmax = 20) summary(fit) ## Subset selection object ## Call: regsubsets.formula(y ~ ., data = train, nvmax = 20) ## 20 Variables (and intercept) ## Forced in Forced out ## b1 FALSE FALSE ## b2 FALSE FALSE ## b3 FALSE FALSE ## b4 FALSE FALSE ## b5 FALSE FALSE ## b6 FALSE FALSE ## b7 FALSE FALSE ## b8 FALSE FALSE ## b9 FALSE FALSE ## b10 FALSE FALSE ## b11 FALSE FALSE ## b12 FALSE FALSE ## b13 FALSE FALSE ## b14 FALSE FALSE ## b15 FALSE FALSE ## b16 FALSE FALSE ## b17 FALSE FALSE ## b18 FALSE FALSE ## b19 FALSE FALSE ## b20 FALSE FALSE ## 1 subsets of each size up to 20 ## Selection Algorithm: exhaustive ## b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 b16 b17 ## 1 ( 1 ) " " " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 2 ( 1 ) "*" " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 3 ( 1 ) "*" "*" " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 4 ( 1 ) "*" "*" "*" "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 5 ( 1 ) "*" "*" "*" "*" " " " " " " " " "*" " " " " " " " " " " " " " " " " ## 6 ( 1 ) "*" "*" "*" "*" " " " " " " " " "*" " " " " " " " " " " " " " " "*" ## 7 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " " " " " " " " " " " " " "*" ## 8 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " "*" " " " " " " " " " " "*" ## 9 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 10 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 11 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 12 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" "*" " " " " " " "*" "*" ## 13 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" "*" " " " " " " "*" "*" ## 14 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " " " " " "*" "*" ## 15 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " "*" " " "*" "*" ## 16 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" "*" "*" "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" ## b18 b19 b20 ## 1 ( 1 ) " " " " " " ## 2 ( 1 ) " " " " " " ## 3 ( 1 ) " " " " " " ## 4 ( 1 ) " " " " " " ## 5 ( 1 ) " " " " " " ## 6 ( 1 ) " " " " " " ## 7 ( 1 ) " " " " " " ## 8 ( 1 ) " " " " " " ## 9 ( 1 ) " " " " " " ## 10 ( 1 ) " " " " " " ## 11 ( 1 ) " " " " "*" ## 12 ( 1 ) " " " " "*" ## 13 ( 1 ) "*" " " "*" ## 14 ( 1 ) "*" " " "*" ## 15 ( 1 ) "*" " " "*" ## 16 ( 1 ) "*" " " "*" ## 17 ( 1 ) "*" " " "*" ## 18 ( 1 ) "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" mse <- sapply(1:20, function(i) 
mean((test$y - predict(fit, test, i))^2)) plot(mse, ylab = "MSE", type = "o", pch = 19) which.min(mse) ## [1] 5 How does the model at which the test set MSE is minimized compare to the true model used to generate the data? Comment on the coefficient values. The min test MSE is found when model size is 5 but there are 9 non-zero coefficients. coef(fit, id = 5) ## (Intercept) b1 b2 b3 b4 b9 ## 0.03507654 5.06180121 3.82785027 2.20434996 7.05312844 0.57032008 The coefficient values are well estimated when high, but the smaller coefficients are dropped. Create a plot displaying \\(\\sqrt{\\sum_{j=1}^p (\\beta_j - \\hat{\\beta}{}_j^r)^2}\\) for a range of values of \\(r\\), where \\(\\hat{\\beta}{}_j^r\\) is the \\(j\\)th coefficient estimate for the best model containing \\(r\\) coefficients. Comment on what you observe. How does this compare to the test MSE plot from (d)? names(beta) <- paste0("b", 1:20) b <- data.frame(id = names(beta), b = beta) out <- sapply(1:20, function(i) { c <- coef(fit, id = i)[-1] c <- data.frame(id = names(c), c = c) m <- merge(b, c) sqrt(sum((m$b - m$c)^2)) }) plot(out, ylab = "Mean squared coefficient error", type = "o", pch = 19) The error of the coefficient estimates is minimized when model size is 5. This corresponds to the point when test MSE was minimized. 6.2.4 Question 11 We will now try to predict per capita crime rate in the Boston data set. Try out some of the regression methods explored in this chapter, such as best subset selection, the lasso, ridge regression, and PCR. Present and discuss results for the approaches that you consider. set.seed(1) train <- sample(nrow(Boston), nrow(Boston) * 2 / 3) test <- setdiff(seq_len(nrow(Boston)), train) hist(log(Boston$crim)) Propose a model (or set of models) that seem to perform well on this data set, and justify your answer. Make sure that you are evaluating model performance using validation set error, cross-validation, or some other reasonable alternative, as opposed to using training error. We will try to fit models to log(Boston$crim) which is closer to a normal distribution. fit <- lm(log(crim) ~ ., data = Boston[train, ]) mean((predict(fit, Boston[test, ]) - log(Boston$crim[test]))^2) ## [1] 0.66578 mm <- model.matrix(log(crim) ~ ., data = Boston[train, ]) fit2 <- cv.glmnet(mm, log(Boston$crim[train]), alpha = 0) p <- predict(fit2, model.matrix(log(crim) ~ ., data = Boston[test, ]), s = fit2$lambda.min) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6511807 mm <- model.matrix(log(crim) ~ ., data = Boston[train, ]) fit3 <- cv.glmnet(mm, log(Boston$crim[train]), alpha = 1) p <- predict(fit3, model.matrix(log(crim) ~ ., data = Boston[test, ]), s = fit3$lambda.min) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6494337 fit4 <- pcr(log(crim) ~ ., data = Boston[train, ], scale = TRUE, validation = "CV") validationplot(fit4, val.type = "MSEP") p <- predict(fit4, Boston[test, ], ncomp = 8) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6561043 fit5 <- plsr(log(crim) ~ ., data = Boston[train, ], scale = TRUE, validation = "CV") validationplot(fit5, val.type = "MSEP") p <- predict(fit5, Boston[test, ], ncomp = 6) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6773353 In this case lasso (alpha = 1) seems to perform very slightly better than un-penalized regression. Some coefficients have been dropped: coef(fit3, s = fit3$lambda.min) ## 14 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) -4.713172675 ## (Intercept) . ## zn -0.011043739 ## indus 0.022515402 ## chas . ## nox 3.856157215 ## rm . 
## age 0.004210529 ## dis . ## rad 0.145604750 ## tax . ## ptratio -0.031787696 ## lstat 0.036112321 ## medv 0.004304181 Does your chosen model involve all of the features in the data set? Why or why not? No. Not all features are included, because the lasso penalty shrinks some coefficients exactly to zero. "],["moving-beyond-linearity.html", "7 Moving Beyond Linearity 7.1 Conceptual 7.2 Applied", " 7 Moving Beyond Linearity 7.1 Conceptual 7.1.1 Question 1 It was mentioned in the chapter that a cubic regression spline with one knot at \\(\\xi\\) can be obtained using a basis of the form \\(x, x^2, x^3, (x-\\xi)^3_+\\), where \\((x-\\xi)^3_+ = (x-\\xi)^3\\) if \\(x>\\xi\\) and equals 0 otherwise. We will now show that a function of the form \\[ f(x)=\\beta_0 +\\beta_1x+\\beta_2x^2 +\\beta_3x^3 +\\beta_4(x-\\xi)^3_+ \\] is indeed a cubic regression spline, regardless of the values of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3,\\beta_4\\). Find a cubic polynomial \\[ f_1(x) = a_1 + b_1x + c_1x^2 + d_1x^3 \\] such that \\(f(x) = f_1(x)\\) for all \\(x \\le \\xi\\). Express \\(a_1,b_1,c_1,d_1\\) in terms of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3, \\beta_4\\). In this case, for \\(x \\le \\xi\\), the \\((x-\\xi)^3_+\\) term is zero, so the cubic polynomial simply has coefficients \\(a_1 = \\beta_0\\), \\(b_1 = \\beta_1\\), \\(c_1 = \\beta_2\\), \\(d_1 = \\beta_3\\). Find a cubic polynomial \\[ f_2(x) = a_2 + b_2x + c_2x^2 + d_2x^3 \\] such that \\(f(x) = f_2(x)\\) for all \\(x > \\xi\\). Express \\(a_2, b_2, c_2, d_2\\) in terms of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3, \\beta_4\\). We have now established that \\(f(x)\\) is a piecewise polynomial. For \\(x \\gt \\xi\\), the cubic polynomial is (now including the \\(\\beta_4\\) term): \\[\\begin{align} f(x) = & \\beta_0 + \\beta_1x + \\beta_2x^2 + \\beta_3x^3 + \\beta_4(x-\\xi)^3 \\\\ = & \\beta_0 + \\beta_1x + \\beta_2x^2 + \\beta_3x^3 + \\beta_4(x^3 - 3x^2\\xi + 3x\\xi^2 -\\xi^3) \\\\ = & \\beta_0 - \\beta_4\\xi^3 + (\\beta_1 + 3\\beta_4\\xi^2)x + (\\beta_2 - 3\\beta_4\\xi)x^2 + (\\beta_3 + \\beta_4)x^3 \\end{align}\\] Therefore, \\(a_2 = \\beta_0 - \\beta_4\\xi^3\\), \\(b_2 = \\beta_1 + 3\\beta_4\\xi^2\\), \\(c_2 = \\beta_2 - 3\\beta_4\\xi\\), \\(d_2 = \\beta_3 + \\beta_4\\). Show that \\(f_1(\\xi) = f_2(\\xi)\\). That is, \\(f(x)\\) is continuous at \\(\\xi\\). To do this, we replace \\(x\\) with \\(\\xi\\) in the above equations and simplify. \\[\\begin{align} f_1(\\xi) = \\beta_0 + \\beta_1\\xi + \\beta_2\\xi^2 + \\beta_3\\xi^3 \\end{align}\\] \\[\\begin{align} f_2(\\xi) = & \\beta_0 - \\beta_4\\xi^3 + (\\beta_1 + 3\\beta_4\\xi^2)\\xi + (\\beta_2 - 3\\beta_4\\xi)\\xi^2 + (\\beta_3 + \\beta_4)\\xi^3 \\\\ = & \\beta_0 - \\beta_4\\xi^3 + \\beta_1\\xi + 3\\beta_4\\xi^3 + \\beta_2\\xi^2 - 3\\beta_4\\xi^3 + \\beta_3\\xi^3 + \\beta_4\\xi^3 \\\\ = & \\beta_0 + \\beta_1\\xi + \\beta_2\\xi^2 + \\beta_3\\xi^3 \\end{align}\\] Show that \\(f_1'(\\xi) = f_2'(\\xi)\\). That is, \\(f'(x)\\) is continuous at \\(\\xi\\). To do this we differentiate the above with respect to \\(x\\). \\[ f_1'(x) = \\beta_1 + 2\\beta_2x + 3\\beta_3x^2 \\\\ f_1'(\\xi) = \\beta_1 + 2\\beta_2\\xi + 3\\beta_3\\xi^2 \\] \\[\\begin{align} f_2'(x) & = \\beta_1 + 3\\beta_4\\xi^2 + 2(\\beta_2 - 3\\beta_4\\xi)x + 3(\\beta_3 + \\beta_4)x^2 \\\\ f_2'(\\xi) & = \\beta_1 + 3\\beta_4\\xi^2 + 2(\\beta_2 - 3\\beta_4\\xi)\\xi + 3(\\beta_3 + \\beta_4)\\xi^2 \\\\ & = \\beta_1 + 3\\beta_4\\xi^2 + 2\\beta_2\\xi - 6\\beta_4\\xi^2 + 3\\beta_3\\xi^2 + 3\\beta_4\\xi^2 \\\\ & = \\beta_1 + 2\\beta_2\\xi + 3\\beta_3\\xi^2 \\end{align}\\] Show that \\(f_1''(\\xi) = f_2''(\\xi)\\).
That is, \\(f''(x)\\) is continuous at \\(\\xi\\). Therefore, \\(f(x)\\) is indeed a cubic spline. Differentiating once more: \\[ f_1''(x) = 2\\beta_2 + 6\\beta_3x \\\\ f_1''(\\xi) = 2\\beta_2 + 6\\beta_3\\xi \\] \\[ f_2''(x) = 2\\beta_2 - 6\\beta_4\\xi + 6(\\beta_3 + \\beta_4)x \\] \\[\\begin{align} f_2''(\\xi) & = 2\\beta_2 - 6\\beta_4\\xi + 6\\beta_3\\xi + 6\\beta_4\\xi \\\\ & = 2\\beta_2 + 6\\beta_3\\xi \\end{align}\\] Hint: Parts (d) and (e) of this problem require knowledge of single-variable calculus. As a reminder, given a cubic polynomial \\[f_1(x) = a_1 + b_1x + c_1x^2 + d_1x^3,\\] the first derivative takes the form \\[f_1'(x) = b_1 + 2c_1x + 3d_1x^2\\] and the second derivative takes the form \\[f_1''(x) = 2c_1 + 6d_1x.\\] 7.1.2 Question 2 Suppose that a curve \\(\\hat{g}\\) is computed to smoothly fit a set of \\(n\\) points using the following formula: \\[ \\DeclareMathOperator*{\\argmin}{arg\\,min} % Jan Hlavacek \\hat{g} = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(m)}(x) \\right]^2 dx \\right), \\] where \\(g^{(m)}\\) represents the \\(m\\)th derivative of \\(g\\) (and \\(g^{(0)} = g\\)). Provide example sketches of \\(\\hat{g}\\) in each of the following scenarios. \\(\\lambda=\\infty, m=0\\). Here we penalize \\(g\\) itself, and an infinite \\(\\lambda\\) means that this penalty dominates. This means that \\(\\hat{g}\\) will be 0. \\(\\lambda=\\infty, m=1\\). Here we penalize the first derivative (the slope) of \\(g\\), and an infinite \\(\\lambda\\) means that this penalty dominates. Thus the slope will be 0, and \\(\\hat{g}\\) will be a horizontal line at the level that best fits the data, i.e. the mean of \\(y\\). \\(\\lambda=\\infty, m=2\\). Here we penalize the second derivative (the change of slope) of \\(g\\), and an infinite \\(\\lambda\\) means that this penalty dominates. Thus \\(\\hat{g}\\) will be a straight line (the least squares line), since only the curvature is penalized. \\(\\lambda=\\infty, m=3\\). Here we penalize the third derivative (the change of the change of slope) of \\(g\\), and an infinite \\(\\lambda\\) means that this penalty dominates. In other words, the third derivative is forced to 0, so \\(\\hat{g}\\) will be the best-fitting quadratic curve. \\(\\lambda=0, m=3\\). Here we penalize the third derivative, but a value of \\(\\lambda = 0\\) means that there is no penalty. As a result, the curve will interpolate the training points exactly. 7.1.3 Question 3 Suppose we fit a curve with basis functions \\(b_1(X) = X\\), \\(b_2(X) = (X - 1)^2I(X \\geq 1)\\). (Note that \\(I(X \\geq 1)\\) equals 1 for \\(X \\geq 1\\) and 0 otherwise.) We fit the linear regression model \\[Y = \\beta_0 +\\beta_1b_1(X) + \\beta_2b_2(X) + \\epsilon,\\] and obtain coefficient estimates \\(\\hat{\\beta}_0 = 1, \\hat{\\beta}_1 = 1, \\hat{\\beta}_2 = -2\\). Sketch the estimated curve between \\(X = -2\\) and \\(X = 2\\). Note the intercepts, slopes, and other relevant information. x <- seq(-2, 2, length.out = 1000) f <- function(x) 1 + x - 2 * (x - 1)^2 * I(x >= 1) plot(x, f(x), type = "l") grid() 7.1.4 Question 4 Suppose we fit a curve with basis functions \\(b_1(X) = I(0 \\leq X \\leq 2) - (X -1)I(1 \\leq X \\leq 2),\\) \\(b_2(X) = (X -3)I(3 \\leq X \\leq 4) + I(4 \\lt X \\leq 5)\\). We fit the linear regression model \\[Y = \\beta_0 +\\beta_1b_1(X) + \\beta_2b_2(X) + \\epsilon,\\] and obtain coefficient estimates \\(\\hat{\\beta}_0 = 1, \\hat{\\beta}_1 = 1, \\hat{\\beta}_2 = 3\\). Sketch the estimated curve between \\(X = -2\\) and \\(X = 6\\). Note the intercepts, slopes, and other relevant information.
x <- seq(-2, 6, length.out = 1000) b1 <- function(x) I(0 <= x & x <= 2) - (x - 1) * I(1 <= x & x <= 2) b2 <- function(x) (x - 3) * I(3 <= x & x <= 4) + I(4 < x & x <= 5) f <- function(x) 1 + 1*b1(x) + 3*b2(x) plot(x, f(x), type = "l") grid() 7.1.5 Question 5 Consider two curves, \\(\\hat{g}\\) and \\(\\hat{g}_2\\), defined by \\[ \\hat{g}_1 = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(3)}(x) \\right]^2 dx \\right), \\] \\[ \\hat{g}_2 = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(4)}(x) \\right]^2 dx \\right), \\] where \\(g^{(m)}\\) represents the \\(m\\)th derivative of \\(g\\). As \\(\\lambda \\to \\infty\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller training RSS? \\(\\hat{g}_2\\) is more flexible (by penalizing a higher derivative of \\(g\\)) and so will have a smaller training RSS. As \\(\\lambda \\to \\infty\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller test RSS? We cannot tell which function will produce a smaller test RSS, but there is chance that \\(\\hat{g}_1\\) will if \\(\\hat{g}_2\\) overfits the data. For \\(\\lambda = 0\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller training and test RSS? When \\(\\lambda = 0\\) there is no penalty, so both functions will give the same result: perfect interpolation of the training data. Thus training RSS will be 0 but test RSS could be high. 7.2 Applied 7.2.1 Question 6 In this exercise, you will further analyze the Wage data set considered throughout this chapter. Perform polynomial regression to predict wage using age. Use cross-validation to select the optimal degree \\(d\\) for the polynomial. What degree was chosen, and how does this compare to the results of hypothesis testing using ANOVA? Make a plot of the resulting polynomial fit to the data. library(ISLR2) library(boot) library(ggplot2) set.seed(42) res <- sapply(1:6, function(i) { fit <- glm(wage ~ poly(age, i), data = Wage) cv.glm(Wage, fit, K = 5)$delta[1] }) which.min(res) ## [1] 6 plot(1:6, res, xlab = "Degree", ylab = "Test MSE", type = "l") abline(v = which.min(res), col = "red", lty = 2) fit <- glm(wage ~ poly(age, which.min(res)), data = Wage) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) points(1:100, predict(fit, data.frame(age = 1:100)), type = "l", col = "red") summary(glm(wage ~ poly(age, 6), data = Wage)) ## ## Call: ## glm(formula = wage ~ poly(age, 6), data = Wage) ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 111.7036 0.7286 153.316 < 2e-16 *** ## poly(age, 6)1 447.0679 39.9063 11.203 < 2e-16 *** ## poly(age, 6)2 -478.3158 39.9063 -11.986 < 2e-16 *** ## poly(age, 6)3 125.5217 39.9063 3.145 0.00167 ** ## poly(age, 6)4 -77.9112 39.9063 -1.952 0.05099 . ## poly(age, 6)5 -35.8129 39.9063 -0.897 0.36956 ## poly(age, 6)6 62.7077 39.9063 1.571 0.11620 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 1592.512) ## ## Null deviance: 5222086 on 2999 degrees of freedom ## Residual deviance: 4766389 on 2993 degrees of freedom ## AIC: 30642 ## ## Number of Fisher Scoring iterations: 2 fit1 <- lm(wage ~ poly(age, 1), data = Wage) fit2 <- lm(wage ~ poly(age, 2), data = Wage) fit3 <- lm(wage ~ poly(age, 3), data = Wage) fit4 <- lm(wage ~ poly(age, 4), data = Wage) fit5 <- lm(wage ~ poly(age, 5), data = Wage) anova(fit1, fit2, fit3, fit4, fit5) ## Analysis of Variance Table ## ## Model 1: wage ~ poly(age, 1) ## Model 2: wage ~ poly(age, 2) ## Model 3: wage ~ poly(age, 3) ## Model 4: wage ~ poly(age, 4) ## Model 5: wage ~ poly(age, 5) ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 2998 5022216 ## 2 2997 4793430 1 228786 143.5931 < 2.2e-16 *** ## 3 2996 4777674 1 15756 9.8888 0.001679 ** ## 4 2995 4771604 1 6070 3.8098 0.051046 . ## 5 2994 4770322 1 1283 0.8050 0.369682 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 The selected degree is 4. When testing with ANOVA, degrees 1, 2 and 3 are highly significant and 4 is marginal. Fit a step function to predict wage using age, and perform cross-validation to choose the optimal number of cuts. Make a plot of the fit obtained. set.seed(42) res <- sapply(2:10, function(i) { Wage$cats <- cut(Wage$age, i) fit <- glm(wage ~ cats, data = Wage) cv.glm(Wage, fit, K = 5)$delta[1] }) names(res) <- 2:10 plot(2:10, res, xlab = "Cuts", ylab = "Test MSE", type = "l") which.min(res) ## 8 ## 7 abline(v = names(which.min(res)), col = "red", lty = 2) fit <- glm(wage ~ cut(age, 8), data = Wage) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) points(18:80, predict(fit, data.frame(age = 18:80)), type = "l", col = "red") 7.2.2 Question 7 The Wage data set contains a number of other features not explored in this chapter, such as marital status (maritl), job class (jobclass), and others. Explore the relationships between some of these other predictors and wage, and use non-linear fitting techniques in order to fit flexible models to the data. Create plots of the results obtained, and write a summary of your findings. plot(Wage$year, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$maritl, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$jobclass, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$education, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) We have a mix of categorical and continuous variables and also want to incorporate non-linear aspects of the continuous variables. A GAM is a good choice to model this situation. library(gam) ## Loading required package: splines ## Loading required package: foreach ## Loaded gam 1.22-4 fit0 <- gam(wage ~ s(year, 4) + s(age, 5) + education, data = Wage) fit2 <- gam(wage ~ s(year, 4) + s(age, 5) + education + maritl, data = Wage) fit1 <- gam(wage ~ s(year, 4) + s(age, 5) + education + jobclass, data = Wage) fit3 <- gam(wage ~ s(year, 4) + s(age, 5) + education + jobclass + maritl, data = Wage) anova(fit0, fit1, fit2, fit3) ## Analysis of Deviance Table ## ## Model 1: wage ~ s(year, 4) + s(age, 5) + education ## Model 2: wage ~ s(year, 4) + s(age, 5) + education + jobclass ## Model 3: wage ~ s(year, 4) + s(age, 5) + education + maritl ## Model 4: wage ~ s(year, 4) + s(age, 5) + education + jobclass + maritl ## Resid. Df Resid. 
Dev Df Deviance Pr(>Chi) ## 1 2986 3689770 ## 2 2985 3677553 1 12218 0.0014286 ** ## 3 2982 3595688 3 81865 1.071e-14 *** ## 4 2981 3581781 1 13907 0.0006687 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 par(mfrow = c(2, 3)) plot(fit3, se = TRUE, col = "blue") 7.2.3 Question 8 Fit some of the non-linear models investigated in this chapter to the Auto data set. Is there evidence for non-linear relationships in this data set? Create some informative plots to justify your answer. Here we want to explore a range of non-linear models. First let’s look at the relationships between the variables in the data. pairs(Auto, cex = 0.4, pch = 19) It does appear that there are some non-linear relationships (e.g. horsepower / weight and mpg). We will pick one variable (horsepower) to predict mpg and try the range of models discussed in this chapter. We will measure test MSE through cross-validation to compare the models. library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ lubridate 1.9.3 ✔ tibble 3.2.1 ## ✔ purrr 1.0.2 ✔ tidyr 1.3.1 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ purrr::accumulate() masks foreach::accumulate() ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ✖ purrr::when() masks foreach::when() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors set.seed(42) fit <- glm(mpg ~ horsepower, data = Auto) err <- cv.glm(Auto, fit, K = 10)$delta[1] fit1 <- glm(mpg ~ poly(horsepower, 4), data = Auto) err1 <- cv.glm(Auto, fit1, K = 10)$delta[1] q <- quantile(Auto$horsepower) Auto$hp_cats <- cut(Auto$horsepower, breaks = q, include.lowest = TRUE) fit2 <- glm(mpg ~ hp_cats, data = Auto) err2 <- cv.glm(Auto, fit2, K = 10)$delta[1] fit3 <- glm(mpg ~ bs(horsepower, df = 4), data = Auto) err3 <- cv.glm(Auto, fit3, K = 10)$delta[1] ## Warning in bs(horsepower, degree = 3L, knots = 92, Boundary.knots = c(46L, : ## some 'x' values beyond boundary knots may cause ill-conditioned bases ## Warning in bs(horsepower, degree = 3L, knots = 92, Boundary.knots = c(46L, : ## some 'x' values beyond boundary knots may cause ill-conditioned bases fit4 <- glm(mpg ~ ns(horsepower, 4), data = Auto) err4 <- cv.glm(Auto, fit4, K = 10)$delta[1] fit5 <- gam(mpg ~ s(horsepower, df = 4), data = Auto) # rough 10-fold cross-validation for gam. err5 <- mean(replicate(10, { b <- cut(sample(seq_along(Auto$horsepower)), 10) pred <- numeric() for (i in 1:10) { train <- b %in% levels(b)[-i] f <- gam(mpg ~ s(horsepower, df = 4), data = Auto[train, ]) pred[!train] <- predict(f, Auto[!train, ]) } mean((Auto$mpg - pred)^2) # MSE })) c(err, err1, err2, err3, err4, err5) ## [1] 24.38418 19.94222 20.37940 18.92802 19.33556 19.02999 anova(fit, fit1, fit2, fit3, fit4, fit5) ## Analysis of Deviance Table ## ## Model 1: mpg ~ horsepower ## Model 2: mpg ~ poly(horsepower, 4) ## Model 3: mpg ~ hp_cats ## Model 4: mpg ~ bs(horsepower, df = 4) ## Model 5: mpg ~ ns(horsepower, 4) ## Model 6: mpg ~ s(horsepower, df = 4) ## Resid. Df Resid. Dev Df Deviance F Pr(>F) ## 1 390 9385.9 ## 2 387 7399.5 3.00000000 1986.39 35.258 < 2.2e-16 *** ## 3 388 7805.4 -1.00000000 -405.92 21.615 4.578e-06 *** ## 4 387 7276.5 1.00000000 528.94 28.166 1.880e-07 *** ## 5 387 7248.6 0.00000000 27.91 ## 6 387 7267.7 0.00013612 -19.10 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 x <- seq(min(Auto$horsepower), max(Auto$horsepower), length.out=1000) pred <- data.frame( x = x, "Linear" = predict(fit, data.frame(horsepower = x)), "Polynomial" = predict(fit1, data.frame(horsepower = x)), "Step" = predict(fit2, data.frame(hp_cats = cut(x, breaks = q, include.lowest = TRUE))), "Regression spline" = predict(fit3, data.frame(horsepower = x)), "Natural spline" = predict(fit4, data.frame(horsepower = x)), "Smoothing spline" = predict(fit5, data.frame(horsepower = x)), check.names = FALSE ) pred <- pivot_longer(pred, -x) ggplot(Auto, aes(horsepower, mpg)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() 7.2.4 Question 9 This question uses the variables dis (the weighted mean of distances to five Boston employment centers) and nox (nitrogen oxides concentration in parts per 10 million) from the Boston data. We will treat dis as the predictor and nox as the response. Use the poly() function to fit a cubic polynomial regression to predict nox using dis. Report the regression output, and plot the resulting data and polynomial fits. fit <- glm(nox ~ poly(dis, 3), data = Boston) summary(fit) ## ## Call: ## glm(formula = nox ~ poly(dis, 3), data = Boston) ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 0.554695 0.002759 201.021 < 2e-16 *** ## poly(dis, 3)1 -2.003096 0.062071 -32.271 < 2e-16 *** ## poly(dis, 3)2 0.856330 0.062071 13.796 < 2e-16 *** ## poly(dis, 3)3 -0.318049 0.062071 -5.124 4.27e-07 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 0.003852802) ## ## Null deviance: 6.7810 on 505 degrees of freedom ## Residual deviance: 1.9341 on 502 degrees of freedom ## AIC: -1370.9 ## ## Number of Fisher Scoring iterations: 2 plot(nox ~ dis, data = Boston, col = alpha("steelblue", 0.4), pch = 19) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) lines(x, predict(fit, data.frame(dis = x)), col = "red", lty = 2) Plot the polynomial fits for a range of different polynomial degrees (say, from 1 to 10), and report the associated residual sum of squares. fits <- lapply(1:10, function(i) glm(nox ~ poly(dis, i), data = Boston)) x <- seq(min(Boston$dis), max(Boston$dis), length.out=1000) pred <- data.frame(lapply(fits, function(fit) predict(fit, data.frame(dis = x)))) colnames(pred) <- 1:10 pred$x <- x pred <- pivot_longer(pred, !x) ggplot(Boston, aes(dis, nox)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() # residual sum of squares do.call(anova, fits)[, 2] ## [1] 2.768563 2.035262 1.934107 1.932981 1.915290 1.878257 1.849484 1.835630 ## [9] 1.833331 1.832171 Perform cross-validation or another approach to select the optimal degree for the polynomial, and explain your results. res <- sapply(1:10, function(i) { fit <- glm(nox ~ poly(dis, i), data = Boston) cv.glm(Boston, fit, K = 10)$delta[1] }) which.min(res) ## [1] 4 The optimal degree is 3 based on cross-validation. Higher values tend to lead to overfitting. Use the bs() function to fit a regression spline to predict nox using dis. Report the output for the fit using four degrees of freedom. How did you choose the knots? Plot the resulting fit. fit <- glm(nox ~ bs(dis, df = 4), data = Boston) summary(fit) ## ## Call: ## glm(formula = nox ~ bs(dis, df = 4), data = Boston) ## ## Coefficients: ## Estimate Std. 
Error t value Pr(>|t|) ## (Intercept) 0.73447 0.01460 50.306 < 2e-16 *** ## bs(dis, df = 4)1 -0.05810 0.02186 -2.658 0.00812 ** ## bs(dis, df = 4)2 -0.46356 0.02366 -19.596 < 2e-16 *** ## bs(dis, df = 4)3 -0.19979 0.04311 -4.634 4.58e-06 *** ## bs(dis, df = 4)4 -0.38881 0.04551 -8.544 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 0.003837874) ## ## Null deviance: 6.7810 on 505 degrees of freedom ## Residual deviance: 1.9228 on 501 degrees of freedom ## AIC: -1371.9 ## ## Number of Fisher Scoring iterations: 2 plot(nox ~ dis, data = Boston, col = alpha("steelblue", 0.4), pch = 19) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) lines(x, predict(fit, data.frame(dis = x)), col = "red", lty = 2) Knots are chosen based on quantiles of the data. Now fit a regression spline for a range of degrees of freedom, and plot the resulting fits and report the resulting RSS. Describe the results obtained. fits <- lapply(3:10, function(i) { glm(nox ~ bs(dis, df = i), data = Boston) }) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) pred <- data.frame(lapply(fits, function(fit) predict(fit, data.frame(dis = x)))) colnames(pred) <- 3:10 pred$x <- x pred <- pivot_longer(pred, !x) ggplot(Boston, aes(dis, nox)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() At high numbers of degrees of freedom the splines overfit the data (particularly at extreme ends of the distribution of the predictor variable). Perform cross-validation or another approach in order to select the best degrees of freedom for a regression spline on this data. Describe your results. set.seed(42) err <- sapply(3:10, function(i) { fit <- glm(nox ~ bs(dis, df = i), data = Boston) suppressWarnings(cv.glm(Boston, fit, K = 10)$delta[1]) }) which.min(err) ## [1] 8 This approach would select 4 degrees of freedom for the spline. 7.2.5 Question 10 This question relates to the College data set. Split the data into a training set and a test set. Using out-of-state tuition as the response and the other variables as the predictors, perform forward stepwise selection on the training set in order to identify a satisfactory model that uses just a subset of the predictors. library(leaps) # helper function to predict from a regsubsets model predict.regsubsets <- function(object, newdata, id, ...) { form <- as.formula(object$call[[2]]) mat <- model.matrix(form, newdata) coefi <- coef(object, id = id) xvars <- names(coefi) mat[, xvars] %*% coefi } set.seed(42) train <- rep(TRUE, nrow(College)) train[sample(1:nrow(College), nrow(College) * 1 / 3)] <- FALSE fit <- regsubsets(Outstate ~ ., data = College[train, ], nvmax = 17, method = "forward") plot(summary(fit)$bic, type = "b") which.min(summary(fit)$bic) ## [1] 11 # or via cross-validation err <- sapply(1:17, function(i) { x <- coef(fit, id = i) mean((College$Outstate[!train] - predict(fit, College[!train, ], i))^2) }) which.min(err) ## [1] 16 min(summary(fit)$bic) ## [1] -690.9375 For the sake of simplicity we’ll choose 6 coef(fit, id = 6) ## (Intercept) PrivateYes Room.Board PhD perc.alumni ## -3540.0544008 2736.4231642 0.9061752 33.7848157 47.1998115 ## Expend Grad.Rate ## 0.2421685 33.3137332 Fit a GAM on the training data, using out-of-state tuition as the response and the features selected in the previous step as the predictors. Plot the results, and explain your findings. 
fit <- gam(Outstate ~ Private + s(Room.Board, 2) + s(PhD, 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), data = College[train, ]) Evaluate the model obtained on the test set, and explain the results obtained. pred <- predict(fit, College[!train, ]) err_gam <- mean((College$Outstate[!train] - pred)^2) plot(err, ylim = c(min(err_gam, err), max(err)), type = "b") abline(h = err_gam, col = "red", lty = 2) # r-squared 1 - err_gam / mean((College$Outstate[!train] - mean(College$Outstate[!train]))^2) ## [1] 0.7655905 For which variables, if any, is there evidence of a non-linear relationship with the response? summary(fit) ## ## Call: gam(formula = Outstate ~ Private + s(Room.Board, 2) + s(PhD, ## 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), ## data = College[train, ]) ## Deviance Residuals: ## Min 1Q Median 3Q Max ## -7112.59 -1188.98 33.13 1238.54 8738.65 ## ## (Dispersion Parameter for gaussian family taken to be 3577008) ## ## Null Deviance: 8471793308 on 517 degrees of freedom ## Residual Deviance: 1809966249 on 506.0001 degrees of freedom ## AIC: 9300.518 ## ## Number of Local Scoring Iterations: NA ## ## Anova for Parametric Effects ## Df Sum Sq Mean Sq F value Pr(>F) ## Private 1 2327235738 2327235738 650.610 < 2.2e-16 *** ## s(Room.Board, 2) 1 1741918028 1741918028 486.976 < 2.2e-16 *** ## s(PhD, 2) 1 668408518 668408518 186.863 < 2.2e-16 *** ## s(perc.alumni, 2) 1 387819183 387819183 108.420 < 2.2e-16 *** ## s(Expend, 2) 1 625813340 625813340 174.954 < 2.2e-16 *** ## s(Grad.Rate, 2) 1 111881207 111881207 31.278 3.664e-08 *** ## Residuals 506 1809966249 3577008 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Anova for Nonparametric Effects ## Npar Df Npar F Pr(F) ## (Intercept) ## Private ## s(Room.Board, 2) 1 2.224 0.13648 ## s(PhD, 2) 1 5.773 0.01664 * ## s(perc.alumni, 2) 1 0.365 0.54581 ## s(Expend, 2) 1 61.182 3.042e-14 *** ## s(Grad.Rate, 2) 1 4.126 0.04274 * ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Non-linear relationships are significant for Expend and PhD. 7.2.6 Question 11 In Section 7.7, it was mentioned that GAMs are generally fit using a backfitting approach. The idea behind backfitting is actually quite simple. We will now explore backfitting in the context of multiple linear regression. Suppose that we would like to perform multiple linear regression, but we do not have software to do so. Instead, we only have software to perform simple linear regression. Therefore, we take the following iterative approach: we repeatedly hold all but one coefficient estimate fixed at its current value, and update only that coefficient estimate using a simple linear regression. The process is continued until convergence—that is, until the coefficient estimates stop changing. We now try this out on a toy example. Generate a response \\(Y\\) and two predictors \\(X_1\\) and \\(X_2\\), with \\(n = 100\\). set.seed(42) x1 <- rnorm(100) x2 <- rnorm(100) y <- 2 + 0.2 * x1 + 4 * x2 + rnorm(100) Initialize \\(\\hat{\\beta}_1\\) to take on a value of your choice. It does not matter 1 what value you choose. 
beta1 <- 20 Keeping \\(\\hat{\\beta}_1\\) fixed, fit the model \\[Y - \\hat{\\beta}_1X_1 = \\beta_0 + \\beta_2X_2 + \\epsilon.\\] You can do this as follows: > a <- y - beta1 * x1 > beta2 <- lm(a ~ x2)$coef[2] a <- y - beta1*x1 beta2 <- lm(a ~ x2)$coef[2] Keeping \\(\\hat{\\beta}_2\\) fixed, fit the model \\[Y - \\hat{\\beta}_2X_2 = \\beta_0 + \\beta_1 X_1 + \\epsilon.\\] You can do this as follows: > a <- y - beta2 * x2 > beta1 <- lm(a ~ x1)$coef[2] a <- y - beta2 * x2 beta1 <- lm(a ~ x1)$coef[2] Write a for loop to repeat (c) and (d) 1,000 times. Report the estimates of \\(\\hat{\\beta}_0, \\hat{\\beta}_1,\\) and \\(\\hat{\\beta}_2\\) at each iteration of the for loop. Create a plot in which each of these values is displayed, with \\(\\hat{\\beta}_0, \\hat{\\beta}_1,\\) and \\(\\hat{\\beta}_2\\) each shown in a different color. res <- matrix(NA, nrow = 1000, ncol = 3) colnames(res) <- c("beta0", "beta1", "beta2") beta1 <- 20 for (i in 1:1000) { beta2 <- lm(y - beta1*x1 ~ x2)$coef[2] beta1 <- lm(y - beta2*x2 ~ x1)$coef[2] beta0 <- lm(y - beta2*x2 ~ x1)$coef[1] res[i, ] <- c(beta0, beta1, beta2) } res <- as.data.frame(res) res$Iteration <- 1:1000 res <- pivot_longer(res, !Iteration) p <- ggplot(res, aes(x=Iteration, y=value, color=name)) + geom_line() + geom_point() + scale_x_continuous(trans = "log10") p Compare your answer in (e) to the results of simply performing multiple linear regression to predict \\(Y\\) using \\(X_1\\) and \\(X_2\\). Use the abline() function to overlay those multiple linear regression coefficient estimates on the plot obtained in (e). fit <- lm(y ~ x1 + x2) coef(fit) ## (Intercept) x1 x2 ## 2.00176627 0.05629075 4.08529318 p + geom_hline(yintercept = coef(fit), lty = 2) On this data set, how many backfitting iterations were required in order to obtain a “good” approximation to the multiple regression coefficient estimates? In this case, good estimates were obtained after 3 iterations. 7.2.7 Question 12 This problem is a continuation of the previous exercise. In a toy example with \\(p = 100\\), show that one can approximate the multiple linear regression coefficient estimates by repeatedly performing simple linear regression in a backfitting procedure. How many backfitting iterations are required in order to obtain a “good” approximation to the multiple regression coefficient estimates? Create a plot to justify your answer. 
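The same partial-residual update extends directly to \\(p\\) predictors: cycling over \\(k = 1, \\dots, p\\), each step forms the partial residual \\[ r^{(k)} = y - \\sum_{j \\neq k} \\hat{\\beta}_j x_j \\] and replaces \\(\\hat{\\beta}_k\\) with the slope from the simple regression of \\(r^{(k)}\\) on \\(x_k\\), repeating until the coefficients stop changing. The code below implements exactly this double loop.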
set.seed(42) p <- 100 n <- 1000 betas <- rnorm(p) * 5 x <- matrix(rnorm(n * p), ncol = p, nrow = n) y <- (x %*% betas) + rnorm(n) # ignore beta0 for simplicity # multiple regression fit <- lm(y ~ x - 1) coef(fit) ## x1 x2 x3 x4 x5 x6 ## 6.9266184 -2.8428817 1.8686821 3.1466472 1.9601927 -0.5529214 ## x7 x8 x9 x10 x11 x12 ## 7.4786723 -0.4454637 10.0816005 -0.2391234 6.5832468 11.4451280 ## x13 x14 x15 x16 x17 x18 ## -6.9684368 -1.3604495 -0.6310041 3.1786639 -1.4470502 -13.2957027 ## x19 x20 x21 x22 x23 x24 ## -12.2061834 6.5765842 -1.5227262 -8.8855906 -0.8422954 6.1189230 ## x25 x26 x27 x28 x29 x30 ## 9.4395267 -2.1697854 -1.2738835 -8.8457987 2.2851699 -3.1922704 ## x31 x32 x33 x34 x35 x36 ## 2.2812995 3.4695892 5.1162617 -3.0423873 2.4985589 -8.5952764 ## x37 x38 x39 x40 x41 x42 ## -3.9539370 -4.2616463 -12.0038342 0.1981058 1.0559250 -1.8205017 ## x43 x44 x45 x46 x47 x48 ## 3.7739990 -3.6240020 -6.8575534 2.1042998 -4.0228773 7.1880298 ## x49 x50 x51 x52 x53 x54 ## -2.1967821 3.3137115 1.6406524 -3.9402065 7.9067705 3.1815846 ## x55 x56 x57 x58 x59 x60 ## 0.4504175 1.4003479 3.3999814 0.4317695 -14.9255798 1.3816878 ## x61 x62 x63 x64 x65 x66 ## -1.8071634 0.9907740 2.9771540 6.9528872 -3.5956916 6.5283946 ## x67 x68 x69 x70 x71 x72 ## 1.6798820 5.1911857 4.5573256 3.5961319 -5.1909352 -0.4869003 ## x73 x74 x75 x76 x77 x78 ## 3.1472166 -4.7898363 -2.7402076 2.9247173 3.8659938 2.3686379 ## x79 x80 x81 x82 x83 x84 ## -4.4261734 -5.5020688 7.5807239 1.3010702 0.4378713 -0.5856580 ## x85 x86 x87 x88 x89 x90 ## -5.9799328 3.0089329 -1.1230969 -0.8857679 4.7211363 4.1042952 ## x91 x92 x93 x94 x95 x96 ## 6.9492037 -2.3959211 3.2188522 6.9947040 -5.5392641 -4.3114784 ## x97 x98 x99 x100 ## -5.7287292 -7.3148812 0.3454408 3.2830658 # backfitting backfit <- function(x, y, iter = 20) { beta <- matrix(0, ncol = ncol(x), nrow = iter + 1) for (i in 1:iter) { for (k in 1:ncol(x)) { residual <- y - (x[, -k] %*% beta[i, -k]) beta[i + 1, k] <- lm(residual ~ x[, k])$coef[2] } } beta[-1, ] } res <- backfit(x, y) error <- rowMeans(sweep(res, 2, betas)^2) plot(error, log = "x", type = "b") # backfitting error error[length(error)] ## [1] 0.001142494 # lm error mean((coef(fit) - betas)^2) ## [1] 0.001138655 We need around 5 to 6 iterations to obtain a good estimate of the coefficients. "],["tree-based-methods.html", "8 Tree-Based Methods 8.1 Conceptual 8.2 Applied", " 8 Tree-Based Methods 8.1 Conceptual 8.1.1 Question 1 Draw an example (of your own invention) of a partition of two-dimensional feature space that could result from recursive binary splitting. Your example should contain at least six regions. Draw a decision tree corresponding to this partition. Be sure to label all aspects of your figures, including the regions \\(R_1, R_2, ...,\\) the cutpoints \\(t_1, t_2, ...,\\) and so forth. Hint: Your result should look something like Figures 8.1 and 8.2. 
library(showtext) showtext::showtext_auto() library(ggplot2) library(tidyverse) library(ggtree) tree <- ape::read.tree(text = "(((R1:1,R2:1)N1:2,R3:4)N2:2,(R4:2,(R5:1,R6:1)R3:2)N4:5)R;") tree$node.label <- c("Age < 40", "Weight < 100", "Weight < 70", "Age < 60", "Weight < 80") ggtree(tree, ladderize = FALSE) + scale_x_reverse() + coord_flip() + geom_tiplab(vjust = 2, hjust = 0.5) + geom_text2(aes(label=label, subset=!isTip), hjust = -0.1, vjust = -1) plot(NULL, xlab="Age (years)", ylab="Weight (kg)", xlim = c(0, 100), ylim = c(40, 160), xaxs = "i", yaxs = "i") abline(v = 40, col = "red", lty = 2) lines(c(0, 40), c(100, 100), col = "blue", lty = 2) lines(c(0, 40), c(70, 70), col = "blue", lty = 2) abline(v = 60, col = "red", lty = 2) lines(c(60, 100), c(80, 80), col = "blue", lty = 2) text( c(20, 20, 20, 50, 80, 80), c(55, 85, 130, 100, 60, 120), labels = c("R1", "R2", "R3", "R4", "R5", "R6") ) 8.1.2 Question 2 It is mentioned in Section 8.2.3 that boosting using depth-one trees (or stumps) leads to an additive model: that is, a model of the form \\[ f(X) = \\sum_{j=1}^p f_j(X_j). \\] Explain why this is the case. You can begin with (8.12) in Algorithm 8.2. Equation 8.1 is: \\[ f(x) = \\sum_{b=1}^B(\\lambda \\hat{f}^b(x) \\] where \\(\\hat{f}^b(x)\\) represents the \\(b\\)th tree with (in this case) 1 split. Since 1-depth trees involve only one variable, and the total function for \\(x\\) involves adding the outcome for each, this model is an additive. Depth 2 trees would allow for interactions between two variables. 8.1.3 Question 3 Consider the Gini index, classification error, and cross-entropy in a simple classification setting with two classes. Create a single plot that displays each of these quantities as a function of \\(\\hat{p}_{m1}\\). The \\(x\\)-axis should display \\(\\hat{p}_{m1}\\), ranging from 0 to 1, and the \\(y\\)-axis should display the value of the Gini index, classification error, and entropy. Hint: In a setting with two classes, \\(\\hat{p}_{m1} = 1 - \\hat{p}_{m2}\\). You could make this plot by hand, but it will be much easier to make in R. The Gini index is defined by \\[G = \\sum_{k=1}^{K} \\hat{p}_{mk}(1 - \\hat{p}_{mk})\\] Entropy is given by \\[D = -\\sum_{k=1}^{K} \\hat{p}_{mk}\\log(\\hat{p}_{mk})\\] The classification error is \\[E = 1 - \\max_k(\\hat{p}_{mk})\\] # Function definitions are for when there's two classes only p <- seq(0, 1, length.out = 100) data.frame( x = p, "Gini index" = p * (1 - p) * 2, "Entropy" = -(p * log(p) + (1 - p) * log(1 - p)), "Classification error" = 1 - pmax(p, 1 - p), check.names = FALSE ) |> pivot_longer(!x) |> ggplot(aes(x = x, y = value, color = name)) + geom_line(na.rm = TRUE) 8.1.4 Question 4 This question relates to the plots in Figure 8.12. Sketch the tree corresponding to the partition of the predictor space illustrated in the left-hand panel of Figure 8.12. The numbers inside the boxes indicate the mean of \\(Y\\) within each region. tree <- ape::read.tree(text = "(((3:1.5,(10:1,0:1)A:1)B:1,15:2)C:1,5:2)D;") tree$node.label <- c("X1 < 1", "X2 < 1", "X1 < 0", "X2 < 0") ggtree(tree, ladderize = FALSE) + scale_x_reverse() + coord_flip() + geom_tiplab(vjust = 2, hjust = 0.5) + geom_text2(aes(label=label, subset=!isTip), hjust = -0.1, vjust = -1) Create a diagram similar to the left-hand panel of Figure 8.12, using the tree illustrated in the right-hand panel of the same figure. You should divide up the predictor space into the correct regions, and indicate the mean for each region. 
plot(NULL, xlab="X1", ylab="X2", xlim = c(-1, 2), ylim = c(0, 3), xaxs = "i", yaxs = "i") abline(h = 1, col = "red", lty = 2) lines(c(1, 1), c(0, 1), col = "blue", lty = 2) lines(c(-1, 2), c(2, 2), col = "red", lty = 2) lines(c(0, 0), c(1, 2), col = "blue", lty = 2) text( c(0, 1.5, -0.5, 1, 0.5), c(0.5, 0.5, 1.5, 1.5, 2.5), labels = c("-1.80", "0.63", "-1.06", "0.21", "2.49") ) 8.1.5 Question 5 Suppose we produce ten bootstrapped samples from a data set containing red and green classes. We then apply a classification tree to each bootstrapped sample and, for a specific value of \\(X\\), produce 10 estimates of \\(P(\\textrm{Class is Red}|X)\\): \\[0.1, 0.15, 0.2, 0.2, 0.55, 0.6, 0.6, 0.65, 0.7, \\textrm{and } 0.75.\\] There are two common ways to combine these results together into a single class prediction. One is the majority vote approach discussed in this chapter. The second approach is to classify based on the average probability. In this example, what is the final classification under each of these two approaches? x <- c(0.1, 0.15, 0.2, 0.2, 0.55, 0.6, 0.6, 0.65, 0.7, 0.75) ifelse(mean(x > 0.5), "red", "green") # majority vote ## [1] "red" ifelse(mean(x) > 0.5, "red", "green") # average probability ## [1] "green" 8.1.6 Question 6 Provide a detailed explanation of the algorithm that is used to fit a regression tree. First we perform binary recursive splitting of the data, to minimize RSS at each split. This is continued until there are n samples present in each leaf. Then we prune the tree to a set of subtrees determined by a parameter \\(\\alpha\\). Using K-fold CV, we select \\(\\alpha\\) to minimize the cross validation error. The final tree is then calculated using the complete dataset with the selected \\(\\alpha\\) value. 8.2 Applied 8.2.1 Question 7 In the lab, we applied random forests to the Boston data using mtry = 6 and using ntree = 25 and ntree = 500. Create a plot displaying the test error resulting from random forests on this data set for a more comprehensive range of values for mtry and ntree. You can model your plot after Figure 8.10. Describe the results obtained. library(ISLR2) library(randomForest) ## randomForest 4.7-1.1 ## Type rfNews() to see new features/changes/bug fixes. ## ## Attaching package: 'randomForest' ## The following object is masked from 'package:ggtree': ## ## margin ## The following object is masked from 'package:dplyr': ## ## combine ## The following object is masked from 'package:ggplot2': ## ## margin set.seed(42) train <- sample(c(TRUE, FALSE), nrow(Boston), replace = TRUE) rf_err <- function(mtry) { randomForest( Boston[train, -13], y = Boston[train, 13], xtest = Boston[!train, -13], ytest = Boston[!train, 13], mtry = mtry, ntree = 500 )$test$mse } res <- lapply(c(1, 2, 3, 5, 7, 10, 12), rf_err) names(res) <- c(1, 2, 3, 5, 7, 10, 12) data.frame(res, check.names = FALSE) |> mutate(n = 1:500) |> pivot_longer(!n) |> ggplot(aes(x = n, y = value, color = name)) + geom_line(na.rm = TRUE) + xlab("Number of trees") + ylab("Error") + scale_y_log10() + scale_color_discrete(name = "No. variables at\\neach split") 8.2.2 Question 8 In the lab, a classification tree was applied to the Carseats data set after converting Sales into a qualitative response variable. Now we will seek to predict Sales using regression trees and related approaches, treating the response as a quantitative variable. Split the data set into a training set and a test set. 
set.seed(42) train <- sample(c(TRUE, FALSE), nrow(Carseats), replace = TRUE) Fit a regression tree to the training set. Plot the tree, and interpret the results. What test error rate do you obtain? library(tree) tr <- tree(Sales ~ ., data = Carseats[train, ]) summary(tr) ## ## Regression tree: ## tree(formula = Sales ~ ., data = Carseats[train, ]) ## Variables actually used in tree construction: ## [1] "ShelveLoc" "Price" "Income" "Advertising" "CompPrice" ## [6] "Age" ## Number of terminal nodes: 16 ## Residual mean deviance: 2.356 = 424.1 / 180 ## Distribution of residuals: ## Min. 1st Qu. Median Mean 3rd Qu. Max. ## -4.54900 -0.82980 0.03075 0.00000 0.89250 4.83100 plot(tr) text(tr, pretty = 0, digits = 2, cex = 0.8) carseats_mse <- function(model) { p <- predict(model, newdata = Carseats[!train, ]) mean((p - Carseats[!train, "Sales"])^2) } carseats_mse(tr) ## [1] 4.559764 Use cross-validation in order to determine the optimal level of tree complexity. Does pruning the tree improve the test error rate? res <- cv.tree(tr) plot(res$size, res$dev, type = "b", xlab = "Tree size", ylab = "Deviance") min <- which.min(res$dev) abline(v = res$size[min], lty = 2, col = "red") Pruning improves performance very slightly (though this is not repeatable in different rounds of cross-validation). Arguably, a good balance is achieved when the tree size is 11. ptr <- prune.tree(tr, best = 11) plot(ptr) text(ptr, pretty = 0, digits = 2, cex = 0.8) carseats_mse(ptr) ## [1] 4.625875 Use the bagging approach in order to analyze this data. What test error rate do you obtain? Use the importance() function to determine which variables are most important. # Here we can use random Forest with mtry = 10 = p (the number of predictor # variables) to perform bagging bagged <- randomForest(Sales ~ ., data = Carseats[train, ], mtry = 10, ntree = 200, importance = TRUE) carseats_mse(bagged) ## [1] 2.762861 importance(bagged) ## %IncMSE IncNodePurity ## CompPrice 11.2608998 104.474222 ## Income 5.0953983 73.275066 ## Advertising 12.9011190 125.886762 ## Population 3.4071044 60.095200 ## Price 34.6904380 450.952728 ## ShelveLoc 33.7059874 374.808575 ## Age 7.9101141 143.652934 ## Education -2.1154997 32.712444 ## Urban 0.9604097 7.029648 ## US 3.1336559 6.287048 The test error rate is ~2.8 which is a substantial improvement over the pruned regression tree above. Use random forests to analyze this data. What test error rate do you obtain? Use the importance() function to determine which variables are most important. Describe the effect of \\(m\\), the number of variables considered at each split, on the error rate obtained. rf <- randomForest(Sales ~ ., data = Carseats[train, ], mtry = 3, ntree = 500, importance = TRUE) carseats_mse(rf) ## [1] 3.439357 importance(rf) ## %IncMSE IncNodePurity ## CompPrice 8.5717587 122.75189 ## Income 2.8955756 116.33951 ## Advertising 13.0681194 128.13563 ## Population 2.0475415 104.03803 ## Price 34.7934136 342.84663 ## ShelveLoc 39.0704834 292.56638 ## Age 7.7941744 135.69061 ## Education 0.8770806 64.67614 ## Urban -0.3301478 13.83594 ## US 6.2716539 22.07306 The test error rate is ~3.0 which is a substantial improvement over the pruned regression tree above, although not quite as good as the bagging approach. Now analyze the data using BART, and report your results. 
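Before fitting, a brief note on what BART does: it models the regression function as a sum of many small trees, \\[ f(x) = \\sum_{j=1}^{m} g(x; T_j, M_j), \\] with priors that keep each individual tree shallow. gbart samples from the posterior by MCMC (the 1,100 iterations reported below, of which the first 100 are burn-in), and predictions are averages over the retained posterior draws.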
library(BART) ## Loading required package: nlme ## ## Attaching package: 'nlme' ## The following object is masked from 'package:ggtree': ## ## collapse ## The following object is masked from 'package:dplyr': ## ## collapse ## Loading required package: survival # For ease, we'll create a fake "predict" method that just returns # yhat.test.mean regardless of provided "newdata" predict.wbart <- function(model, ...) model$yhat.test.mean bartfit <- gbart(Carseats[train, 2:11], Carseats[train, 1], x.test = Carseats[!train, 2:11]) ## *****Calling gbart: type=1 ## *****Data: ## data:n,p,np: 196, 14, 204 ## y1,yn: 2.070867, 2.280867 ## x1,x[n*p]: 138.000000, 1.000000 ## xp1,xp[np*p]: 141.000000, 1.000000 ## *****Number of Trees: 200 ## *****Number of Cut Points: 58 ... 1 ## *****burn,nd,thin: 100,1000,1 ## *****Prior:beta,alpha,tau,nu,lambda,offset: 2,0.95,0.287616,3,0.21118,7.42913 ## *****sigma: 1.041218 ## *****w (weights): 1.000000 ... 1.000000 ## *****Dirichlet:sparse,theta,omega,a,b,rho,augment: 0,0,1,0.5,1,14,0 ## *****printevery: 100 ## ## MCMC ## done 0 (out of 1100) ## done 100 (out of 1100) ## done 200 (out of 1100) ## done 300 (out of 1100) ## done 400 (out of 1100) ## done 500 (out of 1100) ## done 600 (out of 1100) ## done 700 (out of 1100) ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) ## time: 3s ## trcnt,tecnt: 1000,1000 carseats_mse(bartfit) ## [1] 1.631285 The test error rate is ~1.6 which is an improvement over random forest and bagging. 8.2.3 Question 9 This problem involves the OJ data set which is part of the ISLR2 package. Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations. set.seed(42) train <- sample(1:nrow(OJ), 800) test <- setdiff(1:nrow(OJ), train) Fit a tree to the training data, with Purchase as the response and the other variables except for Buy as predictors. Use the summary() function to produce summary statistics about the tree, and describe the results obtained. What is the training error rate? How many terminal nodes does the tree have? tr <- tree(Purchase ~ ., data = OJ[train, ]) summary(tr) ## ## Classification tree: ## tree(formula = Purchase ~ ., data = OJ[train, ]) ## Variables actually used in tree construction: ## [1] "LoyalCH" "SalePriceMM" "PriceDiff" ## Number of terminal nodes: 8 ## Residual mean deviance: 0.7392 = 585.5 / 792 ## Misclassification error rate: 0.1638 = 131 / 800 Type in the name of the tree object in order to get a detailed text output. Pick one of the terminal nodes, and interpret the information displayed. 
tr ## node), split, n, deviance, yval, (yprob) ## * denotes terminal node ## ## 1) root 800 1066.00 CH ( 0.61500 0.38500 ) ## 2) LoyalCH < 0.48285 285 296.00 MM ( 0.21404 0.78596 ) ## 4) LoyalCH < 0.064156 64 0.00 MM ( 0.00000 1.00000 ) * ## 5) LoyalCH > 0.064156 221 260.40 MM ( 0.27602 0.72398 ) ## 10) SalePriceMM < 2.04 128 123.50 MM ( 0.18750 0.81250 ) * ## 11) SalePriceMM > 2.04 93 125.00 MM ( 0.39785 0.60215 ) * ## 3) LoyalCH > 0.48285 515 458.10 CH ( 0.83689 0.16311 ) ## 6) LoyalCH < 0.753545 230 282.70 CH ( 0.69565 0.30435 ) ## 12) PriceDiff < 0.265 149 203.00 CH ( 0.57718 0.42282 ) ## 24) PriceDiff < -0.165 32 38.02 MM ( 0.28125 0.71875 ) * ## 25) PriceDiff > -0.165 117 150.30 CH ( 0.65812 0.34188 ) ## 50) LoyalCH < 0.703993 105 139.60 CH ( 0.61905 0.38095 ) * ## 51) LoyalCH > 0.703993 12 0.00 CH ( 1.00000 0.00000 ) * ## 13) PriceDiff > 0.265 81 47.66 CH ( 0.91358 0.08642 ) * ## 7) LoyalCH > 0.753545 285 111.70 CH ( 0.95088 0.04912 ) * Create a plot of the tree, and interpret the results. plot(tr) text(tr, pretty = 0, digits = 2, cex = 0.8) Predict the response on the test data, and produce a confusion matrix comparing the test labels to the predicted test labels. What is the test error rate? table(predict(tr, OJ[test, ], type = "class"), OJ[test, "Purchase"]) ## ## CH MM ## CH 125 15 ## MM 36 94 Apply the cv.tree() function to the training set in order to determine the optimal tree size. set.seed(42) res <- cv.tree(tr) Produce a plot with tree size on the \\(x\\)-axis and cross-validated classification error rate on the \\(y\\)-axis. plot(res$size, res$dev, type = "b", xlab = "Tree size", ylab = "Deviance") min <- which.min(res$dev) abline(v = res$size[min], lty = 2, col = "red") Which tree size corresponds to the lowest cross-validated classification error rate? res$size[min] ## [1] 6 Produce a pruned tree corresponding to the optimal tree size obtained using cross-validation. If cross-validation does not lead to selection of a pruned tree, then create a pruned tree with five terminal nodes. ptr <- prune.tree(tr, best = res$size[min]) plot(ptr) text(ptr, pretty = 0, digits = 2, cex = 0.8) Compare the training error rates between the pruned and unpruned trees. Which is higher? oj_misclass <- function(model) { summary(model)$misclass[1] / summary(model)$misclass[2] } oj_misclass(tr) ## [1] 0.16375 oj_misclass(ptr) ## [1] 0.16375 The training misclassification error rate is slightly higher for the pruned tree. Compare the test error rates between the pruned and unpruned trees. Which is higher? oj_err <- function(model) { p <- predict(model, newdata = OJ[test, ], type = "class") mean(p != OJ[test, "Purchase"]) } oj_err(tr) ## [1] 0.1888889 oj_err(ptr) ## [1] 0.1888889 The test misclassification error rate is slightly higher for the pruned tree. 8.2.4 Question 10 We now use boosting to predict Salary in the Hitters data set. Remove the observations for whom the salary information is unknown, and then log-transform the salaries. dat <- Hitters dat <- dat[!is.na(dat$Salary), ] dat$Salary <- log(dat$Salary) Create a training set consisting of the first 200 observations, and a test set consisting of the remaining observations. train <- 1:200 test <- setdiff(1:nrow(dat), train) Perform boosting on the training set with 1,000 trees for a range of values of the shrinkage parameter \\(\\lambda\\). Produce a plot with different shrinkage values on the \\(x\\)-axis and the corresponding training set MSE on the \\(y\\)-axis. 
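For reference, each boosting iteration fits a small tree \\(\\hat{f}^b\\) to the current residuals and then updates \\[ \\hat{f}(x) \\leftarrow \\hat{f}(x) + \\lambda\\hat{f}^b(x), \\qquad r_i \\leftarrow r_i - \\lambda\\hat{f}^b(x_i), \\] so the shrinkage parameter \\(\\lambda\\) controls how slowly the model learns; smaller values typically require more trees to reach the same training error.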
library(gbm) ## Loaded gbm 2.2.2 ## This version of gbm is no longer under development. Consider transitioning to gbm3, https://github.com/gbm-developers/gbm3 set.seed(42) lambdas <- 10 ^ seq(-3, 0, by = 0.1) fits <- lapply(lambdas, function(lam) { gbm(Salary ~ ., data = dat[train, ], distribution = "gaussian", n.trees = 1000, shrinkage = lam) }) errs <- sapply(fits, function(fit) { p <- predict(fit, dat[train, ], n.trees = 1000) mean((p - dat[train, ]$Salary)^2) }) plot(lambdas, errs, type = "b", xlab = "Shrinkage values", ylab = "Training MSE", log = "xy") Produce a plot with different shrinkage values on the \\(x\\)-axis and the corresponding test set MSE on the \\(y\\)-axis. errs <- sapply(fits, function(fit) { p <- predict(fit, dat[test, ], n.trees = 1000) mean((p - dat[test, ]$Salary)^2) }) plot(lambdas, errs, type = "b", xlab = "Shrinkage values", ylab = "Training MSE", log = "xy") min(errs) ## [1] 0.249881 abline(v = lambdas[which.min(errs)], lty = 2, col = "red") Compare the test MSE of boosting to the test MSE that results from applying two of the regression approaches seen in Chapters 3 and 6. Linear regression fit1 <- lm(Salary ~ ., data = dat[train, ]) mean((predict(fit1, dat[test, ]) - dat[test, "Salary"])^2) ## [1] 0.4917959 Ridge regression library(glmnet) ## Loading required package: Matrix ## ## Attaching package: 'Matrix' ## The following object is masked from 'package:ggtree': ## ## expand ## The following objects are masked from 'package:tidyr': ## ## expand, pack, unpack ## Loaded glmnet 4.1-8 x <- model.matrix(Salary ~ ., data = dat[train, ]) x.test <- model.matrix(Salary ~ ., data = dat[test, ]) y <- dat[train, "Salary"] fit2 <- glmnet(x, y, alpha = 1) mean((predict(fit2, s = 0.1, newx = x.test) - dat[test, "Salary"])^2) ## [1] 0.4389054 Which variables appear to be the most important predictors in the boosted model? summary(fits[[which.min(errs)]]) ## var rel.inf ## CAtBat CAtBat 16.4755242 ## CRBI CRBI 9.0670759 ## CHits CHits 8.9307168 ## CRuns CRuns 7.6839786 ## CWalks CWalks 7.1014886 ## PutOuts PutOuts 6.7869382 ## AtBat AtBat 5.8567916 ## Walks Walks 5.8479836 ## Years Years 5.3349489 ## Assists Assists 5.0076392 ## CHmRun CHmRun 4.6606919 ## RBI RBI 3.9255396 ## Hits Hits 3.8123124 ## HmRun HmRun 3.4462640 ## Runs Runs 2.4779866 ## Errors Errors 2.2341326 ## NewLeague NewLeague 0.5788283 ## Division Division 0.4880237 ## League League 0.2831352 Now apply bagging to the training set. What is the test set MSE for this approach? set.seed(42) bagged <- randomForest(Salary ~ ., data = dat[train, ], mtry = 19, ntree = 1000) mean((predict(bagged, newdata = dat[test, ]) - dat[test, "Salary"])^2) ## [1] 0.2278813 8.2.5 Question 11 This question uses the Caravan data set. Create a training set consisting of the first 1,000 observations, and a test set consisting of the remaining observations. train <- 1:1000 test <- setdiff(1:nrow(Caravan), train) Fit a boosting model to the training set with Purchase as the response and the other variables as predictors. Use 1,000 trees, and a shrinkage value of 0.01. Which predictors appear to be the most important? set.seed(42) fit <- gbm(as.numeric(Purchase == "Yes") ~ ., data = Caravan[train, ], n.trees = 1000, shrinkage = 0.01) ## Distribution not specified, assuming bernoulli ... ## Warning in gbm.fit(x = x, y = y, offset = offset, distribution = distribution, ## : variable 50: PVRAAUT has no variation. ## Warning in gbm.fit(x = x, y = y, offset = offset, distribution = distribution, ## : variable 71: AVRAAUT has no variation. 
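The two warnings above arise because PVRAAUT and AVRAAUT are constant within the first 1,000 observations, so boosting can never split on them. A minimal check of this (a sketch; column 86 of Caravan is the Purchase response, as in the KNN fit below):
# list predictors with zero variance in the training rows
names(which(apply(Caravan[train, -86], 2, var) == 0))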
head(summary(fit)) ## var rel.inf ## PPERSAUT PPERSAUT 15.243041 ## MKOOPKLA MKOOPKLA 10.220498 ## MOPLHOOG MOPLHOOG 7.584734 ## MBERMIDD MBERMIDD 5.983650 ## PBRAND PBRAND 4.557491 ## ABRAND ABRAND 4.076017 Use the boosting model to predict the response on the test data. Predict that a person will make a purchase if the estimated probability of purchase is greater than 20%. Form a confusion matrix. What fraction of the people predicted to make a purchase do in fact make one? How does this compare with the results obtained from applying KNN or logistic regression to this data set? p <- predict(fit, Caravan[test, ], n.trees = 1000, type = "response") table(p > 0.2, Caravan[test, "Purchase"] == "Yes") ## ## FALSE TRUE ## FALSE 4415 257 ## TRUE 118 32 sum(p > 0.2 & Caravan[test, "Purchase"] == "Yes") / sum(p > 0.2) ## [1] 0.2133333 From the confusion matrix, 150 (118 + 32) people are predicted to purchase. Of these, 32 actually do, which is about 21%. # Logistic regression fit <- glm(Purchase == "Yes" ~ ., data = Caravan[train, ], family = "binomial") ## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred p <- predict(fit, Caravan[test, ], type = "response") ## Warning in predict.lm(object, newdata, se.fit, scale = 1, type = if (type == : ## prediction from rank-deficient fit; attr(*, "non-estim") has doubtful cases table(p > 0.2, Caravan[test, "Purchase"] == "Yes") ## ## FALSE TRUE ## FALSE 4183 231 ## TRUE 350 58 sum(p > 0.2 & Caravan[test, "Purchase"] == "Yes") / sum(p > 0.2) ## [1] 0.1421569 For logistic regression we correctly predict 14% of those predicted to purchase. library(class) # KNN fit <- knn(Caravan[train, -86], Caravan[test, -86], Caravan$Purchase[train]) table(fit, Caravan[test, "Purchase"] == "Yes") ## ## fit FALSE TRUE ## No 4260 263 ## Yes 273 26 sum(fit == "Yes" & Caravan[test, "Purchase"] == "Yes") / sum(fit == "Yes") ## [1] 0.08695652 For KNN we correctly predict 8.7% of those predicted to purchase. 8.2.6 Question 12 Apply boosting, bagging, random forests and BART to a data set of your choice. Be sure to fit the models on a training set and to evaluate their performance on a test set. How accurate are the results compared to simple methods like linear or logistic regression? Which of these approaches yields the best performance? Here I'm going to use the College dataset (used in Question 10 of Chapter 7), so that we can compare performance with the GAM we previously built. In that model we were trying to predict Outstate using the other variables in College. library(gam) ## Loading required package: splines ## Loading required package: foreach ## ## Attaching package: 'foreach' ## The following objects are masked from 'package:purrr': ## ## accumulate, when ## Loaded gam 1.22-4 set.seed(42) train <- sample(1:nrow(College), 400) test <- setdiff(1:nrow(College), train) # Linear regression lr <- gam(Outstate ~ ., data = College[train, ]) # GAM from chapter 7 gam <- gam(Outstate ~ Private + s(Room.Board, 2) + s(PhD, 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), data = College[train, ]) # Boosting boosted <- gbm(Outstate ~ ., data = College[train, ], n.trees = 1000, shrinkage = 0.01) ## Distribution not specified, assuming gaussian ... # Bagging (random forest with mtry = no. predictors) bagged <- randomForest(Outstate ~ ., data = College[train, ], mtry = 17, ntree = 1000) # Random forest with mtry = sqrt(no.
predictors) rf <- randomForest(Outstate ~ ., data = College[train, ], mtry = 4, ntree = 1000) # BART pred <- setdiff(colnames(College), "Outstate") bart <- gbart(College[train, pred], College[train, "Outstate"], x.test = College[test, pred]) ## *****Calling gbart: type=1 ## *****Data: ## data:n,p,np: 400, 18, 377 ## y1,yn: -4030.802500, 77.197500 ## x1,x[n*p]: 1.000000, 71.000000 ## xp1,xp[np*p]: 0.000000, 99.000000 ## *****Number of Trees: 200 ## *****Number of Cut Points: 1 ... 75 ## *****burn,nd,thin: 100,1000,1 ## *****Prior:beta,alpha,tau,nu,lambda,offset: 2,0.95,301.581,3,715815,10580.8 ## *****sigma: 1916.969943 ## *****w (weights): 1.000000 ... 1.000000 ## *****Dirichlet:sparse,theta,omega,a,b,rho,augment: 0,0,1,0.5,1,18,0 ## *****printevery: 100 ## ## MCMC ## done 0 (out of 1100) ## done 100 (out of 1100) ## done 200 (out of 1100) ## done 300 (out of 1100) ## done 400 (out of 1100) ## done 500 (out of 1100) ## done 600 (out of 1100) ## done 700 (out of 1100) ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) ## time: 3s ## trcnt,tecnt: 1000,1000 mse <- function(model, ...) { pred <- predict(model, College[test, ], ...) mean((College$Outstate[test] - pred)^2) } res <- c( "Linear regression" = mse(lr), "GAM" = mse(gam), "Boosting" = mse(boosted, n.trees = 1000), "Bagging" = mse(bagged), "Random forest" = mse(rf), "BART" = mse(bart) ) res <- data.frame("MSE" = res) res$Model <- factor(row.names(res), levels = rev(row.names(res))) ggplot(res, aes(Model, MSE)) + coord_flip() + geom_bar(stat = "identity", fill = "steelblue") In this case, it looks like bagging produces the best performing model in terms of test mean square error. "],["support-vector-machines.html", "9 Support Vector Machines 9.1 Conceptual 9.2 Applied", " 9 Support Vector Machines 9.1 Conceptual 9.1.1 Question 1 This problem involves hyperplanes in two dimensions. Sketch the hyperplane \\(1 + 3X_1 − X_2 = 0\\). Indicate the set of points for which \\(1 + 3X_1 − X_2 > 0\\), as well as the set of points for which \\(1 + 3X_1 − X_2 < 0\\). library(ggplot2) xlim <- c(-10, 10) ylim <- c(-30, 30) points <- expand.grid( X1 = seq(xlim[1], xlim[2], length.out = 50), X2 = seq(ylim[1], ylim[2], length.out = 50) ) p <- ggplot(points, aes(x = X1, y = X2)) + geom_abline(intercept = 1, slope = 3) + # X2 = 1 + 3X1 theme_bw() p + geom_point(aes(color = 1 + 3*X1 - X2 > 0), size = 0.1) + scale_color_discrete(name = "1 + 3X1 − X2 > 0") On the same plot, sketch the hyperplane \\(−2 + X_1 + 2X_2 = 0\\). Indicate the set of points for which \\(−2 + X_1 + 2X_2 > 0\\), as well as the set of points for which \\(−2 + X_1 + 2X_2 < 0\\). p + geom_abline(intercept = 1, slope = -1/2) + # X2 = 1 - X1/2 geom_point( aes(color = interaction(1 + 3*X1 - X2 > 0, -2 + X1 + 2*X2 > 0)), size = 0.5 ) + scale_color_discrete(name = "(1 + 3X1 − X2 > 0).(−2 + X1 + 2X2 > 0)") 9.1.2 Question 2 We have seen that in \\(p = 2\\) dimensions, a linear decision boundary takes the form \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 = 0\\). We now investigate a non-linear decision boundary. Sketch the curve \\[(1+X_1)^2 +(2−X_2)^2 = 4\\]. 
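Note that \\((1+X_1)^2 + (2-X_2)^2 = 4\\) is a circle of radius 2 centered at \\((X_1, X_2) = (-1, 2)\\); we can sketch it as the zero contour of \\((1+X_1)^2 + (2-X_2)^2 - 4\\):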
points <- expand.grid( X1 = seq(-4, 2, length.out = 100), X2 = seq(-1, 5, length.out = 100) ) p <- ggplot(points, aes(x = X1, y = X2, z = (1 + X1)^2 + (2 - X2)^2 - 4)) + geom_contour(breaks = 0, colour = "black") + theme_bw() p On your sketch, indicate the set of points for which \\[(1 + X_1)^2 + (2 − X_2)^2 > 4,\\] as well as the set of points for which \\[(1 + X_1)^2 + (2 − X_2)^2 \\leq 4.\\] p + geom_point(aes(color = (1 + X1)^2 + (2 - X2)^2 - 4 > 0), size = 0.1) Suppose that a classifier assigns an observation to the blue class if \\[(1 + X_1)^2 + (2 − X_2)^2 > 4,\\] and to the red class otherwise. To what class is the observation \\((0, 0)\\) classified? \\((−1, 1)\\)? \\((2, 2)\\)? \\((3, 8)\\)? points <- data.frame( X1 = c(0, -1, 2, 3), X2 = c(0, 1, 2, 8) ) ifelse((1 + points$X1)^2 + (2 - points$X2)^2 > 4, "blue", "red") ## [1] "blue" "red" "blue" "blue" Argue that while the decision boundary in (c) is not linear in terms of \\(X_1\\) and \\(X_2\\), it is linear in terms of \\(X_1\\), \\(X_1^2\\), \\(X_2\\), and \\(X_2^2\\). The decision boundary is \\[(1 + X_1)^2 + (2 − X_2)^2 -4 = 0\\] which we can expand to: \\[1 + 2X_1 + X_1^2 + 4 − 4X_2 + X_2^2 - 4 = 0\\] which is linear in terms of \\(X_1\\), \\(X_1^2\\), \\(X_2\\), \\(X_2^2\\). 9.1.3 Question 3 Here we explore the maximal margin classifier on a toy data set. We are given \\(n = 7\\) observations in \\(p = 2\\) dimensions. For each observation, there is an associated class label. Obs. \\(X_1\\) \\(X_2\\) \\(Y\\) 1 3 4 Red 2 2 2 Red 3 4 4 Red 4 1 4 Red 5 2 1 Blue 6 4 3 Blue 7 4 1 Blue Sketch the observations. data <- data.frame( X1 = c(3, 2, 4, 1, 2, 4, 4), X2 = c(4, 2, 4, 4, 1, 3, 1), Y = c(rep("Red", 4), rep("Blue", 3)) ) p <- ggplot(data, aes(x = X1, y = X2, color = Y)) + geom_point(size = 2) + scale_colour_identity() + coord_cartesian(xlim = c(0.5, 4.5), ylim = c(0.5, 4.5)) p Sketch the optimal separating hyperplane, and provide the equation for this hyperplane (of the form (9.1)). library(e1071) fit <- svm(as.factor(Y) ~ ., data = data, kernel = "linear", cost = 10, scale = FALSE) # Extract beta_0, beta_1, beta_2 beta <- c( -fit$rho, drop(t(fit$coefs) %*% as.matrix(data[fit$index, 1:2])) ) names(beta) <- c("B0", "B1", "B2") p <- p + geom_abline(intercept = -beta[1] / beta[3], slope = -beta[2] / beta[3], lty = 2) p Describe the classification rule for the maximal margin classifier. It should be something along the lines of “Classify to Red if \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 > 0\\), and classify to Blue otherwise.” Provide the values for \\(\\beta_0, \\beta_1,\\) and \\(\\beta_2\\). Classify to red if \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 > 0\\) and blue otherwise where \\(\\beta_0 = 1\\), \\(\\beta_1 = -2\\), \\(\\beta_2 = 2\\). On your sketch, indicate the margin for the maximal margin hyperplane. p <- p + geom_ribbon( aes(x = x, ymin = ymin, ymax = ymax), data = data.frame(x = c(0, 5), ymin = c(-1, 4), ymax = c(0, 5)), alpha = 0.1, fill = "blue", inherit.aes = FALSE ) p Indicate the support vectors for the maximal margin classifier. p <- p + geom_point(data = data[fit$index, ], size = 4) p The support vectors (from the svm fit object) are shown above. Arguably, there’s another support vector, since four points exactly touch the margin. Argue that a slight movement of the seventh observation would not affect the maximal margin hyperplane. p + geom_point(data = data[7, , drop = FALSE], size = 4, color = "purple") The 7th point is shown in purple above. 
It is not a support vector, and not close to the margin, so small changes in its X1, X2 values would not affect the current calculated margin. Sketch a hyperplane that is not the optimal separating hyperplane, and provide the equation for this hyperplane. A non-optimal hyperplane that still separates the blue and red points would be one that touches the (red) point at X1 = 2, X2 = 2 and the (blue) point at X1 = 4, X2 = 3. This gives the line \\(y = x/2 + 1\\) or, in the form of (9.1), \\(\\beta_0 = -1\\), \\(\\beta_1 = -1/2\\), \\(\\beta_2 = 1\\). p + geom_abline(intercept = 1, slope = 0.5, lty = 2, col = "red") Draw an additional observation on the plot so that the two classes are no longer separable by a hyperplane. p + geom_point(data = data.frame(X1 = 1, X2 = 3, Y = "Blue"), shape = 15, size = 4) 9.2 Applied 9.2.1 Question 4 Generate a simulated two-class data set with 100 observations and two features in which there is a visible but non-linear separation between the two classes. Show that in this setting, a support vector machine with a polynomial kernel (with degree greater than 1) or a radial kernel will outperform a support vector classifier on the training data. Which technique performs best on the test data? Make plots and report training and test error rates in order to back up your assertions. set.seed(10) data <- data.frame( x = runif(100), y = runif(100) ) score <- (2*data$x-0.5)^2 + (data$y)^2 - 0.5 data$class <- factor(ifelse(score > 0, "red", "blue")) p <- ggplot(data, aes(x = x, y = y, color = class)) + geom_point(size = 2) + scale_colour_identity() p train <- 1:50 test <- 51:100 fits <- list( "Radial" = svm(class ~ ., data = data[train, ], kernel = "radial"), "Polynomial" = svm(class ~ ., data = data[train, ], kernel = "polynomial", degree = 2), "Linear" = svm(class ~ ., data = data[train, ], kernel = "linear") ) err <- function(model, data) { out <- table(predict(model, data), data$class) (out[1, 2] + out[2, 1]) / sum(out) } plot(fits[[1]], data) plot(fits[[2]], data) plot(fits[[3]], data) sapply(fits, err, data = data[train, ]) ## Radial Polynomial Linear ## 0.04 0.30 0.10 sapply(fits, err, data = data[test, ]) ## Radial Polynomial Linear ## 0.06 0.48 0.14 In this case, the radial kernel performs best, followed by the linear kernel, with the 2nd-degree polynomial performing worst. The ordering of these models is the same for the training and test data sets. 9.2.2 Question 5 We have seen that we can fit an SVM with a non-linear kernel in order to perform classification using a non-linear decision boundary. We will now see that we can also obtain a non-linear decision boundary by performing logistic regression using non-linear transformations of the features. Generate a data set with \\(n = 500\\) and \\(p = 2\\), such that the observations belong to two classes with a quadratic decision boundary between them. For instance, you can do this as follows: > x1 <- runif(500) - 0.5 > x2 <- runif(500) - 0.5 > y <- 1 * (x1^2 - x2^2 > 0) set.seed(42) train <- data.frame( x1 = runif(500) - 0.5, x2 = runif(500) - 0.5 ) train$y <- factor(as.numeric((train$x1^2 - train$x2^2 > 0))) Plot the observations, colored according to their class labels. Your plot should display \\(X_1\\) on the \\(x\\)-axis, and \\(X_2\\) on the \\(y\\)-axis. p <- ggplot(train, aes(x = x1, y = x2, color = y)) + geom_point(size = 2) p Fit a logistic regression model to the data, using \\(X_1\\) and \\(X_2\\) as predictors.
fit1 <- glm(y ~ ., data = train, family = "binomial") Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be linear. plot_model <- function(fit) { if (inherits(fit, "svm")) { train$p <- predict(fit) } else { train$p <- factor(as.numeric(predict(fit) > 0)) } ggplot(train, aes(x = x1, y = x2, color = p)) + geom_point(size = 2) } plot_model(fit1) Now fit a logistic regression model to the data using non-linear functions of \\(X_1\\) and \\(X_2\\) as predictors (e.g. \\(X_1^2, X_1 \\times X_2, \\log(X_2),\\) and so forth). fit2 <- glm(y ~ poly(x1, 2) + poly(x2, 2), data = train, family = "binomial") ## Warning: glm.fit: algorithm did not converge ## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be obviously non-linear. If it is not, then repeat (a)-(e) until you come up with an example in which the predicted class labels are obviously non-linear. plot_model(fit2) Fit a support vector classifier to the data with \\(X_1\\) and \\(X_2\\) as predictors. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels. fit3 <- svm(y ~ x1 + x2, data = train, kernel = "linear") plot_model(fit3) Fit a SVM using a non-linear kernel to the data. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels. fit4 <- svm(y ~ x1 + x2, data = train, kernel = "polynomial", degree = 2) plot_model(fit4) Comment on your results. When simulating data with a quadratic decision boundary, a logistic model with quadratic transformations of the variables and an SVM model with a quadratic kernel both produce much better (and similar) fits than standard linear methods. 9.2.3 Question 6 At the end of Section 9.6.1, it is claimed that in the case of data that is just barely linearly separable, a support vector classifier with a small value of cost that misclassifies a couple of training observations may perform better on test data than one with a huge value of cost that does not misclassify any training observations. You will now investigate this claim. Generate two-class data with \\(p = 2\\) in such a way that the classes are just barely linearly separable. set.seed(2) # Simulate data that is separable by a line at y = 2.5 data <- data.frame( x = rnorm(200), class = sample(c("red", "blue"), 200, replace = TRUE) ) data$y <- (data$class == "red") * 5 + rnorm(200) # Add barely separable points (these are simulated "noise" values) newdata <- data.frame(x = rnorm(30)) newdata$y <- 1.5*newdata$x + 3 + rnorm(30, 0, 1) newdata$class = ifelse((1.5*newdata$x + 3) - newdata$y > 0, "blue", "red") data <- rbind(data, newdata) # remove any that cause misclassification leaving data that is barely linearly # separable, but along an axis that is not y = 2.5 (which would be correct # for the "true" data).
data <- data[!(data$class == "red") == ((1.5*data$x + 3 - data$y) > 0), ] data <- data[sample(seq_len(nrow(data)), 200), ] p <- ggplot(data, aes(x = x, y = y, color = class)) + geom_point(size = 2) + scale_colour_identity() + geom_abline(intercept = 3, slope = 1.5, lty = 2) p Compute the cross-validation error rates for support vector classifiers with a range of cost values. How many training errors are misclassified for each value of cost considered, and how does this relate to the cross-validation errors obtained? How many training errors are misclassified for each value of cost? costs <- 10^seq(-3, 5) sapply(costs, function(cost) { fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost) pred <- predict(fit, data) sum(pred != data$class) }) ## [1] 98 8 9 4 1 1 0 0 0 Cross-validation errors out <- tune(svm, as.factor(class) ~ ., data = data, kernel = "linear", ranges = list(cost = costs)) summary(out) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 10 ## ## - best performance: 0.005 ## ## - Detailed performance results: ## cost error dispersion ## 1 1e-03 0.540 0.09067647 ## 2 1e-02 0.045 0.02838231 ## 3 1e-01 0.045 0.03689324 ## 4 1e+00 0.020 0.02581989 ## 5 1e+01 0.005 0.01581139 ## 6 1e+02 0.005 0.01581139 ## 7 1e+03 0.005 0.01581139 ## 8 1e+04 0.010 0.02108185 ## 9 1e+05 0.010 0.02108185 data.frame( cost = out$performances$cost, misclass = out$performances$error * nrow(data) ) ## cost misclass ## 1 1e-03 108 ## 2 1e-02 9 ## 3 1e-01 9 ## 4 1e+00 4 ## 5 1e+01 1 ## 6 1e+02 1 ## 7 1e+03 1 ## 8 1e+04 2 ## 9 1e+05 2 Generate an appropriate test data set, and compute the test errors corresponding to each of the values of cost considered. Which value of cost leads to the fewest test errors, and how does this compare to the values of cost that yield the fewest training errors and the fewest cross-validation errors? set.seed(2) test <- data.frame( x = rnorm(200), class = sample(c("red", "blue"), 200, replace = TRUE) ) test$y <- (test$class == "red") * 5 + rnorm(200) p + geom_point(data = test, pch = 21) (errs <- sapply(costs, function(cost) { fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost) pred <- predict(fit, test) sum(pred != test$class) })) ## [1] 95 2 3 9 16 16 19 19 19 (cost <- costs[which.min(errs)]) ## [1] 0.01 (fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost)) ## ## Call: ## svm(formula = as.factor(class) ~ ., data = data, kernel = "linear", ## cost = cost) ## ## ## Parameters: ## SVM-Type: C-classification ## SVM-Kernel: linear ## cost: 0.01 ## ## Number of Support Vectors: 135 test$prediction <- predict(fit, test) p <- ggplot(test, aes(x = x, y = y, color = class, shape = prediction == class)) + geom_point(size = 2) + scale_colour_identity() p Discuss your results. A large cost leads to overfitting as the model finds the perfect linear separation between red and blue in the training data. A lower cost then leads to improved prediction in the test data. 9.2.4 Question 7 In this problem, you will use support vector approaches in order to predict whether a given car gets high or low gas mileage based on the Auto data set. Create a binary variable that takes on a 1 for cars with gas mileage above the median, and a 0 for cars with gas mileage below the median. 
library(ISLR2) data <- Auto data$high_mpg <- as.factor(as.numeric(data$mpg > median(data$mpg))) Fit a support vector classifier to the data with various values of cost, in order to predict whether a car gets high or low gas mileage. Report the cross-validation errors associated with different values of this parameter. Comment on your results. Note you will need to fit the classifier without the gas mileage variable to produce sensible results. set.seed(42) costs <- 10^seq(-4, 3, by = 0.5) results <- list() f <- high_mpg ~ displacement + horsepower + weight results$linear <- tune(svm, f, data = data, kernel = "linear", ranges = list(cost = costs)) summary(results$linear) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 0.03162278 ## ## - best performance: 0.1019231 ## ## - Detailed performance results: ## cost error dispersion ## 1 1.000000e-04 0.5967949 0.05312225 ## 2 3.162278e-04 0.5967949 0.05312225 ## 3 1.000000e-03 0.2199359 0.08718077 ## 4 3.162278e-03 0.1353846 0.06058195 ## 5 1.000000e-02 0.1121795 0.04011293 ## 6 3.162278e-02 0.1019231 0.05087176 ## 7 1.000000e-01 0.1096154 0.05246238 ## 8 3.162278e-01 0.1044872 0.05154934 ## 9 1.000000e+00 0.1044872 0.05154934 ## 10 3.162278e+00 0.1044872 0.05154934 ## 11 1.000000e+01 0.1019231 0.05501131 ## 12 3.162278e+01 0.1019231 0.05501131 ## 13 1.000000e+02 0.1019231 0.05501131 ## 14 3.162278e+02 0.1019231 0.05501131 ## 15 1.000000e+03 0.1019231 0.05501131 Now repeat (b), this time using SVMs with radial and polynomial basis kernels, with different values of gamma and degree and cost. Comment on your results. results$polynomial <- tune(svm, f, data = data, kernel = "polynomial", ranges = list(cost = costs, degree = 1:3)) summary(results$polynomial) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost degree ## 0.1 1 ## ## - best performance: 0.101859 ## ## - Detailed performance results: ## cost degree error dispersion ## 1 1.000000e-04 1 0.5842949 0.04703306 ## 2 3.162278e-04 1 0.5842949 0.04703306 ## 3 1.000000e-03 1 0.5842949 0.04703306 ## 4 3.162278e-03 1 0.2167949 0.07891173 ## 5 1.000000e-02 1 0.1275641 0.04806885 ## 6 3.162278e-02 1 0.1147436 0.05661708 ## 7 1.000000e-01 1 0.1018590 0.05732429 ## 8 3.162278e-01 1 0.1069231 0.05949679 ## 9 1.000000e+00 1 0.1069231 0.06307278 ## 10 3.162278e+00 1 0.1069231 0.06307278 ## 11 1.000000e+01 1 0.1043590 0.06603760 ## 12 3.162278e+01 1 0.1043590 0.06603760 ## 13 1.000000e+02 1 0.1043590 0.06603760 ## 14 3.162278e+02 1 0.1043590 0.06603760 ## 15 1.000000e+03 1 0.1043590 0.06603760 ## 16 1.000000e-04 2 0.5842949 0.04703306 ## 17 3.162278e-04 2 0.5842949 0.04703306 ## 18 1.000000e-03 2 0.5842949 0.04703306 ## 19 3.162278e-03 2 0.5255128 0.08090636 ## 20 1.000000e-02 2 0.3980769 0.08172400 ## 21 3.162278e-02 2 0.3674359 0.07974741 ## 22 1.000000e-01 2 0.3597436 0.08336609 ## 23 3.162278e-01 2 0.3597436 0.09010398 ## 24 1.000000e+00 2 0.3444872 0.08767258 ## 25 3.162278e+00 2 0.3545513 0.10865903 ## 26 1.000000e+01 2 0.3239103 0.09593710 ## 27 3.162278e+01 2 0.3035256 0.08184137 ## 28 1.000000e+02 2 0.3061538 0.08953945 ## 29 3.162278e+02 2 0.3060897 0.08919821 ## 30 1.000000e+03 2 0.3035897 0.09305216 ## 31 1.000000e-04 3 0.5842949 0.04703306 ## 32 3.162278e-04 3 0.4955128 0.10081350 ## 33 1.000000e-03 3 0.3750641 0.08043982 ## 34 3.162278e-03 3 0.3036538 0.09096445 ## 35 1.000000e-02 3 0.2601282 0.07774595 ## 36 3.162278e-02 3 0.2499359 0.08407106 ## 37 
1.000000e-01 3 0.2017949 0.07547413 ## 38 3.162278e-01 3 0.1937179 0.08427411 ## 39 1.000000e+00 3 0.1478205 0.04579654 ## 40 3.162278e+00 3 0.1451923 0.05169638 ## 41 1.000000e+01 3 0.1451282 0.04698931 ## 42 3.162278e+01 3 0.1500000 0.07549058 ## 43 1.000000e+02 3 0.1373718 0.05772558 ## 44 3.162278e+02 3 0.1271795 0.06484766 ## 45 1.000000e+03 3 0.1322436 0.06764841 results$radial <- tune(svm, f, data = data, kernel = "radial", ranges = list(cost = costs, gamma = 10^(-2:1))) summary(results$radial) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost gamma ## 1000 0.1 ## ## - best performance: 0.08179487 ## ## - Detailed performance results: ## cost gamma error dispersion ## 1 1.000000e-04 0.01 0.58410256 0.05435320 ## 2 3.162278e-04 0.01 0.58410256 0.05435320 ## 3 1.000000e-03 0.01 0.58410256 0.05435320 ## 4 3.162278e-03 0.01 0.58410256 0.05435320 ## 5 1.000000e-02 0.01 0.58410256 0.05435320 ## 6 3.162278e-02 0.01 0.26557692 0.10963269 ## 7 1.000000e-01 0.01 0.15038462 0.05783237 ## 8 3.162278e-01 0.01 0.11224359 0.04337812 ## 9 1.000000e+00 0.01 0.10730769 0.04512161 ## 10 3.162278e+00 0.01 0.10730769 0.04512161 ## 11 1.000000e+01 0.01 0.10737179 0.05526490 ## 12 3.162278e+01 0.01 0.10480769 0.05610124 ## 13 1.000000e+02 0.01 0.10480769 0.05610124 ## 14 3.162278e+02 0.01 0.10737179 0.05526490 ## 15 1.000000e+03 0.01 0.10993590 0.05690926 ## 16 1.000000e-04 0.10 0.58410256 0.05435320 ## 17 3.162278e-04 0.10 0.58410256 0.05435320 ## 18 1.000000e-03 0.10 0.58410256 0.05435320 ## 19 3.162278e-03 0.10 0.58410256 0.05435320 ## 20 1.000000e-02 0.10 0.15301282 0.06026554 ## 21 3.162278e-02 0.10 0.11480769 0.04514816 ## 22 1.000000e-01 0.10 0.10730769 0.04512161 ## 23 3.162278e-01 0.10 0.10730769 0.04512161 ## 24 1.000000e+00 0.10 0.10737179 0.05526490 ## 25 3.162278e+00 0.10 0.10737179 0.05526490 ## 26 1.000000e+01 0.10 0.10737179 0.05526490 ## 27 3.162278e+01 0.10 0.10737179 0.05526490 ## 28 1.000000e+02 0.10 0.09967949 0.04761387 ## 29 3.162278e+02 0.10 0.08429487 0.03207585 ## 30 1.000000e+03 0.10 0.08179487 0.03600437 ## 31 1.000000e-04 1.00 0.58410256 0.05435320 ## 32 3.162278e-04 1.00 0.58410256 0.05435320 ## 33 1.000000e-03 1.00 0.58410256 0.05435320 ## 34 3.162278e-03 1.00 0.58410256 0.05435320 ## 35 1.000000e-02 1.00 0.12506410 0.05342773 ## 36 3.162278e-02 1.00 0.10730769 0.06255920 ## 37 1.000000e-01 1.00 0.10993590 0.05561080 ## 38 3.162278e-01 1.00 0.10737179 0.05526490 ## 39 1.000000e+00 1.00 0.09711538 0.05107441 ## 40 3.162278e+00 1.00 0.08429487 0.03634646 ## 41 1.000000e+01 1.00 0.08692308 0.03877861 ## 42 3.162278e+01 1.00 0.08948718 0.03503648 ## 43 1.000000e+02 1.00 0.09198718 0.03272127 ## 44 3.162278e+02 1.00 0.10217949 0.04214031 ## 45 1.000000e+03 1.00 0.09692308 0.04645046 ## 46 1.000000e-04 10.00 0.58410256 0.05435320 ## 47 3.162278e-04 10.00 0.58410256 0.05435320 ## 48 1.000000e-03 10.00 0.58410256 0.05435320 ## 49 3.162278e-03 10.00 0.58410256 0.05435320 ## 50 1.000000e-02 10.00 0.58410256 0.05435320 ## 51 3.162278e-02 10.00 0.22205128 0.12710181 ## 52 1.000000e-01 10.00 0.11237179 0.03888895 ## 53 3.162278e-01 10.00 0.10217949 0.04375722 ## 54 1.000000e+00 10.00 0.09717949 0.03809440 ## 55 3.162278e+00 10.00 0.09717949 0.03809440 ## 56 1.000000e+01 10.00 0.09711538 0.04161705 ## 57 3.162278e+01 10.00 0.11487179 0.04240664 ## 58 1.000000e+02 10.00 0.13019231 0.03541140 ## 59 3.162278e+02 10.00 0.13532051 0.03865626 ## 60 1.000000e+03 10.00 0.14044872 0.04251917 sapply(results, function(x) 
x$best.performance) ## linear polynomial radial ## 0.10192308 0.10185897 0.08179487 sapply(results, function(x) x$best.parameters) ## $linear ## cost ## 6 0.03162278 ## ## $polynomial ## cost degree ## 7 0.1 1 ## ## $radial ## cost gamma ## 30 1000 0.1 Make some plots to back up your assertions in (b) and (c). Hint: In the lab, we used the plot() function for svm objects only in cases with \\(p = 2\\). When \\(p > 2\\), you can use the plot() function to create plots displaying pairs of variables at a time. Essentially, instead of typing > plot(svmfit, dat) where svmfit contains your fitted model and dat is a data frame containing your data, you can type > plot(svmfit, dat, x1 ∼ x4) in order to plot just the first and fourth variables. However, you must replace x1 and x4 with the correct variable names. To find out more, type ?plot.svm. table(predict(results$radial$best.model, data), data$high_mpg) ## ## 0 1 ## 0 176 5 ## 1 20 191 plot(results$radial$best.model, data, horsepower~displacement) plot(results$radial$best.model, data, horsepower~weight) plot(results$radial$best.model, data, displacement~weight) 9.2.5 Question 8 This problem involves the OJ data set which is part of the ISLR2 package. Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations. set.seed(42) train <- sample(seq_len(nrow(OJ)), 800) test <- setdiff(seq_len(nrow(OJ)), train) Fit a support vector classifier to the training data using cost = 0.01, with Purchase as the response and the other variables as predictors. Use the summary() function to produce summary statistics, and describe the results obtained. fit <- svm(Purchase ~ ., data = OJ[train, ], kernel = "linear", cost = 0.01) summary(fit) ## ## Call: ## svm(formula = Purchase ~ ., data = OJ[train, ], kernel = "linear", ## cost = 0.01) ## ## ## Parameters: ## SVM-Type: C-classification ## SVM-Kernel: linear ## cost: 0.01 ## ## Number of Support Vectors: 432 ## ## ( 215 217 ) ## ## ## Number of Classes: 2 ## ## Levels: ## CH MM What are the training and test error rates? err <- function(model, data) { t <- table(predict(model, data), data[["Purchase"]]) 1 - sum(diag(t)) / sum(t) } errs <- function(model) { c(train = err(model, OJ[train, ]), test = err(model, OJ[test, ])) } errs(fit) ## train test ## 0.171250 0.162963 Use the tune() function to select an optimal cost. Consider values in the range 0.01 to 10. tuned <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "linear", ranges = list(cost = 10^seq(-2, 1, length.out = 10))) tuned$best.parameters ## cost ## 7 1 summary(tuned) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 1 ## ## - best performance: 0.1775 ## ## - Detailed performance results: ## cost error dispersion ## 1 0.01000000 0.18250 0.04133199 ## 2 0.02154435 0.18000 0.04005205 ## 3 0.04641589 0.18000 0.05041494 ## 4 0.10000000 0.18000 0.04901814 ## 5 0.21544347 0.18250 0.04377975 ## 6 0.46415888 0.18250 0.04090979 ## 7 1.00000000 0.17750 0.04031129 ## 8 2.15443469 0.18000 0.03961621 ## 9 4.64158883 0.17875 0.03821086 ## 10 10.00000000 0.18375 0.03438447 Compute the training and test error rates using this new value for cost. errs(tuned$best.model) ## train test ## 0.167500 0.162963 Repeat parts (b) through (e) using a support vector machine with a radial kernel. Use the default value for gamma. 
tuned2 <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "radial", ranges = list(cost = 10^seq(-2, 1, length.out = 10))) tuned2$best.parameters ## cost ## 6 0.4641589 errs(tuned2$best.model) ## train test ## 0.1525000 0.1666667 Repeat parts (b) through (e) using a support vector machine with a polynomial kernel. Set degree = 2. tuned3 <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "polynomial", ranges = list(cost = 10^seq(-2, 1, length.out = 10)), degree = 2) tuned3$best.parameters ## cost ## 9 4.641589 errs(tuned3$best.model) ## train test ## 0.1487500 0.1703704 Overall, which approach seems to give the best results on this data? Overall the “radial” kernel appears to perform best in this case. "],["deep-learning.html", "10 Deep Learning 10.1 Conceptual 10.2 Applied", " 10 Deep Learning 10.1 Conceptual 10.1.1 Question 1 Consider a neural network with two hidden layers: \\(p = 4\\) input units, 2 units in the first hidden layer, 3 units in the second hidden layer, and a single output. Draw a picture of the network, similar to Figures 10.1 or 10.4. Write out an expression for \\(f(X)\\), assuming ReLU activation functions. Be as explicit as you can! The three layers (from our final output layer back to the start of our network) can be described as: \\[\\begin{align*} f(X) &= g(w_{0}^{(3)} + \\sum^{K_2}_{l=1} w_{l}^{(3)} A_l^{(2)}) \\\\ A_l^{(2)} &= h_l^{(2)}(X) = g(w_{l0}^{(2)} + \\sum_{k=1}^{K_1} w_{lk}^{(2)} A_k^{(1)})\\\\ A_k^{(1)} &= h_k^{(1)}(X) = g(w_{k0}^{(1)} + \\sum_{j=1}^p w_{kj}^{(1)} X_j) \\\\ \\end{align*}\\] for \\(l = 1, ..., K_2 = 3\\) and \\(k = 1, ..., K_1 = 2\\) and \\(p = 4\\), where, \\[ g(z) = (z)_+ = \\begin{cases} 0, & \\text{if } z < 0 \\\\ z, & \\text{otherwise} \\end{cases} \\] Now plug in some values for the coefficients and write out the value of \\(f(X)\\). We can perhaps achieve this most easily by fitting a real model. Note, in the plot shown here, we also include the “bias” or intercept terms. library(ISLR2) library(neuralnet) library(sigmoid) set.seed(5) train <- sample(seq_len(nrow(ISLR2::Boston)), nrow(ISLR2::Boston) * 2/3) net <- neuralnet(crim ~ lstat + medv + ptratio + rm, data = ISLR2::Boston[train, ], act.fct = relu, hidden = c(2, 3) ) plot(net) We can make a prediction for a given observation using this object. Firstly, let’s find an “ambiguous” test sample p <- predict(net, ISLR2::Boston[-train, ]) x <- ISLR2::Boston[-train, ][which.min(abs(p - mean(c(max(p), min(p))))), ] x <- x[, c("lstat", "medv", "ptratio", "rm")] predict(net, x) ## [,1] ## 441 19.14392 Or, repeating by “hand”: g <- function(x) ifelse(x > 0, x, 0) # relu activation function w <- net$weights[[1]] # the estimated weights for each layer v <- as.numeric(x) # our input predictors # to calculate our prediction we can take the dot product of our predictors # (with 1 at the start for the bias term) and our layer weights, lw) for (lw in w) v <- g(c(1, v) %*% lw) v ## [,1] ## [1,] 19.14392 How many parameters are there? length(unlist(net$weights)) ## [1] 23 There are \\(4*2+2 + 2*3+3 + 3*1+1 = 23\\) parameters. 10.1.2 Question 2 Consider the softmax function in (10.13) (see also (4.13) on page 141) for modeling multinomial probabilities. In (10.13), show that if we add a constant \\(c\\) to each of the \\(z_l\\), then the probability is unchanged. 
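Before the algebra, a quick numerical check of this claim (a minimal sketch, with the softmax function written out by hand rather than taken from a package):

``` r
# Adding a constant to every z_l leaves the softmax probabilities unchanged
softmax <- function(z) exp(z) / sum(exp(z))
z <- c(0.2, -1.3, 2.5, 0.7)
all.equal(softmax(z), softmax(z + 5)) # TRUE: identical probabilities
```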
If we add a constant \\(c\\) to each \\(Z_l\\) in equation 10.13 we get: \\[\\begin{align*} Pr(Y=m|X) &= \\frac{e^{Z_m+c}}{\\sum_{l=0}^9e^{Z_l+c}} \\\\ &= \\frac{e^{Z_m}e^c}{\\sum_{l=0}^9e^{Z_l}e^c} \\\\ &= \\frac{e^{Z_m}e^c}{e^c\\sum_{l=0}^9e^{Z_l}} \\\\ &= \\frac{e^{Z_m}}{\\sum_{l=0}^9e^{Z_l}} \\\\ \\end{align*}\\] which is just equation 10.13. In (4.13), show that if we add constants \\(c_j\\), \\(j = 0,1,...,p\\), to each of the corresponding coefficients for each of the classes, then the predictions at any new point \\(x\\) are unchanged. 4.13 is \\[ Pr(Y=k|X=x) = \\frac {e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x1 + ... + \\beta_{lp}x_p}} \\] adding constants \\(c_j\\) to each class gives: \\[\\begin{align*} Pr(Y=k|X=x) &= \\frac {e^{\\beta_{K0} + \\beta_{K1}x_1 + c_1 + ... + \\beta_{Kp}x_p + c_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x1 + c_1 + ... + \\beta_{lp}x_p + c_p}} \\\\ &= \\frac {e^{c1 + ... + c_p}e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{c1 + ... + c_p}e^{\\beta_{l0} + \\beta_{l1}x1 + ... + \\beta_{lp}x_p}} \\\\ &= \\frac {e^{c1 + ... + c_p}e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {e^{c1 + ... + c_p}\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x1 + ... + \\beta_{lp}x_p}} \\\\ &= \\frac {e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x1 + ... + \\beta_{lp}x_p}} \\\\ \\end{align*}\\] which collapses to 4.13 (with the same argument as above). This shows that the softmax function is over-parametrized. However, regularization and SGD typically constrain the solutions so that this is not a problem. 10.1.3 Question 3 Show that the negative multinomial log-likelihood (10.14) is equivalent to the negative log of the likelihood expression (4.5) when there are \\(M = 2\\) classes. Equation 10.14 is \\[ -\\sum_{i=1}^n \\sum_{m=0}^9 y_{im}\\log(f_m(x_i)) \\] Equation 4.5 is: \\[ \\ell(\\beta_0, \\beta_1) = \\prod_{i:y_i=1}p(x_i) \\prod_{i':y_i'=0}(1-p(x_i')) \\] So, \\(\\log(\\ell)\\) is: \\[\\begin{align*} \\log(\\ell) &= \\log \\left( \\prod_{i:y_i=1}p(x_i) \\prod_{i':y_i'=0}(1-p(x_i')) \\right ) \\\\ &= \\sum_{i:y_1=1}\\log(p(x_i)) + \\sum_{i':y_i'=0}\\log(1-p(x_i')) \\\\ \\end{align*}\\] If we set \\(y_i\\) to be an indicator variable such that \\(y_{i1}\\) and \\(y_{i0}\\) are 1 and 0 (or 0 and 1) when our \\(i\\)th observation is 1 (or 0) respectively, then we can write: \\[ \\log(\\ell) = \\sum_{i}y_{i1}\\log(p(x_i)) + \\sum_{i}y_{i0}\\log(1-p(x_i')) \\] If we also let \\(f_1(x) = p(x)\\) and \\(f_0(x) = 1 - p(x)\\) then: \\[\\begin{align*} \\log(\\ell) &= \\sum_i y_{i1}\\log(f_1(x_i)) + \\sum_{i}y_{i0}\\log(f_0(x_i')) \\\\ &= \\sum_i \\sum_{m=0}^1 y_{im}\\log(f_m(x_i)) \\\\ \\end{align*}\\] When we take the negative of this, it is equivalent to 10.14 for two classes (\\(m = 0,1\\)). 10.1.4 Question 4 Consider a CNN that takes in \\(32 \\times 32\\) grayscale images and has a single convolution layer with three \\(5 \\times 5\\) convolution filters (without boundary padding). Draw a sketch of the input and first hidden layer similar to Figure 10.8. How many parameters are in this model? There are 5 convolution matrices each with 5x5 weights (plus 5 bias terms) to estimate, therefore 130 parameters Explain how this model can be thought of as an ordinary feed-forward neural network with the individual pixels as inputs, and with constraints on the weights in the hidden units. What are the constraints? 
We can think of a convolution layer as a regularized fully connected layer. The regularization in this case is due to not all inputs being connected to all outputs, and weights being shared between connections. Each output node in the convolved image can be thought of as taking inputs from a limited number of input pixels (the neighboring pixels), with a set of weights specified by the convolution layer, which are then shared by the connections to all other output nodes. If there were no constraints, then how many weights would there be in the ordinary feed-forward neural network in (c)? With no constraints, we would connect each output pixel in our 5x32x32 convolution layer to each node in the 32x32 original image (plus 5 bias terms), giving a total of 5,242,885 weights to estimate. 10.1.5 Question 5 In Table 10.2 on page 433, we see that the ordering of the three methods with respect to mean absolute error is different from the ordering with respect to test set \\(R^2\\). How can this be? Mean absolute error considers absolute differences between predictions and observed values, whereas \\(R^2\\) considers the (normalized) sum of squared differences; thus larger errors contribute relatively more to \\(R^2\\) than to mean absolute error. 10.2 Applied 10.2.1 Question 6 Consider the simple function \\(R(\\beta) = sin(\\beta) + \\beta/10\\). Draw a graph of this function over the range \\(\\beta \\in [−6, 6]\\). r <- function(x) sin(x) + x/10 x <- seq(-6, 6, 0.1) plot(x, r(x), type = "l") What is the derivative of this function? \\[ cos(x) + 1/10 \\] Given \\(\\beta^0 = 2.3\\), run gradient descent to find a local minimum of \\(R(\\beta)\\) using a learning rate of \\(\\rho = 0.1\\). Show each of \\(\\beta^0, \\beta^1, ...\\) in your plot, as well as the final answer. The derivative of our function, i.e. \\(cos(x) + 1/10\\), gives us the gradient for a given \\(x\\). For gradient descent, we move \\(x\\) a little in the opposite direction, for some learning rate \\(\\rho = 0.1\\): \\[ x^{m+1} = x^m - \\rho (cos(x^m) + 1/10) \\] iter <- function(x, rho) x - rho*(cos(x) + 1/10) gd <- function(start, rho = 0.1) { b <- start v <- b while(abs(b - iter(b, rho)) > 1e-8) { b <- iter(b, rho) v <- c(v, b) } v } res <- gd(2.3) res[length(res)] ## [1] 4.612221 plot(x, r(x), type = "l") points(res, r(res), col = "red", pch = 19) Repeat with \\(\\beta^0 = 1.4\\). res <- gd(1.4) res[length(res)] ## [1] -1.670964 plot(x, r(x), type = "l") points(res, r(res), col = "red", pch = 19) 10.2.2 Question 7 Fit a neural network to the Default data. Use a single hidden layer with 10 units, and dropout regularization. Have a look at Labs 10.9.1–10.9.2 for guidance. Compare the classification performance of your model with that of linear logistic regression. (Note: the code below actually fits the model to the Boston data, predicting crim, and compares mean absolute error with a linear model.) library(keras) dat <- ISLR2::Boston x <- scale(model.matrix(crim ~ .
- 1, data = dat)) n <- nrow(dat) ntest <- trunc(n / 3) testid <- sample(1:n, ntest) y <- dat$crim # logistic regression lfit <- lm(crim ~ ., data = dat[-testid, ]) lpred <- predict(lfit, dat[testid, ]) with(dat[testid, ], mean(abs(lpred - crim))) ## [1] 2.99129 # keras nn <- keras_model_sequential() |> layer_dense(units = 10, activation = "relu", input_shape = ncol(x)) |> layer_dropout(rate = 0.4) |> layer_dense(units = 1) compile(nn, loss = "mse", optimizer = optimizer_rmsprop(), metrics = list("mean_absolute_error") ) history <- fit(nn, x[-testid, ], y[-testid], epochs = 100, batch_size = 26, validation_data = list(x[testid, ], y[testid]), verbose = 0 ) plot(history, smooth = FALSE) npred <- predict(nn, x[testid, ]) ## 6/6 - 0s - 55ms/epoch - 9ms/step mean(abs(y[testid] - npred)) ## [1] 2.269432 In this case, the neural network outperforms logistic regression having a lower absolute error rate on the test data. 10.2.3 Question 8 From your collection of personal photographs, pick 10 images of animals (such as dogs, cats, birds, farm animals, etc.). If the subject does not occupy a reasonable part of the image, then crop the image. Now use a pretrained image classification CNN as in Lab 10.9.4 to predict the class of each of your images, and report the probabilities for the top five predicted classes for each image. library(keras) images <- list.files("images/animals") x <- array(dim = c(length(images), 224, 224, 3)) for (i in seq_len(length(images))) { img <- image_load(paste0("images/animals/", images[i]), target_size = c(224, 224)) x[i,,,] <- image_to_array(img) } model <- application_resnet50(weights = "imagenet") ## Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5 ## 8192/102967424 [..............................] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 4202496/102967424 [>.............................] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 14876672/102967424 [===>..........................] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 16785408/102967424 [===>..........................] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 25174016/102967424 [======>.......................] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 33562624/102967424 [========>.....................] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 41951232/102967424 [===========>..................] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 53223424/102967424 [==============>...............] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 58728448/102967424 [================>.............] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 68878336/102967424 [===================>..........] 
- ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 75505664/102967424 [====================>.........] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 86802432/102967424 [========================>.....] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 92282880/102967424 [=========================>....] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 102967424/102967424 [==============================] - 1s 0us/step pred <- model |> predict(x) |> imagenet_decode_predictions(top = 5) ## 1/1 - 1s - 1s/epoch - 1s/step ## Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json ## 8192/35363 [=====>........................] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 35363/35363 [==============================] - 0s 0us/step names(pred) <- images print(pred) ## $bird.jpg ## class_name class_description score ## 1 n01819313 sulphur-crested_cockatoo 0.33546305 ## 2 n01580077 jay 0.18020906 ## 3 n02441942 weasel 0.08320859 ## 4 n02058221 albatross 0.07002056 ## 5 n01855672 goose 0.05195721 ## ## $bird2.jpg ## class_name class_description score ## 1 n02006656 spoonbill 0.840428233 ## 2 n02012849 crane 0.016258685 ## 3 n01819313 sulphur-crested_cockatoo 0.009740722 ## 4 n02007558 flamingo 0.007816141 ## 5 n01667778 terrapin 0.007497459 ## ## $bird3.jpg ## class_name class_description score ## 1 n01833805 hummingbird 0.9767877460 ## 2 n02033041 dowitcher 0.0111253690 ## 3 n02028035 redshank 0.0042764111 ## 4 n02009229 little_blue_heron 0.0012727526 ## 5 n02002724 black_stork 0.0008971311 ## ## $bug.jpg ## class_name class_description score ## 1 n02190166 fly 0.67558461 ## 2 n02167151 ground_beetle 0.10097048 ## 3 n02172182 dung_beetle 0.05490885 ## 4 n02169497 leaf_beetle 0.03541914 ## 5 n02168699 long-horned_beetle 0.03515299 ## ## $butterfly.jpg ## class_name class_description score ## 1 n02951585 can_opener 0.20600465 ## 2 n03476684 hair_slide 0.09360629 ## 3 n04074963 remote_control 0.06316858 ## 4 n02110185 Siberian_husky 0.05178998 ## 5 n02123597 Siamese_cat 0.03785341 ## ## $butterfly2.jpg ## class_name class_description score ## 1 n02276258 admiral 9.999689e-01 ## 2 n01580077 jay 1.388074e-05 ## 3 n02277742 ringlet 1.235042e-05 ## 4 n02279972 monarch 3.037859e-06 ## 5 n02281787 lycaenid 1.261888e-06 ## ## $elba.jpg ## class_name class_description score ## 1 n02085620 Chihuahua 0.29892012 ## 2 n02091032 Italian_greyhound 0.20332782 ## 3 n02109961 Eskimo_dog 0.08477225 ## 4 n02086910 papillon 0.05140305 ## 5 n02110185 Siberian_husky 0.05064517 ## ## $hamish.jpg ## class_name class_description score ## 1 n02097209 standard_schnauzer 0.6361451149 ## 2 n02097047 miniature_schnauzer 0.3450845778 ## 3 n02097130 giant_schnauzer 0.0164217781 ## 4 n02097298 Scotch_terrier 0.0019116047 ## 5 n02096177 cairn 0.0002054328 ## ## $poodle.jpg ## class_name class_description score ## 1 n02113799 standard_poodle 0.829670966 ## 2 n02088094 Afghan_hound 0.074567914 ## 3 n02113712 miniature_poodle 0.032005571 ## 4 n02102973 Irish_water_spaniel 0.018583152 ## 5 n02102318 cocker_spaniel 0.008629788 ## ## $tortoise.jpg ## class_name class_description score ## 1 
n04033995 quilt 0.28395897 ## 2 n02110958 pug 0.15959552 ## 3 n03188531 diaper 0.14018111 ## 4 n02108915 French_bulldog 0.09364161 ## 5 n04235860 sleeping_bag 0.02608401 10.2.4 Question 9 Fit a lag-5 autoregressive model to the NYSE data, as described in the text and Lab 10.9.6. Refit the model with a 12-level factor representing the month. Does this factor improve the performance of the model? Fitting the model as described in the text. library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1 ## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1 ## ✔ purrr 1.0.2 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ dplyr::compute() masks neuralnet::compute() ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors library(ISLR2) xdata <- data.matrix(NYSE[, c("DJ_return", "log_volume","log_volatility")]) istrain <- NYSE[, "train"] xdata <- scale(xdata) lagm <- function(x, k = 1) { n <- nrow(x) pad <- matrix(NA, k, ncol(x)) rbind(pad, x[1:(n - k), ]) } arframe <- data.frame( log_volume = xdata[, "log_volume"], L1 = lagm(xdata, 1), L2 = lagm(xdata, 2), L3 = lagm(xdata, 3), L4 = lagm(xdata, 4), L5 = lagm(xdata, 5) ) arframe <- arframe[-(1:5), ] istrain <- istrain[-(1:5)] arfit <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred <- predict(arfit, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.413223 Now we add month (and work with tidyverse). arframe$month = as.factor(str_match(NYSE$date, "-(\\\\d+)-")[,2])[-(1:5)] arfit2 <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred2 <- predict(arfit2, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred2 - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.4170418 Adding month as a factor marginally improves the \\(R^2\\) of our model (from 0.413223 to 0.4170418). This is a significant improvement in fit and model 2 has a lower AIC. anova(arfit, arfit2) ## Analysis of Variance Table ## ## Model 1: log_volume ~ L1.DJ_return + L1.log_volume + L1.log_volatility + ## L2.DJ_return + L2.log_volume + L2.log_volatility + L3.DJ_return + ## L3.log_volume + L3.log_volatility + L4.DJ_return + L4.log_volume + ## L4.log_volatility + L5.DJ_return + L5.log_volume + L5.log_volatility ## Model 2: log_volume ~ L1.DJ_return + L1.log_volume + L1.log_volatility + ## L2.DJ_return + L2.log_volume + L2.log_volatility + L3.DJ_return + ## L3.log_volume + L3.log_volatility + L4.DJ_return + L4.log_volume + ## L4.log_volatility + L5.DJ_return + L5.log_volume + L5.log_volatility + ## month ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 4260 1791.0 ## 2 4249 1775.8 11 15.278 3.3234 0.000143 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 AIC(arfit, arfit2) ## df AIC ## arfit 17 8447.663 ## arfit2 28 8433.031 10.2.5 Question 10 In Section 10.9.6, we showed how to fit a linear AR model to the NYSE data using the lm() function. However, we also mentioned that we can “flatten” the short sequences produced for the RNN model in order to fit a linear AR model. Use this latter approach to fit a linear AR model to the NYSE data. Compare the test \\(R^2\\) of this linear AR model to that of the linear AR model that we fit in the lab. 
What are the advantages/disadvantages of each approach? The lm model is the same as that fit above: arfit <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred <- predict(arfit, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.4170418 Now we reshape the data for the RNN n <- nrow(arframe) xrnn <- data.matrix(arframe[, -1]) xrnn <- array(xrnn, c(n, 3, 5)) xrnn <- xrnn[, , 5:1] xrnn <- aperm(xrnn, c(1, 3, 2)) We can add a “flatten” layer to turn the reshaped data into a long vector of predictors resulting in a linear AR model. model <- keras_model_sequential() |> layer_flatten(input_shape = c(5, 3)) |> layer_dense(units = 1) Now let’s fit this model. model |> compile(optimizer = optimizer_rmsprop(), loss = "mse") history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) plot(history, smooth = FALSE) kpred <- predict(model, xrnn[!istrain,, ]) ## 56/56 - 0s - 59ms/epoch - 1ms/step 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.4129694 Both models estimate the same number of coefficients/weights (16): coef(arfit) ## (Intercept) L1.DJ_return L1.log_volume L1.log_volatility ## 0.067916689 0.094410214 0.498673056 0.586274266 ## L2.DJ_return L2.log_volume L2.log_volatility L3.DJ_return ## -0.027299158 0.036903027 -0.931509135 0.037995916 ## L3.log_volume L3.log_volatility L4.DJ_return L4.log_volume ## 0.070312741 0.216160520 -0.004954842 0.117079461 ## L4.log_volatility L5.DJ_return L5.log_volume L5.log_volatility ## -0.039752786 -0.029620296 0.096034795 0.144510264 ## month02 month03 month04 month05 ## -0.100003367 -0.143781381 -0.028242819 -0.131120579 ## month06 month07 month08 month09 ## -0.125993911 -0.141608808 -0.163030102 -0.018889698 ## month10 month11 month12 ## -0.017206826 -0.037298183 0.008361380 model$get_weights() ## [[1]] ## [,1] ## [1,] -0.032474127 ## [2,] 0.097779043 ## [3,] 0.178456694 ## [4,] -0.005626136 ## [5,] 0.121273242 ## [6,] -0.076247886 ## [7,] 0.035232600 ## [8,] 0.077857092 ## [9,] 0.163645267 ## [10,] -0.026966114 ## [11,] 0.032263778 ## [12,] -0.807968795 ## [13,] 0.095888853 ## [14,] 0.513532162 ## [15,] 0.496699780 ## ## [[2]] ## [1] -0.004996791 The flattened RNN has a lower \\(R^2\\) on the test data than our lm model above. The lm model is quicker to fit and conceptually simpler also giving us the ability to inspect the coefficients for different variables. The flattened RNN is regularized to some extent as data are processed in batches. 10.2.6 Question 11 Repeat the previous exercise, but now fit a nonlinear AR model by “flattening” the short sequences produced for the RNN model. From the book: To fit a nonlinear AR model, we could add in a hidden layer. 
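Concretely, we keep the same flattened set of 15 lagged predictors, but pass them through a dense hidden layer (below, 32 ReLU units with dropout) before the single output unit: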
xfun::cache_rds({ model <- keras_model_sequential() |> layer_flatten(input_shape = c(5, 3)) |> layer_dense(units = 32, activation = "relu") |> layer_dropout(rate = 0.4) |> layer_dense(units = 1) model |> compile( loss = "mse", optimizer = optimizer_rmsprop(), metrics = "mse" ) history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) plot(history, smooth = FALSE, metrics = "mse") kpred <- predict(model, xrnn[!istrain,, ]) 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 }) ## 56/56 - 0s - 64ms/epoch - 1ms/step ## [1] 0.4262716 This approach improves our \\(R^2\\) over the linear model above. 10.2.7 Question 12 Consider the RNN fit to the NYSE data in Section 10.9.6. Modify the code to allow inclusion of the variable day_of_week, and fit the RNN. Compute the test \\(R^2\\). To accomplish this, I’ll include day of the week as one of the lagged variables in the RNN. Thus, our input for each observation will be 4 x 5 (rather than 3 x 5). xfun::cache_rds({ xdata <- data.matrix( NYSE[, c("day_of_week", "DJ_return", "log_volume","log_volatility")] ) istrain <- NYSE[, "train"] xdata <- scale(xdata) arframe <- data.frame( log_volume = xdata[, "log_volume"], L1 = lagm(xdata, 1), L2 = lagm(xdata, 2), L3 = lagm(xdata, 3), L4 = lagm(xdata, 4), L5 = lagm(xdata, 5) ) arframe <- arframe[-(1:5), ] istrain <- istrain[-(1:5)] n <- nrow(arframe) xrnn <- data.matrix(arframe[, -1]) xrnn <- array(xrnn, c(n, 4, 5)) xrnn <- xrnn[,, 5:1] xrnn <- aperm(xrnn, c(1, 3, 2)) dim(xrnn) model <- keras_model_sequential() |> layer_simple_rnn(units = 12, input_shape = list(5, 4), dropout = 0.1, recurrent_dropout = 0.1 ) |> layer_dense(units = 1) model |> compile(optimizer = optimizer_rmsprop(), loss = "mse") history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) kpred <- predict(model, xrnn[!istrain,, ]) 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 }) ## 56/56 - 0s - 134ms/epoch - 2ms/step ## [1] 0.4429825 10.2.8 Question 13 Repeat the analysis of Lab 10.9.5 on the IMDb data using a similarly structured neural network. There we used a dictionary of size 10,000. Consider the effects of varying the dictionary size. Try the values 1000, 3000, 5000, and 10,000, and compare the results. xfun::cache_rds({ library(knitr) accuracy <- c() for(max_features in c(1000, 3000, 5000, 10000)) { imdb <- dataset_imdb(num_words = max_features) c(c(x_train, y_train), c(x_test, y_test)) %<-% imdb maxlen <- 500 x_train <- pad_sequences(x_train, maxlen = maxlen) x_test <- pad_sequences(x_test, maxlen = maxlen) model <- keras_model_sequential() |> layer_embedding(input_dim = max_features, output_dim = 32) |> layer_lstm(units = 32) |> layer_dense(units = 1, activation = "sigmoid") model |> compile( optimizer = "rmsprop", loss = "binary_crossentropy", metrics = "acc" ) history <- fit(model, x_train, y_train, epochs = 10, batch_size = 128, validation_data = list(x_test, y_test), verbose = 0 ) predy <- predict(model, x_test) > 0.5 accuracy <- c(accuracy, mean(abs(y_test == as.numeric(predy)))) } tibble( "Max Features" = c(1000, 3000, 5000, 10000), "Accuracy" = accuracy ) |> kable() }) ## Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz ## 8192/17464789 [..............................] 
17464789/17464789 [==============================] - 0s 0us/step ## 782/782 - 15s - 15s/epoch - 19ms/step ## 782/782 - 15s - 15s/epoch - 20ms/step ## 782/782 - 15s - 15s/epoch - 20ms/step ## 782/782 - 15s - 15s/epoch - 20ms/step Max Features Accuracy 1000 0.85324 3000 0.87808 5000 0.88076 10000 0.86936 Varying the dictionary size does not have a substantial impact on our estimates of accuracy. However, the models do take a substantial amount of time to fit and it is not clear we are finding the best-fitting models in each case. For example, the model using a dictionary size of 10,000 obtained an accuracy of 0.8721 in the text, which differs from the estimate obtained here by about as much as the estimates for the different dictionary sizes differ from each other. "],["survival-analysis-and-censored-data.html", "11 Survival Analysis and Censored Data 11.1 Conceptual 11.2 Applied", " 11 Survival Analysis and Censored Data 11.1 Conceptual 11.1.1 Question 1 For each example, state whether or not the censoring mechanism is independent. Justify your answer. In a study of disease relapse, due to a careless research scientist, all patients whose phone numbers begin with the number "2" are lost to follow up. Independent. There’s no reason to think disease relapse should be related to the first digit of a phone number. In a study of longevity, a formatting error causes all patient ages that exceed 99 years to be lost (i.e. we know that those patients are more than 99 years old, but we do not know their exact ages). Not independent. Older patients are more likely to see an event than younger patients. Hospital A conducts a study of longevity. However, very sick patients tend to be transferred to Hospital B, and are lost to follow up. Not independent. Sick patients are more likely to see an event than healthy patients. In a study of unemployment duration, the people who find work earlier are less motivated to stay in touch with study investigators, and therefore are more likely to be lost to follow up. Not independent. More employable individuals are more likely to see the event sooner, and are also more likely to be lost to follow up. In a study of pregnancy duration, women who deliver their babies pre-term are more likely to do so away from their usual hospital, and thus are more likely to be censored, relative to women who deliver full-term babies. Not independent. Delivery away from the usual hospital will be associated with pregnancy duration. A researcher wishes to model the number of years of education of the residents of a small town. Residents who enroll in college out of town are more likely to be lost to follow up, and are also more likely to attend graduate school, relative to those who attend college in town. Not independent. Years of education will be associated with enrolling in out-of-town colleges. Researchers conduct a study of disease-free survival (i.e. time until disease relapse following treatment).
Patients who have not relapsed within five years are considered to be cured, and thus their survival time is censored at five years. In other words we assume all events happen within five years, so censoring after this time is equivalent to not censoring at all so the censoring is independent. We wish to model the failure time for some electrical component. This component can be manufactured in Iowa or in Pittsburgh, with no difference in quality. The Iowa factory opened five years ago, and so components manufactured in Iowa are censored at five years. The Pittsburgh factory opened two years ago, so those components are censored at two years. If there is no difference in quality then location and therefore censoring is independent of failure time. We wish to model the failure time of an electrical component made in two different factories, one of which opened before the other. We have reason to believe that the components manufactured in the factory that opened earlier are of higher quality. In this case, the difference in opening times of the two locations will mean that any difference in quality between locations will be associated with censoring, so censoring is not independent. 11.1.2 Question 2 We conduct a study with \\(n = 4\\) participants who have just purchased cell phones, in order to model the time until phone replacement. The first participant replaces her phone after 1.2 years. The second participant still has not replaced her phone at the end of the two-year study period. The third participant changes her phone number and is lost to follow up (but has not yet replaced her phone) 1.5 years into the study. The fourth participant replaces her phone after 0.2 years. For each of the four participants (\\(i = 1,..., 4\\)), answer the following questions using the notation introduced in Section 11.1: Is the participant’s cell phone replacement time censored? No, Yes, Yes and No. Censoring occurs when we do not know if or when the phone was replaced. Is the value of \\(c_i\\) known, and if so, then what is it? \\(c_i\\) is censoring time. For the four participants these are: NA. 2. 1.5 and NA. Is the value of \\(t_i\\) known, and if so, then what is it? \\(t_i\\) is time to event. For the four participants these are: 1.2, NA, NA and 0.2. Is the value of \\(y_i\\) known, and if so, then what is it? \\(y_i\\) is the observed time. For the four participants these are: 1.2, 2, 1.5 and 0.2. Is the value of \\(\\delta_i\\) known, and if so, then what is it? \\(\\delta_i\\) is an indicator for censoring. The nomenclature introduced here defines this to be 1 if we observe the true “survival” time and 0 if we observe the censored time. Therefore, for these participants, the values are: 1, 0, 0 and 1. 11.1.3 Question 3 For the example in Exercise 2, report the values of \\(K\\), \\(d_1,...,d_K\\), \\(r_1,...,r_K\\), and \\(q_1,...,q_K\\), where this notation was defined in Section 11.3. \\(K\\) is the number of unique deaths, which is 2. \\(d_k\\) represents the unique death times, which are: 0.2, 1.2. \\(r_k\\) denotes the number of patients alive and in the study just before \\(d_k\\). Note the first event is for patient 4, then patient 1, then patient 3 is censored and finally the study ends with patient 2 still involved. Therefore \\(r_k\\) takes values are: 4, 3. \\(q_k\\) denotes the number of patients who died at time \\(d_k\\), therefore this takes values: 1, 1. We can check by using the survival package. 
library(survival) x <- Surv(c(1.2, 2, 1.5, 0.2), event = c(1, 0, 0, 1)) summary(survfit(x ~ 1)) ## Call: survfit(formula = x ~ 1) ## ## time n.risk n.event survival std.err lower 95% CI upper 95% CI ## 0.2 4 1 0.75 0.217 0.426 1 ## 1.2 3 1 0.50 0.250 0.188 1 11.1.4 Question 4 This problem makes use of the Kaplan-Meier survival curve displayed in Figure 11.9. The raw data that went into plotting this survival curve is given in Table 11.4. The covariate column of that table is not needed for this problem. What is the estimated probability of survival past 50 days? There are 2 events that happen before 50 days. The number at risk \\(r_k\\) are 5 and 4 (one was censored early on), thus survival probability is \\(4/5 * 3/4 = 0.6\\). Equivalently, we can use the survival package. library(tidyverse) table_data <- tribble( ~Y, ~D, ~X, 26.5, 1, 0.1, 37.2, 1, 11, 57.3, 1, -0.3, 90.8, 0, 2.8, 20.2, 0, 1.8, 89.8, 0, 0.4 ) x <- Surv(table_data$Y, table_data$D) summary(survfit(x ~ 1)) ## Call: survfit(formula = x ~ 1) ## ## time n.risk n.event survival std.err lower 95% CI upper 95% CI ## 26.5 5 1 0.8 0.179 0.516 1 ## 37.2 4 1 0.6 0.219 0.293 1 ## 57.3 3 1 0.4 0.219 0.137 1 Write out an analytical expression for the estimated survival function. For instance, your answer might be something along the lines of \\[ \\hat{S}(t) = \\begin{cases} 0.8 & \\text{if } t < 31\\\\ 0.5 & \\text{if } 31 \\le t < 77\\\\ 0.22 & \\text{if } 77 \\le t \\end{cases} \\] (The previous equation is for illustration only: it is not the correct answer!) \\[ \\hat{S}(t) = \\begin{cases} 1 & \\text{if } t < 26.5 \\\\ 0.8 & \\text{if } 26.5 \\le t < 37.2 \\\\ 0.6 & \\text{if } 37.2 \\le t < 57.3 \\\\ 0.4 & \\text{if } 57.3 \\le t \\end{cases} \\] 11.1.5 Question 5 Sketch the survival function given by the equation \\[ \\hat{S}(t) = \\begin{cases} 0.8, & \\text{if } t < 31\\\\ 0.5, & \\text{if } 31 \\le t < 77\\\\ 0.22 & \\text{if } 77 \\le t \\end{cases} \\] Your answer should look something like Figure 11.9. We can draw this plot, or even engineer data that will generate the required plot… plot(NULL, xlim = c(0, 100), ylim = c(0, 1), ylab = "Estimated Probability of Survival", xlab = "Time in Days" ) lines( c(0, 31, 31, 77, 77, 100), c(0.8, 0.8, 0.5, 0.5, 0.22, 0.22) ) 11.1.6 Question 6 This problem makes use of the data displayed in Figure 11.1. In completing this problem, you can refer to the observation times as \\(y_1,...,y_4\\). The ordering of these observation times can be seen from Figure 11.1; their exact values are not required. Report the values of \\(\\delta_1,...,\\delta_4\\), \\(K\\), \\(d_1,...,d_K\\), \\(r_1,...,r_K\\), and \\(q_1,...,q_K\\). The relevant notation is defined in Sections 11.1 and 11.3. \\(\\delta\\) values are: 1, 0, 1, 0. \\(K\\) is 2 \\(d\\) values are \\(y_3\\) and \\(y_1\\). \\(r\\) values are 4 and 2. \\(q\\) values are 1 and 1. Sketch the Kaplan-Meier survival curve corresponding to this data set. (You do not need to use any software to do this—you can sketch it by hand using the results obtained in (a).) plot(NULL, xlim = c(0, 350), ylim = c(0, 1), ylab = "Estimated Probability of Survival", xlab = "Time in Days" ) lines( c(0, 150, 150, 300, 300, 350), c(1, 1, 0.75, 0.75, 0.375, 0.375) ) x <- Surv(c(300, 350, 150, 250), c(1, 0, 1, 0)) Based on the survival curve estimated in (b), what is the probability that the event occurs within 200 days? What is the probability that the event does not occur within 310 days? 0.25 and 0.375. Write out an expression for the estimated survival curve from (b). 
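As a quick check of the curve sketched in (b) (my own addition, reusing the x object defined just above), survfit() reproduces the survival probabilities of 0.75 and 0.375; the expression requested in (d) follows below.

``` r
# Check the hand-drawn Kaplan-Meier curve from (b): survival drops to 0.75
# after the first event and to 0.375 after the second.
summary(survfit(x ~ 1))
```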
\\[ \\hat{S}(t) = \\begin{cases} 1 & \\text{if } t < y_3 \\\\ 0.75 & \\text{if } y_3 \\le t < y_1 \\\\ 0.375 & \\text{if } y_1 \\le t \\end{cases} \\] 11.1.7 Question 7 In this problem, we will derive (11.5) and (11.6), which are needed for the construction of the log-rank test statistic (11.8). Recall the notation in Table 11.1. Assume that there is no difference between the survival functions of the two groups. Then we can think of \\(q_{1k}\\) as the number of failures if we draw \\(r_{1k}\\) observations, without replacement, from a risk set of \\(r_k\\) observations that contains a total of \\(q_k\\) failures. Argue that \\(q_{1k}\\) follows a hypergeometric distribution. Write the parameters of this distribution in terms of \\(r_{1k}\\), \\(r_k\\), and \\(q_k\\). A hypergeometric distribution models sampling without replacement from a finite pool where each sample is a success or failure. This fits the situation here, where we have a finite number of samples in the risk set. The hypergeometric distribution is parameterized as \\(k\\) successes in \\(n\\) draws, without replacement, from a population of size \\(N\\) containing \\(K\\) objects with that feature. Mapping to our situation, \\(q_{1k}\\) is \\(k\\), \\(r_{1k}\\) is \\(n\\), \\(r_k\\) is \\(N\\) and \\(q_k\\) is \\(K\\). Given your previous answer, and the properties of the hypergeometric distribution, what are the mean and variance of \\(q_{1k}\\)? Compare your answer to (11.5) and (11.6). With the above parameterization, the mean (\\(nK/N\\)) is \\(r_{1k} q_k/r_k\\). The variance, \\(n \\frac{K}{N} \\frac{N-K}{N} \\frac{N-n}{N-1}\\), is \\[ r_{1k} \\frac{q_k}{r_k} \\frac{r_k-q_k}{r_k} \\frac{r_k - r_{1k}}{r_k - 1} \\] These are equivalent to (11.5) and (11.6). 11.1.8 Question 8 Recall that the survival function \\(S(t)\\), the hazard function \\(h(t)\\), and the density function \\(f(t)\\) are defined in (11.2), (11.9), and (11.11), respectively. Furthermore, define \\(F(t) = 1 − S(t)\\). Show that the following relationships hold: \\[ f(t) = dF(t)/dt \\\\ S(t) = \\exp\\left(-\\int_0^t h(u)du\\right) \\] If \\(F(t) = 1 - S(t)\\), then \\(F(t)\\) is the cumulative distribution function (cdf) for \\(t\\). For a continuous distribution, the cdf can be expressed as an integral of the probability density function (pdf), i.e. \\(F(t) = \\int_{-\\infty}^t f(u) \\, du\\). Equivalently, the derivative of the cdf is the pdf: \\(f(t) = \\frac{d F(t)}{dt}\\). Then, \\(h(t) = \\frac{f(t)}{S(t)} = \\frac{dF(t)/dt}{S(t)} = \\frac{-dS(t)/dt}{S(t)}\\). From basic calculus, this can be rewritten as \\(h(t) = -\\frac{d}{dt}\\log{S(t)}\\). Integrating both sides from 0 to \\(t\\) and then exponentiating gives the second identity. 11.1.9 Question 9 In this exercise, we will explore the consequences of assuming that the survival times follow an exponential distribution. Suppose that a survival time follows an \\(Exp(\\lambda)\\) distribution, so that its density function is \\(f(t) = \\lambda\\exp(−\\lambda t)\\). Using the relationships provided in Exercise 8, show that \\(S(t) = \\exp(-\\lambda t)\\). The cdf of an exponential distribution is \\(1 - \\exp(-\\lambda t)\\) and \\(S(t) = 1 - F(t)\\) where \\(F(t)\\) is the cdf. Hence, \\(S(t) = \\exp(-\\lambda t)\\). Now suppose that each of \\(n\\) independent survival times follows an \\(\\exp(\\lambda)\\) distribution. Write out an expression for the likelihood function (11.13).
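Stepping back briefly to Question 7, the mean and variance formulas can be checked by simulation (a sketch with invented values for \\(r_k\\), \\(r_{1k}\\) and \\(q_k\\)); the likelihood derivation then continues below.

``` r
# Simulate q_1k from the hypergeometric distribution described in Question 7
# and compare with the closed-form mean and variance (11.5) and (11.6).
set.seed(42)
r_k  <- 20   # total number at risk at time d_k
r_1k <- 8    # number at risk in group 1
q_k  <- 5    # total failures at time d_k

draws <- rhyper(1e6, m = q_k, n = r_k - q_k, k = r_1k)

c(mean(draws), r_1k * q_k / r_k)
c(var(draws),
  r_1k * (q_k / r_k) * ((r_k - q_k) / r_k) * ((r_k - r_1k) / (r_k - 1)))
```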
The reference to (11.13) gives us the following formula: \\[ L = \\prod_{i=1}^{n} h(y_i)^{\\delta_i} S(y_i) \\] (11.10) also gives us \\[ h(t) = \\frac{f(t)}{S(t)} \\] Plugging in the expressions from part (a), we get \\[\\begin{align*} h(t) &= \\frac{\\lambda \\exp(- \\lambda t)}{\\exp(- \\lambda t)} \\\\ &= \\lambda \\end{align*}\\] Using (11.13), we get the following loss expression: \\[ \\ell = \\prod_i \\lambda^{\\delta_i} e^{- \\lambda y_i} \\] Show that the maximum likelihood estimator for \\(\\lambda\\) is \\[ \\hat\\lambda = \\sum_{i=1}^n \\delta_i / \\sum_{i=1}^n y_i. \\] Take the log likelihood. \\[\\begin{align*} \\log \\ell &= \\sum_i \\log \\left( \\lambda^{\\delta_i} e^{- \\lambda y_i} \\right) \\\\ &= \\sum_i{\\delta_i\\log\\lambda - \\lambda y_i \\log e} \\\\ &= \\sum_i{\\delta_i\\log\\lambda - \\lambda y_i} \\\\ &= \\log\\lambda\\sum_i{\\delta_i} - \\lambda\\sum_i{y_i} \\end{align*}\\] Differentiating this expression with respect to \\(\\lambda\\) we get: \\[ \\frac{d \\log \\ell}{d \\lambda} = \\frac{\\sum_i{\\delta_i}}{\\lambda} - \\sum_i{y_i} \\] This function maximises when its gradient is 0. Solving for this gives a MLE of \\(\\hat\\lambda = \\sum_{i=1}^n \\delta_i / \\sum_{i=1}^n y_i\\). Use your answer to (c) to derive an estimator of the mean survival time. Hint: For (d), recall that the mean of an \\(Exp(\\lambda)\\) random variable is \\(1/\\lambda\\). Estimated mean survival would be \\(1/\\lambda\\) which given the above would be \\(\\sum_{i=1}^n y_i / \\sum_{i=1}^n \\delta_i\\), which can be thought of as the total observation time over the total number of deaths. 11.2 Applied 11.2.1 Question 10 This exercise focuses on the brain tumor data, which is included in the ISLR2 R library. Plot the Kaplan-Meier survival curve with ±1 standard error bands, using the survfit() function in the survival package. library(ISLR2) x <- Surv(BrainCancer$time, BrainCancer$status) plot(survfit(x ~ 1), xlab = "Months", ylab = "Estimated Probability of Survival", col = "steelblue", conf.int = 0.67 ) Draw a bootstrap sample of size \\(n = 88\\) from the pairs (\\(y_i\\), \\(\\delta_i\\)), and compute the resulting Kaplan-Meier survival curve. Repeat this process \\(B = 200\\) times. Use the results to obtain an estimate of the standard error of the Kaplan-Meier survival curve at each timepoint. Compare this to the standard errors obtained in (a). plot(survfit(x ~ 1), xlab = "Months", ylab = "Estimated Probability of Survival", col = "steelblue", conf.int = 0.67 ) fit <- survfit(x ~ 1) dat <- tibble(time = c(0, fit$time)) for (i in 1:200) { y <- survfit(sample(x, 88, replace = TRUE) ~ 1) y <- tibble(time = c(0, y$time), "s{i}" := c(1, y$surv)) dat <- left_join(dat, y, by = "time") } res <- fill(dat, starts_with("s")) |> rowwise() |> transmute(sd = sd(c_across(starts_with("s")))) se <- res$sd[2:nrow(res)] lines(fit$time, fit$surv - se, lty = 2, col = "red") lines(fit$time, fit$surv + se, lty = 2, col = "red") Fit a Cox proportional hazards model that uses all of the predictors to predict survival. Summarize the main findings. 
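The estimators in Question 9(c) and (d) can also be checked by simulation (a sketch with parameters of my own choosing); the Cox model fit then follows below.

``` r
# Simulate exponential survival times with independent exponential censoring
# and check that sum(delta) / sum(y) recovers lambda (and its inverse the mean).
set.seed(42)
lambda <- 0.5
event_time <- rexp(10000, rate = lambda)      # true event times
cens_time  <- rexp(10000, rate = 0.2)         # independent censoring times
y     <- pmin(event_time, cens_time)          # observed times
delta <- as.numeric(event_time <= cens_time)  # 1 = event observed

sum(delta) / sum(y)   # should be close to lambda = 0.5
sum(y) / sum(delta)   # estimated mean survival, close to 1 / lambda = 2
```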
fit <- coxph(Surv(time, status) ~ sex + diagnosis + loc + ki + gtv + stereo, data = BrainCancer) fit ## Call: ## coxph(formula = Surv(time, status) ~ sex + diagnosis + loc + ## ki + gtv + stereo, data = BrainCancer) ## ## coef exp(coef) se(coef) z p ## sexMale 0.18375 1.20171 0.36036 0.510 0.61012 ## diagnosisLG glioma 0.91502 2.49683 0.63816 1.434 0.15161 ## diagnosisHG glioma 2.15457 8.62414 0.45052 4.782 1.73e-06 ## diagnosisOther 0.88570 2.42467 0.65787 1.346 0.17821 ## locSupratentorial 0.44119 1.55456 0.70367 0.627 0.53066 ## ki -0.05496 0.94653 0.01831 -3.001 0.00269 ## gtv 0.03429 1.03489 0.02233 1.536 0.12466 ## stereoSRT 0.17778 1.19456 0.60158 0.296 0.76760 ## ## Likelihood ratio test=41.37 on 8 df, p=1.776e-06 ## n= 87, number of events= 35 ## (1 observation deleted due to missingness) diagnosisHG and ki are highly significant. Stratify the data by the value of ki. (Since only one observation has ki=40, you can group that observation together with the observations that have ki=60.) Plot Kaplan-Meier survival curves for each of the five strata, adjusted for the other predictors. To adjust for other predictors, we fit a model that includes those predictors and use this model to predict new, artificial, data where we allow ki to take each possible value, but set the other predictors to be the mode or mean of the other predictors. library(ggfortify) modaldata <- data.frame( sex = rep("Female", 5), diagnosis = rep("Meningioma", 5), loc = rep("Supratentorial", 5), ki = c(60, 70, 80, 90, 100), gtv = rep(mean(BrainCancer$gtv), 5), stereo = rep("SRT", 5) ) survplots <- survfit(fit, newdata = modaldata) plot(survplots, xlab = "Months", ylab = "Survival Probability", col = 2:6) legend("bottomleft", c("60", "70", "80", "90", "100"), col = 2:6, lty = 1) 11.2.2 Question 11 This example makes use of the data in Table 11.4. Create two groups of observations. In Group 1, \\(X < 2\\), whereas in Group 2, \\(X \\ge 2\\). Plot the Kaplan-Meier survival curves corresponding to the two groups. Be sure to label the curves so that it is clear which curve corresponds to which group. By eye, does there appear to be a difference between the two groups’ survival curves? x <- split(Surv(table_data$Y, table_data$D), table_data$X < 2) plot(NULL, xlim = c(0, 100), ylim = c(0, 1), ylab = "Survival Probability") lines(survfit(x[[1]] ~ 1), conf.int = FALSE, col = 2) lines(survfit(x[[2]] ~ 1), conf.int = FALSE, col = 3) legend("bottomleft", c(">= 2", "<2"), col = 2:3, lty = 1) There does not appear to be any difference between the curves. Fit Cox’s proportional hazards model, using the group indicator as a covariate. What is the estimated coefficient? Write a sentence providing the interpretation of this coefficient, in terms of the hazard or the instantaneous probability of the event. Is there evidence that the true coefficient value is non-zero? fit <- coxph(Surv(Y, D) ~ X < 2, data = table_data) fit ## Call: ## coxph(formula = Surv(Y, D) ~ X < 2, data = table_data) ## ## coef exp(coef) se(coef) z p ## X < 2TRUE 0.3401 1.4051 1.2359 0.275 0.783 ## ## Likelihood ratio test=0.08 on 1 df, p=0.7797 ## n= 6, number of events= 3 The coefficient is \\(0.3401\\). This implies a slightly increased hazard when \\(X < 2\\) but it is not significantly different to zero (P = 0.8). Recall from Section 11.5.2 that in the case of a single binary covariate, the log-rank test statistic should be identical to the score statistic for the Cox model. 
Conduct a log-rank test to determine whether there is a difference between the survival curves for the two groups. How does the p-value for the log-rank test statistic compare to the \\(p\\)-value for the score statistic for the Cox model from (b)? summary(fit)$sctest ## test df pvalue ## 0.07644306 1.00000000 0.78217683 survdiff(Surv(Y, D) ~ X < 2, data = table_data)$chisq ## [1] 0.07644306 They are identical. "],["unsupervised-learning.html", "12 Unsupervised Learning 12.1 Conceptual 12.2 Applied", " 12 Unsupervised Learning 12.1 Conceptual 12.1.1 Question 1 This problem involves the \\(K\\)-means clustering algorithm. Prove (12.18). 12.18 is: \\[ \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k} \\sum_{j=1}^p (x_{ij} - x_{i'j})^2 = 2 \\sum_{i \\in C_k} \\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\] where \\[\\bar{x}_{kj} = \\frac{1}{|C_k|}\\sum_{i \\in C_k} x_{ij}\\] On the left hand side we compute the difference between each observation (indexed by \\(i\\) and \\(i'\\)). In the second we compute the difference between each observation and the mean. Intuitively this identity is clear (the factor of 2 is present because we calculate the difference between each pair twice). However, to prove. Note first that, \\[\\begin{align} (x_{ij} - x_{i'j})^2 = & ((x_{ij} - \\bar{x}_{kj}) - (x_{i'j} - \\bar{x}_{kj}))^2 \\\\ = & (x_{ij} - \\bar{x}_{kj})^2 - 2(x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + (x_{i'j} - \\bar{x}_{kj})^2 \\end{align}\\] Note that the first term is independent of \\(i'\\) and the last is independent of \\(i\\). Therefore, 10.12 can be written as: \\[\\begin{align} \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k} \\sum_{j=1}^p (x_{ij} - x_{i'j})^2 = & \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 - \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p 2(x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p (x_{i'j} - \\bar{x}_{kj})^2 \\\\ = & \\frac{|C_k|}{|C_k|}\\sum_{i \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 - \\frac{2}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + \\frac{|C_k|}{|C_k|}\\sum_{i \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\\\ = & 2 \\sum_{i \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\end{align}\\] Note that we can drop the term containing \\((x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj})\\) since this is 0 when summed over combinations of \\(i\\) and \\(i'\\) for a given \\(j\\). On the basis of this identity, argue that the \\(K\\)-means clustering algorithm (Algorithm 12.2) decreases the objective (12.17) at each iteration. Equation 10.12 demonstrates that the euclidean distance between each possible pair of samples can be related to the difference from each sample to the mean of the cluster. The K-means algorithm works by minimizing the euclidean distance to each centroid, thus also minimizes the within-cluster variance. 12.1.2 Question 2 Suppose that we have four observations, for which we compute a dissimilarity matrix, given by \\[\\begin{bmatrix} & 0.3 & 0.4 & 0.7 \\\\ 0.3 & & 0.5 & 0.8 \\\\ 0.4 & 0.5 & & 0.45 \\\\ 0.7 & 0.8 & 0.45 & \\\\ \\end{bmatrix}\\] For instance, the dissimilarity between the first and second observations is 0.3, and the dissimilarity between the second and fourth observations is 0.8. On the basis of this dissimilarity matrix, sketch the dendrogram that results from hierarchically clustering these four observations using complete linkage. 
Be sure to indicate on the plot the height at which each fusion occurs, as well as the observations corresponding to each leaf in the dendrogram. m <- matrix(c(0, 0.3, 0.4, 0.7, 0.3, 0, 0.5, 0.8, 0.4, 0.5, 0., 0.45, 0.7, 0.8, 0.45, 0), ncol = 4) c1 <- hclust(as.dist(m), method = "complete") plot(c1) Repeat (a), this time using single linkage clustering. c2 <- hclust(as.dist(m), method = "single") plot(c2) Suppose that we cut the dendrogram obtained in (a) such that two clusters result. Which observations are in each cluster? table(1:4, cutree(c1, 2)) ## ## 1 2 ## 1 1 0 ## 2 1 0 ## 3 0 1 ## 4 0 1 Suppose that we cut the dendrogram obtained in (b) such that two clusters result. Which observations are in each cluster? table(1:4, cutree(c2, 2)) ## ## 1 2 ## 1 1 0 ## 2 1 0 ## 3 1 0 ## 4 0 1 It is mentioned in the chapter that at each fusion in the dendrogram, the position of the two clusters being fused can be swapped without changing the meaning of the dendrogram. Draw a dendrogram that is equivalent to the dendrogram in (a), for which two or more of the leaves are repositioned, but for which the meaning of the dendrogram is the same. plot(c1, labels = c(2, 1, 3, 4)) 12.1.3 Question 3 In this problem, you will perform \\(K\\)-means clustering manually, with \\(K = 2\\), on a small example with \\(n = 6\\) observations and \\(p = 2\\) features. The observations are as follows. Obs. \\(X_1\\) \\(X_2\\) 1 1 4 2 1 3 3 0 4 4 5 1 5 6 2 6 4 0 Plot the observations. library(ggplot2) d <- data.frame( x1 = c(1, 1, 0, 5, 6, 4), x2 = c(4, 3, 4, 1, 2, 0) ) ggplot(d, aes(x = x1, y = x2)) + geom_point() Randomly assign a cluster label to each observation. You can use the sample() command in R to do this. Report the cluster labels for each observation. set.seed(42) d$cluster <- sample(c(1, 2), size = nrow(d), replace = TRUE) Compute the centroid for each cluster. centroids <- sapply(c(1,2), function(i) colMeans(d[d$cluster == i, 1:2])) Assign each observation to the centroid to which it is closest, in terms of Euclidean distance. Report the cluster labels for each observation. dist <- sapply(1:2, function(i) { sqrt((d$x1 - centroids[1, i])^2 + (d$x2 - centroids[2, i])^2) }) d$cluster <- apply(dist, 1, which.min) Repeat (c) and (d) until the answers obtained stop changing. centroids <- sapply(c(1,2), function(i) colMeans(d[d$cluster == i, 1:2])) dist <- sapply(1:2, function(i) { sqrt((d$x1 - centroids[1, i])^2 + (d$x2 - centroids[2, i])^2) }) d$cluster <- apply(dist, 1, which.min) In this case, we get stable labels after the first iteration. In your plot from (a), color the observations according to the cluster labels obtained. ggplot(d, aes(x = x1, y = x2, color = factor(cluster))) + geom_point() 12.1.4 Question 4 Suppose that for a particular data set, we perform hierarchical clustering using single linkage and using complete linkage. We obtain two dendrograms. At a certain point on the single linkage dendrogram, the clusters {1, 2, 3} and {4, 5} fuse. On the complete linkage dendrogram, the clusters {1, 2, 3} and {4, 5} also fuse at a certain point. Which fusion will occur higher on the tree, or will they fuse at the same height, or is there not enough information to tell? The complete linkage fusion will likely be higher in the tree since single linkage is defined as being the minimum distance between two clusters. However, there is a chance that they could be at the same height (so technically there is not enough information to tell). 
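This can be illustrated numerically (my own sketch, reusing the dissimilarity matrix from Question 2 rather than data from the book): the fusions occur at heights 0.3, 0.4 and 0.45 under single linkage but at 0.3, 0.45 and 0.8 under complete linkage, so the final fusion sits higher under complete linkage.

``` r
# Compare fusion heights under single and complete linkage for the
# dissimilarity matrix used in Question 2.
dm <- as.dist(matrix(c(0, 0.3, 0.4, 0.7,
                       0.3, 0, 0.5, 0.8,
                       0.4, 0.5, 0, 0.45,
                       0.7, 0.8, 0.45, 0), ncol = 4))
hclust(dm, method = "single")$height
hclust(dm, method = "complete")$height
```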
At a certain point on the single linkage dendrogram, the clusters {5} and {6} fuse. On the complete linkage dendrogram, the clusters {5} and {6} also fuse at a certain point. Which fusion will occur higher on the tree, or will they fuse at the same height, or is there not enough information to tell? They will fuse at the same height (the algorithm for calculating distance is the same when the clusters are of size 1). 12.1.5 Question 5 In words, describe the results that you would expect if you performed \\(K\\)-means clustering of the eight shoppers in Figure 12.16, on the basis of their sock and computer purchases, with \\(K = 2\\). Give three answers, one for each of the variable scalings displayed. Explain. In cases where variables are scaled we would expect clusters to correspond to whether or not the retainer sold a computer. In the first case (raw numbers of items sold), we would expect clusters to represent low vs high numbers of sock purchases. To test, we can run the analysis in R: set.seed(42) dat <- data.frame( socks = c(8, 11, 7, 6, 5, 6, 7, 8), computers = c(0, 0, 0, 0, 1, 1, 1, 1) ) kmeans(dat, 2)$cluster ## [1] 1 1 2 2 2 2 2 1 kmeans(scale(dat), 2)$cluster ## [1] 1 1 1 1 2 2 2 2 dat$computers <- dat$computers * 2000 kmeans(dat, 2)$cluster ## [1] 1 1 1 1 2 2 2 2 12.1.6 Question 6 We saw in Section 12.2.2 that the principal component loading and score vectors provide an approximation to a matrix, in the sense of (12.5). Specifically, the principal component score and loading vectors solve the optimization problem given in (12.6). Now, suppose that the M principal component score vectors zim, \\(m = 1,...,M\\), are known. Using (12.6), explain that the first \\(M\\) principal component loading vectors \\(\\phi_{jm}\\), \\(m = 1,...,M\\), can be obtaining by performing \\(M\\) separate least squares linear regressions. In each regression, the principal component score vectors are the predictors, and one of the features of the data matrix is the response. 12.2 Applied 12.2.1 Question 7 In the chapter, we mentioned the use of correlation-based distance and Euclidean distance as dissimilarity measures for hierarchical clustering. It turns out that these two measures are almost equivalent: if each observation has been centered to have mean zero and standard deviation one, and if we let \\(r_{ij}\\) denote the correlation between the \\(i\\)th and \\(j\\)th observations, then the quantity \\(1 − r_{ij}\\) is proportional to the squared Euclidean distance between the ith and jth observations. On the USArrests data, show that this proportionality holds. Hint: The Euclidean distance can be calculated using the dist() function, and correlations can be calculated using the cor() function. dat <- t(scale(t(USArrests))) d1 <- dist(dat)^2 d2 <- as.dist(1 - cor(t(dat))) plot(d1, d2) 12.2.2 Question 8 In Section 12.2.3, a formula for calculating PVE was given in Equation 12.10. We also saw that the PVE can be obtained using the sdev output of the prcomp() function. On the USArrests data, calculate PVE in two ways: Using the sdev output of the prcomp() function, as was done in Section 12.2.3. pr <- prcomp(USArrests, scale = TRUE) pr$sdev^2 / sum(pr$sdev^2) ## [1] 0.62006039 0.24744129 0.08914080 0.04335752 By applying Equation 12.10 directly. That is, use the prcomp() function to compute the principal component loadings. Then, use those loadings in Equation 12.10 to obtain the PVE. These two approaches should give the same results. 
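Looking back at Question 6, the claim can be illustrated with a short sketch (my own, using USArrests purely as an example data set): regressing each centred feature on the first \\(M\\) score vectors, with no intercept, recovers the corresponding loadings. The direct PVE calculation for part (b) then follows below.

``` r
# For each feature (column) of the centred data, regress it on the first M
# score vectors (no intercept); the coefficients equal the loadings phi_jm.
Xc <- scale(USArrests)           # centred and scaled data matrix
pc <- prcomp(Xc)
M  <- 2
Z  <- pc$x[, 1:M]                # the "known" score vectors

phi_hat <- t(sapply(1:ncol(Xc), function(j) coef(lm(Xc[, j] ~ Z - 1))))
rownames(phi_hat) <- colnames(Xc)

phi_hat                          # least squares estimates of the loadings
pc$rotation[, 1:M]               # loadings from prcomp(), for comparison
```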
colSums(pr$x^2) / sum(colSums(scale(USArrests)^2)) ## PC1 PC2 PC3 PC4 ## 0.62006039 0.24744129 0.08914080 0.04335752 Hint: You will only obtain the same results in (a) and (b) if the same data is used in both cases. For instance, if in (a) you performed prcomp() using centered and scaled variables, then you must center and scale the variables before applying Equation 12.10 in (b). 12.2.3 Question 9 Consider the USArrests data. We will now perform hierarchical clustering on the states. Using hierarchical clustering with complete linkage and Euclidean distance, cluster the states. set.seed(42) hc <- hclust(dist(USArrests), method = "complete") Cut the dendrogram at a height that results in three distinct clusters. Which states belong to which clusters? ct <- cutree(hc, 3) sapply(1:3, function(i) names(ct)[ct == i]) ## [[1]] ## [1] "Alabama" "Alaska" "Arizona" "California" ## [5] "Delaware" "Florida" "Illinois" "Louisiana" ## [9] "Maryland" "Michigan" "Mississippi" "Nevada" ## [13] "New Mexico" "New York" "North Carolina" "South Carolina" ## ## [[2]] ## [1] "Arkansas" "Colorado" "Georgia" "Massachusetts" ## [5] "Missouri" "New Jersey" "Oklahoma" "Oregon" ## [9] "Rhode Island" "Tennessee" "Texas" "Virginia" ## [13] "Washington" "Wyoming" ## ## [[3]] ## [1] "Connecticut" "Hawaii" "Idaho" "Indiana" ## [5] "Iowa" "Kansas" "Kentucky" "Maine" ## [9] "Minnesota" "Montana" "Nebraska" "New Hampshire" ## [13] "North Dakota" "Ohio" "Pennsylvania" "South Dakota" ## [17] "Utah" "Vermont" "West Virginia" "Wisconsin" Hierarchically cluster the states using complete linkage and Euclidean distance, after scaling the variables to have standard deviation one. hc2 <- hclust(dist(scale(USArrests)), method = "complete") What effect does scaling the variables have on the hierarchical clustering obtained? In your opinion, should the variables be scaled before the inter-observation dissimilarities are computed? Provide a justification for your answer. ct <- cutree(hc, 3) sapply(1:3, function(i) names(ct)[ct == i]) ## [[1]] ## [1] "Alabama" "Alaska" "Arizona" "California" ## [5] "Delaware" "Florida" "Illinois" "Louisiana" ## [9] "Maryland" "Michigan" "Mississippi" "Nevada" ## [13] "New Mexico" "New York" "North Carolina" "South Carolina" ## ## [[2]] ## [1] "Arkansas" "Colorado" "Georgia" "Massachusetts" ## [5] "Missouri" "New Jersey" "Oklahoma" "Oregon" ## [9] "Rhode Island" "Tennessee" "Texas" "Virginia" ## [13] "Washington" "Wyoming" ## ## [[3]] ## [1] "Connecticut" "Hawaii" "Idaho" "Indiana" ## [5] "Iowa" "Kansas" "Kentucky" "Maine" ## [9] "Minnesota" "Montana" "Nebraska" "New Hampshire" ## [13] "North Dakota" "Ohio" "Pennsylvania" "South Dakota" ## [17] "Utah" "Vermont" "West Virginia" "Wisconsin" Scaling results in different clusters and the choice of whether to scale or not depends on the data in question. In this case, the variables are: Murder numeric Murder arrests (per 100,000) Assault numeric Assault arrests (per 100,000) UrbanPop numeric Percent urban population Rape numeric Rape arrests (per 100,000) These variables are not naturally on the same unit and the units involved are somewhat arbitrary (so for example, Murder could be measured per 1 million rather than per 100,000) so in this case I would argue the data should be scaled. 12.2.4 Question 10 In this problem, you will generate simulated data, and then perform PCA and \\(K\\)-means clustering on the data. Generate a simulated data set with 20 observations in each of three classes (i.e. 60 observations total), and 50 variables. 
Hint: There are a number of functions in R that you can use to generate data. One example is the rnorm() function; runif() is another option. Be sure to add a mean shift to the observations in each class so that there are three distinct classes. set.seed(42) data <- matrix(rnorm(60 * 50), ncol = 50) classes <- rep(c("A", "B", "C"), each = 20) dimnames(data) <- list(classes, paste0("v", 1:50)) data[classes == "B", 1:10] <- data[classes == "B", 1:10] + 1.2 data[classes == "C", 5:30] <- data[classes == "C", 5:30] + 1 Perform PCA on the 60 observations and plot the first two principal component score vectors. Use a different color to indicate the observations in each of the three classes. If the three classes appear separated in this plot, then continue on to part (c). If not, then return to part (a) and modify the simulation so that there is greater separation between the three classes. Do not continue to part (c) until the three classes show at least some separation in the first two principal component score vectors. pca <- prcomp(data) ggplot(data.frame(Class = classes, PC1 = pca$x[, 1], PC2 = pca$x[, 2]), aes(x = PC1, y = PC2, col = Class)) + geom_point() Perform \\(K\\)-means clustering of the observations with \\(K = 3\\). How well do the clusters that you obtained in \\(K\\)-means clustering compare to the true class labels? Hint: You can use the table() function in R to compare the true class labels to the class labels obtained by clustering. Be careful how you interpret the results: \\(K\\)-means clustering will arbitrarily number the clusters, so you cannot simply check whether the true class labels and clustering labels are the same. km <- kmeans(data, 3)$cluster table(km, names(km)) ## ## km A B C ## 1 1 20 1 ## 2 0 0 19 ## 3 19 0 0 \\(K\\)-means separates out the clusters nearly perfectly. Perform \\(K\\)-means clustering with \\(K = 2\\). Describe your results. km <- kmeans(data, 2)$cluster table(km, names(km)) ## ## km A B C ## 1 18 20 1 ## 2 2 0 19 \\(K\\)-means effectively assigns class C to cluster 2, while cluster 1 is a mix of classes A and B. Now perform \\(K\\)-means clustering with \\(K = 4\\), and describe your results. km <- kmeans(data, 4)$cluster table(km, names(km)) ## ## km A B C ## 1 0 7 2 ## 2 18 1 0 ## 3 0 0 18 ## 4 2 12 0 \\(K\\)-means effectively assigns class A to cluster 2 and class C to cluster 3, while class B is split across clusters 1 and 4. Now perform \\(K\\)-means clustering with \\(K = 3\\) on the first two principal component score vectors, rather than on the raw data. That is, perform \\(K\\)-means clustering on the \\(60 \\times 2\\) matrix of which the first column is the first principal component score vector, and the second column is the second principal component score vector. Comment on the results. km <- kmeans(pca$x[, 1:2], 3)$cluster table(km, names(km)) ## ## km A B C ## 1 0 20 2 ## 2 20 0 0 ## 3 0 0 18 \\(K\\)-means again separates out the clusters nearly perfectly. Using the scale() function, perform \\(K\\)-means clustering with \\(K = 3\\) on the data after scaling each variable to have standard deviation one. How do these results compare to those obtained in (b)? Explain. km <- kmeans(scale(data), 3)$cluster table(km, names(km)) ## ## km A B C ## 1 1 20 1 ## 2 19 0 0 ## 3 0 0 19 In this case, scaling makes little difference: the cross-tabulation is essentially the same as that obtained with \\(K = 3\\) on the raw data, up to a relabelling of the clusters. 12.2.5 Question 11 Write an R function to perform matrix completion as in Algorithm 12.1, and as outlined in Section 12.5.2.
In each iteration, the function should keep track of the relative error, as well as the iteration count. Iterations should continue until the relative error is small enough or until some maximum number of iterations is reached (set a default value for this maximum number). Furthermore, there should be an option to print out the progress in each iteration. Test your function on the Boston data. First, standardize the features to have mean zero and standard deviation one using the scale() function. Run an experiment where you randomly leave out an increasing (and nested) number of observations from 5% to 30%, in steps of 5%. Apply Algorithm 12.1 with \\(M = 1,2,...,8\\). Display the approximation error as a function of the fraction of observations that are missing, and the value of \\(M\\), averaged over 10 repetitions of the experiment. 12.2.6 Question 12 In Section 12.5.2, Algorithm 12.1 was implemented using the svd() function. However, given the connection between the svd() function and the prcomp() function highlighted in the lab, we could have instead implemented the algorithm using prcomp(). Write a function to implement Algorithm 12.1 that makes use of prcomp() rather than svd(). 12.2.7 Question 13 On the book website, www.StatLearning.com, there is a gene expression data set (Ch12Ex13.csv) that consists of 40 tissue samples with measurements on 1,000 genes. The first 20 samples are from healthy patients, while the second 20 are from a diseased group. Load in the data using read.csv(). You will need to select header = F. data <- read.csv("data/Ch12Ex13.csv", header = FALSE) colnames(data) <- c(paste0("H", 1:20), paste0("D", 1:20)) Apply hierarchical clustering to the samples using correlation-based distance, and plot the dendrogram. Do the genes separate the samples into the two groups? Do your results depend on the type of linkage used? hc.complete <- hclust(as.dist(1 - cor(data)), method = "complete") plot(hc.complete) hc.complete <- hclust(as.dist(1 - cor(data)), method = "average") plot(hc.complete) hc.complete <- hclust(as.dist(1 - cor(data)), method = "single") plot(hc.complete) Yes the samples clearly separate into the two groups, although the results depend somewhat on the linkage method used. In the case of average clustering, the disease samples all fall within a subset of the healthy samples. Your collaborator wants to know which genes differ the most across the two groups. Suggest a way to answer this question, and apply it here. This is probably best achieved with a supervised approach. A simple method would be to determine which genes show the most significant differences between the groups by applying a t-test to each group. We can then select those with a FDR adjusted p-value less than some given threshold (e.g. 0.05). 
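For Questions 11 and 12, a minimal sketch of Algorithm 12.1 is given here (the function names, default tolerance and iteration cap are my own choices, not from the lab); the t-tests for the gene-expression question then continue below.

``` r
# Minimal sketch of Algorithm 12.1 (Question 11): iterative SVD imputation.
complete_matrix <- function(X, M = 1, max_iter = 500, tol = 1e-7,
                            trace = FALSE) {
  ismiss <- is.na(X)
  xbar <- colMeans(X, na.rm = TRUE)
  Xhat <- X
  Xhat[ismiss] <- xbar[col(X)[ismiss]]    # step 1: fill with column means
  mss0 <- mean(X[!ismiss]^2)
  mss_old <- mss0
  for (iter in seq_len(max_iter)) {
    s <- svd(Xhat)                        # step 2(a): rank-M approximation
    Xapp <- s$u[, 1:M, drop = FALSE] %*%
      (s$d[1:M] * t(s$v[, 1:M, drop = FALSE]))
    Xhat[ismiss] <- Xapp[ismiss]          # step 2(b): update missing entries
    mss <- mean((X[!ismiss] - Xapp[!ismiss])^2)   # step 2(c): observed MSE
    rel_err <- (mss_old - mss) / mss0
    mss_old <- mss
    if (trace) message(sprintf("iter %d, relative error %.2e", iter, rel_err))
    if (rel_err < tol) break              # stop once the decrease is tiny
  }
  list(Xhat = Xhat, iterations = iter)
}

# Question 12 variant: the same rank-M step written with prcomp() instead of
# svd() (center = FALSE so that the two decompositions coincide).
rank_M_prcomp <- function(Xhat, M = 1) {
  p <- prcomp(Xhat, center = FALSE, scale. = FALSE)
  p$x[, 1:M, drop = FALSE] %*% t(p$rotation[, 1:M, drop = FALSE])
}

# e.g. on the scaled Boston data with 10% of entries removed:
# library(ISLR2)
# B <- scale(Boston)
# B[sample(length(B), round(0.1 * length(B)))] <- NA
# str(complete_matrix(B, M = 3, trace = TRUE))
```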
class <- factor(rep(c("Healthy", "Diseased"), each = 20)) pvals <- p.adjust(apply(data, 1, function(v) t.test(v ~ class)$p.value)) which(pvals < 0.05) ## [1] 11 12 13 14 15 16 17 18 19 20 501 502 503 504 505 506 507 508 ## [19] 509 511 512 513 514 515 516 517 519 520 521 522 523 524 525 526 527 528 ## [37] 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 ## [55] 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 ## [73] 565 566 567 568 569 570 571 572 574 575 576 577 578 579 580 581 582 583 ## [91] 584 586 587 588 589 590 591 592 593 595 596 597 598 599 600 "],["multiple-testing.html", "13 Multiple Testing 13.1 Conceptual 13.2 Applied", " 13 Multiple Testing 13.1 Conceptual 13.1.1 Question 1 Suppose we test \\(m\\) null hypotheses, all of which are true. We control the Type I error for each null hypothesis at level \\(\\alpha\\). For each sub-problem, justify your answer. In total, how many Type I errors do we expect to make? We expect \\(m\\alpha\\). Suppose that the m tests that we perform are independent. What is the family-wise error rate associated with these m tests? Hint: If two events A and B are independent, then Pr(A ∩ B) = Pr(A) Pr(B). The family-wise error rate (FWER) is defined as the probability of making at least one Type I error. We can think of this as 1 minus the probability of no type I errors, which is: \\(1 - (1 - \\alpha)^m\\) Alternatively, for two tests this is: Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). For independent tests this is \\(\\alpha + \\alpha - \\alpha^2\\) Suppose that \\(m = 2\\), and that the p-values for the two tests are positively correlated, so that if one is small then the other will tend to be small as well, and if one is large then the other will tend to be large. How does the family-wise error rate associated with these \\(m = 2\\) tests qualitatively compare to the answer in (b) with \\(m = 2\\)? Hint: First, suppose that the two p-values are perfectly correlated. If they were perfectly correlated, we would effectively be performing a single test (thus FWER would be \\(alpha\\)). In the case when they are positively correlated therefore, we can expect the FWER to be less than in b. Alternatively, as above, FWEW = Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). For perfectly positively correlated tests Pr(A ∩ B) = \\(\\alpha\\), so the FWEW is \\(\\alpha\\) which is smaller than b. Suppose again that \\(m = 2\\), but that now the p-values for the two tests are negatively correlated, so that if one is large then the other will tend to be small. How does the family-wise error rate associated with these \\(m = 2\\) tests qualitatively compare to the answer in (b) with \\(m = 2\\)? Hint: First, suppose that whenever one p-value is less than \\(\\alpha\\), then the other will be greater than \\(\\alpha\\). In other words, we can never reject both null hypotheses. Taking the equation above, for two tests, FWEW = Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). In the case considered in the hint Pr(A ∩ B) = 0, so Pr(A ∪ B) = \\(2\\alpha\\), which is larger than b. 13.1.2 Question 2 Suppose that we test \\(m\\) hypotheses, and control the Type I error for each hypothesis at level \\(\\alpha\\). Assume that all \\(m\\) p-values are independent, and that all null hypotheses are true. Let the random variable \\(A_j\\) equal 1 if the \\(j\\)th null hypothesis is rejected, and 0 otherwise. What is the distribution of \\(A_j\\)? 
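Returning briefly to Question 1(b), the family-wise error rate formula can be checked by simulation (a sketch with an arbitrary choice of \\(\\alpha\\) and \\(m\\)); the distribution of \\(A_j\\) is addressed below.

``` r
# Under true nulls the p-values are Uniform(0, 1), so the simulated FWER for m
# independent tests should match 1 - (1 - alpha)^m.
set.seed(42)
alpha <- 0.05
m <- 10
any_rejection <- replicate(10000, any(runif(m) < alpha))
mean(any_rejection)   # simulated FWER
1 - (1 - alpha)^m     # theoretical value, about 0.40
```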
\\(A_j\\) follows a Bernoulli distribution: \\(A_j \\sim \\text{Bernoulli}(p)\\) What is the distribution of \\(\\sum_{j=1}^m A_j\\)? Follows a binomial distribution \\(\\sum_{j=1}^m A_j \\sim Bi(m, \\alpha)\\). What is the standard deviation of the number of Type I errors that we will make? The variance of a Binomial is \\(npq\\), so for this situation the standard deviation would be \\(\\sqrt{m \\alpha (1-\\alpha)}\\). 13.1.3 Question 3 Suppose we test \\(m\\) null hypotheses, and control the Type I error for the \\(j\\)th null hypothesis at level \\(\\alpha_j\\), for \\(j=1,...,m\\). Argue that the family-wise error rate is no greater than \\(\\sum_{j=1}^m \\alpha_j\\). \\(p(A \\cup B) = p(A) + p(B)\\) if \\(A\\) and \\(B\\) are independent or \\(p(A) + p(B) - p(A \\cap B)\\) when they are not. Since \\(p(A \\cap B)\\) must be positive, \\(p(A \\cup B) < p(A) + p(B)\\) (whether independent or not). Therefore, the probability of a type I error in any of \\(m\\) hypotheses can be no larger than the sum of the probabilities for each individual hypothesis (which is \\(\\alpha_j\\) for the \\(j\\)th). 13.1.4 Question 4 Suppose we test \\(m = 10\\) hypotheses, and obtain the p-values shown in Table 13.4. pvals <- c(0.0011, 0.031, 0.017, 0.32, 0.11, 0.90, 0.07, 0.006, 0.004, 0.0009) names(pvals) <- paste0("H", sprintf("%02d", 1:10)) Suppose that we wish to control the Type I error for each null hypothesis at level \\(\\alpha = 0.05\\). Which null hypotheses will we reject? names(which(pvals < 0.05)) ## [1] "H01" "H02" "H03" "H08" "H09" "H10" We reject all NULL hypotheses where \\(p < 0.05\\). Now suppose that we wish to control the FWER at level \\(\\alpha = 0.05\\). Which null hypotheses will we reject? Justify your answer. names(which(pvals < 0.05 / 10)) ## [1] "H01" "H09" "H10" We reject all NULL hypotheses where \\(p < 0.005\\). Now suppose that we wish to control the FDR at level \\(q = 0.05\\). Which null hypotheses will we reject? Justify your answer. names(which(p.adjust(pvals, "fdr") < 0.05)) ## [1] "H01" "H03" "H08" "H09" "H10" We reject all NULL hypotheses where \\(q < 0.05\\). Now suppose that we wish to control the FDR at level \\(q = 0.2\\). Which null hypotheses will we reject? Justify your answer. names(which(p.adjust(pvals, "fdr") < 0.2)) ## [1] "H01" "H02" "H03" "H05" "H07" "H08" "H09" "H10" We reject all NULL hypotheses where \\(q < 0.2\\). Of the null hypotheses rejected at FDR level \\(q = 0.2\\), approximately how many are false positives? Justify your answer. We expect 20% (in this case 2 out of the 8) rejections to be false (false positives). 13.1.5 Question 5 For this problem, you will make up p-values that lead to a certain number of rejections using the Bonferroni and Holm procedures. Give an example of five p-values (i.e. five numbers between 0 and 1 which, for the purpose of this problem, we will interpret as p-values) for which both Bonferroni’s method and Holm’s method reject exactly one null hypothesis when controlling the FWER at level 0.1. In this case, for Bonferroni, we need one p-value to be less than \\(0.1 / 5 = 0.02\\). and the others to be above. For Holm’s method, we need the most significant p-value to be below \\(0.1/(5 + 1 - 1) = 0.02\\) also. An example would be: 1, 1, 1, 1, 0.001. 
pvals <- c(1, 1, 1, 1, 0.001) sum(p.adjust(pvals, method = "bonferroni") < 0.1) ## [1] 1 sum(p.adjust(pvals, method = "holm") < 0.1) ## [1] 1 Now give an example of five p-values for which Bonferroni rejects one null hypothesis and Holm rejects more than one null hypothesis at level 0.1. An example would be: 1, 1, 1, 0.02, 0.001. For Holm’s method we reject two because \\(0.02 < 0.1/(5 + 1 - 2)\\). pvals <- c(1, 1, 1, 0.02, 0.001) sum(p.adjust(pvals, method = "bonferroni") < 0.1) ## [1] 1 sum(p.adjust(pvals, method = "holm") < 0.1) ## [1] 2 13.1.6 Question 6 For each of the three panels in Figure 13.3, answer the following questions: There are always: 8 positives (red) and 2 negatives (black). False / true positives are black / red points below the line respectively. False / true negatives are red / black points above the line respectively. Type I / II errors are the same as false positives and false negatives respectively. How many false positives, false negatives, true positives, true negatives, Type I errors, and Type II errors result from applying the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.05\\)? Panel FP FN TP TN Type I Type II 1 0 1 7 2 0 1 2 0 1 7 2 0 1 3 0 5 3 2 0 5 How many false positives, false negatives, true positives, true negatives, Type I errors, and Type II errors result from applying the Holm procedure to control the FWER at level \\(\\alpha = 0.05\\)? Panel FP FN TP TN Type I Type II 1 0 1 7 2 0 1 2 0 0 8 2 0 0 3 0 0 8 2 0 0 What is the false discovery rate associated with using the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.05\\)? False discovery rate is the expected ratio of false positives to total positives. There are never any false positives (black points below the line). There are always the same number of total positives (8). For panels 1, 2, 3 this would be 0/8, 0/8 and 0/8 respectively. What is the false discovery rate associated with using the Holm procedure to control the FWER at level \\(\\alpha = 0.05\\)? For panels 1, 2, 3 this would be 0/8, 0/8 and 0/8 respectively. How would the answers to (a) and (c) change if we instead used the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.001\\)? This would equate to a more stringent threshold. We would not call any more false positives, so the results would not change. 13.2 Applied 13.2.1 Question 7 This problem makes use of the Carseats dataset in the ISLR2 package. For each quantitative variable in the dataset besides Sales, fit a linear model to predict Sales using that quantitative variable. Report the p-values associated with the coefficients for the variables. That is, for each model of the form \\(Y = \\beta_0 + \\beta_1X + \\epsilon\\), report the p-value associated with the coefficient \\(\\beta_1\\). Here, \\(Y\\) represents Sales and \\(X\\) represents one of the other quantitative variables. library(ISLR2) nm <- c("CompPrice", "Income", "Advertising", "Population", "Price", "Age") pvals <- sapply(nm, function(n) { summary(lm(Carseats[["Sales"]] ~ Carseats[[n]]))$coef[2, 4] }) Suppose we control the Type I error at level \\(\\alpha = 0.05\\) for the p-values obtained in (a). Which null hypotheses do we reject? names(which(pvals < 0.05)) ## [1] "Income" "Advertising" "Price" "Age" Now suppose we control the FWER at level 0.05 for the p-values. Which null hypotheses do we reject? names(which(pvals < 0.05 / length(nm))) ## [1] "Income" "Advertising" "Price" "Age" Finally, suppose we control the FDR at level 0.2 for the p-values. 
Which null hypotheses do we reject? names(which(p.adjust(pvals, "fdr") < 0.2)) ## [1] "Income" "Advertising" "Price" "Age" 13.2.2 Question 8 In this problem, we will simulate data from \\(m = 100\\) fund managers. set.seed(1) n <- 20 m <- 100 X <- matrix(rnorm(n * m), ncol = m) set.seed(1) n <- 20 m <- 100 X <- matrix(rnorm(n * m), ncol = m) These data represent each fund manager’s percentage returns for each of \\(n = 20\\) months. We wish to test the null hypothesis that each fund manager’s percentage returns have population mean equal to zero. Notice that we simulated the data in such a way that each fund manager’s percentage returns do have population mean zero; in other words, all \\(m\\) null hypotheses are true. Conduct a one-sample \\(t\\)-test for each fund manager, and plot a histogram of the \\(p\\)-values obtained. pvals <- apply(X, 2, function(p) t.test(p)$p.value) hist(pvals, main = NULL) If we control Type I error for each null hypothesis at level \\(\\alpha = 0.05\\), then how many null hypotheses do we reject? sum(pvals < 0.05) ## [1] 4 If we control the FWER at level 0.05, then how many null hypotheses do we reject? sum(pvals < 0.05 / length(pvals)) ## [1] 0 If we control the FDR at level 0.05, then how many null hypotheses do we reject? sum(p.adjust(pvals, "fdr") < 0.05) ## [1] 0 Now suppose we “cherry-pick” the 10 fund managers who perform the best in our data. If we control the FWER for just these 10 fund managers at level 0.05, then how many null hypotheses do we reject? If we control the FDR for just these 10 fund managers at level 0.05, then how many null hypotheses do we reject? best <- order(apply(X, 2, sum), decreasing = TRUE)[1:10] sum(pvals[best] < 0.05 / 10) ## [1] 1 sum(p.adjust(pvals[best], "fdr") < 0.05) ## [1] 1 Explain why the analysis in (e) is misleading. Hint The standard approaches for controlling the FWER and FDR assume that all tested null hypotheses are adjusted for multiplicity, and that no “cherry-picking” of the smallest p-values has occurred. What goes wrong if we cherry-pick? This is misleading because we are not correctly accounting for all tests performed. Cherry picking the similar to repeating a test until by chance we find a significant result. "],["404.html", "Page not found", " Page not found The page you requested cannot be found (perhaps it was moved or renamed). You may want to try searching to find the page's new location, or use the table of contents to find the page you are looking for. "]] diff --git a/tree-based-methods.html b/tree-based-methods.html index 8fed720..22d1b9b 100644 --- a/tree-based-methods.html +++ b/tree-based-methods.html @@ -792,7 +792,7 @@

8.2.2 Question 8
carseats_mse(bartfit)
## [1] 1.631285
@@ -1220,7 +1220,7 @@

8.2.6 Question 12
mse <- function(model, ...) {
   pred <- predict(model, College[test, ], ...)

10.2.2 Question 7
plot(history, smooth = FALSE)

npred <- predict(nn, x[testid, ])
-
## 6/6 - 0s - 61ms/epoch - 10ms/step
+
## 6/6 - 0s - 55ms/epoch - 9ms/step
mean(abs(y[testid] - npred))
-
## [1] 2.219039
+
## [1] 2.269432

In this case, the neural network outperforms logistic regression having a lower absolute error rate on the test data.

@@ -779,18 +779,18 @@