From bea0ec26cccc42b9c408fe02154a88f49ddc1b1d Mon Sep 17 00:00:00 2001 From: Zongwei Zhou Date: Fri, 5 Feb 2021 09:57:11 -0700 Subject: [PATCH] Add downstream tasks --- competition/.DS_Store | Bin 6148 -> 0 bytes datasets/.DS_Store | Bin 6148 -> 0 bytes figures/.DS_Store | Bin 6148 -> 0 bytes figures/superbar/.DS_Store | Bin 6148 -> 0 bytes keras/downstream_tasks/BraTS/DataSet.py | 372 +++++++++++++ keras/downstream_tasks/BraTS/DataSet.pyc | Bin 0 -> 11286 bytes keras/downstream_tasks/BraTS/Patient.py | 88 ++++ keras/downstream_tasks/BraTS/Patient.pyc | Bin 0 -> 3179 bytes keras/downstream_tasks/BraTS/__init__.py | 14 + keras/downstream_tasks/BraTS/__init__.pyc | Bin 0 -> 409 bytes .../BraTS/__pycache__/DataSet.cpython-36.pyc | Bin 0 -> 9871 bytes .../BraTS/__pycache__/DataSet.cpython-37.pyc | Bin 0 -> 9869 bytes .../BraTS/__pycache__/Patient.cpython-36.pyc | Bin 0 -> 2817 bytes .../BraTS/__pycache__/Patient.cpython-37.pyc | Bin 0 -> 2815 bytes .../BraTS/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 426 bytes .../BraTS/__pycache__/__init__.cpython-37.pyc | Bin 0 -> 424 bytes .../__pycache__/load_utils.cpython-36.pyc | Bin 0 -> 3136 bytes .../__pycache__/load_utils.cpython-37.pyc | Bin 0 -> 3134 bytes .../__pycache__/modalities.cpython-36.pyc | Bin 0 -> 2280 bytes .../__pycache__/modalities.cpython-37.pyc | Bin 0 -> 2263 bytes .../__pycache__/structure.cpython-36.pyc | Bin 0 -> 1181 bytes .../__pycache__/structure.cpython-37.pyc | Bin 0 -> 1179 bytes keras/downstream_tasks/BraTS/load_utils.py | 93 ++++ keras/downstream_tasks/BraTS/load_utils.pyc | Bin 0 -> 3394 bytes keras/downstream_tasks/BraTS/modalities.py | 92 ++++ keras/downstream_tasks/BraTS/modalities.pyc | Bin 0 -> 2530 bytes keras/downstream_tasks/BraTS/structure.py | 50 ++ keras/downstream_tasks/BraTS/structure.pyc | Bin 0 -> 1337 bytes keras/downstream_tasks/BraTS/test.py | 32 ++ keras/downstream_tasks/BraTS/test_load.py | 31 ++ keras/downstream_tasks/config.py | 375 ++++++++++++++ 
keras/downstream_tasks/data/bms/fold_1.csv | 95 ++++ keras/downstream_tasks/data/bms/fold_2.csv | 95 ++++ keras/downstream_tasks/data/bms/fold_3.csv | 95 ++++ keras/downstream_tasks/data/ecc/test_cv-1.csv | 40 ++ keras/downstream_tasks/data/ecc/test_cv-2.csv | 40 ++ keras/downstream_tasks/data/ecc/test_cv-3.csv | 40 ++ .../downstream_tasks/data/ecc/train_cv-1.csv | 75 +++ .../downstream_tasks/data/ecc/train_cv-2.csv | 75 +++ .../downstream_tasks/data/ecc/train_cv-3.csv | 75 +++ keras/downstream_tasks/data/ecc/val_cv-1.csv | 5 + keras/downstream_tasks/data/ecc/val_cv-2.csv | 5 + keras/downstream_tasks/data/ecc/val_cv-3.csv | 5 + .../lung nodule segmentation.ipynb | 489 ++++++++++++++++++ keras/downstream_tasks/ncs_data.py | 32 ++ keras/downstream_tasks/unet3d.py | 123 +++++ keras/downstream_tasks/utils.py | 289 +++++++++++ 47 files changed, 2725 insertions(+) delete mode 100644 competition/.DS_Store delete mode 100644 datasets/.DS_Store delete mode 100644 figures/.DS_Store delete mode 100644 figures/superbar/.DS_Store create mode 100755 keras/downstream_tasks/BraTS/DataSet.py create mode 100755 keras/downstream_tasks/BraTS/DataSet.pyc create mode 100755 keras/downstream_tasks/BraTS/Patient.py create mode 100755 keras/downstream_tasks/BraTS/Patient.pyc create mode 100755 keras/downstream_tasks/BraTS/__init__.py create mode 100755 keras/downstream_tasks/BraTS/__init__.pyc create mode 100644 keras/downstream_tasks/BraTS/__pycache__/DataSet.cpython-36.pyc create mode 100755 keras/downstream_tasks/BraTS/__pycache__/DataSet.cpython-37.pyc create mode 100644 keras/downstream_tasks/BraTS/__pycache__/Patient.cpython-36.pyc create mode 100755 keras/downstream_tasks/BraTS/__pycache__/Patient.cpython-37.pyc create mode 100644 keras/downstream_tasks/BraTS/__pycache__/__init__.cpython-36.pyc create mode 100755 keras/downstream_tasks/BraTS/__pycache__/__init__.cpython-37.pyc create mode 100644 keras/downstream_tasks/BraTS/__pycache__/load_utils.cpython-36.pyc create mode 100755 
keras/downstream_tasks/BraTS/__pycache__/load_utils.cpython-37.pyc create mode 100644 keras/downstream_tasks/BraTS/__pycache__/modalities.cpython-36.pyc create mode 100755 keras/downstream_tasks/BraTS/__pycache__/modalities.cpython-37.pyc create mode 100644 keras/downstream_tasks/BraTS/__pycache__/structure.cpython-36.pyc create mode 100755 keras/downstream_tasks/BraTS/__pycache__/structure.cpython-37.pyc create mode 100755 keras/downstream_tasks/BraTS/load_utils.py create mode 100755 keras/downstream_tasks/BraTS/load_utils.pyc create mode 100755 keras/downstream_tasks/BraTS/modalities.py create mode 100755 keras/downstream_tasks/BraTS/modalities.pyc create mode 100755 keras/downstream_tasks/BraTS/structure.py create mode 100755 keras/downstream_tasks/BraTS/structure.pyc create mode 100755 keras/downstream_tasks/BraTS/test.py create mode 100755 keras/downstream_tasks/BraTS/test_load.py create mode 100755 keras/downstream_tasks/config.py create mode 100755 keras/downstream_tasks/data/bms/fold_1.csv create mode 100755 keras/downstream_tasks/data/bms/fold_2.csv create mode 100755 keras/downstream_tasks/data/bms/fold_3.csv create mode 100755 keras/downstream_tasks/data/ecc/test_cv-1.csv create mode 100755 keras/downstream_tasks/data/ecc/test_cv-2.csv create mode 100755 keras/downstream_tasks/data/ecc/test_cv-3.csv create mode 100755 keras/downstream_tasks/data/ecc/train_cv-1.csv create mode 100755 keras/downstream_tasks/data/ecc/train_cv-2.csv create mode 100755 keras/downstream_tasks/data/ecc/train_cv-3.csv create mode 100755 keras/downstream_tasks/data/ecc/val_cv-1.csv create mode 100755 keras/downstream_tasks/data/ecc/val_cv-2.csv create mode 100755 keras/downstream_tasks/data/ecc/val_cv-3.csv create mode 100644 keras/downstream_tasks/lung nodule segmentation.ipynb create mode 100755 keras/downstream_tasks/ncs_data.py create mode 100755 keras/downstream_tasks/unet3d.py create mode 100755 keras/downstream_tasks/utils.py diff --git a/competition/.DS_Store 
b/competition/.DS_Store deleted file mode 100644 index 58f8cc64eeb4bdbf21a169969e3bb85679ff4206..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKJ5Iwu5S;}VmS|E^?iF%_B{C<-1xT<2MJ%TPYD*McfCHq+9q2h3Z$3ncjFD)N zKyRek=bf47wO`@!h)7yIwsVo0h}3XJdC_HTnpYp$Swt#e(tB*~=F8i@+m0%y1In$W zm6bfm>I?oM*Y!=a-t=qK*H8Pm=iTf3)iJm0+i&yR`yW5s^iwlK1*iZOpaN9j?<#mCstb(@ H{DuM_OJ5-y)}~5N&DSo?HQ!o$=1Ovgq<d& zEz(>XMjs3W1Hr%t1F}CPG{Njx40Y>3rzHSTuF)#crIwJGH1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T06ijC{)3RHhnI8A; zv8|My6qpJ;?C>>p5cj9EqLm`=rX9kYsx)+=VUn$<_l>S$IUGpplLubS?J z=}nl`3FC0hi0RhMN?NPuwaL6TWmZD$Hf=fpRx_P3^MNsUjd^=@pXrR7pkdYlW+LyM zAx_QtI#K;Siu(Dm8=Q0(TS==KBz63v_59P%KK<+qZvD(Kxf;bM-PfbOyBM?*{LF83 zq;lFOt^RrxcNT)q@I<}7ay7K>I$*hjIJyyb0_%38RwwLVaieuNxf-}<hxiHm?n=SW6Y;D&GjR9YP|0f<%++Ms_-n$@fc;$d_oYfg~DTvd~P zr88Cm?6~w#ret27k_okJ;k5CZ^OL}%%>(eEtenGRe~sd{V{RMc<7kdaDr|04$-uh~ z|8r=>+(thtqb3<+Wn&~88aKDAG6d`Z3UlO?tV|ln1uIk8(6mYRNl$~KhNC1iY$~+H zbhG`O-JB-@m)oQR?I~CqYMqr5oBrD%XNdn@nNQFE+Qb-nYZ}MtGyu_1pGFLWH}(jgbBR zu$N7@DtnuQKq|{oKS)M7rx#-jsoCI)O=_hrY;usT9DZlrZ@1c4fg0zZZ*`#shs*Q* z^J| z_aS#O!A-D5nE3v2)Gg67PR%)n=YUhI*6^!;ZSsNC${zb2G(I$9Y#j3o5o?MRq$5G2 zVm^YPL857UxYqsyMcqyRH3!`=fsud-p?z@ea1BVqBqWiGRq`LxB1G=pRF)d5b4SHaxrFo$d5?B~Y;ua1Hm`Vt_mM^#z zVrDxUC~v}!U07IfZA2duu1jZ6fNe%Y_hzfV1HaF4aq6Z@Tjo0`BPM{QN1IfTL#cx< zvom2xVj%Q9Cy)qc;$E6Oj&2e}!iH-#6CX~SP4-sk3mhF+YAq|{XfeG83N>sr8-Cbp zT?y>&(Ica1!d}L&bv|)X!ba3!@(6YNiNV?IyllYUmzB<-% z6aUfN`q;o0!U7Xk-E2%2?s)O?lj8QO<_^5_h}oEuNuzYbr`VpBHh`ghJu;X2=gqBi z1V>o==(ZOxKbL{PGW0gg?NKv(XH49&qM-u<f~No{D-~Tf1!5Gm2 zbvm<{DUz3256*)N8FX9iV4loU14FH$pr!b*Ne*k%qZlTfoI>&FA-5PM7^C~Bn#kA^ z;i}h~ab_xqoI{mbW!{-_j#Q6l5=2^1E!&*?G>Tgf3mXteUVeavnLJT(am+>Mdfi-N 
zSOyW1xG{BK?_ptX9+sec);+Vdw9SSfAdaJ++X?M8^<|lDiPkm{wb;Uhoh&>L6CJSPOj`GaKWw{I8-!=z&^FKNei8m{}+&z#zatSH`|J!EJw=`cK4p5MnkCs6o)ztszT zUj)_n5eE*tXet$tVS>kdtXez}Cy3z@U3Qbf9=Rpn#AEENa&wwBZd@QIwH>2A39%o< zWJ{G9t4uX&^+vrh-WaPLXiU}~t&NGZOE0=@%#nXIu4nPsX~;pKIg10PqyLe)sDAm( z1rr&PynvxX{z8*P$Xu{rk{9fsl%{5i(7Y#$!O8V3sj*KcPiD=AG^euWj5MdS=6-4J z%bEwI*~pp)r8$!|A27io)7hUj4>Nnh3sjr*p+@i#Y25^!Aa_t5n)0VaR3(RkTc&-4 zHUe%46Ck3SRInMu>1Vtfwwg0;W~+Y=l3AFUuqrxmy$tspf z>B5D17vM#AMV~`>O6DHh>Md*%BdcZ18Ec`(*iw9wW^l^edM{#9LWNw1(f66D^o#gP zd_tsY9yrvi5=m`Da78p^eu1cX4WD>QhUS%9?O?!NbsmD|5Sy}1@8e6BL16h;uUyee z7ZoB*TJVCcrdjP4^HAs(!>Nm9R$KE%H0DLGt^inBA!ZMtr9s=1PRwj9(_2~oiN?<_ zP%WHvX-;V`++P!VUmgKbht0r9AGa*^k-i0rrQ`9UP~LOv;d$#2@H=#;hbmyHgOt1l zQ+j14i-44vHG*;B70JU2$y1o{ovk+=#P9If7f_hW6s{3dm18JpoulxYQzKJw`|^j& z1%Hn@$M~zg=~$b7b&GGkj*bM1EM608YoW81)I!0@BI%28Hhx09ZPhe&9s*> zymwT3FG=rc-a96}J?TXv78WbLtk7HLk=_L))>>O~TS%!fJ$O%~nNyad8f-4&de@Xh z&kFinq~{Q{<)HJplQ}r9X438{X7Z3*fi4AzC5ze1dOv)PbfH3Qh3&n;e%g9fqAF_C zA^uW@D4>N5mqG{%|9x02K!DcYFx67_%=jV15aXq%%~*_2?roZAXt2Y}0umrgX7e$! zkezyQkvs!%#;xUUMY0OUT)MtwM&K{5)lK}WaU2J$n{V|qSn0js&j6m2aV5)5l7t0U z^p?paW}dth8t1|MlkPAHyWvln)}+b6Zw@nO%^5dYU*Nh0(pz{Vm(lWN^PW9&FdnJ9 zJWqA}{dG{H=47Sr)`+dJ4H?;2NjxrmWFUqCHK$@ys5?}+iWeTqVWQoE;&O+vMGSd{6 zZEM-)E3G2}vM6>>S}yq)9wjXL8_=y$RBcNIpAPOupR9dzUrED#RCU6r(3X zC|r398+vcESY^Syndh_k35ynsH5P3ajFr8B#X5@JtKwZ(2$fKS6RZ1mOLjzI(PX_utTii&*3b*NA$n zG2bH{hB*DRyP*RmwGe>v3#R+a!JAAfZEGV&$W4mp_kc}Y~p1!Uxi-f37 zP8n|r>-R_sO3{lX1#P3g!=g}>-azI;@wF4fi-3YDezN zLvF`k!qh@0;IF?|gtACg(6D9`n4E|CK&HeoPKou{2%(^4S=*?JhmtEU-(Yw*&?T4P z;Q-NQ;N3)f_a={*r8jsIMw(>kfig4mQ!HR`%PAh0W@fhXgWQ8PFNSK5N$~tWUVKX8 zk(Rs17Z*F@5nqyn_#Q3n(fx?i5+sBy*oU|o(QYR zrb!ySeAt341(OeLBOm(^iVsWQ@Ib1u<|9e?^JAX!XJK4OVL!-)yu*j0ikDyJD^y*G zw_j}R>nl{*la!XGKx}ud_da{@q)Nx;jvIKC{y_#7=_9WZ;3%StH)oZ9kv*gh_dLW?@D=Vhe`x%=* zMME! 
zHAeQQP1Jg}kGxpS|J3tih(o2tii#!?o60&=n%1?x9~l z>l3yur0)|YR?;41dxvd&qZQ!O#q@*01-#zDZ5!Vd+ES;yU}8GNdS@_GUdf0%(y-s- mS$9JeQJ_=$2I-|57TKdH@JyF!05FR_XjeCQZt+ZUtWoZCut3?8#P@!IyLMs{}?@Cn{RmQbXcD*=uYM)da zY4;CQyaKPl!|{&?fbSb8*#H%-P$in=OlFSfoHLhi&baaW8SnNlzuk{z_G#e%Dn|7k zNFuTe>PX^B=E$x)=Nq!yRK6iuOLnLMt_`j!kFlsDNlTd(N!pTtb}0TNa&NcG-L`vO z9`Y54cQegQ-~TqUsmZOk6h#b56rQRZr8qkLGD z$%aWLA9(2>_zZducC;iP#-tsLCdLhn>SquuasX=`u`bMUj@7qgR}LewO*w*%j%?lE zmg9!lmK?O@sKKd&4vjx@<+;gtk~!>QLX=cA<9ZqGY+s%McVj%YFJqE^h0A78 zvw8(YI_I6XbHQ14PP^xvuIo8xotSZ=pA7E}jA{aMm=mXSFn+JN0|s53@OeEJig27)7M-l0R~JFAHVt~rYtSYk zJ_8k5C4onBiiDUCh_hPxEVxA>sV6Ee&ed{L8v|l3I8SncgaA`yDtR(I*P!zccmmnu zEQ-xK9_ljj6r#>K$8*{MsaEg|EsQrV=D-vw?7`&K-gZR`E*#tufi-cf+axgWb!Dx|i1^@@r!N3Fo zyx9&0JsPfeSDI+ix@jK)=Azcxdd`jEXqCcz7Af6Vjn~Ai?*}cIoIa5UdyAk zHp4I}V%WmNwYCE_pDhRPTG0cOY5URFBYk7pn7uxwrdpcR#xSvIRt2kA8n9WY1VfKmPT>s_ z78L`id$1%4a zm&^i6yLDcTwQA2efAG@SY~41_XDeMb+M+~54VM=*iN;Ezk()1Ck2s*)Wf|*TYAWlz z#$#ejGEg~x$Qd0l!g(GajnTCb%9TQN!dL07u~on!6@qE@wceT$)GW)r(yZcc zcCDV(v~-Rwb?0`HyPe+W(|c`43$*6Kea$O-mFJpQIX0SgJXJjlPnN%UY&MrXV_R#? zjsMlY7j*sW&fQkj+V&$GcRYP}^WDvN-*@cWqiBDit~(zM`p#Xy72)=F#}njM7`6I) z1LbY_-l$>QJNrTC>|rivs0L30&kvpMpydVqN6ui+iS~Wx1J&Buc04Q;`cY`(UB?H? 
z?h$T5-`Vl|;XvK%4xT!_f#-L_4f{7>j<(g%<5Kk4>*20nYqKf)GlAT~6)FH<1NXG1 z;jMdyXFk`#!;)8it~EFYsY%C0^IWE1g`?Oy?MbcujVaa>>S$a-lDYG-jcVB zws~*GTgAPGzBTWh^euQgICwrL%Uau`U2t!!%|qoM?FtG^<3az?Q$N_e-EMh)FKBOm?0X;Hc0ZOgbMO0oKMcan;b^xTv|VrT zv>!&wZ}nW9{6V-WaA*h>_sU^%N$y$zc9bKC&ykf``A;_Qd==wd1(+iSdQj(T`~^ z%<+HQPWrb!>;@6^8d^_Eop7`ZVxh&+z`;|m)$L|&yN6E7d*{Quc0R-PVN11oPXD^I z1M2_=?fQzw!0-A!2wzBxK)WGxp(|1?2m$N~)|pPC{Ai>w$8D$YKb;}vX%Ou@gMRlg znPup=gT27_3M&PX-wX4paxRed>9rot|lWRH-QNc=8t> z1{m2M43k3$yl`VYsA3cldJZLJCXfD}eC}AXqH|%y6DI zEzuSe>wXtc!hX#f*9(k5*XdlKK7a>s%?ER8iP(0+6HS+Pa%KyQuVEal3pL#hdaXx( zcm@?ET9O}-3=2RrZ0M+^j~5HXaK!1hCpkGCFb{qH<`FS5{5KZY^-{0(`djOWEk z`J#4IiIyf@fh~CFxTI^3|N0@03kPE`Vkn3Et)bsoRx2RCIS1al($bulV@e)kqd$zx zes35Z%1J6=<9WQH$Z|!cP_(ac1FgHfq-{wT$1bB7{jMTtr7&>731ls=zWbCm8rlQ3)BxE$lf0L`;1IT>n z+r&B5Hh_T>~`R$!>M_=J~iKr0fIrlr4C^)`cZ1@IeGf_)Kc$k z!5qE@n$_C`JS`}qSU)oF1ixY{o8V{YOr*bB)0gxa=a`Mjog<~6rc*TF*FX?3fk6jV zwH`siA zL>w8z#)h{SmnFE*GyZtO-r{8MsFAN3&t9Zb4{qw@vxS+0t!8Z6#eW1e#(K;jBuDB`dePeoIX^d4|KVJvA66k?r4;UMhqJ+fvcT z=L$4UN9AN?wnbFu+bLO*4zXX0M2SR6D2R+Pp+ZS3L{j1nYByt^csV-|Hw&?w3vvKi zd#3g8V)RKqbKPI!l5mFZF8R$`#%v-iKyO}WVBK%^eAkU_*F}&r>JqQH?&DFboAfBg z+=}4=EDKoVEBFqvMz6Dj$0{aTT_YwU@f~6$s6b>Mo=A|#!wVToScbi5*)_XXtyQd* z+Pw7*t0E(4sPjJ)*ezUP43MY2-~9iiy+~;#@kMGpmn6PT8*P&KqHP|yB)`CC(qFW% zd*>w;t^>c|T@-u~_$BWZ!Iywv_Fffy8Tb|NHNjVaJKpPpuL8g7eM9gy|D5-R_om?M zl3u?RS4Gr{xiuNbOQh?!KoEW~M!SNT6_RvpgzJ%!7-rY(e>#B@ptp}+Gb$Y$f*CUI zCdWnP37#e>x6q&;8%L!GCc0z6P+~Ovo+&x{oaE(p--_lvgN8rN$Dwk_#|i5Kk%a?a z61XgIC7n;}ta_%*xK?OM);K34*9D#zctPMrftLinBJi@nR|Q@X#V&9C#se3|A8kvD zbC?!dm?+bsI+6!n^|Evl0mW4ZwU?xM7fEU8s=*+->a_YWwTCbbSDWUK?%!{k5ANS@ zycXL5d;zOHZP-a<@^Srk7((bVw-{a|&c(^qCWwjG_F(*6)`Q%sH;;`0@@g2d{kYT_ z1pVf`J6#_+HrI~le)z2I4>>z>WyD+oCg(%Nl>{&@x%-bE0d@iFZkholW(wNNj2>+^cCKd*0gWDbQi+5?MHdpCJ(A+Ji|}1M6R>- zL}w#Z#|{j21-`*Vz6E{ml<>TPXH=@>xs2zVcrK^U6+Ca@xw4HomBIh`mw6a*A0>&_ zOjQ8I2o_Xc2xD@Nu{ViiWEWD_*g>0v!p5W1Di5q)@NJem{)jaYB&!G~e8ei#bao3l zzzys-&T>x-~VBm@4Vg?fZ3hg0N9IaXx?@SM!T;uud 
zATTcX&I}q@a1#4@3kY}z&K+3l`r#$<#PrOtm7TI@{DRd;4awMANsB56wt8PjaKQrf zw)UyK367iNf6F<(lT~TRk|~#yRDtE3yo@6}%Ok&UJEJJ*24fa@sIa)_a~9&qXm5k> zZE$14Ysn~z8N&-}NhSAtM>hvULN;Qi2xMaUU;`{%vT8EdF~vnmhUa5i6(0ROEGfI5~e$ zj>rP|0_W~wHwoufPcc{Vg<@mTk|sQK`F0!N46d>Ne{m&TLvTF9wImb<*QVO>6|B=v zm1_M=sYddaOErrKnY{dnbTb=7D42Q@UNbe8rY9`OKE+1H|?)YLUu90-02^%0P%{46q~+AHaZD z1mYtYa7*BvKwIFv!0hV?$JC`|G4%)^=SpxF5ql^V;Up|@AWJCgz+y{dl9%SAbIsTq z^!>bbtFfd+55$a!YzQ|$BEx=6@POba1b;?APp7sCb_o8Q;1hy}1fK$&p$|+PmdIiU zh*r*hFKmeXX8mriAE)GGwE8A4;r#c2h}EZlypz~`53w)!R%|ytdIBRYbihl{)^DkI z@b>uiJA+Xd#dKul4E5mrlJ6$Q@5CuVHpWem!gQw>@zorEEwc!`9^Y% z@@ZFEJFFt&BXRNbLs@J|Tw?K5?cw#fG8!U4^3@|eoMBX2==w3qwSe$MP;1i1>nAiV zO+Zek^a@_gDZKy;CbxsMI5QwH6ey>dV&sx6ZTfjukzHqyMwjqYk|xC_F#`n?Fv~-F ztFK3$SldNwUQ)SsP6OTml(UuZ#UC4v6Q zQ!6E&>NeJjtNen_Jn?J>rMWb+TkiyA(=?D=!ho+wmAFn)rMd)Xvk4TD6zlr1O;yzMTV_Um$BehH9u% z@p<+Wa!JX)KEM>H8i!=~9pFg)IDK4V9R**sOmAo9Jix$pu1)oCOB6wzEv9#J|Qweimiesd{l zKzSJ%8UE8I%vz+l%{(|%{9c`e=anQT6{-pa0NEus7+W`zLiJ5Pg_i(IsN7iizH1pZ MLcvOA{jW6RccjSaIsgCw literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/__pycache__/DataSet.cpython-37.pyc b/keras/downstream_tasks/BraTS/__pycache__/DataSet.cpython-37.pyc new file mode 100755 index 0000000000000000000000000000000000000000..b0fb3eb6c9dd27e905c2165c8f8d47838afd007b GIT binary patch literal 9869 zcmcgyTaz2db)Fl5!2pZJUby5^6g8r52JK)amF>t3!|*23lBtdD-AD>zX^ce=xU&mj zz@7ogMPZXT%c{s#_KRJ9N>U-K{F16X=QrdBo}d&%I*bZinq1H?(la(>J!>+Is69+qy9d_kDTAe%~M1x7~J#+gA<*H?9X^ zd(iczXu4w5u&mvEFR;5<%N|Pqp(otH?)h!u4er~1*ADkx`=)H~?$`nw1#TEvc-MB( zq~Q^6-oV~<2Z1l|^!!J5-xqE#Xj;Dkag^12^ADP8xhIcJEJ{NZ@WWGr61ktx`iv)2bic6#Zf{v)F$dv{TUEB zp&nzR0T#mIL^&|Hr!>*gV+sYZ%+T_vUEy|B&|6Db;6F{A6{z^jv7Ew8NV?lY147MffqVX^J>rUw0ps|X6~JO zv?M&~c0yksIsNu9DnfojS42k73&O}abdQ3l>P(LlL`zOEk`KLy?Vi&K9!7f49iVS8 z>Zh{;h}zN6jS5a*dI4^Mdq0TE*>G!Zx`OCBopxs*8*XIdPIuJn9XTZY=r6li+Lh3% zr~-20Fv5N8%7zgcf!piK3QpXr&Y-kJDuP`@hwzv(K$bB#GE;(FAc7H76}70Y;;O1e 
zy@-4KH(pyn#10Xdek2*`61ACrs^NvG^2Cs#oP_`iiuq4=Lx`=eck;ED|?1B4e29-x%xNrM|-ch{D(Cv6#&lS0y zywL3j*;1Lp5P3*p)^G*q09aw_$0`T`72d=Zt4ypgDzmSl$s(pknkO2nJdb9~Gh&w{ z`~NnsRJ32qaU~|)Lc!J~p&{!44V~AFDmf{JGw>t{e2NF}%JNZ+SN$sONoy#jRdu}b zgQb2AH8dS+iJMbnN>LJTdtd zb6m?20z-?bK>7e0z$qV=Wu00(cz&urQ&jFJ%4}ovbrS0c7r}1p_5~k2R z7Kr~ugEsG~p?X$1hMVEGhPD>!2gU?O*Y3ED5-V@ZBL2qv zwk#rd+?GV_sFb`ZNjj!_4i%p>{>x)!$7gd=m6y$o z!Ksn!&_b^RJU$OXDj+lr9m&^3eWrd0S$|-HsOWJt4i@$Ck22M}2bmA-8~5(b=_f)! z>Gy5n1&6UiNcGm=JAj)Grsma-QB&TFbmX>A(_c1y&$aRZH3{gL=3M-B5p^@rdTEP^d|G zLf>MS{);(wvd3)O0X!{*@o@||WoV9fvHwnHAgqZUdHM!ieHts|xcuE@y;u@BIe<&f z#3g%Wd?B6b3H#D(!6kd(he#^i0HHy1yRpWK7g-;YQobvtFIh`lrQ|hMNtop$s0sbDEscHdEpmw@va7 z-X<=J69tW-&NdOw%7`slq0@8Qa(c*99EK&NKa3;WpAf$6Heg$d>iA3!r|GN=EwU{- zE>@kdC1`~z#C|oCMd< zJ|{N0y^QvG@dCH&XkQR7a(e~si{d42ucB>>m$|)$_9gKOx7Xb>;#Kh)w>LPkej_Te zsO7V2A{<>TUB5#E;RnTN7ZI~Ul1{W>Bh+HU?C67!Cr|>64lt^R#)-x)jptpXd0}CK zrwIxzWYABvV$#qIk25@TgW*|*=NN7>JkRh2h8GyV$nYX7c47NB0#q2kzr!icQBq=| zM423_&3Vuzk*1RfC@$Hky2RakNJ;}o`hIxHZVzB;k6;=uwe)Y_z1!07-M!m*DY88H z0;=Z>;cxCBm;`g6ttHzdh|sr@oXON z#L2le?ZQMX!V^teh+F5A@ysTmGLR>H(bFx0E%WJ zur#$*z*WRcNLLIKPKy5!Zqj{KU8Q^6n~aU?w;^mhZkUyAvY=|lGkhOgWI9`mbv9HF z^g~Tvgm2K1Z$Y0A3_LgSj2aa`7w~)y&xPcgv5P(%g^l}*Di2h>ptq^q@n=*6L9&W)!bPkyO=q`}15oZ4 zrMV|2(yHxq@1|86BFU7>ajHP&oUDw)G|M8tui2x}>v>};@Q}jdo~@aSAH!~w-fPmq znAV(8lq)-wS!HV~BW3LJm!U(aUpxWWu0xKXXagYu`v_ggQZ9lx@j|PIx z-{RPu&s5aG(-$OcOp;W7< zN;Q(VOsc7fkjl$<2ye;;5eg=rg9~pi^|N&RkXp|<8I6CAD?S;*(V3HFii*YmL`R|u zLVBdc1|$~v&v+!^Pk+3ZX+w*7&d z^=>!nlJ$Vi2+Iaj^8+H-Hi5q;Kp$M>E`h%x@F9VZ2z*T7Zwa&r{2jn4`ap?8ok(n> zp%gOT3md|Jvwk09cAN5d7M^;Xu9-LqNJ;lDY^C1!QwE*cRQ-F9azf0gbvP6p1po3ayP_{$;ly^7@$ zMrK9oQ`q^wAq2OaScYu{XW#JCJgk??dY5|PE{yo_}+wR|<^BY)K8 z-ltSWG@hyX&kqH*C6PhJQ%O~&s5lxTKyu{)9!@bTWOu(q=u#9&96_x~9dFEOT9SY) z#`Gdy%rHI26Im5mi5)LuoyuRW2C#a=4Hakc0|;%L<($pa4JU0 zgqg(Dz_L0n{^mRV6dt0Y`kC^KKZvsf%#J~k4C&Asy+mG-KNMd+RzrL{eTr}Afcj_1 zTAx8Rq>+D~{e)~tKG(Zg0#)OH2!01`q<*9akwJA7e9_WrfJ$HdS(CpiOFAJTUPM4S 
zzMRP=W==Kkl#k09EgM>-cY1*w(bBX&#E;u$>L8&UrrVHC=1j%RHi!+jw;N~Wx9F`a z1pa~mmrFa;B5%cs5nE&VLwalzpfE>L2q4Kn#=!zv2`LDWQDEc7Mom7TM+p#BI=yxf z^j-XW5+WYzX5vf%!RJzU(TJUEb4fQiHNVjPn)R*Mc8nOEaN^))(fa2~)w U<;KMKT~n(P>c8QasS_ zLRGsUIO!4YaJO)Ju5G=Z53N@Od)V(4A+$aZ&PXrfA&;Pq?-L&L#u?#_Q@7XP?qkwQ zPVOhWc_BLV5!1OSb+W@We0|y8YHxi-lkJf{tYnA&P?dB?Fb&Pk0k_IsrCHgpB;OQ# z)Jk50QKZ{)jB_h_AY?G)wHfPR%46E+(Wnkn$V=@8nWsx{_m>Ri$Qm2|KN> z#zQ(%=#eZtbZ=C{DJv5h)&0#qQL0L=c0c^-!R%~Y4ObPYLspA!>w#=DJ zWuDpr6BW7AV2<(1eBby5fHLuc(AG}VL~U4cR^+fOez?#)B@WE`Vj#^+o@YX}BC|ff zhb2rC<5>v07sI02wsAf@Nc+GEq&c9>NfAI z_DNM9ybyVNJ7Zi7^Q`?)@CVx|*d+>;-WR1%xoX#={UXm&UcD%lmVyma&D1m1z9-q< zG;k?_{DDGgl_CK9r+&gXUH}zG1LI6W!}%A96!=z z&a@rimNRfCJ}}J#rUlwZ9T*krU=RY+I+I}HPCVdNa+ClML*QEkea^(2M12?gpForW z-(0o}WM5KFEtq88O!HD#l$BH*gCCJVaNU$i$;Pk4zWG(mSg2O%i(a(bcIY|I(sY>leY( ztx92S>b7Eu8AGD;WCI1HKq4=oXrfp{aS;T}d<&YMjTH-^*I1rq0&A4KgtOm9@eT?k zj-~8OT!Bxs@ehsxx$A;u;sy*WBpgXB&z!`G9Uqt`H=r-Wfki>i-GyrNvLHPNaS|mMPI%T z8_H`aK0xsy3VYuex=kZOj|$ z_%&g;`u5&A5U)IOyXAMGv>FDYt7JowrY1@8Z66h=H`DZJ#ERL7q_B^PYgyGo>an~I zpAsWV-b8T=#jBXL8s<8A6+{QBjoH{qqGpIc+uuq|l){^w!C`?7D>;vwe4p)$!lq=i zZr|w5`9F(Xs6~&>q9dIb%D%!jMcRG;%@7mcnF0F-V@qK=_q`Zia;RQ>IllZiRj0OE literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/__pycache__/Patient.cpython-37.pyc b/keras/downstream_tasks/BraTS/__pycache__/Patient.cpython-37.pyc new file mode 100755 index 0000000000000000000000000000000000000000..60c99e94f32b777e161404bdb0707d8620b6c5ea GIT binary patch literal 2815 zcmcgu&2Jk;6yMozZ=I3?ElMdcKnrybu~G>MnF=)}q!hJQp%IlV!7|+$C!4N!otbeP zN9zm8g#-UXdxL*zuAKTWaN@o3N38%t>Y-!Jo451%es6y7jc={2Bo371*Rwxe0PRow zm=_O}n^4s@2=4S8PPki;o@-mL=R@li!4CF&MF_3WgA=D0@sLN*#<1 z!QBT=D>=HIZ0CjO(ECj1qSVP2)A03Kd!xPaIZZZ4dasfl`fXLxEx|N2SNFM9ZYs^n zekJ+3;G*EjGzuBg6c#43aa`Ugm(6c277-a6R>^a9&T%|c8LInS`}plRCWN99(?pfadlQ zk9YKYLZRHP7^jS?yxc7W9Zs=*s%So7yMl72Sz_mPhD@@-d_sx?DbE4$RxU-RD>$0_?t9b@hk-0i(%1p+c+QWrhVXq@jx^7Q;&zjtYmwvEJcAkio-t~c^+=` zukYG}Qn%O3zG@#;LA!#URhx_XNMQnSx=WQ0Z+^3YDuiub!4lOTh-IX6mtO 
z-;!+SL3_r(_2Jl@L!jxhv8za+w?Q~jllbH;{9-pDp&OGXk?%l%hCIU(Lk*x>&iw$& z;eGAQIJO0>BG25~2ab8bu|WH^4;%~YPzU=VaI8}YwOf0@s^lO628O`22>M9v)luKY z{)Z4>z%-X_1I-s?Q*#d43#NIgE6PeL4&eroI^bx^q-5h);oZy?Q~s%C;^O_;Z98-m zcc*YHdXMV81??%)4h>pfh}ZqU3t`d#asosT#Qo_VEDJjH4w8XZeMC6j5pZ+5*neTs z%k}fH=~kt%Hg#LE#9Sfq@?--Ara^fIMH9seinAbK<{QxTY?N34y~g4!6Ii3Y=y>G zqUg(aVMBQd#d|2;M`8Vq@xA1-I0YBwI!(WTer4Y&63%`{tMmAsPg*4IYLy1mJa#F3*X7MNLVpXjy5>XgL}T$J2OPmv{hx zNpi^c_$FmyiUDuOL?yQ@F^#VeGciOaTfi+MKY;Pmv zNE9|Dn^pTpuh0HdHPHJ9VtCFzg|UATuHY5f;HLfM5}d8q`Gn zEey79qo_}b2qz?MZfrb~1@sffq>_%Y?s6RuxkKlS_Gs@si3gQ?(Wal=X_X9tJ4~I0 z*cDgSaW&UQbU{>U9A6tQRWhwgZITJ(B`C+8#1ALO*a_Qmv|ap(1Co2+l)2Id($F_~ zwteQK49U4R>ooAeQQGh=#)Qro^U;bMB~@$150hCPYvp|OG>MWmHdQK$!75PC3#&2&xOO-hobw142i5Am0B z_2e&j5N9ib19>xfle{;~b&|v$M*P}+`o{dxT>c`&Vu;5aA#iWuai0gJzwp6d1-u0< z9=-*B!?w$C?(tyeWu2XgC!&NwN~w^-QrcT@<1w?yoRdE3U!?J=czymLYB9PaM#@cO6kkr6CJk zhvm&P7v<<&D80>m=bvP4cR(puIi)VzFfD~Fj5{=dCr3LMhNo{KlkH6hTf)mVV zy~YWR-aXx{YY$%!q>`5OQ7*cy6t7FTiqOV%5W*nxyTK18jeej2 literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/__pycache__/load_utils.cpython-36.pyc b/keras/downstream_tasks/BraTS/__pycache__/load_utils.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73ec922c0e852bf381e24b98705f23d8cebf7c8e GIT binary patch literal 3136 zcmaKuTW=dh6vuaV*Y?^?+)zS#p@=C68cQW^1xS$CQM=XT9sr zjGM;Vyfl^QE025xJ_cW7UU}*}PdstXtat4cgsnNdvvWWH-<+Ax=jR*C7i&LPPdUz? 
z&dkq)_v?_<4KUoKUx;q~Cxc|!8@p*+;;aTBL9`O1rvQy;^ehR+S_#B^y zXTaTUr*(RKyRjL?qC-bIidEwl)A0Uf`&#?jDs60x^j<1E^lqBaTY_nLEcdy6xvn%z zda2|qf{$8_H!vM%DRKt7G0`oC+m$#Hck7UxJ->`%;MPcD=6Z#VLAi}25t{gJLg$|gG(!{nMe|%fY zR8lDzkCNSK#bTrm{Y15V;|{qAuoBM8)Nj8C#Sl9&O(_x>svD;9XppFuYvzib){NH1jrFj0nIvssYBSx}-^})X;M$R9urk^_)AAbWlMmv!+2 z>mVz`#7Q*m&0@eoFRQFVk(tJ-hm=V{!!!XGXpv~!6ANXk6y;H`C&2BRV$08z&?@I3 zCy4iQZX`SSHxVcy?a6d-5{p9Zyq|(6q)#D7Q=*=Z^pvV%IfE3Y6p5V1H|W(j9vI_B z+<0tP$Qph^Pw&*FP$EQf#vgv0>?fcqtkR-Gqol;oQs(IC1e$F2eiUlPa>o4aUpUf=Im zaHp%Qx{A-2(}u8mhuR`}NGpykiJV&(LMXvxxCd=#8%%o4s~6qRR<>l|I21y~Y|D@-(NzMF)X{SYt=oC`HppNZjF*>1 z`Y>-d3sOIg62VVIGrRhkcNCY3BN)iJP5j#CSt`7v(;*tMx-K$ z4=f3V@gPo^=7v&2B4{?i88=nN$53i2kpeH7ckry(O7$l8nb)bsu-UMz5~l}38XxBj z!LMF=)zl{QGtHCUFg`BkEh^1o4|MKg3B<=j)n%YgEr4+v0I54i0&jsVVsN?y-$FcZ z@K=R|RNsW8eggA>asB-jV;}qs#|NFC0~}N=6`Ttpb^qel>HmiY0UP zk@IZ1r1Q)yoxq-z2SMOer30b&0Lc^V$u!j+`T*<3vWqn+2YADfEft3X5edJUoK*gYcoZ7PGX1s4}L!y8j&{D<|(9XLU9=NHa}Y<}TDY~iF)`H@xb zgA*8(UpsW8lm>~JQjzOkXt;sFOh~wdNqQKG*xo#PC&p%=vh^EAKIB4`dX~f4PD{7o zv4bu_t*j66mezM)LwrZpTV$){S>Z}?{0CgPRA~SJ literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/__pycache__/load_utils.cpython-37.pyc b/keras/downstream_tasks/BraTS/__pycache__/load_utils.cpython-37.pyc new file mode 100755 index 0000000000000000000000000000000000000000..c0f368793f25766f6f9c490447dc4f468185cd02 GIT binary patch literal 3134 zcmaJ@&2QT_6c;7gmfgh7x~^aAFm}V%8tCE{*nq+avZfy?Ra>oJZa0>rSaj%6N3m+$Vj8}`YF}+%y+#{LL%os84!xTu^p;>6UY9nx z{kX0)OM0o~^MVgsjW@6yXC)-~f}aad6`q^$s4GzHHMMgL z&OUiTesmJ|2S!tY+nxT1`zq1=wPSxj%PeouR_V%yspFvYJ0fZ?g^Y`S6t?dR{>@VNe#%9xx_3k(RHWKG z-AR-N-~Fy;YD=|mNcM2Gjoa^9q~`~FX4X~{z;(j--Kf=H~Epdz?FJQmb@ z@R2#2z`wH&ceh-59+}50>t5z`NpiXDt3qEV@8h*~-}W{E_y9(b9Qw=w!0V&GZEzph z&{zNt!$Z^FGyymuWieF-p`P%m*iJu%B}F3V;Tw$V8xIQO zM%;L8UC0_XVWijUQYaB3XGR}?muw|~D(updhDJ$AJj*xFRMZ9=2va*8A#sWpF)r^} z7Ue}98f68FGw`SxD4bdo@NSYhw@GZza)|4}AI^YdE##ZfJB3cTmH&Th5cu$8fqcSKiIMq?&mL!E0x$Di<``)iWpaUV|h( 
zUw8zhLIq$CAT~C{%TZ!Q0M9+{n*pd2;*!ce1YNTz!E?yyme|_?O~NsezNjdkvc?g# z3%eZj$uyu1N-!C2fbF!w;f(q4V*2U9mKo@Wf=EnThD?dB5E}sMinbMTy(kV%QWi2f`$pdOhFd}T}cx$Z{4i48m1Ac z2=sv^p)elkglR4*B_tBfN}F+0WqcG-Q;8Hr$-IYIVkOnfIA=bl7Q4n%YTJzu zyiultTT+tZNX&f`)iTE?{6H)IHFT>c6iywe;hiE45|C3wUWAr1Yk5#X3B$L#28DKB zO-q>fv(vM;raXlx^Hl`H&Z?#Itl~ zCoRK*X$RFEv9dALkd6=BSD^36^%hxac^0`T%!nbpMdU|dGKEU85VQU>&K7@y_P^k9 zMrRL|VSXV_Ll&z=_{hhU{_)%*EUWOSb5N{<=O-t|BqIlKv4tGsXXID+xsPixWFCqJ z2$s|eISV%?cm9eY?kw|j|COuTGe+5YrH-i literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/__pycache__/modalities.cpython-36.pyc b/keras/downstream_tasks/BraTS/__pycache__/modalities.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87fea629a3e4587a584d6c8fa276f529aa505bf9 GIT binary patch literal 2280 zcmaJ>OK%%D5GHq5)@m)svh#FN6g)JD6&Mzrwm@M7ZPGXiS~R}cK!R7;-W>j%OakBsHTpwwbhib*$$p36dNl`@5sIkyV-@_sq7%#I`zYA}#m7M7`;4<)nH zF+9fKa+UFZCgGIZ_KoKIdrWZ4nUU6PMyCKN>C%ec2NgUmCi&^#Fmw6~ssL09a`92H z9)XxoK}ix*$c86q>>@CC-+uO#`Goa@`x1u{Rt5;C1qPmXi z2C5KM3l(;Z6cND>@jxa&0Wl9iz1y(lfS!;Y@)P^fJEq4#;hA@fSnt3;2xjDjzFyGL zjP8*)(Muvcps|1Shs({5v{L5^Vsc!)tToIuuk$s~WnOaxp^e&H) zpg>uO>;^NC5opYH*1 zgLOe#Arw?ZJ5*yAPqAme28(fcp7pCQuYB+_`t}3t zH|`m#?}EMC^)-^AR!9lB8yi;lx?YB%>bS13nU!256RR>HDK=2;_%2uvb z;>@@OEvh#EzfHu^Ww+6{-E-%mB*>*y8V9&T$N4)Kp=*GS`Qbx21&#D8Y&d2eRN#eP Z+YkH@q|<5Bw!c8@Ulg8=ez+7a{|m&5X7T_4 literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/__pycache__/modalities.cpython-37.pyc b/keras/downstream_tasks/BraTS/__pycache__/modalities.cpython-37.pyc new file mode 100755 index 0000000000000000000000000000000000000000..6bdb314b484c8954ae60296af65579347f459832 GIT binary patch literal 2263 zcmaJ>OK%%D5GHq5)@m)svh!+AWq}5X0z-m>7ATCM&BIC1qVdHB5^Ns}dPzxi_Mwne z92xQ{x%4;mk{sJ}f6ZQd@~J)d)ETa2D@M^3W=L^{{B9wI1mjPTqiN;c4?>U@ zNCdJB(jGSA6&OR|EW+7@vkGSyZPkb`IS&fDCD1z(xUwu(RS>U=j#zs~;*MAsmtkBJ zSHx8q*TuD6(!GB4A$p=RxyABQ@Jv}HO|;D|EPUDjqQChSiylnusMK5RyRu;0l3N&V 
z9|$+OZ!9kcr4~IYCfz7{DhsJq$`mf<+$!+p`{l$kJCaPO!9Z$RSf+A5l*~@Ya2bEi zRmS%-3A>!zL(TVgnc$W)BdzI0Cjcqw(u&>z6+A8`dDlPr8`g9H>f~2vJ;8Yja`sV} z-3KwBgOVhsK#M16>S% zffuM^c;QLzF{~Uc;MjN1H0xWmsAkuVta&7`NY+Y{Xv!;FfU+kL5Do zml}h&BW0L`7qfkt@`*to!;CGp6mx#&%($a)e^COvnhhY0*uliOLm_1*!x7$a2$Y|N zRL6gU)SP&zd{BA`XAMw0-BuMQNx^fOBvq6okd%`Q=j|kUHR0K*rI{q6Okvrxn;>_q zz;33pY77vz$~SWO8_`$sXjK2~_4C5^d&R)?kIG_rC{_PK%7x5T+J7d+w+|AaU1lc1 z{58t-MR`~lt0m79%gsyEcRATVFOlAOs+(~14G_}?6*dDp|2d29z^YzDwT|jCsw=2M zR4r84G*ZL@KSTmCcm!hZf_k%I$$R>NY?GhZkKQpohUA@j*NF8F{P)3(e4sCvbTp&8 zIEIltblbD#tBR1GhS%4#E%%L*U9dzX1 zG`*c}qq=m8@YJP%sT#`4+*Dp^ssOL8!hH_q2hnu>Dmo9d3RON#FoY^V>pA!O8X#G< zhtgs$VS}4~g8(B5q`6M7(2(j+VSdH~_K`Cl?!X^QX5=kBrivVUZ$6n(;G=Ww&-|HZ z>CAsi?~zyQ&&dq&r$>Le2R(w+u>d}l79TU{g#dXWuxW5MR8Wc+;5t-Lc!k@_89Wp! z6{=P!EmJ7H=^3u(BbsVQn}FXI+g@}&wbW#okIC9rDfVR!$3k#p3~C`ipf$KUMzmev z*tnhDo#I*qhjs%{yM?Y-H8W)_#Nc4;2G#OHTu_pbKc=Fh3UGFjn9d?G3B>a~0B*1@ zNGpVbifD)GoA58iZh;?;D6UMf@@jf!^aeo!cDl8H*VCVYu?pv`U%Ywkqnk0dAHd(Z zYpA{j^PTQxy^3daT=n;|l8a*gURm@}y~4p@hRi)-N}uEn_w5AVS)Xq8`MV==cu1zza2{lE`FI-NFc`%AR` MMd9h_hb!Ugzu4_$y8r+H literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/__pycache__/structure.cpython-36.pyc b/keras/downstream_tasks/BraTS/__pycache__/structure.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8ebdd0f17d822f0738be549dfa033fed7203132 GIT binary patch literal 1181 zcmZuw%Wf1o6!ojRE0s4SE8WK zDs0*F3w*}x=L@`@4M=>!tmfJsk}PoP9>1=ykMBKoXK5+Bb+h~7?*yTL(YeN8yar|i zNK_z+C8=?Np{F&O(t_f!#%eZAiUgw}l8NkmMzW*mF)cc>E7Q-Y=t{DM1`D5FWE)kj z9;{er+S0W~Wlx2J`Mvype*6APw%)p(#ynVg+31z0N;v3lO=KLbSts+POc98u5 zLTGb2VP>U-L}fTJ={|!oc$4jgb@YU9CUrkxfWB!xz9MCHnN4=x$klQu@&{n4&51# zJ*@!}XMmQ^_C;Mu;i^V|L*@d6ityk2e5ze8M>hY|=<$K7^7T?kHLc40wURH^`Rhii z+VY{&%2qbtYq#sFyt;R2LLK{HHU&sAi(gzV!+S!5 z(xb~@Pr&R?fEjjZf_I5|b9l*-SuDMzU+58ehx8u|Il7yfyBLK}q9gJiY4R7E5s7ve z%;Omp+_6bA>A*>L46_)o3-O#wXDGxzCP(zdhsxyCX90Y^{??~+>XU)b_Y+j> zC?dHAHbIQIdm03#Qi{8E?zImaEyvRE92t9WEsEW|Fj70dJWGohWy%q)i+ 
zt^-^Jvws30oDqUA;XclA5J#V6xOaxX_DI-^CkLtTau8n0xw!`CJyY-#njj^=9ObuK pH$w8(Rke+|nWrk;dX7nyexT#ihx4E3Q3qx=17L7lkM!tr_kT#YCP4rI literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/__pycache__/structure.cpython-37.pyc b/keras/downstream_tasks/BraTS/__pycache__/structure.cpython-37.pyc new file mode 100755 index 0000000000000000000000000000000000000000..5a979696bc603c1e5c4e3878d7bf6956f6467391 GIT binary patch literal 1179 zcmZuxO>Y!881~ofP9~crAV7dPm1MvWrGb zuO1iMO{?y&Id8k#cUBb-g@^GE<-PLGy|rSq^LxhLU;D-AwTDW0XujK*sk7m{&=X_j zx{}?f_zHy3&IkwgNOv=6uTB~*`2^ha+GsBt-RNl@QKtn%X9KFc4Mrij1geO{6_uo7 zl2(~yKsW=Uy)Xn`Vz+zllz(<~sBGN(H5n3n6wJ=3A9BvMm?_Rf!THR{4o>G`!1-UD zXirw?-gFvh3nudxXocd1Xd5YfWAxw1UIJIi{I^@qv@h57#Fej%p1xE~xmgRTW=&l_ zRr2vBe`=&^9Um*LT;ob5;N|FJ8b4ayk``L#c>|`Hdl>GIJfN7tNQV5r$tapsxHl3D_tTi?e;{rPrelM_n(`dUKakr+*^XFE0$6XQ^ zmyK1mH}=R50I+bMRhJhS5D75y5foP90LXdnL6Ft*Bp$^R!)hrt5mv1(=FcT|E-o}Z0U3ad$e r>#Y%zcfM&|ip?Ta@ze`UlISCykpH-NHb3=X)*!D$0BpsW3F44WlA literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/load_utils.py b/keras/downstream_tasks/BraTS/load_utils.py new file mode 100755 index 0000000..bbc155f --- /dev/null +++ b/keras/downstream_tasks/BraTS/load_utils.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +""" +File: utils +Date: 5/1/18 +Author: Jon Deaton (jdeaton@stanford.edu) +""" + +import os +import pandas as pd + +def load_survival(survival_csv): + """ + Loads a survival CSV file + :param survival_csv: The path to the CSV file to load + :return: Pandas DataFrame with the survival information + """ + try: + survival = pd.read_csv(survival_csv) + except: + raise Exception("Error reading survival CSV file: %s" % survival_csv) + return rename_columns(survival) + + +def rename_columns(df): + """ + Rename the columns of a survival data CSV so that they are consistent + across different data-sets + :param df: The raw Pandas DataFrame read from the survival CSV file + :return: The same DataFrame but with the columns modified + """ + if df.shape[1] == 3: + 
df.columns = ['id', 'age', 'survival'] + elif df.shape[1] == 4: + df.columns = ['id', 'age', 'survival', 'resection'] + else: + raise Exception("Unknown columns in survival: %s" % df.columns) + return df + + +def find_file_containing(directory, keyword, case_sensitive=False): + """ + Finds a file in a directory containing a keyword in it's name + + :param directory: The directory to search in + :param keyword: The keyword to search in the name for + :param case_sensitive: Search with case sensitivity + :return: The joined path to the file containing the keyword in + it's name, if found, else None. + """ + assert isinstance(directory, str) + assert isinstance(keyword, str) + + if not os.path.isdir(directory): + raise FileNotFoundError(directory) + + # Iterate through files + for file in os.listdir(directory): + if keyword in (file if case_sensitive else file.lower()): + return os.path.join(directory, file) + return None + + +def find_file_named(root, name): + """ + Find a file named something + + :param root: Root directory to search recursively through + :param name: The name of the file to search for + :return: Full path to the (first!) file with the specified name found, + or None if no file was found of that name. 
+ """ + assert isinstance(root, str) + assert isinstance(name, str) + + # Search the directory recursively + for path, dirs, files in os.walk(root): + for file in files: + if file == name: + return os.path.join(path, file) + return None + + +def listdir(directory): + """ + Gets the full paths to the contents of a directory + + :param directory: A path to some directory + :return: An iterator yielding full paths to all files in the specified directory + """ + assert isinstance(directory, str) + m = map(lambda d: os.path.join(directory, d), os.listdir(directory)) + contents = [f for f in m if not f.startswith('.')] + return contents diff --git a/keras/downstream_tasks/BraTS/load_utils.pyc b/keras/downstream_tasks/BraTS/load_utils.pyc new file mode 100755 index 0000000000000000000000000000000000000000..c86f754d61150296d5d96a9be1440f14f72e1bab GIT binary patch literal 3394 zcma)Nu##F zy6>T>ZpMkN>%NZ@SKTlk{ZFo4zjpmoU0v_{-7K%``&p_ttnv7%bt389ao(iuERWV~ z)W2Mf|Ha={OFNZT5ul1hEO?ULM*>d}J{czXnp7Fcd zXWFA*&fxb1>WZ1U_5D1p>jx%{jMMOd*@P~u4`QB*d7~v`-f`FXI7?-&`w-UM%kwPP zxiwLo?u=#2#p?Q!tIa?TyCv#Ho*mE9Y@=Uf1&1x$W1aqO`IKVuW-Ckj-PDN_{1V1X zBV?KcZ|H~`K9V$U8NJVQ^RRJ^x7U=AuJs1~1jc2gRPq)ETonz?1!`FZ>Wq@uas1d; ziCqUB1P4Ee(z}3WuR?i^X#;1#YX_ksY9em8CgtWtt}&JacRMqsC|E$$7i5Ae0}nNg z8oR>%gGO8%T6TlDf9MLGwI{(U~6(ZfxJ6W2qBIAhJhC0i}!F zb{6P4(cT>8Ph@YaVnLCiwV2XfR!9x&hi1Xvq;p zqbu4bSZpgx?V2pJRpZ*?xwgbEPWhB*S%KO4TngN}b2ev$rEm&}kJGjoj zgCRbfwlcrTYZdYgaf0+_36@Q=1E}Nayea9hHATtglo>@q_#bXbESdJ9Ol2>;J{9~K z=xqa=Cd;E%myqyOcM1(vSJX=7v^p7_Rqv}+)#7}c!^0-u#p8ZN^X4gX_+SWhks=%0 zx9R(Ujw_r8$1`+zm5yd;WBU>vBa84)Nf9n4%~A*HRN-;Sf(P{IS$#4SPETe6I2RNc zx=6dMYyB?ppW-~vGGEsr+KE&dV~gbJ=MGpV1Mr(?{hi%ObXjvjynv0e&aBCzV;v*h zOKdm$Niqd&tsUpiU%Xt*93R#_+mgB}={clSmd7d0WH7sFHk^e6UpVH&MNSbmsU&#; zAqx5I4EbOVPjbV-6;cjNvKKO<;aP50(bPD=@{t`R9abk&@&**0K+BtT;d!2e=QXD{ zuRDf~HAc5Fo|0lV5+R(6gITg##2+S?f=%C64|AXKNADGG=%!u3(%h zID=hrn2%C(imuUUU9XQK&LECqD{v~&Sm%0dbKFmJJ&0{07qRx#+6M55)46yvZf=Ch zXgN_jx(4x<_&z0uQgBghbMH74r25IaqNFQrTdtmE1jL}R 
zeXlYtWbF++j%}p*6?IY7RE_Jq)YE1&%3954kr&K=Pp;AYpB4)FU9JrR{ieZD5x&P` mA8^B(3oFoh69?hO)KBa!)++GNRhHCJW%0z~sioD$GyefJ&@Ll5TCOXC%MLLnwFHH3hfK3i=srSARw!PwsDJ8K_2W#1w&I{c&&C zZ7row@Ev&IiSNM2LIMf#48NJPOVfvL|l54xAh!HLVVXT2-J zvEGJotanwoOLWk^LS~cQ>TZy^Cd^eb*M-?6^A2?y?{fIRfW(hnVw)<>jZR!}*2gU! zaPaZoM|=C9s`&mSjPjzX9_N{ASsn0boEl;7`Jl5wUYK2LCb#4G3!B-(^_AnpR0juH zeUeXt3L~qG8w_k=v!Gn6hgOB@7?iL~u1Q z`LJY{z&^|-srv(r`QSOyv7>ZmRL2fe+$av*-W3xUkCgM-EhO9aN15yEB$+~MmhBUu;KD7h`g!); zW)5aVKjdnppCkK0X}G6So1WN$oev}Dl|{tr#P;>Xvy4|hyck9OOok)A;RWzNcd7K>F*dHkq{@UrM28SH7)gP%*UNNjd%Y0jq}M~GP7>a( z$kyb2@N6LLmybI^Dq+8G0~;D}{sJ5KcG$p!7Q|n}Rw{09(Ez*SX`AhBWBGNE+Da{& zf7@IUZN9qA4z|~ruwxF3-iW5p!ST)P&>@z;5Aw1}f#@{iq0xG}L*G^CRYb3(8qX@U zg(yy|bh<=mm@5%|U!$Tq}q%N~w>TuCrjG`{F7!-M0 z;?KE^MXx#5g~lm&V~-JQDxL+3B~(5Gaje6rlrC(T6j@UpqnEkD*?M`G0~1hKlfFMfj3?LDA(sCH z1b~)k0ZCK>z#(<6p9i{zSvWUlr!G_)#)$Mhqa=>=#FzJCMaGH7XaX8?(2$1ZHcFfi@T7emd_x-d z61WRKmQ9Sp^fLdApvjm04>UJm>LQvwxc(*1Ja;Uut)s2$(e6vijJh9%RjdBe>b^UrelL=R-3CY zaCkDkc9UG2-XyrBY!)8dZ5}>>lUcTfA8E7#&#Sa>y~PewvE0efd+U6Y23ritHX9ZP z=8^ZfRQuKol9HnKO~@<~BHY_1t|aj;Y+<&4#!h+`uT|p8MzvOrv9>lgqs?kPisf5h MExCGit-f~UA5QmC<^TWy literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/structure.py b/keras/downstream_tasks/BraTS/structure.py new file mode 100755 index 0000000..bca7b8e --- /dev/null +++ b/keras/downstream_tasks/BraTS/structure.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +""" +File: structure +Date: 5/8/18 +Author: Jon Deaton (jdeaton@stanford.edu) +""" + +import os +from enum import Enum + +from BraTS.load_utils import find_file_containing + + +class DataSubsetType(Enum): + hgg = 0 + lgg = 1 + train = 2 + validation = 3 + + +def get_brats_subset_directory(brats_dataset_dir, data_set_type): + if data_set_type == DataSubsetType.train: + # Training data + try: + found_train = find_file_containing(brats_dataset_dir, "train", case_sensitive=False) + except FileNotFoundError: + found_train = None + if found_train is not None: + return found_train + return 
os.path.join(brats_dataset_dir, "training") + + if data_set_type == DataSubsetType.hgg: + train_dir = get_brats_subset_directory(brats_dataset_dir, DataSubsetType.train) + return os.path.join(train_dir, "HGG") + + if data_set_type == DataSubsetType.lgg: + train_dir = get_brats_subset_directory(brats_dataset_dir, DataSubsetType.train) + return os.path.join(train_dir, "LGG") + + if data_set_type == DataSubsetType.validation: + # Validation + try: + found_validation = find_file_containing(brats_dataset_dir, "validation", case_sensitive=False) + except FileNotFoundError: + found_validation = None + + if found_validation is not None: + return found_validation + return os.path.join(brats_dataset_dir, "validation") + diff --git a/keras/downstream_tasks/BraTS/structure.pyc b/keras/downstream_tasks/BraTS/structure.pyc new file mode 100755 index 0000000000000000000000000000000000000000..7caec16e483463afcf0628f63598d6dfb0907d21 GIT binary patch literal 1337 zcmah}ZBG+H5T3oews5=(z68J6k7r^anrH|y1`wzc!-p$IZDKY@Z(FWh@5t>QNt5yk z|AX;I`A_`j571}!fTEGuc4udHc3z&D*}6aH{Qa*#K6mNz<6_^$((gfHq7JB_*r8NV z2Mm5Lr49{U>bN9W_9*peSffr2a*dQrJ)G9*3(*D9r%s*X1`C}A`)LPL?EWo?zn7$H zqp3|%c1>9*evO5NpOwh+MszG!zO+`rH z+on!~1`VdH7q+MNWsqqb^_y5_{ZraRRn`dGI26N}o zB^d7yF!R7-!FUxIw6g;9NQ}L|z|?IJ=-WX9^*ace+VSZ|T1l-kotWfI*;d>3@py4> z-?UnGf7fb-+>>Aq#J0)$2<^CXU-qIDGkAwNOzA;x_VO}|cZ(t~>^L9fnL>kza&6_) z$Q)Z{kSAGa$L1lleP!fP5g9GD9cmdTh3cBT7==8GRSxzxhpKL3!hpi?Rm17A3`|BE zya+RclOE?HtGS$pY&m;>@ml+@S-ovcmtl1eOFsc2vEaC3Ma&6b%nDB|I}2iILa%fV z>+JuA;p$5n=Upj7PQcbd;k}3K&hU|a8>?v^#j-R>s%@dGx^wW!OS0T8dD?cqtlsJ? 
RE}!PtTSq2~&Z2w2@f*Vz29f{( literal 0 HcmV?d00001 diff --git a/keras/downstream_tasks/BraTS/test.py b/keras/downstream_tasks/BraTS/test.py new file mode 100755 index 0000000..3e5551f --- /dev/null +++ b/keras/downstream_tasks/BraTS/test.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +""" +File: test +Date: 5/3/18 +Author: Jon Deaton (jdeaton@stanford.edu) + +This file tests the functionality of the BraTS dataset loader +""" + +import BraTS +import unittest +import numpy as np + +# If, for some reason, you wanted to test this on your machine you would +# need to set up the BraTS data-sets in some directory and set that path here +brats_root = "/Users/jonpdeaton/Datasets/BraTS" + +class BraTSTest(unittest.TestCase): + + def test_patient(self): + brats = BraTS.DataSet(brats_root=brats_root, year=2017) + patient = brats.train.patient("Brats17_TCIA_167_1") + + self.assertIsInstance(patient.id, str) + self.assertIsInstance(patient.age, float) + self.assertIsInstance(patient.survival, int) + self.assertIsInstance(patient.mri, np.ndarray) + self.assertIsInstance(patient.seg, np.ndarray) + + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/keras/downstream_tasks/BraTS/test_load.py b/keras/downstream_tasks/BraTS/test_load.py new file mode 100755 index 0000000..4500d30 --- /dev/null +++ b/keras/downstream_tasks/BraTS/test_load.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +""" +File: test_load +Date: 5/4/18 +Author: Jon Deaton (jdeaton@stanford.edu) +""" + +import io +import BraTS +import timeit +import cProfile, pstats + +BraTS.set_root("/Users/jonpdeaton/Datasets/BraTS") +brats = BraTS.DataSet(year=2017) + + +def load(): + subset = brats.train.subset(brats.train.ids[:10]) + x = subset.mris + + +def load_n(n=10): + for i in range(n): + p = brats.train.patient(brats.train.ids[i]) + + +s = timeit.timeit(load) +print("Time: %s sec" % s) + + + diff --git a/keras/downstream_tasks/config.py b/keras/downstream_tasks/config.py new file mode 100755 index 
0000000..0d5b518 --- /dev/null +++ b/keras/downstream_tasks/config.py @@ -0,0 +1,375 @@ +import os +import shutil +import csv +import keras +import random + +class bms_config: + arch = 'Vnet' + + # data + data = '/mnt/dataset/shared/zongwei/BraTS' + csv = "data/bms" + deltr = 30 + step_pixel_size = 30 + input_rows = 64 + input_cols = 64 + input_deps = 32 + crop_rows = 100 + crop_cols = 100 + crop_deps = 50 + + # model + lr = 1e-3 + optimizer = keras.optimizers.Adam(lr=lr) + patience = 30 + verbose = 1 + batch_size = 12 + use_multiprocessing = True + workers = 4 + max_queue_size = workers * 1 + nb_epoch = 10000 + + def __init__(self, args): + self.exp_name = self.arch + '-' + args.suffix + if args.data is not None: + self.data = args.data + + if args.suffix == 'random': + self.weights = None + elif args.suffix == 'genesis': + self.weights = 'pretrained_weights/Genesis_Chest_CT.h5' + elif args.suffix == 'genesis-autoencoder': + self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5' + elif args.suffix == 'genesis-nonlinear': + self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5' + elif args.suffix == 'genesis-localshuffling': + self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5' + elif args.suffix == 'genesis-outpainting': + self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5' + elif args.suffix == 'genesis-inpainting': + self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5' + elif args.suffix == 'denoisy': + self.weights = 'pretrained_weights/denoisy.h5' + elif args.suffix == 'patchshuffling': + self.weights = 'pretrained_weights/patchshuffling.h5' + elif args.suffix == 'hg': + self.weights = 'pretrained_weights/hg.h5' + else: + raise + + train_ids = self._load_csv(os.path.join(self.csv, "fold_1.csv")) + self._load_csv(os.path.join(self.csv, "fold_2.csv")) + random.Random(4).shuffle(train_ids) + self.validation_ids = train_ids[:len(train_ids) // 8] + self.train_ids = train_ids[len(train_ids) // 
8:] + self.test_ids = self._load_csv(os.path.join(self.csv, "fold_3.csv")) + self.num_train = len(self.train_ids) + self.num_validation = len(self.validation_ids) + self.num_test = len(self.test_ids) + + # logs + self.model_path = os.path.join("models/bms", "run_"+str(args.run)) + if not os.path.exists(self.model_path): + os.makedirs(self.model_path) + self.logs_path = os.path.join(self.model_path, "logs") + if not os.path.exists(self.logs_path): + os.makedirs(self.logs_path) + + def _load_csv(self, foldfile=None): + assert foldfile is not None + patient_ids = [] + with open(foldfile, 'r') as f: + reader = csv.reader(f, lineterminator='\n') + patient_ids.extend(reader) + for i, item in enumerate(patient_ids): + patient_ids[i] = item[0] + return patient_ids + + def display(self): + """Display Configuration values.""" + print("\nConfigurations:") + for a in dir(self): + if not a.startswith("__") and not callable(getattr(self, a)) and not '_ids' in a: + print("{:30} {}".format(a, getattr(self, a))) + print("\n") + +class ecc_config: + arch = 'Vnet' + + # data + data = '/mnt/dfs/zongwei/Academic/MICCAI2020/Genesis_PE/dataset/augdata/VOIR' + csv = "data/ecc" + clip_min = -1000 + clip_max = 1000 + input_rows = 64 + input_cols = 64 + input_deps = 64 + + # model + lr = 1e-3 + optimizer = keras.optimizers.Adam(lr=lr) + patience = 38 + verbose = 1 + batch_size = 24 + use_multiprocessing = False + workers = 1 + max_queue_size = workers * 1 + nb_epoch = 10000 + num_classes = 1 + verbose = 1 + + def __init__(self, args=None): + self.exp_name = self.arch + '-' + args.suffix + '-cv-' + str(args.cv) + if args.data is not None: + self.data = args.data + + if args.suffix == 'random': + self.weights = None + elif args.suffix == 'genesis': + self.weights = 'pretrained_weights/Genesis_Chest_CT.h5' + elif args.suffix == 'genesis-autoencoder': + self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5' + elif args.suffix == 'genesis-nonlinear': + self.weights = 
'pretrained_weights/Genesis_Chest_CT-nonlinear.h5' + elif args.suffix == 'genesis-localshuffling': + self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5' + elif args.suffix == 'genesis-outpainting': + self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5' + elif args.suffix == 'genesis-inpainting': + self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5' + elif args.suffix == 'denoisy': + self.weights = 'pretrained_weights/denoisy.h5' + elif args.suffix == 'patchshuffling': + self.weights = 'pretrained_weights/patchshuffling.h5' + elif args.suffix == 'hg': + self.weights = 'pretrained_weights/hg.h5' + else: + raise + + # logs + assert args.subsetting is not None + self.model_path = os.path.join("models/ecc", "run_"+str(args.run), args.subsetting) + if not os.path.exists(self.model_path): + os.makedirs(self.model_path) + self.logs_path = os.path.join(self.model_path, "logs") + if not os.path.exists(self.logs_path): + os.makedirs(self.logs_path) + self.patch_csv_path = 'Patch-20mm-cv-'+str(args.cv)+'-features_output_2_iter-100000.csv' + self.candidate_csv_path = 'Candidate-20mm-cv-'+str(args.cv)+'-features_output_2_iter-100000.csv' + self.csv_froc = 'features_output_2_iter-100000.csv' + + def display(self): + print("Configurations") + for a in dir(self): + if not a.startswith("__") and not callable(getattr(self,a)): + print("{:30} {}".format(a,getattr(self,a))) + #print("\n") + +class ncc_config: + arch = 'Vnet' + + # data + data = '/mnt/dataset/shared/zongwei/LUNA16/LUNA16_FPR_32x32x32' + train_fold=[0,1,2,3,4] + valid_fold=[5,6] + test_fold=[7,8,9] + hu_min = -1000 + hu_max = 1000 + input_rows = 64 + input_cols = 64 + input_deps = 32 + + # model + lr = 1e-3 + optimizer = keras.optimizers.Adam(lr=lr) + patience = 10 + verbose = 1 + batch_size = 24 + use_multiprocessing = False + workers = 1 + max_queue_size = workers * 1 + nb_epoch = 10000 + num_classes = 1 + verbose = 1 + + def __init__(self, args=None): + self.exp_name = 
self.arch + '-' + args.suffix + if args.data is not None: + self.data = args.data + + if args.suffix == 'random': + self.weights = None + elif args.suffix == 'genesis': + self.weights = 'pretrained_weights/Genesis_Chest_CT.h5' + elif args.suffix == 'genesis-autoencoder': + self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5' + elif args.suffix == 'genesis-nonlinear': + self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5' + elif args.suffix == 'genesis-localshuffling': + self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5' + elif args.suffix == 'genesis-outpainting': + self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5' + elif args.suffix == 'genesis-inpainting': + self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5' + elif args.suffix == 'denoisy': + self.weights = 'pretrained_weights/denoisy.h5' + elif args.suffix == 'patchshuffling': + self.weights = 'pretrained_weights/patchshuffling.h5' + elif args.suffix == 'hg': + self.weights = 'pretrained_weights/hg.h5' + else: + raise + + # logs + self.model_path = os.path.join("models/ncc", "run_"+str(args.run)) + if not os.path.exists(self.model_path): + os.makedirs(self.model_path) + self.logs_path = os.path.join(self.model_path, "logs") + if not os.path.exists(self.logs_path): + os.makedirs(self.logs_path) + + def display(self): + print("Configurations") + for a in dir(self): + if not a.startswith("__") and not callable(getattr(self,a)): + print("{:30} {}".format(a,getattr(self,a))) + #print("\n") + +class ncs_config: + arch = 'Vnet' + + # data + data = '/mnt/dataset/shared/zongwei/LIDC' + input_rows = 64 + input_cols = 64 + input_deps = 32 + + # model + lr = 1e-3 + optimizer = keras.optimizers.Adam(lr=lr) + patience = 50 + verbose = 1 + batch_size = 16 + use_multiprocessing = False + workers = 1 + max_queue_size = workers * 1 + nb_epoch = 10000 + + def __init__(self, args): + self.exp_name = self.arch + '-' + args.suffix + if args.data is not 
None: + self.data = args.data + + if args.suffix == 'random': + self.weights = None + elif args.suffix == 'genesis': + self.weights = 'pretrained_weights/Genesis_Chest_CT.h5' + elif args.suffix == 'genesis-autoencoder': + self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5' + elif args.suffix == 'genesis-nonlinear': + self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5' + elif args.suffix == 'genesis-localshuffling': + self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5' + elif args.suffix == 'genesis-outpainting': + self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5' + elif args.suffix == 'genesis-inpainting': + self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5' + elif args.suffix == 'denoisy': + self.weights = 'pretrained_weights/denoisy.h5' + elif args.suffix == 'patchshuffling': + self.weights = 'pretrained_weights/patchshuffling.h5' + elif args.suffix == 'hg': + self.weights = 'pretrained_weights/hg.h5' + else: + raise + + # logs + self.model_path = os.path.join("models/ncs", "run_"+str(args.run)) + if not os.path.exists(self.model_path): + os.makedirs(self.model_path) + self.logs_path = os.path.join(self.model_path, "logs") + if not os.path.exists(self.logs_path): + os.makedirs(self.logs_path) + + def display(self): + """Display Configuration values.""" + print("\nConfigurations:") + for a in dir(self): + if not a.startswith("__") and not callable(getattr(self, a)): + print("{:30} {}".format(a, getattr(self, a))) + print("\n") + + +class lcs_config: + arch = 'Vnet' + + # data + data = '/mnt/dfs/zongwei/Academic/MICCAI2019/Data/LiTS/3D_LiTS_NPY_256x256xZ' + nii = '/mnt/dataset/shared/zongwei/LiTS/Tr' + obj = 'liver' + train_idx = [n for n in range(0, 100)] + valid_idx = [n for n in range(100, 115)] + test_idx = [n for n in range(115, 130)] + num_train = len(train_idx) + num_valid = len(valid_idx) + num_test = len(test_idx) + hu_max = 1000 + hu_min = -1000 + input_rows = 64 + 
input_cols = 64 + input_deps = 32 + + # model + lr = 1e-3 + optimizer = keras.optimizers.Adam(lr=lr) + patience = 20 + verbose = 1 + batch_size = 16 + use_multiprocessing = False + workers = 1 + max_queue_size = workers * 1 + nb_epoch = 10000 + + def __init__(self, args): + self.exp_name = self.arch + '-' + args.suffix + if args.data is not None: + self.data = args.data + + if args.suffix == 'random': + self.weights = None + elif args.suffix == 'genesis': + self.weights = 'pretrained_weights/Genesis_Chest_CT.h5' + elif args.suffix == 'genesis-autoencoder': + self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5' + elif args.suffix == 'genesis-nonlinear': + self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5' + elif args.suffix == 'genesis-localshuffling': + self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5' + elif args.suffix == 'genesis-outpainting': + self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5' + elif args.suffix == 'genesis-inpainting': + self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5' + elif args.suffix == 'denoisy': + self.weights = 'pretrained_weights/denoisy.h5' + elif args.suffix == 'patchshuffling': + self.weights = 'pretrained_weights/patchshuffling.h5' + elif args.suffix == 'hg': + self.weights = 'pretrained_weights/hg.h5' + else: + raise + + # logs + self.model_path = os.path.join("models/lcs", "run_"+str(args.run)) + if not os.path.exists(self.model_path): + os.makedirs(self.model_path) + self.logs_path = os.path.join(self.model_path, "logs") + if not os.path.exists(self.logs_path): + os.makedirs(self.logs_path) + + def display(self): + """Display Configuration values.""" + print("\nConfigurations:") + for a in dir(self): + if not a.startswith("__") and not callable(getattr(self, a)) and not '_idx' in a: + print("{:30} {}".format(a, getattr(self, a))) + print("\n") diff --git a/keras/downstream_tasks/data/bms/fold_1.csv b/keras/downstream_tasks/data/bms/fold_1.csv 
new file mode 100755 index 0000000..a001c92 --- /dev/null +++ b/keras/downstream_tasks/data/bms/fold_1.csv @@ -0,0 +1,95 @@ +Brats18_TCIA10_330_1 +Brats18_TCIA04_437_1 +Brats18_2013_3_1 +Brats18_TCIA08_469_1 +Brats18_TCIA09_620_1 +Brats18_TCIA08_242_1 +Brats18_TCIA10_103_1 +Brats18_TCIA01_221_1 +Brats18_TCIA04_149_1 +Brats18_CBICA_ANZ_1 +Brats18_2013_18_1 +Brats18_TCIA02_314_1 +Brats18_CBICA_ATD_1 +Brats18_TCIA02_274_1 +Brats18_CBICA_AQR_1 +Brats18_CBICA_ASU_1 +Brats18_TCIA06_372_1 +Brats18_TCIA13_623_1 +Brats18_TCIA02_331_1 +Brats18_CBICA_ABE_1 +Brats18_TCIA03_199_1 +Brats18_CBICA_ASO_1 +Brats18_TCIA08_436_1 +Brats18_TCIA10_625_1 +Brats18_TCIA10_241_1 +Brats18_CBICA_ASH_1 +Brats18_TCIA12_466_1 +Brats18_TCIA01_201_1 +Brats18_CBICA_ATB_1 +Brats18_TCIA01_425_1 +Brats18_CBICA_AYA_1 +Brats18_TCIA02_608_1 +Brats18_2013_16_1 +Brats18_2013_24_1 +Brats18_TCIA04_479_1 +Brats18_TCIA13_633_1 +Brats18_TCIA01_235_1 +Brats18_2013_0_1 +Brats18_TCIA10_640_1 +Brats18_TCIA04_328_1 +Brats18_TCIA02_473_1 +Brats18_TCIA09_254_1 +Brats18_CBICA_ABM_1 +Brats18_CBICA_ATV_1 +Brats18_CBICA_AQA_1 +Brats18_TCIA10_449_1 +Brats18_TCIA01_231_1 +Brats18_2013_12_1 +Brats18_CBICA_AXN_1 +Brats18_TCIA10_130_1 +Brats18_TCIA10_261_1 +Brats18_2013_17_1 +Brats18_TCIA08_205_1 +Brats18_TCIA10_490_1 +Brats18_TCIA02_198_1 +Brats18_2013_20_1 +Brats18_CBICA_ALX_1 +Brats18_CBICA_AQV_1 +Brats18_TCIA06_184_1 +Brats18_CBICA_AXJ_1 +Brats18_CBICA_AVJ_1 +Brats18_TCIA10_276_1 +Brats18_TCIA01_448_1 +Brats18_TCIA03_498_1 +Brats18_TCIA10_310_1 +Brats18_TCIA02_283_1 +Brats18_TCIA02_151_1 +Brats18_TCIA12_298_1 +Brats18_CBICA_BFP_1 +Brats18_TCIA03_138_1 +Brats18_TCIA10_420_1 +Brats18_TCIA10_410_1 +Brats18_TCIA02_471_1 +Brats18_CBICA_ATX_1 +Brats18_CBICA_APY_1 +Brats18_TCIA02_168_1 +Brats18_TCIA13_653_1 +Brats18_CBICA_AWH_1 +Brats18_2013_14_1 +Brats18_TCIA05_478_1 +Brats18_CBICA_AXL_1 +Brats18_TCIA09_462_1 +Brats18_CBICA_ARW_1 +Brats18_TCIA03_257_1 +Brats18_TCIA02_605_1 +Brats18_TCIA01_186_1 +Brats18_2013_26_1 
+Brats18_CBICA_AAP_1 +Brats18_CBICA_AXO_1 +Brats18_TCIA10_639_1 +Brats18_2013_6_1 +Brats18_TCIA08_218_1 +Brats18_CBICA_AXM_1 +Brats18_CBICA_AVG_1 +Brats18_CBICA_AAL_1 diff --git a/keras/downstream_tasks/data/bms/fold_2.csv b/keras/downstream_tasks/data/bms/fold_2.csv new file mode 100755 index 0000000..18066eb --- /dev/null +++ b/keras/downstream_tasks/data/bms/fold_2.csv @@ -0,0 +1,95 @@ +Brats18_CBICA_AVV_1 +Brats18_TCIA01_147_1 +Brats18_TCIA01_460_1 +Brats18_TCIA10_387_1 +Brats18_2013_7_1 +Brats18_TCIA12_249_1 +Brats18_TCIA09_493_1 +Brats18_TCIA01_190_1 +Brats18_TCIA10_644_1 +Brats18_TCIA12_480_1 +Brats18_2013_8_1 +Brats18_TCIA10_351_1 +Brats18_TCIA01_203_1 +Brats18_TCIA10_299_1 +Brats18_CBICA_APZ_1 +Brats18_TCIA13_618_1 +Brats18_TCIA02_300_1 +Brats18_CBICA_AQN_1 +Brats18_TCIA03_419_1 +Brats18_TCIA08_105_1 +Brats18_TCIA13_650_1 +Brats18_TCIA10_413_1 +Brats18_TCIA13_645_1 +Brats18_CBICA_AQU_1 +Brats18_CBICA_AXW_1 +Brats18_TCIA01_390_1 +Brats18_TCIA02_394_1 +Brats18_TCIA09_141_1 +Brats18_TCIA03_133_1 +Brats18_TCIA10_637_1 +Brats18_CBICA_AQO_1 +Brats18_CBICA_AQY_1 +Brats18_TCIA02_491_1 +Brats18_CBICA_ABO_1 +Brats18_TCIA04_111_1 +Brats18_2013_22_1 +Brats18_TCIA08_319_1 +Brats18_TCIA03_375_1 +Brats18_2013_9_1 +Brats18_TCIA08_113_1 +Brats18_TCIA10_202_1 +Brats18_CBICA_AOZ_1 +Brats18_TCIA05_277_1 +Brats18_2013_4_1 +Brats18_TCIA09_312_1 +Brats18_CBICA_AUQ_1 +Brats18_TCIA02_171_1 +Brats18_CBICA_AME_1 +Brats18_CBICA_BHK_1 +Brats18_TCIA08_234_1 +Brats18_CBICA_AUR_1 +Brats18_2013_11_1 +Brats18_TCIA10_152_1 +Brats18_TCIA03_121_1 +Brats18_CBICA_ALN_1 +Brats18_CBICA_AWG_1 +Brats18_CBICA_AQQ_1 +Brats18_TCIA13_630_1 +Brats18_TCIA06_247_1 +Brats18_TCIA13_615_1 +Brats18_TCIA09_451_1 +Brats18_CBICA_ASE_1 +Brats18_2013_27_1 +Brats18_TCIA10_628_1 +Brats18_TCIA06_165_1 +Brats18_CBICA_ABY_1 +Brats18_TCIA02_377_1 +Brats18_TCIA02_455_1 +Brats18_TCIA12_470_1 +Brats18_2013_13_1 +Brats18_CBICA_ASW_1 +Brats18_TCIA09_255_1 +Brats18_2013_29_1 +Brats18_TCIA04_361_1 +Brats18_TCIA13_624_1 
+Brats18_2013_15_1 +Brats18_CBICA_BHM_1 +Brats18_TCIA10_175_1 +Brats18_TCIA10_393_1 +Brats18_TCIA10_629_1 +Brats18_CBICA_AAG_1 +Brats18_CBICA_AQJ_1 +Brats18_TCIA02_321_1 +Brats18_TCIA02_222_1 +Brats18_TCIA10_307_1 +Brats18_CBICA_AQD_1 +Brats18_TCIA03_338_1 +Brats18_2013_23_1 +Brats18_2013_5_1 +Brats18_TCIA04_192_1 +Brats18_TCIA06_211_1 +Brats18_TCIA02_135_1 +Brats18_TCIA05_444_1 +Brats18_CBICA_ASV_1 +Brats18_TCIA02_226_1 diff --git a/keras/downstream_tasks/data/bms/fold_3.csv b/keras/downstream_tasks/data/bms/fold_3.csv new file mode 100755 index 0000000..04babdb --- /dev/null +++ b/keras/downstream_tasks/data/bms/fold_3.csv @@ -0,0 +1,95 @@ +Brats18_CBICA_AZD_1 +Brats18_CBICA_AUN_1 +Brats18_CBICA_ANP_1 +Brats18_CBICA_AQZ_1 +Brats18_CBICA_AZH_1 +Brats18_TCIA02_370_1 +Brats18_TCIA02_374_1 +Brats18_CBICA_AOO_1 +Brats18_TCIA09_428_1 +Brats18_2013_25_1 +Brats18_CBICA_AOH_1 +Brats18_TCIA10_442_1 +Brats18_CBICA_ABN_1 +Brats18_CBICA_ATF_1 +Brats18_TCIA03_265_1 +Brats18_TCIA13_654_1 +Brats18_TCIA02_430_1 +Brats18_TCIA01_429_1 +Brats18_TCIA01_401_1 +Brats18_TCIA10_346_1 +Brats18_TCIA02_368_1 +Brats18_TCIA01_131_1 +Brats18_TCIA08_278_1 +Brats18_2013_28_1 +Brats18_2013_1_1 +Brats18_TCIA06_409_1 +Brats18_TCIA08_162_1 +Brats18_CBICA_AQT_1 +Brats18_CBICA_ASK_1 +Brats18_CBICA_AQG_1 +Brats18_CBICA_ALU_1 +Brats18_CBICA_BHB_1 +Brats18_CBICA_AYW_1 +Brats18_CBICA_ARZ_1 +Brats18_TCIA02_290_1 +Brats18_CBICA_AYI_1 +Brats18_CBICA_AXQ_1 +Brats18_CBICA_AOD_1 +Brats18_TCIA01_378_1 +Brats18_TCIA05_396_1 +Brats18_TCIA08_406_1 +Brats18_TCIA13_634_1 +Brats18_TCIA09_402_1 +Brats18_TCIA03_296_1 +Brats18_TCIA02_179_1 +Brats18_CBICA_ASA_1 +Brats18_TCIA10_109_1 +Brats18_2013_10_1 +Brats18_CBICA_ASG_1 +Brats18_TCIA08_280_1 +Brats18_TCIA02_117_1 +Brats18_TCIA13_642_1 +Brats18_TCIA09_177_1 +Brats18_TCIA08_167_1 +Brats18_TCIA10_266_1 +Brats18_TCIA10_282_1 +Brats18_2013_2_1 +Brats18_CBICA_ARF_1 +Brats18_TCIA01_180_1 +Brats18_TCIA10_408_1 +Brats18_CBICA_ANG_1 +Brats18_CBICA_AQP_1 +Brats18_TCIA01_335_1 
+Brats18_TCIA10_632_1 +Brats18_TCIA02_606_1 +Brats18_TCIA02_607_1 +Brats18_CBICA_AMH_1 +Brats18_TCIA04_343_1 +Brats18_TCIA13_621_1 +Brats18_TCIA02_322_1 +Brats18_CBICA_ASY_1 +Brats18_CBICA_AWI_1 +Brats18_TCIA01_499_1 +Brats18_TCIA01_411_1 +Brats18_2013_21_1 +Brats18_CBICA_AYU_1 +Brats18_TCIA01_412_1 +Brats18_CBICA_APR_1 +Brats18_CBICA_ATP_1 +Brats18_TCIA02_118_1 +Brats18_TCIA03_474_1 +Brats18_TCIA12_101_1 +Brats18_CBICA_BFB_1 +Brats18_CBICA_AAB_1 +Brats18_TCIA02_208_1 +Brats18_TCIA06_603_1 +Brats18_2013_19_1 +Brats18_CBICA_ABB_1 +Brats18_CBICA_ANI_1 +Brats18_CBICA_ASN_1 +Brats18_TCIA10_325_1 +Brats18_TCIA06_332_1 +Brats18_CBICA_AOP_1 +Brats18_TCIA02_309_1 +Brats18_TCIA01_150_1 diff --git a/keras/downstream_tasks/data/ecc/test_cv-1.csv b/keras/downstream_tasks/data/ecc/test_cv-1.csv new file mode 100755 index 0000000..347af46 --- /dev/null +++ b/keras/downstream_tasks/data/ecc/test_cv-1.csv @@ -0,0 +1,40 @@ +FileName +Patient03284 +Patient03266 +Patient03234 +Patient03280 +Patient03286 +Patient03262 +Patient03237 +Patient03243 +Patient03275 +Patient03268 +Patient03232 +Patient03240 +Patient03228 +Patient03229 +Patient03283 +Patient03222 +Patient03271 +Patient03256 +Patient03246 +Patient03239 +Patient03251 +Patient03276 +Patient03281 +Patient03263 +Patient03250 +Patient03257 +Patient03242 +Patient03267 +Patient03258 +Patient03272 +Patient03282 +Patient03291 +Patient03244 +Patient03231 +Patient03287 +Patient03249 +Patient03253 +Patient03278 +Patient03254 diff --git a/keras/downstream_tasks/data/ecc/test_cv-2.csv b/keras/downstream_tasks/data/ecc/test_cv-2.csv new file mode 100755 index 0000000..497dd97 --- /dev/null +++ b/keras/downstream_tasks/data/ecc/test_cv-2.csv @@ -0,0 +1,40 @@ +FileName +Patient21229 +Patient21108 +Patient03209 +Patient03210 +Patient21115 +Patient21107 +Patient03130 +Patient03204 +Patient03213 +Patient03129 +Patient03206 +Patient03218 +Patient21113 +Patient21228 +Patient21106 +Patient21112 +Patient21227 +Patient21101 +Patient03122 +Patient03201 
+Patient03220 +Patient03221 +Patient21100 +Patient03216 +Patient03124 +Patient03215 +Patient21118 +Patient21110 +Patient21104 +Patient03125 +Patient21117 +Patient03212 +Patient03219 +Patient21109 +Patient21102 +Patient21119 +Patient03208 +Patient21116 +Patient21103 diff --git a/keras/downstream_tasks/data/ecc/test_cv-3.csv b/keras/downstream_tasks/data/ecc/test_cv-3.csv new file mode 100755 index 0000000..46b8084 --- /dev/null +++ b/keras/downstream_tasks/data/ecc/test_cv-3.csv @@ -0,0 +1,40 @@ +FileName +Patient21222 +Patient03118 +Patient21215 +Patient28005 +Patient21214 +Patient03000 +Patient03001 +Patient28010 +Patient03102 +Patient03103 +Patient21225 +Patient03120 +Patient03002 +Patient03108 +Patient03116 +Patient03003 +Patient28008 +Patient28002 +Patient03109 +Patient21220 +Patient21206 +Patient21216 +Patient21205 +Patient21217 +Patient03104 +Patient03005 +Patient03119 +Patient03110 +Patient28004 +Patient03106 +Patient21221 +Patient21213 +Patient28007 +Patient03114 +Patient21201 +Patient28006 +Patient21224 +Patient21208 +Patient21200 diff --git a/keras/downstream_tasks/data/ecc/train_cv-1.csv b/keras/downstream_tasks/data/ecc/train_cv-1.csv new file mode 100755 index 0000000..e7ce590 --- /dev/null +++ b/keras/downstream_tasks/data/ecc/train_cv-1.csv @@ -0,0 +1,75 @@ +FileName +Patient21108 +Patient03219 +Patient03201 +Patient21103 +Patient03125 +Patient21104 +Patient28002 +Patient03114 +Patient03102 +Patient03116 +Patient28010 +Patient21113 +Patient21205 +Patient03000 +Patient03129 +Patient21206 +Patient21227 +Patient21214 +Patient21220 +Patient03216 +Patient21201 +Patient28005 +Patient03204 +Patient03209 +Patient21119 +Patient03003 +Patient03118 +Patient03210 +Patient21224 +Patient21217 +Patient21112 +Patient21102 +Patient21215 +Patient03124 +Patient21221 +Patient21225 +Patient28006 +Patient21200 +Patient21117 +Patient28008 +Patient03215 +Patient21118 +Patient03208 +Patient03001 +Patient03110 +Patient28007 +Patient03212 +Patient03005 +Patient03130 
+Patient03122 +Patient21110 +Patient21229 +Patient03119 +Patient21116 +Patient03104 +Patient21208 +Patient21222 +Patient03120 +Patient03213 +Patient21106 +Patient03108 +Patient03220 +Patient03109 +Patient21107 +Patient21228 +Patient21101 +Patient21100 +Patient03218 +Patient03103 +Patient21115 +Patient03002 +Patient28004 +Patient21213 +Patient03206 diff --git a/keras/downstream_tasks/data/ecc/train_cv-2.csv b/keras/downstream_tasks/data/ecc/train_cv-2.csv new file mode 100755 index 0000000..2cd1915 --- /dev/null +++ b/keras/downstream_tasks/data/ecc/train_cv-2.csv @@ -0,0 +1,75 @@ +FileName +Patient21215 +Patient21201 +Patient03103 +Patient03114 +Patient21224 +Patient03119 +Patient28005 +Patient21208 +Patient03256 +Patient21220 +Patient21217 +Patient03234 +Patient03278 +Patient03262 +Patient21200 +Patient28007 +Patient03108 +Patient28004 +Patient03104 +Patient28010 +Patient03001 +Patient21225 +Patient03232 +Patient03268 +Patient03110 +Patient03116 +Patient03257 +Patient03280 +Patient28008 +Patient03222 +Patient03251 +Patient03239 +Patient03254 +Patient21213 +Patient03267 +Patient21222 +Patient03250 +Patient03120 +Patient03237 +Patient03242 +Patient03240 +Patient03282 +Patient03263 +Patient03102 +Patient03253 +Patient03246 +Patient03231 +Patient03229 +Patient03258 +Patient03249 +Patient21221 +Patient28006 +Patient21216 +Patient03109 +Patient03005 +Patient21214 +Patient03002 +Patient03287 +Patient03281 +Patient03106 +Patient03243 +Patient21206 +Patient03118 +Patient03003 +Patient03271 +Patient03228 +Patient28002 +Patient03284 +Patient03276 +Patient03286 +Patient21205 +Patient03275 +Patient03000 +Patient03291 diff --git a/keras/downstream_tasks/data/ecc/train_cv-3.csv b/keras/downstream_tasks/data/ecc/train_cv-3.csv new file mode 100755 index 0000000..7ec23df --- /dev/null +++ b/keras/downstream_tasks/data/ecc/train_cv-3.csv @@ -0,0 +1,75 @@ +FileName +Patient03276 +Patient03251 +Patient03215 +Patient21118 +Patient03282 +Patient03232 +Patient03287 +Patient03284 
+Patient21104 +Patient03209 +Patient21108 +Patient03281 +Patient21229 +Patient03278 +Patient03256 +Patient03208 +Patient03263 +Patient21116 +Patient21103 +Patient21100 +Patient03130 +Patient03129 +Patient03262 +Patient03212 +Patient03272 +Patient03254 +Patient03221 +Patient21102 +Patient21227 +Patient03228 +Patient03218 +Patient03268 +Patient21119 +Patient21107 +Patient03210 +Patient03206 +Patient03250 +Patient03216 +Patient21101 +Patient03244 +Patient03220 +Patient03125 +Patient03229 +Patient03124 +Patient03257 +Patient03283 +Patient21112 +Patient03271 +Patient03219 +Patient21113 +Patient03275 +Patient03204 +Patient03231 +Patient03201 +Patient21117 +Patient03213 +Patient21115 +Patient21110 +Patient03258 +Patient03239 +Patient03242 +Patient21106 +Patient03280 +Patient03249 +Patient03237 +Patient03240 +Patient03266 +Patient03222 +Patient03267 +Patient03291 +Patient03234 +Patient21109 +Patient03243 +Patient03246 diff --git a/keras/downstream_tasks/data/ecc/val_cv-1.csv b/keras/downstream_tasks/data/ecc/val_cv-1.csv new file mode 100755 index 0000000..ae2133b --- /dev/null +++ b/keras/downstream_tasks/data/ecc/val_cv-1.csv @@ -0,0 +1,5 @@ +FileName +Patient21216 +Patient03106 +Patient21109 +Patient03221 diff --git a/keras/downstream_tasks/data/ecc/val_cv-2.csv b/keras/downstream_tasks/data/ecc/val_cv-2.csv new file mode 100755 index 0000000..b820563 --- /dev/null +++ b/keras/downstream_tasks/data/ecc/val_cv-2.csv @@ -0,0 +1,5 @@ +FileName +Patient03283 +Patient03266 +Patient03272 +Patient03244 diff --git a/keras/downstream_tasks/data/ecc/val_cv-3.csv b/keras/downstream_tasks/data/ecc/val_cv-3.csv new file mode 100755 index 0000000..bfd59a4 --- /dev/null +++ b/keras/downstream_tasks/data/ecc/val_cv-3.csv @@ -0,0 +1,5 @@ +FileName +Patient03286 +Patient03253 +Patient21228 +Patient03122 diff --git a/keras/downstream_tasks/lung nodule segmentation.ipynb b/keras/downstream_tasks/lung nodule segmentation.ipynb new file mode 100644 index 0000000..d025690 --- /dev/null +++ 
b/keras/downstream_tasks/lung nodule segmentation.ipynb @@ -0,0 +1,489 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "keras = 2.2.4\n", + "tensorflow-gpu = 1.13.1\n", + "\n", + "Configurations:\n", + "arch Vnet\n", + "batch_size 16\n", + "data /mnt/dataset/shared/zongwei/LIDC\n", + "exp_name Vnet-genesis\n", + "input_cols 64\n", + "input_deps 32\n", + "input_rows 64\n", + "logs_path models/ncs/run_1/logs\n", + "lr 0.001\n", + "max_queue_size 1\n", + "model_path models/ncs/run_1\n", + "nb_epoch 10000\n", + "optimizer adam\n", + "patience 50\n", + "verbose 1\n", + "weights pretrained_weights/Genesis_Chest_CT.h5\n", + "workers 1\n", + "\n", + "\n" + ] + } + ], + "source": [ + "#!/usr/bin/env python\n", + "# coding: utf-8\n", + "from __future__ import print_function\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "import os\n", + "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}\n", + "import keras\n", + "print(\"keras = {}\".format(keras.__version__))\n", + "import tensorflow as tf\n", + "print(\"tensorflow-gpu = {}\".format(tf.__version__))\n", + "try:\n", + " tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n", + "except:\n", + " pass\n", + "import random\n", + "import shutil\n", + "import argparse\n", + "import sklearn\n", + "from pathlib import Path\n", + "from utils import *\n", + "from unet3d import *\n", + "from config import *\n", + "\n", + "class set_args():\n", + " gpu = 0\n", + " data = None\n", + " apps = 'ncs'\n", + " run = 1\n", + " cv = None\n", + " subsetting = None\n", + " suffix = 'genesis'\n", + " task = 'segmentation'\n", + " \n", + "args = set_args()\n", + "\n", + "if args.gpu is not None:\n", + " os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu)\n", + " \n", + "from ncs_data import *\n", + "conf = ncs_config(args)\n", + "\n", + "conf.display()" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "# Train" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "x_train: (4082, 1, 64, 64, 32) | 0.0 ~ 1.0\n", + "y_train: (4082, 1, 64, 64, 32) | 0 ~ 1\n", + "x_valid: (3126, 1, 64, 64, 32) | 0.0 ~ 1.0\n", + "y_valid: (3126, 1, 64, 64, 32) | 0 ~ 1\n", + "[INFO] Load pre-trained weights from pretrained_weights/Genesis_Chest_CT.h5\n", + "Train on 4082 samples, validate on 3126 samples\n", + "Epoch 1/10000\n", + "\n", + "> Batch size = 14\n", + "Train on 4082 samples, validate on 3126 samples\n", + "Epoch 1/10000\n", + "1036/4082 [======>.......................] - ETA: 7:37 - loss: 0.4391 - mean_iou: 0.6119 - dice_coef: 0.5609\n", + "> Batch size = 12\n", + "Train on 4082 samples, validate on 3126 samples\n", + "Epoch 1/10000\n", + "4082/4082 [==============================] - 709s 174ms/step - loss: 0.2932 - mean_iou: 0.7156 - dice_coef: 0.7068 - val_loss: 0.3287 - val_mean_iou: 0.7407 - val_dice_coef: 0.6713\n", + "\n", + "Epoch 00001: val_loss improved from inf to 0.32870, saving model to models/ncs/run_1/Vnet-genesis.h5\n", + "Epoch 2/10000\n", + "4082/4082 [==============================] - 699s 171ms/step - loss: 0.2394 - mean_iou: 0.7524 - dice_coef: 0.7606 - val_loss: 0.3988 - val_mean_iou: 0.7547 - val_dice_coef: 0.6012\n", + "\n", + "Epoch 00002: val_loss did not improve from 0.32870\n", + "Epoch 3/10000\n", + "4082/4082 [==============================] - 700s 171ms/step - loss: 0.1950 - mean_iou: 0.7582 - dice_coef: 0.8050 - val_loss: 0.3279 - val_mean_iou: 0.7705 - val_dice_coef: 0.6721\n", + "\n", + "Epoch 00003: val_loss improved from 0.32870 to 0.32789, saving model to models/ncs/run_1/Vnet-genesis.h5\n", + "Epoch 4/10000\n", + "3756/4082 [==========================>...] 
- ETA: 44s - loss: 0.1836 - mean_iou: 0.7800 - dice_coef: 0.8164\n", + "> Batch size = 10\n", + "Train on 4082 samples, validate on 3126 samples\n", + "Epoch 1/10000\n", + "4082/4082 [==============================] - 708s 173ms/step - loss: 0.1886 - mean_iou: 0.7914 - dice_coef: 0.8114 - val_loss: 0.2994 - val_mean_iou: 0.7959 - val_dice_coef: 0.7006\n", + "\n", + "Epoch 00001: val_loss improved from 0.32789 to 0.29936, saving model to models/ncs/run_1/Vnet-genesis.h5\n", + "Epoch 2/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.1628 - mean_iou: 0.8009 - dice_coef: 0.8372 - val_loss: 0.2804 - val_mean_iou: 0.8049 - val_dice_coef: 0.7196\n", + "\n", + "Epoch 00002: val_loss improved from 0.29936 to 0.28040, saving model to models/ncs/run_1/Vnet-genesis.h5\n", + "Epoch 3/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.1491 - mean_iou: 0.8091 - dice_coef: 0.8509 - val_loss: 0.2657 - val_mean_iou: 0.8126 - val_dice_coef: 0.7343\n", + "\n", + "Epoch 00003: val_loss improved from 0.28040 to 0.26571, saving model to models/ncs/run_1/Vnet-genesis.h5\n", + "Epoch 4/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.1382 - mean_iou: 0.8161 - dice_coef: 0.8618 - val_loss: 0.2471 - val_mean_iou: 0.8187 - val_dice_coef: 0.7529\n", + "\n", + "Epoch 00004: val_loss improved from 0.26571 to 0.24712, saving model to models/ncs/run_1/Vnet-genesis.h5\n", + "Epoch 5/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.1279 - mean_iou: 0.8214 - dice_coef: 0.8721 - val_loss: 0.2390 - val_mean_iou: 0.8239 - val_dice_coef: 0.7610\n", + "\n", + "Epoch 00005: val_loss improved from 0.24712 to 0.23904, saving model to models/ncs/run_1/Vnet-genesis.h5\n", + "Epoch 6/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.1185 - mean_iou: 0.8267 - dice_coef: 0.8815 - val_loss: 0.2461 - val_mean_iou: 0.8287 - val_dice_coef: 
0.7539\n", + "\n", + "Epoch 00006: val_loss did not improve from 0.23904\n", + "Epoch 7/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.1167 - mean_iou: 0.8308 - dice_coef: 0.8833 - val_loss: 0.2459 - val_mean_iou: 0.8322 - val_dice_coef: 0.7541\n", + "\n", + "Epoch 00007: val_loss did not improve from 0.23904\n", + "Epoch 8/10000\n", + "4082/4082 [==============================] - 703s 172ms/step - loss: 0.1251 - mean_iou: 0.8337 - dice_coef: 0.8749 - val_loss: 0.2395 - val_mean_iou: 0.8349 - val_dice_coef: 0.7605\n", + "\n", + "Epoch 00008: val_loss did not improve from 0.23904\n", + "Epoch 9/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.1071 - mean_iou: 0.8366 - dice_coef: 0.8929 - val_loss: 0.2241 - val_mean_iou: 0.8380 - val_dice_coef: 0.7759\n", + "\n", + "Epoch 00009: val_loss improved from 0.23904 to 0.22408, saving model to models/ncs/run_1/Vnet-genesis.h5\n", + "Epoch 10/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.1058 - mean_iou: 0.8395 - dice_coef: 0.8942 - val_loss: 0.2433 - val_mean_iou: 0.8406 - val_dice_coef: 0.7567\n", + "\n", + "Epoch 00010: val_loss did not improve from 0.22408\n", + "Epoch 11/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0951 - mean_iou: 0.8420 - dice_coef: 0.9049 - val_loss: 0.2280 - val_mean_iou: 0.8432 - val_dice_coef: 0.7720\n", + "\n", + "Epoch 00011: val_loss did not improve from 0.22408\n", + "Epoch 12/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.1027 - mean_iou: 0.8444 - dice_coef: 0.8973 - val_loss: 0.2540 - val_mean_iou: 0.8452 - val_dice_coef: 0.7460\n", + "\n", + "Epoch 00012: val_loss did not improve from 0.22408\n", + "Epoch 13/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0900 - mean_iou: 0.8463 - dice_coef: 0.9100 - val_loss: 0.2332 - val_mean_iou: 0.8473 - val_dice_coef: 0.7668\n", + 
"\n", + "Epoch 00013: val_loss did not improve from 0.22408\n", + "Epoch 14/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0853 - mean_iou: 0.8484 - dice_coef: 0.9147 - val_loss: 0.2253 - val_mean_iou: 0.8493 - val_dice_coef: 0.7747\n", + "\n", + "Epoch 00014: val_loss did not improve from 0.22408\n", + "Epoch 15/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0814 - mean_iou: 0.8504 - dice_coef: 0.9186 - val_loss: 0.2390 - val_mean_iou: 0.8512 - val_dice_coef: 0.7610\n", + "\n", + "Epoch 00015: val_loss did not improve from 0.22408\n", + "\n", + "Epoch 00015: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.\n", + "Epoch 16/10000\n", + "4082/4082 [==============================] - 705s 173ms/step - loss: 0.0713 - mean_iou: 0.8523 - dice_coef: 0.9287 - val_loss: 0.2344 - val_mean_iou: 0.8531 - val_dice_coef: 0.7656\n", + "\n", + "Epoch 00016: val_loss did not improve from 0.22408\n", + "Epoch 17/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0672 - mean_iou: 0.8541 - dice_coef: 0.9328 - val_loss: 0.2396 - val_mean_iou: 0.8550 - val_dice_coef: 0.7604\n", + "\n", + "Epoch 00017: val_loss did not improve from 0.22408\n", + "Epoch 18/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0652 - mean_iou: 0.8559 - dice_coef: 0.9348 - val_loss: 0.2411 - val_mean_iou: 0.8567 - val_dice_coef: 0.7589\n", + "\n", + "Epoch 00018: val_loss did not improve from 0.22408\n", + "Epoch 19/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0640 - mean_iou: 0.8575 - dice_coef: 0.9360 - val_loss: 0.2457 - val_mean_iou: 0.8582 - val_dice_coef: 0.7543\n", + "\n", + "Epoch 00019: val_loss did not improve from 0.22408\n", + "Epoch 20/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0617 - mean_iou: 0.8590 - dice_coef: 0.9383 - val_loss: 0.2381 - val_mean_iou: 0.8597 - 
val_dice_coef: 0.7619\n", + "\n", + "Epoch 00020: val_loss did not improve from 0.22408\n", + "Epoch 21/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0619 - mean_iou: 0.8604 - dice_coef: 0.9381 - val_loss: 0.2309 - val_mean_iou: 0.8610 - val_dice_coef: 0.7691\n", + "\n", + "Epoch 00021: val_loss did not improve from 0.22408\n", + "\n", + "Epoch 00021: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.\n", + "Epoch 22/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0547 - mean_iou: 0.8617 - dice_coef: 0.9453 - val_loss: 0.2348 - val_mean_iou: 0.8624 - val_dice_coef: 0.7652\n", + "\n", + "Epoch 00022: val_loss did not improve from 0.22408\n", + "Epoch 23/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0526 - mean_iou: 0.8631 - dice_coef: 0.9474 - val_loss: 0.2351 - val_mean_iou: 0.8637 - val_dice_coef: 0.7649\n", + "\n", + "Epoch 00023: val_loss did not improve from 0.22408\n", + "Epoch 24/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0516 - mean_iou: 0.8644 - dice_coef: 0.9484 - val_loss: 0.2368 - val_mean_iou: 0.8649 - val_dice_coef: 0.7632\n", + "\n", + "Epoch 00024: val_loss did not improve from 0.22408\n", + "Epoch 25/10000\n", + "4082/4082 [==============================] - 705s 173ms/step - loss: 0.0510 - mean_iou: 0.8655 - dice_coef: 0.9490 - val_loss: 0.2353 - val_mean_iou: 0.8661 - val_dice_coef: 0.7647\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Epoch 00025: val_loss did not improve from 0.22408\n", + "Epoch 26/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0485 - mean_iou: 0.8667 - dice_coef: 0.9515 - val_loss: 0.2327 - val_mean_iou: 0.8672 - val_dice_coef: 0.7673\n", + "\n", + "Epoch 00026: val_loss did not improve from 0.22408\n", + "Epoch 27/10000\n", + "4082/4082 [==============================] - 704s 172ms/step 
- loss: 0.0475 - mean_iou: 0.8678 - dice_coef: 0.9525 - val_loss: 0.2368 - val_mean_iou: 0.8682 - val_dice_coef: 0.7632\n", + "\n", + "Epoch 00027: val_loss did not improve from 0.22408\n", + "\n", + "Epoch 00027: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.\n", + "Epoch 28/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0447 - mean_iou: 0.8688 - dice_coef: 0.9553 - val_loss: 0.2344 - val_mean_iou: 0.8693 - val_dice_coef: 0.7656\n", + "\n", + "Epoch 00028: val_loss did not improve from 0.22408\n", + "Epoch 29/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0437 - mean_iou: 0.8698 - dice_coef: 0.9563 - val_loss: 0.2325 - val_mean_iou: 0.8703 - val_dice_coef: 0.7675\n", + "\n", + "Epoch 00029: val_loss did not improve from 0.22408\n", + "Epoch 30/10000\n", + "4082/4082 [==============================] - 705s 173ms/step - loss: 0.0430 - mean_iou: 0.8708 - dice_coef: 0.9570 - val_loss: 0.2345 - val_mean_iou: 0.8712 - val_dice_coef: 0.7655\n", + "\n", + "Epoch 00030: val_loss did not improve from 0.22408\n", + "Epoch 31/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0422 - mean_iou: 0.8717 - dice_coef: 0.9578 - val_loss: 0.2383 - val_mean_iou: 0.8721 - val_dice_coef: 0.7617\n", + "\n", + "Epoch 00031: val_loss did not improve from 0.22408\n", + "Epoch 32/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0421 - mean_iou: 0.8725 - dice_coef: 0.9579 - val_loss: 0.2336 - val_mean_iou: 0.8729 - val_dice_coef: 0.7664\n", + "\n", + "Epoch 00032: val_loss did not improve from 0.22408\n", + "Epoch 33/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0415 - mean_iou: 0.8733 - dice_coef: 0.9585 - val_loss: 0.2378 - val_mean_iou: 0.8737 - val_dice_coef: 0.7622\n", + "\n", + "Epoch 00033: val_loss did not improve from 0.22408\n", + "\n", + "Epoch 00033: ReduceLROnPlateau reducing 
learning rate to 6.25000029685907e-05.\n", + "Epoch 34/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0402 - mean_iou: 0.8741 - dice_coef: 0.9598 - val_loss: 0.2364 - val_mean_iou: 0.8744 - val_dice_coef: 0.7636\n", + "\n", + "Epoch 00034: val_loss did not improve from 0.22408\n", + "Epoch 35/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0396 - mean_iou: 0.8749 - dice_coef: 0.9604 - val_loss: 0.2363 - val_mean_iou: 0.8752 - val_dice_coef: 0.7637\n", + "\n", + "Epoch 00035: val_loss did not improve from 0.22408\n", + "Epoch 36/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0395 - mean_iou: 0.8756 - dice_coef: 0.9605 - val_loss: 0.2357 - val_mean_iou: 0.8759 - val_dice_coef: 0.7643\n", + "\n", + "Epoch 00036: val_loss did not improve from 0.22408\n", + "Epoch 37/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0391 - mean_iou: 0.8763 - dice_coef: 0.9609 - val_loss: 0.2379 - val_mean_iou: 0.8766 - val_dice_coef: 0.7621\n", + "\n", + "Epoch 00037: val_loss did not improve from 0.22408\n", + "Epoch 38/10000\n", + "4082/4082 [==============================] - 705s 173ms/step - loss: 0.0391 - mean_iou: 0.8769 - dice_coef: 0.9609 - val_loss: 0.2384 - val_mean_iou: 0.8772 - val_dice_coef: 0.7616\n", + "\n", + "Epoch 00038: val_loss did not improve from 0.22408\n", + "Epoch 39/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0382 - mean_iou: 0.8775 - dice_coef: 0.9618 - val_loss: 0.2392 - val_mean_iou: 0.8778 - val_dice_coef: 0.7608\n", + "\n", + "Epoch 00039: val_loss did not improve from 0.22408\n", + "\n", + "Epoch 00039: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.\n", + "Epoch 40/10000\n", + "4082/4082 [==============================] - 705s 173ms/step - loss: 0.0379 - mean_iou: 0.8781 - dice_coef: 0.9621 - val_loss: 0.2373 - val_mean_iou: 0.8784 - val_dice_coef: 
0.7627\n", + "\n", + "Epoch 00040: val_loss did not improve from 0.22408\n", + "Epoch 41/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0373 - mean_iou: 0.8787 - dice_coef: 0.9627 - val_loss: 0.2385 - val_mean_iou: 0.8790 - val_dice_coef: 0.7615\n", + "\n", + "Epoch 00041: val_loss did not improve from 0.22408\n", + "Epoch 42/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0372 - mean_iou: 0.8793 - dice_coef: 0.9628 - val_loss: 0.2375 - val_mean_iou: 0.8795 - val_dice_coef: 0.7625\n", + "\n", + "Epoch 00042: val_loss did not improve from 0.22408\n", + "Epoch 43/10000\n", + "4082/4082 [==============================] - 705s 173ms/step - loss: 0.0371 - mean_iou: 0.8798 - dice_coef: 0.9629 - val_loss: 0.2390 - val_mean_iou: 0.8800 - val_dice_coef: 0.7610\n", + "\n", + "Epoch 00043: val_loss did not improve from 0.22408\n", + "Epoch 44/10000\n", + "4082/4082 [==============================] - 703s 172ms/step - loss: 0.0371 - mean_iou: 0.8803 - dice_coef: 0.9629 - val_loss: 0.2383 - val_mean_iou: 0.8806 - val_dice_coef: 0.7617\n", + "\n", + "Epoch 00044: val_loss did not improve from 0.22408\n", + "Epoch 45/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0368 - mean_iou: 0.8808 - dice_coef: 0.9632 - val_loss: 0.2388 - val_mean_iou: 0.8810 - val_dice_coef: 0.7612\n", + "\n", + "Epoch 00045: val_loss did not improve from 0.22408\n", + "\n", + "Epoch 00045: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.\n", + "Epoch 46/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0365 - mean_iou: 0.8813 - dice_coef: 0.9635 - val_loss: 0.2383 - val_mean_iou: 0.8815 - val_dice_coef: 0.7617\n", + "\n", + "Epoch 00046: val_loss did not improve from 0.22408\n", + "Epoch 47/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0367 - mean_iou: 0.8818 - dice_coef: 0.9633 - val_loss: 0.2396 - 
val_mean_iou: 0.8820 - val_dice_coef: 0.7604\n", + "\n", + "Epoch 00047: val_loss did not improve from 0.22408\n", + "Epoch 48/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0361 - mean_iou: 0.8822 - dice_coef: 0.9639 - val_loss: 0.2396 - val_mean_iou: 0.8824 - val_dice_coef: 0.7604\n", + "\n", + "Epoch 00048: val_loss did not improve from 0.22408\n", + "Epoch 49/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0367 - mean_iou: 0.8826 - dice_coef: 0.9633 - val_loss: 0.2387 - val_mean_iou: 0.8828 - val_dice_coef: 0.7613\n", + "\n", + "Epoch 00049: val_loss did not improve from 0.22408\n", + "Epoch 50/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0364 - mean_iou: 0.8830 - dice_coef: 0.9636 - val_loss: 0.2390 - val_mean_iou: 0.8832 - val_dice_coef: 0.7610\n", + "\n", + "Epoch 00050: val_loss did not improve from 0.22408\n", + "Epoch 51/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0364 - mean_iou: 0.8834 - dice_coef: 0.9636 - val_loss: 0.2395 - val_mean_iou: 0.8836 - val_dice_coef: 0.7605\n", + "\n", + "Epoch 00051: val_loss did not improve from 0.22408\n", + "\n", + "Epoch 00051: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.\n", + "Epoch 52/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0359 - mean_iou: 0.8838 - dice_coef: 0.9641 - val_loss: 0.2394 - val_mean_iou: 0.8840 - val_dice_coef: 0.7606\n", + "\n", + "Epoch 00052: val_loss did not improve from 0.22408\n", + "Epoch 53/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0358 - mean_iou: 0.8842 - dice_coef: 0.9642 - val_loss: 0.2390 - val_mean_iou: 0.8843 - val_dice_coef: 0.7610\n", + "\n", + "Epoch 00053: val_loss did not improve from 0.22408\n", + "Epoch 54/10000\n", + "4082/4082 [==============================] - 705s 173ms/step - loss: 0.0354 - mean_iou: 0.8845 - dice_coef: 
0.9646 - val_loss: 0.2390 - val_mean_iou: 0.8847 - val_dice_coef: 0.7610\n", + "\n", + "Epoch 00054: val_loss did not improve from 0.22408\n", + "Epoch 55/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0363 - mean_iou: 0.8849 - dice_coef: 0.9637 - val_loss: 0.2393 - val_mean_iou: 0.8850 - val_dice_coef: 0.7607\n", + "\n", + "Epoch 00055: val_loss did not improve from 0.22408\n", + "Epoch 56/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0360 - mean_iou: 0.8852 - dice_coef: 0.9640 - val_loss: 0.2397 - val_mean_iou: 0.8853 - val_dice_coef: 0.7603\n", + "\n", + "Epoch 00056: val_loss did not improve from 0.22408\n", + "Epoch 57/10000\n", + "4082/4082 [==============================] - 703s 172ms/step - loss: 0.0357 - mean_iou: 0.8855 - dice_coef: 0.9643 - val_loss: 0.2395 - val_mean_iou: 0.8857 - val_dice_coef: 0.7605\n", + "\n", + "Epoch 00057: val_loss did not improve from 0.22408\n", + "\n", + "Epoch 00057: ReduceLROnPlateau reducing learning rate to 3.906250185536919e-06.\n", + "Epoch 58/10000\n", + "4082/4082 [==============================] - 704s 172ms/step - loss: 0.0360 - mean_iou: 0.8858 - dice_coef: 0.9640 - val_loss: 0.2393 - val_mean_iou: 0.8860 - val_dice_coef: 0.7607\n", + "\n", + "Epoch 00058: val_loss did not improve from 0.22408\n", + "Epoch 59/10000\n", + "4082/4082 [==============================] - 704s 173ms/step - loss: 0.0353 - mean_iou: 0.8861 - dice_coef: 0.9647 - val_loss: 0.2392 - val_mean_iou: 0.8863 - val_dice_coef: 0.7608\n", + "\n", + "Epoch 00059: val_loss did not improve from 0.22408\n", + "Epoch 00059: early stopping\n" + ] + } + ], + "source": [ + "x_train, y_train = load_image(conf, 'train')\n", + "print('x_train: {} | {} ~ {}'.format(x_train.shape, np.min(x_train), np.max(x_train)))\n", + "print('y_train: {} | {} ~ {}'.format(y_train.shape, np.min(y_train), np.max(y_train)))\n", + "\n", + "x_valid, y_valid = load_image(conf, 'valid')\n", + "print('x_valid: 
{} | {} ~ {}'.format(x_valid.shape, np.min(x_valid), np.max(x_valid)))\n", + "print('y_valid: {} | {} ~ {}'.format(y_valid.shape, np.min(y_valid), np.max(y_valid)))\n", + "\n", + "model = unet_model_3d((1,conf.input_rows,conf.input_cols,conf.input_deps), batch_normalization=True)\n", + "if conf.weights is not None:\n", + " print(\"[INFO] Load pre-trained weights from {}\".format(conf.weights))\n", + " model.load_weights(conf.weights)\n", + "model, callbacks = model_setup(model, conf, task=args.task)\n", + "\n", + "while conf.batch_size > 1:\n", + " # To find a largest batch size that can be fit into GPU\n", + " try:\n", + " model.fit(x_train, y_train,\n", + " validation_data=(x_valid, y_valid),\n", + " batch_size=conf.batch_size,\n", + " epochs=conf.nb_epoch, \n", + " verbose=conf.verbose, \n", + " shuffle=True,\n", + " callbacks=callbacks)\n", + " break\n", + " except tf.errors.ResourceExhaustedError as e:\n", + " conf.batch_size = int(conf.batch_size - 2)\n", + " print(\"\\n> Batch size = {}\".format(conf.batch_size))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[INFO] Load trained model from models/ncs/run_1/Vnet-genesis.h5\n", + "x_test: (852, 1, 64, 64, 32) | 0.0 ~ 1.0\n", + "y_test: (852, 1, 64, 64, 32) | 0 ~ 1\n", + "852/852 [==============================] - 36s 42ms/step\n", + "852/852 [==============================] - 40s 47ms/step\n", + "[INFO] Vnet-genesis\n", + "x: (852, 1, 64, 64, 32) | 0.0 ~ 1.0\n", + "y: (852, 1, 64, 64, 32) | 0.0 ~ 1.0\n", + "p: (852, 1, 64, 64, 32) | 0.0 ~ 1.0\n", + "[INFO] Dice = 74.39%\n", + "[INFO] IoU = 59.22%\n", + "[EVAL] Dice = 75.38%\n", + "[EVAL] IoU = 77.14%\n" + ] + } + ], + "source": [ + "model = unet_model_3d((1,conf.input_rows,conf.input_cols,conf.input_deps), batch_normalization=True)\n", + "print(\"[INFO] Load trained 
model from {}\".format( os.path.join(conf.model_path, conf.exp_name+\".h5\") ))\n", + "model.load_weights( os.path.join(conf.model_path, conf.exp_name+\".h5\") )\n", + "\n", + "x_test, y_test = load_image(conf, 'test')\n", + "print('x_test: {} | {} ~ {}'.format(x_test.shape, np.min(x_test), np.max(x_test)))\n", + "print('y_test: {} | {} ~ {}'.format(y_test.shape, np.min(y_test), np.max(y_test)))\n", + "\n", + "_ = segmentation_model_evaluation(model=model, config=conf, x=x_test, y=y_test, note=conf.exp_name)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/keras/downstream_tasks/ncs_data.py b/keras/downstream_tasks/ncs_data.py new file mode 100755 index 0000000..352dc4a --- /dev/null +++ b/keras/downstream_tasks/ncs_data.py @@ -0,0 +1,32 @@ +import os +import random +import copy +import keras +import shutil +import numpy as np +from tqdm import tqdm +from glob import glob +from skimage.transform import resize + +def load_image(config, status=None): + + x = np.squeeze(np.load(os.path.join(config.data, 'x_'+status+'_64x64x32.npy'))) + y = np.squeeze(np.load(os.path.join(config.data, 'm_'+status+'_64x64x32.npy'))) + x = np.expand_dims(x, axis=1) + y = np.expand_dims(y, axis=1) + + return x, y + +if __name__ == "__main__": + from config import * + + class set_args(): + apps = 'ncs' + task = 'segmentation' + suffix = 'random' + args = set_args() + + conf = ncs_config(args) + x_valid, y_valid = load_image(conf, 'valid') + print(x_valid.shape, np.min(x_valid), np.max(x_valid)) + print(y_valid.shape, np.min(y_valid), np.max(y_valid)) \ No newline at end of file diff --git 
def unet_model_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, deconvolution=False,
                  depth=4, n_base_filters=32, batch_normalization=False, activation_name="sigmoid"):
    """Build a 3D U-Net as an (uncompiled) Keras model.

    :param input_shape: (n_channels, x, y, z); spatial dims must be divisible
        by ``pool_size ** depth``.
    :param pool_size: pool size for the max-pooling operations.
    :param n_labels: number of binary output labels.
    :param deconvolution: use transpose convolution instead of up-sampling on
        the expanding path (more memory during training).
    :param depth: number of resolution levels in the U shape.
    :param n_base_filters: filters in the first level; doubled per level.
    :param batch_normalization: insert BatchNormalization after each conv.
    :param activation_name: final activation (default sigmoid).
    :return: untrained 3D U-Net ``Model``.
    """
    inputs = Input(input_shape)
    x = inputs
    skip_stack = []
    layer_idx = 0  # running counter used to give every conv block a unique name

    # --- contracting path: two conv blocks per level, pool between levels ---
    for level in range(depth):
        conv_a = create_convolution_block(input_layer=x, n_filters=n_base_filters * (2 ** level),
                                          batch_normalization=batch_normalization, layer_depth=layer_idx)
        layer_idx += 1
        conv_b = create_convolution_block(input_layer=conv_a, n_filters=n_base_filters * (2 ** level) * 2,
                                          batch_normalization=batch_normalization, layer_depth=layer_idx)
        layer_idx += 1
        if level < depth - 1:
            x = MaxPooling3D(pool_size=pool_size)(conv_b)
            skip_stack.append([conv_a, conv_b, x])
        else:
            # bottom of the U: no pooling
            x = conv_b
            skip_stack.append([conv_a, conv_b])

    # --- expanding path: upsample, concat with skip, two conv blocks ---
    for level in range(depth - 2, -1, -1):
        up = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,
                                n_filters=x._keras_shape[1])(x)
        x = concatenate([up, skip_stack[level][1]], axis=1)
        for _ in range(2):
            x = create_convolution_block(n_filters=skip_stack[level][1]._keras_shape[1],
                                         layer_depth=layer_idx,
                                         input_layer=x,
                                         batch_normalization=batch_normalization)
            layer_idx += 1

    logits = Conv3D(n_labels, (1, 1, 1))(x)
    outputs = Activation(activation_name)(logits)
    return Model(inputs=inputs, outputs=outputs)

def create_convolution_block(input_layer, n_filters, batch_normalization=False, kernel=(3, 3, 3), activation=None,
                             padding='same', strides=(1, 1, 1), instance_normalization=False, layer_depth=None):
    """One named 3D conv block: Conv3D -> (BN | InstanceNorm) -> activation.

    :param input_layer: tensor to convolve.
    :param n_filters: number of output filters.
    :param batch_normalization: apply BatchNormalization (wins over instance norm).
    :param kernel: convolution kernel size.
    :param activation: Keras activation layer class; None means ReLU.
    :param padding: convolution padding mode.
    :param strides: convolution strides.
    :param instance_normalization: apply InstanceNormalization (needs keras_contrib).
    :param layer_depth: integer used to build unique layer names ("depth_N_*").
    :return: output tensor of the block.
    """
    name_prefix = "depth_" + str(layer_depth)
    out = Conv3D(n_filters, kernel, padding=padding, strides=strides,
                 name=name_prefix + "_conv")(input_layer)
    if batch_normalization:
        out = BatchNormalization(axis=1, name=name_prefix + "_bn")(out)
    elif instance_normalization:
        try:
            from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
        except ImportError:
            raise ImportError("Install keras_contrib in order to use instance normalization."
                              "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git")
        out = InstanceNormalization(axis=1, name=name_prefix + "_in")(out)
    if activation is None:
        return Activation('relu', name=name_prefix + "_relu")(out)
    return activation()(out)

def compute_level_output_shape(n_filters, depth, pool_size, image_shape):
    """Output shape at a given U-Net level.

    Spatial dims shrink by ``pool_size ** depth``; batch dim is None.

    :param n_filters: filters used by the last node in the level.
    :param depth: number of pooling steps applied so far.
    :param pool_size: the pool_size used by max pooling.
    :param image_shape: spatial shape of the 3D input image.
    :return: 5-tuple (None, n_filters, x, y, z).
    """
    spatial = np.asarray(np.divide(image_shape, np.power(pool_size, depth)),
                         dtype=np.int32).tolist()
    return tuple([None, n_filters] + spatial)
def get_up_convolution(n_filters, pool_size, kernel_size=(2, 2, 2), strides=(2, 2, 2),
                       deconvolution=False):
    """Return the expanding-path upscaling layer.

    UpSampling3D by default; a learned transpose convolution when
    ``deconvolution`` is True.
    """
    if not deconvolution:
        return UpSampling3D(size=pool_size)
    return Deconvolution3D(filters=n_filters, kernel_size=kernel_size,
                           strides=strides)

def augment_rician_noise(data_sample, noise_variance=(0, 0.1)):
    """Corrupt a sample with Rician noise.

    A std-dev is drawn uniformly from ``noise_variance``; the sample becomes
    the magnitude of (sample + gaussian, gaussian), the standard Rician model.
    """
    sigma = random.uniform(noise_variance[0], noise_variance[1])
    # Draw the "real" perturbation first, then the "imaginary" one, so the
    # RNG consumption order matches the original implementation exactly.
    real_part = data_sample + np.random.normal(0.0, sigma, size=data_sample.shape)
    imag_part = np.random.normal(0.0, sigma, size=data_sample.shape)
    return np.sqrt(real_part ** 2 + imag_part ** 2)

def augment_gaussian_noise(data_sample, noise_variance=(0, 0.1)):
    """Add zero-mean Gaussian noise with std-dev drawn from ``noise_variance``.

    When both bounds are equal the std-dev is used directly (no RNG draw).
    """
    low, high = noise_variance
    sigma = low if low == high else random.uniform(low, high)
    return data_sample + np.random.normal(0.0, sigma, size=data_sample.shape)
def mean_iou(y_true, y_pred):
    """Mean IoU averaged over prediction thresholds 0.5..0.95 (step 0.05).

    Uses the TF 1.x streaming metric ops (tf.metrics.mean_iou); each
    threshold's update op is forced to run via control_dependencies before
    the score is read.
    """
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        y_pred_ = tf.to_int32(y_pred > t)
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        # streaming metrics keep state in local variables; reset them per threshold
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)

def dice_coef(y_true, y_pred, smooth=1.):
    """Soft Dice coefficient on flattened tensors; ``smooth`` avoids 0/0."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    """Loss form of the Dice coefficient (1 - dice)."""
    return 1. - dice_coef(y_true, y_pred)

def bce_dice_loss(y_true, y_pred):
    """Combined loss: half binary cross-entropy minus soft Dice."""
    return 0.5 * keras.losses.binary_crossentropy(y_true, y_pred) - dice_coef(y_true, y_pred)

def iou(im1, im2):
    """Hard IoU between two arrays binarized at 0.5.

    NOTE(review): returns nan/raises if both masks are empty — callers
    appear to always pass non-empty predictions; verify before reuse.
    """
    overlap = (im1 > 0.5) * (im2 > 0.5)
    union = (im1 > 0.5) + (im2 > 0.5)
    return overlap.sum() / float(union.sum())

def dice(im1, im2, empty_score=1.0):
    """Hard Dice between two arrays binarized at 0.5.

    :param im1: first array (any shape).
    :param im2: second array (same shape as im1).
    :param empty_score: value returned when both binarized masks are empty.
    :raises ValueError: if the shapes differ.
    """
    # Fix: the deprecated alias np.bool was removed in NumPy 1.24; use the
    # builtin bool, which is what the alias always meant.
    im1 = np.asarray(im1 > 0.5).astype(bool)
    im2 = np.asarray(im2 > 0.5).astype(bool)

    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")

    im_sum = im1.sum() + im2.sum()
    if im_sum == 0:
        return empty_score

    intersection = np.logical_and(im1, im2)

    return 2. * intersection.sum() / im_sum
def classification_model_compile(model, config):
    """Compile a classifier: binary losses for <=2 classes, categorical otherwise.

    :param model: keras model to compile in place.
    :param config: exposes ``num_classes`` and ``optimizer``.
    :return: the compiled model (same object).
    """
    if config.num_classes <= 2:
        model.compile(optimizer=config.optimizer,
                      loss="binary_crossentropy",
                      metrics=['accuracy', 'binary_crossentropy'],
                      )
    else:
        model.compile(optimizer=config.optimizer,
                      loss="categorical_crossentropy",
                      metrics=['categorical_accuracy', 'categorical_crossentropy'],
                      )
    return model

def segmentation_model_compile(model, config):
    """Compile a segmenter with Dice loss and IoU/Dice metrics.

    :param model: keras model to compile in place.
    :param config: exposes ``optimizer``.
    :return: the compiled model (same object).
    """
    model.compile(optimizer=config.optimizer,
                  loss=dice_coef_loss,
                  metrics=[mean_iou,
                           dice_coef],
                  )
    return model

def model_setup(model, config, task=None):
    """Compile the model and build the standard training callback list.

    Writes a text model summary into ``config.model_path`` and recreates the
    TensorBoard log directory from scratch for this experiment.

    :param model: keras model.
    :param config: exposes model_path, logs_path, exp_name, patience, verbose.
    :param task: 'segmentation' or 'classification'.
    :return: (compiled model, [checkpoint, early-stopping, tensorboard, lr-scheduler]).
    :raises ValueError: for an unknown task. (The original used a bare
        ``raise`` outside any except block, which surfaces only as
        "RuntimeError: No active exception to re-raise".)
    """
    if task == 'segmentation':
        model = segmentation_model_compile(model, config)
    elif task == 'classification':
        model = classification_model_compile(model, config)
    else:
        raise ValueError("task must be 'segmentation' or 'classification', "
                         "got {!r}".format(task))

    # Refresh the on-disk model summary.
    summary_path = os.path.join(config.model_path, config.exp_name + ".txt")
    if os.path.exists(summary_path):
        os.remove(summary_path)
    with open(summary_path, 'w') as fh:
        model.summary(print_fn=lambda x: fh.write(x + '\n'))

    # Start TensorBoard logging from a clean directory.
    log_dir = os.path.join(config.logs_path, config.exp_name)
    shutil.rmtree(log_dir, ignore_errors=True)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    tbCallBack = TensorBoard(log_dir=log_dir,
                             histogram_freq=0,
                             write_graph=True,
                             write_images=True,
                             )
    tbCallBack.set_model(model)

    early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                   patience=config.patience,
                                                   verbose=config.verbose,
                                                   mode='min',
                                                   )
    check_point = keras.callbacks.ModelCheckpoint(os.path.join(config.model_path, config.exp_name + ".h5"),
                                                  monitor='val_loss',
                                                  verbose=config.verbose,
                                                  save_best_only=True,
                                                  mode='min',
                                                  )
    lrate_scheduler = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=6,
                                        min_delta=0.0001, min_lr=1e-6, verbose=1)
    callbacks = [check_point, early_stopping, tbCallBack, lrate_scheduler]
    return model, callbacks
callbacks = [check_point, early_stopping, tbCallBack, lrate_scheduler] + return model, callbacks + +def classification_model_evaluation(model, config, x, y, note=None): + model = classification_model_compile(model, config) + p = model.predict(x, verbose=config.verbose, batch_size=config.batch_size) + if note is not None: + print("[INFO] {}".format(note)) + print("x: {} | {:.1f} ~ {:.1f}".format(x.shape, np.min(x), np.max(x))) + print("y: {} | {:.1f} ~ {:.1f}".format(y.shape, np.min(y), np.max(y))) + print("p: {} | {:.1f} ~ {:.1f}".format(p.shape, np.min(p), np.max(p))) + + fpr, tpr, thresholds = metrics.roc_curve(y, p, pos_label=1) + + print("[EVAL] AUC = {:.2f}%".format(100.0 * metrics.auc(fpr, tpr))) + + return p + +def segmentation_model_evaluation(model, config, x, y, note=None): + model.compile(optimizer=config.optimizer, + loss=dice_coef_loss, + metrics=[mean_iou, + dice_coef], + ) + p = model.predict(x, verbose=config.verbose, batch_size=config.batch_size) + eva = model.evaluate(x, y, verbose=config.verbose, batch_size=config.batch_size) + if note is not None: + print("[INFO] {}".format(note)) + print("x: {} | {:.1f} ~ {:.1f}".format(x.shape, np.min(x), np.max(x))) + print("y: {} | {:.1f} ~ {:.1f}".format(y.shape, np.min(y), np.max(y))) + print("p: {} | {:.1f} ~ {:.1f}".format(p.shape, np.min(p), np.max(p))) + print("[BIN] Dice = {:.2f}%".format(100.0 * dice(p, y))) + print("[BIN] IoU = {:.2f}%".format(100.0 * iou(p, y))) + print("[EVAL] Dice = {:.2f}%".format(100.0 * eva[-1])) + print("[EVAL] IoU = {:.2f}%".format(100.0 * eva[-2])) + + return p + +def plot_image_truth_prediction(x, y, p): + x, y, p = np.squeeze(x), np.squeeze(y), np.squeeze(p>0.5) + rows, cols = 12, 12 + plt.rcParams.update({'font.size': 30}) + plt.figure(figsize=(25*3, 25)) + + large_image = np.zeros((rows*x.shape[0], cols*x.shape[1])) + for b in range(rows*cols): + large_image[(b//rows)*x.shape[0]:(b//rows+1)*x.shape[0], + (b%cols)*x.shape[1]:(b%cols+1)*x.shape[1]] = 
np.transpose(np.squeeze(x[:, :, b])) + plt.subplot(1, 3, 1) + plt.imshow(large_image, cmap='gray', vmin=0, vmax=1); plt.axis('off') + + large_image = np.zeros((rows*x.shape[0], cols*x.shape[1])) + for b in range(rows*cols): + large_image[(b//rows)*y.shape[0]:(b//rows+1)*y.shape[0], + (b%cols)*y.shape[1]:(b%cols+1)*y.shape[1]] = np.transpose(np.squeeze(y[:, :, b])) + plt.subplot(1, 3, 2) + plt.imshow(large_image, cmap='gray', vmin=0, vmax=1); plt.axis('off') + + large_image = np.zeros((rows*p.shape[0], cols*p.shape[1])) + for b in range(rows*cols): + large_image[(b//rows)*p.shape[0]:(b//rows+1)*p.shape[0], + (b%cols)*p.shape[1]:(b%cols+1)*p.shape[1]] = np.transpose(np.squeeze(p[:, :, b])) + plt.subplot(1, 3, 3) + plt.imshow(large_image, cmap='gray', vmin=0, vmax=1); plt.axis('off') + + plt.show() + +def predict_on_test_image(x, model, config): + rows, cols, deps = x.shape[0], x.shape[1], x.shape[2] + _resize = False + + p = np.zeros((rows, cols, deps), dtype='float') + n = np.ones((rows, cols, deps), dtype='float') + + nb_rows = int( math.floor( (rows-config.crop_rows) / config.step_pixel_size) ) + 1 + nb_cols = int( math.floor( (cols-config.crop_cols) / config.step_pixel_size) ) + 1 + nb_deps = int( math.floor( (deps-config.crop_deps) / config.step_pixel_size) ) + 1 + row_list = [x*config.step_pixel_size for x in range(nb_rows)] + col_list = [x*config.step_pixel_size for x in range(nb_cols)] + dep_list = [x*config.step_pixel_size for x in range(nb_deps)] + + for i in row_list: + for j in col_list: + for k in dep_list: + im = x[i:i+config.crop_rows, j:j+config.crop_cols, k:k+config.crop_deps] + im = resize(im, (config.input_rows, config.input_cols, config.input_deps), preserve_range=True) + im = np.expand_dims(np.expand_dims(im, axis=0), axis=0) + pr = np.squeeze(model.predict(im, verbose=0)) + pr = resize(pr, (config.crop_rows, config.crop_cols, config.crop_deps), preserve_range=True) + p[i:i+config.crop_rows, j:j+config.crop_cols, k:k+config.crop_deps] += pr + 
n[i:i+config.crop_rows, j:j+config.crop_cols, k:k+config.crop_deps] += 1 + + im = x[i:i+config.crop_rows, j:j+config.crop_cols, deps-config.crop_deps:deps] + im = resize(im, (config.input_rows, config.input_cols, config.input_deps), preserve_range=True) + im = np.expand_dims(np.expand_dims(im, axis=0), axis=0) + pr = np.squeeze(model.predict(im, verbose=0)) + pr = resize(pr, (config.crop_rows, config.crop_cols, config.crop_deps), preserve_range=True) + p[i:i+config.crop_rows, j:j+config.crop_cols, deps-config.crop_deps:deps] += pr + n[i:i+config.crop_rows, j:j+config.crop_cols, deps-config.crop_deps:deps] += 1 + + im = x[i:i+config.crop_rows, cols-config.crop_cols:cols, deps-config.crop_deps:deps] + im = resize(im, (config.input_rows, config.input_cols, config.input_deps), preserve_range=True) + im = np.expand_dims(np.expand_dims(im, axis=0), axis=0) + pr = np.squeeze(model.predict(im, verbose=0)) + pr = resize(pr, (config.crop_rows, config.crop_cols, config.crop_deps), preserve_range=True) + p[i:i+config.crop_rows, cols-config.crop_cols:cols, deps-config.crop_deps:deps] += pr + n[i:i+config.crop_rows, cols-config.crop_cols:cols, deps-config.crop_deps:deps] += 1 + + im = x[rows-config.crop_rows:rows, cols-config.crop_cols:cols, deps-config.crop_deps:deps] + im = resize(im, (config.input_rows, config.input_cols, config.input_deps), preserve_range=True) + im = np.expand_dims(np.expand_dims(im, axis=0), axis=0) + pr = np.squeeze(model.predict(im, verbose=0)) + pr = resize(pr, (config.crop_rows, config.crop_cols, config.crop_deps), preserve_range=True) + p[rows-config.crop_rows:rows, cols-config.crop_cols:cols, deps-config.crop_deps:deps] += pr + n[rows-config.crop_rows:rows, cols-config.crop_cols:cols, deps-config.crop_deps:deps] += 1 + + p = 1.0 * p / n + + return p + +###### +# Module: Visualization +###### +def plot_case(case_id=None, mris=None, segs=None, rows=10, cols=10, increment=38): + assert case_id is not None + assert mris is not None + assert segs is not 
None + font = {'family' : 'times', + 'weight' : 'bold', + 'size' : 22} + plt.rc('font', **font) + + print("\n\n[INFO] case id {}".format(case_id)) + + # plot the patient MRI + plt.figure(figsize=(cols*1, rows*1)) + plt.subplots_adjust(wspace=0.01, hspace=0.1) + for i in range(rows*cols): + plt.subplot(rows, cols, i+1) + plt.imshow(np.transpose(mris[case_id, 0, :, :, i+increment]), cmap="gray", vmin=0, vmax=1) + plt.axis('off') + plt.show() + + # plot the segmentation mask + plt.figure(figsize=(cols*1, rows*1)) + plt.subplots_adjust(wspace=0.01, hspace=0.1) + for i in range(rows*cols): + plt.subplot(rows, cols, i+1) + plt.imshow(np.transpose(segs[case_id, 0, :, :, i+increment]), cmap="gray", vmin=0, vmax=1) + plt.axis('off') + plt.show() \ No newline at end of file