From cf70ce4499bce8267d02f173cfd64cdc71141f6d Mon Sep 17 00:00:00 2001 From: DaHyeonnn <90945094+DaHyeonnn@users.noreply.github.com> Date: Sat, 23 Jul 2022 15:13:56 +0900 Subject: [PATCH] Add files via upload --- scripts/__pycache__/export.cpython-36.pyc | Bin 0 -> 21714 bytes scripts/__pycache__/export.cpython-38.pyc | Bin 0 -> 21245 bytes scripts/data/Argoverse.yaml | 67 ++ scripts/data/GlobalWheat2020.yaml | 53 + scripts/data/Objects365.yaml | 112 ++ scripts/data/SKU-110K.yaml | 52 + scripts/data/VOC.yaml | 80 ++ scripts/data/VisDrone.yaml | 61 ++ scripts/data/coco.yaml | 44 + scripts/data/coco128.yaml | 30 + scripts/data/hyps/hyp.finetune.yaml | 39 + .../data/hyps/hyp.finetune_objects365.yaml | 31 + scripts/data/hyps/hyp.scratch-high.yaml | 34 + scripts/data/hyps/hyp.scratch-low.yaml | 34 + scripts/data/hyps/hyp.scratch-med.yaml | 34 + scripts/data/hyps/hyp.scratch.yaml | 34 + scripts/data/scripts/download_weights.sh | 20 + scripts/data/scripts/get_coco.sh | 27 + scripts/data/scripts/get_coco128.sh | 17 + scripts/data/xView.yaml | 102 ++ scripts/models/__init__.py | 0 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 151 bytes .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 168 bytes .../models/__pycache__/common.cpython-36.pyc | Bin 0 -> 30529 bytes .../models/__pycache__/common.cpython-38.pyc | Bin 0 -> 29814 bytes .../__pycache__/experimental.cpython-36.pyc | Bin 0 -> 4961 bytes .../__pycache__/experimental.cpython-38.pyc | Bin 0 -> 4906 bytes .../models/__pycache__/yolo.cpython-36.pyc | Bin 0 -> 12688 bytes .../models/__pycache__/yolo.cpython-38.pyc | Bin 0 -> 12673 bytes scripts/models/common(1).py | 673 ++++++++++++ scripts/models/common.py | 673 ++++++++++++ scripts/models/experimental(1).py | 120 +++ scripts/models/experimental.py | 120 +++ scripts/models/hub/anchors.yaml | 59 ++ scripts/models/hub/yolov3-spp.yaml | 51 + scripts/models/hub/yolov3-tiny.yaml | 41 + scripts/models/hub/yolov3.yaml | 51 + scripts/models/hub/yolov5-bifpn.yaml | 48 + scripts/models/hub/yolov5-fpn.yaml | 42 + scripts/models/hub/yolov5-p2.yaml | 54 + scripts/models/hub/yolov5-p34.yaml | 41 + scripts/models/hub/yolov5-p6.yaml | 56 + scripts/models/hub/yolov5-p7.yaml | 67 ++ scripts/models/hub/yolov5-panet.yaml | 48 + scripts/models/hub/yolov5l6.yaml | 60 ++ scripts/models/hub/yolov5m6.yaml | 60 ++ scripts/models/hub/yolov5n6.yaml | 60 ++ scripts/models/hub/yolov5s-ghost.yaml | 48 + scripts/models/hub/yolov5s-transformer.yaml | 48 + scripts/models/hub/yolov5s6.yaml | 60 ++ scripts/models/hub/yolov5x6.yaml | 60 ++ scripts/models/tf.py | 464 ++++++++ scripts/models/yolo.py | 329 ++++++ scripts/models/yolov5l.yaml | 48 + scripts/models/yolov5m.yaml | 48 + scripts/models/yolov5n.yaml | 48 + scripts/models/yolov5s.yaml | 48 + scripts/models/yolov5x.yaml | 48 + scripts/utils/Camera_code.py | 48 + scripts/utils/FLIRCamera_code.py | 240 +++++ scripts/utils/FLIRCamera_code_backup.py | 260 +++++ scripts/utils/__init__.py | 37 + .../__pycache__/activations.cpython-38.pyc | Bin 0 -> 4576 bytes .../__pycache__/augmentations.cpython-38.pyc | Bin 0 -> 9068 bytes .../__pycache__/autoanchor.cpython-38.pyc | Bin 0 -> 6285 bytes scripts/utils/activations.py | 101 ++ scripts/utils/augmentations.py | 277 +++++ scripts/utils/autoanchor.py | 164 +++ scripts/utils/autobatch.py | 57 + scripts/utils/aws/__init__.py | 0 .../aws/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 157 bytes scripts/utils/aws/mime.sh | 26 + scripts/utils/aws/resume.py | 40 + scripts/utils/aws/userdata.sh | 27 + scripts/utils/benchmarks.py | 92 ++ 
scripts/utils/callbacks.py | 78 ++ scripts/utils/datasets.py | 992 ++++++++++++++++++ scripts/utils/downloads.py | 153 +++ scripts/utils/flask_rest_api/README.md | 73 ++ .../utils/flask_rest_api/example_request.py | 13 + scripts/utils/flask_rest_api/restapi.py | 37 + scripts/utils/general.py | 858 +++++++++++++++ scripts/utils/google_app_engine/Dockerfile | 25 + .../additional_requirements.txt | 4 + scripts/utils/google_app_engine/app.yaml | 14 + scripts/utils/loggers/__init__.py | 168 +++ .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 7404 bytes scripts/utils/loggers/wandb/README.md | 152 +++ scripts/utils/loggers/wandb/__init__.py | 0 .../wandb/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 167 bytes .../__pycache__/wandb_utils.cpython-36.pyc | Bin 0 -> 19550 bytes scripts/utils/loggers/wandb/log_dataset.py | 27 + scripts/utils/loggers/wandb/sweep.py | 41 + scripts/utils/loggers/wandb/sweep.yaml | 143 +++ scripts/utils/loggers/wandb/wandb_utils.py | 562 ++++++++++ scripts/utils/loss.py | 222 ++++ scripts/utils/metrics.py | 342 ++++++ scripts/utils/plots.py | 471 +++++++++ scripts/utils/torch_utils.py | 329 ++++++ scripts/utils/useing_video.py | 39 + 100 files changed, 10556 insertions(+) create mode 100644 scripts/__pycache__/export.cpython-36.pyc create mode 100644 scripts/__pycache__/export.cpython-38.pyc create mode 100644 scripts/data/Argoverse.yaml create mode 100644 scripts/data/GlobalWheat2020.yaml create mode 100644 scripts/data/Objects365.yaml create mode 100644 scripts/data/SKU-110K.yaml create mode 100644 scripts/data/VOC.yaml create mode 100644 scripts/data/VisDrone.yaml create mode 100644 scripts/data/coco.yaml create mode 100644 scripts/data/coco128.yaml create mode 100644 scripts/data/hyps/hyp.finetune.yaml create mode 100644 scripts/data/hyps/hyp.finetune_objects365.yaml create mode 100644 scripts/data/hyps/hyp.scratch-high.yaml create mode 100644 scripts/data/hyps/hyp.scratch-low.yaml create mode 100644 scripts/data/hyps/hyp.scratch-med.yaml create mode 100644 scripts/data/hyps/hyp.scratch.yaml create mode 100644 scripts/data/scripts/download_weights.sh create mode 100644 scripts/data/scripts/get_coco.sh create mode 100644 scripts/data/scripts/get_coco128.sh create mode 100644 scripts/data/xView.yaml create mode 100644 scripts/models/__init__.py create mode 100644 scripts/models/__pycache__/__init__.cpython-36.pyc create mode 100644 scripts/models/__pycache__/__init__.cpython-38.pyc create mode 100644 scripts/models/__pycache__/common.cpython-36.pyc create mode 100644 scripts/models/__pycache__/common.cpython-38.pyc create mode 100644 scripts/models/__pycache__/experimental.cpython-36.pyc create mode 100644 scripts/models/__pycache__/experimental.cpython-38.pyc create mode 100644 scripts/models/__pycache__/yolo.cpython-36.pyc create mode 100644 scripts/models/__pycache__/yolo.cpython-38.pyc create mode 100644 scripts/models/common(1).py create mode 100644 scripts/models/common.py create mode 100644 scripts/models/experimental(1).py create mode 100644 scripts/models/experimental.py create mode 100644 scripts/models/hub/anchors.yaml create mode 100644 scripts/models/hub/yolov3-spp.yaml create mode 100644 scripts/models/hub/yolov3-tiny.yaml create mode 100644 scripts/models/hub/yolov3.yaml create mode 100644 scripts/models/hub/yolov5-bifpn.yaml create mode 100644 scripts/models/hub/yolov5-fpn.yaml create mode 100644 scripts/models/hub/yolov5-p2.yaml create mode 100644 scripts/models/hub/yolov5-p34.yaml create mode 100644 scripts/models/hub/yolov5-p6.yaml create mode 
100644 scripts/models/hub/yolov5-p7.yaml create mode 100644 scripts/models/hub/yolov5-panet.yaml create mode 100644 scripts/models/hub/yolov5l6.yaml create mode 100644 scripts/models/hub/yolov5m6.yaml create mode 100644 scripts/models/hub/yolov5n6.yaml create mode 100644 scripts/models/hub/yolov5s-ghost.yaml create mode 100644 scripts/models/hub/yolov5s-transformer.yaml create mode 100644 scripts/models/hub/yolov5s6.yaml create mode 100644 scripts/models/hub/yolov5x6.yaml create mode 100644 scripts/models/tf.py create mode 100644 scripts/models/yolo.py create mode 100644 scripts/models/yolov5l.yaml create mode 100644 scripts/models/yolov5m.yaml create mode 100644 scripts/models/yolov5n.yaml create mode 100644 scripts/models/yolov5s.yaml create mode 100644 scripts/models/yolov5x.yaml create mode 100644 scripts/utils/Camera_code.py create mode 100644 scripts/utils/FLIRCamera_code.py create mode 100644 scripts/utils/FLIRCamera_code_backup.py create mode 100644 scripts/utils/__init__.py create mode 100644 scripts/utils/__pycache__/activations.cpython-38.pyc create mode 100644 scripts/utils/__pycache__/augmentations.cpython-38.pyc create mode 100644 scripts/utils/__pycache__/autoanchor.cpython-38.pyc create mode 100644 scripts/utils/activations.py create mode 100644 scripts/utils/augmentations.py create mode 100644 scripts/utils/autoanchor.py create mode 100644 scripts/utils/autobatch.py create mode 100644 scripts/utils/aws/__init__.py create mode 100644 scripts/utils/aws/__pycache__/__init__.cpython-36.pyc create mode 100644 scripts/utils/aws/mime.sh create mode 100644 scripts/utils/aws/resume.py create mode 100644 scripts/utils/aws/userdata.sh create mode 100644 scripts/utils/benchmarks.py create mode 100644 scripts/utils/callbacks.py create mode 100644 scripts/utils/datasets.py create mode 100644 scripts/utils/downloads.py create mode 100644 scripts/utils/flask_rest_api/README.md create mode 100644 scripts/utils/flask_rest_api/example_request.py create mode 100644 scripts/utils/flask_rest_api/restapi.py create mode 100644 scripts/utils/general.py create mode 100644 scripts/utils/google_app_engine/Dockerfile create mode 100644 scripts/utils/google_app_engine/additional_requirements.txt create mode 100644 scripts/utils/google_app_engine/app.yaml create mode 100644 scripts/utils/loggers/__init__.py create mode 100644 scripts/utils/loggers/__pycache__/__init__.cpython-36.pyc create mode 100644 scripts/utils/loggers/wandb/README.md create mode 100644 scripts/utils/loggers/wandb/__init__.py create mode 100644 scripts/utils/loggers/wandb/__pycache__/__init__.cpython-36.pyc create mode 100644 scripts/utils/loggers/wandb/__pycache__/wandb_utils.cpython-36.pyc create mode 100644 scripts/utils/loggers/wandb/log_dataset.py create mode 100644 scripts/utils/loggers/wandb/sweep.py create mode 100644 scripts/utils/loggers/wandb/sweep.yaml create mode 100644 scripts/utils/loggers/wandb/wandb_utils.py create mode 100644 scripts/utils/loss.py create mode 100644 scripts/utils/metrics.py create mode 100644 scripts/utils/plots.py create mode 100644 scripts/utils/torch_utils.py create mode 100644 scripts/utils/useing_video.py diff --git a/scripts/__pycache__/export.cpython-36.pyc b/scripts/__pycache__/export.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4dc892b46eff9d08e1c4af4ac74e164a2d085d6 GIT binary patch literal 21714 zcmch9d5j#{d0$oaeN0c!eefy{&*9GW@YsVp%VT-XiVk^Do*l85=W371xlR2|3q;v#Q_q-37mqFz_H_m2%rFoVFU~U 
[... GIT binary patch data for scripts/__pycache__/export.cpython-36.pyc and scripts/__pycache__/export.cpython-38.pyc omitted (unreadable base85 payload) ...]
zcjPln$Mv?*kI$Z%JvsY4Md51-Xxp3*mVnqV!q#9A%C_j5&J_8*>6ASOgJBhT73y*c z_Gr7i|1MYvpWSjm?d{-WOOnDJoq-~pd9gFg)G1z^-CvwxxmKJ&B}&mM1h+4p@CP*( z@Kq9NDWh*Sp)Os_r)?t>le6nQe`{xcc~K{mm+l*@m=mO3@=?4qO3tsSwV?YsI$c$x zL4&(lPJwS>23N`MOQweA%ondD<2WMgaE>0>BQOl2bfYe~I1X=rDy1wP&I?Wk$0cx< zWX8(bBlV_Kv0uXbsULmwjTt%_z{K!=!GuYxzx391a^&S@b8>TYffEqd+7r|Ya<^8` zW7XjmGzYWZ9CN(PMLGK=>xP~e&~@jwj&h$P#z?nKvqP)HE#Xh(9NAu-HiJHQ!(Eyd zO^+MlUEz8($F4`>@6lR0~zRC!;hGL_+)GN^f|4WBsv zhSsLS# zQ~M+i;+a*&o9JT;lZ>=NtQeEA7Z3qsjPEzt+PWr&DF*!w!Zp&U6kMeY!nUf|QSt%$ zD&*kzruxDpq`zb6O}t3Kd+3R2rWb|p5$*wrMatf$^NW(DX%du_Wn4woRdET`ykxmd zCFSzWc`s2e!}L`yi?h_ho78WQg8xN^L?5BEX^hy`Uts&@@_z+r6@C~BYqxGYv1b5Y_XT|OGt6aoNH-F z&X1(GJHC%Vg2v3re4ln%f+QhtKxUFy4sk%Tt|SLa*_5o?(uKUw$?}0@F5UA7;zcQQ zKaz1jL9O1QU@P}3IofA2u5kBnlbe$vwaaw+`a4YQJC=0Yx!6<)Cr@J)z}laseY74T zh(5Rdvk_VTl!RpW$U7M~u$b{94RKbT)v}RfK3~Ys=7;lz!pqF!5t|2QFaAk-Uw$;N b=lA3f5z9yyTK`yo|2ca*xi1+@qs9LLg)H?n literal 0 HcmV?d00001 diff --git a/scripts/data/Argoverse.yaml b/scripts/data/Argoverse.yaml new file mode 100644 index 0000000..312791b --- /dev/null +++ b/scripts/data/Argoverse.yaml @@ -0,0 +1,67 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI +# Example usage: python train.py --data Argoverse.yaml +# parent +# ├── yolov5 +# └── datasets +# └── Argoverse ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/Argoverse # dataset root dir +train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images +val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images +test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview + +# Classes +nc: 8 # number of classes +names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import json + + from tqdm import tqdm + from utils.general import download, Path + + + def argoverse2yolo(set): + labels = {} + a = json.load(open(set, "rb")) + for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."): + img_id = annot['image_id'] + img_name = a['images'][img_id]['name'] + img_label_name = img_name[:-3] + "txt" + + cls = annot['category_id'] # instance class id + x_center, y_center, width, height = annot['bbox'] + x_center = (x_center + width / 2) / 1920.0 # offset and scale + y_center = (y_center + height / 2) / 1200.0 # offset and scale + width /= 1920.0 # scale + height /= 1200.0 # scale + + img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']] + if not img_dir.exists(): + img_dir.mkdir(parents=True, exist_ok=True) + + k = str(img_dir / img_label_name) + if k not in labels: + labels[k] = [] + labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n") + + for k in labels: + with open(k, "w") as f: + f.writelines(labels[k]) + + + # Download + dir = Path('../datasets/Argoverse') # dataset root dir + urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip'] + download(urls, dir=dir, delete=False) + + # Convert + annotations_dir = 'Argoverse-HD/annotations/' + (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 
'images' + for d in "train.json", "val.json": + argoverse2yolo(dir / annotations_dir / d) # convert VisDrone annotations to YOLO labels diff --git a/scripts/data/GlobalWheat2020.yaml b/scripts/data/GlobalWheat2020.yaml new file mode 100644 index 0000000..869dace --- /dev/null +++ b/scripts/data/GlobalWheat2020.yaml @@ -0,0 +1,53 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan +# Example usage: python train.py --data GlobalWheat2020.yaml +# parent +# ├── yolov5 +# └── datasets +# └── GlobalWheat2020 ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/GlobalWheat2020 # dataset root dir +train: # train images (relative to 'path') 3422 images + - images/arvalis_1 + - images/arvalis_2 + - images/arvalis_3 + - images/ethz_1 + - images/rres_1 + - images/inrae_1 + - images/usask_1 +val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1) + - images/ethz_1 +test: # test images (optional) 1276 images + - images/utokyo_1 + - images/utokyo_2 + - images/nau_1 + - images/uq_1 + +# Classes +nc: 1 # number of classes +names: ['wheat_head'] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + from utils.general import download, Path + + # Download + dir = Path(yaml['path']) # dataset root dir + urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip'] + download(urls, dir=dir) + + # Make Directories + for p in 'annotations', 'images', 'labels': + (dir / p).mkdir(parents=True, exist_ok=True) + + # Move + for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \ + 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1': + (dir / p).rename(dir / 'images' / p) # move to /images + f = (dir / p).with_suffix('.json') # json file + if f.exists(): + f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations diff --git a/scripts/data/Objects365.yaml b/scripts/data/Objects365.yaml new file mode 100644 index 0000000..4c7cf3f --- /dev/null +++ b/scripts/data/Objects365.yaml @@ -0,0 +1,112 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Objects365 dataset https://www.objects365.org/ by Megvii +# Example usage: python train.py --data Objects365.yaml +# parent +# ├── yolov5 +# └── datasets +# └── Objects365 ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/Objects365 # dataset root dir +train: images/train # train images (relative to 'path') 1742289 images +val: images/val # val images (relative to 'path') 80000 images +test: # test images (optional) + +# Classes +nc: 365 # number of classes +names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', + 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', + 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', + 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV', + 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', + 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', + 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', + 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', + 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', + 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock', + 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', + 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', + 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', + 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', + 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', + 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', + 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', + 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', + 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', + 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', + 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', + 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette', + 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket', + 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', + 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', + 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon', + 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', + 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', + 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', + 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts', + 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 
'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', + 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', + 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder', + 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', + 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab', + 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', + 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', + 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', + 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', + 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', + 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis'] + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + from pycocotools.coco import COCO + from tqdm import tqdm + + from utils.general import Path, download, np, xyxy2xywhn + + # Make Directories + dir = Path(yaml['path']) # dataset root dir + for p in 'images', 'labels': + (dir / p).mkdir(parents=True, exist_ok=True) + for q in 'train', 'val': + (dir / p / q).mkdir(parents=True, exist_ok=True) + + # Train, Val Splits + for split, patches in [('train', 50 + 1), ('val', 43 + 1)]: + print(f"Processing {split} in {patches} patches ...") + images, labels = dir / 'images' / split, dir / 'labels' / split + + # Download + url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/" + if split == 'train': + download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False) # annotations json + download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8) + elif split == 'val': + download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json + download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8) + download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8) + + # Move + for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'): + f.rename(images / f.name) # move to /images/{split} + + # Labels + coco = COCO(dir / f'zhiyuan_objv2_{split}.json') + names = [x["name"] for x in coco.loadCats(coco.getCatIds())] + for cid, cat in enumerate(names): + catIds = coco.getCatIds(catNms=[cat]) + imgIds = coco.getImgIds(catIds=catIds) + for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'): + width, height = im["width"], im["height"] + path = Path(im["file_name"]) # image filename + try: + with open(labels / path.with_suffix('.txt').name, 'a') as file: + annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) + for a in coco.loadAnns(annIds): + x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) + xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4) + x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped + file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n") + 
except Exception as e: + print(e) diff --git a/scripts/data/SKU-110K.yaml b/scripts/data/SKU-110K.yaml new file mode 100644 index 0000000..9481b7a --- /dev/null +++ b/scripts/data/SKU-110K.yaml @@ -0,0 +1,52 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail +# Example usage: python train.py --data SKU-110K.yaml +# parent +# ├── yolov5 +# └── datasets +# └── SKU-110K ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/SKU-110K # dataset root dir +train: train.txt # train images (relative to 'path') 8219 images +val: val.txt # val images (relative to 'path') 588 images +test: test.txt # test images (optional) 2936 images + +# Classes +nc: 1 # number of classes +names: ['object'] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import shutil + from tqdm import tqdm + from utils.general import np, pd, Path, download, xyxy2xywh + + # Download + dir = Path(yaml['path']) # dataset root dir + parent = Path(dir.parent) # download dir + urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz'] + download(urls, dir=parent, delete=False) + + # Rename directories + if dir.exists(): + shutil.rmtree(dir) + (parent / 'SKU110K_fixed').rename(dir) # rename dir + (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir + + # Convert labels + names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names + for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv': + x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations + images, unique_images = x[:, 0], np.unique(x[:, 0]) + with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f: + f.writelines(f'./images/{s}\n' for s in unique_images) + for im in tqdm(unique_images, desc=f'Converting {dir / d}'): + cls = 0 # single-class dataset + with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f: + for r in x[images == im]: + w, h = r[6], r[7] # image width, height + xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance + f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label diff --git a/scripts/data/VOC.yaml b/scripts/data/VOC.yaml new file mode 100644 index 0000000..975d564 --- /dev/null +++ b/scripts/data/VOC.yaml @@ -0,0 +1,80 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford +# Example usage: python train.py --data VOC.yaml +# parent +# ├── yolov5 +# └── datasets +# └── VOC ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/VOC +train: # train images (relative to 'path') 16551 images + - images/train2012 + - images/train2007 + - images/val2012 + - images/val2007 +val: # val images (relative to 'path') 4952 images + - images/test2007 +test: # test images (optional) + - images/test2007 + +# Classes +nc: 20 # number of classes +names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', + 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import xml.etree.ElementTree as ET + + from tqdm import tqdm + from utils.general import download, Path + + + def convert_label(path, lb_path, year, image_id): + def convert_box(size, box): + dw, dh = 1. / size[0], 1. / size[1] + x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2] + return x * dw, y * dh, w * dw, h * dh + + in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml') + out_file = open(lb_path, 'w') + tree = ET.parse(in_file) + root = tree.getroot() + size = root.find('size') + w = int(size.find('width').text) + h = int(size.find('height').text) + + for obj in root.iter('object'): + cls = obj.find('name').text + if cls in yaml['names'] and not int(obj.find('difficult').text) == 1: + xmlbox = obj.find('bndbox') + bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) + cls_id = yaml['names'].index(cls) # class id + out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') + + + # Download + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [url + 'VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images + url + 'VOCtest_06-Nov-2007.zip', # 438MB, 4953 images + url + 'VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images + download(urls, dir=dir / 'images', delete=False) + + # Convert + path = dir / f'images/VOCdevkit' + for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'): + imgs_path = dir / 'images' / f'{image_set}{year}' + lbs_path = dir / 'labels' / f'{image_set}{year}' + imgs_path.mkdir(exist_ok=True, parents=True) + lbs_path.mkdir(exist_ok=True, parents=True) + + image_ids = open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt').read().strip().split() + for id in tqdm(image_ids, desc=f'{image_set}{year}'): + f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path + lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path + f.rename(imgs_path / f.name) # move image + convert_label(path, lb_path, year, id) # convert labels to YOLO format diff --git a/scripts/data/VisDrone.yaml b/scripts/data/VisDrone.yaml new file mode 100644 index 0000000..83a5c7d --- /dev/null +++ b/scripts/data/VisDrone.yaml @@ -0,0 +1,61 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University +# Example usage: python train.py --data VisDrone.yaml +# parent +# ├── yolov5 +# └── datasets +# └── VisDrone ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/VisDrone # dataset root dir +train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images +val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images +test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images + +# Classes +nc: 10 # number of classes +names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'] + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + from utils.general import download, os, Path + + def visdrone2yolo(dir): + from PIL import Image + from tqdm import tqdm + + def convert_box(size, box): + # Convert VisDrone box to YOLO xywh box + dw = 1. / size[0] + dh = 1. / size[1] + return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh + + (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory + pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}') + for f in pbar: + img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size + lines = [] + with open(f, 'r') as file: # read annotation.txt + for row in [x.split(',') for x in file.read().strip().splitlines()]: + if row[4] == '0': # VisDrone 'ignored regions' class 0 + continue + cls = int(row[5]) - 1 + box = convert_box(img_size, tuple(map(int, row[:4]))) + lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n") + with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl: + fl.writelines(lines) # write label.txt + + + # Download + dir = Path(yaml['path']) # dataset root dir + urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] + download(urls, dir=dir) + + # Convert + for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev': + visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels diff --git a/scripts/data/coco.yaml b/scripts/data/coco.yaml new file mode 100644 index 0000000..3ed7e48 --- /dev/null +++ b/scripts/data/coco.yaml @@ -0,0 +1,44 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# COCO 2017 dataset http://cocodataset.org by Microsoft +# Example usage: python train.py --data coco.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/coco # dataset root dir +train: train2017.txt # train images (relative to 'path') 118287 images +val: val2017.txt # val images (relative to 'path') 5000 images +test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 + +# Classes +nc: 80 # number of classes +names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush'] # class names + + +# Download script/URL (optional) +download: | + from utils.general import download, Path + + # Download labels + segments = False # segment or box labels + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels + download(urls, dir=dir.parent) + + # Download data + urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images + 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images + 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional) + download(urls, dir=dir / 'images', threads=3) diff --git a/scripts/data/coco128.yaml b/scripts/data/coco128.yaml new file mode 100644 index 0000000..d07c704 --- /dev/null +++ b/scripts/data/coco128.yaml @@ -0,0 +1,30 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Example usage: python train.py --data coco128.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco128 ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/coco128 # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) + +# Classes +nc: 80 # number of classes +names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush'] # class names + + +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco128.zip diff --git a/scripts/data/hyps/hyp.finetune.yaml b/scripts/data/hyps/hyp.finetune.yaml new file mode 100644 index 0000000..b89d66f --- /dev/null +++ b/scripts/data/hyps/hyp.finetune.yaml @@ -0,0 +1,39 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for VOC finetuning +# python train.py --batch 64 --weights yolov5m.pt --data VOC.yaml --img 512 --epochs 50 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + +# Hyperparameter Evolution Results +# Generations: 306 +# P R mAP.5 mAP.5:.95 box obj cls +# Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146 + +lr0: 0.0032 +lrf: 0.12 +momentum: 0.843 +weight_decay: 0.00036 +warmup_epochs: 2.0 +warmup_momentum: 0.5 +warmup_bias_lr: 0.05 +box: 0.0296 +cls: 0.243 +cls_pw: 0.631 +obj: 0.301 +obj_pw: 0.911 +iou_t: 0.2 +anchor_t: 2.91 +# anchors: 3.63 +fl_gamma: 0.0 +hsv_h: 0.0138 +hsv_s: 0.664 +hsv_v: 0.464 +degrees: 0.373 +translate: 0.245 +scale: 0.898 +shear: 0.602 +perspective: 0.0 +flipud: 0.00856 +fliplr: 0.5 +mosaic: 1.0 +mixup: 0.243 +copy_paste: 0.0 diff --git a/scripts/data/hyps/hyp.finetune_objects365.yaml b/scripts/data/hyps/hyp.finetune_objects365.yaml new file mode 100644 index 0000000..073720a --- /dev/null +++ b/scripts/data/hyps/hyp.finetune_objects365.yaml @@ -0,0 +1,31 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +lr0: 0.00258 +lrf: 0.17 +momentum: 0.779 +weight_decay: 0.00058 +warmup_epochs: 1.33 +warmup_momentum: 0.86 +warmup_bias_lr: 0.0711 +box: 0.0539 +cls: 0.299 +cls_pw: 0.825 +obj: 0.632 +obj_pw: 1.0 +iou_t: 0.2 +anchor_t: 3.44 +anchors: 3.2 +fl_gamma: 0.0 +hsv_h: 0.0188 +hsv_s: 0.704 +hsv_v: 0.36 +degrees: 0.0 +translate: 0.0902 +scale: 0.491 +shear: 0.0 +perspective: 0.0 +flipud: 0.0 +fliplr: 0.5 +mosaic: 1.0 +mixup: 0.0 +copy_paste: 0.0 diff --git a/scripts/data/hyps/hyp.scratch-high.yaml b/scripts/data/hyps/hyp.scratch-high.yaml new file mode 100644 index 0000000..123cc84 --- /dev/null +++ b/scripts/data/hyps/hyp.scratch-high.yaml @@ -0,0 +1,34 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for high-augmentation COCO training from scratch +# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 +# See tutorials for hyperparameter evolution 
https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.9 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.1 # image mixup (probability) +copy_paste: 0.1 # segment copy-paste (probability) diff --git a/scripts/data/hyps/hyp.scratch-low.yaml b/scripts/data/hyps/hyp.scratch-low.yaml new file mode 100644 index 0000000..b9ef1d5 --- /dev/null +++ b/scripts/data/hyps/hyp.scratch-low.yaml @@ -0,0 +1,34 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for low-augmentation COCO training from scratch +# python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.5 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 1.0 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/scripts/data/hyps/hyp.scratch-med.yaml b/scripts/data/hyps/hyp.scratch-med.yaml new file mode 100644 index 0000000..d6867d7 --- /dev/null +++ b/scripts/data/hyps/hyp.scratch-med.yaml @@ -0,0 +1,34 @@ +# YOLOv5 🚀 
by Ultralytics, GPL-3.0 license +# Hyperparameters for medium-augmentation COCO training from scratch +# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.9 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.1 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/scripts/data/hyps/hyp.scratch.yaml b/scripts/data/hyps/hyp.scratch.yaml new file mode 100644 index 0000000..31f6d14 --- /dev/null +++ b/scripts/data/hyps/hyp.scratch.yaml @@ -0,0 +1,34 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for COCO training from scratch +# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.5 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 1.0 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git 
a/scripts/data/scripts/download_weights.sh b/scripts/data/scripts/download_weights.sh new file mode 100644 index 0000000..e9fa653 --- /dev/null +++ b/scripts/data/scripts/download_weights.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Download latest models from https://github.com/ultralytics/yolov5/releases +# Example usage: bash path/to/download_weights.sh +# parent +# └── yolov5 +# ├── yolov5s.pt ← downloads here +# ├── yolov5m.pt +# └── ... + +python - <= cls >= 0, f'incorrect class index {cls}' + + # Write YOLO label + if id not in shapes: + shapes[id] = Image.open(file).size + box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True) + with open((labels / id).with_suffix('.txt'), 'a') as f: + f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt + except Exception as e: + print(f'WARNING: skipping one label for {file}: {e}') + + + # Download manually from https://challenge.xviewdataset.org + dir = Path(yaml['path']) # dataset root dir + # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels + # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images + # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels) + # download(urls, dir=dir, delete=False) + + # Convert labels + convert_labels(dir / 'xView_train.geojson') + + # Move images + images = Path(dir / 'images') + images.mkdir(parents=True, exist_ok=True) + Path(dir / 'train_images').rename(dir / 'images' / 'train') + Path(dir / 'val_images').rename(dir / 'images' / 'val') + + # Split + autosplit(dir / 'images' / 'train') diff --git a/scripts/models/__init__.py b/scripts/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/scripts/models/__pycache__/__init__.cpython-36.pyc b/scripts/models/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed547ee731981767d7a36c862cfc241867c81e4e GIT binary patch literal 151 zcmXr!<>iW8$CJbW1dl-k3@`#24nSPY0whuxf*CX!{Z=v*frJsnFBkoc{M=Oi#L|-d zyu>oyoXot^3VoN<;_QF4FKcyx$ literal 0 HcmV?d00001 diff --git a/scripts/models/__pycache__/__init__.cpython-38.pyc b/scripts/models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f96831e6ab7e023f2a2b1a33a1b0eee63cd73df GIT binary patch literal 168 zcmWIL<>g`kf=kwINg(<$h(HF6K#l_t7qb9~6oz01O-8?!3`HPe1o6vPKO;XkRX?$` zBtI{)Ot&~SuQ@GlH00JNvBo;SMkOTn`6lHP=5fDj@NDT+mvx^zbv3%VN z?2db^V<{7o4H32u%aSN7WtU^wj^o%a$8jZgl1e#_d?az4RI1BWv13=+bh#48NoN}pXRDDb z<|JRfI3)Ms;)vWwi=%QMD~`#1ygpH!kf+JwB<{(^w&qlEN=ivJwl}AX)AF2d%rs|< zv&|jF9rB)O>}=jqyra3RxU0FlxVu>>7MgpCdzyQTdz*I_?`-ZX?rZKZ?r$C_9%$ZG zyi4+A8+SM7igV3_#e>aziuXudu5oYkQ1MW6zBrHfhwQvP^qN^b{G?$I+as?T_DFT2 zeq=pXJc^i6dkisS)uVVnhWBxM0`C(+isOixw6`HhI9f-N3dOvD<0Pnl(-FV+E?+@a=VDG{E9(jKV?|bb#@qTAe z!o!H!XYWVM{vhTQVh-4MA?B_i<`Kl)ZO2J=<NKjI(}ZqIV+2cwN-yezg?>;r`B%y=}NoN zR*tLu;aaPrvy_(60)Mz!zFIBWwQDt}cDYgYCtB@RsaakvIV;P{s_IDgyi+MRs-;T1 zt!&57tgcNb&Zd!Yk0hVWiIAt9&feUZrN=sKdI~U^G^9%wbX7^OYDH(w}NnQ9iFzHXp#*8CCQ zTva>pF2UPp4qs_EtB1=gZo5^!cBoNnt*jnCS9Pws?d8LE^;)%o`x-{}+Q~!BvV&xoWm6XjQQoYx3i}jHOIyK@}qm%5Vk65bat@N<+440s?o;SI*;NZOoygqF&iz% zEw?IFKUQnGh%&GG=5nubY8MLfQ$aDiS#%PCVWrK2srKQi!q@PZ2p*hyJX8(PQ3EtI z`?|3RI%y|f%Yc$5?bK@qC~4Zx;GTFLqgPB~h;n}XY`b;sEe`C$rAj&|iuVjI&X{8( zaE*1NZmt`q;ac^WXL^CYuYkuxb*{xhzXshNi+qW*a{P@M%(;rj?G~Z7?BY5nmpR$2b zHFpg-S#jsme%z@x7JaL76xT7uY4lA;-6{7absTr#Z>ChLG|G-sD(Ok${HTKn?#AWF 
zpqW|BP2TJbv0d|FQwPAG04v}>g$K{5^TIIIEzew+KZ5MSTuf?JhmlAfLNI5k6MVjd zK?J}7j4Za*^|G=#^8$W`*~v=_f)4`?;A~0*M)43eps#96SN*MO%?E{-7FV3=(Y~t3 z`m2_&a|v|-2_#sM@X}n0Q%S;!smzv2n!@5aUn;%2Qf}xkGQ8>`=6x7}KNL(srP^qO z)4;(1l0V2qG9~-*#OefH($;V?9aD^};EH~n2L2poiO>Zo0d$GkaiWW$2PsI>Po8^$ zozhLO2pS?A@0@mys6&L*_z~enfFr<+gl*nTg25BME$|QZ08(qXimN$36OR0NlLR}q zRI%rhdb&^Y+kFfOlX72wiMq729$%qpA1Aepfa2w3a_O>eK@S3J~FHc3`D{oy+keuIBHha z)8)0Qs)VKRPKw66vx;C7=@B{cW(;H_4gx}A)Qchc8H^#3NpWKA*i8$0(wkCbHpOHk za%3u^IH*#l+Pqw~!AzTernS;6U8$CBXD-*9LUj)kf{WK$)w1%ms*;cdc#}+|!5FEq&5W#)3qMl%70Ync@Fz3q|Mqq%_jLY9B!p_XbfsCeo z0QEVdXIaE)1pTNV(jVM0ZlFVux@W+gw;N!lMm>%zq3Kf)M|Ny2C^8Iy>icn1PcjfF zeIK8mX7F7I-hpxnW}inIK~=-N+nh8z)0_Gd5E@jp70?QGcn8R3haYAgMESIp&nU)J za7DjZ@N~Pddj>52YK5cAAMdP-r8+Mm=)tayQ7?hg-!QJm)IJwueZv5GK%5(|CvF(Q zTM}=n*NxZBMbUy@OS@?=zHU+!`V33R++e8TGq$iv9wLEorV&6I8ts}cioF5Z>Ur?( zS1kc*Fc?#Y0h6;634ODKC!sS0z8-&jJ9BEO+N!QDt4A-fR0=n7qjPWH97*RRcL9QaySTIF#^8F;_E}~2JBE((dM!Df9KpB>*zU3-u76_KGzaBkgJ0W=k2^*vw zPJl22-G~U|0uo1flQh%d(Zk@>19&soj|gv4$jTWd&^VV702e^WuV&OFpNJI{IP2z1 z@vha4b>m%NN*(jIZq`M2wPWkCZc?y_VZuwU1Fz((d|!`IRKk~(7l#-{h7(^9nN`qB zsPD@N9#4`Br3gt%gG`jiOFJ^HlU;)rf&}=IUS@AJ(Oh307^#unM^BWCw6)(aW-No z89#fb?Ya#BY~|`D8Mr@2Zv1}#v50ANK6wdtvJpM(bS=z7Vv-6+lDH`Xt+@{B2Obmi zFiTKMOfL=|l0aO-i?z>@r(7ilCj)LF)xO2j0g+_$bE!V|um=$W{(h`-4TQgP?HFWG z+eV^6`1aGzm9}y#E3Opco4|!iX=|=fMxoJ^<;_t}*+g$7BHWB%T4Qkl)8zbwWT;-G z^?kDFc8Dv3F@Q~xO^FAJ)v8wzd@Bln1!;tGKrS8cYgY(FF+Er6BP{Foi2TdQT4FVt z$PcF!W4;3rHPmbG*^3usaxe9Oih#u508srnA~pb3l0fw-Llb;xNg+@rfcjtojG?|O zV3l$aOI9!tn|2{S-7qE%0AZLvgOphvE1Fgg?|IRrhVV2@2J*VK7_$?05-eoIi}kUP zQ32PKmd4Kmt|;LM)fr$XX~YUUDc}L@WIA9c#trKRG)#!KiGZ!pViH?ID*XQZF%)Ry zfoz5o5wIDdKM-`Ph|(f3XQ;<*)CIg=#})FKxq;D-0C_7WGqx3$33zHwLu-S8ERpUw zgA;0=aiU@!;S)Qmo?~zkfp0c9W2M`ZOlzX(wY&4jJ9xSVqch^T`o31UU&A!1t>;&33W8TF)_s;8-;Ya!w$v~m7%n)IyG zBn3%!E~AK`s?OjQ27=f%KFNr1fcylEJ{9ODm9je*?_(;~8g%BhV_R{OHRN-W2n=)> zo!vIr*%15V8mS~y1fbg_0gK4mz!j2!oMOy(fCN})kG~V%Q#gpRUQYD8#Cs9^B0Nq2 zx2+Jj*QwIT+(r0H#VHM|Nk9H-;V+?Q^8X9GeVtb;0+3Xt zdgv(dxd|cFRVI3`DEVrf!KpN>_9{deW=N`cc+Fq64!@Mloh<{2_6R zgtBD6P!<_YQm`o=zx3QU!yGaeQZ~UHDjphhC{EF)=uu3D#Y99h;hxCiCh$N+ zGT~s>!dq-F^n=KZq6DHA5UXJ(y+O3v3TA{Q1+=`6A92EO;c`MUCbm6vD}rti!vjbV zNH8uQ-@9$aL9WlR76Kb=D|{aqcLE@PgT^Xt+fCW&0b^CpPngzxWIUJ!kI#LR)6vme;l?|d53V8C7jT7>!}o#7A^7n( zI62=8DN<*TKY68%bsA~dAM318PdEqs@gB_wIey=KY>5|?D(mr{q0NO_(kJI>wd7qI z31ExZU?lKIq_sWHJs`=MT9WJ8s3^bvD6WtX+!^#Xny0NKw@v!dw@dhgW5iTS=YoN(qN;+g3up5X#bq|s*8cO&rz(5{3KM-1l` z!{xk)K*I{xXJ`zaHM%g3f}+NuTUbIfV|bVKnsw+K+qE_keoMH?I#z0Er1fGnHHl7< z7W{^sF^lpAXb1EIf+~e&r+`3~-$X-z776V@Hv&sSv0y1KFhR2@tS08JR#mIoC^@xG z)lUK?YBo$8STeI~ttG%2!NAWvQ(nE$Za0qEA>`eG=#Ap$Zp8gPTr&7rd@F$VYz1?@ z6}U_vzu3qSpv-A+@oO;#wOWL0F{ns@vJzC@aN8Z!S*tjzSc zZ9vW-otC*I8bP9XjsjT7L8Vp3W+F~5*Jw4S(R3IuVrG>zC4XKuS;$JZh6 zUo<{oJZHS9*BufIezLaIYOCtN3=)_o*vv@;y#-WOt!J$_*5M3LQ$T-v6Cs$!Irw=N zL}Ed$A*B!PqbU$zf??|wB=^%;ziMIH9e)@aM&(K=de1wxCKlEgYYmvW^xOrohb!m-YfR}5^@e;5A0e)9EAwABlO{5592z^EH5D-`e@s1 zfIg=;qK|x~7$4_CwW!WQ0s|40@nED&AfB602@3T0qKxF9QpOAbDH#iY&H%gm zRtDI_`F3T+k%8IZNI-!z#`5k)(1TMlDBC1FhGIViPP!1p0`Xo$UEeCHUOhO3QUH2_ z`MpH}A;{V>5gFV>0sAx5+k+O}c)i*zJlr?ewjx5mhe~?bG1hBa2s>Kyl}aSIbeqKJCMx>furm50Wr6l(Ht}9s=!KAF`?xJ- zRBa&RfT`0AjxjjIK%kYQh-vwzWb&Jfu78M}5f7s1S}>=q&h!R#a5JEiTykX3GXErk z5KwW)2v9f$aAFn7(#@0>ZWQ4*5U@XESu7X?Z2QI*gy3lPrusGkk?`^dtSX>k8G#@+ zF#2sx!@ia1mInE96lTCjQzH^08sY|IBq0ts@E0uTt8dE zfYz2)+AB_I5*G2l$kLu+Kp9Ydn1P4`TL@WyiI0JFIcg@MnN6FW(P*4PsM6k}w+T>h zpo%|dH32}y6=S{w0F``Vb-CQKzkP6$k^R=t1dBo+G>z!mzg<{L0*bJzZkrQu))n7} z5SD}{5|Y5mHo;Pqo)86CnlwUKn#JJu0Tdxfb0>lpIQb7Sf}o^+kO3h{eHOuOgHW8q 
zLRMmcJ_aB(91PBP1b{w=qW=%B9sn`sI{`odbST=IF@9V0$fRM0{Kd~;F|pdBJNecm zX?PHbbPF~EQAiPmAW^4tg>is=KoQfdBi1L)kb#E)9GL(%@Q-oUEWL!7AJp5MACZ!7 zPwM?VD$j@Ag6I-gjM;jAPUA~t_8T2A`;Cd=&xB|8gq^eV*a4ELCb64?dqBW3i;$7w zshvjWxB27es&2L7iaX(%a^-5ZWxt)(F6z$wvb%6Tl5;48aztXdoD% zO{>erl^EQ)L8YUQ{1%PnCxzIbM?Hp4F<@} zjM1HZ0Sb{eQh90h7hW=I$Ip2t+K_^bl)Y(nr`#cT*qf@4)Q8qlhn-uux)biGH)ZG1 z=KqW@WA*V?9QSe5Jq6}DaVy?B;$?1`>XJKI-)0YasV`$!&708a;fOn8kJilYc6X}2 zy*`avw%cQ@1^1B|<5oiLb!Y0c`VGC9M%~l)_zY^s9qrbAycKKxwiiPUul3b1p=)q= z*iWz~cW3>M`YsuZ8KnQJmqq$N?MuHcNIzBIT`z!Z&LZcOH;YuiU~g|(_VglzdbAj- zW@ArxhkW@Rd&Zt!vRZM3W_PEzqrMk?*lF)TAMQXc4|us-mb%hc%g(4(=cCf@tncgY z@^;ntqt@N-0k*Mz*Sc}ba!$E-2k&$CE*gR{kGty!;kK_+-DB^@928Jn0rB^G*bl@# zKsRIEJ-4jZFMIi0G4-Xs+V=&c&ORLK?sezAz4gQPe(njn8N(PI!Cs&zF{5`P{wS~d zG1R#aaFep<*R3}WcDEt+xVLZJ1O)B(_Sa8%+v+C?3Ein41ofWW`_QKYtE*TvpQ_(a zxO}ywAB0$G4CM4)7~8VdYJAL^~c3h z6!-4*?)K)qgD|BJd-3`iwqrfv9q{fxh0%NUZ(cCm`}D3P??CQgWr*WZsE-xcLJ;Emjhs~7unoE*#{^oV<+(%r+z{WK`UGid7(??`s#gE#L*{AKtzY=qwufMQv zU<^-q#{mDwy%SJ>P5TVs@?>jgU(T~SCsWzyZkpYb-pTrlvP^Qa{vq`8KD-xkzaP}@ z!}XWkmpP+oD>aq(1zygUKT4m==CWI|+t*u-cG>pxVGQ5iIt@Cz$+Uiv#CHmTnAV58 zKmpJEvhk)B?oRR(vZ+Kk7WmjsB=ybOMumJgq7Q~#490bAQrgG`B2K!MvAy;u-~wW_ zgx60JhIH%|^Ap@;0moTC-z+bexJ`*}>2DR-grwe((MdmzuE8*0=-g2N%Pd@?8_>my zsx7;PT5Az|;IM;fety0~k$kbXH1DpuK6Dc7+}R@~JiD;)V&U24 zYU}Ltg>wrF1Est@*~W*;wL{Bm?v-}y(ML|qpPWC@+11PV+zJH$W_2K!A8)r>tDO;s zN`F7Jw7lZy`XY+k&VKOR<4>$s;ZccBUQq38SZ-3q$+H(exG5IR&nyQioX)JS9SwW_ zsfA|;YJNM<_Fk*C+J~@Ptdlr6zuIhc;`5iOqWKAoVYT6>u2*YISFnLEhPp)J!nYjK z*D6)n0aMJ=jhTD>s*W4hcL}0YTRnGapuSGx{`n*GM=qg(@hjNzM+5 z^$SP^+anX}#e0fg8v*qK;z}W)!c%Fqs zOIRYuQbDX9!&kWM;|ZS8E#+4s&R1InsccSk|dk5x?I5_iC`cyaW!ZtCRf$M6|* z^$&Q9ahGKin>c;z+R@@D*1_NrTv}o;N_KTIAQ`^3gpF>le$^Yo(x)2b=4HG5=rU&l zMH!vCFO=RZV&>X#4{hjfOOR)t7aOe_Qas>OHl__ zeN088x>R4&EMV-toH=1lo$JffuMls`*7pve*;I%PqS=OFiA7`r6m9hiwtJII; zO?{2QPcfi2?#Iy?Y%1eptX^|b6HAEK9c;Qv@YzvZ(Gg2Hd|3vf0}9L6M{ zdV)kvk<$1n6i@yL>J4TI(_W)}xtM2DO0BAUy{)eLx$8Ce3btx<&v!8|@D_Y3rm+JA zEJC?zh=t`!rC>8Sc1Dt@VH;X|X$fE&e`?|Uv&B@vI*Z%wssp$vH(*&Sv715L{5We? z)RQ$w0v981af+h=&rqVY#STYfqsxH$l;2)3P!ToaN!=};8Y0N zm|aXg^R)1SVpb2&B3MK*0auf#=adwCfr@tu$#OI za@Y{Lt6I{nb@aFiEdF^uEpSsP~$V8d*3Wf|iJ4;sDCeC`2l?f+>u zEUBdo_3zkJLDwEiewYa?j5e1C)!*e?4AKjvLxElL9*(Y`;|_nD8+0MdSj(6Fq#hIX z&zU9eF4nbpCrIdhMUBn~xb;hNf;QRrr1|(pGekz3gaXstA zybv3TF&EJOv&@O!3T&r>neo#hv;ufqc6D_FvGRvP_=SHFWZzT(Nq+XS_?w6mk_8L> zucGu~CStOFE?|d?U~;`FFQy}^pX-AOe`sLl{Nce_13`Td#p6bC*w~IQOt54f$)IKA1EFdKfL7!%Mc-~9czuQipV!lFskGq%e^*hB>p+{tg)U*#ZnXNy<*ZnXVT|({ySdz@xS(w-$ z--e^<-c`RFhC%e+&QBWkIeSP9C#)q_KWGoj=$PG%J>q4cxMP2~dk^Mj6y6XpAM#p? 
zFTu?N#+E$dVPvb{iyF+}`4FG$bMAcouow_wV8rtgFYgW2kHXjmbDuYir&&H>M-$C| znbxS6agTW!m}hp(&0k*DhLy;UqjppeEUh(TlaKNX>yitBb+6+o$-J^XaW~8Lk z7{Q5Y!yBJOKQNZt;dL_oNo&zcU_5#w{~%PoQ-=FE#%mttDhs83uY0zBj$;QSYlirQ zxG#E{CFp&`D(Egdu|&(`N0ul+`WYM_gJT)E7zbhn7mf25jvp(=7tX%#1s)6ofw+Pb zO(0*zu?paTJeSu%`_wNY?VMH2T!v($C5Jd4{iG(0dZ{m3qJQgek=#$LfTBJCGEPlU zmiiW94chL7`UYAvuk2P@)!8U_ZO1_E!q{~JLutA%bHFjG-n*74J!(qCi+B@Rt7jvcm_$;4n>DdqzP!Y>%r z%a8+Myy4=m0u~$o@Z%0-IVxWwd}smrkN6pORYP%ss+iS`v|74$O#M1*O+s$Qo*bNqO+zW(y3du)&F3=|H%Nyb{WF^cwUhlf8(mZAUD|3Y%TUDb(*5-fFaW=p9qytQYm6Vmu|{&T%p!oI0h^gjsv2cEP%{Xpa&(fP1!lBM%WcGMw23@~ za=#B5oGW-RtYPc|EC^f8%RclZ!V_SlWVU%>{G)g~2u3@FRB6OaTV&a@<^gk`HEB^a zn8Y=UYYGgR`N{gp)b(@0f}ip17A!ef^wO)MM7y^>262DD1VYW>EG{|l2P6q62_Q5j zv?PqFSK#l04Nq_w$FV=^6+Xjt+{Bp%uOJRg_cKO4!&ne?tl+2x#AaJ@gdp?cX_=>Q zWnf{2qY?$6xgpgmo83wzzGg85pm^>p4Ro`r;KPt5Suo+V(?O4 zpC+J%qyDuwteX!%YrrQX=jHRpGe$Q@zap-M3#-jQw7?f{XfX#eo($~QCJ!DH0}>UG zGVF|T8Y`n7Q0-&ukFy6;2w)I|xmwPuNDk}@kwFYh)#6U$?L^afY<@%}DHmZic9 zcw~z)6@{Y2>FhHyg?2z350uo+RgZDlA>Q7JVc0LphpnuGK&UQnZfkvZRaNCWEHMp zr}D<~DF}v8F-o9LR0Z&)x6o!6I+KsfmXku%Dy(M|)Ni0Jm?Pfr~9D_nLmHKT4(+mm>I2o`;H7^V7 z`td5+6lIdoy+NM0^kn`clpi_kkq>a6d1t)Wp-o4TaX2~IGOP*@dRQs9LF$?pMYcZ=3|xioW8^H~ETk00{85 zJ%F0~L^8cE*iV+q%gc>5jl=KfP_pZKC`E#SF>o2?GVB@vCVKoxR`slgkD;bkYDqwBG7LyzPgLC#OZ2Lk1LI4P)=#@1M@e=b^3I?`Qp2}_Pk zXeLyY%%`j_{0>&1bH|waoN>#1`5aQgm&1;GIW9+bhfrqT9d{?7NJ7C>Q(QlF$GtpS zFlA$!)!Qz8CGzxDf$B5^#R1>EJj#bhh&x-~0hJUn=xx4rL`s{iPfA(m`@cd(&D&{j zXdRvt%cs2|o!`sWcexXYHKDdbErBlt@Y>7Q_t;q|ow=)MGZg%MeXl*m8t``72p|auf8g|}7}V7MboT(t?nMj!g*VztLbXKAh&hC7 z9@k-9M{pg*beW0X}+ttviX48-;vAY`M=Hsqb0G`ICK{)Mf&bAOfKY+S;Q1&sGyaS4euUyUevu zZWgSTn|_+cb1r;BmZ>ixpN&%Si z1Gx1GIJ@yW3)Un84%XxM_lQOyWA_O)(U!M?wnS@<IN(U0AC*I0%kuGo}L=xVVs#( z^?FI$@Z)8$ZW(B_gnU_ubl55i+9CZ-N`|A_>oStN6-VxlUK7^X!B zJj~s%W*94y;eX^4xr(1%E-Qc#lvYQ(6+VQ#ile2~jtBW#V9MkrS#UwSFa{N9$65hN zQHCv*9T8I1YfPD`!ghcq2Ne1*SrF%1QNLE-;O8uaH|089+EF`EJdS*~(9e7c{3xO5T+H73M0 zA_~~Qz;b?opF~8bmiCgfcn(>7T%w?*u{6;4XXVrRT+&obQ$ddC#~S%_ zJOq(Uzf-s!yfh-1k8=?`9_LEH50$3i*Pwcd9B)AGze9VaLi*oDBP!Bl z;I%r5G+BF_J%xJ?X2@yW^Yl{0eMlt#VF-M?Rf~PbWh_*}W)a3CN%U9n(i8Bxj|-M! zD^QN%77o@71_S{B(&i1b6!Rc%m7w|}gf9ldBoC&-nZpdzu)*^TX^T1VoXRD>vW9>?X7XXw)% z!7V_;XN&gyyr%>W3yNhI8aG74w}PE+(37(<#W0({wG|NjZ2|D-tOO+sApP zy&vn;;D^8nc!n^hB@A5qxBVwHNHe@wnVI}3-1N!P)inlDxw;yn9XztaEf5$C!nT&v zSi7EN(*G0eDuJE5k9nGJvhQz{R$p+YT{y{ci(xn~eqs>`-G)Nw19BFZ^9hu@gw3zF ztkp{{HY!t@rz%g?8=9Z>@+s7S-HXd*&=1Z-=L+NDzmBD-{8gO4jK#JgSSE*95?VwP z0dy%T8Tuf!;`k(M$-J4kA2!WbzX!U5<@jCf4)C}M%ro4_*k(9O6#{1xcMI8^s9u{eoEKzzsIAKEo?Gsal7e;E4_ zGOe_ivq#w{Xt){b)uI*u5_cb9bf*zBLoF{@iZjvA-+}(O1IxlY$!0_orl$KRNPP!d zEA5lm7&N$ZVGMRYP`6EblRLmlpu2KOeYb5*bIBO#Sf0-tcMC(E+_GO+bjr1b*y^}Q0ZVr!@tL9R6DWxV~g;KR_$f_v&FT06kSl@ ze|D4zB;wU^Ji)7m`hc)Cl#r1BNYNR&yVWk(;vG%Vbgq*W|Ik7wS&#+APQ2i@3;4eR z?S|bM)8C+VK)-P0gPHBKN9|Q*R=LKYhw>EhwdkE(Y1Ob!1gEx@rpEt0{6>5f!Ji9! 
zbjkt|5<0QOg0|zUpTWA4)@RfdQw!@W@aYd2(AQfl414&rm%%uL$Z+T<`QJW3z2Z11 z_PY86Ka$j{VFp#a&TZ2Ic@qETgR4^C&m>HT^(+i83V<>}uOyBxyor?h7*mi&>*ff3 zBh}MeDp|UpDTn_8WW#Kzb!PrJgTH2$K^E$#?3x4br?n?Y_0A!I0oipQ*1hsrb(w}- z&eP1N@SKEE9TGWLxvaFc5AR&n;zhR?O7L;4c9B-%-1A^F6eoH&mUcEIt8$l zLr`fd@Lak#jEmg-qL_teE*9W@sbQ{Ab_=5F6ksI&K@L$gjY^n|3^JJv>g=iXk*Maw zu@*Df;Mh@%G%!Nw{b(O&kOTvLr>rP_r|kf}i~278i%I_o5~z|*+PY=|npasbF4Wr@ z>8q1~_x1=Dz@5Vy;SOWTX25Vyy;s_>4-b^QAV6T55FREm_{NZ|U_KBBc{f-LOe}OZ zmUsztAdP>T*%gXx1_yAUGeCk% z(Ml)>FKRrwwMUC6Pbq;)5_UlN0|7@mfiE~Y2b6E?E~wA5Nnc=lW9l<}-ns+9!3@#I zjlE#j)m|@V2YW%p-Wm$1v-d**n@wR&;NT55%hI4pI`>V0fyumH-BgVGz(JFfsCEJ> zbvzDba5gGVp@-*-J<+x7$!=`*Mc3p%yrM$6ghO~_dwFaLCs#p)cnAVY9C{zktNh1T z_=gP;I^`S6z=^4VC4K7MIKO(LJ<;g+>b+o5?Af*jAB%Z#kB+ZNxC*lopW(JJiGHXz z(Jrx~_^H(jTwFj2Q)|IP;(E=-f8t19k)0drI@5O;tTOm+1W`AM7m+D%FJ43knC5L!9X^GWcNzUt%x_H%Rnl#(sstn+(3XMWVPOxzQrTBEHk-Rm^!{1tj(K zi@gpNT!p(*quqfQwOBatw? zoC03>Pa$At20Drf0u(GVtpE{)FhrRNA~f~N9Gt>~%)tqL6gX!Aph(~ZB@do}oxu|v z9r%wepco|kj+4XCi6y8fQ*BVA$PmLBqd>AX0N?D*Sf?MZG+b^4z$lXz0j-n<3x%fY z9c35RBRKLSn66K=3gNkM%pH#pjIpjJ8YZO12FHNmF$>8pMpFLShG@ZCGxt=fgjI}E z>9b*Pz-Ideo8UA223Y!r0L1?&i-bAU(z8Z#3XieTw4cDMU~KlTbRr$g#6@l8~ny3;z(;?Xlsi&}*GkEBk4nyAntn>5#1M&17IlTn?%fo+GX5EA< zZi)Yj4F8r4Px{oI=x}s(7Ydx)c;1db_PAYMc5BzFk6&BT2UEf$puDUoVJeC_`agA~ z&IkXZBhLTT5uPw{lHW<@1<|#I`%b>n5)_VYB=#vz*$;Do*-3p?HPJjAjmSzD$y^)= zG9wuf8pIyG*gU9>*Ik!5{#26)|0$KVBDHj||H_xTz-#*)E(_C;NKht)Hgd{9lXAGVND z6&UNpwLFyvc^UY4!q=<%!iTy`dwUz=8^3az6x}vl7`cAeA4$->3;~{<; zt@@#`E+9x!F@^-;Ngr5mw=3AH^KaSy#~J)7gV)(`_)<0+IORbWS2#wpp+`6gPe^sI zU{jdJGXuOaCK;-(Ur?3r{^Q`lX-MU%3xR7QOso zY>;h0odV+or*dQsN>=*(A^nAhHwD*}EUV5j$TJvXAj+pmVgdEU(I?0fHt!{P4mI)Hc`NF1JggAWiOD9Yp!BEW~&q?e1Gy~8f{aqO7` za9gvs>;pwe79`k;<%dXn&vwazO12Z(rHT{BPNi(wapWZBC{Ak1N$l83OqZ(SI90Za zaf$i9zh_@}2Nq>>ucl|Fr>DE8`+xs`_q{MMkTUT5=EI-3`0B?EV8_E2>;JB#~=oQyMY-OL_-%5Vmqq3eb- zRNPcPvJ%N2Mb5A@f}D}!QQRNH{iw4E_nX2J$B{GUY(~!JFy{nv#+@z5*%Id5i<}8( z5;>D$&V9(4a<(F8YnXFCa<)0!k+Z#cKU#VK_dA@OxZf%F58{5>*@gREa{mzScRP3C z{;sfwhmo_#*^8XLVa`e9>~r=bXMdRU2yzZMGsu|%1}ZaAP=ix?Zi;Ytz-bvs5X%sdos&b2CRdmjj3SN+^ z<*P-SF!cOge}?^W0NUJ^6evTDVj!T+XTD>q{k7tQKpY8)S5zyEH#vx)u!RyDLTImg==2 zQK(ny%Joz*SgIBDE4fAVAQ-IXFBfx8=}O5hU91#?O|^P0SIu9`xl4@P@`2t{MlIu5 zPUJdr&loFa)AUVDiaqxZD=3&q+COHHpq$6yjv-mVJFjN=hGR?_KWe>h<4sdd;n8j5 zwiQ^jM*{Pj+KPKg-u<4#m+IBx;rx0!A4DgkE zbI(6_{!levsvULY|M#YH$Y+&P7>$nn;6V&3?r5f?9=ty zm3KJSa~BGUo_cwm#KDPl9VDKyVzkUv!!$gr9Pv%x@*}?O*+`=+mLDU%y&3n7T-38# zG2dD>aUJ*LP6Shb7I}$Pqc-j({W#uF0SI37tZdi z&zGKgLG42KgV?iz;z8nR2gp|PmVvGX?{*@v-C|`vunI?U98-6(tm&#fa$Zm;aK3E@ z$y}~b$-8bYrzeY3qwYp>0Ea6BXQnVm8M86Ke$BSKx(+kqctJ2@92dUP;(?)RS-!a{ zf9KKO+)PATR!2}sab|B@>ONlYVA6r>5MJi%>S|s&Eawte46~7uUW5JenlSh@qOQ$5OLO{1Bx?%e#6TRe&a?UqM+Tb8FYQMI2nFqxC|!c3gANwSLB zOyzQ#{^B~5%e}soujnT-!s;=;`#6$dAe@Clu~KQzgmmj+7LhsGhbta}HwkMnmWU|c z7jX3bh_w8<9MKNm05^a)5yvLp2%eC##DdtFm-tz_^98|4d7FsegD8X=B1##QDMS#f zCCIXB`BBHb8G{f&K)1kS)I%sOqda4)gS;j{1$LE0J+e@6W@38I@8`1zm=HSEgG>mQ zNM~6hBycq!vu|vw#%>gYETGh<~#VN-*IZouJg?AF`N+j3iq}sIUwy9>&1(2v@ zv3jxSfaz9)WNoRMyHw0O?o7Hnjf#v9+`d#R=2c+69uk%-!RWI~6|cndryv!85`ni{ z)qD_{2L(cM46{T`56psk2Iq|?nF#6*A~8U8M&)l9X=8HjPqcfCU-G;jtSYedY;J!ghbH$ zEFNl3XC5%e%*MpJ4+*Kw?bwD;DPFR_wVom`^-;W^1-`^K_p8(Up zTp+Nvf4sJF2=Kmwi!Sgw81x)y{te@DMD6h~)He*UQRI1cD|*AYVSr*7xtM2{<8K;o zn)9MCT~Bxk-(E2(FMWnJByTX)02-OwP-r@icHIdi5SfOXriZF;KnQygocwi*kge&x zDT4wwvmHePGlwf7IRb-13rGfnG$_N#q} zA{>S!T2Mwk%T{6)jW?d6!nywRBjWlC|M@*xBY!#R#Pv!Y)gt8NAjr+DAT_y9e=R~d*1QYr_ zlMsmjQqiUSVljx%S0IfSYe5_`QgsQM#`mLR>iducaftscq(>~?6(IuiqI#Jn%&UQU 
zNu-?~sxQMuoO%g|D~dh{O$tM`1~>Ikg}A9+KJmBnf5s*3S5RkycpG#g*E5{&zIt~i;LvfIBFV&S-Sn{Njzy!n>avL#$JgV%>O~xGN+-&NOMF;=-0GQHY z2Y@Ma5P+eNk+kKG(Tc+;AX zI8i4CrZD73dYHnnfMi^Y+Go{A(Ptqc>(~SJ7GV$5xBz>Y2-$;i!@2=I67p&^WD_u# z+>UVQ@%yXCpla7~F$YLzo+6pJ9Z`$C7NNGrs}9ppS=?U5A+sp#Vy18CJFvPDuNc_~ zjWq1t!)6XKAv_YRjx#x-4l_@5Cu$~ISSJ5fudJ(ZMxCsTDf1DRR(KJ9o9-T?5CDg1q-g=Y-SFiB)1QQNR5QT}S04)O+klS_-Td&yt)5_+jwnoTc?k|A4XF& z6O87Zen5;CA+3Xgil2bF+p zKi<8G{4U<@IDPzmk*!yQ>F}%`FcQ!bwCkXghBT^X8==e(vH1*=Hp+;ilJ4n7ABK=W zktzv>X#UiK&9JO-g4iHX54;d1q>BsSPYt{W4Me)=<61SDtigjjv8zv^Y@1!B(7V3} zIP+6~kky2|Dyf)rXK>zuryVIL)+^<73=eS_=#F1_{!W0*k>VkMOgTaWGG!AQ=X{En zusDc(BW$V}2b-c%&IDMBd_z`o+nRfaeTGQQA=icgnSF|BGJ=pf`{fixaCdyyaO8QvZsZ9?{PmCgonSkD`uJ0q>Tqf3PF%u^>m;wvP?KHu4E2WKO)0Dl+Y;tk z>`fj-^&Yt>CUVhtp(aBkU#BJqL(=Ci^V8CgsimJT+*9WHc@71~L}Z@3!q4r@!A8=~ zI;G+LiS12P-jz>UY>(o-0l5TtHoUgQ?*`R=1l9ZuD-0)`_jjJ~eR#Ttn9;XG?*qLM zy(Yi7BC!?lE3^Ry8Di!m4n}aeR8IAD)8zm^5Bx<<82CWjY>3mAP_GE?#Rai~>E5CR z==QUy7i+=!Ky#xXp}j=J=!Ad(81NM87r0v|6vy#|7Atoei8z`>ET%~pMjPn}JROnH z=@gn&S>5eOkiJ}0wPGdbmKwz%2FNZsP|)EKa!R!YSbRB|LGs!BwR81)1-^@IxbLBJ zLgJ>8_e(egaKx=N4YIHi&~$xE$sWSZnGlX0M4%OGO-)4`$)v16Q5QxnYD%z(tQwzv9E?eJPdyS77&jYj0%lTz(PLIw7vC=}Vu8QlXlMuEE zhdYL(>o<<7JSe1UUPPYx3B z(9{40ZZHU;yKpJjxzD(zDm=^cr3!2}dcM|(FI}PKC)hvf&SSXx6&xMs6Ad(tbWfL{ zr3s0@qgjS%_(fL1FB0XNIUf#=!}WIV+*$P|E4(9;pTpA+hU88}wBt5%16H72L;l7A z!r<6q|BtUjZgm|kC<+XihJ(DOIHEcz(!~z-V|*aO;*ax6M2;>s=WG8IM;`*f;4;9k z?BHe}2JoxXI8O65@-||Cwh)FG@XT5akf#)NZTe3|{Sz`43UthmteOiTfbWY9PzcGX zd%;gp98f>SbuW{LnRLgA@bUXN zRcXDK_s;O}7OMF@Ru#fSR552AJn$)h{qUeAd9rqj7nJCG#fwCc6bY@zUrUzW{t+BH zAn^olx*$RG8>9>bC;;K&1Pd^H=ujFgkW0-8j0w5%Fju7cj)`7yU_a{Y)1>;T&3aAh zF1(jBC@E_q}hvFw7ZMqS@**GG50 zV$O#HC+gFvN|l)Kd;2_h95E7aj-E?mx(-Sh<`O~piwVEJW=jZRylknHP&)WW=mCh{ zj7pG8Y}G*7sCW_xLC9aF$p~&9dFzJM2Ac-{I&Z?)M@KdP^cz;YF1(ps$L!963KMu{ zQa!AWlAn5o2?tKR_UbsV1m6WTYq)*CP=h-Ognx$j#Ie{G2&c{A@5Jobd*kNtd$t*S zuQ9PkRoWOrNezkU0-==B+7OB}#)A&Bw)er(-DZ#P=y>=R9=0jembD#dgP_%&_d5e0 z;p0zPTS%mm0x=&D__WeXF?K^8C`f=lc?uy&+6>4+X#aT4Lx7J1(gS>O(&Q}RP>ysD1539*~v=5E7@&^ba0IMF&d>69n508Yk?Hk`;T zbb*D?p@Buf=I43)i%d3{Xqz9I!b1ZJZ8*_`;itA~g^nw|Gnx1j-u@1b?ld#!!CdGd$Oq%*Z( z)oi3@bDO`lyc^%J&Dn}?*p622^V7F1RqknJTc=msJGEhLNplA*Vmn%U(e6%fA7`w! 
zf7Q5UxsQ4W!uuI#$Et>eo#lgwsn(_Lc6MS0r_tWDzq5Rgk98beyKytp+;z*Uea+9@ zim0FLX@5^R?)-{F&E4Ltzq@?c*~_&YHzOFOBV5CQIlar@-8zbAN6W`BVtW8Nap&NQ z_1S~X&3JO$-?M50hW7e87>-4HYyCQ168!eo{IN#&%b7m)v z@{`1}=Am0=?S?;q(fYlf(K_5ST4$TH{%rZF)<*<}#!>!de-P#0?J0kxr~K2Og!Qxh zQM~azoj3ORL$_@8V$T~#``-|X#I-Jo=HVMg>zP%f^(^{QxSYtMlmZNST6{|HyX zIQL<;kJf(QA11yuxaR5p%5&9o&12P%HIJJJ>8c&G4e$BODYX4_Cb)bE#kz;*%jeL~ z^W_WWj{%#GHIIKUXb!Bb5BTxc3#-QE7~c9;PmdoAd;E~|uyfLRK7hstTV(g38pnBwBaW1;*7V2#5k{X_yn|pcsxy54b^o!GH z=H~ipnK2tg}edNUKy|X78JG!qvzXZvIyJsH`}r(Za8>d9+GguUT2&#C$qI8Ib{?DV-8*5%>@l8a#pw=tz#N5@`#dhWTt zmeJ!3dwiu-s~>WTR~pfSv)8JXhCO?+R6`7R6ysQ|1o5lI(!wPyzlxwWk+bkDjq;^J zQC28qGmI(aO0A;nwOhO(D~O)I(AQ!kdjIT^*&`RwMf(yKWCZaiN;Pr)Kw06822sab zMmXkcoJZTqdx-m9#7ZhlU4-)fb>#NJDrGw!E{|oXe=zpT6FpF8 zEA^|>=dqH^ffK4&=`XlW!6N zKnyjE=#f?gPZ3q^N6Ik<|F0stt>>P`blSb(7)C{XKQ<#xjN(E!QAW%UCAORT74^^X z1atR0JVm%_u8Ku@fpz6*b{K9QgmUH<_%%7FI4`sVz-299b&{uV#hDW%PFC{Oi%$O0 zMa;?nMO8-QBOho!uA=VTys9^fHQi1xh0#^Q;P`J)q51}r?0|k@sa7DOWuuEg$zs;7 z0kX3LBsK62E)~2b1yGfKXNU7{_yxs^tW$(l{0~qS0HC(G1hk3QmpopGN$|F*{w*G2 zKv+h9+{n64)(LqD6fn#w#?QUixbN@xwT;NNhmJH-Plrpo9y$WRmxlk4t^Wrk#O0@J zaI9e^B+_7jTmh3&DK6wa_-2dFLJ{3>99#>7&y>94^pkixec{{-eSp{)s+6$!L*AQp z>$682qdgf%XPsr-Hqz}9PNf3Ch+-wk(lx9j%B!m-#A?3;!cf2JqI)SJL$Z`JJC4Zu zqM}~rV)K_%&3FOkQH)9yO+{n<@YpF7)ImHK6l2i=7|s$c|jt=GocbXm4L zA_VGH6b0o5^$hIge6suQL7GY z^sz3gUl1vmyug+k?6QkhFHv5*in>r3zv*CE-;l!6n7N{NR@JNIM_AgoAGvO8{hS7jEaoo_>;4OF;v}N1%GL zLy&3kMwrqn^^4gIyOOIFy{mO~IY?hEd6%%JmWy+<83Cj4Q8t0_d#oo_o}yPLNau3l zs$DEnBh|t>p!&iBpwE7K?(B2fc*rNSTb!bc>CRVR$I9_D!@dPJTUOM9HS+<(A{%kD z!+^S6c&C|DthUG*%Z~Iu5iWxuPp(+*f1=k6q(CyNIj}XXX0>XIL6joYvM?Y}rh1Sd zYs!%`MS%uYH#>n9$G{f|4k3@mN@8FY#E93}$#elDS*>`8YH+cDUlz<}tP}Iv*pQ8QfcD?! zoA_FR?RYpdL81+<0G^stye7O+U`xwA18w+4SPVqncnC>B>Y~I&NbHOSyZC=Z?b&39 zdj{!{&&`8-b*DU==n(UC4@?9DeKQvf_RpF&d3}>(u#?H%NU#xv;c_sHfnh5J&NL0P zZ^E>(>}A5Tg;Pzyvm3)oJBGh-Im}e&j=3rL;}YO!qhW3acZ0Z_Kwj@#6XtH*52E}q z(rNthz4lwfmM$CKTbXEvJQ8nMa!#OC)@t$ZJGK@3jvX1Z*m^?#8e89om-bnr=vp8b zK8?eD3xvvt5C!9Yv>9v0n~c6OT`bivTk4~d9)#5iG0>}KZ4mcdC?oL~&^MF5t)B3c zSTvG!tVy_Q@Z45cVC0WNXNB{NYX{45*sSn2G(27>k(-27;@h}Osoz*rmewevoTgog zHI@e#SVOus)V}x45(GIulUlxtr^B$$Os}a2_9q8D#VRe>!_d!33zqhmIc&XAvk&Tgs4y}L0t!w4mS z-H^wEy@2QkSU@t!hjp!W4_Yw8>qES5&3Lor!>cCj0I)22v#le3#vf=Mg%vc7#fgLM zE387neKT1b_LJT*KM6bF$RsTQ{1PlJ+V~65)8pvZ4fYscg1t^|V&4G5MD6RyJAr3k z#`#_x_qFaP+&E)5&9KBTAol@STR0;RaujfWh-dT_7Epi4**s~W_YYsTnj`)Q>iRq6 zjZYd-o<^IS0J)=E4S&-Xj2uhB*2IxJ*?I(}wlLgcl~BN(M+FXe^_V}*XDFLQjcj|k zr^ckz^f*Rv(}dxVj^R5nmJ2uSSL>h7|0glAI_dTek^Oxo&Ly6 z+{XjbZUMVZK+3XV=66A)^UI)s>i1E0#>yryLcY=RgU!nzrpcpruXSAF@4ScNL39ap z^#KrbDtzKzn}>D|+XC4>wi7AU!1UaA-opjDX9FWhvMDV^pDDTpH73=zHTx_Z$ZRFDC7TIZ2cKXh zFG2!@l?K}q8EOkF7(C@dilaUyVuu!u{~vod$ud(&vMJ3%i@7Vu)PLpI#jfNluqS9y zAjxtU`9_S-l9bN!O68Y>Sl*>{hZs`qMFS~AQ5#i%CN<>M0wwWp^Ck-63Th`Ngx;be zXG9gG6y#E`=%^H%NHf{agi0E>l#%U7JVmF8+R0>^$pI#?L8IRs5-kvAq3~U)UoPr# z)p=Zeigu45(|Sa_=18}-z#r3PX2!I?$tqRVE>^r7Nf5#Ji%S;E8!z>EWPgSovmqB3 zxOi1eE4l? z_{q{)=a@NdAwG6$16CY5vu=k`FhsU@J`Pb|J%{eCGjf0{YyRxu0u%`m7f4M>ElDHl zHH4NJW%!E9*oj5G#_KYccwk}qYsdr7{fyB{G8Z%*o*OlX+*Hj*3MwyAh8e~chPG9N z|6r>sV`iWSD+SjC;&-7NDvIZNj%|U?D97DZ@8TH^UzK_FFiUXEVJ#4EBdEuM+fdIl z{|P4j2B{#1aMPw>CL(GgdDs?h z8Yug|f;u3-;5fg6=4zM#VXT&o17d3yCxrGTZTe$q1FSK7aMEMFlUKC+eUoEM%a#px z**LPn_=<*+GoL;=5?vP zz?INu0WP6~T}mZXiMov%Km}PFV-v1Se2ERq2mLoZ{sY{(!?-Xa8NtC6FgF2QY;1qu zj{|J}5?-1+i|uyKp2f~XY7R9qjOqoPv5@i>g+Q$?;5l~2!cJMGLbV8MxTDw+ zb%se7vLEKHbn`2?!lrXN)&QPqH}!BTZkdYtq|<#r&JBO=i%2^DX%n%I7W~nXY!pY# zv9oa}3hgJ7O$wZHlcz*hZ~VlKo)XafG9o_@IB6$y9p0`ieBAI_4*>=TvDX(it2)o! 
z0T!e+eVa_AP=op(OeUD@V#2WxB2M+90Ayen$&!8x znPG&4He<;g%9P4PQc?VKP9!0nfp4VvTngM6?e==Z&7U}KoLLz^2L0;sqh#A8Ww%rf zDRwJDYV%M0{nS^&>(J;Qg)U)Xmo2O|6(>Mq1)2tOxYiUqYjLzyt@9K+ZOK#AgINb{ z$F_vn_)E-yK`fVFT&yf>gnSJT^e1y{wUh!=-y)1*SjHklmQ7>tGHg!(gK)`#sZ}Vu zHDA-=hC8pA!Kr8^<)kH)z6Zv+>dHyN;3kaED_SdYRG zZ>nt`t@sfJO01Z*goni=1i@wux$*jaJ{Vsa%IM_?;(4LkVZi``6V%7CiaG;_W8|j! z*@Grb`c!CJ11t^ZILPfdNVr!c8!a`FdWKNT=Mmxnbn>w)OD%!5lyGp)TTm?$O>+VM z#07L2ib(R4Rx{;|u;dxzmig*glt7qax*~X24N|KXl)Pjph!X$wWRDWJRg1oPn&~&62TUOto=sB+v^Xmm|G1v z#I4p2AzdTRC>Hw+lEsES*jL_hh~iKYh}8{Wq`|j_GXT8+bBv1A!>3 zR%*uXy`N<6O2lN4 z!?*idycc6O4AFxabDuv{-nA0j2(BfPcrsj+F0!8qDq5qx&rvf#VF-1ew@9}p*R&P$ z)gVD*IX#*XS;~tGqJzp35Ksk)(V7C{>RbxAvoqGLUURojxFJcTQnPM-)>yc&W0#+X zUU)?vL6wGCvgr@*2_un|A}-Y$g+MeZJ9rn5$CU4KY{q(5X75*r|!CQ;xjW80*j zHDh2CByHjak(GQ9w@4A!^lDpguhuo!abg~5H#f~qICOJr{08g+0CC8!H^KcU+gn{m zxJ_cRR=k?i_Ix`JUM|CpP9@m~%2>D-FEHipHnon)3rE#2tncXd-i|^x!dpGz@!B-7 zpH)SJWU0QC^Dbd`A((HuFCs1yc`U$lArJ-WDz`YpE=(8cUgu7v4h+uMQQge_YO>us z^}l&VejB6~^9ry8oz)e!LVXO)C=QKQHO}#N7fZ%2h^GaO;e!iMjvUGyTY~9 z=U6gXgy{gj22}cAq#`DiU(}x<2~rT=l;_f?qmH3^Y`9nSU`fbd)e3bSPAzKSiU!gk zj)(MdK_6d)Ps~yOhZX$}lh^n>37bc_+p7&vO%=v?;D_kxTEqV$OO51R+PZaYvexgT z=tkkP`dijSey4;Be?&TjF;2VfTaPY-A5aB&{AF!POuv;8d(r<~_LPUMdqPOFQ^(}*3?B-Kk>p|C%6EPrUI=iwz~jCj+vLynk#r>c zt-=Hmu^FysiXsFWe?^99mQ1=jB)Hb z%r?$uXB_9Wv&EUfIm5_AoCieyAA}^hL%o3x-VU)Gmm#(0)m)+7Puz%yz{Xj`u;0W* zSMKM6DOkX*P*aA~4>qnDE@aaiW-j7GNXtPFMvCwYq%m$Sgk>*G6R^p1`(KNU8}~_k zHSWC>*cB|)EA9L=#GiIP_K)r4XS@NF#CCXichDP>9g$$c$7m%yMD6OhS9v<(LoCdV z3ZqMD?YoZ>^Q%c4gz(>k3nBb_uvQsFUuFjs_#K`;636N| z4p|n5@^{7>Vy(=B1P`#)f}){PV)>pYG= z_D}xTw>%qN7(<~uu9ttbJx1(IUmKi>P8(g8-VLJKx?ao(gQtY<39akW0GX9()cIJq z#n?Z%MfDjD?FOwpe#chuK)y?$j1UVb+tUOlK>IefK%nK-q%i@w9b@FFv9%3(@F%o| zv|qz}+{BBVKp+saFFIY6VSH|nAVWZWqTLw*#C`8S<85n^tlyd0)D1h)9Q_mc;J~I) zTdVV*;sD%|Oxcy(=7!`!q)|d_0y7XT!kn7$cHQ5TfFENch$+uL9e&PqsF9|_%z ziUx6<#^HLXdx5J=vEr9%Kb3we>Ch3aS5Huj#DS20=#?NiTMUH9bDg0WD{*c6$ugM3 zB=ci(4Y3f{@wbyOI;Qk9=uX#tm{mkmNKiwg;|$@}XQ7$(<@=e-Rx1v-+W-ud1FQ!v z(UbtKQtD0m1JI)FF}9ISDdbUB&<6QEhzH^`|U_`RBD-*u>D7|HfR9uxt_9eH{in+yA|Qc4wfG5&n|{k5EW;Iu=Wqd{SHlcxm)ub}854fm z8R6GJ15SEl<;|ise+5D2TcE8@AZL<#VCascF8+0Bh+E+r-bP(h^i*oXe~i-G(O2o6 z#Z*Lo+>|hT}cvO*@;h(%6E% z0G0{3B%^8^R)Sr06r!Z%C!i6JW9%%oh_SO&Md};(W2kLQ7+@Vg4>$nQPs}xR(B`y| z`*+R54eOz4%@+vk@0!zM9THS(Socgr--9s?|J0yTY(!>{%_AaO)fXAFX6slf#-m`V z^Hanf5w@Pj6+&z%_zNpT4N>_&0*#>qwfeLp>trad&NN~Y4LaS3O^aW#VNZMYX>9pk zuQ-hn{S4{_6bV-zn5miF>KYsRM@+gH{yFqq3*oV)S_uxLAmT1nHM;NSGa{d;06{_( z8J7i`BV^$RSkV?d4U%v$X>~7q7m}98uAA{{McxQdPo)(HICbFe_DUqmI1?>WC%EIJQO@0~q5 zUv;O!M-D;7DIkRDo_1a*-mjB0_w4y;L|1Bfo6dXFq6*W#{1pyVXEK#=78z>cESLFo z%?KiJ7PDC4_)$SLFiNAQ{a$0ZhapIjb>B~cM82Q41AJlU`{f%W@{I%ze5!P7gVqIj zUT51l&~{^}r&R*ryF>7ZyN5Np9fq@JogtrA4(Y`nTu|JC*Z_wi4fhC=A#K5|AP+)r z=oZADNxzJ4UAS!p3H*Z-7jbZx5kh|%pbDu$ApXmEe%n%Xyh-0fX+kl7%>c>^z1Btp=ujVI1BmR)~N{h zOt~flz(G{JC;#OD3Ri3ao6X4T?Z|>n)#}xUAb;3U z>1b!w$`sTc>^d&WPZQf-0$TJYYe}kFew6AP?T@b?l>gw8nuCiF&N_rS+sl$(gAIt+ zh*mhrvHvINkaiH_Up-wbAf5$b7V%|$5k#++9Q+4_*dix!`qD;QRSwZJ!%{*{)S!v7OwTN*403y-h{&?R7ef8bh^?IM~gNwhN>@=OUz~W zZqitQ#MZ8LPFqAR9TD<1)T9-d?hJR-iBGgASG1|nCo6Q6i@fvhMy5;Tb^f^wEVEvn zw;!;Dy*=P_=N8rtOGihZY7fgOt(Dvx4XYxIjz(-!bO7UDs)E&H!NZ~xB+nqE_$nx0i{U?5$V-htD5+g= znu8p~^jChI4Uj4GU+U0mvm2s30yTQkNtf*VcA`=HaHny@UG!lN211E-0Q9C~`2aEr za`#n$X64`M2uz*Cjlk3iz27p250FDx0?iGtfMwwo0u#yzNhrrwV!hVqxL9y{Bq0%M zM2`_r69z`Jg4zJyZuSmzqZb-97;OY^7*tWWD0+a3a%=-r^=y+S?FdN!I%exrY=Qt| zAeeU4?i*s=NZ$zI4THyN9P%0|X1ID>eHKr8K0|1;Olv%sgC8Q7`y3nNAcGtBN+IE6 zbobM!Vr~v#M=myFUC`$tbqepS_3@K_Xo~zxY`7z z5tEFJa0d<~e*335%vu_wMH%Y(NP9dwrm{Xkh(kdDl_6oR9yTuM;xl7}uomA^i0s80 
zb~t&-=(vGa6KlX)Kuats!}{ZJJFPxwCMb5e&^T@c-x0Wjm%uw#d-om$O#UumsLW&o zTy62Y2oS;>_H@4KeY{#S|Dcj$%4_0P@t$b!$iHSYv&ai^INZvqp~1I{!;hiJ&$gR3 zpa@_Gln}f+JD~LJ0u=k{V$R+*;GSgw<9$2VSQ1Q?A4j?^7^{^{5r-GGJX`J+3%|#J?5{=qEIDDTIxP@{nfx z875o?q(t`S0-6Pkmne$ z_Az;q$)^0Q37#N;-UuP~t-Lo*sq zSI^9La1VNs3DSyj}8sTMn^_B ijSk=#8cy(Uba-@hG=rR>(LwyBMt{(Fa`ffF;{ONvQIHJ) literal 0 HcmV?d00001 diff --git a/scripts/models/__pycache__/experimental.cpython-36.pyc b/scripts/models/__pycache__/experimental.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b86475e46d9adcaca734a84e438c58488fa1f7c GIT binary patch literal 4961 zcmbVQ-ESOM6~A}BcfDRaj+?|unz}T#vW40-6x2eLR!!;#XhI=|l%-)bnHjHlJv+0$ zch+B;m5|mV<-t;lctc324@iAMyznpZN8DE)AQDJ@L+T6s&Yk_R+w#(}?%g|g?$2VruC6#WqS>@~<7TJ+=#3CnlIx|DFSwoK-dFb&*J+tVkL{;=uM?H1)%tSTx z)X+1BS@Y3+RFCG4c=JqT+-Hq5hv%wq@AqQS?#5{zB+K0_>L;nhFn1-W{{9Iji~aNZE}6?3S`x%ZCa0+#hNzuuByr{ zkx5A#JT)=zDM$5>a-~n-SP#9j$tw7DQD}_(09DS0tiy+lv)m|*!dYZPKJaq0;Ds0Q z!rU}aTbstvDwuDf-@_;M`c z>_a`u^Zj<(&V65*?KBq#Mkq7feTAy)$_%2YQBjtRlTBrWub^C4d|PoTNC?Ga<;kr~ ztcG}-xyB*8zJnOAR@ot>p_aIBM z-EY6#4P+jRH5e*RpI}h!g?t!-Wy@o03m?LJXs-bEaVh%rw?~4o6r>iCEaW^xu z7l?>nq~4t$o|ztO^tE9Nf|IsFS&{MA8x9#E4Z1OmQ1$&1sc6@Hf2SWLlf?I@XK8S8p4O*R2$GvlE2-fbbx-%4!Zv-O=QOq*jcp?`j|kgVWFN5;*mjk9 zzu#RC39|IiFReqr@;0g|gaJ+F8wB2nUjUE^vV|eu%)tk_+wme&zcIri^Ae+dj%j>F zL)9sv+QNExK7HdU>2w3&nicU*zb#_vw?q&%s)Afdt429{al5sZD_aC*wTfO6^8&NC5iDF+=ihVQ|~rAbrq0#+JpaAH}#^mF)9!j4zD!|UAQhc#&7i-05Z zQn^Jl5>zDn5*t7=0PjxD0Xfz&hhWP^HZb?^;LW(~_}_p1hn_KSo<&=dFSIb};rxJq2taQe9Ym>*j2-KK zX-&ALDf``qp^VjU>aYOQu42}dAU~_#NZK+Fvu^L^>$E8C;IPA$iFGEvw0a}S!XS}1 z(HKuD!OX=1W??rg*l$aXa=h3g%+hFbbh0FIwLba{H1yG5{lFOTfsupuKh&WW`rsd; zMC>9X>yCSe2(SpDZYdtvfqZElqB$**^?OoTkJ^ERmnAm0&msHsxKb-Ze(D7EqQzjzwwPEeYe#A)fg9wMjg1!1i0%_Pf&xQe-8 zx1KWN1CX7_l0?~o5W#_RFg8e9%1&f1cc7rGpGM_@t9Nx4^ez;u%)@q1@vfSAE6BsG zdzt87kCgE!Ro4CXyFW$b4|DLVCJ23a3cjUCBu3K%5eJDMZi%nc2NF~9M~Vj{zlIX@ zGz|nTxOQ~%xK~jmaRURNdSKIY{DLtLGFjvcc)w^^{0a!>aN&#SI2GC_o=f~j42!~Z zqhsm_20I>kC2PW?%prVW$Tdq&z;X)bir8XdNz=2L+Tw4uh)OLLbBOxWx=I5+= z2VG-qzc@Yph#vnxv~BT2TI~B&X|guMAQyU{GUu3XHr`lklc&Me?D>f? 
zN3;X!ZR0Bv^%~|9ZpTD@R#49=q9zxlN0N_>Q|75jG))s>^HXVs_jW4d?xQ50Sclk% z3auw86PuE=rY~e@#?)D$7sBcYeJAWfh`UQmkhK}|vhhX}%2^L7D$wJOiuTHzcxkbL zy??b}Iz}TF0AmiN)fAzJhzU!831>Ui2TaFyq~AkhU?UA@n-r0ea+3rlf0@WTdc7FF zNG2BdP&M2W5!em-ExN2Iw}Q zI1@1Wc(mK)ZBKlhKZ{UY@vrQB4DdWh2}I;>V=eI((uCP1+)47~2n6h$_4`CM9lro48R`?%>WS z_u6txRo;z*?Sr4hh`g4rZK}T`en8{gcG^o2{KYTy8!Q0f1Ty+K*9{v4kx8Tj$>{!$ z59*SiW~q;@B+d$2@gMa!H}U4zWcLKF{L00N^)k2?wV-9?V7Nq%K`t$RiK;Q9 z-12rM#rxFRr7EUss-z~pzJ#i2h1H zHM7oF^j)h?Y=&&0X4JWdEP>uuxH!NJ(CfxVV?nt-{&y4NCj1&Y0k?h^|1Ti+9_3E@ z(zlwXrP60dHbyeq&C*g~bN#%X$kp+GfTXska_;JENnf1BGOb6kPP|POK|&wm1P-Sn gdN)dI-z0X_Ah&RM56Mjxx2Br8P+xdvVQFFRfB)$+KL7v# literal 0 HcmV?d00001 diff --git a/scripts/models/__pycache__/experimental.cpython-38.pyc b/scripts/models/__pycache__/experimental.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b90f92ef24d09ffaadc1b3d911b9c17f1d506a58 GIT binary patch literal 4906 zcmaJ_&u<&Y6`t8&TvF5z%Ze@8PNJrcTLz8PxM`ZCh8x6=5~Q}0MoL={akuN;6}8fG zmzr5xmas&D%81j0fb`Nsn*!v~w_f@$^e^aXu06B{TA;TAy`X-RPnO$R*i9l?dyGA^^#)hg zjV!%~-#I_eqjo3v!fZcHGCvGx#Q0+mJMsz|5rdwrX@u2>Y>Vr?SD~@qanpLpEY`H~ z{hF%Y5Sf(pk;f+HJ?5zXUas`-AFTy$+0=Cvbp?gS$xl$_?1=UGfN_=^g;6*Q?1=Z= z+$?zEhP*Jh4Aj<^aby+Dvmkdd3TEFj3K$)=9U8|r-q}SZ+r%tm*Fet+8MbDw3kTnT z>b%vcC|h=6CgplwoW{B5DKk!UVPJ$ZgL|(~byb;u7&fZPl2NjyjNlcNtBUU`E(Ix~ zc%)pporyf?=6L5l;;QO-LE=m4d5_qCf3murwWC$Pn`fzi?`0XKG83zTpYO)0w=Y+v z2v!fWB)j*0@1qYkUT*twx+((^cXGK3&qaw`jmEsW(m4=>QJ|Aa4OXMZ>wJ-4;)nAm zc3&B(6p%KfhuWn74HWqjs)Fq>aREkx$x)1g5Ab&$rka#&ut$8oVTu{d5z|!FQ9Uxm zEPfQfFP2EAs|zHlY-M8K7a>Wc-T^;6bGo_V+lDO&U)m*QMMzkCFza7CNHP0*I zi+0`f_PTyjzR^Yy&(YVWhvp8VB=Nkly_~1jX+wg}E$vlO!!>4HEqMy=^p>7UxH}r| zMrb}DxLcw9fQ`YuqRfqMdo3UYj~;51YV<2_qB=!2(8@f6@Er0*V4F}}7~-wm-f{9u z-wnwL#uN+9i;TW?OyjE>y-tZ zWeY!TMaqhEfZa$_RsGNxFfVoOJQKmTav-2Fd>>veotlbo(MsBeC!W;{KM#2cJznDu zpWzlitiuLh4LY&|l^ZlGL`9k}u^#jS_-^JLuwy&s07SaLdM4V|O?Kx}4^{y%ttuT@6QqA;HlJD-&?6s_O0dK5_5W5C@c0KRsoe0rf`@u?O`v zO8<$*DN4b;CV1B=YjB|q^w8-d>z_rDi>O+dcxSH1KLdPsjXoQ&-x_X$@LBEs=FQQdxInU*hCYC{a#{IWa{YtL^98%4`K8 zro9G<-m*A{R>KfcNA}{ski++ z*j~>>`)a6+yQ#7^;#(gh0SI#Ns-_3Ma|*qsKBPm_1p)hs7i^1FdOIm#lW8|s8PQ0&AG8qKn zTZi!6=$o1@;K&DV$(lzvw*#Ds0oOb^ftU&0FJK{!n0k*8fo1<_F{H__g*ssMA?Av8su+WdNM{vO7Vvqs!}R*)n>nRv+5^h|K`si`$H6G!ujRKa~Gl~Ma{5=~g0n1?K{ z-A6grDG?J@iHi{7*qlUmFn~tmSH$Mq7 z>*x#S2Z9{RpDFepAp$J10q1Pb)e#$6_Q1esJ7ilFZ;@H+cuVo5VIrUCbRwKJa-8^> zWRb8YSa#NVG{jUUrGL^iHbHw8&H1L}?*;xV$gw$AUNoW9brHt$(&XpZ}ddBl27 zj(bRM9~t6u!BVtah=g~x5u2QyO70>W5wV)bM$auwkrqxLajbCrwa^-H@$=A1jnGCs zn;Niw9R+bsTlu9YM*ezYN!M zD907GBZJodobRm{^wk$oz8TiT=?6e!8Pf;6e|dm>aOcIs@lZCHj^#5Y`Y1HrL|Pjl z=LNq<3BqE%?RPv1r+J*Eh!JgH*#K9Rs3|`UaBoB2j6_i9Nw34*hv!CES`I|y=TW%4 zALrZ4iq|N}(&a-=9NrOSZeX$`^4F9(LzKCrZ?a>gNgI?oz85L8?cWzP=ShrHAl#?5 zPBMh$(KVf}=wC&cjVsM5t&?1#o1~z7w75eR!CKW~iI|x~UBpL9C`vxHXgNWgBYsB1 z%vJ=uA*lMfvbMSsgQI(XqAKNCM7Ylna9!5HzWN?+cXB_L+p2mi@^=qDiV!I+UE5T@ zCO)7wD{vC&OzuUZ{&+nx!5-C9zk~gm<^QwS}IC zm*Op2$nFT-)KvvL*301f(tl{Rc7}=OomGTxO#V^nolB!**PL1?S>e5Z1 zX$8BTVT_>^>j~GW`nV&S7$9jD<8!8i3-~-D_)EqW@X-~0E1IE;0j}mXV}`qk>)F=(BaIyJ15|O$& NKQsT#{L=jF{{a1FGY2$uYi;f7vWermvLr{goVdzbwH03&I}F7$+9x!kpozH}GoaL)bqU%vl8XL)SQefdu^*PD-N+K;r6zj=ILMDSlQG)-s)P3XdC z=mnjBje>z+vtc%^f~8ZL)krk$f~{+w?Irtq-RWzEl$WZzA~B~G(q6_(_jS*xkF6!; zC+N4HRe61#+uM!tYt)b3$nkHoG0~hXOmdmim}*WJrki^Tdzv$andaWY-sWs!mdjF& zx#oOfp1hg!IoUYpK`}0Jx3$6vZ$V6~8TSnN zoTt|x!g?R}9+kDhEy#WUJzf69d!+sW zgh%VY)Yp*yp2)Whkr0Q)5pkq%#jPImmc7Ts1LEjiqwvdUd5%`IaUExv7RUB!ILF7u z@xK0~_Ue_wsm%<=#GDG}v4f9d2QRF-sQ-+v6)x&pciwwKp!`iuobWEbsl8><$-bz) zjNQflUi{d_mw1Qo88?g8ntdIKnDwU 
zl&h@!Ua{P6-Ncxpc)b-()>^foMs>X6Ew5Hx4gARE^%ZNaN;^vVxJZNnxz#S98fm0pY#HZx^j8zDmuJYPQ?AwuZ>_L_+LX5Xnn1& z69I<$9j$1D5EOege3^k28hy%3(3ddgKvu$>r!qT8)*S&tsiM4|n5MkceeztCrbPlJ z8S1M(hOx5LLO8e^f*jml6D^p?NM& z)y-F|p4ChA>|Rop*R9Y(iv)rl+F`Pvs3%GB2vb-hffN*D+=Et6`ID&U`d>kurrzr& zxYGJ8cqHy^m|ipGcTg)6CebTs{uM*D!A!LJ*vO2seXZvpZyZAZT<8cJqbELVRM4jY zhl%7YwcyeA(pQegoz_NcqEBkaGa0r^OvNJ&Yscm1v{qxZ^)DW$m-@f@?zzE4eKJgi zF4we#re-gqxGT)| z%%fW*wzi1(wDkA*-(*=11KO8MgM+sSKRXzD2Wt~9;W?3zPTTW31afM=(B(j?8 z0ki-ldr+&cx7Yp131kV(*Q!RDcB{2f6q~J5vsRW5Q9o{x^#}2de3&wley!SUBSq96 z*?xc$PoAKJSqAgirB26diG{Qx{R32>cO$(P8Q1+tFM>}u5jG+nKN|?QR^=g7Qc<3& zCo64vt0V<2#HUqix&!8r%@d83<+c_(hVMNGz{tmKwK`HxttGq-oHY{@6WBtQC7ApOwnUPYQHPl>7GpS3 zEJjY#^H*7R+9jqm&{t3@uSGTlI`DxaF_P<1qTFb=Jb8gSBI;X6DMEOfGA>g@2N3Dd zHk&s=0XLb>n6`G&fqWblKZM}t5NUb_|0j*(`f+_$KcROg2GVj!pbMtFf{H3)@RTBL z0c!TJ2DeyLkPY9NV(|vxjw(@eJx47`M$2EJh{le_Vo6^18jWIcdwP)T@1sh2ilz`9 zM(|xk4k*I>6hV+h@5R*u!0FwdX(Z&^f#0+xM1mW?U0el90N68qE0T_SVDP$QjjYG;j5GA19 zyvDnkQ9vr+kJ+sZ!4nb0I08u_|LfS)ZSWL;eggm+xgXr26|L4|5Z_t@DkeyTP)gt$ zt(XiPP`Tn7^YfEEZ~-XahH63!U~BDHwdb^#h~6!-zW|vG3(AesA_3uU%C|#OYnHI* zG$@w0gLFM3h*vlV2p#}xfZzu;(aw_J|^LMeb4VAog(_{J^Wb6SuM#_1b5MpYMS z;%Uq&hcF>BV(hjSkiz3pNWt=qrzi*W%!+XcYppnqbWTjqfqStV$zV?(YFWM^CSIo% zO^3>zP=xanG?Cbc>oBn zxGy*W^^7%88OBAZA0z-9911Zr;SIB63-K5k+`m66|ZIpx)Sw#!MI^Pue1W>mI*xr(7GShSbl8tqRb=jUH4oA z5Ut#9cFwON&FInwx?}>sAGjP=XFYaJsakC4&iN6ro}&H;X7cl7DSN?ldT0g+ky>c ziCP)RluSBFyck&|yb3AGC{}7P?5sBQF08Op#VZ2FipZ>bflP8;Hqa8Xz1{*8?vx}< zJ3d)S2s29XH=uqA2mb-iq0W8Y8kS?M667L^lyqF6FBb6xeih0f-5O0YjE}9v>$Q@P zUa}}ya99ffB2=o|q^14RWsuWn%vpef z{eaOHQdxY>>p9a!nFWAw5-ErNO^UnOoy2f?DC6jqi4rd%_(Wo%A+z8pro-onw-`u6 zdYBNK`U?0bSB!y1v9w-odaVGuVvy6uc?}E%hCj(con%M0`K(d{bKBcR$h^J3M)RQU zB;4cb-7z)sxc{M5K>B6~Xx>JG$fEY+6(Ldo?2gsLzjnTJ|Cc&N} z(R1T0<6Acbw1F~!ND32#ZiUA1B(<^)KB}MpyZ=1#%IE*(LiNVu?mvFD^UoJp|A1qR zs;@wB1uI(^W2TDCZmlB;4MbL@QR_q|Tp3JD7 z24{ub6Udbp5iPhQIMspkqUgiHp{Q?zDiHvQ3?J~dg0O+GS=)VRo2c_C1fQgU20A3e zjv(uCJ!=qM;t*$I>Kjl=OcP}yJo=-PBxYqr`}_E);@{9-2RPlhN5qKJ`i))!yA03> zMR)~ghHqBDkKEDedroUjzoxwg8-mUr$_9C)T%e2$`Em4Ng$W2RkTDbnzNqAm4*L(M zjMC(qExUK%_=2y1REjAl0|)i#T&V`AiQI%_z1$%Wg}uU3^F0Ec1!kUC0D(T+1M>)d z&5+aR+r~^W&!O0))wAH^Escd{q}TC9ti!v%OLG1e@(1$$!Cjk5tT*C87z%kpz9ik? 
z2a`(9y3(iq`G{-u^=U*9UonRkfnke{(k2uk5s=b30exkto)v6%yajukbYw_nJt{Lq zz@luhqV09$sSeTT8wfG2kgRecsVq>519@cu+;*q#v#g=CB1A@krPa^yLBt^ow8>y} zgACAw2BRFv!PJ0+KnmFWbFg15GS!KBVOHzHIj7zDF-mQe{urh3eIqv&mjWTRKF58c zL>aZAxsPg)GtS(V1DD-)jtR1j-3)5%Z^UEUvl?us>3GcV#$zUsn+S9`E)wK~;1NKA zs=taQ(lDe!G$6Rl%94Bp3s&L-x?1_fl*BNWlr=N*OEngd=9*+j!ayLab%gt4wWN0n z@cq;}KI|IuV#?1_we6F8hLSt2RM|(~n+S{zG}xuHIKUYS#zn+*oEbg8eVC(CX(+=< z^-y50|4V$(VbT%R!JC2NV=XX=R~1ZzT-V{}!6gRjz+lCp8S_*VhXTlI!rv0toYs1l zst2;bb_#HQI^I^w2Wfx^cJ>l`B_TWR(-TNT^@M}l?gxazhm*dKG~iXy7Lik**B5g%KWa{I2ajj9Pz6eGz1Vju#3bFhdTK*j%N8-O(0LyqA$mL@y$|iaE-n{@EdJqL8f{=FOn+Wl+ zEYo&HfFLa5s3q|8dXu4@0;jS2l69M2jdX7tR1e0=qnW&o=<5i|0*BXS1_J5?JT4~z zwYq!mdj#cyM%y8%vN!+&_5oZhe<0rr5qh3bb8I3hW%v>6$G@W8`}G~_n| zy6kgA>P<_yu&>o`hJRsY!(apof^mgf(0VZMM2>0##i(B$9{vgBi~%ua2DL*Ark_h~ zU}%C1Rb4R|_pW}Y-oor-<805$Wgtl#F6DJxr6hs9ueW`V$(6vb=6(PBPbj;BklvO+ z)a9#`{E4Q&(6!E=Iqkoz=Og>+%U7Pg>PI$!eW~epjXd(2?6+o-*iFC{bhg4jrz`L3 zi``^Cf2|#q8cZ?p|F8m_LjO=~tL+Xev&f>mC^?B-ML18;7gH3$6iEgQ{CFLCmr`Fq z6k~j){FsGF&qaDU(%X?9$fu~{F0TGfKa*RMIWR*;1(Xd4etbGddOR2v-NLH8m??)G^Tj|3;{qLXu`-N*j zI%Cqiv0js&U#!ZK*ew;t)fs*g&DBX=z*l5lhpo2lErz*S3QCdH#5E*tSK3u|({X|t z(NU|*kJ>eyZq44;7a2nUYiCcq1LE0XZJ+C7sm>mW>oHLc+~ z!KjlEfC8<+9Kz89bT9v@Uz(yly?8Xe+ zi|R&}=-yoEPF!Ax^P`$)4DK&2E#+~4(1NTQ3LoCcD5(UU&oV5s=p1kn{38D*S4(~= z2&5dt#7UmDZpuHW&m^2$R1jqb))?J(v+WJTgKzeDS9c04*qo>p(N2&FN?Vv?;-#&G ze}V1_;Vh#{Tg0bj!Wr~OzeKsbiPRK|5Q~S|Y{sepJ_TKpn-w^KhyNnJ6$srq2PTRi zqtK_5(T0PPgJ&oX2jkJN{|Q9OXG>nS9&rp{C5c8rJR^V`K)ynAsl!+g4P1QVqMOsW zz~UT&g5Z_FrkcajR9p%-03R4@8l_KMc40+!5jR1NO-1iR4Wq1eiwKx(_RhBVKVCZQ z?AE9&Xaw~94VLm#S`%+S&buy0Ynv!Y_4{#cZ2?RdCZzEP*M^-&h2MjXAHh<%` z0N>CYB8^|Tt>3iXf`^72!XgXXKojE}=Qy5#u(NOr%o-4w3WAdT2OWOxFv+hSrsf%m z!BI~Z|MXtmxAv0ci@2kUBXgJ%j60{7>bdvKjT{{Pnwa6f_s&765;D7Iy!v0S7_S=` zC*8}uZjv2y$7J&Phah*mHY0sA0GdO#F{3Qkd>2HRWlt2|PK0CU z@Hpqj4=pWl@Qg>7ctS=0FC*_I1W1pJ*t16X>s*iu+%OYD#6zRP9S>BQ!G%JmH;y@^ zgG?|+j{hJROazm`R4^Uv31))5!E7*B9bdE2!#nU$0674rV?VIgT+hYrZgR~;+3y7N z_@@<;Ca?|9bi_T z&zu$Ioq{3X!TOHF6J^2^b%53oJb?9sHv0M-26JaI@kNgRR^i7tps7~zMh`r>P3bsXkdVg%kdQ*MOmD)~XZv?eY z!z*NO)x=0z`8vqlR=|VQPUKw2Rqb_nv?6QO10wt?GIwZrk~7N)BX&Ana?A}3R1)#} zea%v8H6o$OEX@I|v37!yesJWj;XbDMj!Do|r)Ux+{RVeIY8oSVK|I*b%N2E>z`{3= zrhL6JIhX`4fezh&5<}*02Xn!O;B8P7 zk1g=dX~(Ff%zX-Ua>`KxaD-=Sv4=n%0I+?c1+r+2u76wWY>#+wWJEIlPFHq}He4e? 
z4ds_0NCu}e0NNdGW-u!G+)P&ND+R< zZg87tKgEwHBKLV1Y+!a=n?*_K@e4D&(;OIAzS?AVRkN}EdprYg1FzOF9v-d*o>-Wc zFVGOn6!E1Ti3{AKh=NhQHSz?=R)|^#TwFe@T;SiOW}l-hy#%WsuXM_13P!Uizd{8x zpkh2+tJ61+!bi_IIZt^K(SIQLgoQNytbP=JqKq-EKg2J045V_pYaHd&B#>JcWrW>q z1j2H&9CB#qq7H)L7^jgt17Fjl=!MW8xygAXdf1^jshi+ewcQ=B7`p0ZD&Jg#hH2Xf z^;b%en!g4~`sfYgJQwaWU}xpgTozzSL;& zqu_^lzAT>^xw5$rlU_Mw$ftwH#9f!3YpAN-6f;P^*2;H{$MOpwjFQEoXqV~94#W`) zKKTANefedYHHn!hNpF)Iwd?8{8QVxVl4tZF6&aYMPk9M`U8nAcPH-hWpc1e~L2gl|<`+vhOSMLco@Sf4epL637Lf%W z^;H|~>+Cm+QcrD^q4|k`ew({xbA3eklWdlGDss-@FtEJyw6i`*1q;{ZbQ_Ujn<viyDj{mt8%5r>(eq7I><*ZIECiVXXPdq)y literal 0 HcmV?d00001 diff --git a/scripts/models/__pycache__/yolo.cpython-38.pyc b/scripts/models/__pycache__/yolo.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df2bfbba284efbdea80a4e7b0ecaca0875c55f5c GIT binary patch literal 12673 zcmbVSTX0*)c|JD)2N#0iMHETN5_Gj7ixg$qj;biKV%f5tI+SWDj*VJ{A?yJN5I62Q zq(t`v$B>>ZB{i@3unW+=7gQdhTgHeS~luLD=`t8(Xw$T;lwv| z$Eb`fN91et+exaruFm7b)zND-Pq>=m-$-?=Hl7{lHoH1eo6Jtuc4c?frm|DD-Pzr> z>FhMOMXNKl+3YMo$Etg3d$W6M4`m;!?aS_~?a%J7rL*bUf$RZYYbAK)2eSu1uGOb4 z%{jCjlRwrq`WHs_;450)cv*W^6LFEat%-zlxN>B}%pOI}h)AL)*{gY2jEdB4Eqlz# zh_PkkEkiCkdgnO4_Yvoj$_X*=x5v=8oAb&x$=_uXt!s-iPx3 zez`M;ndY4-XTEY0J2sy?=}coE7o7PG1LM=EKeeG}PdN{(9-T*>Q(ZlK+Bw~Mtc!0! z?c;Ch@?V@MIv;SJsGRBiTvtQ+yCPjTL`WPE2gSjz)f;uzIqjSkhs5DKM)rdkd5*s( z?8L>9JsPO^A#t>;KdoJVA$ziwz#Flm?0M|p8SLMa%Q5tRPS>&*u)EXFhXvZ-*2FRA z!rR&xtqsuitc6NY@%-wd%OB z*vH&_$vGQKgi_QTRQ1APNI`jbx*E0s*U;PdU|fIP%NdXwD!0=zn-sE zV_U?IAW-EZ4nUUY1-;yQ}Ery;bSt(7c+j zmRorb6J)|cs<`Spxk979hIfkGje0O%u9v+s^>K1HopNc(bAuRvU#?s)HiD>I$XA_Q zxmF5BT&L<3yqs{>$^|EgdgYpvbJy#I0)6c8pVce4XYk-?5Lb;vHEV(lmPo##6(M3G zayyz030p+L#$m8?LX6zjvynG6OUv3IPAUjL?Ra?O;-#%CnCM;YS|K#_8hW;oOeFWq z$UJRB>*&OOzP_pD3?FJ@w}vOvvwUNN>O%A+j2W<%FlVVP>_s}Zu)2B)?Um3Z)kPmr z7u(c2aS=jG!Z*5lXQZojk~9KeVs_0g#KIfxr1;w+!oF?$=F@mfm)<(&TgbGoer?Wm>TW}xEabiAay@s`J?Y8<^ZeT5xfd^AovY=`^^LaF=g1}j|xJ?Y9sFj z)>_%QDQ9V}*hg9NUqJUjuLY5Ey;;o{oJ>d_M6-ODvT5pQ6qe*;c);F8bGbq_@4C61 zJV5P-C_9Sm2$DOAOw(g}7%8TY>y!9DsL$%H(R((bKqT(}k;#i9T|{!f4@M|yZNmf6 zEBXeM;9L6jueD8-4BwmqpHsd8M)1vZy;8+|)oNSqP&?d?sP>NKTNn{S3j1L{(hYSY zWQK$(ED=HpX0kVfR*AYv^ozOgAdl1R6%z{E`XppV&9+@$HszoC8ss5@Ss??j8fpwy zqE$w=SJX*%wYH7AQJ8=;z72`dMd*`85pxPioQO=*2!7jcdP>oIC$-_8n3KlwN(SRX z6TLSLdiUDTY4z&x=%4+bcJ%+6yB+h!I^%xSk8w{+XlnIEQ`lVt-)3|se4F-J(07KR z4`6s!XeXm&_I!vW&j8)ig
@rf?75@12yA|zr zFTpyXBuNuMMwaprl2cSc!l2|Mf~wl82ZI15@0Cldja4_WJqb}Q*Goa7QLo?1iS>HE zRxZdBG*2wYri8>r&QV3gEthHyl!(cLu@S=~kfQ=N8hmR6pvOouARStE+zKMRE`-0^oEF*px6)AM=CPiy>Hy z7^EQ5Ur#3vnbRr>#p#_9)f*fR9_QW<9&ba|Z04TGVk>-V{?x+!0t|k*XOoR`1tq&& z7tSrvn|X=(E0bj9CCQFTGMb8@@m#KlE^@iRt~u@!tIcqpxeYw#XQlL9y8a z1_z-+wNZEEQ#26~F%wnH@DZwbp0Z2G0v#4-eGOc&#=OS-bq_Dd3+VV9k~@V=(-ZhV zVI0+u>eG7ieGADpT4Q~MIbhX{C7A)pZt;xQGN!yt1_0-s#PfX!JFh0)P?w1P9Qsg~)s2}d~2mLceJfp$pe8r1*5`rX& zYXJ6vwFvB!&h7WaI7@z)dq#Z=t-x$RXd^yc0v;JVr+Gdj@gDNw(&7uz zhW8?M_7T?g_WM{F>S2kwOuI8lU(0xNm$EWJDDM`^{u7=i873k^Y5A?!%m zkgj|j1ZJSqnz;SlG+^dIVRli>0L#%$@)%hLw z!CMlItHfO-(JNgQg8^*suCcw{!nSJnt&(6nNxVyVkykn11_1Z<3U+!%+oP$qX7sK9 z0fx`$@`LCn_aOsR)Lpe!)*H08CemPitfK=hla_=WSrY{@xOR0Y;5vLQOOy*htK|Ag z62rhE?UjvEMXp$e186m1l;N%Ai%t&ERs?3r@g$kTz+y)(Bpa)Bz~g3K!UbfaO2VB% zh@asHsPWmbSS}StQzrKrb?=QL1&BCgpu1jr=hrmzG6PI;Pu(4GRIRU`_)q>_rVBha{e6 z^^_Syn+4=>0wtUNP0Cxzoz%ayL%k7MDP2Tz$;t2y3Em3u`fnJAJ_rvg%;WKo?U1%2(zGD)C2g0$tx`<%*C`vXEF<~B~YoSb9JxY3K zWdUt5AD9R4f;RF)gnEU>xW!(S6|9swzVw-I{p8rIpZV7(ODi9W{lk}<|NJDI9|*cp zav9uZMp%jhvsG@&6vhQsv082hCZZk8f`M7f-;%FW%LpP9Dq7K$j*w4N-6$NiaPaI7W(57Fdd$~nkRCe}`gj1t*}o@3l5D%bgRF#qLwggT6-3ZLs86*+eyDw8S+(-!}3E=Rp4P53OrHErrx1B38qGt z!}8W9VvmR-kSQ@`%Cpfw1OQ7tz(y2DTuE(G2qUz}TJk;xNngYYGRH&cl<~=nCuIFzWsgX**natS1^fmH3dH_oIx8Cx{LHKTDHe7_8=} zt?Q45*LB}JNb{Yh9yANEx2)E8Uv>h z;->nF@!ss}cjAk1V|$L)LI^vd*7caNpC@LHOBAFI>Lj24Nuee9e8&NTaUC!D*JgRtQ0#M~m;#&qDD% zeXbQrr>`}|=4pCjRwB$HSE;*EZ_3}G(r+W{ z;dN!v(p39-WPx4?^hTh2@;R!yM_K+l>iz_2VCf!3;F~yX5{mMuK95psy2oMn+1nl3 zlCbZ<+s@-B~yU}mt_G>@vMfM}y&RGqx-?RmU-Wu%e>1`c5j zI)!iUAVQzBR;Jktk1M^&qQ6B_wDr1yJ)~H#B$Zh@&3`|5va}UH@T32J|KFax2IMj# zot4$HblhA?=0#>y5$%&Sh_sCYz}5{oTHA48_=a-(z zg_sMcYqAGuaIB$xu)wI583hOWjeBWKuF1ENT*5;dysattUMKYDwkbPwnn=q69w(jL z|07j0pe(WKfU?+60<70xM%)J!?wB}TgI&Yv8Yo%`w@p5gqc%Fshu9b4!+^$oE`f77 z#M_A;1XPKNGdQd3qR$u1DOMLNDW*aXxV9cU@P6^nP-ncmQI&`cCJhe5erTU zOAph43qTp6q8kjI1CS);oYDKfzpVja^zr6r+@8RPD~s z&!=%pP=}%#Xdd3mAfgnU%L**8h&DL={W$+dmhx`i^Q0WXqDh(6*W{nmV+4UIY6ud2 z--?d6*@=cnvAwVN?50J%0!*OHAhs|GWS8R2M6iGk0uk`ysR)e}2;21k=$W|qA`%a$ zlR0PuW5ZNtK>i9^5c?UWA5$&j9Xu<%xpO7V96zL)h&I~b)fBJUp%MFUrC;|oWGVtn z0g5&gj}RWTqv;zS{Cb#B(vu;IYvRNkC)`}NH*f}y8bW=LBQH!n2d&XwEBtzp&QSUa z9@7!LE(qsv=u=%+Y(KCuJ^e#`Om28{JILSra=6ZQ#oA9`mLY8OT@-(WGyo#R$O-t( zlv;_Nnc#OqLh>~D46?<=jCtu2%q$WGVlGJ{P$nf`r5@KQ`z6XKZlpq0SywwS#sP(i}r#+I<7L5<7C!0XY!iMh;Yu9HD>YupAtl1jW-RvBi>HDX_BY1X)@RRjThMjB&wtQrs;7w zfm8>q@kl$_+K06yx!-?+^ODrh$4v=;Fx5SyxrckH&Yy4 z4r7MzAkYw>Apnkjz&EDaF`V2+mQA$X^=9!*r_M8gtua64$BKx<<7yP!jn5khjF`O{ zXNnf)GvqtC`>_Y{`UKtb5Yz2M{rl*%xAM@kjT!$@y{!aauKpLO--lX5A})_`jLt_e zwjb~Lb+P(8=#l0gFJaaL+=q{5R38Sayq+1&JBYIcyxlXsL$pEw*9qE_4ct`$m2s7g zZmWoCIPTA3*26sO+VHoFi3y^4tbPpbN4Whr?`@x@c3ahAKNJ-V`5k=UQG}FCgp_bK z48GzaKcT$NyA;ZSw|ZA=k9l|km1e|#wz?1FzzVUx_PDnn2UB})YyMd0SXb+P{luo> z$Nh1Cf;iV7%Vm&`BRzt2g5+Y8rNRr-w~=J{qa+#rgg8i&;g6GK;Elu=BpnRv{b*=zHAisX9u#geJ#Yyw?E+y&1O*Tif!CmQ?_C0bey9#s zsq$ITknI^zW8aSVeg)G#@MTPPqNs{M0G-isDX`k6Hr0PA1#t}KsiEKl@X93&yN3r3 z&|AZ(r?0iO%*i3p#u(tQY0NQXLDZoSka_qMB`RCaTD}@Y7vW+&LuZR}le(@^_LtoM z*`cTz9V3ezeD$J`_t^^jcOY&Ph?xW>fGzwSQLYEo^+--?o#MLjC zXBycac=76$z_dZ0{>Sxzc*#9S(Oa2$KR&;Kze<~MqahuS|d2-chP4p_& z;ddIhgFznzcR+Joq*X)1b90{KdaVh{)A?$ZZvY?X^-5CBDnv~P6~4TR#84auH-}p> zx^khqwxTQ{>2f{YGR~$mPX&=&PBaR1AqRSh^&X;q>#ls4<|I!ch|opkYWapb3&!rz zu_6Y5_1s2v4MyV9^DjI@-6R>Hz?W1TrZyJ6>-_ek+|Ah zM?@)`TJ_3Rcb=|#2#L}qvs4p?kAREXNp4K^%6^qE@)Sbkz4I^qjL?5Ypx z`+>0u>~kOnKJPq{+kFX{6`_-C2p6KZOpen In Colab Open In Kaggle +Screen Shot 2021-09-29 at 10 23 13 PM + + + + +## Viewing Runs +
+ Toggle Details
+Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in real time. All important information is logged:
+
+ * Training & Validation losses
+ * Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95
+ * Learning Rate over time
+ * A bounding box debugging panel, showing the training progress over time
+ * GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**
+ * System: Disk I/O, CPU utilization, RAM memory usage
+ * Your trained model as W&B Artifact
+ * Environment: OS and Python types, Git repository and state, **training command**
+
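As a quick reference, a logged run can be produced by installing wandb and running a normal training command; the sketch below uses illustrative values (dataset, weights, image size and epoch count are placeholders taken from the standard tutorial commands):

```bash
# Minimal sketch: install and authenticate wandb, then run any normal training command
pip install wandb
wandb login                     # paste your API key when prompted
python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt
```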

Weights & Biases dashboard

+
+
+ ## Disabling wandb
+* Training after running `wandb disabled` inside that directory creates no wandb run.
+![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png)
+
+* To enable wandb again, run `wandb online`.
+![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png)
+
+## Advanced Usage
+You can leverage the W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started.
+
+

1: Train and Log Evaluation simultaneously

+ This is an extension of the previous section: in addition to uploading the dataset, it also starts training and logs an Evaluation Table.
+ The Evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses references to the already uploaded datasets,
+ so no images will be uploaded from your system more than once.
+ Usage + Code $ python train.py --upload_data val + +![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png) +
+ +
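For a complete command, `--upload_data val` combines with the usual training arguments; a sketch with illustrative values (dataset, weights and epoch count are placeholders):

```bash
# Illustrative values; --upload_data val uploads the validation set and logs an Evaluation Table
python train.py --data coco128.yaml --weights yolov5s.pt --epochs 5 --upload_data val
```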

2. Visualize and Version Datasets

+ Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from the dataset artifact.
+
+ Usage
+ Code $ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data ..
+
+ ![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
+
+ +
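Using the defaults from log_dataset.py, a concrete invocation might look like the sketch below; the project and run names are hypothetical placeholders, and `--data` accepts any dataset yaml:

```bash
# Hypothetical --project/--name values; data/coco128.yaml is the script's default dataset yaml
python utils/loggers/wandb/log_dataset.py --project YOLOv5 --name log_coco128 --data data/coco128.yaml
```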

3: Train using dataset artifact

+ When you upload a dataset as described in the first section, you get a new config file with `_wandb` added to its name. This file contains the information that
+ can be used to train a model directly from the dataset artifact. This also logs evaluation.
+
+ Usage + Code $ python train.py --data {data}_wandb.yaml + +![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) +
+ +
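As a sketch, assuming the upload step above produced a file named coco128_wandb.yaml (the file name is hypothetical; other flags are illustrative):

```bash
# coco128_wandb.yaml is a hypothetical name for the generated config file
python train.py --data coco128_wandb.yaml --weights yolov5s.pt --epochs 5
```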

4: Save model checkpoints as artifacts

+ To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval in epochs.
+ You can also log both the dataset and model checkpoints simultaneously. If `--save_period` is not passed, only the final model will be logged.
+
+ Usage + Code $ python train.py --save_period 1 + +![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png) +
+ +
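To log the dataset and checkpoint artifacts in a single run, the flags can be combined; a sketch with illustrative values:

```bash
# Illustrative: upload the validation set once and save a checkpoint artifact every epoch
python train.py --data coco128.yaml --weights yolov5s.pt --epochs 5 --upload_data val --save_period 1
```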
+ +

5: Resume runs from checkpoint artifacts.

+Any run can be resumed using artifacts if the --resume argument starts with the wandb-artifact:// prefix followed by the run path, i.e. wandb-artifact://username/project/runid. This doesn't require the model checkpoint to be present on the local system.
+
+ Usage + Code $ python train.py --resume wandb-artifact://{run_path} + +![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) +
+ +
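With a concrete run path the command looks like the sketch below; the entity, project and run id are placeholders for your own W&B run:

```bash
# your-username/YOLOv5/1a2b3c4d is a placeholder run path; use the path of your own W&B run
python train.py --resume wandb-artifact://your-username/YOLOv5/1a2b3c4d
```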

6: Resume runs from dataset artifact & checkpoint artifacts.

+ Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device.
+ The syntax is the same as in the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e. either set --upload_dataset or
+ train from a _wandb.yaml file, and also set --save_period.
+
+ Usage + Code $ python train.py --resume wandb-artifact://{run_path} + +![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) +
+ + + +

Reports

+W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). + +Weights & Biases Reports + + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/scripts/utils/loggers/wandb/__init__.py b/scripts/utils/loggers/wandb/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/scripts/utils/loggers/wandb/__pycache__/__init__.cpython-36.pyc b/scripts/utils/loggers/wandb/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99d1f115dc2cb9e5c7127233f0b1768e2fec278a GIT binary patch literal 167 zcmXr!<>iW8$CJbW1dl-k3@`#24nSPY0whuxf*CX!{Z=v*frJsnuMqu={M=Oi#L|-d zyu>oyoXot^3jO57lI+a9`0`@?;-X~z%KV)CGSm1V|6u*%Q0d~i;PNJn{MiE6=R-A`n$8x3ea9pl9PFyKRNtBe6N~Q8p6en@IT&{AQREa;5 z(l1wvNhSHdbGv(I7Z4N0sbqn^eY^YK+xKzqIp2Bod~|B6_}VX**Ge7B`aNsnuZ;Ys zaQH9TmZhw^rEKMN?Yb>zr|!r(RZroZ>ZW^{dd6m%bT`|})pL^1bo0GJy&!p~TdWuH zJliexO7)U$c{#7VZ+E8lt$M|)bf#7AgjKJ4GhTJy_EMeMt(<ytElZu;cj`4Sb_3mPZMFLwt{(P%_ij7bbZ>j@jm^M! 
z&$(CHP2cmE-{V59*6gs-8uSBiC+Ic%%?(fEPWq*0u*vhab}QgHcvJP>OQC$t3FmGM zn(BHFMg7Ztb)&iFb^UO<-P`y`m-)sv>iV@*n7#4hr$7Dt&2Xx<>9w{Rsu|$5L0B}G z>+P-=mKu#ap7z^=exnhlZ(d&gBkzGwtkKL}3}@|9QwQz!W-EARWu@h?xA^0k9l46b zuOb;+fi=dYDC@_a6Bee$=9p4luU~WOrFA{%HT*zhF2d}#_SV}wf5p->sQ3QumCZrV zTWJo1LBDzDQn%e7?yR($0p_i7*I)5E0ar?l4_-0>RbbuK$9y|?uo@-2>#f4rB*AtO15jIuizO$S>O&P_fkQslOCrq;`D8>i663VTCZ9`W}H@;6IPJj zcLtv|`A#m#%e?SeHhQK_K6-{3^T%UH|FLoUWh*G~y)UCp%v)~tg;8dCx7q84+4e?% zpgqt@*3){R{V;$a-*b>PuK9 zc{w(RS=L5O_w_|QLysPO658ug8wxx0=<>}?E?>0oK7<$hTrCUBmd5f`?0FpKU*1kT z`U0LETFQhtpTm>D3T$jOM+lQ-n%cv4Cyg_f5@+VX$H(?@@WbQQ9u{p{nB1z5Cx z&<=LPIcX}Hu13Gv^N!9%c>~ib4fNLse+Vz~DU>V>JZtCeBB)q@1lP&HN!`i7zl@8( zQhA7xDu=&s_ZPwMTYg7m(lzsOuC11h!k7g4r|vR?>tyt{9|p56oX@1^$ALB3Pi zxArpQOs6=8usd#zF-dYx7YE$fv06o;{MobOsQ-{#ido*@tR!Z zeS2k;d#2kAJUugExH= zPm*=zmL}U5-t;ldo+0QC_$)=e@F83H5IF9hbHImG;dQfunKAgX;W_%A$s3-d?*)ZU zFvNQEY>;8Rc85T$?u$)r|IvQeANDT9%vuD?@0Uk zalXU$(`Y{(wO>H{1!+I~tTj&g?~l{>K<+^{Dw891?G?eDiz>hGygLhtlj>BtZTB79 zI+%lT5i|(O$3`x$>Jym8nyr^`(kGF;Z$C3Clg+HO1r@C5Q@A@iX2eH#&}w!?`WxPB z->o^TtF;+@n)QX#rde4(%4?Si3GXPueVq3`!Gv@od?T#HG}mZ$fob$Jd?KRHaA`H{ zZP!ZrS>C3A)#s7a3&L$1ps{)m%#|7ahw&h+mM**QbPb+hFXv*JMShckYVY3|t|;ajz|8onR|?4WnA^^CM6#wN}IYt%x*Q6uSJ&;YdK_e+Q!mj<6a) zAA#*xmEBJVcE@qRSQ`|KDOG(P!nl#fkx?^}&mx}#60sV2H7j=j0~^H+)+x#TvSOVn z$yX$u4yt3jGZV~0r2hlvI&;B1MEW;^W4sbkp5Dtqm}i6eaTfR{i~jtDU;$9w{yh5D zIS#f-Sc(m)vhQTjerlW@+l`t2>ktAq81HHJ{GRPENNI=%w7jTtTaNy=%J+-G30?#4 zFX4O=$0=3VN}*n!c>oWahzhM2-^rUir*U7W6`*@q~AgTyc=vG(|rbSIbf0|F8=aV_0WaO z&o`hT4xn;w)E7-BV;o|DhiHJD34@zt1|NpjE0;mVu&50B&r|0O(-6slnQLMuO%2hY zb3zt<3rVG1Lbla%(C)nMxvLefG(k%G!&9^s`8_@xK(YKaqdg$=Z(mE8;4jzr}mZ z;Yl7?Yu2eaWxJZH;!Y8{S$i};!9tef7beCg0cKXBz}2LfWVNNlp07nRhy6+z_ zyH{lWCs;19nH!@*DjkR&P>sS%qRCIz?Hf4!3KFYGx=lk@pqXZL zc9KfJ+m=kImgtFQ458ZhK~!xR;sh}n)C3d)N$mtexj`l}Vt{6WavYi~U_SdIOc+#+ zos+DS2d#r{p*faC{j^MRpc;`a$3_EG!*>OCZ!Lws%p`d?XOfARI z(<~I~EcypS%s8XcP10Y>J-%}s!|*2Ju2FdpzW%vPzi@IXc6d&b7D1!n}DMfdu%maTs_8dvTfwg?89_~ zkvM8b(9droKc%W$RLXzZZ1<#9YUC8v+WGL<}uX5gMZ_%_;>k?s}7 z9LMhihgyxQN`b4)Hoq~+RU&8umeTi#YPcs40FJ3NMQ3qt1-@rhZ#$gozAx!&LVBf_JJ-ppkfw4 zvlNYvmMEBz(~paAP|^LfSBmc}KLW$y>q{jK7*}P~T=kOs8LEBDgPqIr1;*S9njb%uGs7=pgr_KDL3_PG~U2Yv6N%e0o1*PjBFyl=m* zKZ^6}=tJj4+jvG$Y0{hcwQAt`{quqA?X>;iyl7_ShnjzdU(BIJvvtduvV(1&Z~Bex zf#2Q zw!{8lR*}T(wK|$4FT)z*wS_>RAg-_+44^C+spXG8CYn9%uB315V1JV)PZ~@I{qAmr z93bqeXmw!@mPm?aq0pkSQanx)JIh4$h<)aSLKE79H_HPVft2(JPa{$^3PMFXn^2rc z3;i}q)l1^GH$6Hk3Vakmc#7rEB4g#~E&%m`&F4iJj2?d|4IX~g3oVi!{|=d2zl<9J zqYrGPP6r-Jx^We}oMJkJub!;E120u4OTq)8W$tHT>P(5>GS6~KJW493@-+FOox(V) z3Ixx1YQaZF@sQD7RR+ujm{t`jSL&1%ykdOA&saZ;4S+PTqFxT+Gx;^*8I&7M#hFZ}B*F4~hVkKSLNg7y# zd)VMF_re-NguG^7>R&N=s@>Sw^r>?@7*Q3sbTD{s6ix?Qb9cSqKoQr3o%%^6VMf-* z#H3{F<$-Q*wBgZc40RU)T7u$?Rmf8#fqSljdq%S+`G-@7#*28PG!3r$PvBXCfMN5a zJt)i?-*=d%?|Womq8p~!#Sn(UFemKZ^GA=q+`Pll8PZ8O1iT++%2#VsW*7e`nw@}y zVn+*jPdwIl_|iN)H4+UImI;dE#`K#kMr#o*YwLp=WD)Otfr+>vyUZ~x2H-YX68%Nq zoke?wc? zh1laAXDdD6M zdv^s+g73iUXw-K2AjB&7fyyu2mko2^#vmh+aDa)%Cfu@ab8Rp*zKXbW-S(CTZJg{I zVjBCM9vLra9k8LqPKRd`jEY9N7r<+RVW->9*p9KvkzLWR{mE&di z6=Hv5eE=9A0yc9vF&~gFCQTs@P~X+<0QN%(zMJS6`jtF2td(P(m=&;MfKt8*Z-J{8 zZ!!40MGpst--H$CZt|*wWijzQE+BD$c*kY;`3?8db28*8Zjg~|2TS&Vj=|w@wI})_ z%bg6-SYl~zCSZ!yA#_wgVdOdkyEQTuaEkhEGD8PqRBRX`ExBzD zg3Bm6IwfAJ2?Z@84_QQUZq*(=FpY>aW;Dh8t#H>Os*xI7mx1r&~ z9gNC!4NK_*<_Mm(KJ1Fc;t~MgI@~b*77S!9B+Gg3PCnl(nO2jHa z4sBu+U%FvXxWRFfyZpw#P+(bhMdqm@Z z5!K*up-iBo6rN~;!z7^O7%tefEZTp;i>T#5+mNU~xHM>FH?f>giOLO=78VbdPoN+? 
z*}yUaqok-(`!C2nV46OHJ@zs?e`cJ)65op*4^ruhLPJl0p7L=G z_LJ@1<`}DLik}^*D)g+GcYw)o50PK|@BsST?7l^$M)b4uZ-P0FDVpNPK*(asFvTEXsO&9wG_24N+1|~o547ZI?IkYX`XdFmj z;UuUQa}=_Vcm_TIA;d$Ai%RF>7&8}|C{3D47iEVc0GZoTzk_6TRALKJyy%2BC*P7} zO-O{`f~cVB^^qu3tfdv*GS64UhF>q4s10c)%w08DQC^NW{c%)!fS(JxSi%#=h`oWB zF+mp$%(d=2KPd>KW@7;2B9Kv#hk^C{Ih>B%HPR}=8(o++1#kQUI{Q$(@h~Ypfj5!{ z-XmDiWV-ycrq@;qu_c+iZN&ijO$o6cIZyDrn>1e51Ou18ol70hhnNlvak1p1(e=hse6D9MkpKVM*Sh6#}bzN_#hoR<7QEWlAC zu-~ues4~->N?xp~&g!3HB2!7;OqUC$%GZ0m{foSlAt3bi&&$(bz=_a*UM?Vw=$#Qg zT)LgP&bVV(0Z=B)(&dFNr|{0&F@r`ZA@pBjB5#xk;=jeV@^K&QB{OK_&MC@;QSvha zoWg>c8c3Yb@!No0y$$`B*+hmai63VD&E~e}>tDq^nD8+4c-w}`m*qIhr4z~?iIe-+%}S2&09%fsj? z5rqsbAd(YNSjS5#3MmL75>g0-k3~Xy1D`5T>4%X3-*y<4e;)qzHiFi7eh%U+BW(XG z_ngmJGyp&?hA`X8=;sjo0y7`3GcXZCOn~X<8P<*z-{v6|6Ttm67P{YJvyxdV)DkM}a%|B&M>HYHAsU08Ecow5fS zJ9RUfL=kw5U`Gt$dlY5=kGQ)2tJpxl&J9bB4E6EpNu)~kA$Bb!e)xy+a|5EO=un-( z-#0%g0#E-E@`lgjqdu&{tCGHkw7J84bLX}9ZFiJf{^)uPdc^4LRPH5 zjwDQPdAq(*11JOxixr>w-{Iw(OzKRi3+TU#1m7QdIHAWRStCS5<@<*$AslqJc?TR6 zCKJX37*<-Zvbf3P(GWT>xa!Vsm}WWsPf)imp$w*o8H`Bm!yGp}_RvN8m4pWu7GYtX z$FU%eJw{=X<5DrHI`E!5W$2BIII6hgfDfOtN00q4i6HU1P~K6FbFzF2P_PqA(^7|T zYLLRsH%`56?E}dh_Dh>=C$nYg&*Pkhtu}3PxXJ-N!*;7b{}UEI4+i>wi^1Z;{J?~v zgN(CpLzfUH$8XZ0cLYU*ZXo~)48Kh84H>P{{sxp z2fxAX38MbL!_)qcxat2F{wb^^;eNE1k;LXmUfg$rX>W?8mX8tPrlU-S+?SMA34D+U z>6X5qMsx=V;%!Jdo<%y#XK5mx;S7Lu#s^0CGZ7{(8ZJgyG0C^|Kjn>o#^j$f`4>#S ziA1=T{+GP`77|2E9_Cg0U-8jz@{uhZrj|Ak`?q-i-!b7f$3tNkvae@(mpu8~2~SRs zE=_JnkRl9Bo{6-6C@=okZ1h1^2>fdxGu=bN!i24be50_#*O~hq6X9~2IpL0EjD~@d z-x=K_ZLiu+apDA8FWA4(H1j!t~n zVJBe+Bo7_}0iQ$$TmfIl?ck$}jxC^x>JNiJ3H`u}0GFzOOZ*hW8U1v^9vG`7Xd?O) zO3c(y9>H7T@}ucjKIKAy;RkMMvSJo>FI+<^Zpo;@6~h&vvckU1RMLvt4kGPV?7w!1zh`_e? zV3!!33jkIX<~jBcp)9~CJvs_2&_JEnkc0in`7!*!1b<{gjE*V!oQdD5rV?*bk$pjZ za(Dn(1ccxXct0U5!ma3~gFOjwR(xu<{tbL~u-w93vR^Rx& zVcF;pf@C=Lxho8f0=-`u>|FEKhWu5C(HocHR^}%b?w}>)DujEN-zphG*30fK%s`A2 z;Y&jhyM*C_#_4IH^Go7N^-xsyW%o1m{nJBMTs5C$cLV)pkfMGUN$mO7_juV&o@Hk>1BG~FM{1o!+`Zs&`rZsi!4#ZN^Xu*fRi<^XJqa0)PDs|uBR){&4&D$$1= z=}B9OR7INyn)4@cfz1FDF;ZJUiI^?x-C4pld&*V=tp*E!H5-_eRiYlXP2+e$rmm+_E zhq>Qn@@*#XF*z7!GFE*Z4@lYmX(ah_+A01(em4J9em*~2Tr7TnzIf`%d@a9_FBM%W zo2)n{#wy`xvGRwJ&{o30D10OjcFrGz0Q*Kj!e_Cv6p(UP*8bIKrJJ7u*|`2-s9W@1 z$KS|qKs8_9z&0PQErTJh`0d`Hf9bA=pr4hGf8yzne&171$vR&a6EtFxVaAnT<^e_U z%VxtZygU3Hha?m2Q&18D`thqgU@UDiA}|v*?N;}M-;`d@y1>v`6~PFU?{;TLYE4)wfV0m_n30nA7yNP{~-bHkiVNIysSp#yiI+&3Zb7xjN)!T*dTbOtad z;#+Y1c9Hp|q;7jnlRog1BrtvOD#fcTB9$<;T8LJBry1VH?x3R%I|3m=KI6p0^b6N- zJkQ$vrpnj9fMSLz3M3?YtXItM3DLj_<;geOfxteTGEX+VKHLOd!*aQW<;N6+4TL$^ zPMSfWB@Q+$UGHrVbRdDakqs4r@m~Lqo?^ALOsFo%ulbnI`98wSPcnI$33pMLkJrdd z(?fP!n19yn{^vmW#y^{e#w=gpfyv@4Jc&uC@3jinJMTCMUZgFvh^us}oX+R+)qEje P$uCy^sKtMubnO2L@>E1+ literal 0 HcmV?d00001 diff --git a/scripts/utils/loggers/wandb/log_dataset.py b/scripts/utils/loggers/wandb/log_dataset.py new file mode 100644 index 0000000..06e81fb --- /dev/null +++ b/scripts/utils/loggers/wandb/log_dataset.py @@ -0,0 +1,27 @@ +import argparse + +from wandb_utils import WandbLogger + +from utils.general import LOGGER + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def create_dataset_artifact(opt): + logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused + if not logger.wandb: + LOGGER.info("install wandb using `pip install wandb` to log the dataset") + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') + parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') + 
parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') + parser.add_argument('--entity', default=None, help='W&B entity') + parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') + + opt = parser.parse_args() + opt.resume = False # Explicitly disallow resume check for dataset upload job + + create_dataset_artifact(opt) diff --git a/scripts/utils/loggers/wandb/sweep.py b/scripts/utils/loggers/wandb/sweep.py new file mode 100644 index 0000000..206059b --- /dev/null +++ b/scripts/utils/loggers/wandb/sweep.py @@ -0,0 +1,41 @@ +import sys +from pathlib import Path + +import wandb + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from train import parse_opt, train +from utils.callbacks import Callbacks +from utils.general import increment_path +from utils.torch_utils import select_device + + +def sweep(): + wandb.init() + # Get hyp dict from sweep agent + hyp_dict = vars(wandb.config).get("_items") + + # Workaround: get necessary opt args + opt = parse_opt(known=True) + opt.batch_size = hyp_dict.get("batch_size") + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + opt.epochs = hyp_dict.get("epochs") + opt.nosave = True + opt.data = hyp_dict.get("data") + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.hyp = str(opt.hyp) + opt.project = str(opt.project) + device = select_device(opt.device, batch_size=opt.batch_size) + + # train + train(hyp_dict, opt, device, callbacks=Callbacks()) + + +if __name__ == "__main__": + sweep() diff --git a/scripts/utils/loggers/wandb/sweep.yaml b/scripts/utils/loggers/wandb/sweep.yaml new file mode 100644 index 0000000..c7790d7 --- /dev/null +++ b/scripts/utils/loggers/wandb/sweep.yaml @@ -0,0 +1,143 @@ +# Hyperparameters for training +# To set range- +# Provide min and max values as: +# parameter: +# +# min: scalar +# max: scalar +# OR +# +# Set a specific list of search space- +# parameter: +# values: [scalar1, scalar2, scalar3...] 
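# Illustrative workflow (a sketch; the entity/project placeholders are hypothetical):
# register the sweep from this file, then start an agent that repeatedly runs the
# `program` declared below with sampled hyperparameters.
#   wandb sweep utils/loggers/wandb/sweep.yaml   # prints a sweep ID
#   wandb agent <entity>/<project>/<sweep_id>    # launches sweep.py runs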
+# +# You can use grid, bayesian and hyperopt search strategy +# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration + +program: utils/loggers/wandb/sweep.py +method: random +metric: + name: metrics/mAP_0.5 + goal: maximize + +parameters: + # hyperparameters: set either min, max range or values list + data: + value: "data/coco128.yaml" + batch_size: + values: [64] + epochs: + values: [10] + + lr0: + distribution: uniform + min: 1e-5 + max: 1e-1 + lrf: + distribution: uniform + min: 0.01 + max: 1.0 + momentum: + distribution: uniform + min: 0.6 + max: 0.98 + weight_decay: + distribution: uniform + min: 0.0 + max: 0.001 + warmup_epochs: + distribution: uniform + min: 0.0 + max: 5.0 + warmup_momentum: + distribution: uniform + min: 0.0 + max: 0.95 + warmup_bias_lr: + distribution: uniform + min: 0.0 + max: 0.2 + box: + distribution: uniform + min: 0.02 + max: 0.2 + cls: + distribution: uniform + min: 0.2 + max: 4.0 + cls_pw: + distribution: uniform + min: 0.5 + max: 2.0 + obj: + distribution: uniform + min: 0.2 + max: 4.0 + obj_pw: + distribution: uniform + min: 0.5 + max: 2.0 + iou_t: + distribution: uniform + min: 0.1 + max: 0.7 + anchor_t: + distribution: uniform + min: 2.0 + max: 8.0 + fl_gamma: + distribution: uniform + min: 0.0 + max: 0.1 + hsv_h: + distribution: uniform + min: 0.0 + max: 0.1 + hsv_s: + distribution: uniform + min: 0.0 + max: 0.9 + hsv_v: + distribution: uniform + min: 0.0 + max: 0.9 + degrees: + distribution: uniform + min: 0.0 + max: 45.0 + translate: + distribution: uniform + min: 0.0 + max: 0.9 + scale: + distribution: uniform + min: 0.0 + max: 0.9 + shear: + distribution: uniform + min: 0.0 + max: 10.0 + perspective: + distribution: uniform + min: 0.0 + max: 0.001 + flipud: + distribution: uniform + min: 0.0 + max: 1.0 + fliplr: + distribution: uniform + min: 0.0 + max: 1.0 + mosaic: + distribution: uniform + min: 0.0 + max: 1.0 + mixup: + distribution: uniform + min: 0.0 + max: 1.0 + copy_paste: + distribution: uniform + min: 0.0 + max: 1.0 diff --git a/scripts/utils/loggers/wandb/wandb_utils.py b/scripts/utils/loggers/wandb/wandb_utils.py new file mode 100644 index 0000000..3835436 --- /dev/null +++ b/scripts/utils/loggers/wandb/wandb_utils.py @@ -0,0 +1,562 @@ +"""Utilities and tools for tracking runs with Weights & Biases.""" + +import logging +import os +import sys +from contextlib import contextmanager +from pathlib import Path +from typing import Dict + +import yaml +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from utils.datasets import LoadImagesAndLabels, img2label_paths +from utils.general import LOGGER, check_dataset, check_file + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + wandb = None + +RANK = int(os.getenv('RANK', -1)) +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): + return from_string[len(prefix):] + + +def check_wandb_config_file(data_config_file): + wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path + if Path(wandb_config).is_file(): + return wandb_config + return data_config_file + + +def check_wandb_dataset(data_file): + is_trainset_wandb_artifact = False + is_valset_wandb_artifact = False + if check_file(data_file) and data_file.endswith('.yaml'): + with 
open(data_file, errors='ignore') as f: + data_dict = yaml.safe_load(f) + is_trainset_wandb_artifact = (isinstance(data_dict['train'], str) and + data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX)) + is_valset_wandb_artifact = (isinstance(data_dict['val'], str) and + data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)) + if is_trainset_wandb_artifact or is_valset_wandb_artifact: + return data_dict + else: + return check_dataset(data_file) + + +def get_run_info(run_path): + run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + entity = run_path.parent.parent.stem + model_artifact_name = 'run_' + run_id + '_model' + return entity, project, run_id, model_artifact_name + + +def check_wandb_resume(opt): + process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None + if isinstance(opt.resume, str): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + if RANK not in [-1, 0]: # For resuming DDP runs + entity, project, run_id, model_artifact_name = get_run_info(opt.resume) + api = wandb.Api() + artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') + modeldir = artifact.download() + opt.weights = str(Path(modeldir) / "last.pt") + return True + return None + + +def process_wandb_config_ddp_mode(opt): + with open(check_file(opt.data), errors='ignore') as f: + data_dict = yaml.safe_load(f) # data dict + train_dir, val_dir = None, None + if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_dir = train_artifact.download() + train_path = Path(train_dir) / 'data/images/' + data_dict['train'] = str(train_path) + + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) + val_dir = val_artifact.download() + val_path = Path(val_dir) / 'data/images/' + data_dict['val'] = str(val_path) + if train_dir or val_dir: + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + with open(ddp_data_path, 'w') as f: + yaml.safe_dump(data_dict, f) + opt.data = ddp_data_path + + +class WandbLogger(): + """Log training runs, datasets, models, and predictions to Weights & Biases. + + This logger sends information to W&B at wandb.ai. By default, this information + includes hyperparameters, system configuration and metrics, model metrics, + and basic data metrics and analyses. + + By providing additional command line arguments to train.py, datasets, + models and predictions can also be logged. 
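    Illustrative construction (a sketch; `opt` stands for the argparse Namespace that
    train.py builds, and the attribute reads mirror the __init__ below):

        wandb_logger = WandbLogger(opt, run_id=None, job_type='Training')
        if wandb_logger.wandb_run:                 # an active W&B run was created
            data_dict = wandb_logger.data_dict     # dataset dict, possibly remapped to artifact paths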
+ + For more on how this logger is used, see the Weights & Biases documentation: + https://docs.wandb.com/guides/integrations/yolov5 + """ + + def __init__(self, opt, run_id=None, job_type='Training'): + """ + - Initialize WandbLogger instance + - Upload dataset if opt.upload_dataset is True + - Setup trainig processes if job_type is 'Training' + + arguments: + opt (namespace) -- Commandline arguments for this run + run_id (str) -- Run ID of W&B run to be resumed + job_type (str) -- To set the job_type for this run + + """ + # Pre-training routine -- + self.job_type = job_type + self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run + self.val_artifact, self.train_artifact = None, None + self.train_artifact_path, self.val_artifact_path = None, None + self.result_artifact = None + self.val_table, self.result_table = None, None + self.bbox_media_panel_images = [] + self.val_table_path_map = None + self.max_imgs_to_log = 16 + self.wandb_artifact_data_dict = None + self.data_dict = None + # It's more elegant to stick to 1 wandb.init call, + # but useful config data is overwritten in the WandbLogger's wandb.init call + if isinstance(opt.resume, str): # checks resume from artifact + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + entity, project, run_id, model_artifact_name = get_run_info(opt.resume) + model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name + assert wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + self.wandb_run = wandb.init(id=run_id, + project=project, + entity=entity, + resume='allow', + allow_val_change=True) + opt.resume = model_artifact_name + elif self.wandb: + self.wandb_run = wandb.init(config=opt, + resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + entity=opt.entity, + name=opt.name if opt.name != 'exp' else None, + job_type=job_type, + id=run_id, + allow_val_change=True) if not wandb.run else wandb.run + if self.wandb_run: + if self.job_type == 'Training': + if opt.upload_dataset: + if not opt.resume: + self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) + + if opt.resume: + # resume from artifact + if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + self.data_dict = dict(self.wandb_run.config.data_dict) + else: # local resume + self.data_dict = check_wandb_dataset(opt.data) + else: + self.data_dict = check_wandb_dataset(opt.data) + self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict + + # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. + self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, + allow_val_change=True) + self.setup_training(opt) + + if self.job_type == 'Dataset Creation': + self.wandb_run.config.update({"upload_dataset": True}) + self.data_dict = self.check_and_upload_dataset(opt) + + def check_and_upload_dataset(self, opt): + """ + Check if the dataset format is compatible and upload it as W&B artifact + + arguments: + opt (namespace)-- Commandline arguments for current run + + returns: + Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. 
+ """ + assert wandb, 'Install wandb to upload dataset' + config_path = self.log_dataset_artifact(opt.data, + opt.single_cls, + 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + with open(config_path, errors='ignore') as f: + wandb_data_dict = yaml.safe_load(f) + return wandb_data_dict + + def setup_training(self, opt): + """ + Setup the necessary processes for training YOLO models: + - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX + - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded + - Setup log_dict, initialize bbox_interval + + arguments: + opt (namespace) -- commandline arguments for this run + + """ + self.log_dict, self.current_epoch = {}, 0 + self.bbox_interval = opt.bbox_interval + if isinstance(opt.resume, str): + modeldir, _ = self.download_model_artifact(opt) + if modeldir: + self.weights = Path(modeldir) / "last.pt" + config = self.wandb_run.config + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( + self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\ + config.hyp, config.imgsz + data_dict = self.data_dict + if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), + opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), + opt.artifact_alias) + + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + + if self.val_artifact is not None: + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) + self.val_table = self.val_artifact.get("val") + if self.val_table_path_map is None: + self.map_val_table_path() + if opt.bbox_interval == -1: + self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 + if opt.evolve: + self.bbox_interval = opt.bbox_interval = opt.epochs + 1 + train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None + # Update the the data_dict to point to local artifacts dir + if train_from_artifact: + self.data_dict = data_dict + + def download_dataset_artifact(self, path, alias): + """ + download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX + + arguments: + path -- path of the dataset to be used for training + alias (str)-- alias of the artifact to be download/used for training + + returns: + (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset + is found otherwise returns (None, None) + """ + if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): + artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + datadir = dataset_artifact.download() + return datadir, dataset_artifact + 
return None, None + + def download_model_artifact(self, opt): + """ + download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX + + arguments: + opt (namespace) -- Commandline arguments for this run + """ + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + # epochs_trained = model_artifact.metadata.get('epochs_trained') + total_epochs = model_artifact.metadata.get('total_epochs') + is_finished = total_epochs is None + assert not is_finished, 'training is finished, can only resume incomplete runs.' + return modeldir, model_artifact + return None, None + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + """ + Log the model checkpoint as W&B artifact + + arguments: + path (Path) -- Path of directory containing the checkpoints + opt (namespace) -- Command line arguments for this run + epoch (int) -- Current epoch number + fitness_score (float) -- fitness score for current epoch + best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. + """ + model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ + 'original_url': str(path), + 'epochs_trained': epoch + 1, + 'save period': opt.save_period, + 'project': opt.project, + 'total_epochs': opt.epochs, + 'fitness_score': fitness_score + }) + model_artifact.add_file(str(path / 'last.pt'), name='last.pt') + wandb.log_artifact(model_artifact, + aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) + LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") + + def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): + """ + Log the dataset as W&B artifact and return the new data file with W&B links + + arguments: + data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. + single_class (boolean) -- train multi-class data as single-class + project (str) -- project name. Used to construct the artifact path + overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new + file with _wandb postfix. Eg -> data_wandb.yaml + + returns: + the new .yaml file with artifact links. 
it can be used to start training directly from artifacts + """ + upload_dataset = self.wandb_run.config.upload_dataset + log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' + self.data_dict = check_dataset(data_file) # parse and check + data = dict(self.data_dict) + nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) + names = {k: v for k, v in enumerate(names)} # to index dictionary + + # log train set + if not log_val_only: + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None + if data.get('train'): + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') + + self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None + if data.get('val'): + data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') + + path = Path(data_file) + # create a _wandb.yaml file with artifacts links if both train and test set are logged + if not log_val_only: + path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path + path = ROOT / 'data' / path + data.pop('download', None) + data.pop('path', None) + with open(path, 'w') as f: + yaml.safe_dump(data, f) + LOGGER.info(f"Created dataset config file {path}") + + if self.job_type == 'Training': # builds correct artifact pipeline graph + if not log_val_only: + self.wandb_run.log_artifact( + self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! + self.wandb_run.use_artifact(self.val_artifact) + self.val_artifact.wait() + self.val_table = self.val_artifact.get('val') + self.map_val_table_path() + else: + self.wandb_run.log_artifact(self.train_artifact) + self.wandb_run.log_artifact(self.val_artifact) + return path + + def map_val_table_path(self): + """ + Map the validation dataset Table like name of file -> it's id in the W&B Table. + Useful for - referencing artifacts for evaluation. + """ + self.val_table_path_map = {} + LOGGER.info("Mapping dataset") + for i, data in enumerate(tqdm(self.val_table.data)): + self.val_table_path_map[data[3]] = data[0] + + def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): + """ + Create and return W&B artifact containing W&B Table of the dataset. 
+ + arguments: + dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table + class_to_id -- hash map that maps class ids to labels + name -- name of the artifact + + returns: + dataset artifact to be logged or used + """ + # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging + artifact = wandb.Artifact(name=name, type="dataset") + img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None + img_files = tqdm(dataset.img_files) if not img_files else img_files + for img_file in img_files: + if Path(img_file).is_dir(): + artifact.add_dir(img_file, name='data/images') + labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) + artifact.add_dir(labels_path, name='data/labels') + else: + artifact.add_file(img_file, name='data/images/' + Path(img_file).name) + label_file = Path(img2label_paths([img_file])[0]) + artifact.add_file(str(label_file), + name='data/labels/' + label_file.name) if label_file.exists() else None + table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) + for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): + box_data, img_classes = [], {} + for cls, *xywh in labels[:, 1:].tolist(): + cls = int(cls) + box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]}, + "class_id": cls, + "box_caption": "%s" % (class_to_id[cls])}) + img_classes[cls] = class_to_id[cls] + boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), + Path(paths).name) + artifact.add(table, name) + return artifact + + def log_training_progress(self, predn, path, names): + """ + Build evaluation Table. Uses reference from validation dataset table. + + arguments: + predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] + path (str): local path of the current evaluation image + names (dict(int, str)): hash map that maps class ids to labels + """ + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) + box_data = [] + avg_conf_per_class = [0] * len(self.data_dict['names']) + pred_class_count = {} + for *xyxy, conf, cls in predn.tolist(): + if conf >= 0.25: + cls = int(cls) + box_data.append( + {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": cls, + "box_caption": f"{names[cls]} {conf:.3f}", + "scores": {"class_score": conf}, + "domain": "pixel"}) + avg_conf_per_class[cls] += conf + + if cls in pred_class_count: + pred_class_count[cls] += 1 + else: + pred_class_count[cls] = 1 + + for pred_class in pred_class_count.keys(): + avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] + + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + id = self.val_table_path_map[Path(path).name] + self.result_table.add_data(self.current_epoch, + id, + self.val_table.data[id][1], + wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), + *avg_conf_per_class + ) + + def val_one_image(self, pred, predn, path, names, im): + """ + Log validation data for one image. 
updates the result Table if validation dataset is uploaded and log bbox media panel + + arguments: + pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] + predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] + path (str): local path of the current evaluation image + """ + if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact + self.log_training_progress(predn, path, names) + + if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: + if self.current_epoch % self.bbox_interval == 0: + box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": f"{names[int(cls)]} {conf:.3f}", + "scores": {"class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) + + def log(self, log_dict): + """ + save the metrics to the logging dictionary + + arguments: + log_dict (Dict) -- metrics/media to be logged in current step + """ + if self.wandb_run: + for key, value in log_dict.items(): + self.log_dict[key] = value + + def end_epoch(self, best_result=False): + """ + commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. + + arguments: + best_result (boolean): Boolean representing if the result of this evaluation is best or not + """ + if self.wandb_run: + with all_logging_disabled(): + if self.bbox_media_panel_images: + self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images + try: + wandb.log(self.log_dict) + except BaseException as e: + LOGGER.info( + f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}") + self.wandb_run.finish() + self.wandb_run = None + + self.log_dict = {} + self.bbox_media_panel_images = [] + if self.result_artifact: + self.result_artifact.add(self.result_table, 'result') + wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), + ('best' if best_result else '')]) + + wandb.log({"evaluation": self.result_table}) + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + + def finish_run(self): + """ + Log metrics if any and finish the current W&B run + """ + if self.wandb_run: + if self.log_dict: + with all_logging_disabled(): + wandb.log(self.log_dict) + wandb.run.finish() + + +@contextmanager +def all_logging_disabled(highest_level=logging.CRITICAL): + """ source - https://gist.github.com/simon-weber/7853144 + A context manager that will prevent any logging messages triggered during the body from being processed. + :param highest_level: the maximum logging level in use. + This would only need to be changed if a custom level greater than CRITICAL is defined. 
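    Illustrative use (a sketch; the wrapped call is arbitrary):
        with all_logging_disabled():
            wandb.log(metrics)  # logging records emitted inside the block are suppressed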
+ """ + previous_level = logging.root.manager.disable + logging.disable(highest_level) + try: + yield + finally: + logging.disable(previous_level) diff --git a/scripts/utils/loss.py b/scripts/utils/loss.py new file mode 100644 index 0000000..5aa9f01 --- /dev/null +++ b/scripts/utils/loss.py @@ -0,0 +1,222 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Loss functions +""" + +import torch +import torch.nn as nn + +from utils.metrics import bbox_iou +from utils.torch_utils import de_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=0.05): + super().__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False): + self.sort_obj_iou = False + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors': + setattr(self, k, getattr(det, k)) + + def __call__(self, p, targets): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + pxy = ps[:, :2].sigmoid() * 2 - 0.5 + pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + score_iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + sort_id = torch.argsort(score_iou) + b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id] + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(ps[:, 5:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if 
self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch = [], [], [], [] + gain = torch.ones(7, device=targets.device) # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + + return tcls, tbox, indices, anch diff --git a/scripts/utils/metrics.py b/scripts/utils/metrics.py new file mode 100644 index 0000000..857fa5d --- /dev/null +++ b/scripts/utils/metrics.py @@ -0,0 +1,342 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" + +import math +import warnings +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. 
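        Illustrative unpacking of the return tuple (a sketch; it mirrors the return statement below):
            tp, fp, p, r, f1, ap, ap_class = ap_per_class(tp, conf, pred_cls, target_cls)
            ap50, ap_mean = ap[:, 0], ap.mean(1)  # per-class AP@0.5 and AP@0.5:0.95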
+ """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes, nt = np.unique(target_cls, return_counts=True) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = nt[ci] # number of labels + n_p = i.sum() # number of predictions + + if n_p == 0 or n_l == 0: + continue + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + eps) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + eps) + names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data + names = {i: v for i, v in enumerate(names)} # to dict + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + + i = f1.mean(0).argmax() # max F1 index + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype('int32') + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
+ Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(np.int16) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[detection_classes[m1[j]], gc] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # background FP + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # background FN + + def matrix(self): + return self.matrix + + def tp_fp(self): + tp = self.matrix.diagonal() # true positives + fp = self.matrix.sum(1) - tp # false positives + # fn = self.matrix.sum(0) - tp # false negatives (missed detections) + return tp[:-1], fp[:-1] # remove background class + + def plot(self, normalize=True, save_dir='', names=()): + try: + import seaborn as sn + + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig = plt.figure(figsize=(12, 9), tight_layout=True) + nc, nn = self.nc, len(names) # number of classes, names + sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size + labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap(array, annot=nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, vmin=0.0, + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) + fig.axes[0].set_xlabel('True') + fig.axes[0].set_ylabel('Predicted') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + plt.close() + except Exception as e: + print(f'WARNING: ConfusionMatrix plot failure: {e}') + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if CIoU or DIoU or GIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + return iou # IoU + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def bbox_ioa(box1, box2, eps=1E-7): + """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 + box1: np.array of shape(4) + box2: np.array of shape(nx4) + returns: np.array of shape(n) + """ + + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + +def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): + # Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) + plt.close() + + +def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = py.mean(0) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) + plt.close() diff --git a/scripts/utils/plots.py b/scripts/utils/plots.py new file mode 100644 index 0000000..6c3f5bc --- /dev/null +++ b/scripts/utils/plots.py @@ -0,0 +1,471 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Plotting utils +""" + +import math +import os +from copy import copy +from pathlib import Path + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sn +import torch +from PIL import Image, ImageDraw, ImageFont + +from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, + increment_path, is_ascii, is_chinese, try_except, xywh2xyxy, xyxy2xywh) +from utils.metrics import fitness + +# Settings +RANK = int(os.getenv('RANK', -1)) +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +class Colors: + # Ultralytics color palette https://ultralytics.com/ + def __init__(self): + # hex = matplotlib.colors.TABLEAU_COLORS.values() + hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = [self.hex2rgb('#' + c) for c in hex] + self.n = len(self.palette) + + def __call__(self, i, bgr=False): + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + +colors = Colors() # create instance for 'from utils.plots import colors' + + +def check_pil_font(font=FONT, 
size=10): + # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary + font = Path(font) + font = font if font.exists() else (CONFIG_DIR / font.name) + try: + return ImageFont.truetype(str(font) if font.exists() else font.name, size) + except Exception: # download if missing + check_font(font) + try: + return ImageFont.truetype(str(font), size) + except TypeError: + check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 + + +class Annotator: + if RANK in (-1, 0): + check_pil_font() # download TTF if necessary + + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + self.pil = pil or not is_ascii(example) or is_chinese(example) + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + self.font = check_pil_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + else: # use cv2 + self.im = im + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w, h = self.font.getsize(label) # text width, height + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle((box[0], + box[1] - h if outside else box[1], + box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1), fill=color) + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + else: # cv2 + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h - 3 >= 0 # label fits outside box + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, + thickness=tf, lineType=cv2.LINE_AA) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255)): + # Add text to image (PIL-only) + w, h = self.font.getsize(text) # text width, height + self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) + + +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): + """ + x: Features to be visualized + module_type: Module type + stage: Module stage within model + n: Maximum number of feature maps to plot + save_dir: Directory to save results + """ + if 'Detect' not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 
and width > 1: + f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + LOGGER.info(f'Saving {f}... ({n}/{channels})') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + from scipy.signal import butter, filtfilt + + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def output_to_target(output): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] + targets = [] + for i, o in enumerate(output): + for *box, conf, cls in o.cpu().numpy(): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + return np.array(targets) + + +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + ti = targets[targets[:, 0] == i] # image targets + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf 
= None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + annotator.im.save(fname) # save + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_val_txt(): # from utils.plots import *; plot_val() + # Plot val.txt histograms + x = np.loadtxt('val.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() + # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) + save_dir = Path(file).parent if file else Path(dir) + plot2 = False # plot additional results + if plot2: + ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) + # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: + for f in sorted(save_dir.glob('study*.txt')): + y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + if plot2: + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] + for i in range(7): + ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') + + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) + ax2.set_xlim(0, 57) + 
ax2.set_ylim(25, 55) + ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + f = save_dir / 'study.png' + print(f'Saving {f}...') + plt.savefig(f, dpi=300) + + +@try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 +@Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 +def plot_labels(labels, names=(), save_dir=Path('')): + # plot dataset labels + LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") + c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + try: # color histogram bars by class + [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 + except Exception: + pass + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(names, rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, *box in labels[:1000]: + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + +def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() + # Plot evolve.csv hyp evolution results + evolve_csv = Path(evolve_csv) + data = pd.read_csv(evolve_csv) + keys = [x.strip() for x in data.columns] + x = data.values + f = fitness(x) + j = np.argmax(f) # max fitness index + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + print(f'Best results from row {j} of {evolve_csv}:') + for i, k in enumerate(keys[7:]): + v = x[:, 7 + i] + mu = v[j] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print(f'{k:>15}: {mu:.3g}') + f = evolve_csv.with_suffix('.png') # filename + plt.savefig(f, dpi=200) + plt.close() + print(f'Saved {f}') + + +def plot_results(file='path/to/results.csv', dir=''): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
+ for fi, f in enumerate(files): + try: + data = pd.read_csv(f) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].set_title(s[j], fontsize=12) + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + LOGGER.info(f'Warning: Plotting error for {f}: {e}') + ax[1].legend() + fig.savefig(save_dir / 'results.png', dpi=200) + plt.close() + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print(f'Warning: Plotting error for {f}; {e}') + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_coords(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + file.parent.mkdir(parents=True, exist_ok=True) # make directory + cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop) + return crop diff --git a/scripts/utils/torch_utils.py b/scripts/utils/torch_utils.py new file mode 100644 index 0000000..ca91ff6 --- /dev/null +++ b/scripts/utils/torch_utils.py @@ -0,0 +1,329 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch utils +""" + +import datetime +import math +import os +import platform +import subprocess +import time +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F + +from utils.general import LOGGER + +try: + import thop # for FLOPs computation +except ImportError: + thop = None + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + """ + Decorator to make all processes in distributed training wait for each local_master to do something. 
+ """ + if local_rank not in [-1, 0]: + dist.barrier(device_ids=[local_rank]) + yield + if local_rank == 0: + dist.barrier(device_ids=[0]) + + +def date_modified(path=__file__): + # return human-readable file modification date, i.e. '2021-3-26' + t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def git_describe(path=Path(__file__).parent): # path must be a directory + # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + s = f'git -C {path} describe --tags --long --always' + try: + return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] + except subprocess.CalledProcessError: + return '' # not a git repository + + +def device_count(): + # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Only works on Linux. + assert platform.system() == 'Linux', 'device_count() function only works on Linux' + try: + cmd = 'nvidia-smi -L | wc -l' + return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) + except Exception: + return 0 + + +def select_device(device='', batch_size=0, newline=True): + # device = 'cpu' or '0' or '0,1,2,3' + s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string + device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' + cpu = device == 'cpu' + if cpu: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + + cuda = not cpu and torch.cuda.is_available() + if cuda: + devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7 + n = len(devices) # device count + if n > 1 and batch_size > 0: # check batch_size is divisible by device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * (len(s) + 1) + for i, d in enumerate(devices): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\n" # bytes to MB + else: + s += 'CPU\n' + + if not newline: + s = s.rstrip() + LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + return torch.device('cuda:0' if cuda else 'cpu') + + +def time_sync(): + # pytorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(input, ops, n=10, device=None): + # YOLOv5 speed/memory/FLOPs profiler + # + # Usage: + # input = torch.randn(16, 3, 640, 640) + # m1 = lambda x: x * torch.sigmoid(x) + # m2 = nn.SiLU() + # profile(input, [m1, m2], n=100) # profile over 100 iterations + + results = [] + device = device or select_device() + print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}") + + for x in input if isinstance(input, list) else [input]: + x = x.to(device) + x.requires_grad = True + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs + except Exception: + flops = 0 + + try: + for _ in range(n): + t[0] = time_sync() + y = m(x) + t[1] = time_sync() + try: + _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() + t[2] = time_sync() + except Exception: # no backward method + # print(e) # for debug + t[2] = float('nan') + tf += (t[1] - t[0]) * 1000 / n # ms per op forward + tb += (t[2] - t[1]) * 1000 / n # ms per op backward + mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + results.append([p, flops, mem, tf, tb, s_in, s_out]) + except Exception as e: + print(e) + results.append(None) + torch.cuda.empty_cache() + return results + + +def is_parallel(model): + # Returns True if model is of type DP or DDP + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def de_parallel(model): + # De-parallelize a model: returns single-GPU model if model is of type DP or DDP + return model.module if is_parallel(model) else model + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # 
Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... ', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def fuse_conv_and_bn(conv, bn): + # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, img_size=640): + # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPs + from thop import profile + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 + img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs + img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float + fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs + except (ImportError, Exception): + fs = '' + + LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + else: + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include [...] and to exclude [...] 
+ for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +class EarlyStopping: + # YOLOv5 simple early stopper + def __init__(self, patience=30): + self.best_fitness = 0.0 # i.e. mAP + self.best_epoch = 0 + self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.possible_stop = False # possible stop may occur next epoch + + def __call__(self, epoch, fitness): + if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training + self.best_epoch = epoch + self.best_fitness = fitness + delta = epoch - self.best_epoch # epochs without improvement + self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch + stop = delta >= self.patience # stop training if patience exceeded + if stop: + LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' + f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' + f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' + f'i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.') + return stop + + +class ModelEMA: + """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models + Keep a moving average of everything in the model state_dict (parameters and buffers). + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + A smoothed version of the weights is necessary for some training schemes to perform well. + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + """ + + def __init__(self, model, decay=0.9999, updates=0): + # Create EMA + self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA + # if next(model.parameters()).device.type != 'cpu': + # self.ema.half() # FP16 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + with torch.no_grad(): + self.updates += 1 + d = self.decay(self.updates) + + msd = de_parallel(model).state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: + v *= d + v += (1 - d) * msd[k].detach() + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) diff --git a/scripts/utils/useing_video.py b/scripts/utils/useing_video.py new file mode 100644 index 0000000..37e8e0c --- /dev/null +++ b/scripts/utils/useing_video.py @@ -0,0 +1,39 @@ +# import pyrealsense2 as rs +import numpy as np +import cv2 +import os + +target = "Video" +TARGET_VIDEO = r"/home/autonav-linux/catkin_ws/src/yolov5_ROS/scripts/traffic_light_test.mp4" +TARGET_FOLDER = r"/home/autonav-linux/catkin_ws/src/yolov5_ROS/scripts/images" + +# Configure depth and color streams +class Camera: + + def __init__(self): + """ + Camera initializing part, usually insert here your camera code except loop part(l.e. 
get next frame)
+        """
+        if target == "Video":
+            self.vid = cv2.VideoCapture(TARGET_VIDEO)
+
+        elif target == "folder":
+            self.img_list = os.listdir(TARGET_FOLDER)
+
+    def get_next_img(self) -> np.ndarray:
+        """
+        Return the next image to the main code; the return type must be np.ndarray.
+        Returns: img (np.ndarray)
+        """
+        # Read the next frame from the video, or the next image from the folder
+        if target == "Video":
+            _, frame = self.vid.read()
+            return frame
+
+        elif target == "folder":
+            img = cv2.imread(fr"{TARGET_FOLDER}/{self.img_list[0]}")
+            del self.img_list[0]
+            return img
\ No newline at end of file
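
Note on scripts/utils/useing_video.py: the Camera class only supplies frames; the consuming loop is expected to live in the main node. A minimal driver sketch follows, assuming TARGET_VIDEO/TARGET_FOLDER point at existing data; the import path, window name, and exit key are illustrative choices, not part of this patch.

import cv2
from useing_video import Camera  # assumes the script's directory is on the import path

cam = Camera()
while True:
    img = cam.get_next_img()
    if img is None:  # video exhausted or read failure
        break
    cv2.imshow("frame", img)          # window name is arbitrary
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to stop early
        break
cv2.destroyAllWindows()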
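
Note on scripts/utils/torch_utils.py: the patch adds ModelEMA and EarlyStopping but not the training loop that drives them. Below is a minimal sketch of how they are typically wired together, assuming scripts/ is on the import path; the model, optimizer, dummy batches, and the fitness value are placeholders, not code from this patch.

import torch
import torch.nn as nn
from utils.torch_utils import ModelEMA, EarlyStopping  # layout as added in this patch

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.SiLU())   # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
ema = ModelEMA(model)            # EMA copy of the weights, decay ramps toward 0.9999
stopper = EarlyStopping(patience=30)

for epoch in range(300):
    for _ in range(10):                                   # placeholder for the real dataloader loop
        optimizer.zero_grad()
        loss = model(torch.zeros(1, 3, 64, 64)).sum()     # placeholder forward/loss
        loss.backward()
        optimizer.step()
        ema.update(model)        # blend current weights into the EMA copy after each step
    fitness = 0.0                # placeholder: normally a weighted mAP from validation
    if stopper(epoch, fitness):  # stop if no improvement for `patience` epochs
        break
# ema.ema holds the smoothed model normally used for validation and export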